Dataset columns:
  repo_name : string (lengths 8 to 130)
  hexsha    : sequence
  file_path : sequence
  code      : sequence
  apis      : sequence
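A minimal sketch of how records with these fields might be read, assuming they are stored one JSON object per line (JSON Lines); the file name `samples.jsonl` is hypothetical, and the actual dataset may instead be distributed in another serialization (for example Parquet loaded via a dataset library). It only illustrates that `hexsha`, `file_path`, `code`, and `apis` are parallel per-file sequences within each repository record.

```python
import json

def iter_records(path):
    """Yield one parsed record per line from a JSON Lines file."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

def summarize(record):
    """Print one summary line per file in a record: repo, path, size, API count."""
    # file_path, code and apis are parallel sequences (one entry per file in the repo).
    for file_path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
        print(f'{record["repo_name"]}:{file_path} '
              f'({len(code.splitlines())} lines, {len(apis)} APIs)')

if __name__ == "__main__":
    for rec in iter_records("samples.jsonl"):  # hypothetical file name
        summarize(rec)
```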
govindap/lyft_motion_prediction
[ "15412444fec69ce4a0082d8de730cb882833eab0" ]
[ "lyft_CNN.py" ]
[ "import numpy as np\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchvision.models.resnet import resnet50, resnet34\nfrom torch import Tensor\nfrom typing import Dict\n\nfrom l5kit.configs import load_config_data\nfrom l5kit.data import LocalDataManager, ChunkedDataset\nfrom l5kit.dataset import AgentDataset, EgoDataset\nfrom l5kit.rasterization import build_rasterizer\nfrom l5kit.evaluation import write_pred_csv, compute_metrics_csv, read_gt_csv, create_chopped_dataset\nfrom l5kit.evaluation.chop_dataset import MIN_FUTURE_STEPS\nfrom l5kit.evaluation.metrics import neg_multi_log_likelihood, time_displace\nfrom l5kit.geometry import transform_points\nfrom l5kit.visualization import PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR, draw_trajectory\nfrom pathlib import Path\nimport pandas as pd\nimport os\nimport random\nimport time\nimport gc, psutil\n\ncfg = {\n 'format_version': 4,\n 'model_params': {\n 'model_architecture': \"resnet34\",\n 'history_num_frames': 10,\n 'history_step_size': 1,\n 'history_delta_time': 0.1,\n 'future_num_frames': 50,\n 'future_step_size': 1,\n 'future_delta_time': 0.1,\n 'model_name': \"model_resnet34\",\n 'lr': 1e-3,\n 'train': True,\n 'predict': True\n },\n\n 'raster_params': {\n 'raster_size': [224, 224],\n 'pixel_size': [0.5, 0.5],\n 'ego_center': [0.25, 0.5],\n 'map_type': 'py_semantic',\n 'satellite_map_key': 'aerial_map/aerial_map.png',\n 'semantic_map_key': 'semantic_map/semantic_map.pb',\n 'dataset_meta_key': 'meta.json',\n 'filter_agents_threshold': 0.5\n },\n\n 'train_data_loader': {\n 'key': 'scenes/train.zarr',\n 'batch_size': 16,\n 'shuffle': True,\n 'num_workers': 0\n },\n 'test_data_loader': {\n 'key': 'scenes/test.zarr',\n 'batch_size': 16,\n 'shuffle': False,\n 'num_workers': 0,\n },\n 'train_params': {\n 'steps': 120,\n 'update_steps': 50,\n 'checkpoint_steps': 100,\n 'precision': True\n }\n}\n\nclass LyftCNNModel(nn.Module):\n\n def __init__(self, cfg: Dict, num_modes=3):\n super().__init__()\n\n architecture = cfg[\"model_params\"][\"model_architecture\"]\n backbone = eval(architecture)(pretrained=True, progress=True)\n self.backbone = backbone\n\n num_history_channels = (cfg[\"model_params\"][\"history_num_frames\"] + 1) * 2\n num_in_channels = 3 + num_history_channels\n\n self.backbone.conv1 = nn.Conv2d(\n num_in_channels,\n self.backbone.conv1.out_channels,\n kernel_size=self.backbone.conv1.kernel_size,\n stride=self.backbone.conv1.stride,\n padding=self.backbone.conv1.padding,\n bias=False,\n )\n\n if architecture == \"resnet50\":\n backbone_out_features = 2048\n else:\n backbone_out_features = 512\n\n # X, Y coords for the future positions (output shape: batch_sizex50x2)\n self.future_len = cfg[\"model_params\"][\"future_num_frames\"]\n num_targets = 2 * self.future_len\n\n # You can add more layers here.\n self.head = nn.Sequential(\n # nn.Dropout(0.2),\n nn.Linear(in_features=backbone_out_features, out_features=4096),\n )\n\n self.num_preds = num_targets * num_modes\n self.num_modes = num_modes\n\n self.logit = nn.Linear(4096, out_features=self.num_preds + num_modes)\n\n def forward(self, x):\n x = self.backbone.conv1(x)\n x = self.backbone.bn1(x)\n x = self.backbone.relu(x)\n x = self.backbone.maxpool(x)\n\n x = self.backbone.layer1(x)\n x = self.backbone.layer2(x)\n x = self.backbone.layer3(x)\n x = self.backbone.layer4(x)\n\n x = self.backbone.avgpool(x)\n x = torch.flatten(x, 1)\n\n x = self.head(x)\n x = self.logit(x)\n\n # pred (batch_size)x(modes)x(time)x(2D coords)\n # confidences 
(batch_size)x(modes)\n bs, _ = x.shape\n pred, confidences = torch.split(x, self.num_preds, dim=1)\n pred = pred.view(bs, self.num_modes, self.future_len, 2)\n assert confidences.shape == (bs, self.num_modes)\n confidences = torch.softmax(confidences, dim=1)\n return pred, confidences\n\n" ]
[ [ "torch.nn.Linear", "torch.split", "torch.flatten", "torch.nn.Conv2d", "torch.softmax" ] ]
zhouhan921001/DeepLearning-homework
[ "20562dc49ca5898b531a678c0e54c8d985fcc72f" ]
[ "DLCoursera_part1_week4_1.py" ]
[ "import numpy as np\nfrom dnn_utils import sigmoid,sigmoid_backward,relu,relu_backward\n\ndef initialize_two_layer(n_x,n_h,n_y):\n\n\tW1 = np.random.randn(n_h,n_x) * 0.01\n\tb1 = np.zeros(n_h,1)\n\tW2 = np.random.randn(n_y,n_h) * 0.01\n\tb2 = np.zeros(n_y,1)\n\n\tparam = {\"W1\":W1,\"b1\":b1,\"W2\":W2,\"b2\":b2}\n\n\treturn param\n\ndef initialize_l_layer(layer_dims):\n\t\n\tparam = {}\n\tL = len(layer_dims)\n\n\tfor l in range(1, L):\n\t\tparam['W' + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1]) * 0.01\n\t\tparam['b' + str(l)] = np.zeros(layer_dims[l],1)\n\n\treturn param\n\ndef linear_forward(W,A,b):\n\t\"\"\"\n\tImplement the linear part of neural unit\n\t\"\"\"\n\n\tZ = np.dot(W,A) + b\n\n\treturn Z\n\ndef linear_activation_forward(A_pre,W,b,activation):\n\t\"\"\"\n\tImplement neural unit with the activation of Relu or sigmoid\n\t\"\"\"\n\n\tif activation == \"Relu\":\n\n\t\tZ = linear_forward(W,A_pre,b)\n\t\tA,activation_cache = relu(Z)\n\n\telif activation == \"sigmoid\":\n\n\t\tZ = linear_forward(W,A_pre,b)\n\t\tA,activation_cache = sigmoid(Z)\n\n\t\tbackward_used_cache = (A_pre,W,b)\n\t\tcache = (backward_used_cache,activation_cache)\n\treturn A,cache\n\ndef L_model_forward(X,param):\n\t\"\"\"\n\tImplement forward propagation for L layers model\n\t\"\"\"\n\n\tcaches = []\n\tL = len(param) // 2\n\tA = X\n\n\tfor l in range(1,L):\n\n\t\tA,cache = linear_activation_forward(A,param['W'+str(l)],param['b'+str(l)],Relu)\n\t\tcaches.append(cache)\n\n\tAl,cache = linear_activation_forward(A,param['W'+str(l)],param['b'+str(l)],Relu)\n\tcaches.append(cache)\n\n\treturn Al,caches\n\ndef linear_backward(dz,cache):\n\t\"\"\"\n\tImplement the backward propagation of linear part\n\t\"\"\"\n\n\tm = dz.shape[1]\n\tdw = np.dot(dz,cache[0]) / m\n\tdb = np.sum(dz) / m\n\tdA_pre = np.dot(cache[1],dz)\n\n\treturn dw,db,dA_pre\n\ndef linear_activation_backward(dA,cache,activation):\n\t\"\"\"\n\tImplement the backward propagation of neural unit\n\t\"\"\"\n\n\tif activation == \"Relu\":\n\t\tdz = relu_backward(dA,cache[1])\n\n\telif activation == \"sigmoid\":\n\t\tdz = sigmoid_backward(dA,cache[1])\n\n\tdw,db,dA_pre = linear_backward(dz,cache[0])\n\n\treturn dw,db,dA_pre\n\ndef L_model_backward(AL,Y,caches):\n\t\"\"\"\n\tImplement the backward propagation for L layer model\n\t\"\"\"\n\tgrads = {}\n\tL = len(caches)\n\n\tdAl = - (np.divide(Y,AL) - np.divide(1-Y,1-AL))\n\tgrads['dw'+str(L)],grads['db'+str(L)],grads['dA'+str(L)] = linear_activation_backward(dAL,caches[-1],\"sigmoid\")\n\n\tfor l in reversed(range(L-1)):\n\t\tcache = caches[l]\n\t\tgrads['dw'+str(l+1)],grads['db'+str(l+1)],grads['dA'+str(l+1)] = linear_activation_backward(grads['dA'+str(l+2)],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcache,\"Relu\")\n\treturn grads\n\ndef update_param(param,grads,learning_rate):\n\t\"\"\"\n\tUpdate the parameters\n\t\"\"\"\n\n\tL = len(param) // 2\n\tfor l in range(L):\n\t\tparam['W'+str(l+1)] = param['W'+str(l+1)] - learning_rate * grads['W'+str(l+1)]\n\t\tparam['b'+str(l+1)] = param['b'+str(l+1)] - learning_rate * grads['b'+str(l+1)]\n\n\treturn param\n" ]
[ [ "numpy.sum", "numpy.divide", "numpy.zeros", "numpy.random.randn", "numpy.dot" ] ]
rmodi6/word-representations
[ "4f9a13cee9ff60ce3c667c833330b59de774ed39" ]
[ "word2vec_basic.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport os, sys\nimport random\nimport zipfile\n\nimport numpy as np\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nimport loss_func as tf_func\n\nimport pickle\nfrom collections import namedtuple\n\n\n\n\nWord2Vec = namedtuple('Word2Vec', ['train_inputs', 'train_labels', 'loss', 'optimizer', 'global_step',\n 'embeddings', 'normalized_embeddings', 'valid_embeddings','similarity', \n 'saver','summary', 'summary_writer'])\n\ndef maybe_create_path(path):\n if not os.path.exists(path):\n os.mkdir(path)\n print (\"Created a path: %s\"%(path))\n\n\ndef maybe_download(filename, expected_bytes):\n #Download a file if not present, and make sure it's the right size.\n if not os.path.exists(filename):\n print('Downloading %s'%(url+filename))\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename\n\n\n\n# Read the data into a list of strings.\ndef read_data(filename):\n #Extract the first file enclosed in a zip file as a list of words\n with zipfile.ZipFile(filename) as f:\n data = tf.compat.as_str(f.read(f.namelist()[0])).split()\n return data\n\n\n\ndef build_dataset(words):\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reverse_dictionary\n\n\ndef generate_batch(data, batch_size, num_skips, skip_window):\n \"\"\"\n Write the code generate a training batch\n\n @data_index: the index of a word. You can access a word using data[data_index]\n @batch_size: the number of instances in one batch\n @num_skips: the number of samples you want to draw in a window \n (In the below example, it was 2)\n @skip_windows: decides how many words to consider left and right from a context word. \n (So, skip_windows*2+1 = window_size)\n \n batch will contain word ids for context words. Dimension is [batch_size].\n labels will contain word ids for predicting(target) words. 
Dimension is [batch_size, 1].\n\n\n \"\"\"\n\n global data_index\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n\n \"\"\"\n =================================================================================\n\n You will generate small subset of training data, which is called batch.\n For skip-gram model, you will slide a window\n and sample training instances from the data insdie the window.\n\n Here is a small example.\n Suppose that we have a text: \"The quick brown fox jumps over the lazy dog.\"\n And batch_size = 8, window_size = 3\n\n \"[The quick brown] fox jumps over the lazy dog\"\n\n Context word would be 'quick' and predicting words are 'The' and 'brown'.\n This will generate training examples:\n context(x), predicted_word(y)\n (quick , The)\n (quick , brown)\n\n And then move the sliding window.\n \"The [quick brown fox] jumps over the lazy dog\"\n In the same way, we have to two more examples:\n (brown, quick)\n (brown, fox)\n\n move thd window again,\n \"The quick [brown fox jumps] over the lazy dog\"\n and we have\n (fox, brown)\n (fox, jumps)\n\n Finally we get two instance from the moved window,\n \"The quick brown [fox jumps over] the lazy dog\"\n (jumps, fox)\n (jumps, over)\n\n Since now we have 8 training instances, which is the batch size,\n stop generating batch and return batch data.\n\n\n ===============================================================================\n \"\"\"\n # Initialize batch_count to 0\n batch_count = 0\n while batch_count < batch_size: # Continue while we haven't generated required number of batches\n # Re-initialize data_index so that there are skip_window words on either side of data_index\n if (data_index - skip_window) < 0 or (data_index + skip_window) >= len(data):\n data_index = skip_window\n left_context_word = data_index - 1 # Index for outer words on left side of data_index\n right_context_word = data_index + 1 # Index for outer words on right side of data_index\n for x in range(skip_window): # Loop skip_window times\n batch[batch_count] = data[data_index] # Add data_index word to batch as center word\n labels[batch_count, 0] = data[left_context_word] # Add left index word to labels as target word\n batch[batch_count+1] = data[data_index] # Add data_index word to batch as center word\n labels[batch_count+1, 0] = data[right_context_word] # Add right index word to labels as target word\n batch_count += 2 # Increment batch_count by 2 as we added 2 words: one from left and one from right\n left_context_word -= 1 # Move left index towards left\n right_context_word += 1 # Move right index towards right\n data_index += 1 # Increment data_index making next word as center word\n return batch, labels # Return the generated batches and labels\n\n\ndef build_model(sess, graph, loss_model):\n \"\"\"\n Builds a tensor graph model\n \"\"\"\n model = None\n with graph.as_default():\n # Ops and variables pinned to the CPU because of missing GPU implementation\n with tf.device('/cpu:0'):\n # Input data.\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n\n global_step = tf.Variable(0, trainable=False)\n\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n embed = 
tf.nn.embedding_lookup(embeddings, train_inputs)\n\n sm_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n\n # Get context embeddings from lables\n true_w = tf.nn.embedding_lookup(sm_weights, train_labels)\n true_w = tf.reshape(true_w, [-1, embedding_size])\n\n\n # Construct the variables for the NCE loss \n nce_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n if loss_model == 'cross_entropy':\n loss = tf.reduce_mean(tf_func.cross_entropy_loss(embed, true_w))\n else:\n #sample negative examples with unigram probability\n sample = np.random.choice(vocabulary_size, num_sampled, p=unigram_prob, replace=False)\n\n loss = tf.reduce_mean(tf_func.nce_loss(embed, nce_weights, nce_biases, train_labels, sample, unigram_prob))\n\n # tf.summary.scalar('loss', loss)\n\n # Construct the SGD optimizer using a learning rate of 1.0.\n optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss, global_step=global_step)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n \n saver = tf.train.Saver(tf.global_variables())\n\n # Save summary\n # summary = tf.summary.merge_all()\n # summary_writer = tf.summary.FileWriter(summary_path + '/summary', sess.graph)\n summary = None\n summary_writer = None\n\n tf.global_variables_initializer().run()\n print(\"Initialized\")\n\n model = Word2Vec(train_inputs, train_labels, loss, optimizer, global_step, embeddings, \n normalized_embeddings, valid_embeddings, similarity, saver, summary, summary_writer)\n\n return model\n\n\ndef load_pretrained_model(sess, model, pretrained_model_path):\n if not os.path.exists(filename):\n print(\"Missing pre-trained model: [%s]\"%(pretrained_model_path)) \n return\n\n ckpt = tf.train.get_checkpoint_state(pretrained_model_path)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(sess, ckpt.model_checkpoint_path)\n\n\ndef train(sess, model, data, dictionary, batch_size, num_skips, skip_window, \n max_num_steps, checkpoint_step, loss_model):\n \n average_loss_step = max(checkpoint_step/10, 100)\n\n average_loss = 0\n for step in xrange(max_num_steps):\n batch_inputs, batch_labels = generate_batch(data, batch_size, num_skips, skip_window)\n feed_dict = {model.train_inputs.name: batch_inputs, model.train_labels.name: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n # _, loss_val, summary = sess.run([model.optimizer, model.loss, model.summary], feed_dict=feed_dict)\n _, loss_val = sess.run([model.optimizer, model.loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % average_loss_step == 0:\n if step > 0:\n average_loss /= average_loss_step\n # The average loss is an estimate of the loss over the last 2000 batches.\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n # model.summary_writer.add_summary(summary, model.global_step.eval())\n # 
model.summary_writer.flush()\n\n # Note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % checkpoint_step == 0:\n sim = model.similarity.eval()\n for i in xrange(valid_size):\n valid_word = reverse_dictionary[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k + 1]\n log_str = \"Nearest to %s:\" % valid_word\n for k in xrange(top_k):\n close_word = reverse_dictionary[nearest[k]]\n log_str = \"%s %s,\" % (log_str, close_word)\n print(log_str)\n # chkpt_path = os.path.join(checkpoint_model_path, 'w2v_%s.cpkt'%(loss_model))\n # model.saver.save(sess, chkpt_path, global_step=model.global_step.eval())\n\n\n # model.summary_writer.close()\n\n # Saving the final embedding to a file \n final_embeddings = model.normalized_embeddings.eval()\n\n return final_embeddings\n\n\n\n\nif __name__ == '__main__':\n\n loss_model = 'cross_entropy'\n if len(sys.argv) > 1:\n if sys.argv[1] == 'nce':\n loss_model = 'nce'\n\n\n ####################################################################################\n # Step 1: Download the data.\n url = 'http://mattmahoney.net/dc/'\n filename = maybe_download('text8.zip', 31344016)\n\n\n words = read_data(filename)\n print('Data size', len(words))\n\n\n ####################################################################################\n # Step 2: Build the dictionary and replace rare words with UNK token.\n vocabulary_size = 100000 \n\n data, count, dictionary, reverse_dictionary = build_dataset(words)\n del words # Hint to reduce memory.\n print('Most common words (+UNK)', count[:5])\n print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])\n\n #Calculate the probability of unigrams\n unigram_cnt = [c for w, c in count]\n total = sum(unigram_cnt)\n unigram_prob = [c*1.0/total for c in unigram_cnt]\n\n data_index = 0\n\n\n ####################################################################################\n # Step 3: Test the function that generates a training batch for the skip-gram model.\n # TODO You must implement this method \"generate_batch\"\n # Uncomment below to check batch output\n\n # batch, labels = generate_batch(data, batch_size=8, num_skips=2, skip_window=1)\n # for i in range(8):\n # print(batch[i], reverse_dictionary[batch[i]],\n # '->', labels[i, 0], reverse_dictionary[labels[i, 0]])\n\n\n ####################################################################################\n # Hyper Parameters to config\n batch_size = 128\n embedding_size = 128 # Dimension of the embedding vector.\n skip_window = 4 # How many words to consider left and right.\n num_skips = 8 # How many times to reuse an input to generate a label.\n\n\n # We pick a random validation set to sample nearest neighbors. 
Here we limit the\n # validation samples to the words that have a low numeric ID, which by\n # construction are also the most frequent.\n valid_size = 16 # Random set of words to evaluate similarity on.\n valid_window = 100 # Only pick dev samples in the head of the distribution.\n valid_examples = np.random.choice(valid_window, valid_size, replace=False)\n num_sampled = 64 # Number of negative examples to sample.\n\n # summary_path = './summary_%s'%(loss_model)\n pretrained_model_path = './pretrained/'\n\n checkpoint_model_path = './checkpoints_%s/'%(loss_model)\n model_path = './models'\n\n \n # maximum training step\n max_num_steps = 200001\n checkpoint_step = 50000\n \n\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n\n ####################################################################################\n # Step 4: Build and train a skip-gram model.\n model = build_model(sess, graph, loss_model)\n\n # You must start with the pretrained model. \n # If you want to resume from your checkpoints, change this path name\n\n load_pretrained_model(sess, model, pretrained_model_path)\n\n\n ####################################################################################\n # Step 6: Begin training.\n maybe_create_path(checkpoint_model_path)\n embeddings = train(sess, model, data, dictionary, batch_size, num_skips, skip_window, \n max_num_steps, checkpoint_step, loss_model)\n\n\n ####################################################################################\n # Step 7: Save the trained model.\n trained_steps = model.global_step.eval()\n\n maybe_create_path(model_path)\n model_filepath = os.path.join(model_path, 'word2vec_%s.model'%(loss_model))\n print(\"Saving word2vec model as [%s]\"%(model_filepath))\n pickle.dump([dictionary, trained_steps, embeddings], open(model_filepath, 'w'))\n\n" ]
[ [ "tensorflow.reshape", "tensorflow.train.checkpoint_exists", "tensorflow.matmul", "tensorflow.Variable", "tensorflow.global_variables_initializer", "tensorflow.device", "numpy.random.choice", "numpy.ndarray", "tensorflow.Graph", "tensorflow.constant", "tensorflow.global_variables", "tensorflow.random_uniform", "tensorflow.Session", "tensorflow.nn.embedding_lookup", "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.train.get_checkpoint_state", "tensorflow.train.GradientDescentOptimizer", "tensorflow.square" ] ]
daniel20162016/my-first
[ "f9554dd476302b26e8a296393025f150922f349c" ]
[ "read_xml_all/calcul_matrix_compare_ce_good_192matrix.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 31 15:45:22 2016\n\n@author: wang\n\"\"\"\n#from matplotlib import pylab as plt\n#from numpy import fft, fromstring, int16, linspace\n#import wave\n\nfrom read_wav_xml_good_1 import*\nfrom matrix_24_2 import*\nfrom max_matrix_norm import*\n\nimport numpy as np\n# open a wave file\nfilename = 'francois_filon_pure_3.wav'\nfilename_1 ='francois_filon_pure_3.xml'\nword ='ce'\n\nwave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)\n#print 'word_start_point=',word_start_point\n#print 'word_length_point=',word_length_point\n#print 'word_end_point=',word_end_point\n\nXJ_1 =wave_signal_float\n\nt_step=1920;\nt_entre_step=1440;\n\nt_du_1_1 = int(word_start_point[0]);\nt_du_1_2 = int(word_end_point[0]);\n\nt_du_2_1 = int(word_start_point[1]);\nt_du_2_2 = int(word_end_point[1]);\n\nt_du_3_1 = int(word_start_point[2]);\nt_du_3_2 = int(word_end_point[2]);\n\nt_du_4_1 = int(word_start_point[3]);\nt_du_4_2 = int(word_end_point[3]);\n\nt_du_5_1 = int(word_start_point[4]);\nt_du_5_2 = int(word_end_point[4]);\nfs=framerate\n#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];\n#length_XJ_du_1 = int(word_length_point[0]+1);\n#x1,y1,z1=matrix_24_2(XJ_du_1,fs)\n#x1=max_matrix_norm(x1)\n\n\n#==============================================================================\n# this part is to calcul the first matrix \n#==============================================================================\nXJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];\nx1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\nx1_1=max_matrix_norm(x1_1)\nmatrix_all_step_new_1 = np.zeros([192])\n\nfor i in range(0,24):\n matrix_all_step_new_1[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\nfor i in range(1,8):\n XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_1[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the second matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_2_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_2 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_2[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_2[24*i+j]=x1_all[j]\n \n#==============================================================================\n# this part is to calcul the 3 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_3_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_3 = 
np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_3[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24): \n matrix_all_step_new_3[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the 4 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_4_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_4 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_4[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_4[24*i+j]=x1_all[j]\n#print 'matrix_all_step_4=',matrix_all_step_4\n\n#==============================================================================\n# this part is to calcul the 5 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_5_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_5 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_5[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_5[24*i+j]=x1_all[j] \n#print 'matrix_all_step_5=',matrix_all_step_5\n\nnp.savez('ce_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)\n\n" ]
[ [ "numpy.savez", "numpy.zeros" ] ]
daniel-thom/PyDSS
[ "8c7ae2d3a17d596b42a92e33f7d29329e26fbc30" ]
[ "PyDSS/pyPostprocessor/PostprocessScripts/DERMSOptimizer_helper_modules/opt_funcs.py" ]
[ "import numpy as np\nfrom scipy.sparse import lil_matrix\nimport scipy.sparse.linalg as sp\nimport scipy.sparse as sparse\nimport math\nimport csv\nimport matplotlib.pyplot as plt\n\ndef linear_powerflow_model(Y00,Y01,Y10,Y11_inv,I_coeff,V1,slack_no):\n # voltage linearlization\n V1_conj = np.conj(V1[slack_no:])\n V1_conj_inv = 1 / V1_conj\n coeff_V = Y11_inv * V1_conj_inv\n coeff_V_P = coeff_V\n coeff_V_Q = -1j*coeff_V\n coeff_Vm = -np.dot(Y11_inv,np.dot(Y10,V1[:slack_no]))\n\n # voltage magnitude linearization\n m = coeff_Vm\n m_inv = 1 / coeff_Vm\n coeff_Vmag_k = abs(m)\n A = (np.multiply(coeff_V.transpose(),m_inv)).transpose()\n coeff_Vmag_P = (np.multiply(A.real.transpose(),coeff_Vmag_k)).transpose()\n coeff_Vmag_Q = (np.multiply((-1j*A).real.transpose(),coeff_Vmag_k)).transpose()\n\n # current linearization\n if len(I_coeff):\n coeff_I_P = np.dot(I_coeff[:,slack_no:],coeff_V_P)\n coeff_I_Q = np.dot(I_coeff[:,slack_no:],coeff_V_Q)\n coeff_I_const = np.dot(I_coeff[:,slack_no:],coeff_Vm) + np.dot(I_coeff[:,:slack_no],V1[:slack_no])\n else:\n coeff_I_P = []\n coeff_I_Q = []\n coeff_I_const = []\n\n #=========================================Yiyun's Notes===========================================#\n # Output relations: Vmag = coeff_Vmag_P * Pnode + coeff_Vmag_Q * Qnode + coeff_Vm\n # I = coeff_I_P * Pnode + coeff_I_Q * Qnode + coeff_I_const (complex value)\n # ================================================================================================#\n\n return coeff_V_P, coeff_V_Q, coeff_Vm, coeff_Vmag_P, coeff_Vmag_Q, coeff_Vmag_k, coeff_I_P, coeff_I_Q, coeff_I_const\n\ndef validate_linear_model(coeff_Vp,coeff_Vq,coeff_Vm,PQ_node,slack_number):\n V_cal = coeff_Vm + np.dot(coeff_Vp,np.array([np.real(ii)*1000 for ii in PQ_node[slack_number:]])) + np.dot(coeff_Vq,np.array([np.imag(ii)*1000 for ii in PQ_node[slack_number:]]))\n v_cal_1 = coeff_Vm + np.dot(coeff_Vp,np.conj(PQ_node[slack_number:]*1000))\n #coeff_Vp*Pnode + coeff_Vq*Qnode + coeff_Vm\n\n # =========================================Yiyun's Notes===========================================#\n # 1000 should be the S base\n # =================================================================================================#\n\n return [V_cal,v_cal_1]\n\ndef check_VI_correct(V1,PQ_node,slack_number,coeff_V,coeff_Vm,coeff_Vmag_P,coeff_Vmag_Q,coeff_Vmag_k,Y10,Y11,coeff_I_P, coeff_I_Q, coeff_I_const,I_coeff):\n V1_linear = np.dot(coeff_V,np.conj(PQ_node[slack_number:]*1000)) + coeff_Vm\n V1_linear = list(V1_linear)\n Vdiff = list(map(lambda x: abs(x[0]-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_linear)))\n print(sum(Vdiff))\n with open('voltage_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Vdiff)\n f.close()\n\n V1_mag_linear = np.dot(coeff_Vmag_P,(PQ_node[slack_number:]*1000).real) + np.dot(coeff_Vmag_Q,(PQ_node[slack_number:]*1000).imag) + coeff_Vmag_k\n V1_mag_linear = list(V1_mag_linear)\n Vdiff = list(map(lambda x: abs(abs(x[0])-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_mag_linear)))\n print(sum(Vdiff))\n with open('voltageMag_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Vdiff)\n f.close()\n\n # get Ibus \n Ibus = list(map(lambda x: (x[0]*1000/x[1]).conjugate(),zip(list(PQ_node)[slack_number:],V1[slack_number:])))\n Ibus_cal_0 = np.dot(Y10,V1[0:slack_number])\n Ibus_cal_1 = np.dot(Y11,V1[slack_number:])\n Ibus_cal = list(map(lambda x: x[0]+x[1],zip(Ibus_cal_0,Ibus_cal_1)))\n Idiff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibus,Ibus_cal)))\n print(sum(Idiff))\n with 
open('currentBus_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Idiff)\n f.close()\n\n # get Ibranch\n Ibranch = np.dot(I_coeff,V1)\n Ibranch_cal = np.dot(I_coeff[:,slack_number:],V1_linear)+np.dot(I_coeff[:,0:slack_number],V1[:slack_number])\n Ibranch_diff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibranch,Ibranch_cal)))\n print(sum(Ibranch_diff))\n with open('current_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Ibranch_diff)\n f.close()\n\ndef costFun(x,dual_upper,dual_lower,v1_pu,Ppv_max,coeff_p,coeff_q,NPV,control_bus_index,Vupper,Vlower,dual_current,ThermalLimit,I1_mag):\n # cost_function = coeff_p*(Pmax-P)^2+coeff_q*Q^2+dual_upper*(v1-1.05)+dual_lower*(0.95-v1)\n f1 = 0\n for ii in range(NPV):\n f1 = f1 + coeff_p*(Ppv_max[ii]-x[ii])*(Ppv_max[ii]-x[ii])+coeff_q*x[ii+NPV]*x[ii+NPV]\n #f = f1 + np.dot(dual_upper,(np.array(v1_pu)[control_bus_index]-Vupper)) + np.dot(dual_lower,(Vlower-np.array(v1_pu)[control_bus_index]))\n v_evaluate = [v1_pu[ii] for ii in control_bus_index]\n f2 = f1 + np.dot(dual_upper,np.array([max(ii-Vupper,0) for ii in v_evaluate])) + np.dot(dual_lower,np.array([max(Vlower-ii,0) for ii in v_evaluate]))\n f3 = np.dot(dual_current,np.array([max(ii,0) for ii in list(map(lambda x: x[0]*x[0]-x[1]*x[1],zip(I1_mag,ThermalLimit)))]))\n f = f2+f3\n\n # =========================================Yiyun's Notes===========================================#\n # f1 is the quadratic PV curtailment plus quadratic reactive power injection\n # f2 is the Lagrangian term for voltage violations and line current violations\n # ===> Note the \"control_bus_index\" might be the index for measurement sensitivity analysis\n # =================================================================================================#\n\n return [f1,f]\n\ndef PV_costFun_gradient(x, coeff_p, coeff_q, Pmax):\n grad = np.zeros(len(x))\n for ii in range(int(len(x)/2)):\n grad[ii] = -2*coeff_p*(Pmax[ii]*1000-x[ii]*1000)\n grad[ii+int(len(x)/2)] = 2*coeff_q*x[ii+int(len(x)/2)]*1000\n #grad[ii + int(len(x) / 2)] = 0\n\n # =========================================Yiyun's Notes===========================================#\n # x is the decision vector [P,Q]\n # =================================================================================================#\n\n return grad\n\ndef voltage_constraint_gradient(AllNodeNames,node_withPV, dual_upper, dual_lower, coeff_Vmag_p, coeff_Vmag_q):\n node_noslackbus = AllNodeNames\n node_noslackbus[0:3] = []\n\n # =========================================Yiyun's Notes===========================================#\n # remove the slack bus\n # =================================================================================================#\n\n grad_upper = np.matrix([0] * len(node_noslackbus)*2).transpose()\n grad_lower = np.matrix([0] * len(node_noslackbus)*2).transpose()\n count = 0\n for node in node_noslackbus:\n if node in node_withPV:\n grad_upper[count] = dual_upper.transpose()*coeff_Vmag_p[:,count]\n grad_upper[count+len(node_noslackbus)] = dual_upper.transpose() * coeff_Vmag_q[:,count]\n grad_lower[count] = -dual_lower.transpose() * coeff_Vmag_p[:, count]\n grad_lower[count + len(node_noslackbus)] = -dual_lower.transpose() * coeff_Vmag_q[:, count]\n count = count + 1\n return [grad_upper,grad_lower]\n\ndef current_constraint_gradient(AllNodeNames,node_withPV, dual_upper,coeff_Imag_p, coeff_Imag_q):\n node_noslackbus = AllNodeNames\n node_noslackbus[0:3] = []\n grad_upper = np.matrix([0] * len(node_noslackbus)*2).transpose()\n count 
= 0\n for node in node_noslackbus:\n if node in node_withPV:\n grad_upper[count] = dual_upper.transpose()*coeff_Imag_p[:,count]\n grad_upper[count+len(node_noslackbus)] = dual_upper.transpose() * coeff_Imag_q[:,count]\n count = count + 1\n return grad_upper\n\n # =========================================Yiyun's Notes===========================================#\n # PV_costFun_gradient, voltage_constraint_gradient, current_constraint_gradient and project_PV..\n # ... are set up for updating the PV decision variables in eq(10)\n # =================================================================================================#\n\ndef voltage_constraint(V1_mag):\n g = V1_mag-1.05\n g.append(0.95-V1_mag)\n return g\n\ndef current_constraint(I1_mag,Imax):\n g = []\n g.append(I1_mag-Imax)\n\n # =========================================Yiyun's Notes===========================================#\n # assume single directional power flow\n # voltage_constraint, current_constraint, and project_dualvariable are set up for updating the dual...\n # ... variables in eq (11)\n # =================================================================================================#\n\n return g\n\ndef project_dualvariable(mu):\n for ii in range(len(mu)):\n mu[ii] = max(mu[ii],0)\n\n # =========================================Yiyun's Notes===========================================#\n # If the corresponding constraints in primal problem is in canonical form, then dual variable is >=0\n # =================================================================================================#\n\n return mu\n\ndef project_PV(x,Pmax,Sinv):\n Qavailable = 0\n Pavailable = 0\n num = len(Sinv)\n for ii in range(num):\n if x[ii] > Pmax[ii]:\n x[ii] = Pmax[ii]\n elif x[ii] < 0:\n x[ii] = 0\n\n if Sinv[ii] > x[ii]:\n Qmax = math.sqrt(Sinv[ii]*Sinv[ii]-x[ii]*x[ii])\n else:\n Qmax = 0\n if x[ii+num] > Qmax:\n x[ii+num] = Qmax\n # elif x[ii + num] < 0:\n # x[ii + num] = 0\n elif x[ii+num] < -Qmax:\n x[ii+num] = -Qmax\n\n Pavailable = Pavailable + Pmax[ii]\n Qavailable = Qavailable + Qmax\n return [x,Pavailable,Qavailable]\n\ndef dual_update(mu,coeff_mu,constraint):\n mu_new = mu + coeff_mu*constraint\n mu_new = project_dualvariable(mu_new)\n\n # =========================================Yiyun's Notes===========================================#\n # normal way for update Lagrangian variable is by the sub-gradient of cost function\n # Here is the equation (11) in the draft paper\n # =================================================================================================#\n\n return mu_new\n\ndef matrix_cal_for_subPower(V0, Y00, Y01, Y11, V1_noload):\n diag_V0 = np.matrix([[complex(0, 0)] * 3] * 3)\n diag_V0[0, 0] = V0[0]\n diag_V0[1, 1] = V0[1]\n diag_V0[2, 2] = V0[2]\n K = diag_V0 * Y01.conj() * np.linalg.inv(Y11.conj())\n g = diag_V0 * Y00.conj() * np.matrix(V0).transpose().conj() + diag_V0 * Y01.conj() * V1_noload.conj()\n return[K,g]\n\ndef subPower_PQ(V1, PQ_node, K, g):\n diag_V1 = np.matrix([[complex(0, 0)] * len(V1)] * len(V1))\n for ii in range(len(V1)):\n diag_V1[ii, ii] = V1[ii]\n M = K * np.linalg.inv(diag_V1)\n MR = M.real\n MI = M.imag\n P0 = g.real + (MR.dot(PQ_node.real)*1000 - MI.dot(PQ_node.imag)*1000)\n Q0 = g.imag + (MR.dot(PQ_node.imag)*1000 + MI.dot(PQ_node.real)*1000)\n\n P0 = P0/1000\n Q0 = Q0/1000 # convert to kW/kVar\n\n # =========================================Yiyun's Notes===========================================#\n # Power injection at substation/feeder head\n # 
=================================================================================================#\n\n return [P0, Q0, M]\n\ndef sub_costFun_gradient(x, sub_ref, coeff_sub, sub_measure, M, node_withPV):\n grad_a = np.matrix([0] * len(x)).transpose()\n grad_b = np.matrix([0] * len(x)).transpose()\n grad_c = np.matrix([0] * len(x)).transpose()\n\n MR = M.real\n MI = M.imag\n count = 0\n for node in node_withPV:\n grad_a[count] = -MR[0, int(node)]\n grad_b[count] = -MR[1, int(node)]\n grad_c[count] = -MR[2, int(node)]\n\n grad_a[count + len(node_withPV)] = MI[0, int(node)]\n grad_b[count + len(node_withPV)] = MI[1, int(node)]\n grad_c[count + len(node_withPV)] = MI[2, int(node)]\n\n count = count + 1\n\n res = coeff_sub * ((sub_measure[0] - sub_ref[0]) *1000* grad_a + (sub_measure[1] - sub_ref[1])*1000 * grad_b\n + (sub_measure[2] - sub_ref[2])*1000 * grad_c)\n res = res/1000\n\n return res\n\ndef projection(x,xmax,xmin):\n for ii in range(len(x)):\n if x.item(ii) > xmax[ii]:\n x[ii] = xmax[ii]\n if x.item(ii) < xmin[ii]:\n x[ii] = xmin[ii]\n return x\n\nclass DERMS:\n def __init__(self, pvData,controlbus,controlelem,controlelem_limit,sub_node_names,sub_elem_names):\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # PV_name: names of all PVs in the zone\n # PV_size: sizes of all PVs in the zone\n # PV_location: busnames of all PVs in the zone\n # controlbus: names of all controlled nodes\n # sub_node_names: names of all nodes in the zone\n # sub_node_names \"include\" controlbus\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n self.PV_name = pvData[\"pvName\"]\n self.PV_location = pvData[\"pvLocation\"]\n self.PV_size = pvData[\"pvSize\"]\n self.inverter_size = pvData[\"inverterSize\"]\n self.control_bus = controlbus\n\n sub_node_names = [ii.upper() for ii in sub_node_names]\n self.controlbus_index = [sub_node_names.index(ii.upper()) for ii in controlbus] # control bus index in the sub system (number)\n # here\n PVbus_index = []\n for bus in self.PV_location:\n temp = bus.split('.')\n if len(temp) == 1:\n temp = temp + ['1', '2', '3']\n for ii in range(len(temp) - 1):\n PVbus_index.append(sub_node_names.index((temp[0] + '.' 
+ temp[ii + 1]).upper()))\n\n # =========================================Yiyun's Notes===========================================#\n # adding .1 .2 .3 following the number to recognize the three phases.\n # =================================================================================================#\n self.PVbus_index = PVbus_index\n self.control_elem = controlelem\n self.controlelem_limit = controlelem_limit\n self.controlelem_index = [sub_elem_names.index(ii) for ii in controlelem] # control branches index in the sub system (number)\n\n def monitor(self, dss, dssObjects, PVSystem_1phase):\n PVpowers = []\n for pv in PVSystem_1phase[\"Name\"].tolist():\n nPhases = dssObjects[\"Generators\"][pv].GetValue(\"phases\")\n power = dssObjects[\"Generators\"][pv].GetValue(\"Powers\")\n PVpowers.append([sum(power[::2])/nPhases, sum(power[1::2])/nPhases])\n PVpowers = np.asarray(PVpowers)\n\n Vmes = []\n for bus in self.control_bus:\n busName = bus.split('.')[0].lower()\n Vmag = dssObjects[\"Buses\"][busName].GetValue(\"puVmagAngle\")[::2]\n allbusnode = dss.Bus.Nodes()\n phase = bus.split('.')[1]\n index = allbusnode.index(int(phase))\n Vnode = Vmag[index]\n Vmes.append(Vnode)\n\n Imes = []\n for elem in self.control_elem:\n className = elem.split('.')[0] + \"s\"\n I = dssObjects[className][elem].GetValue(\"CurrentsMagAng\")[::2][:3] #TODO: Why is there a hardcoded [:3] ?\n Imes.append(I)\n\n return [self.PV_location,PVpowers,Vmes,Imes]\n\n\n\n def control(self, linear_PF_coeff, Options,stepsize,mu0,Vlimit,PVpower,Imes,Vmes,PV_Pmax_forecast):\n coeff_p = Options[\"coeff_p\"]\n coeff_q = Options[\"coeff_q\"]\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # linear_PF_coeff is the linear power flow model coefficients for the zone, and linear power flow model\n # coefficients are the result vector from function \"linear_powerflow_model\"\n # coeff_p, coeff_q are constant coefficients in PV cost function\n # stepsize is a vector of stepsize constants\n # mu0 is the dual variable from last time step: mu_Vmag_upper0, mu_Vmag_lower0, mu_I0\n # Vlimit is the allowed voltage limit: Vupper and Vlower\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n PVname = self.PV_name\n NPV = len(PVname)\n x0 = np.zeros(2 * NPV)\n for ii in range(NPV):\n x0[ii] = -PVpower[ii][0] # in kW\n x0[ii + NPV] = -PVpower[ii][1] # in kVar\n\n #coeff_V_P = linear_PF_coeff[0]\n #coeff_V_Q = linear_PF_coeff[1]\n #coeff_Vm = linear_PF_coeff[2]\n coeff_Vmag_P = linear_PF_coeff[3]\n coeff_Vmag_Q = linear_PF_coeff[4]\n #coeff_Vmag_k = linear_PF_coeff[5]\n coeff_I_P = linear_PF_coeff[6]\n coeff_I_Q = linear_PF_coeff[7]\n #coeff_I_const = linear_PF_coeff[8]\n stepsize_xp = stepsize[0]\n stepsize_xq = stepsize[1]\n stepsize_mu = stepsize[2]\n Vupper = Vlimit[0]\n Vlower = Vlimit[1]\n\n controlbus_index = self.controlbus_index\n PVbus_index = self.PVbus_index\n controlelem_index = self.controlelem_index\n PV_inverter_size = self.inverter_size\n Imes_limit = self.controlelem_limit\n\n mu_Vmag_upper0 = mu0[0]\n mu_Vmag_lower0 = mu0[1]\n mu_I0 = mu0[2]\n\n #print([max(mu_Vmag_upper0),max(mu_Vmag_lower0)])\n # compute gradient\n\n PVcost_fun_gradient = PV_costFun_gradient(x0, coeff_p, coeff_q, PV_Pmax_forecast)\n\n Vmag_upper_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0),\n np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index], [ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0)),axis=0)\n Vmag_lower_gradient = 
np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0),\n np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0)),axis=0)\n\n Vmag_gradient = Vmag_upper_gradient - Vmag_lower_gradient\n if len(mu_I0)>0 :\n temp_real = mu_I0 * np.array(Imes.real)\n temp_imag = mu_I0 * np.array(Imes.imag)\n\n I_gradient_real = np.concatenate((np.dot(\n coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),\n temp_real), np.dot(\n coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),\n temp_real)), axis=0)\n I_gradient_imag = np.concatenate((np.dot(\n coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),\n temp_imag), np.dot(\n coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),\n temp_imag)), axis=0)\n I_gradient = 2 * I_gradient_real + 2 * I_gradient_imag\n else:\n I_gradient = 0\n\n gradient = PVcost_fun_gradient + Vmag_gradient + I_gradient / 1000\n\n # compute x1, mu1\n x1 = np.concatenate([x0[:NPV] - stepsize_xp * gradient[:NPV], x0[NPV:] - stepsize_xq * gradient[NPV:]])\n #print('solved: '+str(sum(x1[0:NPV]))+','+str(sum(x1[NPV:]))) # in kW/kVar\n [x1, Pmax_allPV, Qmax_allPV] = project_PV(x1, PV_Pmax_forecast, PV_inverter_size)\n #print('Available P = '+str(Pmax_allPV)+' , Available Q = '+str(Qmax_allPV))\n #print('projected: ' + str(sum(x1[0:NPV])) + ',' + str(sum(x1[NPV:]))) # in kW/kVar\n x1 = np.array([round(ii, 5) for ii in x1])\n\n mu_Vmag_lower1 = mu_Vmag_lower0 + stepsize_mu * (Vlower - np.array(Vmes))\n mu_Vmag_upper1 = mu_Vmag_upper0 + stepsize_mu * (np.array(Vmes) - Vupper)\n mu_Vmag_lower1 = project_dualvariable(mu_Vmag_lower1)\n mu_Vmag_upper1 = project_dualvariable(mu_Vmag_upper1)\n if mu_I0:\n mu_I1 = mu_I0 + stepsize_mu / 300 * np.array(list(map(lambda x: x[0] * x[0] - x[1] * x[1], zip(Imes, Imes_limit))))\n mu_I1 = project_dualvariable(mu_I1)\n else:\n mu_I1 = mu_I0\n mu1 = [mu_Vmag_upper1,mu_Vmag_lower1,mu_I1]\n # =========================================Yiyun's Notes===========================================#\n # Each time of calling DERMS.control, it is a one step update of PV real and reactive power outputs\n # =================================================================================================#\n\n return [x1,mu1]\n" ]
[ [ "numpy.ix_", "numpy.zeros", "numpy.linalg.inv", "numpy.matrix", "numpy.conj", "numpy.concatenate", "numpy.asarray", "numpy.array", "numpy.dot", "numpy.real", "numpy.imag" ] ]
norberto-schmidt/openmc
[ "ff4844303154a68027b9c746300f5704f73e0875" ]
[ "tests/unit_tests/test_data_photon.py" ]
[ "#!/usr/bin/env python\n\nfrom collections.abc import Mapping, Callable\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport openmc.data\n\n\[email protected](scope='module')\ndef elements_endf():\n \"\"\"Dictionary of element ENDF data indexed by atomic symbol.\"\"\"\n endf_data = os.environ['OPENMC_ENDF_DATA']\n elements = {'H': 1, 'O': 8, 'Al': 13, 'Cu': 29, 'Ag': 47, 'U': 92, 'Pu': 94}\n data = {}\n for symbol, Z in elements.items():\n p_file = 'photoat-{:03}_{}_000.endf'.format(Z, symbol)\n p_path = os.path.join(endf_data, 'photoat', p_file)\n a_file = 'atom-{:03}_{}_000.endf'.format(Z, symbol)\n a_path = os.path.join(endf_data, 'atomic_relax', a_file)\n data[symbol] = openmc.data.IncidentPhoton.from_endf(p_path, a_path)\n return data\n\n\[email protected]()\ndef element(request, elements_endf):\n \"\"\"Element ENDF data\"\"\"\n return elements_endf[request.param]\n\n\[email protected](\n 'element, atomic_number', [\n ('Al', 13),\n ('Cu', 29),\n ('Pu', 94)\n ],\n indirect=['element']\n)\ndef test_attributes(element, atomic_number):\n assert element.atomic_number == atomic_number\n\n\[email protected](\n 'element, subshell, binding_energy, num_electrons', [\n ('H', 'K', 13.61, 1.0),\n ('O', 'L3', 14.15, 2.67),\n ('U', 'P2', 34.09, 2.0)\n ],\n indirect=['element']\n)\ndef test_atomic_relaxation(element, subshell, binding_energy, num_electrons):\n atom_relax = element.atomic_relaxation\n assert isinstance(atom_relax, openmc.data.photon.AtomicRelaxation)\n assert subshell in atom_relax.subshells\n assert atom_relax.binding_energy[subshell] == binding_energy\n assert atom_relax.num_electrons[subshell] == num_electrons\n\n\[email protected]('element', ['Al', 'Cu', 'Pu'], indirect=True)\ndef test_transitions(element):\n transitions = element.atomic_relaxation.transitions\n assert transitions\n assert isinstance(transitions, Mapping)\n for matrix in transitions.values():\n assert isinstance(matrix, pd.core.frame.DataFrame)\n assert len(matrix.columns) == 4\n assert sum(matrix['probability']) == pytest.approx(1.0)\n\n\[email protected](\n 'element, I, i_shell, ionization_energy, num_electrons', [\n ('H', 19.2, 0, 13.6, 1),\n ('O', 95.0, 2, 13.62, 4),\n ('U', 890.0, 25, 6.033, -3)\n ],\n indirect=['element']\n)\ndef test_bremsstrahlung(element, I, i_shell, ionization_energy, num_electrons):\n brems = element.bremsstrahlung\n assert isinstance(brems, Mapping)\n assert brems['I'] == I\n assert brems['num_electrons'][i_shell] == num_electrons\n assert brems['ionization_energy'][i_shell] == ionization_energy\n assert np.all(np.diff(brems['electron_energy']) > 0.0)\n assert np.all(np.diff(brems['photon_energy']) > 0.0)\n assert brems['photon_energy'][0] == 0.0\n assert brems['photon_energy'][-1] == 1.0\n assert brems['dcs'].shape == (200, 30)\n\n\[email protected](\n 'element, n_shell', [\n ('H', 1),\n ('O', 3),\n ('Al', 5)\n ],\n indirect=['element']\n)\ndef test_compton_profiles(element, n_shell):\n profile = element.compton_profiles\n assert profile\n assert isinstance(profile, Mapping)\n assert all(isinstance(x, Callable) for x in profile['J'])\n assert all(len(x) == n_shell for x in profile.values())\n\n\[email protected](\n 'element, reaction', [\n ('Cu', 541),\n ('Ag', 502),\n ('Pu', 504)\n ],\n indirect=['element']\n)\ndef test_reactions(element, reaction):\n reactions = element.reactions\n assert all(isinstance(x, openmc.data.PhotonReaction) for x in reactions.values())\n assert reaction in reactions\n with pytest.raises(KeyError):\n 
reactions[18]\n\n\[email protected]('element', ['Pu'], indirect=True)\ndef test_export_to_hdf5(tmpdir, element):\n filename = str(tmpdir.join('tmp.h5'))\n element.export_to_hdf5(filename)\n assert os.path.exists(filename)\n # Read in data from hdf5\n element2 = openmc.data.IncidentPhoton.from_hdf5(filename)\n # Check for some cross section and datasets of element and element2\n energy = np.logspace(np.log10(1.0), np.log10(1.0e10), num=100)\n for mt in (502, 504, 515, 517, 522, 541, 570):\n xs = element[mt].xs(energy)\n xs2 = element2[mt].xs(energy)\n assert np.allclose(xs, xs2)\n assert element[502].scattering_factor == element2[502].scattering_factor\n assert element.atomic_relaxation.transitions['O3'].equals(\n element2.atomic_relaxation.transitions['O3'])\n assert (element.compton_profiles['binding_energy'] ==\n element2.compton_profiles['binding_energy']).all()\n assert (element.bremsstrahlung['electron_energy'] ==\n element2.bremsstrahlung['electron_energy']).all()\n # Export to hdf5 again\n element2.export_to_hdf5(filename, 'w')\n\ndef test_photodat_only(run_in_tmpdir):\n endf_dir = Path(os.environ['OPENMC_ENDF_DATA'])\n photoatomic_file = endf_dir / 'photoat' / 'photoat-001_H_000.endf'\n data = openmc.data.IncidentPhoton.from_endf(photoatomic_file)\n data.export_to_hdf5('tmp.h5', 'w')" ]
[ [ "numpy.allclose", "numpy.log10", "numpy.diff" ] ]
eric91sanchez/hair_seg
[ "4f688daac0ec4ea906ff0462ae51634293e35447" ]
[ "hair_seg/evaluate.py" ]
[ "\"\"\"\nEvaluate\n\"\"\"\n\nimport re\nimport math\nimport datetime\nimport random\nimport torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\n\nfrom loss import iou_loss, HairMattingLoss, acc_loss, F1_loss\nfrom utils import create_multi_figure\n\nUSE_CUDA = torch.cuda.is_available()\nDEVICE = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\n\n\ndef evalTest(test_data, model, args):\n testloader = DataLoader(test_data, batch_size=4, shuffle=False)\n hairmat_loss = HairMattingLoss(args.grad_lambda)\n\n total_loss, total_iou, total_acc, total_f1 = 0, 0, 0, 0\n for batch in testloader:\n image, mask = (i.to(DEVICE) for i in batch)\n\n pred = model(image)\n total_loss += hairmat_loss(pred, mask, image).item()\n iloss = iou_loss(pred, mask).item()\n total_iou += iloss\n aloss = acc_loss(pred, mask).item()\n total_acc += aloss\n floss = F1_loss(pred, mask).item()\n total_f1 += floss\n\n print(\"Testing Loss: \", total_loss / len(testloader))\n print(\"Testing IOU: \", total_iou / len(testloader))\n print(\"Testing Acc: \", total_acc / len(testloader))\n print(\"Testing F1: \", total_f1 / len(testloader))\n\n\ndef evaluateOne(img, model, absolute=True):\n img = img.to(DEVICE).unsqueeze(0)\n pred = model(img)\n\n if absolute:\n pred[pred > 0.5] = 1.0\n pred[pred <= 0.5] = 0.0\n else:\n pred[pred < 0.4] = 0\n # pred[pred < .90] = 0\n\n rows = [[img[0], pred[0]]]\n create_multi_figure(rows, dye=True)\n plt.savefig(\"result.jpg\")\n\n\ndef evaluate(test_data, model, num, absolute=True):\n rows = [None] * num\n for i in range(num):\n idx = random.randint(0, len(test_data) - 1)\n\n image, mask = (i.to(DEVICE).unsqueeze(0) for i in test_data[idx])\n pred = model(image)\n\n if absolute:\n pred[pred > 0.5] = 1.0\n pred[pred <= 0.5] = 0.0\n else:\n pred[pred < 0.4] = 0\n\n rows[i] = [image[0], mask[0], pred[0]] # get batch\n\n create_multi_figure(rows, dye=True)\n plt.savefig(\"result.jpg\")\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.device", "matplotlib.pyplot.savefig" ] ]
pnijhara/improver
[ "5961a6fab9a79cd63a943eff07bf79d4e5f0ff03" ]
[ "improver_tests/between_thresholds/test_between_thresholds.py" ]
[ "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2020 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Tests for the OccurrenceBetweenThresholds plugin\"\"\"\n\nimport unittest\n\nimport iris\nimport numpy as np\nfrom iris.tests import IrisTest\n\nfrom improver.between_thresholds import OccurrenceBetweenThresholds\n\nfrom ..set_up_test_cubes import set_up_percentile_cube, set_up_probability_cube\n\n\nclass Test_process(IrisTest):\n \"\"\"Test the process method\"\"\"\n\n def setUp(self):\n \"\"\"Set up a test cube with probability data\"\"\"\n data = np.array(\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[0.9, 0.9, 0.9], [0.8, 0.8, 0.8], [0.7, 0.7, 0.7]],\n [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3]],\n [[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [0.1, 0.2, 0.2]],\n ],\n dtype=np.float32,\n )\n temp_thresholds = np.array([279, 280, 281, 282], dtype=np.float32)\n vis_thresholds = np.array([100, 1000, 5000, 10000], dtype=np.float32)\n\n self.temp_cube = set_up_probability_cube(data, temp_thresholds)\n self.vis_cube = set_up_probability_cube(\n np.flip(data, axis=0),\n vis_thresholds,\n variable_name=\"visibility\",\n threshold_units=\"m\",\n spp__relative_to_threshold=\"below\",\n )\n\n # set up a cube of rainfall rates in m s-1 (~1e-8 values)\n self.precip_cube = self.temp_cube.copy()\n self.precip_cube.coord(\"air_temperature\").rename(\"rainfall_rate\")\n self.precip_cube.coord(\"rainfall_rate\").var_name = \"threshold\"\n self.precip_cube.coord(\"rainfall_rate\").points = np.array(\n [0, 0.25, 0.5, 1], dtype=np.float32\n )\n self.precip_cube.coord(\"rainfall_rate\").units = \"mm h-1\"\n self.precip_cube.coord(\"rainfall_rate\").convert_units(\"m s-1\")\n\n def test_above_threshold(self):\n \"\"\"Test values from an \"above threshold\" cube\"\"\"\n threshold_ranges = [[280, 281], [281, 282]]\n expected_data = np.array(\n [\n [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]],\n [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], 
[0.0, 0.0, 0.1]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), \"K\")\n result = plugin(self.temp_cube)\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertEqual(\n result.name(), \"probability_of_air_temperature_between_thresholds\"\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n thresh_coord = result.coord(\"air_temperature\")\n self.assertArrayAlmostEqual(thresh_coord.points, [281.0, 282.0])\n self.assertArrayAlmostEqual(thresh_coord.bounds, threshold_ranges)\n self.assertEqual(\n thresh_coord.attributes[\"spp__relative_to_threshold\"], \"between_thresholds\"\n )\n\n def test_below_threshold(self):\n \"\"\"Test values from a \"below threshold\" cube\"\"\"\n threshold_ranges = [[1000, 5000]]\n expected_data = np.array(\n [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), \"m\")\n result = plugin(self.vis_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertArrayAlmostEqual(result.coord(\"visibility\").points, [5000.0])\n self.assertArrayAlmostEqual(result.coord(\"visibility\").bounds, threshold_ranges)\n\n def test_skip_threshold(self):\n \"\"\"Test calculation works for non-adjacent thresholds\"\"\"\n threshold_ranges = [[100, 1000], [1000, 10000]]\n expected_data = np.array(\n [\n [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],\n [[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"m\")\n result = plugin(self.vis_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n def test_threshold_units(self):\n \"\"\"Test calculation works for thresholds specified in different units\n from the cube data\"\"\"\n threshold_ranges = [[0.1, 1], [1, 10]]\n expected_data = np.array(\n [\n [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],\n [[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"km\")\n result = plugin(self.vis_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n # check original cube units are not modified\n self.assertEqual(self.vis_cube.coord(\"visibility\").units, \"m\")\n # check output cube units match original cube\n self.assertEqual(result.coord(\"visibility\").units, \"m\")\n self.assertArrayAlmostEqual(result.coord(\"visibility\").points, [1000, 10000])\n\n def test_error_non_probability_cube(self):\n \"\"\"Test failure if cube doesn't contain probabilities\"\"\"\n perc_cube = set_up_percentile_cube(\n np.ones((3, 3, 3), dtype=np.float32),\n np.array((25, 50, 75), dtype=np.float32),\n )\n plugin = OccurrenceBetweenThresholds([[25, 50]], \"K\")\n msg = \"Input is not a probability cube\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin(perc_cube)\n\n def test_error_between_thresholds_cube(self):\n \"\"\"Test failure if cube isn't above or below threshold\"\"\"\n # use plugin to generate a \"between_thresholds\" cube...\n between_thresholds_cube = OccurrenceBetweenThresholds(\n [[280, 281], [281, 282]], \"K\"\n )(self.temp_cube)\n plugin = OccurrenceBetweenThresholds([[281, 282]], \"K\")\n msg = \"Input cube must contain\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin(between_thresholds_cube)\n\n def test_error_thresholds_unavailable(self):\n \"\"\"Test error if cube doesn't contain the required thresholds\"\"\"\n threshold_ranges = [[10, 100], [1000, 30000]]\n plugin = 
OccurrenceBetweenThresholds(threshold_ranges, \"m\")\n msg = (\n \"visibility threshold 10 m is not available\\n\"\n \"visibility threshold 30000 m is not available\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n plugin(self.vis_cube)\n\n def test_threshold_matching_tolerance(self):\n \"\"\"Test threshold matching succeeds for absolute values close to\n zero\"\"\"\n new_thresholds = np.array([272.15, 273.15, 274.15, 275.15], dtype=np.float32)\n self.temp_cube.coord(\"air_temperature\").points = new_thresholds\n threshold_ranges = [[-1, 0], [0, 2]]\n expected_data = np.array(\n [\n [[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.3, 0.3, 0.3]],\n [[0.9, 0.9, 0.9], [0.7, 0.7, 0.7], [0.6, 0.5, 0.5]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"degC\")\n result = plugin(self.temp_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n def test_thresholds_indistinguishable(self):\n \"\"\"Test behaviour in a case where cube extraction cannot work within a\n tolerance of 1e-5\"\"\"\n # set threshold ranges in m s-1\n points = self.precip_cube.coord(\"rainfall_rate\").points.copy()\n threshold_ranges = [[points[1], points[2]]]\n msg = \"Plugin cannot distinguish between thresholds at\"\n with self.assertRaisesRegex(ValueError, msg):\n OccurrenceBetweenThresholds(threshold_ranges, \"m s-1\")\n\n def test_original_units_indistinguishable(self):\n \"\"\"Test cubes where thresholds are indistinguisable in SI units can be\n correctly processed using threshold ranges specified in a unit with\n more than 1e-5 discrimination\"\"\"\n expected_data = np.array(\n [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32\n )\n threshold_ranges = [[0.25, 0.5]]\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"mm h-1\")\n result = plugin(self.precip_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.flip" ] ]
lsqshr/pytorch-lightning
[ "c6b68883879e38719688865aceac746477f0a9b9" ]
[ "tests/core/test_datamodules.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pickle\nfrom argparse import ArgumentParser\nfrom typing import Any, Dict\nfrom unittest import mock\nfrom unittest.mock import call, PropertyMock\n\nimport pytest\nimport torch\n\nfrom pytorch_lightning import LightningDataModule, Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.utilities import AttributeDict\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom tests.helpers import BoringDataModule, BoringModel\nfrom tests.helpers.datamodules import ClassifDataModule\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.simple_models import ClassificationModel\nfrom tests.helpers.utils import reset_seed\n\n\[email protected](\"pytorch_lightning.trainer.trainer.Trainer.node_rank\", new_callable=PropertyMock)\[email protected](\"pytorch_lightning.trainer.trainer.Trainer.local_rank\", new_callable=PropertyMock)\ndef test_can_prepare_data(local_rank, node_rank):\n\n model = BoringModel()\n dm = BoringDataModule()\n trainer = Trainer()\n trainer.model = model\n trainer.datamodule = dm\n\n # 1 no DM\n # prepare_data_per_node = True\n # local rank = 0 (True)\n trainer.prepare_data_per_node = True\n\n dm.random_full = None\n dm._has_prepared_data = False\n local_rank.return_value = 0\n assert trainer.local_rank == 0\n assert trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is not None\n\n # local rank = 1 (False)\n dm.random_full = None\n dm._has_prepared_data = False\n local_rank.return_value = 1\n assert trainer.local_rank == 1\n assert not trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is None\n\n # prepare_data_per_node = False (prepare across all nodes)\n # global rank = 0 (True)\n dm.random_full = None\n dm._has_prepared_data = False\n trainer.prepare_data_per_node = False\n node_rank.return_value = 0\n local_rank.return_value = 0\n assert trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is not None\n\n # global rank = 1 (False)\n dm.random_full = None\n dm._has_prepared_data = False\n node_rank.return_value = 1\n local_rank.return_value = 0\n assert not trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is None\n\n node_rank.return_value = 0\n local_rank.return_value = 1\n assert not trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is None\n\n # 2 dm\n # prepar per node = True\n # local rank = 0 (True)\n trainer.prepare_data_per_node = True\n local_rank.return_value = 0\n\n # is_overridden prepare data = True\n # has been called\n # False\n dm._has_prepared_data = True\n assert not trainer.data_connector.can_prepare_data()\n\n # has not been called\n # True\n dm._has_prepared_data = False\n assert 
trainer.data_connector.can_prepare_data()\n\n # is_overridden prepare data = False\n # True\n dm.prepare_data = None\n assert trainer.data_connector.can_prepare_data()\n\n\ndef test_hooks_no_recursion_error():\n # hooks were appended in cascade every tine a new data module was instantiated leading to a recursion error.\n # See https://github.com/PyTorchLightning/pytorch-lightning/issues/3652\n class DummyDM(LightningDataModule):\n def setup(self, *args, **kwargs):\n pass\n\n def prepare_data(self, *args, **kwargs):\n pass\n\n for i in range(1005):\n dm = DummyDM()\n dm.setup()\n dm.prepare_data()\n\n\ndef test_helper_boringdatamodule():\n dm = BoringDataModule()\n dm.prepare_data()\n dm.setup()\n\n\ndef test_helper_boringdatamodule_with_verbose_setup():\n dm = BoringDataModule()\n dm.prepare_data()\n dm.setup(\"fit\")\n dm.setup(\"test\")\n\n\ndef test_data_hooks_called():\n dm = BoringDataModule()\n assert not dm.has_prepared_data\n assert not dm.has_setup_fit\n assert not dm.has_setup_test\n assert not dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.prepare_data()\n assert dm.has_prepared_data\n assert not dm.has_setup_fit\n assert not dm.has_setup_test\n assert not dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.setup()\n assert dm.has_prepared_data\n assert dm.has_setup_fit\n assert dm.has_setup_test\n assert dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.teardown()\n assert dm.has_prepared_data\n assert dm.has_setup_fit\n assert dm.has_setup_test\n assert dm.has_setup_validate\n assert not dm.has_setup_predict\n assert dm.has_teardown_fit\n assert dm.has_teardown_test\n assert dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n\[email protected](\"use_kwarg\", (False, True))\ndef test_data_hooks_called_verbose(use_kwarg):\n dm = BoringDataModule()\n dm.prepare_data()\n assert not dm.has_setup_fit\n assert not dm.has_setup_test\n assert not dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.setup(stage=\"fit\") if use_kwarg else dm.setup(\"fit\")\n assert dm.has_setup_fit\n assert not dm.has_setup_validate\n assert not dm.has_setup_test\n assert not dm.has_setup_predict\n\n dm.setup(stage=\"validate\") if use_kwarg else dm.setup(\"validate\")\n assert dm.has_setup_fit\n assert dm.has_setup_validate\n assert not dm.has_setup_test\n assert not dm.has_setup_predict\n\n dm.setup(stage=\"test\") if use_kwarg else dm.setup(\"test\")\n assert dm.has_setup_fit\n assert dm.has_setup_validate\n assert dm.has_setup_test\n assert not dm.has_setup_predict\n\n dm.setup(stage=\"predict\") if use_kwarg else dm.setup(\"predict\")\n assert dm.has_setup_fit\n assert dm.has_setup_validate\n assert dm.has_setup_test\n assert dm.has_setup_predict\n\n dm.teardown(stage=\"fit\") if use_kwarg else dm.teardown(\"fit\")\n assert dm.has_teardown_fit\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_test\n assert not dm.has_teardown_predict\n\n 
dm.teardown(stage=\"validate\") if use_kwarg else dm.teardown(\"validate\")\n assert dm.has_teardown_fit\n assert dm.has_teardown_validate\n assert not dm.has_teardown_test\n assert not dm.has_teardown_predict\n\n dm.teardown(stage=\"test\") if use_kwarg else dm.teardown(\"test\")\n assert dm.has_teardown_fit\n assert dm.has_teardown_validate\n assert dm.has_teardown_test\n assert not dm.has_teardown_predict\n\n dm.teardown(stage=\"predict\") if use_kwarg else dm.teardown(\"predict\")\n assert dm.has_teardown_fit\n assert dm.has_teardown_validate\n assert dm.has_teardown_test\n assert dm.has_teardown_predict\n\n\ndef test_dm_add_argparse_args(tmpdir):\n parser = ArgumentParser()\n parser = BoringDataModule.add_argparse_args(parser)\n args = parser.parse_args([\"--data_dir\", str(tmpdir)])\n assert args.data_dir == str(tmpdir)\n\n\ndef test_dm_init_from_argparse_args(tmpdir):\n parser = ArgumentParser()\n parser = BoringDataModule.add_argparse_args(parser)\n args = parser.parse_args([\"--data_dir\", str(tmpdir)])\n dm = BoringDataModule.from_argparse_args(args)\n dm.prepare_data()\n dm.setup()\n assert dm.data_dir == args.data_dir == str(tmpdir)\n\n\ndef test_dm_pickle_after_init():\n dm = BoringDataModule()\n pickle.dumps(dm)\n\n\ndef test_train_loop_only(tmpdir):\n reset_seed()\n\n dm = ClassifDataModule()\n model = ClassificationModel()\n\n model.validation_step = None\n model.validation_step_end = None\n model.validation_epoch_end = None\n model.test_step = None\n model.test_step_end = None\n model.test_epoch_end = None\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)\n\n # fit model\n trainer.fit(model, datamodule=dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.callback_metrics[\"train_loss\"] < 1.0\n\n\ndef test_train_val_loop_only(tmpdir):\n reset_seed()\n\n dm = ClassifDataModule()\n model = ClassificationModel()\n\n model.validation_step = None\n model.validation_step_end = None\n model.validation_epoch_end = None\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)\n\n # fit model\n trainer.fit(model, datamodule=dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.callback_metrics[\"train_loss\"] < 1.0\n\n\ndef test_dm_checkpoint_save(tmpdir):\n class CustomBoringModel(BoringModel):\n def validation_step(self, batch, batch_idx):\n out = super().validation_step(batch, batch_idx)\n self.log(\"early_stop_on\", out[\"x\"])\n return out\n\n class CustomBoringDataModule(BoringDataModule):\n def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n checkpoint[self.__class__.__name__] = self.__class__.__name__\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n self.checkpoint_state = checkpoint.get(self.__class__.__name__)\n\n reset_seed()\n dm = CustomBoringDataModule()\n model = CustomBoringModel()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=2,\n limit_val_batches=1,\n weights_summary=None,\n callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\")],\n )\n\n # fit model\n trainer.fit(model, dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]\n checkpoint = torch.load(checkpoint_path)\n assert dm.__class__.__name__ in checkpoint\n assert checkpoint[dm.__class__.__name__] == dm.__class__.__name__\n\n\ndef test_full_loop(tmpdir):\n reset_seed()\n\n 
dm = ClassifDataModule()\n model = ClassificationModel()\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None, deterministic=True)\n\n # fit model\n trainer.fit(model, dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert dm.trainer is not None\n\n # validate\n result = trainer.validate(model, dm)\n assert dm.trainer is not None\n assert result[0][\"val_acc\"] > 0.7\n\n # test\n result = trainer.test(model, dm)\n assert dm.trainer is not None\n assert result[0][\"test_acc\"] > 0.6\n\n\n@RunIf(min_gpus=1)\[email protected](\"pytorch_lightning.accelerators.accelerator.Accelerator.lightning_module\", new_callable=PropertyMock)\ndef test_dm_apply_batch_transfer_handler(get_module_mock):\n expected_device = torch.device(\"cuda\", 0)\n\n class CustomBatch:\n def __init__(self, data):\n self.samples = data[0]\n self.targets = data[1]\n\n class CurrentTestDM(LightningDataModule):\n rank = 0\n transfer_batch_to_device_hook_rank = None\n on_before_batch_transfer_hook_rank = None\n on_after_batch_transfer_hook_rank = None\n\n def on_before_batch_transfer(self, batch, dataloader_idx):\n assert dataloader_idx == 0\n self.on_before_batch_transfer_hook_rank = self.rank\n self.rank += 1\n batch.samples += 1\n return batch\n\n def on_after_batch_transfer(self, batch, dataloader_idx):\n assert dataloader_idx == 0\n assert batch.samples.device == batch.targets.device == expected_device\n self.on_after_batch_transfer_hook_rank = self.rank\n self.rank += 1\n batch.targets *= 2\n return batch\n\n def transfer_batch_to_device(self, batch, device, dataloader_idx):\n assert dataloader_idx == 0\n self.transfer_batch_to_device_hook_rank = self.rank\n self.rank += 1\n batch.samples = batch.samples.to(device)\n batch.targets = batch.targets.to(device)\n return batch\n\n dm = CurrentTestDM()\n model = BoringModel()\n\n batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))\n\n trainer = Trainer(gpus=1)\n # running .fit() would require us to implement custom data loaders, we mock the model reference instead\n get_module_mock.return_value = model\n if is_overridden(\"transfer_batch_to_device\", dm):\n model.transfer_batch_to_device = dm.transfer_batch_to_device\n\n model.on_before_batch_transfer = dm.on_before_batch_transfer\n model.transfer_batch_to_device = dm.transfer_batch_to_device\n model.on_after_batch_transfer = dm.on_after_batch_transfer\n\n batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)\n\n assert dm.on_before_batch_transfer_hook_rank == 0\n assert dm.transfer_batch_to_device_hook_rank == 1\n assert dm.on_after_batch_transfer_hook_rank == 2\n assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device\n assert torch.allclose(batch_gpu.samples.cpu(), torch.ones(5, 32))\n assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)\n\n\ndef test_dm_reload_dataloaders_every_n_epochs(tmpdir):\n \"\"\"\n Test datamodule, where trainer argument\n reload_dataloaders_every_n_epochs is set to a non negative integer\n \"\"\"\n\n class CustomBoringDataModule(BoringDataModule):\n def __init__(self):\n super().__init__()\n self._epochs_called_for = []\n\n def train_dataloader(self):\n assert self.trainer.current_epoch not in self._epochs_called_for\n self._epochs_called_for.append(self.trainer.current_epoch)\n return super().train_dataloader()\n\n dm = CustomBoringDataModule()\n model = BoringModel()\n\n model.validation_step = None\n model.validation_step_end = None\n 
model.validation_epoch_end = None\n model.test_step = None\n model.test_step_end = None\n model.test_epoch_end = None\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2)\n trainer.fit(model, dm)\n\n\nclass DummyDS(torch.utils.data.Dataset):\n def __getitem__(self, index):\n return 1\n\n def __len__(self):\n return 100\n\n\nclass DummyIDS(torch.utils.data.IterableDataset):\n def __iter__(self):\n yield 1\n\n\[email protected](\"iterable\", (False, True))\ndef test_dm_init_from_datasets_dataloaders(iterable):\n ds = DummyIDS if iterable else DummyDS\n\n train_ds = ds()\n dm = LightningDataModule.from_datasets(train_ds, batch_size=4, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.train_dataloader()\n dl_mock.assert_called_once_with(train_ds, batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True)\n assert dm.val_dataloader() is None\n assert dm.test_dataloader() is None\n\n train_ds_sequence = [ds(), ds()]\n dm = LightningDataModule.from_datasets(train_ds_sequence, batch_size=4, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.train_dataloader()\n dl_mock.assert_has_calls(\n [\n call(train_ds_sequence[0], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),\n call(train_ds_sequence[1], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),\n ]\n )\n assert dm.val_dataloader() is None\n assert dm.test_dataloader() is None\n\n valid_ds = ds()\n test_ds = ds()\n dm = LightningDataModule.from_datasets(val_dataset=valid_ds, test_dataset=test_ds, batch_size=2, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.val_dataloader()\n dl_mock.assert_called_with(valid_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)\n dm.test_dataloader()\n dl_mock.assert_called_with(test_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)\n assert dm.train_dataloader() is None\n\n valid_dss = [ds(), ds()]\n test_dss = [ds(), ds()]\n dm = LightningDataModule.from_datasets(train_ds, valid_dss, test_dss, batch_size=4, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.val_dataloader()\n dm.test_dataloader()\n dl_mock.assert_has_calls(\n [\n call(valid_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n call(valid_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n call(test_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n call(test_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n ]\n )\n\n\nclass DataModuleWithHparams(LightningDataModule):\n def __init__(self, arg0, arg1, kwarg0=None):\n super().__init__()\n self.save_hyperparameters()\n\n\ndef test_simple_hyperparameters_saving():\n data = DataModuleWithHparams(10, \"foo\", kwarg0=\"bar\")\n assert data.hparams == AttributeDict({\"arg0\": 10, \"arg1\": \"foo\", \"kwarg0\": \"bar\"})\n" ]
[ [ "torch.zeros", "torch.ones", "torch.device", "torch.load" ] ]
bbo-lab/multitrackpy
[ "a25ebdb94969b0682c851ab69ba5895173b581d0" ]
[ "multitrackpy/mtt.py" ]
[ "import numpy as np\nimport h5py\nfrom pprint import pprint\n\ndef read_calib(mtt_path):\n mtt_file = h5py.File(mtt_path)\n\n istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)\n calind = np.squeeze(np.int32(mtt_file['mt']['calind']))[istracking] - 1\n\n mc = {\n 'Rglobal': np.asarray(mtt_file['mt']['mc']['Rglobal']).transpose((0, 2, 1)), # in reverse order in h5 file!\n 'Tglobal': np.asarray(mtt_file['mt']['mc']['Tglobal']),\n 'cal': []\n }\n\n for ci in calind:\n mc['cal'].append({\n 'scaling': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['scaling'][ci, 0]]).T[0],\n 'icent': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['icent'][ci, 0]]).T[0],\n 'distortion_coefs': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['distortion_coefs'][ci, 0]]),\n 'sensorsize': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['sensorsize'][ci, 0]]).T[0],\n 'scale_pixels': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['scale_pixels'][ci, 0]]),\n })\n\n # pprint(mc)\n return mc\n\n\ndef read_video_paths(vid_dir, mtt_path):\n mtt_file = h5py.File(mtt_path)\n istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)\n return [vid_dir + ''.join([chr(c) for c in mtt_file[mtt_file['mt']['vidname'][0, i]][:].T.astype(np.int)[0]]) for i\n in np.where(istracking)[0]]\n\n\ndef read_spacecoords(mtt_path):\n mtt_file = h5py.File(mtt_path)\n return np.asarray(mtt_file['mt']['objmodel']['space_coord'])\n\n\ndef read_frame_n(mtt_path):\n mtt_file = h5py.File(mtt_path)\n return len(mtt_file['mt']['t'])\n" ]
[ [ "numpy.int32", "numpy.where", "numpy.asarray" ] ]
Leofltt/rg_sound_generation
[ "8e79b4d9dce028def43284f80521a2ec61d0066c" ]
[ "members/amit/clf/data_generator_binary.py" ]
[ "import random\nimport shutil\nimport os\nimport numpy as np\nimport data_loader\nimport audio_processing\n\nfrom typing import Dict\nfrom loguru import logger\nfrom tqdm import tqdm\nfrom pprint import pprint\n\n\nclass DataGenerator:\n def __init__(self, conf: Dict, batch_size: int = 8):\n assert \"csv_file_path\" in conf\n assert \"base_dir\" in conf\n self.conf = conf.copy()\n self.batch_size = batch_size\n self.examples = data_loader.data_loader(conf)\n self.num_examples = len(self.examples)\n self.train = {0: [], 1: []}\n self.valid = {0: [], 1: []}\n self.train_counts = {0: 0, 1: 0}\n self.valid_counts = {0: 0, 1: 0}\n self.num_train = 0\n self.num_valid = 0\n self.classes = [0, 1]\n self.input_shapes = {\n \"spec\": (),\n \"hpss\": ()\n }\n logger.info(\"DataGenerator instantiated\")\n self.preprocess()\n logger.info(\"Preprocessing complete\")\n\n def preprocess(self):\n logger.info(\"Preprocessing examples\")\n logger.info(f\"{self.input_shapes['spec']} = Current input shape for spec\")\n\n folder = os.path.join(self.conf.get(\"preprocess_dir\"))\n\n if self.conf.get(\"reset_data\"):\n if os.path.isdir(folder):\n shutil.rmtree(folder)\n\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\n min_level = 50 - self.conf.get(\"threshold\")\n max_level = 50 + self.conf.get(\"threshold\")\n valid_split = int(self.conf.get(\"valid_split\") * 100)\n\n logger.info(f\"Min level {min_level}, Max level {max_level}\")\n\n for key, value in tqdm(self.examples.items()):\n audio_file_name = value[\"audio_file_name\"]\n file_path = os.path.join(self.conf.get(\"base_dir\"), f\"{audio_file_name}.wav\")\n current_class = 1\n\n for j, feature in enumerate(self.conf.get(\"features\")):\n current_val = int(value[feature])\n current_class = -1\n if current_val < min_level:\n current_class = 0\n elif current_val > max_level:\n current_class = 1\n\n if current_class == -1:\n continue\n\n target_file_path = os.path.join(self.conf.get(\"preprocess_dir\"), audio_file_name)\n\n if not os.path.isfile(f\"{target_file_path}.spec.npy\"):\n spec, hpss = audio_processing.get_features(file_path, self.conf)\n self.input_shapes[\"spec\"] = spec.shape\n self.input_shapes[\"hpss\"] = hpss.shape\n np.save(f\"{target_file_path}.spec\", spec)\n np.save(f\"{target_file_path}.hpss\", hpss)\n elif len(self.input_shapes[\"spec\"]) == 0:\n spec = np.load(f\"{target_file_path}.spec.npy\")\n hpss = np.load(f\"{target_file_path}.hpss.npy\")\n logger.info(\"Setting input shapes based on previous files\")\n logger.info(f\"{spec.shape}, {hpss.shape}\")\n self.input_shapes[\"spec\"] = spec.shape\n self.input_shapes[\"hpss\"] = hpss.shape\n\n if random.randint(0, 99) < valid_split:\n self.valid[current_class].append(target_file_path)\n self.valid_counts[current_class] += 1\n else:\n self.train[current_class].append(target_file_path)\n self.train_counts[current_class] += 1\n self.num_train = sum(list(self.train_counts.values()))\n self.num_valid = sum(list(self.train_counts.values()))\n\n logger.info(\"Class counts in training set\")\n pprint(self.train_counts)\n logger.info(\"Class counts in validation set\")\n pprint(self.valid_counts)\n\n def generator(self, set_name: str):\n assert set_name in [\"train\", \"valid\"], \"Set name must be either train or valid\"\n\n while True:\n spec_batch = np.zeros((self.batch_size,) + self.input_shapes[\"spec\"])\n hpss_batch = np.zeros((self.batch_size,) + self.input_shapes[\"hpss\"])\n y_batch = np.zeros((self.batch_size, ))\n current_set = eval(f\"self.{set_name}\")\n\n for i in range(0, 
self.batch_size):\n target_class = random.choice([0, 1])\n example_file = random.choice(current_set[target_class])\n example_spec = np.load(f\"{example_file}.spec.npy\") * self.conf.get(\"scale_factor\")\n example_hpss = np.load(f\"{example_file}.hpss.npy\") * self.conf.get(\"scale_factor\")\n spec_batch[i] = example_spec\n hpss_batch[i] = example_hpss\n y_batch[i] = target_class\n\n yield {\"spec\": spec_batch, \"hpss\": hpss_batch}, {\"output\": y_batch}\n" ]
[ [ "numpy.save", "numpy.load", "numpy.zeros" ] ]
bshrram/Graduation-Project---Omnidirectional-Conveyor-Table
[ "6414fbcb3d53f3c3351c25ac8b48aa73397c250d" ]
[ "feedback_system/findTable.py" ]
[ "import numpy as np\nimport cv2 as cv\n\nflann_params= dict(algorithm = 6,\n table_number = 6, # 12\n key_size = 12, # 20\n multi_probe_level = 1) #2\n\n\ndef init_feature():\n \"\"\"initialize feature detector and matcher algorithm\n \"\"\"\n detector = cv.ORB_create(3000)\n norm = cv.NORM_HAMMING\n #matcher = cv.BFMatcher(norm)\n matcher = cv.FlannBasedMatcher(flann_params, {})\n return detector, matcher\n\n\ndef filter_matches(kp1, kp2, matches, ratio = 0.8):\n \"\"\"filter matches to keep strong matches only\n \"\"\"\n mkp1, mkp2 = [], []\n for m in matches:\n if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n m = m[0]\n mkp1.append( kp1[m.queryIdx] )\n mkp2.append( kp2[m.trainIdx] )\n p1 = np.float32([kp.pt for kp in mkp1])\n p2 = np.float32([kp.pt for kp in mkp2])\n kp_pairs = zip(mkp1, mkp2)\n return p1, p2, list(kp_pairs)\n\n\nc = []\ndef explore_match(win, img1, img2, kp_pairs, status = None, H = None):\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)\n vis[:h1, :w1, :3] = img1\n vis[:h2, w1:w1+w2, :3] = img2\n img3 = vis\n h3, w3 = img3.shape[:2]\n\n if H is not None:\n corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])\n corners1 = np.float32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))\n corners = np.int32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )\n c = corners\n cv.polylines(vis, [corners], True, (0, 0, 255))\n\n if status is None:\n status = np.ones(len(kp_pairs), np.bool_)\n \n p1, p2 = [], [] \n for kpp in kp_pairs:\n p1.append(np.int32(kpp[0].pt))\n p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))\n\n green = (0, 255, 0)\n red = (0, 0, 255)\n\n for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):\n if inlier:\n col = green\n cv.circle(vis, (x1, y1), 2, col, -1)\n cv.circle(vis, (x2, y2), 2, col, -1)\n else:\n col = red\n r = 2\n thickness = 3\n cv.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)\n cv.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)\n cv.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)\n cv.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)\n\n for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):\n if inlier:\n cv.line(vis, (x1, y1), (x2, y2), green)\n\n cv.imshow(win, vis)\n return corners1\n\n\nscale_percent =25\nimg1 = cv.imread(cv.samples.findFile('table7A.jpg'))\nwidth = int(img1.shape[1] * scale_percent / 100)\nheight = int(img1.shape[0] * scale_percent / 100)\n#img1 = cv.resize(img1, (width,height))\n\n\ndetector, matcher = init_feature()\n\n# apply orb on table image\nkp1, desc1 = detector.detectAndCompute(img1, None)\n\ndef getCorners(frame):\n \n # apply orb on frame\n kp2, desc2 = detector.detectAndCompute(frame, None)\n\n print('matching...')\n raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) \n #filter matches and keep strong matches\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n if len(p1) >= 4:\n # H: transformation matrix\n H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)\n print('%d / %d inliers/matched' % (np.sum(status), len(status)))\n else:\n H, status = None, None\n print('%d matches found, not enough for homography estimation' % len(p1))\n\n corners = explore_match('find_table', img1, frame, kp_pairs, status, H)\n return corners\n\ndef getTableFromFrame (corners, frame):\n h1, w1 = img1.shape[:2]\n h2, w2 = frame.shape[:2]\n vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)\n vis[:h1, :w1, :3] = img1\n vis[:h2, w1:w1+w2, 
:3] = frame\n pts1 = corners\n pts2 = np.float32([[0,0],[w1,0],[w1,h1], [0,h1]])\n M = cv.getPerspectiveTransform(pts1,pts2)\n # print((w1, h1))\n dst = cv.warpPerspective(vis, M,(w1,h1))\n return dst\n" ]
[ [ "numpy.int32", "numpy.sum", "numpy.float32", "numpy.array" ] ]
valkenzz/Bigeleisen_KIE
[ "aa82ee63c77be2e9d0bd97702c297aa70dfaa362" ]
[ "Bigeleisen_KIE.py" ]
[ "#Importation : \r\nimport pandas as pd\r\nimport numpy as np\r\n################################################\r\n#Parameters : \r\n#Planck constant (J/Hz)\r\nh=6.62607004*10**-34\r\n#Boltzmann constant (J/K)\r\nkB=1.38064852*10**-23\r\n#Light velocity in vaccum (m/s)\r\nc=299792458.0\r\n\r\n####################################################################################\r\n#Functions:\r\n######################################################################################\r\n#We check for errors : \r\n \r\n#We check if all values are positiv for initial states\r\ndef CheckPositiv(Data):\r\n if len(Data)!=len([i for i in Data if (i>0)]):\r\n print(\"At least one initial state hasn't every frequency that are positiv\")\r\n \r\ndef error(isH,isD,tsH,tsD):\r\n CheckPositiv(isH)\r\n CheckPositiv(isD) \r\n \r\n#####################################################################################\r\n\r\n\r\n#Function which takes the lists of vibration frequencies of 2 states to give the product of the ratio of frequencies\r\ndef Operation(Data1,Data2):\r\n if len(Data1)!=len(Data2):\r\n print(\"The number of frequencies isn't the same for two same states\")\r\n return \r\n x=1\r\n for i in range(len(Data1)):\r\n x=x*Data1[i]/Data2[i]\r\n return x\r\n\r\n#Function which takes one list of vibration frequencies to give the sinh of Ui = h*x/(kB*T) according to the Biegelheisen equation\r\ndef Ui(Data,T):\r\n return pd.Series(Data).apply(lambda x : np.sinh(float(x)*((h*100*c)/(2.0*kB*float(T)))))\r\n\r\n#Function which takes in entry the lists of frequencies (cm-1) and the temperature (K) and gives the KIE\r\n#isH is the vibration frequencies of the molecule containing the light isotope at the initial state\r\n#isD is the vibration frequencies of the molecule containing the heavy isotope at the initial state\r\n#tsH is the vibration frequencies of the molecule containing the light isotope at the transition state\r\n#tsD is the vibration frequencies of the molecule containing the heavy isotope at the transition state\r\n#T is the temperature in Kelvin\r\ndef KIE(isH,isD,tsH,tsD,T):\r\n error(isH,isD,tsH,tsD)\r\n #We calculate the sinh of h*x/(kB*T)\r\n UisH=Ui(isH,T).tolist()\r\n UtsH=Ui(tsH,T).tolist()\r\n UisD=Ui(isD,T).tolist()\r\n UtsD=Ui(tsD,T).tolist()\r\n #######################\r\n #We begin to calculate the ratio of the two imaginary frequencies\r\n op1=tsH[0]/tsD[0]\r\n Result=op1\r\n #We calculate the second factor\r\n Result=Result*Operation(tsH[1:],tsD[1:])\r\n #We calculate the third factor\r\n Result=Result*Operation(isD,isH)\r\n #We calculate the fourth factor\r\n Result=Result*Operation(UtsD[1:],UtsH[1:])\r\n #We calculate the fifth factor\r\n Result=Result*Operation(UisH,UisD) \r\n return Result\r\n\r\n\r\n####################################################################################\r\n\r\n\r\n" ]
[ [ "pandas.Series" ] ]
apexnetai/cifar-10-guide
[ "7c76f310e93da3a229ce9d66defd770ee1c7dc56" ]
[ "cifar10/custom_models.py" ]
[ "\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\n\nclass CustomResnetV1(nn.Module):\n\n def __init__(self):\n super(CustomResnetV1, self).__init__()\n self.resnet = torchvision.models.resnet18(pretrained=True)\n self.resnet.conv1 = nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0), bias=False)\n self.resnet.fc = nn.Linear(512, 256)\n\n self.bn1a = nn.BatchNorm1d(256)\n self.fc11 = nn.Linear(256, 256)\n self.fc12 = nn.Linear(256, 256)\n self.bn1b = nn.BatchNorm1d(256)\n self.fc13 = nn.Linear(256, 256)\n self.fc14 = nn.Linear(256, 256)\n self.bn1c = nn.BatchNorm1d(256)\n self.fc15 = nn.Linear(256, 256)\n self.fc16 = nn.Linear(256, 256)\n self.fc_down1 = nn.Linear(256, 128)\n\n self.bn2a = nn.BatchNorm1d(128)\n self.fc21 = nn.Linear(128, 128)\n self.fc22 = nn.Linear(128, 128)\n self.bn2b = nn.BatchNorm1d(128)\n self.fc23 = nn.Linear(128, 128)\n self.fc24 = nn.Linear(128, 128)\n self.bn2c = nn.BatchNorm1d(128)\n self.fc25 = nn.Linear(128, 128)\n self.fc26 = nn.Linear(128, 128)\n self.fc_down2 = nn.Linear(128, 64)\n\n self.bn3a = nn.BatchNorm1d(64)\n self.fc31 = nn.Linear(64, 64)\n self.fc32 = nn.Linear(64, 64)\n self.bn3b = nn.BatchNorm1d(64)\n self.fc33 = nn.Linear(64, 64)\n self.fc34 = nn.Linear(64, 64)\n self.bn3c = nn.BatchNorm1d(64)\n self.fc35 = nn.Linear(64, 64)\n self.fc36 = nn.Linear(64, 64)\n\n self.fc4 = nn.Linear(64, 10)\n\n #self.drop1 = nn.Dropout2d(0.5)\n\n def forward(self, x):\n x_ = F.relu(self.resnet(x))\n\n x = self.bn1a(x_)\n x = F.relu(self.fc11(x))\n x = F.relu(self.fc12(x))\n x_ = torch.add(x, x_)\n x = self.bn1b(x_)\n x = F.relu(self.fc13(x))\n x = F.relu(self.fc14(x))\n x_ = torch.add(x, x_)\n x = self.bn1c(x_)\n x = F.relu(self.fc15(x))\n x = F.relu(self.fc16(x))\n x_ = self.fc_down1(torch.add(x, x_))\n\n x = self.bn2a(x_)\n x = F.relu(self.fc21(x))\n x = F.relu(self.fc22(x))\n x_ = torch.add(x, x_)\n x = self.bn2b(x_)\n x = F.relu(self.fc23(x))\n x = F.relu(self.fc24(x))\n x_ = torch.add(x, x_)\n x = self.bn2c(x_)\n x = F.relu(self.fc25(x))\n x = F.relu(self.fc26(x))\n x_ = self.fc_down2(torch.add(x, x_))\n\n x = self.bn3a(x_)\n x = F.relu(self.fc31(x))\n x = F.relu(self.fc32(x))\n x_ = torch.add(x, x_)\n x = self.bn3b(x_)\n x = F.relu(self.fc33(x))\n x = F.relu(self.fc34(x))\n x_ = torch.add(x, x_)\n x = self.bn3c(x_)\n x = F.relu(self.fc35(x))\n x = F.relu(self.fc36(x))\n x_ = torch.add(x, x_)\n\n x = self.fc4(x_)\n\n return F.log_softmax(x, dim=1)\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.Linear", "torch.add", "torch.nn.BatchNorm1d", "torch.nn.Conv2d" ] ]
zxpzhong/DR_3DFM
[ "6ef7d0d86813f4cc407a0d1011a2623e4775fbee" ]
[ "utils/Finger/tool/tools.py" ]
[ "# 定义全局变量和方法\nimport numpy as np\nimport math\n# import process.process_finger_data as pfd\n\n# 目前选用的图片尺寸\ncur_pic_size = [640, 400]\n# cur_pic_size = [1280, 800]\n# 相机索引对应相机名称\ncamera_index_to_name = ['A', 'B', 'C', 'D', 'E', 'F']\n# 6个相机的外参\ncamera_a_outer_para = np.mat([[0.574322111, 0.771054881, 0.275006333, 0.93847817],\n [0.565423192, -0.130698104, -0.814379899, -0.36935905],\n [-0.591988790, 0.623211341, -0.511035123, 4.78810628],\n [0, 0, 0, 1]])\ncamera_b_outer_para = np.mat([[0.456023570, 0.727006744, 0.513326112, 1.72205846],\n [-0.146061166, 0.630108915, -0.762645980, -0.30452329],\n [-0.877900131, 0.272807532, 0.393531969, 5.53092307],\n [0, 0, 0, 1]])\ncamera_c_outer_para = np.mat([[0.609183831, 0.528225460, 0.591500569, 1.59956459],\n [-0.738350101, 0.649953779, 0.179997814, 0.5030131],\n [-0.289368602, -0.546386263, 0.785956655, 5.58635091],\n [0, 0, 0, 1]])\ncamera_d_outer_para = np.mat([[0.771746127, 0.478767298, 0.418556793, 0.955855425],\n [-0.476877262, 0.000270229651, 0.878969854, 0.477556906],\n [0.420708915, -0.877941799, 0.228521787, 4.61760675],\n [0, 0, 0, 1]])\ncamera_e_outer_para = np.mat([[0.788882832, 0.555210653, 0.263448302, 0.71648894],\n [0.159053746, -0.598545227, 0.785140445, 0.00777088],\n [0.593604063, -0.577481378, -0.560490387, 4.30437514],\n [0, 0, 0, 1]])\ncamera_f_outer_para = np.mat([[0.712321206, 0.689000523, 0.133704068, 1.13938413],\n [0.694227260, -0.719684989, 0.0101009224, -0.28640104],\n [0.103184351, 0.0856259076, -0.990969825, 4.49819911],\n [0, 0, 0, 1]])\n\n# 六个相机的内参\ncamera_a_inner_para = np.mat([[967.5377197, 0, 703.1273732, 0],\n [0, 967.9393921, 351.0187561, 0],\n [0, 0, 1, 0]])\ncamera_b_inner_para = np.mat([[963.2991943, 0, 589.8122291, 0],\n [0, 962.7422485, 412.5244055, 0],\n [0, 0, 1, 0]])\ncamera_c_inner_para = np.mat([[967.4086914, 0, 612.7826353, 0],\n [0, 968.0758667, 451.7366286, 0],\n [0, 0, 1, 0]])\ncamera_d_inner_para = np.mat([[961.0868530, 0, 692.7282436, 0],\n [0, 960.6126708, 417.4375162, 0],\n [0, 0, 1, 0]])\ncamera_e_inner_para = np.mat([[955.4882812, 0, 730.3056525, 0],\n [0, 953.7589722, 451.5117967, 0],\n [0, 0, 1, 0]])\ncamera_f_inner_para = np.mat([[962.0779419, 0, 595.2503222, 0],\n [0, 961.0998535, 396.8389609, 0],\n [0, 0, 1, 0]])\n\n# 六个相机的投影矩阵为 投影矩阵=内参x外参\n# 所有相机的投影矩阵放到一个三维矩阵里(1280x800)\nall_camera_projection_mat = [\n [[1.39434783e+02, 1.18422163e+03, -9.32437833e+01, 4.27466162e+03],\n [3.39496212e+02, 9.22510264e+01, -9.67653298e+02, 1.32319794e+03],\n [-5.91988790e-01, 6.23211341e-01, -5.11035123e-01, 4.78810628e+00]],\n [[-7.85090956e+01, 8.61230229e+02, 7.26596598e+02, 4.92106359e+03],\n [-5.02774485e+02, 7.19172239e+02, -5.71889964e+02, 1.98846331e+03],\n [-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]],\n [[4.12009678e+02, 1.76193887e+02, 1.05384338e+03, 4.97065152e+03],\n [-8.45497311e+02, 3.82381880e+02, 5.29296949e+02, 3.01051417e+03],\n [-2.89368602e-01, -5.46386263e-01, 7.85956655e-01, 5.58635091e+00]],\n [[1.03315200e+03, -1.48038125e+02, 5.60572927e+02, 4.11740670e+03],\n [-2.82474656e+02, -3.66226258e+02, 9.39743146e+02, 2.38630951e+03],\n [4.20708915e-01, -8.77941799e-01, 2.28521787e-01, 4.61760675e+00]],\n [[1.18728070e+03, 1.08759358e+02, -1.57607533e+02, 3.82810628e+03],\n [4.19718174e+02, -8.31607535e+02, 4.95766722e+02, 1.95088770e+03],\n [5.93604063e-01, -5.77481378e-01, -5.60490387e-01, 4.30437514e+00]],\n [[7.46729038e+02, 7.13841054e+02, -4.61241373e+02, 3.77373081e+03],\n [7.08169289e+02, -6.57709441e+02, -3.83547441e+02, 1.50980066e+03],\n 
[1.03184351e-01, 8.56259076e-02, -9.90969825e-01, 4.49819911e+00]]\n]\n# camera_a_projection_mat = np.mat([[1.39434783e+02, 1.18422163e+03, -9.32437833e+01, 4.27466162e+03],\n# [3.39496212e+02, 9.22510264e+01, -9.67653298e+02, 1.32319794e+03],\n# [-5.91988790e-01, 6.23211341e-01, -5.11035123e-01, 4.78810628e+00]])\n#\n# camera_b_projection_mat = np.mat([[-7.85090956e+01, 8.61230229e+02, 7.26596598e+02, 4.92106359e+03],\n# [-5.02774485e+02, 7.19172239e+02, -5.71889964e+02, 1.98846331e+03],\n# [-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]])\n#\n# camera_c_projection_mat = np.mat([[4.12009678e+02, 1.76193887e+02, 1.05384338e+03, 4.97065152e+03],\n# [-8.45497311e+02, 3.82381880e+02, 5.29296949e+02, 3.01051417e+03],\n# [-2.89368602e-01, -5.46386263e-01, 7.85956655e-01, 5.58635091e+00]])\n#\n# camera_d_projection_mat = np.mat([[1.03315200e+03, -1.48038125e+02, 5.60572927e+02, 4.11740670e+03],\n# [-2.82474656e+02, -3.66226258e+02, 9.39743146e+02, 2.38630951e+03],\n# [4.20708915e-01, -8.77941799e-01, 2.28521787e-01, 4.61760675e+00]])\n#\n# camera_e_projection_mat = np.mat([[1.18728070e+03, 1.08759358e+02, -1.57607533e+02, 3.82810628e+03],\n# [4.19718174e+02, -8.31607535e+02, 4.95766722e+02, 1.95088770e+03],\n# [5.93604063e-01, -5.77481378e-01, -5.60490387e-01, 4.30437514e+00]])\n#\n# camera_f_projection_mat = np.mat([[7.46729038e+02, 7.13841054e+02, -4.61241373e+02, 3.77373081e+03],\n# [7.08169289e+02, -6.57709441e+02, -3.83547441e+02, 1.50980066e+03],\n# [1.03184351e-01, 8.56259076e-02, -9.90969825e-01, 4.49819911e+00]])\n\n# 将图片缩小为640*400后的相机内参为: 四个参数都除以二\ncamera_a_inner_para_640_400 = np.mat([[483.76885985, 0, 351.5636866, 0],\n [0, 483.96969605, 175.50937805, 0],\n [0, 0, 1, 0]])\ncamera_b_inner_para_640_400 = np.mat([[481.64959715, 0, 294.90611455, 0],\n [0, 481.37112425, 206.26220275, 0],\n [0, 0, 1, 0]])\ncamera_c_inner_para_640_400 = np.mat([[483.7043457, 0, 306.39131765, 0],\n [0, 484.03793335, 225.8683143, 0],\n [0, 0, 1, 0]])\ncamera_d_inner_para_640_400 = np.mat([[480.5434265, 0, 346.3641218, 0],\n [0, 480.3063354, 208.7187581, 0],\n [0, 0, 1, 0]])\ncamera_e_inner_para_640_400 = np.mat([[477.7441406, 0, 365.15282625, 0],\n [0, 476.8794861, 225.75589835, 0],\n [0, 0, 1, 0]])\ncamera_f_inner_para_640_400 = np.mat([[481.03897095, 0, 297.6251611, 0],\n [0, 480.54992675, 198.41948045, 0],\n [0, 0, 1, 0]])\n# 将图片resize为640*400后的投影矩阵\nall_camera_projection_mat_640_400 = [\n [[6.97173914e+01, 5.92110817e+02, - 4.66218917e+01, 2.13733081e+03],\n [1.69748106e+02, 4.61255132e+01, - 4.83826649e+02, 6.61598968e+02],\n [-5.91988790e-01, 6.23211341e-01, - 5.11035123e-01, 4.78810628e+00]],\n [[-3.92545478e+01, 4.30615115e+02, 3.63298299e+02, 2.46053180e+03],\n [-2.51387243e+02, 3.59586119e+02, - 2.85944982e+02, 9.94231657e+02],\n [-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]],\n [[2.06004839e+02, 8.80969434e+01, 5.26921691e+02, 2.48532576e+03],\n [-4.22748655e+02, 1.91190940e+02, 2.64648475e+02, 1.50525708e+03],\n [-2.89368602e-01, - 5.46386263e-01, 7.85956655e-01, 5.58635091e+00]],\n [[5.16576002e+02, - 7.40190623e+01, 2.80286464e+02, 2.05870335e+03],\n [-1.41237328e+02, - 1.83113129e+02, 4.69871573e+02, 1.19315475e+03],\n [4.20708915e-01, - 8.77941799e-01, 2.28521787e-01, 4.61760675e+00]],\n [[5.93640352e+02, 5.43796790e+01, - 7.88037663e+01, 1.91405314e+03],\n [2.09859087e+02, - 4.15803768e+02, 2.47883361e+02, 9.75443850e+02],\n [5.93604063e-01, - 5.77481378e-01, - 5.60490387e-01, 4.30437514e+00]],\n [[3.73364519e+02, 3.56920527e+02, - 2.30620687e+02, 
1.88686540e+03],\n [3.54084644e+02, - 3.28854721e+02, - 1.91773720e+02, 7.54900332e+02],\n [1.03184351e-01, 8.56259076e-02, - 9.90969825e-01, 4.49819911e+00]]\n]\n\n# 六个相机在世界坐标系下的坐标\ncameras_coordinate = [[2.50436065, -3.75589484, 1.88800446],\n [4.02581981, -2.56894275, -3.29281609],\n [1.01348544, 1.88043939, -5.4273143],\n [-2.45261002, 3.5962286, -1.87506165],\n [-3.12155638, 2.09254542, 2.21770186],\n [-1.07692383, -1.37631717, 4.3081322]]\n# 六个相机组成的空间平面方程参数 AX+BY+CZ+D=0\ncamera_plane_para = [19.467678495159983, 18.098947303577706, 10.253452426300939, 1.884526845005233]\n\n# 六个相机映射到同一平面后的相机坐标,这里选用的是BCD三个相机作为相机平面,因此只需要将AEF映射到平面\ncameras_coordinate_mapping = [[2.45592658, -3.80092362, 1.86249467],\n [4.02581981, -2.56894275, -3.29281609],\n [1.01348544, 1.88043939, -5.4273143],\n [-2.45261002, 3.5962286, -1.87506165],\n [-3.16297766, 2.05403639, 2.19588564],\n [-1.08130466, -1.38038999, 4.30582486]]\n\n# 六张bmp图片的像素信息,读取后放在全局变量中,避免每次都去重新读取\nbmp_pixel = [[], [], [], [], [], []]\n# 哈希表,存储顶点对应的像素uv信息\nmap_vertex_to_texture = dict()\n\n# 哈希表,存储三角面片顶点对应的vt的index(行数)\nmap_vertex_to_vt_index = dict()\n# 每个相机对应的三角面片 如faces_belong_camera_A=[[1,3,5],[2,3,5]...]\n# faces_belong_camera_A = []\n# faces_belong_camera_B = []\n# faces_belong_camera_C = []\n# faces_belong_camera_D = []\n# faces_belong_camera_E = []\n# faces_belong_camera_F = []\n\n# 所有相机对应的三角面片,A相机放在0索引,以此类推\nfaces_belong_camera = [[], [], [], [], [], []]\n\n# 所有相机对应的bmp应该crop出的范围,[Umin,Vmin,Umax,Vmax],初始化时给相反的最大最小值,这里取的10000和-100,因为不可能有超过这个范围的了\nbmp_crop_ranges = [[10000, 10000, -100, -100], [10000, 10000, -100, -100],\n [10000, 10000, -100, -100], [10000, 10000, -100, -100],\n [10000, 10000, -100, -100], [10000, 10000, -100, -100]]\n# 提前计算出crop的宽度u_width和高度v_height,先初始化为0\ncrops_width_and_height = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]\n# 在得到crops_width_and_height后,提前计算出各个相机crop出的图在png中v所占的范围比重(0-1),例如A:0-0.25,B:0.25-0.4...F:0.8-1\ncrops_v_scale_in_png = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]\n# uvmap_png的长度和宽度\nuv_map_size = [0, 0]\n\n# face的索引 寻找bug时使用\nface_index = 1\n\n\n# 打印数据点\ndef print_data_points(data_points):\n for li in data_points:\n print(li)\n\n\n# 计算两个向量的夹角的余弦\n# 公式为cos<a,b>=a.b/|a||b|. 
a.b=(x1x2+y1y2+z1z2) |a|=√(x1^2+y1^2+z1^2), |b|=√(x2^2+y2^2+z2^2).\ndef calculate_cosine(vector1, vector2):\n a = vector1[0] * vector2[0] + vector1[1] * vector2[1] + vector1[2] * vector2[2]\n b = math.sqrt(vector1[0] * vector1[0] + vector1[1] * vector1[1] + vector1[2] * vector1[2])\n c = math.sqrt(vector2[0] * vector2[0] + vector2[1] * vector2[1] + vector2[2] * vector2[2])\n res = a / (b * c)\n return res\n\n\n# 计算两个向量的向量积\n# AB=(x1,y1,z1) CD=(x2,y2,z2) cross(AB,CD)=(y1*z2-y2z1,z1x2-z2x1,x1y2-x2y1)\ndef calculate_vector_product(vector1, vector2):\n vector_product = [vector1[1] * vector2[2] - vector1[2] * vector2[1],\n vector1[2] * vector2[0] - vector1[0] * vector2[2],\n vector1[0] * vector2[1] - vector1[1] * vector2[0]]\n return vector_product\n\n\n# 点到空间平面的映射点(投影)\ndef get_mapping_point_in_camera_plane(point, camera_plane_para):\n a = camera_plane_para[0]\n b = camera_plane_para[1]\n c = camera_plane_para[2]\n d = camera_plane_para[3]\n x = point[0]\n y = point[1]\n z = point[2]\n # 避免重复计算,不知python是否已有优化\n a_ = a * a\n b_ = b * b\n c_ = c * c\n temp = a_ + b_ + c_\n x_ = ((b_ + c_) * x - a * (b * y + c * z + d)) / temp\n y_ = ((a_ + c_) * y - b * (a * x + c * z + d)) / temp\n z_ = ((a_ + b_) * z - c * (a * x + b * y + d)) / temp\n point_ = [x_, y_, z_]\n return point_\n\n\n# # 全局变量中部分数据的由来(在main函数中直接使用了)(因为外参已经固定,所以部分数据基本不会改变,减少计算量)\n# def pre_process():\n# # 求出六个相机在世界坐标系下的坐标\n# cameras_coordinate = pfd.get_cameras_coordinate()\n# # 求出相机参数平面\n# camera_plane_para = pfd.get_camera_plane(cameras_coordinate)\n# # 获取A,E,F的映射点\n# camera_a_point = get_mapping_point_in_camera_plane(cameras_coordinate[0], camera_plane_para)\n# camera_e_point = get_mapping_point_in_camera_plane(cameras_coordinate[4], camera_plane_para)\n# camera_f_point = get_mapping_point_in_camera_plane(cameras_coordinate[5], camera_plane_para)\n# # 六个相机归到一个平面之后的坐标:BCD不变,AEF映射到BCD平面\n# camera_point_mapping = [camera_a_point, cameras_coordinate[1], cameras_coordinate[2],\n# cameras_coordinate[3], camera_e_point, camera_f_point]\n# camera_point_mapping = np.array(camera_point_mapping)\n" ]
[ [ "numpy.mat" ] ]
jojotenya/LAMOL
[ "03c31d9f0c7bf71295bc2d362ddf40a7656956e1" ]
[ "train.py" ]
[ "import torch\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nfrom pytorch_transformers import AdamW, WEIGHTS_NAME, WarmupLinearSchedule\nimport csv\nimport numpy as np\nimport os\nimport logging\nfrom fp16 import FP16_Module, FP16_Optimizer\nfrom parallel import DataParallelModel, DataParallelCriterion\nfrom collections import OrderedDict\nfrom utils import *\nfrom settings import args, TASK_DICT, init_logging, MODEL_CONFIG, MODEL_CLASS, SPECIAL_TOKENS, CONFIG_CLASS\nfrom settings import TOKENIZER, SPECIAL_TOKEN_IDS, FILL_VAL, SAVE_NAME, FINAL_SAVE_NAME, TOKENS_WEIGHT, CONFIG_NAME\nfrom scheduler import AnnealingLR\nfrom regularizers import REG_TYPES, REG_TYPE_KEYS, Weight_Regularized_AdamW, Weight_Regularized_SGD\nfrom torch.nn import CrossEntropyLoss\nlogger = logging.getLogger(__name__)\n\n\ndef train(task_ids, model):\n tasks = [args.tasks[task_id] for task_id in task_ids]\n\n logger.info(\"start to train { task: %s, seq train type: %s }\" % (tasks, args.seq_train_type))\n model_dir = get_model_dir(tasks)\n make_dir(model_dir)\n\n train_dataset = [TASK_DICT[t][\"train\"] for t in tasks]\n train_extra_data = []\n if \"lll\" in args.seq_train_type and task_ids[0] > 0 and not args.skip_tasks:\n prev_task = args.tasks[task_ids[0]-1]\n with torch.no_grad():\n create_extra_data(tasks[0], prev_task, model, train_extra_data)\n elif \"gem\" in args.seq_train_type and task_ids[0] > 0: \n get_real_data(tasks[0], train_extra_data, accum=False, encode=True)\n args.memory_data.append(train_extra_data)\n train_extra_data = []\n logger.info('extra training data size: {}'.format(len(train_extra_data)))\n\n if not model:\n # which_model_to_load = model_dir if os.path.isfile(os.path.join(model_dir, FINAL_SAVE_NAME)) else args.model_name\n model = MODEL_CLASS.from_pretrained(args.model_name).cuda()\n model.resize_token_embeddings(len(TOKENIZER))\n if not args.fp32:\n model = FP16_Module(model)\n\n gen_token = get_gen_token(tasks[0])\n TOKENIZER.add_tokens([gen_token])\n TOKENIZER.save_pretrained(model_dir)\n SPECIAL_TOKENS[tasks[0]] = gen_token\n SPECIAL_TOKEN_IDS[tasks[0]] = TOKENIZER.convert_tokens_to_ids(gen_token)\n logger.info('gen token = {} , gen token id = {}'.format(gen_token, SPECIAL_TOKEN_IDS[tasks[0]]))\n MODEL_CONFIG.vocab_size = len(TOKENIZER)\n MODEL_CONFIG.to_json_file(os.path.join(model_dir,CONFIG_NAME))\n global TOKENS_WEIGHT\n if len(TOKENIZER) != TOKENS_WEIGHT.shape[0]:\n TOKENS_WEIGHT = torch.cat((TOKENS_WEIGHT, torch.ones([1]).cuda()))\n\n if args.skip_tasks and len(tasks) == 1:\n logger.info(\"*********** skip task: {} ***********\".format(tasks[0]))\n if tasks[0] in args.skip_tasks:\n if len(args.skip_tasks) == 1:\n model_dir = get_model_dir(tasks)\n model_path = os.path.join(model_dir, FINAL_SAVE_NAME)\n config_path = os.path.join(model_dir,CONFIG_NAME)\n model_config = CONFIG_CLASS.from_json_file(config_path)\n model = MODEL_CLASS(model_config).cuda()\n state_dict = torch.load(model_path)\n model.load_state_dict(state_dict)\n if not args.fp32:\n model = FP16_Module(model)\n if args.seq_train_type in REG_TYPE_KEYS:\n logger.info(\"calulating reg_params ...\")\n train_qadata = QADataset(train_dataset, \"train\", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)\n max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)\n train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n parallel_model = DataParallelModel(WrapModel(model), args.device_ids)\n regularizer = REG_TYPES[args.seq_train_type](model, 
parallel_model, [train_dataloader], tasks[0])\n regularizer.task_start_do()\n regularizer.task_end_do()\n torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))\n logger.info(\"done reg_params!\")\n args.skip_tasks.remove(tasks[0])\n return model\n\n model.resize_token_embeddings(len(TOKENIZER))\n\n if not args.fp32: # again because resize_token_embeddings makes embedding layer fp32\n model = FP16_Module(model)\n\n parallel_model = DataParallelModel(WrapModel(model), args.device_ids)\n\n train_qadata = QADataset(train_dataset, \"train\", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)\n max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)\n train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n if not args.unbound and args.seq_train_type != \"multitask\":\n #n_train_epochs = TASK_DICT[tasks[0]][\"n_train_epochs\"]\n n_train_epochs = args.n_train_epochs[tasks[0]]\n else:\n n_train_epochs = args.n_train_epochs['_'.join(tasks)]\n n_train_optimization_steps = len(train_qadata) * n_train_epochs\n logger.info('len of train dataset: {} , max train batch size {} , num of opt steps: {}'.format(\n len(train_qadata), max_train_batch_size, n_train_optimization_steps))\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n if \"gem\" in args.seq_train_type:\n model.task_id = task_ids[0]\n if not hasattr(model, \"grad_dims\"):\n model.grad_dims = []\n for param in model.parameters():\n model.grad_dims.append(param.data.numel())\n if not hasattr(model, \"grads\"):\n model.grads = torch.zeros(sum(model.grad_dims),len(args.tasks))\n model.grads = model.grads.cuda()\n\n if args.seq_train_type in REG_TYPE_KEYS:\n optimizer = Weight_Regularized_AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n else:\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n if not args.fp32:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=None, dynamic_loss_scale=True,\n dynamic_loss_args={'scale_window': 100, 'min_scale': 1, 'delayed_shift': 2})\n\n scheduler = AnnealingLR(optimizer, start_lr=args.learning_rate, warmup_iter=int(args.n_warmup_ratio*len(train_qadata)),\n num_iters=int(n_train_optimization_steps), decay_style=args.decay_style)\n train_loss_fct = DataParallelCriterion(CrossEntropyLoss(ignore_index=FILL_VAL, weight=TOKENS_WEIGHT), args.device_ids)\n\n if args.seq_train_type in REG_TYPE_KEYS:\n copy_train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n prev_task = args.tasks[task_ids[0]-1]\n regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [copy_train_dataloader], tasks[0], prev_task)\n regularizer.task_start_do()\n\n tot_n_steps = 0\n train_once = TrainStep(model, optimizer, scheduler)\n if \"gem\" in args.seq_train_type and task_ids[0] != 0:\n gem_step = GEMStep(model, parallel_model, train_loss_fct, optimizer)\n model.train()\n for ep in range(n_train_epochs):\n cum_loss, cum_qa_loss, cum_lm_loss, cur_n_inputs = 0, 0, 0, 0\n for n_steps, (_, _, cqa, _, Y, gen_X, gen_Y) in enumerate(train_dataloader):\n\n n_inputs = sum(_cqa.shape[0] for _cqa in cqa)\n\n for i in range(len(cqa)):\n 
cqa[i] = (cqa[i].to(args.device_ids[i]),)\n Y[i] = Y[i].to(args.device_ids[i])\n gen_X[i] = (gen_X[i].to(args.device_ids[i]),)\n gen_Y[i] = gen_Y[i].to(args.device_ids[i])\n\n losses = get_losses(parallel_model, cqa, Y, gen_X, gen_Y, train_loss_fct)\n loss = sum(losses)\n if \"gem\" in args.seq_train_type and task_ids[0] != 0:\n gem_step(task_ids[0])\n train_once(loss, n_inputs)\n\n qa_loss = losses[0].item() * n_inputs\n lm_loss = losses[1].item() * n_inputs\n cum_loss += (qa_loss + lm_loss)\n cum_qa_loss += qa_loss\n cum_lm_loss += lm_loss\n cur_n_inputs += n_inputs\n\n if (n_steps + 1 ) % args.logging_steps == 0:\n logger.info('progress {:.3f} , lr {:.1E} , loss {:.3f} , qa loss {:.3f} , lm loss {:.3f} , avg batch size {:.1f}'.format(\n ep + cur_n_inputs/len(train_qadata), scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs,\n cur_n_inputs/(n_steps + 1)\n ))\n\n torch.save(model.state_dict(), os.path.join(model_dir, SAVE_NAME+str(ep+1)))\n tot_n_steps += (n_steps + 1)\n logger.info('epoch {}/{} done , tot steps {} , lr {:.1E} , loss {:.2f} , qa loss {:.2f} , lm loss {:.2f} , avg batch size {:.1f}'.format(\n ep+1, n_train_epochs, tot_n_steps, scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs, cur_n_inputs/(n_steps+1)\n ))\n\n # task end do for reg\n if args.seq_train_type in REG_TYPE_KEYS:\n regularizer.task_end_do()\n torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))\n\n return model\n\n\nif __name__ == '__main__':\n\n if not args.debug:\n logging.getLogger(\"pytorch_transformers\").setLevel(logging.WARNING)\n logging.getLogger(\"pytorch_transformers.tokenization_utils\").setLevel(logging.CRITICAL)\n\n make_dir(args.model_dir_root)\n\n init_logging(os.path.join(args.model_dir_root, 'log_train.txt'))\n logger.info('args = {}'.format(str(args)))\n\n model = None\n if args.seq_train_type == \"multitask\":\n model = train(list(range(len(args.tasks))), model)\n else:\n if args.unbound:\n TASK_DICT = lll_unbound_setting(split_size=args.unbound)\n for task_id in range(len(args.tasks)):\n model = train([task_id], model)\n" ]
[ [ "torch.ones", "torch.no_grad", "torch.load", "torch.nn.CrossEntropyLoss" ] ]
RTHMaK/git-squash-master
[ "76c4c8437dd18114968e69a698f4581927fcdabf" ]
[ "scikit-learn-weighted_kde/examples/svm/plot_separating_hyperplane_unbalanced.py" ]
[ "\"\"\"\n=================================================\nSVM: Separating hyperplane for unbalanced classes\n=================================================\n\nFind the optimal separating hyperplane using an SVC for classes that\nare unbalanced.\n\nWe first find the separating plane with a plain SVC and then plot\n(dashed) the separating hyperplane with automatic correction for\nunbalanced classes.\n\n.. currentmodule:: sklearn.linear_model\n\n.. note::\n\n This example will also work by replacing ``SVC(kernel=\"linear\")``\n with ``SGDClassifier(loss=\"hinge\")``. Setting the ``loss`` parameter\n of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour\n such as that of an SVC with a linear kernel.\n\n For example, try the following instead of the ``SVC``::\n\n clf = SGDClassifier(n_iter=100, alpha=0.01)\n\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\n#from sklearn.linear_model import SGDClassifier\n\n# we create two clusters of separable points (1000 and 100 samples)\nrng = np.random.RandomState(0)\nn_samples_1 = 1000\nn_samples_2 = 100\nX = np.r_[1.5 * rng.randn(n_samples_1, 2),\n 0.5 * rng.randn(n_samples_2, 2) + [2, 2]]\ny = [0] * (n_samples_1) + [1] * (n_samples_2)\n\n# fit the model and get the separating hyperplane\nclf = svm.SVC(kernel='linear', C=1.0)\nclf.fit(X, y)\n\nw = clf.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(-5, 5)\nyy = a * xx - clf.intercept_[0] / w[1]\n\n\n# get the separating hyperplane using weighted classes\nwclf = svm.SVC(kernel='linear', class_weight={1: 10})\nwclf.fit(X, y)\n\nww = wclf.coef_[0]\nwa = -ww[0] / ww[1]\nwyy = wa * xx - wclf.intercept_[0] / ww[1]\n\n# plot separating hyperplanes and samples\nh0 = plt.plot(xx, yy, 'k-', label='no weights')\nh1 = plt.plot(xx, wyy, 'k--', label='with weights')\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)\nplt.legend()\n\nplt.axis('tight')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "sklearn.svm.SVC", "matplotlib.pyplot.axis", "numpy.random.RandomState", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.scatter" ] ]
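Each record in this file pairs a source file (its code field) with a flat list of dotted API calls (the bracketed apis line directly above this note). For reference, here is a minimal Python sketch, using only the standard-library ast module, of how such a list could be recomputed from a record's code string; the dataset's actual extraction tooling is not shown in this file, and the helper name extract_call_names is purely illustrative.

import ast

def extract_call_names(source):
    # Map imported aliases back to dotted module paths, e.g. plt -> matplotlib.pyplot
    tree = ast.parse(source)
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                aliases[a.asname or a.name] = a.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            for a in node.names:
                aliases[a.asname or a.name] = node.module + "." + a.name
    # Collect fully qualified names of call targets, e.g. sklearn.svm.SVC
    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            parts = []
            func = node.func
            while isinstance(func, ast.Attribute):
                parts.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name):
                parts.append(aliases.get(func.id, func.id))
                calls.add(".".join(reversed(parts)))
    return calls

print(sorted(extract_call_names("import numpy as np\nnp.linspace(-5, 5)\n")))  # ['numpy.linspace']

Note that the apis columns in this file keep only library-level calls, so a post-filter on known package prefixes would still be needed on top of a sketch like this.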
josehoras/Advanced-Lane-Finding
[ "e6b83d602eb89661d3bf0f4d257ed5af0f6a58bb" ]
[ "video_pipeline.py" ]
[ "import numpy as np\nimport pickle\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom moviepy.editor import VideoFileClip\nfrom image_thresholding import *\nfrom plotting_helpers import *\nfrom line_fit import *\nfrom Line import *\n\n\n# *** PIPELINE ***\ndef pipeline(img):\n global error_im, skipped_frames\n\n # 1. Correct distortion\n # open distortion matrix\n try:\n saved_dist = pickle.load(open('calibrate_camera.p', 'rb'), encoding='latin1')\n mtx = saved_dist['mtx']\n dist = saved_dist['dist']\n except (OSError, IOError): # No calibration file yet available\n print(\"No saved distortion data. Run camera_calibration.py\")\n # apply correction\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n\n # 2. Apply filters to get binary map\n ksize = 3\n gradx = abs_sobel_thresh(undist, orient='x', sobel_kernel=ksize, thresh=(10, 100))\n grady = abs_sobel_thresh(undist, orient='y', sobel_kernel=ksize, thresh=(5, 100))\n mag_bin = mag_thresh(undist, sobel_kernel=ksize, mag_thresh=(10, 200))\n dir_bin = dir_threshold(undist, sobel_kernel=15, thresh=(0.9, 1.2))\n hls_bin = hls_select(img, thresh=(50, 255))\n white_bin = white_select(img, thresh=195)\n yellow_bin = yellow_select(img)\n # combine filters to a final output\n combined = np.zeros_like(dir_bin)\n combined[((mag_bin == 1) & (dir_bin == 1) & (hls_bin == 1)) |\n ((white_bin == 1) | (yellow_bin == 1))] = 1\n\n # 3. Define trapezoid points on the road and transform perspective\n X = combined.shape[1]\n Y = combined.shape[0]\n src = np.float32(\n [[205, 720],\n [1075, 720],\n [700, 460],\n [580, 460]])\n dst = np.float32(\n [[300, 720],\n [980, 720],\n [980, 0],\n [300, 0]])\n # get perspective transformation matrix\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n # warp the result of binary thresholds\n warped = cv2.warpPerspective(combined, M, (X,Y), flags=cv2.INTER_LINEAR)\n\n # 4. Get polynomial fit of lines\n # if > 5 frames skipped (or first frame, as skipped_frames is initialized to 100) do full search\n if skipped_frames > 5:\n fit_method = \"Boxes\"\n leftx, lefty, rightx, righty, out_img = find_lane_pixels(warped)\n else:\n fit_method = \"Around fit\"\n leftx, lefty, rightx, righty, out_img = find_lane_around_fit(warped, left_lane.fit_x, right_lane.fit_x)\n\n # fit polynomials and sanity check\n try:\n left_fit, right_fit, left_px, right_px, ploty = fit(leftx, lefty, rightx, righty, warped.shape[0])\n detected, err_msg = sanity_chk(ploty, left_px, right_px)\n except:\n detected, err_msg = False, \"Empty data\"\n\n if detected: skipped_frames = 0\n else: skipped_frames += 1\n\n # 5. Calculate distance to center, curvature, and update Line objects\n if detected or (fit_method == \"Boxes\" and err_msg != \"Empty data\"):\n left_curv, right_curv = find_curv(ploty, left_fit, right_fit)\n left_lane.update(ploty, left_fit, left_px, left_curv)\n right_lane.update(ploty, right_fit, right_px, right_curv)\n lane_w = (right_lane.base_pos - left_lane.base_pos) * 3.7/700\n offset = (((right_lane.base_pos + left_lane.base_pos) - img.shape[1]) / 2) * 3.7/700\n\n # 6. 
Plot fitted lanes into original image\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_lane.fit_x, left_lane.fit_y]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_lane.fit_x, right_lane.fit_y])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))\n\n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n # if error save original img to check closely in image pipeline\n if 1 < skipped_frames < 3:\n mpimg.imsave(err_msg + \"_\" + str(error_im) + \".jpg\", img)\n error_im += 1\n\n # Add text\n road_curv = (left_lane.curv_avg + right_lane.curv_avg) // 2\n if road_curv > 2000:\n road_curv_text = \"Road curvature: straight\"\n else:\n road_curv_text = \"Road curvature: \" + str(road_curv) + \"m\"\n side = {True: \"left\", False: \"right\"}\n offset_txt = \"Car is {0:.2f}m {1:s} of center\".format(offset, side[offset > 0])\n\n for i, txt in enumerate([road_curv_text, offset_txt]):\n cv2.putText(result, txt, (75, 75 * (i+1)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)\n\n # Uncomment for debugging messages\n # lane_width_txt = \"Lane width: %.2f m\" % lane_w\n # for i, obj, txt in [(1, left_lane, \"Left\"), (2, right_lane, \"Right\")]:\n # if obj.curv_avg > 2000:\n # curv_txt = txt + \" curvature: straight\"\n # else:\n # curv_txt = txt + \" curvature: \" + str(int(obj.curv_avg)) + \"m\"\n # cv2.putText(result,curv_txt, (550, 50 * i), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n # cv2.putText(result, \"Skipped frames: \" + str(skipped_frames), (550,150), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n # cv2.putText(result, fit_method, (550, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n # if err_msg != \"\":\n # cv2.putText(result, \"Error!: \" + err_msg, (550, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n\n return result\n\n\n# *** MAIN ***\n# define global variables to use in the pipeline\nleft_lane = Line()\nright_lane = Line()\nerror_im = 1\nskipped_frames = 100\n# load video\nclip_name = \"challenge_video\"\nclip1 = VideoFileClip(clip_name + \".mp4\")#.subclip(0, 8)\n# run video through the pipeline and save output\nout_clip = clip1.fl_image(pipeline)\nout_clip.write_videofile(\"output_videos/\" + clip_name + \"_output.mp4\", audio=False)\n" ]
[ [ "numpy.vstack", "numpy.zeros_like", "numpy.float32", "numpy.int_", "numpy.dstack", "numpy.hstack" ] ]
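Every record also opens with repo_name, hexsha, and file_path lines (for this record: josehoras/Advanced-Lane-Finding, e6b83d6..., video_pipeline.py). Below is a small sketch of how those three fields could be turned back into a browsable source URL, assuming the repository is hosted on GitHub and that the hexsha column is a commit SHA; neither assumption is stated anywhere in this file.

def raw_github_url(repo_name, hexsha, file_path):
    # raw.githubusercontent.com serves file contents pinned to a given commit
    return "https://raw.githubusercontent.com/{}/{}/{}".format(repo_name, hexsha, file_path)

print(raw_github_url(
    "josehoras/Advanced-Lane-Finding",
    "e6b83d602eb89661d3bf0f4d257ed5af0f6a58bb",
    "video_pipeline.py",
))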
EdwardFerdian/4DFlowNet
[ "e9c8bf72660b41ef5c7b6c677a71283ead32bbab" ]
[ "src/Network/SR4DFlowNet.py" ]
[ "import tensorflow as tf\n\nclass SR4DFlowNet():\n def __init__(self, res_increase):\n self.res_increase = res_increase\n\n def build_network(self, u, v, w, u_mag, v_mag, w_mag, low_resblock=8, hi_resblock=4, channel_nr=64):\n channel_nr = 64\n\n speed = (u ** 2 + v ** 2 + w ** 2) ** 0.5\n mag = (u_mag ** 2 + v_mag ** 2 + w_mag ** 2) ** 0.5\n pcmr = mag * speed\n\n phase = tf.keras.layers.concatenate([u,v,w])\n pc = tf.keras.layers.concatenate([pcmr, mag, speed])\n \n pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')\n pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')\n\n phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')\n phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')\n\n concat_layer = tf.keras.layers.concatenate([phase, pc])\n concat_layer = conv3d(concat_layer, 1, channel_nr, 'SYMMETRIC', 'relu')\n concat_layer = conv3d(concat_layer, 3, channel_nr, 'SYMMETRIC', 'relu')\n \n # res blocks\n rb = concat_layer\n for i in range(low_resblock):\n rb = resnet_block(rb, \"ResBlock\", channel_nr, pad='SYMMETRIC')\n\n rb = upsample3d(rb, self.res_increase)\n \n # refinement in HR\n for i in range(hi_resblock):\n rb = resnet_block(rb, \"ResBlock\", channel_nr, pad='SYMMETRIC')\n\n # 3 separate path version\n u_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')\n u_path = conv3d(u_path, 3, 1, 'SYMMETRIC', None)\n\n v_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')\n v_path = conv3d(v_path, 3, 1, 'SYMMETRIC', None)\n\n w_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')\n w_path = conv3d(w_path, 3, 1, 'SYMMETRIC', None)\n \n\n b_out = tf.keras.layers.concatenate([u_path, v_path, w_path])\n\n return b_out\n\ndef upsample3d(input_tensor, res_increase):\n \"\"\"\n Resize the image by linearly interpolating the input\n using TF '``'resize_bilinear' function.\n\n :param input_tensor: 2D/3D image tensor, with shape:\n 'batch, X, Y, Z, Channels'\n :return: interpolated volume\n\n Original source: https://niftynet.readthedocs.io/en/dev/_modules/niftynet/layer/linear_resize.html\n \"\"\"\n \n # We need this option for the bilinear resize to prevent shifting bug\n align = True \n\n b_size, x_size, y_size, z_size, c_size = input_tensor.shape\n\n x_size_new, y_size_new, z_size_new = x_size * res_increase, y_size * res_increase, z_size * res_increase\n\n if res_increase == 1:\n # already in the target shape\n return input_tensor\n\n # resize y-z\n squeeze_b_x = tf.reshape(input_tensor, [-1, y_size, z_size, c_size], name='reshape_bx')\n resize_b_x = tf.compat.v1.image.resize_bilinear(squeeze_b_x, [y_size_new, z_size_new], align_corners=align)\n resume_b_x = tf.reshape(resize_b_x, [-1, x_size, y_size_new, z_size_new, c_size], name='resume_bx')\n\n # Reorient\n reoriented = tf.transpose(resume_b_x, [0, 3, 2, 1, 4])\n \n # squeeze and 2d resize\n squeeze_b_z = tf.reshape(reoriented, [-1, y_size_new, x_size, c_size], name='reshape_bz')\n resize_b_z = tf.compat.v1.image.resize_bilinear(squeeze_b_z, [y_size_new, x_size_new], align_corners=align)\n resume_b_z = tf.reshape(resize_b_z, [-1, z_size_new, y_size_new, x_size_new, c_size], name='resume_bz')\n \n output_tensor = tf.transpose(resume_b_z, [0, 3, 2, 1, 4])\n return output_tensor\n\n\ndef conv3d(x, kernel_size, filters, padding='SYMMETRIC', activation=None, initialization=None, use_bias=True):\n \"\"\"\n Based on: https://github.com/gitlimlab/CycleGAN-Tensorflow/blob/master/ops.py\n For tf padding, refer to: https://www.tensorflow.org/api_docs/python/tf/pad\n\n \"\"\"\n reg_l2 = tf.keras.regularizers.l2(5e-7)\n\n if padding == 
'SYMMETRIC' or padding == 'REFLECT':\n p = (kernel_size - 1) // 2\n x = tf.pad(x, [[0,0],[p,p],[p,p], [p,p],[0,0]], padding)\n x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x)\n else:\n assert padding in ['SAME', 'VALID']\n x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x)\n return x\n \n\ndef resnet_block(x, block_name='ResBlock', channel_nr=64, scale = 1, pad='SAME'):\n tmp = conv3d(x, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)\n tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)\n\n tmp = conv3d(tmp, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)\n\n tmp = x + tmp * scale\n tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)\n\n return tmp\n" ]
[ [ "tensorflow.pad", "tensorflow.reshape", "tensorflow.keras.layers.concatenate", "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.layers.LeakyReLU", "tensorflow.compat.v1.image.resize_bilinear", "tensorflow.transpose" ] ]
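Because each record closes with its apis column, corpus-level questions (for example, which frameworks dominate the files collected here) can be answered without parsing the code at all. A hedged sketch follows, assuming the records have already been loaded into Python dicts whose keys follow the column names visible in this file (repo_name, apis, and so on); the two inline records are trimmed copies of entries shown above.

from collections import Counter

def framework_histogram(records):
    # Count the top-level package of every extracted API call.
    counts = Counter()
    for rec in records:
        for api_group in rec["apis"]:  # each apis value is a list of lists, as above
            for api in api_group:
                counts[api.split(".")[0]] += 1
    return counts

records = [
    {"repo_name": "EdwardFerdian/4DFlowNet",
     "apis": [["tensorflow.pad", "tensorflow.reshape", "tensorflow.transpose"]]},
    {"repo_name": "josehoras/Advanced-Lane-Finding",
     "apis": [["numpy.vstack", "numpy.hstack"]]},
]
print(framework_histogram(records))  # tensorflow counted 3 times, numpy twice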
kingmoon3/xalpha
[ "dd877c6bce1b85a4facd38de9dc35a7bf0acf1c6" ]
[ "xalpha/universal.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nmodules for universal fetcher that gives historical daily data and realtime data\nfor almost everything in the market\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nimport logging\nimport inspect\nfrom bs4 import BeautifulSoup\nfrom functools import wraps, lru_cache\nfrom uuid import uuid4\nfrom sqlalchemy import exc\nfrom dateutil.relativedelta import relativedelta\n\ntry:\n from jqdatasdk import (\n get_index_weights,\n query,\n get_fundamentals,\n valuation,\n get_query_count,\n finance,\n get_index_stocks,\n macro,\n get_price,\n )\n\n # 本地导入\nexcept ImportError:\n try:\n from jqdata import finance, macro # 云平台导入\n except ImportError:\n pass\n\nfrom xalpha.info import basicinfo, fundinfo, mfundinfo, get_fund_holdings\nfrom xalpha.indicator import indicator\nfrom xalpha.cons import (\n rget,\n rpost,\n rget_json,\n rpost_json,\n tz_bj,\n last_onday,\n region_trans,\n today_obj,\n _float,\n)\nfrom xalpha.provider import data_source\nfrom xalpha.exceptions import DataPossiblyWrong, ParserFailure\n\npd.options.mode.chained_assignment = None # turn off setwith copy warning\nthismodule = sys.modules[__name__]\nxamodule = sys.modules[\"xalpha\"]\nlogger = logging.getLogger(__name__)\n\n\ndef tomorrow_ts():\n dto = dt.datetime.now() + dt.timedelta(1)\n return dto.timestamp()\n\n\ndef has_weekday(start, end):\n for d in pd.date_range(start, end):\n if d.weekday() < 5:\n return True\n return False\n\n\ndef ts2pdts(ts):\n dto = dt.datetime.fromtimestamp(ts / 1000, tz=tz_bj).replace(tzinfo=None)\n return dto.replace(\n hour=0, minute=0, second=0, microsecond=0\n ) # 雪球美股数据时间戳是美国0点,按北京时区换回时间后,把时分秒扔掉就重合了\n\n\ndef decouple_code(code):\n \"\"\"\n decompose SH600000.A into SH600000, after\n\n :param code:\n :return: Tuple\n \"\"\"\n if len(code[1:].split(\".\")) > 1: # .SPI in US stock!\n type_ = code.split(\".\")[-1]\n code = \".\".join(code.split(\".\")[:-1])\n if type_.startswith(\"b\") or type_.startswith(\"B\"):\n type_ = \"before\"\n elif type_.startswith(\"a\") or type_.startswith(\"A\"):\n type_ = \"after\"\n elif type_.startswith(\"n\") or type_.startswith(\"N\"):\n type_ = \"normal\"\n else:\n logger.warning(\n \"unrecoginzed flag for adjusted factor %s, use default\" % type_\n )\n type_ = \"before\"\n else:\n type_ = \"before\"\n return code, type_\n\n\ndef lru_cache_time(ttl=None, maxsize=None):\n \"\"\"\n TTL support on lru_cache\n\n :param ttl: float or int, seconds\n :param maxsize: int, maxsize for lru_cache\n :return:\n \"\"\"\n\n def wrapper(func):\n # Lazy function that makes sure the lru_cache() invalidate after X secs\n @lru_cache(maxsize)\n def time_aware(_ttl, *args, **kwargs):\n return func(*args, **kwargs)\n\n setattr(thismodule, func.__name__ + \"_ttl\", time_aware)\n\n @wraps(func)\n def newfunc(*args, **kwargs):\n ttl_hash = round(time.time() / ttl)\n f_ttl = getattr(thismodule, func.__name__ + \"_ttl\")\n return f_ttl(ttl_hash, *args, **kwargs)\n\n return newfunc\n\n return wrapper\n\n\n# TODO: 缓存 token 的合适时间尺度\n@lru_cache_time(ttl=300)\ndef get_token():\n \"\"\"\n 获取雪球的验权 token,匿名也可获取,而且似乎永远恒定(大时间范围内会改变)\n\n :return:\n \"\"\"\n r = rget(\"https://xueqiu.com\", headers={\"user-agent\": \"Mozilla\"})\n return r.cookies[\"xq_a_token\"]\n\n\ndef get_historical_fromxq(code, count, type_=\"before\", full=False):\n \"\"\"\n\n :param code:\n :param count:\n :param type_: str. 
normal, before, after\n :param full:\n :return:\n \"\"\"\n url = \"https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={code}&begin={tomorrow}&period=day&type={type_}&count=-{count}\"\n if full:\n url += \"&indicator=kline,pe,pb,ps,pcf,market_capital,agt,ggt,balance\"\n # pe 是 TTM 数据\n r = rget_json(\n url.format(\n code=code, tomorrow=int(tomorrow_ts() * 1000), count=count, type_=type_\n ),\n cookies={\"xq_a_token\": get_token()},\n headers={\"user-agent\": \"Mozilla/5.0\"},\n )\n df = pd.DataFrame(data=r[\"data\"][\"item\"], columns=r[\"data\"][\"column\"])\n df[\"date\"] = (df[\"timestamp\"]).apply(ts2pdts) # reset hours to zero\n return df\n\n\n@lru_cache()\ndef get_industry_fromxq(code):\n \"\"\"\n part of symbols has empty industry information\n\n :param code:\n :return: dict\n \"\"\"\n url = (\n \"https://xueqiu.com/stock/industry/stockList.json?code=%s&type=1&size=100\"\n % code\n )\n r = rget_json(url, cookies={\"xq_a_token\": get_token()})\n return r\n\n\ndef get_historical_fromcninvesting(curr_id, st_date, end_date, app=False):\n data = {\n \"curr_id\": curr_id,\n # \"smlID\": smlID, # ? but seems to be fixed with curr_id, it turns out it doesn't matter\n \"st_date\": st_date, # %Y/%m/%d\n \"end_date\": end_date,\n \"interval_sec\": \"Daily\",\n \"sort_col\": \"date\",\n \"sort_ord\": \"DESC\",\n \"action\": \"historical_data\",\n }\n if not app: # fetch from web api\n r = rpost(\n \"https://cn.investing.com/instruments/HistoricalDataAjax\",\n data=data,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\\\n AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n else: # fetch from app api\n r = rpost(\n \"https://cnappapi.investing.com/instruments/HistoricalDataAjax\",\n data=data,\n headers={\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept-Language\": \"zh-cn\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0\",\n \"ccode\": \"CN\",\n #'ccode_time': '1585551041.986028',\n \"x-app-ver\": \"117\",\n \"x-meta-ver\": \"14\",\n \"x-os\": \"ios\",\n \"x-uuid\": str(uuid4()),\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n s = BeautifulSoup(r.text, \"lxml\")\n dfdict = {}\n cols = []\n for col in s.find_all(\"th\"):\n dfdict[str(col.contents[0])] = []\n cols.append(str(col.contents[0]))\n num_cols = len(cols)\n for i, td in enumerate(s.find_all(\"td\")[:-5]):\n if cols[i % num_cols] == \"日期\":\n dfdict[cols[i % num_cols]].append(\n dt.datetime.strptime(str(td.string), \"%Y年%m月%d日\")\n )\n else:\n dfdict[cols[i % num_cols]].append(str(td.string))\n return pd.DataFrame(dfdict)\n\n\ndef prettify(df):\n _map = {\n \"日期\": \"date\",\n \"收盘\": \"close\",\n \"开盘\": \"open\",\n \"高\": \"high\",\n \"低\": \"low\",\n \"涨跌幅\": \"percent\",\n \"交易量\": \"volume\",\n }\n df.rename(_map, axis=1, inplace=True)\n if len(df) > 1 and df.iloc[1][\"date\"] < df.iloc[0][\"date\"]:\n df = df[::-1]\n # df = df[[\"date\", \"open\", \"close\", \"high\", \"low\", \"percent\"]]\n df1 = df[[\"date\"]]\n for k in [\"open\", \"close\", \"high\", \"low\", \"volume\"]:\n if k in df.columns:\n df1[k] = df[k].apply(_float)\n df1[\"percent\"] = df[\"percent\"]\n return df1\n\n\ndef dstr2dobj(dstr):\n if len(dstr.split(\"/\")) > 1:\n d_obj = dt.datetime.strptime(dstr, \"%Y/%m/%d\")\n elif len(dstr.split(\".\")) > 1:\n d_obj = dt.datetime.strptime(dstr, 
\"%Y.%m.%d\")\n elif len(dstr.split(\"-\")) > 1:\n d_obj = dt.datetime.strptime(dstr, \"%Y-%m-%d\")\n else:\n d_obj = dt.datetime.strptime(dstr, \"%Y%m%d\")\n return d_obj\n\n\n@lru_cache(maxsize=1024)\ndef get_investing_id(suburl, app=False):\n if not app:\n url = \"https://cn.investing.com\"\n else:\n url = \"https://cnappapi.investing.com\"\n if not suburl.startswith(\"/\"):\n url += \"/\"\n url += suburl\n if not app:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36\"\n }\n else:\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept-Language\": \"zh-cn\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0\",\n \"ccode\": \"CN\",\n #'ccode_time': '1585551041.986028',\n \"x-app-ver\": \"117\",\n \"x-meta-ver\": \"14\",\n \"x-os\": \"ios\",\n \"x-uuid\": str(uuid4()),\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n r = rget(\n url,\n headers=headers,\n )\n s = BeautifulSoup(r.text, \"lxml\")\n pid = s.find(\"span\", id=\"last_last\")[\"class\"][-1].split(\"-\")[1]\n return pid\n\n\ndef _variate_ua():\n last = 20 + np.random.randint(20)\n ua = []\n ua.append(\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko)\"\n )\n ua.append(\n \"Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1\"\n )\n choice = np.random.randint(2)\n return ua[choice][:last]\n\n\n@lru_cache_time(ttl=120, maxsize=128)\ndef get_rmb(start=None, end=None, prev=360, currency=\"USD/CNY\"):\n \"\"\"\n 获取人民币汇率中间价, 该 API 官网数据源,稳定性很差\n\n :param start:\n :param end:\n :param prev:\n :param currency:\n :return: pd.DataFrame\n \"\"\"\n bl = [\"USD\", \"EUR\", \"100JPY\", \"HKD\", \"GBP\", \"AUD\", \"NZD\", \"SGD\", \"CHF\", \"CAD\"]\n al = [\n \"MYR\",\n \"RUB\",\n \"ZAR\",\n \"KRW\",\n \"AED\",\n \"SAR\",\n \"HUF\",\n \"PLN\",\n \"DKK\",\n \"SEK\",\n \"NOK\",\n \"TRY\",\n \"MXN\",\n \"THB\",\n ]\n is_inverse = False\n if (currency[:3] in al) or (currency[4:] in bl):\n is_inverse = True\n currency = currency[4:] + \"/\" + currency[:3]\n url = \"http://www.chinamoney.com.cn/ags/ms/cm-u-bk-ccpr/CcprHisNew?startDate={start_str}&endDate={end_str}&currency={currency}&pageNum=1&pageSize=300\"\n if not end:\n end_obj = today_obj()\n else:\n end_obj = dstr2dobj(end)\n if not start:\n start_obj = end_obj - dt.timedelta(prev)\n else:\n start_obj = dstr2dobj(start)\n start_str = start_obj.strftime(\"%Y-%m-%d\")\n end_str = end_obj.strftime(\"%Y-%m-%d\")\n count = (end_obj - start_obj).days + 1\n rl = []\n # API 很奇怪,需要经常变 UA 才好用\n\n headers = {\n \"Referer\": \"http://www.chinamoney.com.cn/chinese/bkccpr/\",\n \"Origin\": \"http://www.chinamoney.com.cn\",\n \"Host\": \"www.chinamoney.com.cn\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n\n if count <= 360:\n headers.update({\"user-agent\": _variate_ua()})\n r = rpost_json(\n url.format(start_str=start_str, end_str=end_str, currency=currency),\n headers=headers,\n )\n rl.extend(r[\"records\"])\n else: # data more than 1 year cannot be fetched once due to API limitation\n sepo_obj = end_obj\n sepn_obj = sepo_obj - dt.timedelta(360)\n # sep0_obj = end_obj - dt.timedelta(361)\n while sepn_obj > start_obj: # [sepn sepo]\n headers.update({\"user-agent\": _variate_ua()})\n r = rpost_json(\n url.format(\n start_str=sepn_obj.strftime(\"%Y-%m-%d\"),\n 
end_str=sepo_obj.strftime(\"%Y-%m-%d\"),\n currency=currency,\n ),\n headers=headers,\n )\n rl.extend(r[\"records\"])\n\n sepo_obj = sepn_obj - dt.timedelta(1)\n sepn_obj = sepo_obj - dt.timedelta(360)\n headers.update({\"user-agent\": _variate_ua()})\n r = rpost_json(\n url.format(\n start_str=start_obj.strftime(\"%Y-%m-%d\"),\n end_str=sepo_obj.strftime(\"%Y-%m-%d\"),\n currency=currency,\n ),\n headers=headers,\n )\n rl.extend(r[\"records\"])\n data = {\"date\": [], \"close\": []}\n for d in rl:\n data[\"date\"].append(pd.Timestamp(d[\"date\"]))\n data[\"close\"].append(d[\"values\"][0])\n df = pd.DataFrame(data)\n df = df[::-1]\n df[\"close\"] = pd.to_numeric(df[\"close\"])\n if is_inverse:\n df[\"close\"] = 1 / df[\"close\"]\n return df\n\n\ndef get_fund(code):\n # 随意设置非空 path,防止嵌套缓存到 fundinfo\n if code[0] == \"F\":\n if code.startswith(\"F96\"):\n return get_historical_from_ttjj_oversea(code)\n else:\n df = fundinfo(code[1:], path=\"nobackend\", priceonly=True).price\n elif code[0] == \"T\":\n df = fundinfo(code[1:], path=\"nobackend\", priceonly=True).price\n df[\"netvalue\"] = df[\"totvalue\"]\n elif code[0] == \"M\":\n df = mfundinfo(code[1:], path=\"nobackend\").price\n else:\n raise ParserFailure(\"Unknown fund code %s\" % code)\n df[\"close\"] = df[\"netvalue\"]\n return df[[\"date\", \"close\"]]\n\n\ndef get_historical_from_ttjj_oversea(code, start=None, end=None):\n if code.startswith(\"F\"):\n code = code[1:]\n pagesize = (\n dt.datetime.strptime(end, \"%Y%m%d\") - dt.datetime.strptime(start, \"%Y%m%d\")\n ).days + 1\n r = rget_json(\n \"http://overseas.1234567.com.cn/overseasapi/OpenApiHander.ashx?api=HKFDApi&m=MethodJZ&hkfcode={hkfcode}&action=2&pageindex=0&pagesize={pagesize}&date1={startdash}&date2={enddash}&callback=\".format(\n hkfcode=get_hkfcode(code),\n pagesize=pagesize,\n startdash=start[:4] + \"-\" + start[4:6] + \"-\" + start[6:],\n enddash=end[:4] + \"-\" + end[4:6] + \"-\" + end[6:],\n )\n )\n datalist = {\"date\": [], \"close\": []}\n for dd in r[\"Data\"]:\n datalist[\"date\"].append(pd.to_datetime(dd[\"PDATE\"]))\n datalist[\"close\"].append(dd[\"NAV\"])\n df = pd.DataFrame(datalist)\n df = df[df[\"date\"] <= end]\n df = df[df[\"date\"] >= start]\n df = df.sort_values(\"date\", ascending=True)\n return df\n\n\ndef get_portfolio_fromttjj(code, start=None, end=None):\n startobj = dt.datetime.strptime(start, \"%Y%m%d\")\n endobj = dt.datetime.strptime(end, \"%Y%m%d\")\n if (endobj - startobj).days < 90:\n return None # note start is always 1.1 4.1 7.1 10.1 in incremental updates\n if code.startswith(\"F\"):\n code = code[1:]\n r = rget(\"http://fundf10.eastmoney.com/zcpz_{code}.html\".format(code=code))\n s = BeautifulSoup(r.text, \"lxml\")\n table = s.find(\"table\", class_=\"tzxq\")\n df = pd.read_html(str(table))[0]\n df[\"date\"] = pd.to_datetime(df[\"报告期\"])\n df[\"stock_ratio\"] = df[\"股票占净比\"].replace(\"---\", \"0%\").apply(lambda s: _float(s[:-1]))\n df[\"bond_ratio\"] = df[\"债券占净比\"].replace(\"---\", \"0%\").apply(lambda s: _float(s[:-1]))\n df[\"cash_ratio\"] = df[\"现金占净比\"].replace(\"---\", \"0%\").apply(lambda s: _float(s[:-1]))\n # df[\"dr_ratio\"] = df[\"存托凭证占净比\"].replace(\"---\", \"0%\").apply(lambda s: xa.cons._float(s[:-1]))\n df[\"assets\"] = df[\"净资产(亿元)\"]\n df = df[::-1]\n return df[[\"date\", \"stock_ratio\", \"bond_ratio\", \"cash_ratio\", \"assets\"]]\n\n\n# this is the most elegant approach to dispatch get_daily, the definition can be such simple\n# you actually don't need to bother on start end blah, everything is taken care of by 
``cahcedio``\n@data_source(\"jq\")\ndef get_fundshare_byjq(code, **kws):\n code = _inverse_convert_code(code)\n df = finance.run_query(\n query(finance.FUND_SHARE_DAILY)\n .filter(finance.FUND_SHARE_DAILY.code == code)\n .filter(finance.FUND_SHARE_DAILY.date >= kws[\"start\"])\n .filter(finance.FUND_SHARE_DAILY.date <= kws[\"end\"])\n .order_by(finance.FUND_SHARE_DAILY.date)\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df[[\"date\", \"shares\"]]\n return df\n\n\n@lru_cache(maxsize=1024)\ndef get_futu_id(code):\n r = rget(\"https://www.futunn.com/stock/{code}\".format(code=code))\n sind = r.text.find(\"securityId\")\n futuid = r.text[sind : sind + 30].split(\"=\")[1].split(\";\")[0].strip(\" \").strip(\"'\")\n sind = r.text.find(\"marketType\")\n market = r.text[sind : sind + 30].split(\"=\")[1].split(\";\")[0].strip().strip(\"''\")\n return futuid, market\n\n\ndef get_futu_historical(code, start=None, end=None):\n fid, market = get_futu_id(code)\n r = rget(\n \"https://www.futunn.com/new-quote/kline?security_id={fid}&type=2&market_type={market}\".format(\n fid=fid, market=market\n )\n )\n df = pd.DataFrame(r.json()[\"data\"][\"list\"])\n df[\"date\"] = df[\"k\"].map(\n lambda s: dt.datetime.fromtimestamp(s)\n .replace(hour=0, minute=0, second=0, microsecond=0)\n .replace(tzinfo=None)\n )\n df[\"open\"] = df[\"o\"] / 1000\n df[\"close\"] = df[\"c\"] / 1000\n df[\"high\"] = df[\"h\"] / 1000\n df[\"low\"] = df[\"l\"] / 1000\n df[\"volume\"] = df[\"v\"]\n df = df.drop([\"k\", \"t\", \"o\", \"c\", \"h\", \"l\", \"v\"], axis=1)\n return df\n\n\ndef get_historical_fromsp(code, start=None, end=None, region=\"us\", **kws):\n \"\"\"\n 标普官网数据源\n\n :param code:\n :param start:\n :param end:\n :param kws:\n :return:\n \"\"\"\n\n if code.startswith(\"SP\"):\n code = code[2:]\n if len(code.split(\".\")) > 1:\n col = code.split(\".\")[1]\n code = code.split(\".\")[0]\n else:\n col = \"1\"\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 300:\n flag = \"one\"\n elif fromnow < 1000:\n flag = \"three\"\n else:\n flag = \"ten\"\n url = \"https://{region}.spindices.com/idsexport/file.xls?\\\nselectedModule=PerformanceGraphView&selectedSubModule=Graph\\\n&yearFlag={flag}YearFlag&indexId={code}\".format(\n region=region, flag=flag, code=code\n )\n r = rget(\n url,\n headers={\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n },\n )\n df = pd.read_excel(r.content, engine=\"xlrd\")\n # print(df.iloc[:10])\n df = df.iloc[6:]\n df = df.dropna()\n df[\"close\"] = df[\"Unnamed: \" + col]\n df[\"date\"] = pd.to_datetime(df[\"Unnamed: 0\"])\n df = df[[\"date\", \"close\"]]\n return df\n\n\ndef get_historical_frombb(code, start=None, end=None, **kws):\n \"\"\"\n https://www.bloomberg.com/ 数据源, 试验性支持。\n 似乎有很严格的 IP 封禁措施, 且最新数据更新滞后,且国内会被 reset,似乎难以支持 T-1 净值预测。强烈建议从英为或雅虎能找到的标的,不要用彭博源,该 API 只能作为 last resort。\n\n :param code:\n :param start:\n :param end:\n :param kws:\n :return:\n \"\"\"\n if code.startswith(\"BB-\"):\n code = code[3:]\n # end_obj = dt.datetime.strptime(end, \"%Y%m%d\")\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 20:\n years = \"1_MONTH\"\n elif fromnow < 300:\n years = \"1_YEAR\"\n else:\n years = \"5_YEAR\"\n url = 
\"https://www.bloomberg.com/markets2/api/history/{code}/PX_LAST?\\\ntimeframe={years}&period=daily&volumePeriod=daily\".format(\n years=years, code=code\n )\n r = rget_json(\n url,\n headers={\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"referer\": \"https://www.bloomberg.com/quote/{code}\".format(code=code),\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"accept\": \"*/*\",\n },\n )\n df = pd.DataFrame(r[0][\"price\"])\n df[\"close\"] = df[\"value\"]\n df[\"date\"] = pd.to_datetime(df[\"dateTime\"])\n df = df[[\"date\", \"close\"]]\n return df\n\n\ndef get_historical_fromft(code, start, end, _type=\"indices\"):\n \"\"\"\n finance times 数据\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if not code.isdigit():\n code = get_ft_id(code, _type=_type)\n start = start.replace(\"/\", \"\").replace(\"-\", \"\")\n end = end.replace(\"/\", \"\").replace(\"-\", \"\")\n start = start[:4] + \"/\" + start[4:6] + \"/\" + start[6:]\n end = end[:4] + \"/\" + end[4:6] + \"/\" + end[6:]\n url = \"https://markets.ft.com/data/equities/ajax/\\\nget-historical-prices?startDate={start}&endDate={end}&symbol={code}\".format(\n code=code, start=start, end=end\n )\n r = rget_json(url, headers={\"user-agent\": \"Mozilla/5.0\"})\n b = BeautifulSoup(r[\"html\"], \"lxml\")\n data = {\"date\": [], \"open\": [], \"close\": [], \"high\": [], \"low\": []}\n for i, td in enumerate(b.findAll(\"td\")):\n if i % 6 == 0:\n s = td.find(\"span\").string.split(\",\")[1:]\n s = \",\".join(s)\n data[\"date\"].append(dt.datetime.strptime(s, \" %B %d, %Y\"))\n elif i % 6 == 1:\n data[\"open\"].append(_float(td.string))\n elif i % 6 == 2:\n data[\"high\"].append(_float(td.string))\n elif i % 6 == 3:\n data[\"low\"].append(_float(td.string))\n elif i % 6 == 4:\n data[\"close\"].append(_float(td.string))\n df = pd.DataFrame(data)\n df = df.iloc[::-1]\n return df\n\n\ndef get_historical_fromyh(code, start=None, end=None):\n \"\"\"\n 雅虎财经数据源,支持数据丰富,不限于美股。但存在部分历史数据缺失 NAN 或者周末进入交易日的现象,可能数据需要进一步清洗和处理。\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"YH-\"):\n code = code[3:]\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 20:\n range_ = \"1mo\"\n elif fromnow < 50:\n range_ = \"3mo\"\n elif fromnow < 150:\n range_ = \"6mo\"\n elif fromnow < 300:\n range_ = \"1y\"\n elif fromnow < 600:\n range_ = \"2y\"\n elif fromnow < 1500:\n range_ = \"5y\"\n else:\n range_ = \"10y\"\n url = \"https://query1.finance.yahoo.com/v8\\\n/finance/chart/{code}?region=US&lang=en-US&includePrePost=false\\\n&interval=1d&range={range_}&corsDomain=finance.yahoo.com&.tsrc=finance\".format(\n code=code, range_=range_\n )\n # 该 API 似乎也支持起止时间选择参数,period1=1427500800&period2=1585353600\n # 也可直接从历史数据页面爬取: https://finance.yahoo.com/quote/CSGOLD.SW/history?period1=1427500800&period2=1585353600&interval=1d&filter=history&frequency=1d\n r = rget_json(url)\n data = {}\n datel = []\n for t in r[\"chart\"][\"result\"][0][\"timestamp\"]:\n t = dt.datetime.fromtimestamp(t)\n if t.second != 0:\n t -= dt.timedelta(hours=8)\n datel.append(t.replace(tzinfo=None, hour=0, minute=0, second=0, microsecond=0))\n\n data[\"date\"] = datel\n for k in [\"close\", \"open\", \"high\", \"low\"]:\n data[k] = r[\"chart\"][\"result\"][0][\"indicators\"][\"quote\"][0][k]\n df = pd.DataFrame(data)\n return df\n\n\ndef get_historical_fromzzindex(code, start, 
end=None):\n \"\"\"\n 中证指数源\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"ZZ\"):\n code = code[2:]\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 20:\n flag = \"1%E4%B8%AA%E6%9C%88\"\n elif fromnow < 60:\n flag = \"3%E4%B8%AA%E6%9C%88\" # 个月\n elif fromnow < 200:\n flag = \"1%E5%B9%B4\" # 年\n else:\n flag = \"5%E5%B9%B4\"\n r = rget_json(\n \"http://www.csindex.com.cn/zh-CN/indices/index-detail/\\\n{code}?earnings_performance={flag}&data_type=json\".format(\n code=code, flag=flag\n ),\n headers={\n \"Host\": \"www.csindex.com.cn\",\n \"Referer\": \"http://www.csindex.com.cn/zh-CN/indices/index-detail/{code}\".format(\n code=code\n ),\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n },\n )\n df = pd.DataFrame(r)\n df[\"date\"] = pd.to_datetime(df[\"tradedate\"])\n df[\"close\"] = df[\"tclose\"].apply(_float)\n return df[[\"date\", \"close\"]]\n\n\ndef get_historical_fromgzindex(code, start, end):\n \"\"\"\n 国证指数源\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"GZ\"):\n code = code[2:]\n start = start[:4] + \"-\" + start[4:6] + \"-\" + start[6:]\n end = end[:4] + \"-\" + end[4:6] + \"-\" + end[6:]\n params = {\n \"indexCode\": code,\n \"startDate\": start,\n \"endDate\": end,\n \"frequency\": \"Day\",\n }\n\n r = rget_json(\n \"http://hq.cnindex.com.cn/market/market/getIndexDailyDataWithDataFormat\",\n params=params,\n )\n df = pd.DataFrame(r[\"data\"][\"data\"], columns=r[\"data\"][\"item\"])\n\n df[\"date\"] = pd.to_datetime(df[\"timestamp\"])\n df = df[[\"date\", \"close\", \"open\", \"low\", \"high\", \"percent\", \"amount\", \"volume\"]]\n # TODO: 是否有这些列不全的国证指数?\n df = df[::-1]\n return df\n\n\ndef get_historical_fromhzindex(code, start, end):\n \"\"\"\n 华证指数源\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"HZ\"):\n code = code[2:]\n\n r = rget_json(\n \"http://www.chindices.com/index/values.val?code={code}\".format(code=code)\n )\n df = pd.DataFrame(r[\"data\"])\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df[[\"date\", \"price\", \"pctChange\"]]\n df.rename(columns={\"price\": \"close\", \"pctChange\": \"percent\"}, inplace=True)\n df = df[::-1]\n return df\n\n\ndef get_historical_fromesunny(code, start=None, end=None):\n \"\"\"\n 易盛商品指数\n\n :param code: eg. 
ESCI000201\n :param start: just placeholder\n :param end: just placeholder\n :return:\n \"\"\"\n # code\n if code.startswith(\"ESCI\"):\n code = code[4:] + \".ESCI\"\n r = rget(\n \"http://www.esunny.com.cn/chartES/csv/shareday/day_易盛指数_{code}.es\".format(\n code=code\n )\n )\n data = []\n for l in r.text.split(\"\\n\"):\n row = [s.strip() for s in l.split(\"|\")] # 开 高 低 收 结\n if len(row) > 1:\n data.append(row[:7])\n df = pd.DataFrame(\n data, columns=[\"date\", \"open\", \"high\", \"low\", \"close\", \"settlement\", \"amount\"]\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n for c in [\"open\", \"high\", \"low\", \"close\", \"settlement\", \"amount\"]:\n df[c] = df[c].apply(_float)\n return df\n\n\ndef get_historical_fromycharts(code, start, end, category, metric):\n params = {\n \"securities\": \"include:true,id:{code},,\".format(code=code),\n \"calcs\": \"include:true,id:{metric},,\".format(metric=metric),\n \"startDate\": start, # %m/%d/%Y\n \"endDate\": end, # %m/%d/%Y\n \"zoom\": \"custom\",\n }\n r = rget_json(\n \"https://ycharts.com/charts/fund_data.json\",\n params=params,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\\\n AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"Host\": \"ycharts.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Referer\": \"https://ycharts.com/{category}/{code}/chart/\".format(\n category=category, code=code\n ),\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n },\n )\n df = pd.DataFrame(\n data=r[\"chart_data\"][0][0][\"raw_data\"], columns=[\"timestamp\", \"close\"]\n )\n df[\"date\"] = (df[\"timestamp\"]).apply(ts2pdts)\n return df[[\"date\", \"close\"]]\n\n\n@lru_cache()\ndef get_bond_rates(rating, date=None):\n \"\"\"\n 获取各评级企业债的不同久期的预期利率\n\n :param rating: str. 
eg AAA, AA-, N for 中国国债\n :param date: %Y-%m-%d\n :return:\n \"\"\"\n rating = rating.strip()\n rating_uid = {\n \"N\": \"2c9081e50a2f9606010a3068cae70001\", # 国债\n \"AAA\": \"2c9081e50a2f9606010a309f4af50111\",\n \"AAA-\": \"8a8b2ca045e879bf014607ebef677f8e\",\n \"AA+\": \"2c908188138b62cd01139a2ee6b51e25\",\n \"AA\": \"2c90818812b319130112c279222836c3\",\n \"AA-\": \"8a8b2ca045e879bf014607f9982c7fc0\",\n \"A+\": \"2c9081e91b55cc84011be40946ca0925\",\n \"A\": \"2c9081e91e6a3313011e6d438a58000d\",\n \"A-\": \"8a8b2ca04142df6a014148ca880f3046\",\n \"A\": \"2c9081e91e6a3313011e6d438a58000d\",\n \"BBB+\": \"2c9081e91ea160e5011eab1f116c1a59\",\n \"BBB\": \"8a8b2ca0455847ac0145650780ad68fb\",\n \"BB\": \"8a8b2ca0455847ac0145650ba23b68ff\",\n \"B\": \"8a8b2ca0455847ac0145650c3d726901\",\n }\n # 上边字典不全,非常欢迎贡献 :)\n def _fetch(date):\n r = rpost(\n \"https://yield.chinabond.com.cn/cbweb-mn/yc/searchYc?\\\nxyzSelect=txy&&workTimes={date}&&dxbj=0&&qxll=0,&&yqqxN=N&&yqqxK=K&&\\\nycDefIds={uid}&&wrjxCBFlag=0&&locale=zh_CN\".format(\n uid=rating_uid.get(rating, rating), date=date\n ),\n )\n return r\n\n if not date:\n date = dt.datetime.today().strftime(\"%Y-%m-%d\")\n\n r = _fetch(date)\n while len(r.text.strip()) < 20: # 当天没有数据,非交易日\n date = last_onday(date).strftime(\"%Y-%m-%d\")\n r = _fetch(date)\n l = r.json()[0][\"seriesData\"]\n l = [t for t in l if t[1]]\n df = pd.DataFrame(l, columns=[\"year\", \"rate\"])\n return df\n\n\ndef get_bond_rates_range(rating, duration=3, freq=\"W-FRI\", start=None, end=None):\n l = []\n if rating.startswith(\"B-\"):\n rating = rating[2:]\n rs = rating.split(\".\")\n if len(rs) > 1:\n duration = float(rs[1])\n rating = rs[0]\n\n for d in pd.date_range(start, end, freq=freq):\n df = get_bond_rates(rating, d.strftime(\"%Y-%m-%d\"))\n l.append([d, df[df[\"year\"] <= duration].iloc[-1][\"rate\"]])\n return pd.DataFrame(l, columns=[\"date\", \"close\"])\n\n\n@data_source(\"jq\")\ndef get_macro(table, start, end, datecol=\"stat_year\"):\n df = macro.run_query(\n query(getattr(macro, table))\n .filter(getattr(getattr(macro, table), datecol) >= start)\n .filter(getattr(getattr(macro, table), datecol) <= end)\n .order_by(getattr(getattr(macro, table), datecol))\n )\n df[datecol] = pd.to_datetime(df[datecol])\n df[\"date\"] = df[datecol]\n return df\n\n\ndef set_handler(method=\"daily\", f=None):\n \"\"\"\n 为 ``get_daily``, ``get_bar`` 或 ``get_rt`` 设置 hook,优先按照函数 f 进行处理,若返回 None,再按一般情形处理\n\n :param method: str. daily, rt, bar\n :param f: func, default None.\n :return: None\n \"\"\"\n setattr(thismodule, \"get_\" + method + \"_handler\", f)\n\n\ndef _get_daily(\n code, start=None, end=None, prev=365, _from=None, wrapper=True, handler=True, **kws\n):\n \"\"\"\n universal fetcher for daily historical data of literally everything has a value in market.\n 数据来源包括但不限于天天基金,雪球,英为财情,外汇局官网,聚宽,标普官网,bloomberg,雅虎财经,ycharts等。\n\n :param code: str.\n\n 1. 对于沪深市场的股票,指数,ETF,LOF 场内基金,可转债和债券,直接使用其代码,主要开头需要包括 SH 或者 SZ。如果数字代码之后接 .A .B .N 分别代表后复权,前复权和不复权数据,不加后缀默认前复权。港股美股同理。\n\n 2. 对于香港市场的股票,指数,使用其数字代码,同时开头要添加 HK。\n\n 3. 对于美国市场的股票,指数,ETF 等,直接使用其字母缩写代码即可。\n\n 4. 对于人民币中间价数据,使用 \"USD/CNY\" 的形式,具体可能的值可在 http://www.chinamoney.com.cn/chinese/bkccpr/ 历史数据的横栏查询,注意日元需要用 100JPY/CNY.\n\n 5. 对于所有可以在 cn.investing.com 网站查到的金融产品,其代码可以是该网站对应的统一代码,或者是网址部分,比如 DAX 30 的概览页面为 https://cn.investing.com/indices/germany-30,那么对应代码即为 \"indices/germany-30\"。也可去网页 inspect 手动查找其内部代码(一般不需要自己做,推荐直接使用网页url作为 code 变量值),手动 inspect 加粗的实时价格,其对应的网页 span class 中的 pid 的数值即为内部代码。\n\n 6. 
对于国内发行的基金,使用基金代码,同时开头添加 F。若想考虑分红使用累计净值,则开头添加 T。\n\n 7. 对于国内发行的货币基金,使用基金代码,同时开头添加 M。(全部按照净值数据处理)\n\n 8. 形如 peb-000807.XSHG 或 peb-SH000807 格式的数据,可以返回每周的指数估值情况,需要 enable 聚宽数据源方可查看。\n\n 9. 形如 iw-000807.XSHG 或 iw-SH000807 格式的数据,可以返回每月的指数成分股和实时权重,需要 enable 聚宽数据源方可查看。\n\n 10. 形如 fs-SH501018 格式的数据,可以返回指定场内基金每日份额,需要 enable 聚宽数据源方可查看。\n\n 11. 形如 SP5475707.2 格式的数据,可以返回标普官网相关指数的日线数据(最近十年),id 5475707 部分可以从相关指数 export 按钮获取的链接中得到,小数点后的部分代表保存的列数。参考链接:https://us.spindices.com/indices/equity/sp-global-oil-index. 若SPC开头,则从中国网站获取。\n\n 12. 形如 BB-FGERBIU:ID 格式的数据,对应网页 https://www.bloomberg.com/quote/FGERBIU:ID,可以返回彭博的数据(最近五年)\n\n 13. 形如 sw-801720 格式的数据,可以返回对应申万行业的历史数据情况,需要 enable 聚宽数据源方可查看。\n\n 14. 形如 teb-SH000300 格式的数据,返回每周指数盈利和净资产总值数据(单位:亿人民币元),需要 enbale 聚宽数据方可查看。\n\n 15. 形如 YH-CSGOLD.SW 格式的数据,返回雅虎财经标的日线数据(最近十年)。代码来自标的网页 url:https://finance.yahoo.com/quote/CSGOLD.SW。\n\n 16. 形如 FT-22065529 格式的数据或 FT-INX:IOM,可以返回 financial times 的数据,推荐直接用后者。前者数字代码来源,打开浏览器 network 监视,切换图标时间轴时,会新增到 https://markets.ft.com/data/chartapi/series 的 XHR 请求,其 request payload 里的 [elements][symbol] 即为该指数对应数字。\n\n 17. 形如 FTC-WTI+Crude+Oil 格式的数据,开头可以是 FTC, FTE, FTX, FTF, FTB, FTI 对应 ft.com 子栏目 commdities,equities,currencies,funds,bonds,indicies。其中 FTI 和 FT 相同。\n\n 18. 形如 mcy-MAC_AREA_UNEMPLOY 格式的数据,返回相应的宏观数据,需要聚宽数据源。mcy,mcq,mcm 代表年度,季度和月度的数据,code 为表名,可以参考 https://www.joinquant.com/help/api/help?name=macroData\n\n 19. 形如 ZZ000905,ZZH30533 的代码,代表中证官网的指数,ZZ 之后接指数代码,注意有些指数代码里可能包含 H,历史数据最大到近五年。\n\n 20. 形如 GZB30018, GZ399299 格式的数据,代表国证系列指数, GZ 之后接指数代码,代码可能包含更多字母。\n\n 21. 形如 ESCI000201 格式的数据,易盛商品指数系列,参考 http://www.esunny.com.cn/index.php?a=lists&catid=60。\n\n 22. 形如 pt-F100032 格式的数据,返回指定基金每季度股票债券和现金的持仓比例\n\n 23. 形如 yc-companies/DBP,yc-companies/DBP/price 格式的数据,返回ycharts股票、ETF数据,对应网页 https://ycharts.com/companies/DBP/price,最后部分为数据含义,默认price,可选:net_asset_value(仅ETF可用)、total_return_price、total_return_forward_adjusted_price、average_volume_30,历史数据限制五年内。\n\n 24. 形如 yc-indices/^SPGSCICO,yc-indices/^SPGSCICO/level 格式的数据,返回ycharts指数数据,对应网页 https://ycharts.com/indices/%5ESPGSCICO/level,最后部分为数据含义,默认level,可选:total_return_forward_adjusted_price,历史数据限制五年内。\n\n 25. 形如 HZ999001 HZ999005 格式的数据,代表了华证系列指数 http://www.chindices.com/indicator.html#\n\n 26. 形如 B-AA+.3 格式的数据,代表了 AA+ 企业债三年久期利率数据 (每周)\n\n 27. 形如 fu-00700.HK 或 fu-BA.US 格式的数据,代表了来自 https://www.futunn.com/stock/BA-US 的日线行情数据\n\n :param start: str. \"20200101\", \"2020/01/01\", \"2020-01-01\" are all legal. The starting date of daily data.\n :param end: str. format is the same as start. The ending date of daily data.\n :param prev: Optional[int], default 365. If start is not specified, start = end-prev.\n :param _from: Optional[str]. 一般用户不需设定该选项。can be one of \"xueqiu\", \"zjj\", \"investing\", \"tiantianjijin\". Only used for debug to\n enforce data source. For common use, _from can be chosed automatically based on code in the run time.\n :param wrapper: bool. 一般用户不需设定该选项。\n :param handler: bool. Default True. 
若为 False,则 handler 钩子失效,用于钩子函数中的原函数嵌套调用。\n :return: pd.Dataframe.\n must include cols: date[pd.Timestamp],close[float64]。\n \"\"\"\n if handler:\n if getattr(thismodule, \"get_daily_handler\", None):\n args = inspect.getargvalues(inspect.currentframe())\n f = getattr(thismodule, \"get_daily_handler\")\n fr = f(**args.locals)\n if fr is not None:\n return fr\n\n if not end:\n end_obj = today_obj()\n else:\n end_obj = dstr2dobj(end)\n if not start:\n start_obj = end_obj - dt.timedelta(days=prev)\n else:\n start_obj = dstr2dobj(start)\n\n if not _from:\n if (code.startswith(\"SH\") or code.startswith(\"SZ\")) and code[2:8].isdigit():\n _from = \"xueqiu\"\n elif code.endswith(\"/CNY\") or code.startswith(\"CNY/\"):\n _from = \"zjj\"\n elif code.isdigit():\n _from = \"cninvesting\"\n elif code[0] in [\"F\", \"M\", \"T\"] and code[1:].isdigit():\n _from = \"ttjj\"\n elif code.startswith(\"HK\") and code[2:7].isdigit():\n _from = \"xueqiu\"\n code = code[2:]\n elif code.startswith(\"SP\") and code[2:].split(\".\")[0].isdigit():\n _from = \"SP\"\n elif code.startswith(\"SPC\") and code[3:].split(\".\")[0].isdigit():\n _from = \"SPC\"\n elif code.startswith(\"ZZ\") and code[4:].isdigit(): # 注意中证系列指数的代码里可能包含字母!\n _from = \"ZZ\"\n elif code.startswith(\"GZ\") and code[-3:].isdigit(): # 注意国证系列指数的代码里可能包含多个字母!\n _from = \"GZ\"\n elif code.startswith(\"HZ\") and code[2:].isdigit():\n _from = \"HZ\"\n elif code.startswith(\"ESCI\") and code[4:].isdigit():\n _from = \"ES\"\n elif code.startswith(\"yc-companies/\") or code.startswith(\"yc-indices/\"):\n _from = \"ycharts\"\n params = code.split(\"/\")\n code = params[1]\n category = params[0].split(\"-\")[1]\n if len(params) == 3:\n metric = params[2]\n else:\n if category == \"companies\":\n metric = \"price\"\n elif category == \"indices\":\n metric = \"level\"\n elif len(code.split(\"-\")) >= 2 and len(code.split(\"-\")[0]) <= 3:\n # peb-000807.XSHG\n _from = code.split(\"-\")[0]\n code = \"-\".join(code.split(\"-\")[1:])\n elif len(code[1:].split(\"/\")) == 2:\n _from = \"cninvesting\"\n code = get_investing_id(code)\n else:\n _from = \"xueqiu\" # 美股代码\n\n count = (today_obj() - start_obj).days + 1\n start_str = start_obj.strftime(\"%Y/%m/%d\")\n end_str = end_obj.strftime(\"%Y/%m/%d\")\n if _from in [\"cninvesting\", \"investing\", \"default\", \"IN\"]:\n df = get_historical_fromcninvesting(code, start_str, end_str)\n df = prettify(df)\n elif _from in [\"xueqiu\", \"xq\", \"snowball\", \"XQ\"]:\n code, type_ = decouple_code(code)\n\n df = get_historical_fromxq(code, count, type_=type_)\n df = prettify(df)\n elif _from in [\"zhongjianjia\", \"zjj\", \"chinamoney\", \"ZJJ\"]:\n df = get_rmb(start, end, prev, currency=code)\n elif _from in [\"ttjj\", \"tiantianjijin\", \"xalpha\", \"eastmoney\"]:\n if code.startswith(\"F96\"):\n df = get_historical_from_ttjj_oversea(code, start=start, end=end)\n else:\n df = get_fund(code)\n\n elif _from == \"peb\":\n if (\n code.startswith(\"SH000\")\n or code.startswith(\"SZ399\")\n or code.startswith(\"399\")\n or code.startswith(\"000\")\n ):\n df = _get_peb_range(code=code, start=start_str, end=end_str)\n elif code.startswith(\"F\"):\n df = get_fund_peb_range(code=code, start=start, end=end)\n else:\n df = get_stock_peb_range(code=code, start=start, end=end, wrapper=True)\n\n elif _from == \"iw\":\n df = _get_index_weight_range(code=code, start=start_str, end=end_str)\n\n elif _from == \"fs\":\n df = get_fundshare_byjq(code, start=start, end=end)\n\n elif _from == \"SP\":\n df = get_historical_fromsp(code, 
start=start, end=end)\n\n elif _from == \"SPC\":\n df = get_historical_fromsp(code[3:], start=start, end=end, region=\"chinese\")\n\n elif _from == \"BB\":\n df = get_historical_frombb(code, start=start, end=end)\n\n elif _from == \"ZZ\":\n df = get_historical_fromzzindex(code, start=start, end=end)\n\n elif _from == \"GZ\":\n df = get_historical_fromgzindex(code, start=start, end=end)\n\n elif _from == \"HZ\":\n df = get_historical_fromhzindex(code, start=start, end=end)\n\n elif _from == \"ES\":\n df = get_historical_fromesunny(code, start=start, end=end)\n\n elif _from == \"B\":\n df = get_bond_rates_range(code, start=start, end=end)\n\n elif _from == \"fu\":\n code = code.replace(\".\", \"-\")\n df = get_futu_historical(code, start=start, end=end)\n\n elif _from == \"ycharts\":\n df = get_historical_fromycharts(\n code,\n start=start_obj.strftime(\"%m/%d/%Y\"),\n end=end_obj.strftime(\"%m/%d/%Y\"),\n category=category,\n metric=metric,\n )\n\n elif _from == \"sw\":\n df = get_sw_from_jq(code, start=start, end=end)\n\n elif _from == \"teb\":\n df = get_teb_range(code, start=start, end=end)\n\n elif _from in [\"pt\", \"portfolio\"]:\n df = get_portfolio_fromttjj(code, start=start, end=end)\n\n elif _from == \"YH\":\n df = get_historical_fromyh(code, start=start, end=end)\n\n elif _from in [\"FT\", \"FTI\"]:\n df = get_historical_fromft(code, start=start, end=end)\n\n elif _from == \"FTE\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"equities\")\n\n elif _from == \"FTB\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"bonds\")\n\n elif _from == \"FTF\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"funds\")\n\n elif _from == \"FTX\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"currencies\")\n\n elif _from == \"FTC\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"commodities\")\n\n elif _from == \"INA\": # investing app\n code = get_investing_id(code, app=True)\n df = get_historical_fromcninvesting(code, start_str, end_str, app=True)\n df = prettify(df)\n\n elif _from == \"mcy\":\n df = get_macro(code, start=start[:4], end=end[:4], datecol=\"stat_year\")\n\n elif _from == \"mcq\":\n df = get_macro(code, start=start, end=end, datecol=\"stat_quarter\")\n\n elif _from == \"mcm\":\n df = get_macro(code, start=start, end=end, datecol=\"stat_month\")\n\n elif _from == \"mcd\":\n df = get_macro(code, start=start, end=end, datecol=\"day\")\n\n else:\n raise ParserFailure(\"no such data source: %s\" % _from)\n\n if wrapper or len(df) == 0:\n return df\n else:\n df = df[df.date <= end_str]\n df = df[df.date >= start_str]\n return df\n\n\ndef get_xueqiu_rt(code, token=\"a664afb60c7036c7947578ac1a5860c4cfb6b3b5\"):\n if code.startswith(\"HK\") and code[2:].isdigit():\n code = code[2:]\n url = \"https://stock.xueqiu.com/v5/stock/quote.json?symbol={code}&extend=detail\"\n r = rget_json(\n url.format(code=code),\n cookies={\"xq_a_token\": token},\n headers={\"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\"},\n )\n n = r[\"data\"][\"quote\"][\"name\"]\n q = r[\"data\"][\"quote\"][\"current\"]\n try:\n q = _float(q)\n except TypeError: # 针对雪球实时在9点后开盘前可能出现其他情形的fixup, 效果待 check\n # 现在的怀疑是在9am 到9:15 am, 雪球 API current 字段返回 Null\n q = _float(r[\"data\"][\"quote\"][\"last_close\"])\n q_ext = r[\"data\"][\"quote\"].get(\"current_ext\", None)\n percent = r[\"data\"][\"quote\"][\"percent\"]\n try:\n percent = _float(percent)\n except:\n pass\n currency = r[\"data\"][\"quote\"][\"currency\"]\n market 
= r[\"data\"][\"market\"][\"region\"]\n timestr = dt.datetime.fromtimestamp(r[\"data\"][\"quote\"][\"time\"] / 1000).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n if r[\"data\"][\"quote\"].get(\"timestamp_ext\", None):\n time_ext = dt.datetime.fromtimestamp(\n r[\"data\"][\"quote\"][\"timestamp_ext\"] / 1000\n ).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n time_ext = None\n share = r[\"data\"][\"quote\"][\"total_shares\"]\n fshare = r[\"data\"][\"quote\"][\"float_shares\"]\n volume = r[\"data\"][\"quote\"][\"volume\"]\n return {\n \"name\": n,\n \"current\": q,\n \"percent\": percent,\n \"current_ext\": _float(q_ext) if q_ext else None,\n \"currency\": currency,\n \"market\": market, # HK, US, CN\n \"time\": timestr,\n \"time_ext\": time_ext,\n \"totshare\": share,\n \"floatshare\": fshare,\n \"volume\": volume,\n }\n\n\ndef get_cninvesting_rt(suburl, app=False):\n if not app:\n url = \"https://cn.investing.com\"\n else:\n url = \"https://cnappapi.investing.com\"\n if not suburl.startswith(\"/\"):\n url += \"/\"\n url += suburl\n if not app:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36\"\n }\n else:\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept-Language\": \"zh-cn\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0\",\n \"ccode\": \"CN\",\n #'ccode_time': '1585551041.986028',\n \"x-app-ver\": \"117\",\n \"x-meta-ver\": \"14\",\n \"x-os\": \"ios\",\n \"x-uuid\": str(uuid4()),\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n\n r = rget(\n url,\n headers=headers,\n )\n s = BeautifulSoup(r.text, \"lxml\")\n last_last = s.find(\"span\", id=\"last_last\")\n q = _float(last_last.string)\n name = s.find(\"h1\").string.strip()\n ind = 0\n timestr = s.select('span[class*=\"ClockBigIcon\"]+span')[0].text\n l = s.find(\"div\", class_=\"lighterGrayFont\").contents\n for i, c in enumerate(l):\n if isinstance(c, str) and c.strip() == \"货币\":\n ind = i\n break\n if ind == 0:\n currency = None\n else:\n currency = l[ind - 1].string\n percent = _float(\n s.find(\"span\", attrs={\"dir\": \"ltr\", \"class\": \"parentheses\"}).string[:-1]\n )\n panhou = s.find(\"div\", class_=\"afterHoursInfo\")\n if panhou:\n q_ext = _float(panhou.find(\"span\").string)\n else:\n q_ext = None\n market = None\n for span in s.findAll(\"span\", class_=\"elp\"):\n if span.find(\"a\") and span.find(\"a\")[\"href\"].startswith(\"/markets\"):\n market = span.string\n market = region_trans.get(market, market)\n time_ext = s.select(\"div[class~=lastUpdated]\")\n if time_ext:\n time_ext = time_ext[0].text.strip()\n else:\n time_ext = None\n d = {\n \"name\": name,\n \"current\": q,\n \"current_ext\": q_ext,\n \"time\": timestr,\n \"time_ext\": time_ext,\n \"currency\": currency,\n \"percent\": percent,\n \"market\": market,\n }\n\n if suburl.startswith(\"commodities\"): # 商品期货展期日\n try:\n d[\"rollover\"] = s.select(\"span[class*=float_lang_base_2]\")[10].string\n d[\"lastrollover\"] = s.select(\"span[class*=float_lang_base_2]\")[13].string\n except (ValueError, IndexError, AttributeError):\n logger.warning(\"%s cannot extract rollover date\" % suburl)\n # in case some commodities with strong page structure\n return d\n\n\ndef get_rt_from_sina(code):\n if (\n code.startswith(\"SH\") or code.startswith(\"SZ\") or code.startswith(\"HK\")\n ) and code[2:].isdigit():\n tinycode = code[:2].lower() + code[2:]\n if 
code.startswith(\"HK\"): # 港股额外要求实时\n tinycode = \"rt_\" + tinycode\n else: # 美股\n tinycode = \"gb_\"\n if code.startswith(\".\"):\n code = code[1:]\n tinycode += code.lower()\n r = rget(\"https://hq.sinajs.cn/list={tinycode}\".format(tinycode=tinycode))\n l = r.text.split(\"=\")[1].split(\",\")\n d = {}\n d[\"name\"] = l[0].strip('\"')\n if (\n code.startswith(\"SH\") or code.startswith(\"SZ\") or code.startswith(\"HK\")\n ) and code[2:].isdigit():\n # TODO: 20200819: API seems changed a bit, index shift?\n # or things may get zero when the market is closed?\n if code.startswith(\"HK\"):\n d[\"current\"] = float(l[9]) # 英文股票名称占位\n d[\"currency\"] = \"HKD\"\n d[\"percent\"] = round(float(l[8]), 2)\n d[\"market\"] = \"HK\"\n d[\"time\"] = l[17] + \" \" + l[18]\n d[\"current_ext\"] = None\n\n else: # A 股\n d[\"current\"] = float(l[3])\n d[\"currency\"] = \"CNY\"\n d[\"percent\"] = round((float(l[3]) / float(l[2]) - 1) * 100, 2)\n d[\"market\"] = \"CN\"\n d[\"time\"] = l[-4] + \" \" + l[-3]\n for i in range(10, 19)[::2]:\n d[\"buy\" + str(int((i - 8) / 2))] = (l[i + 1], l[i])\n for i in range(20, 29)[::2]:\n d[\"sell\" + str(int((i - 18) / 2))] = (l[i + 1], l[i])\n d[\"current_ext\"] = None\n\n else:\n d[\"currency\"] = \"USD\"\n d[\"current\"] = float(l[1])\n d[\"percent\"] = float(l[2])\n d[\"current_ext\"] = _float(l[21]) if _float(l[21]) > 0 else None\n d[\"market\"] = \"US\"\n d[\"time\"] = l[3]\n return d\n\n\ndef make_ft_url(code, _type=\"indices\"):\n \"\"\"\n\n :param code:\n :param _type: indices, commodities, currencies, funds, equities, bonds\n :return:\n \"\"\"\n if _type == \"indices\":\n url = \"https://markets.ft.com/data/indices/tearsheet/summary?s={code}\".format(\n code=code\n )\n elif _type == \"commodities\":\n url = (\n \"https://markets.ft.com/data/commodities/tearsheet/summary?c={code}\".format(\n code=code\n )\n )\n elif _type == \"currencies\":\n url = (\n \"https://markets.ft.com/data/currencies/tearsheet/summary?s={code}\".format(\n code=code\n )\n )\n elif _type == \"funds\":\n url = \"https://markets.ft.com/data/funds/tearsheet/summary?s={code}\".format(\n code=code\n )\n elif _type == \"equities\":\n url = \"https://markets.ft.com/data/equities/tearsheet/summary?s={code}\".format(\n code=code\n )\n elif _type == \"bonds\":\n url = \"https://markets.ft.com/data/bonds/tearsheet/summary?s={code}\".format(\n code=code\n )\n else:\n raise ParserFailure(\"no reconginzed type for ft datasource: %s\" % _type)\n return url\n\n\n@lru_cache(maxsize=1024)\ndef get_ft_id(code, _type=\"indices\"):\n url = make_ft_url(code, _type=_type)\n r = rget(url)\n b = BeautifulSoup(r.text, \"lxml\")\n return eval(\n b.find(\"section\", class_=\"mod-tearsheet-add-to-watchlist\")[\"data-mod-config\"]\n )[\"xid\"]\n\n\ndef get_rt_from_ft(code, _type=\"indices\"):\n url = make_ft_url(code, _type=_type)\n r = rget(url)\n b = BeautifulSoup(r.text, \"lxml\")\n d = {}\n d[\"name\"] = b.find(\"h1\").string\n d[\"current\"] = _float(b.find(\"span\", class_=\"mod-ui-data-list__value\").string)\n d[\"percent\"] = _float(\n b.select(\"span[class^='mod-format--']\")[0].text.split(\"/\")[-1].strip()[:-1]\n )\n d[\"current_ext\"] = None\n d[\"market\"] = None\n d[\"currency\"] = b.find(\"span\", class_=\"mod-ui-data-list__label\").string.split(\"(\")[\n 1\n ][:-1]\n d[\"time\"] = b.find(\"div\", class_=\"mod-disclaimer\").string\n return d\n\n\ndef get_rt_from_ycharts(code):\n if code.startswith(\"yc-\"):\n code = code[3:]\n url = \"https://ycharts.com/\" + code\n r = rget(url)\n s = 
BeautifulSoup(r.text, \"lxml\")\n qdiv = s.select(\"div.index-rank.col-auto\") # current\n spans = [s for s in qdiv[0].contents if s != \"\\n\" and s.contents]\n d = {}\n d[\"name\"] = s.select(\"h1,h3[class=securityName]\")[0].text.strip()\n d[\"current\"], d[\"percent\"] = (\n _float(spans[0].string), # current,\n _float(spans[1].contents[-2].string[1:-1]), # percent\n )\n l = [\n c.strip()\n for c in s.select(\"span[class=index-info]\")[0].string.split(\"\\n\")\n if c.strip()\n ]\n d[\"time\"] = l[1]\n d[\"currency\"] = l[0].split(\" \")[0].strip()\n d[\"market\"] = None\n return d\n\n\n@lru_cache_time(ttl=300, maxsize=512)\ndef get_newest_netvalue(code):\n \"\"\"\n 防止天天基金总量 API 最新净值更新不及时,获取基金最新公布净值及对应日期, depracated, use get_rt(\"F501018\") instead\n\n :param code: six digits string for fund.\n :return: netvalue, %Y-%m-%d\n \"\"\"\n code = code[1:]\n r = rget(\"http://fund.eastmoney.com/{code}.html\".format(code=code))\n s = BeautifulSoup(r.text, \"lxml\")\n return (\n float(\n s.findAll(\"dd\", class_=\"dataNums\")[1]\n .find(\"span\", class_=\"ui-font-large\")\n .string\n ),\n str(s.findAll(\"dt\")[1]).split(\"(\")[1].split(\")\")[0][7:],\n )\n\n\n@lru_cache(maxsize=512)\ndef get_hkfcode(code):\n if code.startswith(\"F\"):\n code = code[1:]\n page = rget(\"http://overseas.1234567.com.cn/{code}\".format(code=code)).text\n page.find(\"hkfcode\")\n hkfcode = (\n page[page.find(\"hkfcode\") :]\n .split(\"=\")[1]\n .split(\";\")[0]\n .lstrip()\n .lstrip(\"'\")\n .strip(\"'\")\n )\n return hkfcode\n\n\ndef get_rt_from_ttjj_oversea(code):\n if code.startswith(\"F\"):\n code = code[1:]\n if not code.startswith(\"96\"):\n raise ValueError(\"%s is not an oversea fund\" % code)\n r = rget(\"http://overseas.1234567.com.cn/{code}.html\".format(code=code))\n r.encoding = \"utf-8\"\n s = BeautifulSoup(r.text, \"lxml\")\n start = s.select(\"dl.dataItem02\")[0].text\n start = start.split(\"(\")[1].split(\")\")[0]\n name = s.select(\"div[class='fundDetail-tit']\")[0].text.split(\"(\")[0].strip()\n name = name.split(\"(\")[0].strip()\n value = _float(s.select(\"span.ui-font-large.ui-num\")[0].text)\n date = (\n s.select(\"dl[class='dataItem01']\")[0]\n .find(\"p\")\n .text.split(\"(\")[-1]\n .split(\")\")[0]\n )\n infol = [\n r for r in s.select(\"div[class='infoOfFund']\")[0].text.split(\"\\n\") if r.strip()\n ]\n return {\n \"name\": name,\n \"time\": date,\n \"current\": value,\n \"market\": \"CN\",\n \"currency\": None, # 很可能存在非人民币计价的互认基金\n \"current_ext\": None,\n \"type\": infol[0].split(\":\")[1].strip(),\n \"scale\": infol[1].split(\":\")[1].strip(),\n \"manager\": infol[2].split(\":\")[1].strip(),\n \"startdate\": start,\n }\n\n\n@lru_cache_time(ttl=600, maxsize=512)\ndef get_rt_from_ttjj(code):\n code = code[1:]\n if code.startswith(\"96\"):\n return get_rt_from_ttjj_oversea(code)\n r = rget(\"http://fund.eastmoney.com/{code}.html\".format(code=code))\n r.encoding = \"utf-8\"\n s = BeautifulSoup(r.text, \"lxml\")\n name = s.select(\"div[style='float: left']\")[0].text.split(\"(\")[0]\n if s.findAll(\"dd\", class_=\"dataNums\")[1].find(\n \"span\", class_=\"ui-font-large\"\n ): # 非货币基金\n value, date = (\n float(\n s.findAll(\"dd\", class_=\"dataNums\")[1]\n .find(\"span\", class_=\"ui-font-large\")\n .string\n ),\n str(s.findAll(\"dt\")[1]).split(\"(\")[1].split(\")\")[0][7:],\n )\n estimate = s.select(\"span[id=gz_gsz]\")[0].text # after loading\n if estimate == \"--\":\n gsz = rget(\n \"http://fundgz.1234567.com.cn/js/{code}.js\".format(code=code),\n headers={\n \"Host\": 
\"fundgz.1234567.com.cn\",\n \"Referer\": \"http://fund.eastmoney.com/\",\n },\n )\n try: # in case eval error\n gsz_dict = eval(gsz.text[8:-2])\n estimate = _float(gsz_dict[\"gsz\"])\n estimate_time = gsz_dict[\"gztime\"]\n except:\n estimate = None\n else:\n try:\n estimate = _float(estimate)\n except ValueError:\n logger.warning(\"unrecognized estimate netvalue %s\" % estimate)\n estimate = None\n else:\n value, date = (\n s.findAll(\"dd\", class_=\"dataNums\")[1].text,\n str(s.findAll(\"dt\")[1]).split(\"(\")[1].split(\")\")[0],\n )\n estimate = None\n status = s.select(\"span[class='staticCell']\")[0].text.strip()\n tb = s.select(\"div.infoOfFund > table >tr>td\")\n infol = [i.text for i in tb]\n try:\n estimate_time\n except NameError:\n estimate_time = None\n return {\n \"name\": name,\n \"time\": date,\n \"current\": value,\n \"market\": \"CN\",\n \"currency\": \"CNY\",\n \"current_ext\": None,\n \"status\": status,\n \"type\": infol[0].split(\":\")[1].split(\"\\xa0\")[0],\n \"scale\": infol[1].split(\":\")[1],\n \"manager\": infol[2].split(\":\")[1],\n \"company\": infol[4].split(\":\")[1],\n \"estimate\": estimate,\n \"estimate_time\": estimate_time,\n }\n # 是否有美元份额计价的基金会出问题?\n\n\n@lru_cache(2048)\ndef get_fund_type(code):\n \"\"\"\n given fund code, return unified fund category which is extracted from get_rt(code)[\"type\"]\n\n :param code:\n :return: str.\n \"\"\"\n code = code[-6:]\n t = get_rt(\"F\" + code)[\"type\"]\n\n if t in [\"联接基金\", \"股票指数\"] or t.startswith(\"ETF\"):\n return \"指数基金\"\n elif t.startswith(\"QDII\"):\n return \"QDII\"\n elif t.startswith(\"股票\"):\n return \"股票基金\"\n elif t.startswith(\"混合\"):\n return \"混合基金\"\n elif t.startswith(\"债券\"):\n return \"债券基金\"\n elif t.startswith(\"货币\"):\n return \"货币基金\"\n else:\n return \"其他\"\n\n\ndef get_rt(\n code, _from=None, double_check=False, double_check_threhold=0.005, handler=True\n):\n \"\"\"\n universal fetcher for realtime price of literally everything.\n\n :param code: str. 规则同 :func:`get_daily`. 需要注意场外基金和外汇中间价是不支持实时行情的,因为其每日只有一个报价。对于 investing 的数据源,只支持网址格式代码。\n :param _from: Optional[str]. can be one of \"xueqiu\", \"investing\". Only used for debug to\n enfore data source. For common use, _from can be chosed automatically based on code in the run time.\n :param double_check: Optional[bool], default False. 如果设为 True,只适用于 A 股,美股,港股实时行情,会通过至少两个不同的数据源交叉验证,确保正确。\n 适用于需要自动交易等情形,防止实时数据异常。\n :param handler: bool. Default True. 
若为 False,则 handler 钩子失效,用于钩子函数中的嵌套。\n :return: Dict[str, Any].\n 包括 \"name\", \"current\", \"percent\" 三个必有项和 \"current_ext\"(盘后价格), \"currency\" (计价货币), \"market\" (发行市场), \"time\"(记录时间) 可能为 ``None`` 的选项。\n \"\"\"\n # 对于一些标的,get_rt 的主任务可能不是 current 价格,而是去拿 market currency 这些元数据\n # 现在用的新浪实时数据源延迟严重, double check 并不靠谱,港股数据似乎有15分钟延迟(已解决)\n # 雪球实时和新浪实时在9:00之后一段时间可能都有问题\n # FT 数据源有10到20分钟的延迟\n if handler:\n if getattr(thismodule, \"get_rt_handler\", None):\n args = inspect.getargvalues(inspect.currentframe())\n f = getattr(thismodule, \"get_rt_handler\")\n fr = f(**args.locals)\n if fr:\n return fr\n\n if not _from:\n # if code.startswith(\"HK\") and code[2:].isdigit():\n # _from = \"xueqiu\"\n if code.startswith(\"yc-\"):\n _from = \"ycharts\"\n elif len(code.split(\"-\")) >= 2 and len(code.split(\"-\")[0]) <= 3:\n _from = code.split(\"-\")[0]\n code = \"-\".join(code.split(\"-\")[1:])\n elif (code.startswith(\"F\") or code.startswith(\"T\")) and code[1:].isdigit():\n _from = \"ttjj\"\n elif len(code.split(\"/\")) > 1:\n _from = \"investing\"\n else: # 默认启用雪球实时,新浪纯指数行情不完整\n _from = \"xueqiu\"\n if _from in [\"cninvesting\", \"investing\"]:\n try:\n return get_cninvesting_rt(code)\n except Exception as e:\n logger.warning(\n \"Fails due to %s, now trying app source of investing.com\" % e.args[0]\n )\n return get_cninvesting_rt(code, app=True)\n elif double_check and _from in [\"xueqiu\", \"sina\"]:\n r1 = get_xueqiu_rt(code, token=get_token())\n r2 = get_rt_from_sina(code)\n if abs(r1[\"current\"] / r2[\"current\"] - 1) > double_check_threhold:\n raise DataPossiblyWrong(\"realtime data unmatch for %s\" % code)\n return r2\n elif _from in [\"xueqiu\", \"xq\", \"snowball\"]:\n try:\n return get_xueqiu_rt(code, token=get_token())\n except (IndexError, ValueError, AttributeError, TypeError) as e: # 默认雪球实时引入备份机制\n logging.warning(\n \"Fails due to %s, now trying backup data source from sina\" % e.args[0]\n )\n return get_rt_from_sina(code)\n elif _from in [\"sina\", \"sn\", \"xinlang\"]:\n try:\n return get_rt_from_sina(code)\n except (IndexError, ValueError, AttributeError, TypeError) as e: # 默认雪球实时引入备份机制\n logging.warning(\n \"Fails due to %s, now trying backup data source from xueqiu\" % e.args[0]\n )\n return get_xueqiu_rt(code, token=get_token())\n elif _from in [\"ttjj\"]:\n return get_rt_from_ttjj(code)\n elif _from in [\"FT\", \"ft\", \"FTI\"]:\n return get_rt_from_ft(code)\n elif _from == \"FTE\":\n return get_rt_from_ft(code, _type=\"equities\")\n elif _from == \"FTB\":\n return get_rt_from_ft(code, _type=\"bonds\")\n elif _from == \"FTF\":\n return get_rt_from_ft(code, _type=\"funds\")\n elif _from == \"FTX\":\n return get_rt_from_ft(code, _type=\"currencies\")\n elif _from == \"FTC\":\n return get_rt_from_ft(code, _type=\"commodities\")\n elif _from in [\"INA\"]: # investing app\n return get_cninvesting_rt(code, app=True)\n elif _from in [\"yc\", \"ycharts\"]:\n return get_rt_from_ycharts(code)\n else:\n raise ParserFailure(\"unrecoginzed _from for %s\" % _from)\n\n\nget_realtime = get_rt\nget_now = get_rt\n\n_cached_data = {}\n\n\ndef reset_cache():\n \"\"\"\n clear all cache of daily data in memory.\n\n :return: None.\n \"\"\"\n global _cached_data\n _cached_data = {}\n setattr(thismodule, \"cached_dict\", {})\n\n\ndef cached(s):\n \"\"\"\n **Deprecated**, use :func:`cachedio` instead, where ``backend=\"memory\"``.\n\n Usage as follows:\n\n .. 
code-block:: python\n\n @cached(\"20170101\")\n def get_daily(*args, **kws):\n return xa.get_daily(*args, **kws)\n\n Automatically cache the result in memory and avoid refetching\n :param s: str. eg. \"20160101\", the starting date of cached table.\n :return: wrapped function.\n \"\"\"\n\n def cached_start(f):\n @wraps(f)\n def wrapper(*args, **kws):\n print(\"cached function is deprecated, please instead use cachedio\")\n if args:\n code = args[0]\n else:\n code = kws.get(\"code\")\n start = kws.get(\"start\", None)\n end = kws.get(\"end\", None)\n prev = kws.get(\"prev\", None)\n if not prev:\n prev = 365\n if not end:\n end_obj = today_obj()\n else:\n end_obj = dstr2dobj(end)\n if not start:\n start_obj = end_obj - dt.timedelta(prev)\n else:\n start_obj = dstr2dobj(start)\n start_str = start_obj.strftime(\"%Y%m%d\")\n end_str = end_obj.strftime(\"%Y%m%d\")\n kws[\"start\"] = s\n kws[\"end\"] = dt.datetime.now().strftime(\"%Y%m%d\")\n global _cached_data\n _cached_data.setdefault(s, {})\n if code not in _cached_data[s]:\n df = f(*args, **kws)\n # print(\"cached %s\" % code)\n _cached_data[s][code] = df\n else:\n pass\n # print(\"directly call cache\")\n df = _cached_data[s][code]\n df = df[df[\"date\"] <= end_str]\n df = df[df[\"date\"] >= start_str]\n\n return df\n\n return wrapper\n\n return cached_start\n\n\ndef cachedio(**ioconf):\n \"\"\"\n 用法类似:func:`cached`,通用透明缓存器,用来作为 (code, start, end ...) -> pd.DataFrame 形式函数的缓存层,\n 避免重复爬取已有数据。\n\n :param **ioconf: 可选关键字参数 backend: csv or sql or memory,\n path: csv 文件夹或 sql engine, refresh True 会刷新结果,重新爬取, default False,\n prefix 是 key 前统一部分, 缓存 hash 标志\n :return:\n \"\"\"\n\n def cached(f):\n @wraps(f)\n def wrapper(*args, **kws):\n if args:\n code = args[0]\n else:\n code = kws.get(\"code\")\n date = ioconf.get(\"date\", \"date\") # 没利用上这个栏的名字变化\n precached = ioconf.get(\"precached\", None)\n precached = kws.get(\"precached\", precached)\n key = kws.get(\"key\", code)\n key = key.replace(\"/\", \" \")\n key_func = ioconf.get(\"key_func\", None)\n key_func = ioconf.get(\"keyfunc\", key_func)\n if key_func is not None:\n key = key_func(key)\n defaultend = ioconf.get(\"defaultend\", today_obj)\n defaultend = ioconf.get(\"default_end\", defaultend)\n defaultprev = ioconf.get(\"defaultprev\", 365)\n defaultprev = ioconf.get(\"default_prev\", defaultprev)\n if isinstance(defaultend, str):\n defaultend = defaultend.replace(\"/\", \"\").replace(\"-\", \"\")\n defaultend = dt.datetime.strptime(defaultend, \"%Y%m%d\")\n if callable(defaultend):\n defaultend = defaultend()\n start = kws.get(\"start\", None)\n end = kws.get(\"end\", None)\n prev = kws.get(\"prev\", None)\n prefix = ioconf.get(\"prefix\", \"\")\n key = prefix + key\n if precached:\n precached = precached.replace(\"/\", \"\").replace(\"-\", \"\")\n precached_obj = dt.datetime.strptime(precached, \"%Y%m%d\")\n if not prev:\n prev = defaultprev\n if not end:\n end_obj = defaultend\n else:\n end_obj = dt.datetime.strptime(\n end.replace(\"/\", \"\").replace(\"-\", \"\"), \"%Y%m%d\"\n )\n\n if not start:\n start_obj = end_obj - dt.timedelta(days=prev)\n else:\n start_obj = dt.datetime.strptime(\n start.replace(\"/\", \"\").replace(\"-\", \"\"), \"%Y%m%d\"\n )\n\n start_str = start_obj.strftime(\"%Y%m%d\")\n end_str = end_obj.strftime(\"%Y%m%d\")\n backend = ioconf.get(\"backend\")\n backend = kws.get(\"backend\", backend)\n # if backend == \"sql\": # reserved for case insensitive database settings\n # key = key.lower()\n refresh = ioconf.get(\"refresh\", False)\n refresh = 
kws.get(\"refresh\", refresh)\n fetchonly = ioconf.get(\"fetchonly\", False)\n fetchonly = ioconf.get(\"fetch_only\", fetchonly)\n fetchonly = kws.get(\"fetchonly\", fetchonly)\n fetchonly = kws.get(\"fetch_only\", fetchonly)\n path = ioconf.get(\"path\")\n path = kws.get(\"path\", path)\n kws[\"start\"] = start_str\n kws[\"end\"] = end_str\n if not backend:\n df = f(*args, **kws)\n df = df[df[\"date\"] <= kws[\"end\"]]\n df = df[df[\"date\"] >= kws[\"start\"]]\n return df\n else:\n if backend == \"csv\":\n key = key + \".csv\"\n if not getattr(thismodule, \"cached_dict\", None):\n setattr(thismodule, \"cached_dict\", {})\n if refresh:\n is_changed = True\n df0 = f(*args, **kws)\n\n else: # non refresh\n try:\n if backend == \"csv\":\n if key in getattr(thismodule, \"cached_dict\"):\n # 即使硬盘级别的缓存,也有内存层,加快读写速度\n df0 = getattr(thismodule, \"cached_dict\")[key]\n else:\n df0 = pd.read_csv(os.path.join(path, key))\n elif backend == \"sql\":\n if key in getattr(thismodule, \"cached_dict\"):\n df0 = getattr(thismodule, \"cached_dict\")[key]\n else:\n df0 = pd.read_sql(key, path)\n elif backend == \"memory\":\n df0 = getattr(thismodule, \"cached_dict\")[key]\n else:\n raise ValueError(\"no %s option for backend\" % backend)\n df0[date] = pd.to_datetime(df0[date])\n # 向前延拓\n is_changed = False\n if df0.iloc[0][date] > start_obj and not fetchonly:\n kws[\"start\"] = start_str\n kws[\"end\"] = (\n df0.iloc[0][date] - pd.Timedelta(days=1)\n ).strftime(\"%Y%m%d\")\n if has_weekday(kws[\"start\"], kws[\"end\"]):\n # 考虑到海外市场的不同情况,不用 opendate 判断,采取保守型判别\n df1 = f(*args, **kws)\n if df1 is not None and len(df1) > 0:\n df1 = df1[df1[\"date\"] <= kws[\"end\"]]\n if df1 is not None and len(df1) > 0:\n is_changed = True\n df0 = df1.append(df0, ignore_index=True, sort=False)\n # 向后延拓\n if df0.iloc[-1][date] < end_obj and not fetchonly:\n nextday_str = (\n df0.iloc[-1][date] + dt.timedelta(days=1)\n ).strftime(\"%Y%m%d\")\n if len(df0[df0[\"date\"] == df0.iloc[-1][\"date\"]]) == 1:\n kws[\"start\"] = (df0.iloc[-1][date]).strftime(\"%Y%m%d\")\n else: # 单日多行的表默认最后一日是准确的,不再刷新了\n kws[\"start\"] = nextday_str\n kws[\"end\"] = end_str\n if has_weekday(nextday_str, kws[\"end\"]): # 新更新的日期里有工作日\n df2 = f(*args, **kws)\n if df2 is not None and len(df2) > 0:\n df2 = df2[df2[\"date\"] >= kws[\"start\"]]\n if df2 is not None and len(df2) > 0:\n is_changed = True\n if (\n len(df0[df0[\"date\"] == df0.iloc[-1][\"date\"]])\n == 1\n ):\n df0 = df0.iloc[:-1]\n df0 = df0.append(df2, ignore_index=True, sort=False)\n # 注意这里抹去更新了原有最后一天的缓存,这是因为日线最新一天可能有实时数据污染\n\n except (FileNotFoundError, exc.ProgrammingError, KeyError) as e:\n if fetchonly:\n logger.error(\n \"no cache in backend for %s but you insist `fetchonly`\"\n % code\n )\n raise e\n if precached:\n if start_obj > precached_obj:\n kws[\"start\"] = precached\n if end_obj < today_obj():\n kws[\"end\"] = (\n today_obj() - dt.timedelta(days=1)\n ).strftime(\"%Y%m%d\")\n is_changed = True\n df0 = f(*args, **kws)\n\n if df0 is not None and len(df0) > 0 and is_changed:\n if backend == \"csv\":\n df0.to_csv(os.path.join(path, key), index=False)\n elif backend == \"sql\":\n df0.to_sql(key, con=path, if_exists=\"replace\", index=False)\n # elif backend == \"memory\":\n # 总是刷新内存层,即使是硬盘缓存\n d = getattr(thismodule, \"cached_dict\")\n d[key] = df0\n\n if df0 is not None and len(df0) > 0:\n df0 = df0[df0[\"date\"] <= end_str]\n df0 = df0[df0[\"date\"] >= start_str]\n\n return df0\n\n return wrapper\n\n return cached\n\n\ndef fetch_backend(key):\n prefix = ioconf.get(\"prefix\", \"\")\n key 
= prefix + key\n backend = ioconf.get(\"backend\")\n path = ioconf.get(\"path\")\n if backend == \"csv\":\n key = key + \".csv\"\n\n try:\n if backend == \"csv\":\n df0 = pd.read_csv(os.path.join(path, key))\n elif backend == \"sql\":\n df0 = pd.read_sql(key, path)\n else:\n raise ValueError(\"no %s option for backend\" % backend)\n\n return df0\n\n except (FileNotFoundError, exc.ProgrammingError, KeyError):\n return None\n\n\ndef save_backend(key, df, mode=\"a\", header=False):\n prefix = ioconf.get(\"prefix\", \"\")\n key = prefix + key\n backend = ioconf.get(\"backend\")\n path = ioconf.get(\"path\")\n if backend == \"csv\":\n key = key + \".csv\"\n\n if backend == \"csv\":\n if mode == \"a\":\n df.to_csv(os.path.join(path, key), index=False, header=header, mode=mode)\n else:\n df.to_csv(os.path.join(path, key), index=False, mode=mode)\n elif backend == \"sql\":\n if mode == \"a\":\n mode = \"append\"\n else:\n mode = \"replace\"\n df.to_sql(key, con=path, if_exists=mode, index=False)\n else:\n raise ValueError(\"no %s option for backend\" % backend)\n\n logger.debug(\"%s saved into backend successfully\" % key)\n\n\ndef check_cache(*args, omit_lines=0, **kws):\n if omit_lines == 0:\n assert (\n _get_daily(*args, wrapper=False, **kws)\n .reset_index(drop=True)\n .equals(get_daily(*args, **kws).reset_index(drop=True))\n )\n else:\n assert (\n _get_daily(*args, wrapper=False, **kws)\n .reset_index(drop=True)[:-omit_lines]\n .equals(get_daily(*args, **kws).reset_index(drop=True)[:-omit_lines])\n )\n\n\n@data_source(\"jq\")\ndef _get_index_weight_range(code, start, end):\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n start_obj = dt.datetime.strptime(start.replace(\"-\", \"\").replace(\"/\", \"\"), \"%Y%m%d\")\n end_obj = dt.datetime.strptime(end.replace(\"-\", \"\").replace(\"/\", \"\"), \"%Y%m%d\")\n start_m = start_obj.replace(day=1)\n if start_m < start_obj:\n start_m = start_m + relativedelta(months=1)\n end_m = end_obj.replace(day=1)\n if end_obj < end_m:\n end_m = end_m - relativedelta(months=1)\n d = start_m\n\n df = pd.DataFrame({\"code\": [], \"weight\": [], \"display_name\": [], \"date\": []})\n while True:\n if d > end_m:\n\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n return df\n logger.debug(\"fetch index weight on %s for %s\" % (d, code))\n df0 = get_index_weights(index_id=code, date=d.strftime(\"%Y-%m-%d\"))\n df0[\"code\"] = df0.index\n df = df.append(df0, ignore_index=True, sort=False)\n d = d + relativedelta(months=1)\n\n\n@data_source(\"jq\")\ndef _get_peb_range(code, start, end): # 盈利,净资产,总市值\n \"\"\"\n 获取指定指数一段时间内的 pe pb 值。\n\n :param code: 聚宽形式指数代码。\n :param start:\n :param end:\n :return: pd.DataFrame\n \"\"\"\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n data = {\"date\": [], \"pe\": [], \"pb\": []}\n for d in pd.date_range(start=start, end=end, freq=\"W-FRI\"):\n data[\"date\"].append(d)\n logger.debug(\"compute pe pb on %s\" % d)\n r = get_peb(code, date=d.strftime(\"%Y-%m-%d\"))\n data[\"pe\"].append(r[\"pe\"])\n data[\"pb\"].append(r[\"pb\"])\n return pd.DataFrame(data)\n\n\ndef get_stock_peb_range(code, start, end, wrapper=False):\n \"\"\"\n 获取股票历史 pe pb\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"HK\") and code[2:].isdigit():\n code = code[2:]\n count = (today_obj() - dt.datetime.strptime(start, \"%Y%m%d\")).days\n df = get_historical_fromxq(code, count, full=True)\n df = df[[\"date\", \"pe\", \"pb\", \"ps\"]]\n if not wrapper:\n df = df[df[\"date\"] >= start]\n 
df = df[df[\"date\"] <= end]\n return df\n\n\n@lru_cache()\ndef ttjjcode(code):\n \"\"\"\n 将天天基金的持仓股票代码或其他来源的代码标准化\n\n :param code: str.\n :return: str.\n \"\"\"\n code = code.strip()\n if code.endswith(\".HK\"):\n return \"HK\" + code[:-3]\n elif code.endswith(\".US\"):\n return code[:-3]\n elif code.isdigit() and len(code) == 5:\n return \"HK\" + code\n elif code.isdigit() and len(code) == 6:\n if (\n code.startswith(\"16\")\n or code.startswith(\"15\")\n or code.startswith(\"12\")\n or code.startswith(\"0\")\n or code.startswith(\"3\")\n ):\n # 注意这里只能对应个股,指数代码有重叠没有办法的事\n return \"SZ\" + code\n elif code.startswith(\"5\") or code.startswith(\"6\") or code.startswith(\"11\"):\n return \"SH\" + code\n else:\n logger.warning(\"unrecognized code format %s\" % code)\n return \"0\"\n else:\n logger.info(\"not so sure about code format %s, taken as US stock\" % code)\n return code\n\n\ndef get_fund_peb(code, date, threhold=0.3):\n \"\"\"\n 根据基金的股票持仓,获取对应日期的 pe,pb 估值\n\n :param code: str. 基金代码\n :param date:\n :param threhold: float, default 0.3. 为了计算快速,占比小于千分之三的股票将舍弃\n :return:\n \"\"\"\n if code.startswith(\"F\"):\n code = code[1:]\n date = date.replace(\"/\", \"\").replace(\"-\", \"\")\n d = dt.datetime.strptime(date, \"%Y%m%d\")\n if d.month > 3 and d.month < 8:\n year = d.year - 1\n season = 4\n elif d.month <= 3:\n year = d.year - 1\n season = 2\n else:\n year = d.year\n season = 2\n # season 只选 2,4, 具有更详细的持仓信息\n df = get_fund_holdings(code, year, season)\n if df is None:\n if season == 4:\n season = 2\n else:\n year -= 1\n season = 4\n df = get_fund_holdings(code, year, season)\n if df is None:\n logger.warning(\"%s seems has no holdings data in this time %s\" % (code, year))\n return {\"pe\": None, \"pb\": None}\n df = df[df[\"ratio\"] >= threhold]\n df[\"scode\"] = df[\"code\"].apply(ttjjcode)\n df = df[df[\"scode\"] != \"0\"]\n if len(df) == 0:\n return {\"pe\": None, \"pb\": None}\n\n pel, pbl = [], []\n for i, r in df.iterrows():\n try:\n fdf = get_daily(\"peb-\" + r[\"scode\"], end=date, prev=60)\n if len(fdf) == 0:\n # 已退市或改名\n logger.warning(\"%s: 无法获取,可能已退市,当时休市或改名\" % r[\"scode\"])\n pel.append(None)\n pbl.append(None)\n else:\n fdf = fdf.iloc[-1]\n pel.append(fdf[\"pe\"])\n pbl.append(fdf[\"pb\"])\n except (KeyError, TypeError, IndexError) as e:\n logger.warning(\n \"%s: 获取历史估值出现问题: %s, 可能由于网站故障或股票代码非中美市场\" % (r[\"scode\"], e.args[0])\n )\n pel.append(None)\n pbl.append(None)\n df[\"pe\"] = pel\n df[\"pb\"] = pbl\n r = {}\n pedf = df[~pd.isna(df[\"pe\"])]\n pbdf = df[~pd.isna(df[\"pb\"])]\n if len(pbdf) < 0.5 * len(df): # 有时候会有个别标的有pb值\n r[\"pb\"] = None\n else:\n pbdf[\"b\"] = pbdf[\"ratio\"] / (pbdf[\"pb\"] + 0.000001)\n r[\"pb\"] = pbdf.ratio.sum() / pbdf.b.sum()\n if len(pedf) == 0:\n r[\"pe\"] = None\n else:\n pedf[\"e\"] = pedf[\"ratio\"] / (pedf[\"pe\"] + 0.000001)\n r[\"pe\"] = pedf.ratio.sum() / pedf.e.sum()\n return r\n\n\ndef get_fund_peb_range(code, start, end):\n \"\"\"\n 获取一段时间的基金历史估值,每周五为频率\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"F\"):\n code = code[1:]\n data = {\"date\": [], \"pe\": [], \"pb\": []}\n for d in pd.date_range(start=start, end=end, freq=\"W-FRI\"):\n data[\"date\"].append(d)\n r = get_fund_peb(code, date=d.strftime(\"%Y-%m-%d\"))\n data[\"pe\"].append(r[\"pe\"])\n data[\"pb\"].append(r[\"pb\"])\n return pd.DataFrame(data)\n\n\ndef set_backend(**ioconf):\n \"\"\"\n 设定 xalpha get_daily 函数的缓存后端,默认为内存。 ioconf 参数设置可参考 :func:`cachedio`\n\n :param ioconf:\n :return: None.\n \"\"\"\n\n if not ioconf:\n 
ioconf = {\"backend\": \"memory\"}\n get_daily = cachedio(**ioconf)(_get_daily)\n prefix = ioconf.get(\"prefix\", \"\")\n ioconf[\"prefix\"] = \"iw-\" + prefix\n get_index_weight_range = cachedio(**ioconf)(_get_index_weight_range)\n ioconf[\"prefix\"] = \"peb-\" + prefix\n get_peb_range = cachedio(**ioconf)(_get_peb_range)\n setattr(thismodule, \"get_daily\", get_daily)\n setattr(xamodule, \"get_daily\", get_daily)\n setattr(thismodule, \"get_index_weight_range\", get_index_weight_range)\n setattr(thismodule, \"get_peb_range\", get_peb_range)\n ioconf[\"prefix\"] = prefix\n setattr(thismodule, \"ioconf\", ioconf)\n\n\nset_backend()\n\n\n@data_source(\"jq\")\ndef get_peb(index, date=None, table=False):\n \"\"\"\n 获取指数在指定日期的 pe 和 pb。采用当时各公司的最新财报和当时的指数成分股权重加权计算。\n\n :param index: str. 聚宽形式的指数代码。\n :param date: str. %Y-%m-%d\n :param table: Optioanl[bool], default False. True 时返回整个计算的 DataFrame,用于 debug。\n :return: Dict[str, float]. 包含 pe 和 pb 值的字典。\n \"\"\"\n if len(index.split(\".\")) == 2:\n index = _convert_code(index)\n middle = dt.datetime.strptime(\n date.replace(\"/\", \"\").replace(\"-\", \"\"), \"%Y%m%d\"\n ).replace(day=1)\n iwdf = get_index_weight_range(\n index,\n start=(middle - dt.timedelta(days=10)).strftime(\"%Y-%m-%d\"),\n end=(middle + dt.timedelta(days=6)).strftime(\"%Y-%m-%d\"),\n )\n q = query(valuation).filter(valuation.code.in_(list(iwdf.code)))\n logger.debug(\"get_fundamentals on %s\" % (date))\n df = get_fundamentals(q, date=date)\n df = df.merge(iwdf, on=\"code\")\n df[\"e\"] = df[\"weight\"] / df[\"pe_ratio\"]\n df[\"b\"] = df[\"weight\"] / df[\"pb_ratio\"]\n df[\"p\"] = df[\"weight\"]\n tote = df.e.sum()\n totb = df.b.sum()\n if table:\n return df\n return {\n \"pe\": (round(100.0 / tote, 3) if tote != 0 else np.inf),\n \"pb\": (round(100.0 / totb, 3) if totb != 0 else np.inf),\n }\n\n\n@data_source(\"jq\")\ndef get_sw_from_jq(code, start=None, end=None, **kws):\n \"\"\"\n\n :param code: str. eg. 
801180 申万行业指数\n :param start:\n :param end:\n :param kws:\n :return:\n \"\"\"\n logger.debug(\"get sw data of %s\" % code)\n df = finance.run_query(\n query(finance.SW1_DAILY_VALUATION)\n .filter(finance.SW1_DAILY_VALUATION.date >= start)\n .filter(finance.SW1_DAILY_VALUATION.date <= end)\n .filter(finance.SW1_DAILY_VALUATION.code == code)\n .order_by(finance.SW1_DAILY_VALUATION.date.asc())\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n return df\n\n\n@data_source(\"jq\")\ndef get_teb(code, date):\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n sl = get_index_stocks(code, date=date)\n logger.debug(\"get fundamentals from jq for %s\" % code)\n df = get_fundamentals(query(valuation).filter(valuation.code.in_(sl)), date=date)\n df[\"e\"] = df[\"market_cap\"] / df[\"pe_ratio\"]\n df[\"b\"] = df[\"market_cap\"] / df[\"pb_ratio\"]\n return {\"e\": df[\"e\"].sum(), \"b\": df[\"b\"].sum(), \"m\": df[\"market_cap\"].sum()} # 亿人民币\n\n\ndef get_teb_range(code, start, end, freq=\"W-FRI\"):\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n data = {\"date\": [], \"e\": [], \"b\": [], \"m\": []}\n for d in pd.date_range(start, end, freq=freq):\n data[\"date\"].append(d)\n r = get_teb(code, d.strftime(\"%Y-%m-%d\"))\n data[\"e\"].append(r[\"e\"])\n data[\"b\"].append(r[\"b\"])\n data[\"m\"].append(r[\"m\"])\n df = pd.DataFrame(data)\n return df\n\n\ndef _convert_code(code):\n \"\"\"\n 将聚宽形式的代码转化为 xalpha 形式\n\n :param code:\n :return:\n \"\"\"\n no, mk = code.split(\".\")\n if mk == \"XSHG\":\n return \"SH\" + no\n elif mk == \"XSHE\":\n return \"SZ\" + no\n\n\ndef _inverse_convert_code(code):\n \"\"\"\n 将 xalpha 形式的代码转化为聚宽形式\n\n :param code:\n :return:\n \"\"\"\n\n if code.startswith(\"SH\"):\n return code[2:] + \".XSHG\"\n elif code.startswith(\"SZ\"):\n return code[2:] + \".XSHE\"\n\n\n@lru_cache_time(ttl=60, maxsize=512)\ndef get_bar(\n code, prev=24, interval=3600, _from=None, handler=True, start=None, end=None\n):\n \"\"\"\n\n :param code: str. 支持雪球和英为的代码\n :param prev: points of data from now to back, often limited by API around several hundreds\n :param interval: float, seconds. need to match the corresponding API,\n typical values include 60, 300, 3600, 86400, 86400*7\n :param handler: bool. Default True. 
若为 False,则 handler 钩子失效,用于钩子函数中的嵌套。\n :return: pd.DataFrame\n \"\"\"\n if handler:\n if getattr(thismodule, \"get_bar_handler\", None):\n args = inspect.getargvalues(inspect.currentframe())\n f = getattr(thismodule, \"get_bar_handler\")\n fr = f(**args.locals)\n if fr is not None:\n return fr\n\n if not _from:\n if (\n (start is not None)\n and (end is not None)\n and (code.startswith(\"SH\") or code.startswith(\"SZ\"))\n ):\n _from = \"jq\"\n elif code.startswith(\"SH\") or code.startswith(\"SZ\"):\n _from = \"xueqiu\"\n elif code.isdigit():\n _from = \"cninvesting\"\n elif code.startswith(\"HK\") and code[2:7].isdigit():\n _from = \"xueqiu\"\n code = code[2:]\n elif len(code.split(\"-\")) >= 2 and len(code.split(\"-\")[0]) <= 3:\n _from = code.split(\"-\")[0]\n code = \"-\".join(code.split(\"-\")[1:])\n elif len(code.split(\"/\")) > 1:\n _from = \"cninvesting\"\n code = get_investing_id(code)\n else:\n _from = \"xueqiu\" # 美股\n if _from in [\"xq\", \"xueqiu\", \"XQ\"]:\n return get_bar_fromxq(code, prev, interval)\n elif _from in [\"IN\", \"cninvesting\", \"investing\"]:\n return get_bar_frominvesting(code, prev, interval)\n elif _from in [\"INA\"]:\n return get_bar_frominvesting(code, prev, interval)\n # 这里 investing app 源是 404,只能用网页源\n elif _from in [\"jq\"]:\n code, type_ = decouple_code(code)\n # 关于复权,聚宽各个时间密度的数据都有复权,雪球源日线以上的高频数据没有复权\n type_map = {\"after\": \"post\", \"before\": \"pre\", \"normal\": None}\n return get_bar_fromjq(\n code, start=start, end=end, interval=interval, fq=type_map[type_]\n )\n elif _from in [\"wsj\"]:\n return get_bar_fromwsj(code, interval=interval)[-prev:]\n else:\n raise ParserFailure(\"unrecoginized _from %s\" % _from)\n\n\n@data_source(\"jq\")\ndef get_bar_fromjq(code, start, end, interval, fq=\"pre\"):\n code = _inverse_convert_code(code)\n trans = {\n \"60\": \"1m\",\n \"120\": \"2m\",\n \"300\": \"5m\",\n \"900\": \"15m\",\n \"1800\": \"30m\",\n \"3600\": \"60m\",\n \"7200\": \"120m\",\n \"86400\": \"daily\",\n }\n interval = trans.get(str(interval), interval)\n logger.debug(\"calling ``get_price`` from jq with %s\" % code)\n return get_price(code, start_date=start, end_date=end, frequency=interval, fq=fq)\n\n\ndef get_bar_frominvesting(code, prev=120, interval=3600):\n \"\"\"\n get bar data beyond daily bar\n\n :param code: str. investing id or url\n :param prev: int, data points from now, max might be around 500, if exceed, only None is returnd\n :param interval: default 3600. 
optional 60, 300, 900, 1800, 18000, 86400, \"week\", \"month\"\n :return: pd.DataFrame or None if prev and interval unmatch the API\n \"\"\"\n if interval == \"day\":\n interval = 86400\n elif interval == \"hour\":\n interval = 3600\n elif interval == \"minute\":\n interval = 60\n elif interval == 86400 * 7:\n interval = \"week\"\n elif interval == 86400 * 30:\n interval = \"month\"\n if len(code.split(\"/\")) == 2:\n code = get_investing_id(code)\n\n url = \"https://cn.investing.com\"\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\\\n AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"Host\": \"cn.investing.com\",\n \"Referer\": \"https://cn.investing.com/commodities/\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n\n r = rget(\n url\n + \"/common/modules/js_instrument_chart/api/data.php?pair_id={code}&pair_id_for_news={code}\\\n&chart_type=area&pair_interval={interval}&candle_count={prev}&events=yes&volume_series=yes&period=\".format(\n code=code, prev=str(prev), interval=str(interval)\n ),\n headers=headers,\n )\n if not r.text:\n return # None\n r = r.json()\n df = pd.DataFrame(r[\"candles\"], columns=[\"date\", \"close\", \"0\", \"1\"])\n df = df.drop([\"0\", \"1\"], axis=1)\n df[\"date\"] = df[\"date\"].apply(\n lambda t: dt.datetime.fromtimestamp(t / 1000, tz=tz_bj).replace(tzinfo=None)\n )\n return df\n\n\ndef get_bar_fromxq(code, prev, interval=3600):\n \"\"\"\n\n :param code:\n :param prev:\n :param interval: 1m, 5m, 15m, 30m, 60m, 120m, month, quarter, year, week, day\n :return:\n \"\"\"\n # max interval is also around 500\n trans = {\n \"60\": \"1m\",\n \"300\": \"5m\",\n \"900\": \"15m\",\n \"1800\": \"30m\",\n \"3600\": \"60m\",\n \"7200\": \"120m\",\n \"86400\": \"day\",\n \"604800\": \"week\",\n \"2592000\": \"month\",\n }\n code, type_ = decouple_code(code)\n interval = trans.get(str(interval), interval)\n url = \"https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={code}&begin={tomorrow}&period={interval}&type={type_}\\\n&count=-{prev}&indicator=kline,pe,pb,ps,pcf,market_capital,agt,ggt,balance\".format(\n code=code,\n tomorrow=int(tomorrow_ts() * 1000),\n prev=prev,\n interval=interval,\n type_=type_,\n )\n r = rget(\n url, headers={\"user-agent\": \"Mozilla/5.0\"}, cookies={\"xq_a_token\": get_token()}\n )\n if not r.text:\n return # None\n else:\n df = pd.DataFrame(r.json()[\"data\"][\"item\"], columns=r.json()[\"data\"][\"column\"])\n df[\"date\"] = df[\"timestamp\"].apply(\n lambda t: dt.datetime.fromtimestamp(t / 1000, tz=tz_bj).replace(tzinfo=None)\n )\n df = df[\n [\n \"date\",\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"volume\",\n \"turnoverrate\",\n \"percent\",\n ]\n ]\n return df\n\n\ndef get_bar_fromwsj(code, token=None, interval=3600):\n # proxy required\n # code = \"FUTURE/US/XNYM/CLM20\"\n # TODO: also not explore the code format here extensively\n trans = {\"3600\": \"1H\"}\n # TODO: there is other freq tags, but I have no time to explore them, contributions are welcome:)\n freq = trans.get(str(interval), interval)\n if not token:\n token = \"cecc4267a0194af89ca343805a3e57af\"\n # the thing I am concerned here is whether token is refreshed\n\n params = {\n \"json\": 
'{\"Step\":\"PT%s\",\"TimeFrame\":\"D5\",\"EntitlementToken\":\"%s\",\\\n\"IncludeMockTick\":true,\"FilterNullSlots\":false,\"FilterClosedPoints\":true,\"IncludeClosedSlots\":false,\\\n\"IncludeOfficialClose\":true,\"InjectOpen\":false,\"ShowPreMarket\":false,\"ShowAfterHours\":false,\\\n\"UseExtendedTimeFrame\":false,\"WantPriorClose\":true,\"IncludeCurrentQuotes\":false,\\\n\"ResetTodaysAfterHoursPercentChange\":false,\\\n\"Series\":[{\"Key\":\"%s\",\"Dialect\":\"Charting\",\"Kind\":\"Ticker\",\"SeriesId\":\"s1\",\"DataTypes\":[\"Last\"]}]}'\n % (freq, token, code),\n \"ckey\": token[:10],\n }\n r = rget_json(\n \"https://api-secure.wsj.net/api/michelangelo/timeseries/history\",\n params=params,\n headers={\n \"user-agent\": \"Mozilla/5.0\",\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Dylan2010.EntitlementToken\": token,\n \"Host\": \"api-secure.wsj.net\",\n \"Origin\": \"https://www.marketwatch.com\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"cross-site\",\n },\n )\n\n df = pd.DataFrame(\n {\n \"date\": r[\"TimeInfo\"][\"Ticks\"],\n \"close\": [n[0] for n in r[\"Series\"][0][\"DataPoints\"]],\n }\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"] * 1000000) + pd.Timedelta(hours=8)\n df = df[df[\"close\"] > -100.0] # 存在未来数据占位符需要排除\n return df\n\n\nclass vinfo(basicinfo, indicator):\n \"\"\"\n vinfo is an info like class wrapper for get_daily, it behaves like info\n \"\"\"\n\n def __init__(\n self,\n code,\n name=None,\n start=None,\n end=None,\n rate=0,\n col=\"close\",\n normalization=True,\n **kws\n ):\n if not name:\n try:\n name = get_rt(code)[\"name\"]\n except:\n name = code\n self.name = name\n self.code = code\n self.start = start # None is one year ago\n self.end = end # None is yesterday\n df = get_daily(code, start=start, end=end)\n df[col] = pd.to_numeric(df[col]) # in case the col is not float\n df[\"totvalue\"] = df[col]\n if normalization:\n df[\"netvalue\"] = df[col] / df.iloc[0][col]\n else:\n df[\"netvalue\"] = df[col]\n self.price = df\n self.round_label = kws.get(\"round_label\", 0)\n self.dividend_label = kws.get(\"dividend_label\", 0)\n self.value_label = kws.get(\"value_label\", 1) # 默认按金额赎回\n self.specialdate = []\n self.fenhongdate = []\n self.zhesuandate = []\n self.rate = rate\n\n\nVInfo = vinfo\n" ]
[ [ "pandas.read_sql", "pandas.date_range", "pandas.to_numeric", "pandas.DataFrame", "pandas.read_excel", "pandas.Timedelta", "pandas.to_datetime", "numpy.random.randint", "pandas.Timestamp", "pandas.isna" ] ]
cs-giung/giung2
[ "c8560fd1b56f20eb1f3cf57202975d8325b591f5" ]
[ "giung2/modeling/backbone/resnet.py" ]
[ "import torch\nimport torch.nn as nn\nfrom typing import Dict, List\nfrom functools import partial\n\nfrom fvcore.common.config import CfgNode\nfrom giung2.layers import *\n\n\n__all__ = [\n \"build_resnet_backbone\",\n]\n\n\nclass IdentityShortcut(nn.Module):\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n expansion: int,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(IdentityShortcut, self).__init__()\n self.identity = MaxPool2d(kernel_size=1, stride=stride)\n self.pad_size = expansion * planes - in_planes\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.identity(x)\n out = nn.functional.pad(out, (0, 0, 0, 0, 0, self.pad_size), mode=\"constant\", value=0)\n return out\n\n\nclass ProjectionShortcut(nn.Module):\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n expansion: int,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(ProjectionShortcut, self).__init__()\n self.conv = conv(in_channels=in_planes, out_channels=expansion*planes,\n kernel_size=1, stride=stride, padding=0, **kwargs)\n self.norm = norm(num_features=expansion*planes)\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.norm(self.conv(x, **kwargs), **kwargs)\n return out\n\n\nclass FirstBlock(nn.Module):\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n conv: nn.Module,\n conv_ksp: List[int],\n norm: nn.Module,\n relu: nn.Module,\n pool: nn.Module,\n pool_ksp: List[int],\n **kwargs\n ) -> None:\n super(FirstBlock, self).__init__()\n self.conv1 = conv(in_channels=in_planes, out_channels=planes,\n kernel_size=conv_ksp[0], stride=conv_ksp[1], padding=conv_ksp[2], **kwargs)\n self.norm1 = norm(num_features=planes)\n self.relu1 = relu()\n self.pool1 = pool(kernel_size=pool_ksp[0], stride=pool_ksp[1], padding=pool_ksp[2])\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.pool1(self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs), **kwargs)\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n shortcut: nn.Module,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(BasicBlock,self).__init__()\n self.conv1 = conv(in_channels=in_planes, out_channels=planes,\n kernel_size=3, stride=stride, padding=1, **kwargs)\n self.norm1 = norm(num_features=planes)\n self.relu1 = relu()\n self.conv2 = conv(in_channels=planes, out_channels=self.expansion*planes,\n kernel_size=3, stride=1, padding=1, **kwargs)\n self.norm2 = norm(num_features=self.expansion*planes)\n self.relu2 = relu()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = shortcut(\n in_planes, planes, stride, self.expansion, conv, norm, **kwargs\n )\n else:\n self.shortcut = Identity()\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs)\n out = self.relu2(self.norm2(self.conv2(out, **kwargs), **kwargs) + self.shortcut(x, **kwargs), **kwargs)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n shortcut: nn.Module,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n 
super(Bottleneck,self).__init__()\n self.conv1 = conv(in_channels=in_planes, out_channels=planes,\n kernel_size=1, stride=1, padding=0, **kwargs)\n self.norm1 = norm(num_features=planes)\n self.relu1 = relu()\n self.conv2 = conv(in_channels=planes, out_channels=planes,\n kernel_size=3, stride=stride, padding=1, **kwargs)\n self.norm2 = norm(num_features=planes)\n self.relu2 = relu()\n self.conv3 = conv(in_channels=planes, out_channels=self.expansion*planes,\n kernel_size=1, stride=1, padding=0, **kwargs)\n self.norm3 = norm(num_features=self.expansion*planes)\n self.relu3 = relu()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = shortcut(\n in_planes, planes, stride, self.expansion, conv, norm, **kwargs\n )\n else:\n self.shortcut = Identity()\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs)\n out = self.relu2(self.norm2(self.conv2(out, **kwargs), **kwargs), **kwargs)\n out = self.relu3(self.norm3(self.conv3(out, **kwargs), **kwargs) + self.shortcut(x, **kwargs), **kwargs)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(\n self,\n channels: int,\n in_planes: int,\n first_block: nn.Module,\n block: nn.Module,\n shortcut: nn.Module,\n num_blocks: List[int],\n widen_factor: int,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(ResNet, self).__init__()\n self.channels = channels\n self.in_planes = in_planes\n self._in_planes = in_planes\n self.first_block = first_block\n self.block = block\n self.shortcut = shortcut\n self.num_blocks = num_blocks\n self.widen_factor = widen_factor\n self.conv = conv\n self.norm = norm\n self.relu = relu\n\n _layers = [self.first_block(in_planes=self.channels, planes=self.in_planes, **kwargs)]\n\n _layers += self._make_layer(\n self.in_planes * self.widen_factor, self.num_blocks[0], stride=1, **kwargs\n )\n for idx, num_block in enumerate(self.num_blocks[1:], start=1):\n _layers += self._make_layer(\n self.in_planes * (2 ** idx) * self.widen_factor, num_block, stride=2, **kwargs\n )\n self.layers = nn.Sequential(*_layers)\n\n def _make_layer(self, planes: int, num_block: int, stride: int, **kwargs) -> List[nn.Module]:\n strides = [stride] + [1] * (num_block - 1)\n _layers = []\n for stride in strides:\n _layers.append(self.block(self._in_planes, planes, stride,\n self.shortcut, self.conv, self.norm, self.relu, **kwargs))\n self._in_planes = planes * self.block.expansion\n return _layers\n\n def forward(self, x: torch.Tensor, **kwargs) -> Dict[str, torch.Tensor]:\n\n outputs = dict()\n\n # intermediate feature maps\n for layer_idx, layer in enumerate(self.layers):\n x = layer(x, **kwargs)\n outputs[f\"layer{layer_idx}\"] = x\n\n # final feature vector\n x = nn.functional.adaptive_avg_pool2d(x, (1, 1))\n x = x.view(x.size(0), -1)\n outputs[\"features\"] = x\n\n return outputs\n\n\ndef build_resnet_backbone(cfg: CfgNode) -> nn.Module:\n\n # Conv2d layers may be replaced by its variations\n _conv_layers = cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS\n kwargs = {\n \"bias\": cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS_BIAS,\n \"same_padding\": cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS_SAME_PADDING,\n }\n if _conv_layers == \"Conv2d\":\n conv_layers = Conv2d\n elif _conv_layers == \"Conv2d_Bezier\":\n conv_layers = Conv2d_Bezier\n elif _conv_layers in [\"Conv2d_BatchEnsemble\", \"Conv2d_BatchEnsembleV2\",]:\n if cfg.MODEL.BATCH_ENSEMBLE.ENABLED is False:\n raise AssertionError(\n 
f\"Set MODEL.BATCH_ENSEMBLE.ENABLED=True to use {_conv_layers}\"\n )\n if _conv_layers == \"Conv2d_BatchEnsemble\":\n conv_layers = Conv2d_BatchEnsemble\n if _conv_layers == \"Conv2d_BatchEnsembleV2\":\n conv_layers = Conv2d_BatchEnsembleV2\n kwargs.update({\n \"ensemble_size\": cfg.MODEL.BATCH_ENSEMBLE.ENSEMBLE_SIZE,\n \"use_ensemble_bias\": cfg.MODEL.BATCH_ENSEMBLE.USE_ENSEMBLE_BIAS,\n \"alpha_initializer\": {\n \"initializer\": cfg.MODEL.BATCH_ENSEMBLE.ALPHA_INITIALIZER.NAME,\n \"init_values\": cfg.MODEL.BATCH_ENSEMBLE.ALPHA_INITIALIZER.VALUES,\n },\n \"gamma_initializer\": {\n \"initializer\": cfg.MODEL.BATCH_ENSEMBLE.GAMMA_INITIALIZER.NAME,\n \"init_values\": cfg.MODEL.BATCH_ENSEMBLE.GAMMA_INITIALIZER.VALUES,\n },\n })\n elif _conv_layers == \"Conv2d_Dropout\":\n if cfg.MODEL.DROPOUT.ENABLED is False:\n raise AssertionError(\n f\"Set MODEL.DROPOUT.ENABLED=True to use {_conv_layers}\"\n )\n conv_layers = Conv2d_Dropout\n kwargs.update({\n \"drop_p\": cfg.MODEL.DROPOUT.DROP_PROBABILITY,\n })\n elif _conv_layers == \"Conv2d_SpatialDropout\":\n if cfg.MODEL.SPATIAL_DROPOUT.ENABLED is False:\n raise AssertionError(\n f\"Set MODEL.SPATIAL_DROPOUT.ENABLED=True to use {_conv_layers}\"\n )\n conv_layers = Conv2d_SpatialDropout\n kwargs.update({\n \"drop_p\": cfg.MODEL.SPATIAL_DROPOUT.DROP_PROBABILITY,\n })\n elif _conv_layers == \"Conv2d_DropBlock\":\n if cfg.MODEL.DROP_BLOCK.ENABLED is False:\n raise AssertionError(\n f\"Set MODEL.DROP_BLOCK.ENABLED=True to use {_conv_layers}\"\n )\n conv_layers = Conv2d_DropBlock\n kwargs.update({\n \"drop_p\": cfg.MODEL.DROP_BLOCK.DROP_PROBABILITY,\n \"block_size\": cfg.MODEL.DROP_BLOCK.BLOCK_SIZE,\n \"use_shared_masks\": cfg.MODEL.DROP_BLOCK.USE_SHARED_MASKS,\n })\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.CONV_LAYERS: {_conv_layers}\"\n )\n\n # BatchNorm2d layers may be replaced by its variations\n _norm_layers = cfg.MODEL.BACKBONE.RESNET.NORM_LAYERS\n if _norm_layers == \"NONE\":\n norm_layers = Identity\n elif _norm_layers == \"BatchNorm2d\":\n norm_layers = BatchNorm2d\n elif _norm_layers == \"GroupNorm2d\":\n norm_layers = partial(GroupNorm2d, num_groups=cfg.MODEL.BACKBONE.RESNET.IN_PLANES // 2)\n elif _norm_layers == \"FilterResponseNorm2d\":\n norm_layers = FilterResponseNorm2d\n elif _norm_layers == \"FilterResponseNorm2d_Bezier\":\n norm_layers = FilterResponseNorm2d_Bezier\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.NORM_LAYERS: {_norm_layers}\"\n )\n\n # ReLU layers may be replaced by its variations\n _activations = cfg.MODEL.BACKBONE.RESNET.ACTIVATIONS\n if _activations == \"NONE\":\n activations = Identity\n elif _activations == \"ReLU\":\n activations = ReLU\n elif _activations == \"SiLU\":\n activations = SiLU\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.ACTIVATIONS: {_activations}\"\n )\n\n # specify the first block\n first_block = partial(\n FirstBlock,\n conv = conv_layers,\n conv_ksp = cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.CONV_KSP,\n norm = norm_layers if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_NORM_LAYER else Identity,\n relu = activations if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_ACTIVATION else Identity,\n pool = MaxPool2d if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_POOL_LAYER else Identity,\n pool_ksp = cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.POOL_KSP,\n )\n\n # specify block\n _block = cfg.MODEL.BACKBONE.RESNET.BLOCK\n if _block == \"BasicBlock\":\n block = BasicBlock\n elif _block == \"Bottleneck\":\n block = Bottleneck\n else:\n raise 
NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.BLOCK: {_block}\"\n )\n\n # specify shortcut\n _shortcut = cfg.MODEL.BACKBONE.RESNET.SHORTCUT\n if _shortcut == \"IdentityShortcut\":\n shortcut = IdentityShortcut\n elif _shortcut == \"ProjectionShortcut\":\n shortcut = ProjectionShortcut\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.SHORTCUT: {_shortcut}\"\n )\n\n # build backbone\n backbone = ResNet(\n channels = cfg.MODEL.BACKBONE.RESNET.CHANNELS,\n in_planes = cfg.MODEL.BACKBONE.RESNET.IN_PLANES,\n first_block = first_block,\n block = block,\n shortcut = shortcut,\n num_blocks = cfg.MODEL.BACKBONE.RESNET.NUM_BLOCKS,\n widen_factor = cfg.MODEL.BACKBONE.RESNET.WIDEN_FACTOR,\n conv = conv_layers,\n norm = norm_layers,\n relu = activations,\n **kwargs\n )\n\n # initialize weights\n for m in backbone.modules():\n if isinstance(m, Conv2d):\n if isinstance(m.weight, nn.ParameterList):\n for idx in range(len(m.weight)):\n nn.init.kaiming_normal_(m.weight[idx], mode=\"fan_out\", nonlinearity=\"relu\")\n else:\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n\n return backbone\n" ]
[ [ "torch.nn.init.kaiming_normal_", "torch.nn.functional.pad", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Sequential" ] ]
nasimanousheh/dipy
[ "9d20c911b4afe83e52ded698eff9ba0f0fafeca8" ]
[ "dipy/data/tests/test_fetcher.py" ]
[ "import tempfile\nimport os.path as op\nimport sys\nimport os\nimport numpy.testing as npt\nfrom nibabel.tmpdirs import TemporaryDirectory\nimport dipy.data.fetcher as fetcher\nfrom dipy.data import SPHERE_FILES\nfrom threading import Thread\nif sys.version_info[0] < 3:\n from SimpleHTTPServer import SimpleHTTPRequestHandler # Python 2\n from SocketServer import TCPServer as HTTPServer\nelse:\n from http.server import HTTPServer, SimpleHTTPRequestHandler # Python 3\n\n\ndef test_check_md5():\n fd, fname = tempfile.mkstemp()\n stored_md5 = fetcher._get_file_md5(fname)\n # If all is well, this shouldn't return anything:\n npt.assert_equal(fetcher.check_md5(fname, stored_md5), None)\n # If None is provided as input, it should silently not check either:\n npt.assert_equal(fetcher.check_md5(fname, None), None)\n # Otherwise, it will raise its exception class:\n npt.assert_raises(fetcher.FetcherError, fetcher.check_md5, fname, 'foo')\n\n\ndef test_make_fetcher():\n symmetric362 = SPHERE_FILES['symmetric362']\n with TemporaryDirectory() as tmpdir:\n stored_md5 = fetcher._get_file_md5(symmetric362)\n\n # create local HTTP Server\n testfile_url = op.split(symmetric362)[0] + os.sep\n test_server_url = \"http://127.0.0.1:8000/\"\n print(testfile_url)\n print(symmetric362)\n current_dir = os.getcwd()\n # change pwd to directory containing testfile.\n os.chdir(testfile_url)\n server = HTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)\n server_thread = Thread(target=server.serve_forever)\n server_thread.deamon = True\n server_thread.start()\n\n # test make_fetcher\n sphere_fetcher = fetcher._make_fetcher(\"sphere_fetcher\",\n tmpdir, test_server_url,\n [op.split(symmetric362)[-1]],\n [\"sphere_name\"],\n md5_list=[stored_md5])\n\n sphere_fetcher()\n assert op.isfile(op.join(tmpdir, \"sphere_name\"))\n npt.assert_equal(fetcher._get_file_md5(op.join(tmpdir, \"sphere_name\")),\n stored_md5)\n\n # stop local HTTP Server\n server.shutdown()\n # change to original working directory\n os.chdir(current_dir)\n\n\ndef test_fetch_data():\n symmetric362 = SPHERE_FILES['symmetric362']\n with TemporaryDirectory() as tmpdir:\n md5 = fetcher._get_file_md5(symmetric362)\n bad_md5 = '8' * len(md5)\n\n newfile = op.join(tmpdir, \"testfile.txt\")\n # Test that the fetcher can get a file\n testfile_url = symmetric362\n print(testfile_url)\n testfile_dir, testfile_name = op.split(testfile_url)\n # create local HTTP Server\n test_server_url = \"http://127.0.0.1:8001/\" + testfile_name\n current_dir = os.getcwd()\n # change pwd to directory containing testfile.\n os.chdir(testfile_dir + os.sep)\n # use different port as shutdown() takes time to release socket.\n server = HTTPServer(('localhost', 8001), SimpleHTTPRequestHandler)\n server_thread = Thread(target=server.serve_forever)\n server_thread.deamon = True\n server_thread.start()\n\n files = {\"testfile.txt\": (test_server_url, md5)}\n fetcher.fetch_data(files, tmpdir)\n npt.assert_(op.exists(newfile))\n\n # Test that the file is replaced when the md5 doesn't match\n with open(newfile, 'a') as f:\n f.write(\"some junk\")\n fetcher.fetch_data(files, tmpdir)\n npt.assert_(op.exists(newfile))\n npt.assert_equal(fetcher._get_file_md5(newfile), md5)\n\n # Test that an error is raised when the md5 checksum of the download\n # file does not match the expected value\n files = {\"testfile.txt\": (test_server_url, bad_md5)}\n npt.assert_raises(fetcher.FetcherError,\n fetcher.fetch_data, files, tmpdir)\n\n # stop local HTTP Server\n server.shutdown()\n # change to original 
working directory\n os.chdir(current_dir)\n\n def test_dipy_home():\n test_path = 'TEST_PATH'\n if 'DIPY_HOME' in os.environ:\n old_home = os.environ['DIPY_HOME']\n del os.environ['DIPY_HOME']\n else:\n old_home = None\n\n reload(fetcher)\n\n npt.assert_string_equal(fetcher.dipy_home,\n op.join(os.path.expanduser('~'), '.dipy'))\n os.environ['DIPY_HOME'] = test_path\n reload(fetcher)\n npt.assert_string_equal(fetcher.dipy_home, test_path)\n\n # return to previous state\n if old_home:\n os.environ['DIPY_HOME'] = old_home\n" ]
[ [ "numpy.testing.assert_raises", "numpy.testing.assert_string_equal" ] ]
BendeguzToth/NeuralLanguageModel
[ "f4bb60375019acd57c7396768d62ad0f3166391c" ]
[ "Project/_visualize.py" ]
[ "\"\"\"\nIn this file we visualize the activations of\nparticular neurons, at different positions\nof a provided sample text.\n\"\"\"\n\n# Standard libraries\nimport json\nimport tkinter as tk\n\n# Third-party libraries\nimport numpy as np\n\n# Project files\nfrom layers import LSTM\n\n# SETUP\nMODEL = \"saves/ShakespeareNet.json\"\nLOOKUP_FILE = \"saves/ShakespeareLookup.json\"\nTEXT_FILE = \"saves/sample.txt\"\n\n\ndef main():\n with open(LOOKUP_FILE, 'r') as file:\n chars = json.load(file)\n\n # Here we make dictionaries that can be used to convert\n # between characters, integer id-s of characters, and one-hot\n # vectors that will be used to represent the characters.\n char_to_int = dict()\n int_to_char = dict()\n char_to_vec = dict()\n\n for i in range(len(chars)):\n char_to_int[chars[i]] = i\n int_to_char[i] = chars[i]\n vec = np.zeros((len(chars), 1))\n vec[i] = 1.\n char_to_vec[chars[i]] = vec\n\n # The length of the vector that represents a character\n # is equivalent to the number of different characters\n # in the text.\n EMBEDDING_LENGTH = len(chars)\n # Create the LSTM layers only. We don't use the Network class,\n # since we are only interested in the activations of the recurrent\n # layers.\n first_layer = LSTM(size=512, input_size=EMBEDDING_LENGTH, batch_size=1, backprop_depth=1, stateful=True)\n second_layer = LSTM(size=512, input_size=512, batch_size=1, backprop_depth=1, stateful=True)\n\n # Load the weights.\n with open(MODEL, 'r') as file:\n weights = json.load(file)\n first_layer.loadParams(weights[0])\n second_layer.loadParams(weights[1])\n\n # Loading in the file.\n with open(TEXT_FILE, 'r', encoding='utf8') as file:\n text = file.read()\n source = list(text)\n\n for i in range(len(source)):\n source[i] = char_to_vec[source[i]]\n\n # Feed the text to the network.\n # Here we look at the activation of the neurons of the\n # hidden state at the 2nd LSTM layer.\n # We take the first element of the output as there is only one\n # batch.\n out = second_layer.forward(first_layer.forward(np.array([source])))[0]\n\n # ###############---TKINTER---#############################################\n class Wrap:\n NEURON_INDEX = 0\n\n def showNeuron():\n for j in range(out.shape[0]):\n # We will leave the background of the newline characters white,\n # regardless of its activation. 
The reason for that is that the color\n # would fill the entire remainder of the line, which is very disturbing to look at.\n intensity = 255 if text[j] == '\\n' else 255 - int((out[j, Wrap.NEURON_INDEX, 0] + 1) * 127.5)\n text_box.tag_config(str(j), background=\"#%02x%02x%02x\" % (\n 255, intensity, intensity))\n\n def inputFromEntry(evt):\n Wrap.NEURON_INDEX = int(entry.get())\n entry.delete(0, \"end\")\n showNeuron()\n\n def nextButtonClicked():\n Wrap.NEURON_INDEX += 1\n entry.delete(0, \"end\")\n entry.insert(tk.INSERT, str(Wrap.NEURON_INDEX))\n showNeuron()\n\n # Making the tkinter window.\n root = tk.Tk()\n text_box = tk.Text(root, height=35)\n text_box.insert(tk.INSERT, text)\n text_box.pack()\n current_line = 1\n current_char = 0\n for i in range(out.shape[0]):\n text_box.tag_add(str(i), f\"{current_line}.{current_char}\")\n current_char += 1\n if text[i] == '\\n':\n current_line += 1\n current_char = 0\n\n # Making the entry box.\n entry = tk.Entry(root, width=5)\n entry.pack()\n entry.bind(\"<Return>\", inputFromEntry)\n\n # Buttons\n up = tk.Button(text=\"Next\", command=nextButtonClicked)\n up.pack()\n\n # Show the first neuron by default.\n showNeuron()\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array" ] ]
vjaguilera/BERT4Rec
[ "8c460676af224c90c9cc89f1ba837b38f04e4210" ]
[ "gen_data_fin.py" ]
[ "# -*- coding: UTF-8 -*-\nimport os\nimport codecs\n\nimport collections\nimport random\n\nimport sys\n\nimport tensorflow as tf\n\nimport six\n\nfrom util import *\nfrom vocab import *\nimport pickle\nimport multiprocessing\nimport time\n\n\nrandom_seed = 12345\nshort_seq_prob = 0 # Probability of creating sequences which are shorter than the maximum length。\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"signature\", 'default', \"signature_name\")\n\nflags.DEFINE_integer(\n \"pool_size\", 10,\n \"multiprocesses pool size.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 200,\n \"max sequence length.\")\n\nflags.DEFINE_integer(\n \"max_predictions_per_seq\", 20,\n \"max_predictions_per_seq.\")\n\nflags.DEFINE_float(\n \"masked_lm_prob\", 0.15,\n \"Masked LM probability.\")\n\nflags.DEFINE_float(\n \"mask_prob\", 1.0,\n \"mask probabaility\")\n\nflags.DEFINE_integer(\n \"dupe_factor\", 10,\n \"Number of times to duplicate the input data (with different masks).\")\n\nflags.DEFINE_float(\"prop_sliding_window\", 0.1, \"sliding window step size.\")\n \nflags.DEFINE_string(\n \"data_dir\", './data/',\n \"data dir.\")\n\nflags.DEFINE_string(\n \"dataset_name\", 'ml-1m',\n \"dataset name.\")\n\n\ndef printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\nclass TrainingInstance(object):\n \"\"\"A single training instance (sentence pair).\"\"\"\n\n def __init__(self, info, tokens, masked_lm_positions, masked_lm_labels):\n self.info = info # info = [user]\n self.tokens = tokens\n self.masked_lm_positions = masked_lm_positions\n self.masked_lm_labels = masked_lm_labels\n\n def __str__(self):\n s = \"\"\n s += \"info: %s\\n\" % (\" \".join([printable_text(x) for x in self.info]))\n s += \"tokens: %s\\n\" % (\n \" \".join([printable_text(x) for x in self.tokens]))\n s += \"masked_lm_positions: %s\\n\" % (\n \" \".join([str(x) for x in self.masked_lm_positions]))\n s += \"masked_lm_labels: %s\\n\" % (\n \" \".join([printable_text(x) for x in self.masked_lm_labels]))\n s += \"\\n\"\n return s\n\n def __repr__(self):\n return self.__str__()\n\n\ndef write_instance_to_example_files(instances, max_seq_length,\n max_predictions_per_seq, vocab,\n output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = 
[]\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n try:\n input_ids = vocab.convert_tokens_to_ids(instance.tokens)\n except:\n print(instance)\n\n input_mask = [1] * len(input_ids)\n assert len(input_ids) <= max_seq_length\n\n input_ids += [0] * (max_seq_length - len(input_ids))\n input_mask += [0] * (max_seq_length - len(input_mask))\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = vocab.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n masked_lm_positions += [0] * (max_predictions_per_seq - len(masked_lm_positions))\n masked_lm_ids += [0] * (max_predictions_per_seq - len(masked_lm_ids))\n masked_lm_weights += [0.0] * (max_predictions_per_seq - len(masked_lm_weights))\n\n features = collections.OrderedDict()\n features[\"info\"] = create_int_feature(instance.info)\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"masked_lm_positions\"] = create_int_feature(\n masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n\n tf_example = tf.train.Example(\n features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\"%s: %s\" % (feature_name,\n \" \".join([str(x)\n for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", total_written)\n\n\ndef create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n\ndef create_float_feature(values):\n feature = tf.train.Feature(\n float_list=tf.train.FloatList(value=list(values)))\n return feature\n\n\ndef create_training_instances(all_documents_raw,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n rng,\n vocab,\n mask_prob,\n prop_sliding_window,\n pool_size,\n force_last=False):\n \"\"\"Create `TrainingInstance`s from raw text.\n PARAMS:\n - all_documents_raw (dict): Dict containing users as \n keys and item-list as value\n \"\"\"\n all_documents = {}\n\n # TEST\n if force_last:\n max_num_tokens = max_seq_length\n for user, item_seq in all_documents_raw.items():\n if len(item_seq) == 0:\n print(\"got empty seq:\" + user)\n continue\n all_documents[user] = [item_seq[-max_num_tokens:]]\n # Assign list of list from the last to the max_num_tokens\n\n # TRAIN\n else:\n max_num_tokens = max_seq_length # we need two sentence\n\n sliding_step = (int)(\n prop_sliding_window *\n max_num_tokens) if prop_sliding_window != -1.0 else max_num_tokens\n for user, item_seq in all_documents_raw.items():\n if len(item_seq) == 0:\n print(\"got empty seq:\" + user)\n continue\n\n #todo: add 
slide\n if len(item_seq) <= max_num_tokens:\n # All to token\n all_documents[user] = [item_seq]\n else:\n beg_idx = list(range(len(item_seq)-max_num_tokens, 0, -sliding_step))\n beg_idx.append(0)\n # Reverse ordered list with 0 appended\n all_documents[user] = [item_seq[i:i + max_num_tokens] for i in beg_idx[::-1]]\n\n instances = []\n\n # TEST\n if force_last:\n for user in all_documents:\n instances.extend(\n create_instances_from_document_test(\n all_documents, user, max_seq_length))\n print(\"num of instance:{}\".format(len(instances)))\n\n # TRAIN\n else:\n start_time = time.clock()\n pool = multiprocessing.Pool(processes=pool_size)\n instances = []\n print(\"Document quantity: {}\".format(len(all_documents)))\n\n def log_result(result):\n print(\"callback function result type: {}, size: {} \".format(type(result), len(result)))\n # RESULT CAN BE error_callback or the result of create_instances_threading\n instances.extend(result)\n # Add Training Instances to instances list if result is correct\n\n for step in range(dupe_factor):\n # Run a process async as a thread\n pool.apply_async(\n create_instances_threading, args=(\n all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, random.Random(random.randint(1,10000)),\n mask_prob, step, dupe_factor), callback=log_result)\n pool.close()\n pool.join()\n \n # Always masking the last item\n for user in all_documents:\n instances.extend(\n mask_last(\n all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, rng))\n\n print(\"num of instance:{}; time:{}\".format(len(instances), time.clock() - start_time))\n rng.shuffle(instances)\n return instances\n\n\ndef create_instances_threading(all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, rng,\n mask_prob, step, dupe_factor):\n cnt = 0\n start_time = time.clock()\n instances = []\n for user in all_documents:\n cnt += 1\n if cnt % 1000 == 0:\n print(\"step: {}/{}, name: {}, user: {}, time: {}\".format(step, dupe_factor, multiprocessing.current_process().name, cnt, time.clock()-start_time))\n start_time = time.clock()\n\n instances.extend(create_instances_from_document_train(\n all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, rng,\n mask_prob))\n \n return instances\n\n\ndef mask_last(\n all_documents, user, max_seq_length, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, vocab, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[user]\n max_num_tokens = max_seq_length\n \n instances = []\n info = [int(user.split(\"_\")[1])]\n vocab_items = vocab.get_items()\n\n for tokens in document:\n assert len(tokens) >= 1 and len(tokens) <= max_num_tokens\n \n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions_force_last(tokens)\n instance = TrainingInstance(\n info=info,\n tokens=tokens,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n\n return instances\n\n\ndef create_instances_from_document_test(all_documents, user, max_seq_length):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[user]\n max_num_tokens = max_seq_length\n \n assert len(document) == 1 and len(document[0]) <= max_num_tokens\n \n tokens = document[0]\n assert len(tokens) >= 1\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = 
create_masked_lm_predictions_force_last(tokens)\n\n info = [int(user.split(\"_\")[1])]\n instance = TrainingInstance(\n info=info,\n tokens=tokens,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n\n return [instance]\n\n\ndef create_instances_from_document_train(\n all_documents, user, max_seq_length, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, vocab, rng, mask_prob):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[user]\n\n max_num_tokens = max_seq_length\n\n instances = []\n info = [int(user.split(\"_\")[1])]\n vocab_items = vocab.get_items()\n\n for tokens in document:\n assert len(tokens) >= 1 and len(tokens) <= max_num_tokens\n \n # Return the tokens, the masked positions and the masked labels\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq,\n vocab_items, rng, mask_prob)\n \n # Instantiate a TrainingInstance\n instance = TrainingInstance(\n info=info,\n tokens=tokens,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n\n return instances\n\n\nMaskedLmInstance = collections.namedtuple(\"MaskedLmInstance\",\n [\"index\", \"label\"])\n\n\ndef create_masked_lm_predictions_force_last(tokens):\n \"\"\"Creates the predictions for the masked LM objective, BUT JUST MASKING THE LAST ITEM\"\"\"\n\n last_index = -1\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[PAD]\" or token == '[NO_USE]':\n continue\n last_index = i\n\n assert last_index > 0\n\n output_tokens = list(tokens)\n output_tokens[last_index] = \"[MASK]\"\n\n masked_lm_positions = [last_index]\n masked_lm_labels = [tokens[last_index]]\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\ndef create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng,\n mask_prob):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token not in vocab_words:\n continue\n cand_indexes.append(i)\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n if index in covered_indexes:\n continue\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < mask_prob:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n # masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n masked_token = rng.choice(vocab_words) \n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\ndef gen_samples(data,\n output_filename,\n rng,\n vocab,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n mask_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n prop_sliding_window,\n pool_size,\n force_last=False):\n # create train 
instances\n instances = create_training_instances(\n data, max_seq_length, dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng, vocab, mask_prob, prop_sliding_window,\n pool_size, force_last)\n\n tf.logging.info(\"*** Writing to output files ***\")\n tf.logging.info(\" %s\", output_filename)\n\n # Write training instances\n write_instance_to_example_files(instances, max_seq_length,\n max_predictions_per_seq, vocab,\n [output_filename])\n\n\ndef main():\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)\n \n max_seq_length = FLAGS.max_seq_length\n max_predictions_per_seq = FLAGS.max_predictions_per_seq\n masked_lm_prob = FLAGS.masked_lm_prob\n mask_prob = FLAGS.mask_prob\n dupe_factor = FLAGS.dupe_factor\n prop_sliding_window = FLAGS.prop_sliding_window\n pool_size = FLAGS.pool_size\n\n output_dir = FLAGS.data_dir\n dataset_name = FLAGS.dataset_name\n version_id = FLAGS.signature\n print(version_id)\n print(output_dir)\n print(dataset_name)\n\n if not os.path.isdir(output_dir):\n print(output_dir + ' is not exist')\n print(os.getcwd())\n exit(1)\n\n dataset = data_partition(output_dir+dataset_name+'.txt')\n [user_train, user_valid, user_test, usernum, itemnum] = dataset\n cc = 0.0\n max_len = 0\n min_len = 100000\n for u in user_train:\n cc += len(user_train[u])\n max_len = max(len(user_train[u]), max_len)\n min_len = min(len(user_train[u]), min_len)\n\n print('average sequence length: %.2f' % (cc / len(user_train)))\n print('max:{}, min:{}'.format(max_len, min_len))\n\n print('len_train:{}, len_valid:{}, len_test:{}, usernum:{}, itemnum:{}'.\n format(\n len(user_train),\n len(user_valid), len(user_test), usernum, itemnum))\n\n for idx, u in enumerate(user_train):\n if idx < 10:\n print(user_train[u])\n print(user_valid[u])\n print(user_test[u])\n\n # put validate into train\n for u in user_train:\n if u in user_valid:\n user_train[u].extend(user_valid[u])\n\n # get the max index of the data\n user_train_data = {\n 'user_' + str(k): ['item_' + str(item) for item in v]\n for k, v in user_train.items() if len(v) > 0\n }\n user_test_data = {\n 'user_' + str(u):\n ['item_' + str(item) for item in (user_train[u] + user_test[u])]\n for u in user_train if len(user_train[u]) > 0 and len(user_test[u]) > 0\n }\n rng = random.Random(random_seed)\n\n vocab = FreqVocab(user_test_data)\n user_test_data_output = {\n k: [vocab.convert_tokens_to_ids(v)]\n for k, v in user_test_data.items()\n }\n\n print('begin to generate train')\n output_filename = output_dir + dataset_name + version_id + '.train.tfrecord'\n ## Generating training masked samples\n gen_samples(\n user_train_data,\n output_filename,\n rng,\n vocab,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n mask_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n prop_sliding_window,\n pool_size,\n force_last=False)\n print('train:{}'.format(output_filename))\n\n print('begin to generate test')\n output_filename = output_dir + dataset_name + version_id + '.test.tfrecord'\n ## Generating test masked samples\n ## force_last is True\n gen_samples(\n user_test_data,\n output_filename,\n rng,\n vocab,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n mask_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n -1.0,\n pool_size,\n force_last=True)\n print('test:{}'.format(output_filename))\n\n print('vocab_size:{}, user_size:{}, item_size:{}, item_with_other_size:{}'.\n format(vocab.get_vocab_size(),\n vocab.get_user_count(),\n vocab.get_item_count(),\n vocab.get_item_count() + vocab.get_special_token_count()))\n 
vocab_file_name = output_dir + dataset_name + version_id + '.vocab'\n print('vocab pickle file: ' + vocab_file_name)\n with open(vocab_file_name, 'wb') as output_file:\n pickle.dump(vocab, output_file, protocol=2)\n\n his_file_name = output_dir + dataset_name + version_id + '.his'\n print('test data pickle file: ' + his_file_name)\n with open(his_file_name, 'wb') as output_file:\n pickle.dump(user_test_data_output, output_file, protocol=2)\n print('done.')\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "tensorflow.python_io.TFRecordWriter", "tensorflow.train.Features", "tensorflow.logging.info", "tensorflow.compat.v1.logging.set_verbosity" ] ]
richardsfc/neural_rerendering_plus
[ "f5b2bd2ebe7e9657e3584612818eb0d137714276" ]
[ "layers.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom options import FLAGS as opts\nimport numpy as np\nimport tensorflow as tf\nfrom plyfile import PlyData, PlyElement\n\nclass LayerDescriptor(object):\n \n def __init__(self, name, m): \n with tf.variable_scope(name):\n plydata = PlyData.read(opts.descriptor_folder + '/fused.ply')\n shape = [(plydata.elements[0].count // opts.descriptor_div) + 1, m]\n self.dim = m\n with tf.device('/device:GPU:1'):\n self.descriptors = tf.get_variable('descriptors', shape=shape) # 0 index is the null descriptor\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n with tf.device('/device:GPU:1'):\n shape = x.get_shape().as_list()\n indices = tf.reshape(x[:, :, :, -1], shape=[-1, 1])\n indices = tf.compat.v1.cast(tf.math.ceil(tf.compat.v1.divide(indices, opts.descriptor_div)), tf.int64)\n D = tf.gather_nd(self.descriptors, indices)\n D = tf.reshape(D, shape=[-1, shape[1], shape[2], self.dim])\n return tf.compat.v1.concat([tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, opts.deep_buffer_nc]), D], axis=-1)\n\n\nclass LayerInstanceNorm(object):\n\n def __init__(self, scope_suffix='instance_norm'):\n curr_scope = tf.compat.v1.get_variable_scope().name\n self._scope = curr_scope + '/' + scope_suffix\n\n def __call__(self, x):\n with tf.compat.v1.variable_scope(self._scope, reuse=tf.compat.v1.AUTO_REUSE):\n return tf.contrib.layers.instance_norm(\n x, epsilon=1e-05, center=True, scale=True)\n\n\ndef layer_norm(x, scope='layer_norm'):\n return tf.contrib.layers.layer_norm(x, center=True, scale=True)\n\n\ndef pixel_norm(x):\n \"\"\"Pixel normalization.\n\n Args:\n x: 4D image tensor in B01C format.\n\n Returns:\n 4D tensor with pixel normalized channels.\n \"\"\"\n return x * tf.compat.v1.rsqrt(tf.compat.v1.reduce_mean(tf.compat.v1.square(x), [-1], keepdims=True) + 1e-8)\n\n\ndef global_avg_pooling(x):\n return tf.compat.v1.reduce_mean(x, axis=[1, 2], keepdims=True)\n\n\nclass FullyConnected(object):\n\n def __init__(self, n_out_units, scope_suffix='FC'):\n weight_init = tf.compat.v1.random_normal_initializer(mean=0., stddev=0.02)\n weight_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)\n\n curr_scope = tf.get_variable_scope().name\n self._scope = curr_scope + '/' + scope_suffix\n self.fc_layer = functools.partial(\n tf.layers.dense, units=n_out_units, kernel_initializer=weight_init,\n kernel_regularizer=weight_regularizer, use_bias=True)\n\n def __call__(self, x):\n with tf.compat.v1.variable_scope(self._scope, reuse=tf.AUTO_REUSE):\n return self.fc_layer(x)\n\n\ndef init_he_scale(shape, slope=1.0):\n \"\"\"He neural network random normal scaling for initialization.\n\n Args:\n shape: list of the dimensions of the tensor.\n slope: float, slope of the ReLu following the layer.\n\n Returns:\n a float, He's standard deviation.\n \"\"\"\n fan_in = np.prod(shape[:-1])\n return np.sqrt(2. / ((1. 
+ slope**2) * fan_in))\n\n\nclass LayerConv(object):\n \"\"\"Convolution layer with support for equalized learning.\"\"\"\n\n def __init__(self,\n name,\n w,\n n,\n stride,\n padding='SAME',\n use_scaling=False,\n relu_slope=1.):\n \"\"\"Layer constructor.\n\n Args:\n name: string, layer name.\n w: int or 2-tuple, width of the convolution kernel.\n n: 2-tuple of ints, input and output channel depths.\n stride: int or 2-tuple, stride for the convolution kernel.\n padding: string, the padding method. {SAME, VALID, REFLECT}.\n use_scaling: bool, whether to use weight norm and scaling.\n relu_slope: float, the slope of the ReLu following the layer.\n \"\"\"\n assert padding in ['SAME', 'VALID', 'REFLECT'], 'Error: unsupported padding'\n self._padding = padding\n with tf.compat.v1.variable_scope(name):\n if isinstance(stride, int):\n stride = [1, stride, stride, 1]\n else:\n assert len(stride) == 2, \"stride is either an int or a 2-tuple\"\n stride = [1, stride[0], stride[1], 1]\n if isinstance(w, int):\n w = [w, w]\n self.w = w\n shape = [w[0], w[1], n[0], n[1]]\n init_scale, pre_scale = init_he_scale(shape, relu_slope), 1.\n if use_scaling:\n init_scale, pre_scale = pre_scale, init_scale\n self._stride = stride\n self._pre_scale = pre_scale\n self._weight = tf.compat.v1.get_variable(\n 'weight',\n shape=shape,\n initializer=tf.compat.v1.random_normal_initializer(stddev=init_scale))\n self._bias = tf.compat.v1.get_variable(\n 'bias', shape=[n[1]], initializer=tf.compat.v1.zeros_initializer)\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n if self._padding != 'REFLECT':\n padding = self._padding\n else:\n padding = 'VALID'\n pad_top = self.w[0] // 2\n pad_left = self.w[1] // 2\n if (self.w[0] - self._stride[1]) % 2 == 0:\n pad_bottom = pad_top\n else:\n pad_bottom = self.w[0] - self._stride[1] - pad_top\n if (self.w[1] - self._stride[2]) % 2 == 0:\n pad_right = pad_left\n else:\n pad_right = self.w[1] - self._stride[2] - pad_left\n x = tf.compat.v1.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right],\n [0, 0]], mode='REFLECT')\n y = tf.compat.v1.nn.conv2d(x, self._weight, strides=self._stride, padding=padding)\n return self._pre_scale * y + self._bias\n\n\nclass LayerTransposedConv(object):\n \"\"\"Convolution layer with support for equalized learning.\"\"\"\n\n def __init__(self,\n name,\n w,\n n,\n stride,\n padding='SAME',\n use_scaling=False,\n relu_slope=1.):\n \"\"\"Layer constructor.\n\n Args:\n name: string, layer name.\n w: int or 2-tuple, width of the convolution kernel.\n n: 2-tuple int, [n_in_channels, n_out_channels]\n stride: int or 2-tuple, stride for the convolution kernel.\n padding: string, the padding method {SAME, VALID, REFLECT}.\n use_scaling: bool, whether to use weight norm and scaling.\n relu_slope: float, the slope of the ReLu following the layer.\n \"\"\"\n assert padding in ['SAME'], 'Error: unsupported padding for transposed conv'\n if isinstance(stride, int):\n stride = [1, stride, stride, 1]\n else:\n assert len(stride) == 2, \"stride is either an int or a 2-tuple\"\n stride = [1, stride[0], stride[1], 1]\n if isinstance(w, int):\n w = [w, w]\n self.padding = padding\n self.nc_in, self.nc_out = n\n self.stride = stride\n with tf.variable_scope(name):\n kernel_shape = [w[0], w[1], self.nc_out, self.nc_in]\n init_scale, pre_scale = init_he_scale(kernel_shape, relu_slope), 1.\n if use_scaling:\n init_scale, pre_scale = pre_scale, init_scale\n self._pre_scale = pre_scale\n self._weight = tf.get_variable(\n 'weight',\n shape=kernel_shape,\n 
initializer=tf.random_normal_initializer(stddev=init_scale))\n self._bias = tf.get_variable(\n 'bias', shape=[self.nc_out], initializer=tf.zeros_initializer)\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n x_shape = x.get_shape().as_list()\n batch_size = tf.shape(x)[0]\n stride_x, stride_y = self.stride[1], self.stride[2]\n output_shape = tf.stack([\n batch_size, x_shape[1] * stride_x, x_shape[2] * stride_y, self.nc_out])\n y = tf.nn.conv2d_transpose(\n x, filter=self._weight, output_shape=output_shape, strides=self.stride,\n padding=self.padding)\n return self._pre_scale * y + self._bias\n\n\nclass ResBlock(object):\n def __init__(self,\n name,\n nc,\n norm_layer_constructor,\n activation,\n padding='SAME',\n use_scaling=False,\n relu_slope=1.):\n \"\"\"Layer constructor.\"\"\"\n self.name = name\n conv2d = functools.partial(\n LayerConv, w=3, n=[nc, nc], stride=1, padding=padding,\n use_scaling=use_scaling, relu_slope=relu_slope)\n self.blocks = []\n with tf.variable_scope(self.name):\n with tf.variable_scope('res0'):\n self.blocks.append(\n LayerPipe([\n conv2d('res0_conv'),\n norm_layer_constructor('res0_norm'),\n activation\n ])\n )\n with tf.variable_scope('res1'):\n self.blocks.append(\n LayerPipe([\n conv2d('res1_conv'),\n norm_layer_constructor('res1_norm')\n ])\n )\n\n def __call__(self, x_init):\n \"\"\"Apply layer to tensor x.\"\"\"\n x = x_init\n for f in self.blocks:\n x = f(x)\n return x + x_init\n\n\nclass BasicBlock(object):\n def __init__(self,\n name,\n n,\n activation=functools.partial(tf.compat.v1.nn.leaky_relu, alpha=0.2),\n padding='SAME',\n use_scaling=True,\n relu_slope=1.):\n \"\"\"Layer constructor.\"\"\"\n self.name = name\n conv2d = functools.partial(\n LayerConv, stride=1, padding=padding,\n use_scaling=use_scaling, relu_slope=relu_slope)\n nc_in, nc_out = n # n is a 2-tuple\n with tf.compat.v1.variable_scope(self.name):\n self.path1_blocks = []\n with tf.compat.v1.variable_scope('bb_path1'):\n self.path1_blocks.append(\n LayerPipe([\n activation,\n conv2d('bb_conv0', w=3, n=[nc_in, nc_out]),\n activation,\n conv2d('bb_conv1', w=3, n=[nc_out, nc_out]),\n downscale\n ])\n )\n\n self.path2_blocks = []\n with tf.compat.v1.variable_scope('bb_path2'):\n self.path2_blocks.append(\n LayerPipe([\n downscale,\n conv2d('path2_conv', w=1, n=[nc_in, nc_out])\n ])\n )\n\n def __call__(self, x_init):\n \"\"\"Apply layer to tensor x.\"\"\"\n x1 = x_init\n x2 = x_init\n for f in self.path1_blocks:\n x1 = f(x1)\n for f in self.path2_blocks:\n x2 = f(x2)\n return x1 + x2\n\n\nclass LayerDense(object):\n \"\"\"Dense layer with a non-linearity.\"\"\"\n\n def __init__(self, name, n, use_scaling=False, relu_slope=1.):\n \"\"\"Layer constructor.\n\n Args:\n name: string, layer name.\n n: 2-tuple of ints, input and output widths.\n use_scaling: bool, whether to use weight norm and scaling.\n relu_slope: float, the slope of the ReLu following the layer.\n \"\"\"\n with tf.variable_scope(name):\n init_scale, pre_scale = init_he_scale(n, relu_slope), 1.\n if use_scaling:\n init_scale, pre_scale = pre_scale, init_scale\n self._pre_scale = pre_scale\n self._weight = tf.get_variable(\n 'weight',\n shape=n,\n initializer=tf.random_normal_initializer(stddev=init_scale))\n self._bias = tf.get_variable(\n 'bias', shape=[n[1]], initializer=tf.zeros_initializer)\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n return self._pre_scale * tf.matmul(x, self._weight) + self._bias\n\n\nclass LayerPipe(object):\n \"\"\"Pipe a sequence of functions.\"\"\"\n\n def 
__init__(self, functions):\n \"\"\"Layer constructor.\n\n Args:\n functions: list, functions to pipe.\n \"\"\"\n self._functions = tuple(functions)\n\n def __call__(self, x, **kwargs):\n \"\"\"Apply pipe to tensor x and return result.\"\"\"\n del kwargs\n for f in self._functions:\n x = f(x)\n return x\n\n\ndef downscale(x, n=2):\n \"\"\"Box downscaling.\n\n Args:\n x: 4D image tensor.\n n: integer scale (must be a power of 2).\n\n Returns:\n 4D tensor of images down scaled by a factor n.\n \"\"\"\n if n == 1:\n return x\n return tf.compat.v1.nn.avg_pool(x, [1, n, n, 1], [1, n, n, 1], 'VALID')\n\n\ndef upscale(x, n):\n \"\"\"Box upscaling (also called nearest neighbors).\n\n Args:\n x: 4D image tensor in B01C format.\n n: integer scale (must be a power of 2).\n\n Returns:\n 4D tensor of images up scaled by a factor n.\n \"\"\"\n if n == 1:\n return x\n x_shape = tf.compat.v1.shape(x)\n height, width = x_shape[1], x_shape[2]\n return tf.compat.v1.image.resize_nearest_neighbor(x, [n * height, n * width])\n\n\ndef tile_and_concatenate(x, z, n_z):\n z = tf.compat.v1.reshape(z, shape=[-1, 1, 1, n_z])\n z = tf.compat.v1.tile(z, [1, tf.compat.v1.shape(x)[1], tf.compat.v1.shape(x)[2], 1])\n x = tf.compat.v1.concat([x, z], axis=-1)\n return x\n\n\ndef minibatch_mean_variance(x):\n \"\"\"Computes the variance average.\n\n This is used by the discriminator as a form of batch discrimination.\n\n Args:\n x: nD tensor for which to compute variance average.\n\n Returns:\n a scalar, the mean variance of variable x.\n \"\"\"\n mean = tf.compat.v1.reduce_mean(x, 0, keepdims=True)\n vals = tf.compat.v1.sqrt(tf.compat.v1.reduce_mean(tf.compat.v1.squared_difference(x, mean), 0) + 1e-8)\n vals = tf.compat.v1.reduce_mean(vals)\n return vals\n\n\ndef scalar_concat(x, scalar):\n \"\"\"Concatenate a scalar to a 4D tensor as an extra channel.\n\n Args:\n x: 4D image tensor in B01C format.\n scalar: a scalar to concatenate to the tensor.\n\n Returns:\n a 4D tensor with one extra channel containing the value scalar at\n every position.\n \"\"\"\n s = tf.compat.v1.shape(x)\n return tf.compat.v1.concat([x, tf.compat.v1.ones([s[0], s[1], s[2], 1]) * scalar], axis=3)\n" ]
[ [ "tensorflow.reshape", "tensorflow.gather_nd", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.compat.v1.nn.avg_pool", "tensorflow.slice", "tensorflow.get_variable_scope", "tensorflow.compat.v1.shape", "tensorflow.contrib.layers.layer_norm", "tensorflow.compat.v1.image.resize_nearest_neighbor", "tensorflow.compat.v1.square", "tensorflow.compat.v1.get_variable", "tensorflow.device", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.random_normal_initializer", "tensorflow.compat.v1.get_variable_scope", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.random_normal_initializer", "tensorflow.compat.v1.pad", "tensorflow.compat.v1.reshape", "tensorflow.stack", "tensorflow.shape", "tensorflow.nn.conv2d_transpose", "tensorflow.compat.v1.ones", "tensorflow.compat.v1.concat", "numpy.prod", "tensorflow.compat.v1.nn.conv2d", "tensorflow.compat.v1.divide", "tensorflow.contrib.layers.instance_norm", "tensorflow.compat.v1.squared_difference", "tensorflow.compat.v1.variable_scope", "numpy.sqrt", "tensorflow.get_variable" ] ]
Abxhor/Coldairarrow
[ "3735beec8a6fa7ad9356375081229c68f0e83f3d" ]
[ "models/final_model.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Stacking of some good solutions.\nIMPORTANT:\nTo run this model you need run before the differents models.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndf1 = pd.read_csv('submission40.csv') # 0.309812 (public leaderboard)\ndf2 = pd.read_csv('submission41.csv') # 0.305985 (public leaderboard)\ndf3 = pd.read_csv('submission42.csv') # 0.313587 (public leaderboard)\ndf4 = pd.read_csv('submission45.csv') # 0.309749 (public leaderboard)\ndf5 = pd.read_csv('submission47.csv') # 0.306439 (public leaderboard)\n\ndf = pd.DataFrame()\n\ndf['y'] = 0.2*df1['y'] + 0.23*df2['y'] + 0.2*df3['y'] + 0.15*df4['y'] + 0.22*df5['y']\ndf.to_csv('submission53.csv') # 0.301697 (public leaderboard)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
vlbthambawita/singan-polyp-aug-exp
[ "b4ec5155f5c36a931fad022aec04dda6b3180b55" ]
[ "777_all_in_one_v1.py" ]
[ "#=========================================================\n# Developer: Vajira Thambawita\n# Reference: https://github.com/meetshah1995/pytorch-semseg\n#=========================================================\n\n\n\nimport argparse\nfrom datetime import datetime\nimport os\nimport copy\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n#Pytorch\nimport torch\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import models, transforms,datasets, utils\nfrom torchvision.utils import save_image\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.autograd import Variable\nfrom torchsummary import summary\n\nimport segmentation_models_pytorch as smp\n\n\nfrom data.dataset import Dataset\nfrom data.prepare_data import prepare_data, prepare_test_data\n#from data import PolypsDatasetWithGridEncoding\n#from data import PolypsDatasetWithGridEncoding_TestData\nimport pyra_pytorch as pyra\nfrom utils import dice_coeff, iou_pytorch, visualize\n\nimport segmentation_models_pytorch as smp\n\n\n#======================================\n# Get and set all input parameters\n#======================================\n\nparser = argparse.ArgumentParser()\n\n# Hardware\n#parser.add_argument(\"--device\", default=\"gpu\", help=\"Device to run the code\")\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"\")\n\n# Optional parameters to identify the experiments\nparser.add_argument(\"--exp_name\", type=str, help=\"A name to identify the experiment\", required=True)\n#parser.add_argument(\"--py_file\",default=os.path.abspath(__file__)) # store current python file\n\n\n# Directory and file handling\nparser.add_argument(\"--train_CSVs\", \n nargs=\"+\",\n default=None,\n help=\"CSV file list with image and mask paths\")\n\nparser.add_argument(\"--val_CSVs\",\n nargs=\"+\",\n default=None,\n help=\"CSV file list with image and mask paths\")\n\nparser.add_argument(\"--test_CSVs\",\n nargs=\"+\",\n default=None,\n help=\"CSV file list with image and mask paths\")\n\nparser.add_argument(\"--out_dir\", \n default=\"/work/vajira/DATA/sinGAN_polyps/sinGAN_exp_out/checkpoints\",\n help=\"Main output dierectory\")\n\nparser.add_argument(\"--tensorboard_dir\", \n default=\"/work/vajira/DATA/sinGAN_polyps/sinGAN_exp_out/tensorboard\",\n help=\"Folder to save output of tensorboard\")\n\nparser.add_argument(\"--test_out_dir\",\n default= \"/work/vajira/DATA/sinGAN_polyps/sinGAN_exp_out/test_samples\",\n help=\"Output folder for testing data\"\n) \n\nparser.add_argument(\"--best_checkpoint_name\", type=str, default=\"best_checkpoint.pth\", help=\"A name to save bet checkpoint\")\n\nparser.add_argument(\"--img_size\", type=int, default=128, help=\"Image height and width to resize\")\n\n\n# Action handling \nparser.add_argument(\"--num_epochs\", type=int, default=1, help=\"Numbe of epochs to train\")\nparser.add_argument(\"--start_epoch\", type=int, default=0, help=\"start epoch of training\")\nparser.add_argument(\"--num_test_samples\", type=int, default=5, help=\"Number of samples to test.\")\n\n# smp parameters\nparser.add_argument(\"--model\", help=\"The model to perform segmentation\", required=True)\nparser.add_argument(\"--encoder\", type=str, default='se_resnext50_32x4d', help=\"smp encoders\")\nparser.add_argument(\"--encoder_weights\", type=str, default='imagenet', help=\"encoder weights\")\nparser.add_argument(\"--classes\", default=[0,255], help=\"classes per 
pixel\")\nparser.add_argument(\"--activation\", type=str, default='softmax2d', help=\"last activation layers activation\")\n\n#PYRA\nparser.add_argument(\"--pyra\", type=bool, default=False, help=\"To enable PYRA grid encoding.\")\nparser.add_argument(\"--grid_sizes_train\", type=list, default=[256], help=\"Grid sizes to use in training\")\nparser.add_argument(\"--grid_sizes_val\", type=list, default=[256], help=\"Grid sizes to use in training\")\nparser.add_argument(\"--grid_sizes_test\", type=list, default=[256], help=\"Grid sizes to use in testing\")\nparser.add_argument(\"--in_channels\", type=int, default=3, help=\"Number of input channgels\")\n\n# Parameters\nparser.add_argument(\"--bs\", type=int, default=8, help=\"Mini batch size\")\nparser.add_argument(\"--val_bs\", type=int, default=1, help=\"Batch size\")\nparser.add_argument(\"--lr\", type=float, default=0.0001, help=\"Learning rate for training\")\nparser.add_argument(\"--lr_change_point\", type=int, default=50, help=\"After this point LR will be changed.\")\n\n\nparser.add_argument(\"--num_workers\", type=int, default=12, help=\"Number of workers in dataloader\")\nparser.add_argument(\"--weight_decay\", type=float, default=1e-5, help=\"weight decay of the optimizer\")\nparser.add_argument(\"--lr_sch_factor\", type=float, default=0.1, help=\"Factor to reduce lr in the scheduler\")\nparser.add_argument(\"--lr_sch_patience\", type=int, default=25, help=\"Num of epochs to be patience for updating lr\")\n\n\nparser.add_argument(\"--num_samples\", type=int, default=5, help=\"Number of samples to print from validation set\")\nparser.add_argument(\"action\", type=str, help=\"Select an action to run\", choices=[\"train\", \"retrain\", \"test\", \"check\", \"check_val\"])\nparser.add_argument(\"--checkpoint_interval\", type=int, default=25, help=\"Interval to save checkpoint models\")\n#parser.add_argument(\"--fold\", type=str, default=\"fold_1\", help=\"Select the validation fold\", choices=[\"fold_1\", \"fold_2\", \"fold_3\"])\n#parser.add_argument(\"--num_test\", default= 200, type=int, help=\"Number of samples to test set from 1k dataset\")\n#parser.add_argument(\"--model_path\", default=\"\", help=\"Model path to load weights\")\n#parser.add_argument(\"--num_of_samples\", default=30, type=int, help=\"Number of samples to validate (Montecalo sampling)\")\nparser.add_argument(\"--record_name\", type=str, default=\"VAL\", help=\"Some name to identify records in tensorboard output\")\n\nopt = parser.parse_args()\n\n\n#==========================================\n# Device handling\n#==========================================\ntorch.cuda.set_device(opt.device_id)\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nopt.device = DEVICE\n\n#===========================================\n# Folder handling\n#===========================================\n\n#make output folder if not exist\nos.makedirs(opt.out_dir, exist_ok=True)\n\n\n# make subfolder in the output folder \n#py_file_name = opt.py_file.split(\"/\")[-1] # Get python file name (soruce code name)\nCHECKPOINT_DIR = os.path.join(opt.out_dir, opt.exp_name + \"/checkpoints\")\nos.makedirs(CHECKPOINT_DIR, exist_ok=True)\n\n# make tensorboard subdirectory for the experiment\ntensorboard_exp_dir = os.path.join(opt.tensorboard_dir, opt.exp_name)\nos.makedirs( tensorboard_exp_dir, exist_ok=True)\n\n#==========================================\n# Tensorboard\n#==========================================\n# Initialize summary writer\nwriter = 
SummaryWriter(tensorboard_exp_dir)\n\n#==========================================\n# Prepare Data\n#==========================================\n\n\n#================================================\n# Train the model\n#================================================\ndef train_model(train_loader, valid_loader, model, loss, metrics, optimizer, opt):\n\n # create epoch runners \n # it is a simple loop of iterating over dataloader`s samples\n train_epoch = smp.utils.train.TrainEpoch(\n model, \n loss=loss, \n metrics=metrics, \n optimizer=optimizer,\n device=DEVICE,\n verbose=True,\n )\n\n valid_epoch = smp.utils.train.ValidEpoch(\n model, \n loss=loss, \n metrics=metrics, \n device=DEVICE,\n verbose=True,\n )\n\n\n\n max_score = 0\n\n best_chk_path = os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name)\n\n for i in range(opt.start_epoch + 1, opt.start_epoch + opt.num_epochs +1 ):\n \n print('\\nEpoch: {}'.format(i))\n train_logs = train_epoch.run(train_loader)\n valid_logs = valid_epoch.run(valid_loader)\n \n # do something (save model, change lr, etc.)\n if max_score < valid_logs['iou_score']:\n max_score = valid_logs['iou_score']\n torch.save({\"model\":model, \"epoch\": i}, best_chk_path)\n print('Best Model saved!')\n print(\"Testing....\")\n do_test(opt)\n print(\"Tested\")\n\n \n if i == opt.lr_change_point:\n optimizer.param_groups[0]['lr'] = 1e-5\n print('Decrease decoder learning rate to 1e-5!')\n\n # writing to logs to tensorboard\n for key, value in train_logs.items():\n writer.add_scalar(f\"Train/{key}\", value, i)\n\n for key, value in valid_logs.items():\n writer.add_scalar(f\"Valid/{key}\", value, i)\n\n\n \n\n\n# update here\n \n\n#==============================================\n# Heatmap generator from tensor\n#==============================================\ndef generate_heatmapts(img_tensor):\n print(img_tensor.shape)\n fig_list = []\n for n in range(img_tensor.shape[0]):\n img = img_tensor[n]\n img = img.squeeze(dim=0)\n img_np = img.detach().cpu().numpy()\n #img_np = np.transforms(img_np, (1,2,0))\n \n plt.imshow(img_np, cmap=\"hot\")\n fig = plt.gcf()\n fig_list.append(fig)\n # plt.clf()\n plt.close()\n\n return fig_list\n\n\n\n#===============================================\n# Prepare models\n#===============================================\ndef prepare_model(opt):\n # model = UNet(n_channels=4, n_classes=1) # 4 = 3 channels + 1 grid encode\n\n # create segmentation model with pretrained encoder\n model = getattr(smp, opt.model)(\n encoder_name=opt.encoder,\n in_channels=opt.in_channels, \n encoder_weights=opt.encoder_weights, \n classes=len(opt.classes), \n activation=opt.activation,\n )\n\n return model\n\n#====================================\n# Run training process\n#====================================\ndef run_train(opt):\n model = prepare_model(opt)\n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n\n train_loader, val_loader = prepare_data(opt, preprocessing_fn=None)\n\n loss = smp.utils.losses.DiceLoss(ignore_channels=[0])\n\n metrics = [\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[0]),\n ]\n\n optimizer = torch.optim.Adam([ \n dict(params=model.parameters(), lr=opt.lr),\n ])\n\n train_model(train_loader, val_loader, model, loss, metrics, optimizer, opt)\n#====================================\n# Re-train process\n#====================================\ndef run_retrain(opt):\n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n opt.start_epoch = 
checkpoint_dict[\"epoch\"]\n model = checkpoint_dict[\"model\"]\n\n print(\"Model epoch:\", checkpoint_dict[\"epoch\"])\n print(\"Model retrain started from epoch:\", opt.start_epoch)\n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n\n train_loader, val_loader = prepare_data(opt, preprocessing_fn)\n\n loss = smp.utils.losses.DiceLoss()\n\n metrics = [\n smp.utils.metrics.IoU(threshold=0.5),\n ]\n\n optimizer = torch.optim.Adam([ \n dict(params=model.parameters(), lr=opt.lr),\n ])\n\n train_model(train_loader, val_loader, model, loss, metrics, optimizer, opt)\n\n#=====================================\n# Check model\n#====================================\ndef check_model_graph():\n raise NotImplementedError\n\n\n#===================================\n# Inference from pre-trained models\n#===================================\n\ndef do_test(opt):\n\n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n test_epoch = checkpoint_dict[\"epoch\"]\n best_model = checkpoint_dict[\"model\"]\n\n print(\"Model best epoch:\", test_epoch)\n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n test_dataset = prepare_test_data(opt, preprocessing_fn=None)\n test_dataset_vis = prepare_test_data(opt, preprocessing_fn=None)\n \n \n for i in range(opt.num_test_samples):\n image, mask = test_dataset[i]\n image_vis, _ = test_dataset_vis[i]\n\n #print(image)\n\n mask_tensor = torch.from_numpy(mask).to(opt.device).unsqueeze(0)\n\n image_tensor = torch.from_numpy(image).to(opt.device).unsqueeze(0)\n pr_mask = best_model.predict(image_tensor)\n\n pr_mask = pr_mask.squeeze().cpu().numpy().round()\n\n fig = visualize(\n input_image_new=np.transpose(image_vis, (1,2,0)).astype(int),\n GT_mask_0=mask[0, :,:],\n Pred_mask_0 = pr_mask[0,:,:],\n GT_mask_1= mask[1,:,:],\n Pred_mask_1 = pr_mask[1, :,:]\n )\n\n fig.savefig(f\"./test_202_{i}.png\")\n writer.add_figure(f\"Test_sample/sample-{i}\", fig, global_step=test_epoch)\n\n\n\n\n\ndef check_test_score(opt):\n\n \n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n test_best_epoch = checkpoint_dict[\"epoch\"]\n best_model = checkpoint_dict[\"model\"]\n\n print(\"Model best epoch:\", test_best_epoch)\n \n \n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n test_dataset = prepare_test_data(opt, preprocessing_fn=None)\n \n test_dataloader = DataLoader(test_dataset, num_workers=48)\n\n loss = smp.utils.losses.DiceLoss()\n # Testing with two class layers\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=None),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-test-score\", str(logs), global_step=test_best_epoch)\n\n # Testing with only class layer 1 (polyps)\n loss = smp.utils.losses.DiceLoss(ignore_channels=[0])\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[0]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-test-score-ignore-channel-0\", str(logs), global_step=test_best_epoch)\n\n\n\n # Testing with 
only class layer 0 (BG)\n\n loss = smp.utils.losses.DiceLoss(ignore_channels=[1])\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[1]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-test-score-ignore-channel-1\", str(logs), global_step=test_best_epoch)\n\n\n\ndef check_val_full_score(opt):\n\n # changing test data files into val data\n\n #opt.test_CSVs = opt.val_CSVs\n\n #opt.record_name = \"VAL\"\n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n test_best_epoch = checkpoint_dict[\"epoch\"]\n best_model = checkpoint_dict[\"model\"]\n\n print(\"Model best epoch:\", test_best_epoch)\n \n \n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n test_dataset = prepare_test_data(opt, preprocessing_fn=None)\n \n test_dataloader = DataLoader(test_dataset, num_workers=12)\n\n loss = smp.utils.losses.DiceLoss()\n # Testing with two class layers\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Fscore(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Accuracy(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Recall(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Precision(threshold=0.5, ignore_channels=None),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-scores-->{opt.record_name}\", str(logs), global_step=test_best_epoch)\n\n # Testing with only class layer 1 (polyps)\n loss = smp.utils.losses.DiceLoss(ignore_channels=[0])\n \n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Fscore(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Accuracy(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Recall(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Precision(threshold=0.5, ignore_channels=[0]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-val-scores-ignore-channel-0-->{opt.record_name}\", str(logs), global_step=test_best_epoch)\n\n\n\n # Testing with only class layer 0 (BG)\n\n loss = smp.utils.losses.DiceLoss(ignore_channels=[1])\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Fscore(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Accuracy(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Recall(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Precision(threshold=0.5, ignore_channels=[1]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-val-scores-ignore-channel-1-->{opt.record_name}\", str(logs), global_step=test_best_epoch) \n\n\n\n\n\nif __name__ == \"__main__\":\n\n #data_loaders = prepare_data()\n 
print(vars(opt))\n print(\"Test OK\")\n\n # Train or retrain or inference\n if opt.action == \"train\":\n print(\"Training process is strted..!\")\n run_train(opt)\n pass\n\n elif opt.action == \"retrain\":\n print(\"Retrainning process is strted..!\")\n run_retrain(opt)\n pass\n\n elif opt.action == \"test\":\n print(\"Inference process is strted..!\")\n do_test(opt)\n print(\"Done\")\n\n elif opt.action == \"check\":\n check_test_score(opt)\n print(\"Check pass\")\n\n elif opt.action == \"check_val\":\n check_val_full_score(opt)\n\n # Finish tensorboard writer\n writer.close()\n\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.transpose", "matplotlib.pyplot.gcf", "torch.save", "matplotlib.pyplot.imshow", "torch.cuda.is_available", "torch.from_numpy", "matplotlib.pyplot.close", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.set_device" ] ]
MasaYan24/pytorch-lightning
[ "046ac714f6955ed14b831657ea1b7b16bc28ac93" ]
[ "pytorch_lightning/trainer/training_loop.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager, suppress\nfrom copy import copy, deepcopy\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.states import RunningStage, TrainerState\nfrom pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing\nfrom pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import recursive_detach\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(self, trainer, multiple_trainloader_mode):\n self.trainer = trainer\n self.early_stopping_accumulator = None\n self.checkpoint_accumulator = None\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self.automatic_optimization = True\n self._curr_step_result = None\n self._cur_grad_norm_dict = None\n self._multiple_trainloader_mode = multiple_trainloader_mode\n self._skip_backward = False\n self.trainer._multiple_trainloader_mode = multiple_trainloader_mode\n\n def on_trainer_init(\n self,\n max_epochs,\n min_epochs,\n max_steps,\n min_steps,\n num_sanity_val_steps,\n automatic_optimization,\n weights_summary,\n ):\n self.trainer.global_step = 0\n self.trainer.current_epoch = 0\n self.trainer.interrupted = False\n self.trainer.should_stop = False\n self.trainer._state = TrainerState.INITIALIZING\n\n self.trainer.total_batch_idx = 0\n self.trainer.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n self.automatic_optimization = automatic_optimization\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.trainer.max_steps = max_steps\n self.trainer.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n self.trainer.weights_summary = weights_summary\n if weights_summary is not None and weights_summary not in 
ModelSummary.MODES:\n raise MisconfigurationException(\n f\"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}\"\n )\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n def should_skip_training(self):\n should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps\n should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n # hook\n self.trainer.call_hook(\"on_train_start\")\n\n # provide rank to profiler\n self.trainer.profile_connector.on_train_start(self.trainer)\n\n def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # links data to the trainer\n self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)\n\n # check that model is configured correctly\n self.trainer.config_validator.verify_loop_configurations(model)\n\n # attach model log function to callback\n self.trainer.callback_connector.attach_model_logging_functions(model)\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n\n self._teardown_already_run = True\n\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.trainer.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.trainer.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. 
Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu\n # kill loggers\n if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n if self.trainer.global_rank == 0:\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator_backend.on_train_end()\n\n # clear mem\n if self.trainer._device_type == DeviceType.GPU:\n model = self.trainer.get_model()\n model.cpu()\n torch.cuda.empty_cache()\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def check_early_stopping_callback(self, should_update):\n # TODO bake this logic into the EarlyStopping callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.trainer.current_epoch = epoch\n\n model = self.trainer.get_model()\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # changing gradient according accumulation_scheduler\n self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # structured result accumulators for callbacks\n self.early_stopping_accumulator = Accumulator()\n self.checkpoint_accumulator = Accumulator()\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):\n # hook\n self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, model):\n if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n sample_output = opt_outputs[-1]\n\n # decide if we need to reduce at the end of the epoch 
automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n hook_overridden = (\n is_overridden(\"training_epoch_end\", model=self.trainer.get_model())\n or is_overridden(\"on_train_epoch_end\", model=self.trainer.get_model())\n )\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if not (hook_overridden or auto_reduce_tng_result):\n continue\n\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def get_optimizers_iterable(self):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n optimizers_loop_length = optimizer_freq_cumsum[-1]\n current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length\n\n # find optimzier index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n is_result_obj = isinstance(training_step_output, Result)\n\n if is_result_obj:\n training_step_output.detach()\n else:\n training_step_output.batch_loss = training_step_output.batch_loss.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.get_model()\n\n with self.trainer.profiler.profile(\"model_forward\"):\n args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n model_ref._results = Result()\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator_backend.training_step(args)\n self.trainer.accelerator_backend.post_training_step()\n\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n is_result_obj = isinstance(training_step_output, Result)\n\n if training_step_output_for_epoch_end is None:\n return None\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.trainer.train_loop.automatic_optimization:\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n 
if is_result_obj:\n closure_loss = training_step_output.minimize\n else:\n closure_loss = training_step_output.batch_loss\n\n closure_loss = closure_loss / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n hiddens=training_step_output.hiddens,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n # -----------------------------------------\n # process result return (DEPRECATE in 1.0)\n # -----------------------------------------\n if isinstance(training_step_output, Result):\n training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)\n return training_step_output_for_epoch_end, training_step_output\n\n # -----------------------------------------\n # process hybrid (1.0)\n # -----------------------------------------\n # no need for these checks in 1.0.0\n # TODO: remove checks in 1.0.0\n is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)\n is_1_0_output = is_tensor or (\"log\" not in training_step_output and \"progress_bar\" not in training_step_output)\n if is_1_0_output:\n return self._process_training_step_output_1_0(training_step_output, split_batch)\n\n # -----------------------------------------\n # process old dict (deprecate 1.0)\n # -----------------------------------------\n training_step_output = self.trainer.process_dict_result(training_step_output, train=True)\n\n training_step_output = AttributeDict(\n batch_loss=training_step_output[0],\n pbar_on_batch_end=training_step_output[1],\n log_metrics=training_step_output[2],\n callback_metrics=training_step_output[3],\n hiddens=training_step_output[4],\n )\n # if the user decides to finally reduce things in epoch_end, save raw output without graphs\n if isinstance(training_step_output_for_epoch_end, torch.Tensor):\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n else:\n training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_training_step_output_1_0(self, training_step_output, split_batch):\n result = self.trainer.get_model()._results\n\n loss = None\n hiddens = None\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n result[\"extra\"] = {}\n\n # map to results under the hood\n result.minimize = loss\n result.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end.cpu()\n\n # what flows back into the system\n training_step_output = result\n\n 
return training_step_output_for_epoch_end, training_step_output\n\n def _process_result(self, training_step_output, split_batch):\n training_step_output.track_batch_size(len(split_batch))\n m = \"\"\"\n TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.\n Use self.log and .write from the LightningModule to log metrics and write predictions.\n training_step can now only return a scalar (for the loss) or a dictionary with anything you want.\n\n Option 1:\n return loss\n\n Option 2:\n return {'loss': loss, 'anything_else': ...}\n\n Option 3:\n return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}\n \"\"\"\n rank_zero_warn(m)\n\n training_step_output_for_epoch_end = copy(training_step_output)\n training_step_output_for_epoch_end.detach()\n\n return training_step_output_for_epoch_end\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.get_model()\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer):\n # track gradient norms\n grad_norm_dic = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)\n self._cur_grad_norm_dict = grad_norm_dic\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.get_model()\n grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def process_hiddens(self, opt_closure_result):\n hiddens = opt_closure_result.hiddens\n if isinstance(opt_closure_result.training_step_output, Result):\n opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()\n return hiddens\n\n def tbptt_split_batch(self, batch):\n splits = [batch]\n if self.trainer.truncated_bptt_steps is not None:\n model_ref = self.trainer.get_model()\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output = [[] for _ in range(self.num_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n 
dataloader_idx = 0\n should_check_val = False\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n\n self.trainer.batch_idx = batch_idx\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n batch_end_outputs = self.process_train_step_outputs(\n batch_output.training_step_output_for_epoch_end,\n self.early_stopping_accumulator,\n self.checkpoint_accumulator,\n )\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED + CHECKPOINT CALLBACK\n # -----------------------------------------\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.run_evaluation()\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if (\n self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1\n and self._accumulated_batches_reached()\n ):\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.trainer.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if self._num_training_batches_reached(is_last_batch):\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n # epoch end hook\n self.run_on_epoch_end_hook(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(\n epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers\n )\n\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)\n if should_check_val:\n self.trainer.run_evaluation(on_epoch=True)\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)\n should_train_only = self.trainer.disable_validation or should_skip_eval\n\n if should_train_only:\n # update epoch level lr_schedulers\n self.trainer.optimizer_connector.update_learning_rates(interval='epoch')\n self.check_checkpoint_callback(True)\n self.check_early_stopping_callback(True)\n\n # increment the global step once\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dic = {}\n\n # 
bookkeeping\n self.trainer.hiddens = None\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]\n\n if batch is None:\n return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # lightning module hook\n splits = self.tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in self.prepare_optimizers():\n\n # toggle model params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # automatic_optimization=True: perform dpp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n\n else:\n if self.automatic_optimization:\n\n def train_step_and_backward_closure():\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n self._curr_step_result = self.training_step(\n split_batch, batch_idx, opt_idx, self.trainer.hiddens\n )\n\n if self._curr_step_result is None:\n # user decided to skip optimization\n # make sure to zero grad.\n continue\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # todo: Properly aggregate grad_norm accros opt_idx and split_idx\n grad_norm_dic = self._cur_grad_norm_dict\n self._cur_grad_norm_dict = None\n\n # update running loss + reset accumulated loss\n self.update_running_loss()\n\n result = AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:\n 
opt_closure_result = self._curr_step_result\n\n if opt_closure_result is not None:\n\n # cache metrics\n self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # track hiddens\n self.trainer.hiddens = self.process_hiddens(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n if self.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(opt_closure_result.loss)\n\n self._curr_step_result = None\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n self._curr_step_result = result\n\n if result is None:\n if self.automatic_optimization:\n self.warning_cache.warn(\"training_step returned None if it was on purpose, ignore this warning...\")\n return None\n\n if not self._skip_backward and self.trainer.train_loop.automatic_optimization:\n # backward pass\n with self.trainer.profiler.profile(\"model_backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(result.loss)\n\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.get_model().untoggle_optimizer(opt_idx)\n\n return result\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator_backend.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(interval=\"step\", monitor_metrics=monitor_metrics)\n\n def run_on_epoch_end_hook(self, epoch_output):\n # inform logger the batch loop has finished\n self.trainer.logger_connector.on_train_epoch_end()\n\n self.trainer.call_hook('on_train_epoch_end', epoch_output)\n self.trainer.call_hook('on_epoch_end')\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress 
global step according to grads progress\n if num_accumulated_batches_reached or num_training_batches_reached:\n self.trainer.global_step += 1\n\n def _accumulated_batches_reached(self):\n return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):\n # decide if we should run validation\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n can_check_val = self.trainer.enable_validation and is_val_check_epoch\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches\n\n should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop\n or is_last_batch_for_infinite_dataset\n ) if on_epoch else (is_val_check_batch and not epoch_end_val_check)\n\n return should_check_val and can_check_val\n\n def build_train_args(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_idx]\n\n if len(self.trainer.optimizers) > 1:\n if self.trainer.has_arg(\"training_step\", \"optimizer_idx\"):\n args.append(opt_idx)\n else:\n num_opts = len(self.trainer.optimizers)\n raise ValueError(\n f\"Your LightningModule defines {num_opts} optimizers but \"\n f'training_step is missing the \"optimizer_idx\" argument.'\n )\n\n # pass hiddens if using tbptt\n if self.trainer.truncated_bptt_steps is not None:\n args.append(hiddens)\n\n return args\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):\n \"\"\"\n Figure out what needs to be tracked/logged at the end of the epoch\n \"\"\"\n\n # the training step outputs a list per optimizer. 
The list contains the outputs at each time step\n # when no TBPTT is used, then the list has 1 item per batch\n # when TBPTT IS used, then the list has n items (1 per time step)\n batch_end_outputs = []\n for optimizer_idx_outputs in all_train_step_outputs:\n # extract one representative sample from each time step (1 if no tbptt) and 0th optimizer\n if len(optimizer_idx_outputs) == 0:\n continue\n\n sample_output = optimizer_idx_outputs[-1]\n\n # pull out callback info if available (ie: Results object)\n if isinstance(sample_output, dict) and \"early_stop_on\" in sample_output:\n early_stopping_accumulator.accumulate(sample_output[\"early_stop_on\"])\n\n if isinstance(sample_output, dict) and \"checkpoint_on\" in sample_output:\n checkpoint_accumulator.accumulate(sample_output[\"checkpoint_on\"])\n\n batch_end_outputs.append(optimizer_idx_outputs)\n\n return batch_end_outputs\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.get_model()\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self):\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n" ]
[ [ "torch.cuda.empty_cache", "numpy.cumsum", "numpy.argmax" ] ]
accordproject/labs-cicero-classify
[ "3a52ebaf45252515c417bf94a05e33fc1c2628b8" ]
[ "Practice/adapter_roberta_v4/adapter_model.py" ]
[ "import pandas as pd\nimport numpy as np\nimport torch\nprint(f\"Torch Version: {torch.__version__}\")\n\nimport transformers\nprint(f\"transformers (Adapter) Version: {transformers.__version__}\")\n\nfrom transformers import RobertaTokenizer\nimport numpy as np\n\ntokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n\nfrom transformers import RobertaTokenizer\n\ntokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n\ndef encode_batch(batch):\n \"\"\"Encodes a batch of input data using the model tokenizer.\"\"\"\n return tokenizer(batch[\"text\"], max_length=80, truncation=True, padding=\"max_length\")\n\ndata_path = \"./NER_multilabel_data_v4.csv\"\ndf = pd.read_csv(data_path)\n\nall_tags = df.newTag\n\nall_tags = set(all_tags)\n\nall_tags = \"|\".join(all_tags)\nall_tags = all_tags.split(\"|\")\nall_tags = set(all_tags)\nall_tags = list(all_tags)\n\nfrom ner_dataset import get_trainset_data_loader\n\nall_tags, trainset, trainloader = get_trainset_data_loader(tokenizer, BATCH_SIZE=128)\n\n\nfrom transformers import RobertaConfig, RobertaModelWithHeads\n\nconfig = RobertaConfig.from_pretrained(\n \"roberta-base\",\n num_labels=len(all_tags),\n label2id = trainset.label_map, \n id2label = trainset.id2label\n)\nmodel = RobertaModelWithHeads.from_pretrained(\n \"roberta-base\",\n config=config,\n)\n\nall_adapter_name = []\nfor tag in all_tags:\n adapter_name = f\"{tag}_0731\"\n name = model.load_adapter(f\"./save_adapters/{adapter_name}\")\n all_adapter_name.append(name)\n model.load_head(f\"./save_heads/{adapter_name}\")\n\nimport re\n\nparallel_text = \"','\".join(all_adapter_name)\nresult = re.findall(r'[;|(|)]',parallel_text)\nif len(result) != 0:\n raise(ValueError(\"Adapter Name must not contain \\\"\" + '\\\", \\\"'.join(result) + '\"'))\n\nfrom transformers.adapters.composition import Parallel\nparallel = eval(\"Parallel('\" + \"','\".join(all_adapter_name) + \"')\")\n\nmodel.set_active_adapters(parallel)\n\ndevice = \"cpu\"\n\ndef get_adapter_mapping(model):\n print(model.active_head)\n label_2_id_mapping = dict()\n id_2_label_mapping = dict()\n for i, head in enumerate(model.active_head):\n label_2_id_mapping[head] = i\n id_2_label_mapping[i] = head\n return label_2_id_mapping, id_2_label_mapping\n\n\n\ndef model_predict(model, sentence, device = \"cpu\"):\n tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])\n pos = torch.tensor([[0] * len(tokenized_sentence)])\n tags = torch.tensor([[1] * len(tokenized_sentence)])\n\n model = model.to(device)\n with torch.no_grad():\n outputs = model(input_ids=tokenized_sentence.to(device), \n token_type_ids=pos.to(device), \n attention_mask=tags.to(device))\n\n logits = outputs[1][0]\n\n return_tags_order = {}\n all_output = None\n for i, output in enumerate(outputs):\n\n return_tags_order[i] = (model.active_head[i])\n\n output = outputs[i][0]\n\n if all_output != None:\n all_output = torch.cat((all_output, output), dim=2)\n else:\n all_output = output\n all_output = torch.sigmoid(all_output)\n\n output_array = np.array(all_output)\n output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])\n\n label_confidences = []\n for label_confidence in list(output_array):\n label_confidences.append(list(label_confidence))\n\n #Drop Head and End since it is start/stop Token\n label_confidences = label_confidences[1:-1]\n\n max_value = np.array(label_confidences).argmax(axis=1)\n trans_func = np.vectorize(lambda x: model.active_head[x])\n out_labels = trans_func(max_value)\n\n out_sentence = 
tokenizer.tokenize(sentence)\n\n return out_sentence, out_labels, label_confidences, return_tags_order\n\ndevice = \"cpu\"\n\ndef get_adapter_mapping(model):\n print(model.active_head)\n label_2_id_mapping = dict()\n id_2_label_mapping = dict()\n for i, head in enumerate(model.active_head):\n label_2_id_mapping[head] = i\n id_2_label_mapping[i] = head\n return label_2_id_mapping, id_2_label_mapping\n\n\n\ndef model_predict(model, sentence, device = \"cpu\"):\n tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])\n pos = torch.tensor([[0] * len(tokenized_sentence)])\n tags = torch.tensor([[1] * len(tokenized_sentence)])\n\n model = model.to(device)\n with torch.no_grad():\n outputs = model(input_ids=tokenized_sentence.to(device), \n token_type_ids=pos.to(device), \n attention_mask=tags.to(device))\n\n logits = outputs[1][0]\n\n return_tags_order = {}\n all_output = None\n for i, output in enumerate(outputs):\n\n return_tags_order[i] = (model.active_head[i])\n\n output = outputs[i][0]\n\n if all_output != None:\n all_output = torch.cat((all_output, output), dim=2)\n else:\n all_output = output\n all_output = torch.sigmoid(all_output)\n\n output_array = np.array(all_output)\n output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])\n\n label_confidences = []\n for label_confidence in list(output_array):\n label_confidences.append(list(label_confidence))\n\n #Drop Head and End since it is start/stop Token\n label_confidences = label_confidences[1:-1]\n\n max_value = np.array(label_confidences).argmax(axis=1)\n trans_func = np.vectorize(lambda x: model.active_head[x])\n out_labels = trans_func(max_value)\n\n out_sentence = tokenizer.tokenize(sentence)\n\n return out_sentence, out_labels, label_confidences, return_tags_order" ]
[ [ "numpy.vectorize", "pandas.read_csv", "torch.no_grad", "numpy.array", "torch.sigmoid", "torch.cat" ] ]
falcon2212/detr-tensorflow
[ "119da1390a02b6013e7147d822e72c38fc3a2dd9" ]
[ "detr_tf/data/tfcsv.py" ]
[ "import tensorflow as tf\nfrom random import shuffle\nimport pandas as pd\nimport numpy as np\nimport imageio\nimport os\n\nfrom detr_tf.data import processing\nfrom detr_tf.data.transformation import detr_transform\nfrom detr_tf import bbox\n\n\ndef morethan1(img, tbbox, tclass):\n ret = False\n print(\"morethan1 \", tbbox.shape)\n try:\n ret = tbbox.shape[0] > 0\n except:\n ret = False\n return ret\n\n\ndef load_data_from_index(index, class_names, filenames, anns, config, augmentation, img_dir):\n # Open the image\n image = imageio.imread(config.datadir+img_dir+\"/\"+filenames[index])\n # Select all the annotatiom (bbox and class) on this image\n image_anns = anns[anns[\"filename\"] == filenames[index]]\n\n # Convert all string class to number (the target class)\n t_class = image_anns[\"class\"].map(\n lambda x: class_names.index(x)).to_numpy()\n # Select the width&height of each image (should be the same since all the ann belongs to the same image)\n width = image_anns[\"width\"].to_numpy()\n height = image_anns[\"height\"].to_numpy()\n # Select the xmin, ymin, xmax and ymax of each bbox, Then, normalized the bbox to be between and 0 and 1\n # Finally, convert the bbox from xmin,ymin,xmax,ymax to x_center,y_center,width,height\n bbox_list = image_anns[[\"xmin\", \"ymin\", \"xmax\", \"ymax\"]].to_numpy()\n bbox_list = bbox_list / [width[0], height[0], width[0], height[0]]\n t_bbox = bbox.xy_min_xy_max_to_xcycwh(bbox_list)\n\n # Transform and augment image with bbox and class if needed\n image, t_bbox, t_class = detr_transform(\n image, t_bbox, t_class, config, augmentation=augmentation)\n\n # Normalized image\n image = processing.normalized_images(image, config)\n\n return image.astype(np.float32), t_bbox.astype(np.float32), np.expand_dims(t_class, axis=-1).astype(np.int64)\n\n\ndef load_tfcsv_dataset(config, batch_size, augmentation=False, exclude=[], ann_dir=None, ann_file=None, img_dir=None):\n \"\"\" Load the hardhat dataset\n \"\"\"\n ann_dir = config.data.ann_dir if ann_dir is None else ann_dir\n ann_file = config.data.ann_file if ann_file is None else ann_file\n img_dir = config.data.img_dir if img_dir is None else img_dir\n\n anns = pd.read_csv(config.datadir+ann_file)\n for name in exclude:\n anns = anns[anns[\"class\"] != name]\n\n unique_class = anns[\"class\"].unique()\n unique_class.sort()\n\n # Set the background class to 0\n config.background_class = 0\n class_names = [\"background\"] + unique_class.tolist()\n\n filenames = anns[\"filename\"].unique().tolist()\n indexes = list(range(0, len(filenames)))\n shuffle(indexes)\n\n dataset = tf.data.Dataset.from_tensor_slices(indexes)\n dataset = dataset.map(lambda idx: processing.numpy_fc(\n idx, load_data_from_index,\n class_names=class_names, filenames=filenames, anns=anns, config=config, augmentation=augmentation, img_dir=img_dir), num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Filter labels to be sure to keep only sample with at least one bbox\n dataset = dataset.filter(\n lambda imgs, tbbox, tclass: tf.shape(tbbox)[0] > 0)\n\n # Pad bbox and labels\n dataset = dataset.map(processing.pad_labels,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Batch images\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n return dataset, class_names\n\n\n# print(config.data_dir)\n# train_iterator, class_names = load_tfcsv_dataset(\n# config=config, batch_size=config.batch_size, augmentation=True, img_dir=\"train\", ann_file=\"train/_annotations.csv\")\n# test_iterator, class_names = load_tfcsv_dataset(\n# 
config=config, batch_size=config.batch_size, augmentation=True, img_dir=\"test\", ann_file=\"test/_annotations.csv\")\n# print(test_iterator.cardinality())\n# print(train_iterator.cardinality())\n# # tmp = list(train_iterator)\n# # for i, _ in enumerate(train_iterator):\n# # print(i)\n# # print(int(None))\n" ]
[ [ "pandas.read_csv", "tensorflow.data.Dataset.from_tensor_slices", "numpy.expand_dims", "tensorflow.shape" ] ]
tomAntoine/multi-UAV-simulator
[ "2fbd8b802ea1a5f388722714bac5563d0718b28f" ]
[ "Simulation_Python/scenarios.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nauthor: John Bass\nemail: [email protected]\nlicense: MIT\nPlease feel free to use and modify this, but keep the above information. Thanks!\n\nadaptation\nauthor: Tom Antoine and Alex Martinez\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport cProfile\nfrom trajectory import Trajectory\nfrom ctrl import Control\nfrom quadFiles.quad import Quadcopter\nfrom utils.windModel import Wind\nimport utils\nimport config\nimport mpl_toolkits.mplot3d.axes3d as p3\nfrom matplotlib.legend import Legend\nimport random\n\n\n\"\"\"\nThe variable “quad_id” is provided as an integer from 0 to the number of drones\nin the simulation.\n\nThe “mode” is provided as a string and it can be split into three categories,\ndepending whether or not they are associated with the agent, the target or both.\nThe latter are simple actions such as “takeoff”, “home”, “land”, “fall” or\n“charging”. Then, there are specific modes for agents like “guided” or “track”;\nand targets like “enemy”. The change of mode can be pre-defined or provided\nby the Mission planning and Task control subsystem. In the case of targets, the\ntransition is automated internally. They will be initialized in “enemy” mode and\nchanged into “neutralized” if the conditions are met to finally change into “fall”.\nIn the case of agents, the change of modes is performed externally after system\nintegration. However, due to the very intuitive transitions, some of them were\npredefined in sequences for the subsystem validation and verification. For this\nreason, “takeoff” and “land” mode were integrated at the beginning and end of\neach mission. Similarly, after an agent in “track” mode neutralized its target, or\na “guided” one has reached its goal position, the mode was switched to “home”.\nThe “id_targ” is a specific integer input associated to the mode “track”. It\ncorresponds to the target identification number and is assigned as -1 by default\nif any other mode is employed.\n\nThe “pos_goal” is a set of coordinates x, y and z in the global reference frame\nthat represent the goal position. It should be noted that although x and y are\nnot bounded, the z coordinate is restricted so that the drones cannot go through\nthe ground and by consistency with the guidance algorithms, it is defined as\nnegative. It should be noted that although this is an input from Mission planning\nand Task control subsystem it will be updated for specific modes such as\n“track”.\n\nThe “pos_obs” is a list of sets of coordinates x, y and z in the global reference\nframe corresponding to the static obstacles and therefore should be kept\nconstant for all the drones in the simulation environment. This information is\npredefined but will need to be provided by the Situation Awareness subsystem.\n\nThe “pos_ini” is a set of coordinates x, y and z in the global reference frame\nthat represent the initial position. It should be noted that as for the rest of\ncoordinates, the z coordinate is defined as negative.\n\nThe “color” is employed for the easy identification of the drones. It allows to\neasily verify the correct functioning of the algorithms.\n\nThe “ctrlType” xyz_pos by default.\n\nThe “trajSelect” minimum velocity, no yaw control, average speedby default.\n\nThe “Ti” input is given as a number and indicates the initial time for the\nsimulation. 
It is common for all drones and by default set at 0s.\n\nFor most modes, the “Tf” input is given as a number and corresponds to the\nfinal time of the simulation “Tf”. It is therefore employed for creating the\ntrajectories to reach goal position. However, in modes that require regular\nupdates as “track” or “guided”, it is substituted by the update time. In these\ncases, it should be slightly modified within drones. It is usually around 0.5s.\n\nThe numerical time step “numTimeStep” is employed for the trajectories.\n\n\"\"\"\n\n\n\ndef full_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[1, 5, -2], [8, 2, -8], [5, 8, -9], [0, 0, -2], [3, 3, -1],[3, 9, -17],[5, 7, -18],[0, 0, -10],[5, 10, -16],[10,10,-12],[13,13,-13]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [15,15,-15], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='guided', id_targ = -1, color = 'green', pos_ini = [0,3,0], pos_goal = [15,10,-15], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 0, color = 'pink', pos_ini = [3,0,0], pos_goal = [15,20,-15], pos_obs = pos_obs)\n quads = [quad0, quad1, quad2]\n return pos_obs,quads\n\ndef multi_waypoint_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[50,0,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [0,-17,-10], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [20,0,0], pos_goal = [-20,-15,-10], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'red', pos_ini = [-20,-10,0], pos_goal = [-10,0,-20], pos_obs = pos_obs)\n quads = [quad0, quad1, quad2]\n return pos_obs,quads\n\ndef static_OA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = []\n for i in range(30):\n pos_obs.append(random.sample(range(-10, 0), 3))\n pos_obs = np.array(pos_obs)\n print(pos_obs)\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [-10,-10,-10], pos_obs = pos_obs)\n quads = [quad0]\n return pos_obs,quads\n\ndef dynamic_CA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n #Tf =8s\n pos_obs = np.array([[50,0,0]])\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,10,-5],pos_goal = [30,10,-5], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [3,0,-5], pos_goal = [3,20,-5], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [8,0,-5], pos_goal = [8,20,-5], pos_obs = pos_obs)\n quad3 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [15,0,-5], pos_goal = [15,20,-5], pos_obs = pos_obs)\n quads = [quad0, quad1,quad2,quad3]\n return pos_obs,quads\n\ndef 
dynamic_CA_scenario_random_pos(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n #Tf =8s\n pos_obs = np.array([[50,0,0]])\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,10,-5],pos_goal = [30,10,-5], pos_obs = pos_obs)\n x, z = random.randint(3,17),-1*random.randint(1,8)\n quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)\n x, z = random.randint(3,17),-1*random.randint(1,8)\n quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)\n x, z = random.randint(3,17),-1*random.randint(1,8)\n quad3 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)\n quads = [quad0, quad1,quad2,quad3]\n return pos_obs,quads\n\ndef simple_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[-10,-10,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [15,15,-15], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [5,5,0], pos_goal = [2,2,-10], pos_obs = pos_obs)\n quads = [quad0, quad1]\n return pos_obs,quads\n\ndef multi_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[-10,-10,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [15,15,-15], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [4,0,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 0, color = 'green', pos_ini = [4,4,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quad3 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'green', pos_ini = [4,-4,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quads = [quad0, quad1, quad2, quad3]\n return pos_obs,quads\n\ndef tracking_loop_scenario(x,Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[x/2,x/2,-10]])\n quad0 = Quadcopter(Ti, Ts*99, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='track', id_targ = 1, color = 'blue', pos_ini = [0,0,-10], pos_goal = [0,x,-10], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 2, color = 'green', pos_ini = [x,0,-10], pos_goal = [0,0,-10], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Ts*101, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 3, color = 'orange', pos_ini = [x,x,-10],pos_goal = [x,0,-10], pos_obs = pos_obs)\n quad3 = Quadcopter(Ti, Ts*102, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'pink', pos_ini = [0,x,-10], pos_goal = [x,x,-10],pos_obs = pos_obs)\n quads = [quad0, quad1,quad2,quad3]\n return pos_obs,quads\n\ndef 
tracking_and_kill_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[-10,-10,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [20,15,-20], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [5,0,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quads = [quad0, quad1]\n return pos_obs,quads\n\ndef simple_guided_for_PF(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = []\n for i in range(20):\n pos_obs.append(random.sample(range(-10, 0), 3))\n pos_obs = np.array(pos_obs)\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [-10,-10,-10], pos_obs = pos_obs)\n quads = [quad0]\n return pos_obs,quads\n\ndef ROS_simu(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n fire_station=[]\n fire_truck=[]\n tree_1=[]\n tree_2=[]\n pos_obs=[]\n for i in range(20):\n x = random.sample(range(-10, 10), 1)[0]\n y = random.sample(range(-55, -45), 1)[0]\n z = random.sample(range(-12, 0), 1)[0]\n fire_station.append([x,y,z])\n \n for i in range(5):\n x = random.sample(range(-19, 21), 1)[0]\n y = random.sample(range(-55, -45), 1)[0]\n z = random.sample(range(-3, 0), 1)[0]\n fire_truck.append([x,y,z])\n\n for i in range(5):\n x = random.sample(range(-12, -8), 1)[0]\n y = random.sample(range(-42,-38), 1)[0]\n z = random.sample(range(-5, 0), 1)[0]\n tree_1.append([x,y,z])\n for i in range(5):\n x = random.sample(range(8, 12), 1)[0]\n y = random.sample(range(-42,-38), 1)[0]\n z = random.sample(range(-5, 0), 1)[0]\n tree_2.append([x,y,z])\n\n pos_obs = fire_station + fire_truck + tree_1 + tree_2\n pos_obs = np.array(pos_obs)\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [0,-100,-10], pos_obs = pos_obs)\n quads = [quad0]\n return(pos_obs,quads)\n\ndef real_map(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n\n xs = [-1,0,1]\n ys = [-1,0,1]\n zs = [0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10]\n tower = [[x,y,z] for x in xs for y in ys for z in zs]\n\n xs = [-20,5,10]\n ys = [5,-10,10]\n zs = [0,-1,-2,-3]\n trees = [[x,y,z] for x in xs for y in ys for z in zs]\n\n xs = [-20,5,10]\n ys = [5,-10,10]\n zs = [-4,-5]\n\n tops = []\n for i in range(3):\n x, y = xs[i], ys[i]\n for z in zs:\n tops = tops + [[x-1,y,z],[x+1,y,z],[x,y,z],[x,y-1,z],[x,y+1,z]]\n print(tops)\n\n pos_obs = np.array(tower + trees + tops)\n\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [-10,-10,-10], pos_obs = pos_obs)\n quads = [quad0]\n return pos_obs,quads\n" ]
[ [ "numpy.array" ] ]
sunkr1995/genetic-drawing
[ "6e5cc755a55c1994770c3f18fb14f1cc651bb700" ]
[ "Sketch.py" ]
[ "'''\nAuthor: your name\nDate: 2021-07-02 17:20:23\nLastEditTime: 2021-07-08 16:28:05\nLastEditors: Please set LastEditors\nDescription: In User Settings Edit\nFilePath: /genetic-drawing/2.py\n'''\n#coding:utf-8\nimport cv2 \nimport math\nimport numpy as np\n \n \ndef dodgeNaive(image, mask):\n # determine the shape of the input image\n width, height = image.shape[:2]\n \n # prepare output argument with same size as image\n blend = np.zeros((width, height), np.uint8)\n \n for col in range(width):\n for row in range(height):\n # do for every pixel\n if mask[col, row] == 255:\n # avoid division by zero\n blend[col, row] = 255\n else:\n # shift image pixel value by 8 bits\n # divide by the inverse of the mask\n tmp = (image[col, row] << 8) / (255 - mask)\n # print('tmp={}'.format(tmp.shape))\n # make sure resulting value stays within bounds\n if tmp.any() > 255:\n tmp = 255\n blend[col, row] = tmp\n \n return blend\n \n \ndef dodgeV2(image, mask):\n return cv2.divide(image, 255 - mask, scale=256)\n \n \ndef burnV2(image, mask):\n return 255 - cv2.divide(255 - image, 255 - mask, scale=256)\n \n \ndef rgb_to_sketch(src_image_name, dst_image_name):\n img_rgb = cv2.imread(src_image_name)\n img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\n # 读取图片时直接转换操作\n # img_gray = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)\n \n img_gray_inv = 255 - img_gray\n img_blur = cv2.GaussianBlur(img_gray_inv, ksize=(21, 21),\n sigmaX=0, sigmaY=0)\n img_blend = dodgeV2(img_gray, img_blur)\n \n cv2.imshow('original', img_rgb)\n cv2.imshow('gray', img_gray)\n cv2.imshow('gray_inv', img_gray_inv)\n cv2.imshow('gray_blur', img_blur)\n cv2.imshow(\"pencil sketch\", img_blend)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite(dst_image_name, img_blend)\n \n \nif __name__ == '__main__':\n src_image_name = '02.jpg'\n dst_image_name = 'sketch_02.jpg'\n rgb_to_sketch(src_image_name, dst_image_name)\n" ]
[ [ "numpy.zeros" ] ]
yyht/bert
[ "480c909e0835a455606e829310ff949c9dd23549", "480c909e0835a455606e829310ff949c9dd23549" ]
[ "t2t_bert/utils/tensor2tensor/trax/rlax/ppo.py", "BERT-keras-master/BERT-keras-master/transformer/embedding.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PPO in JAX.\n\nNotation:\n\nB, scalar - batch size\nT, scalar - number of time-steps in a trajectory, or the value of the padded\n time-step dimension.\nOBS, tuple - shape of a singular observation from the environment.\n Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3)\nA, scalar - Number of actions, assuming a discrete space.\n\nPolicy and Value function signatures:\n\nPolicy Function :: [B, T] + OBS -> [B, T, A]\nValue Function :: [B, T] + OBS -> [B, T, 1]\nPolicy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1])\n\ni.e. the policy net should take a batch of *trajectories* and at each time-step\nin each batch deliver a probability distribution over actions.\n\nNOTE: It doesn't return logits, rather the expectation is that it returns\nlog-probabilities instead.\n\nNOTE: The policy and value functions need to take care to not take into account\nfuture time-steps while deciding the actions (or value) for the current\ntime-step.\n\nPolicy and Value Function produces a tuple of the expected output of a policy\nfunction and a value function.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport os\nimport time\n\nfrom absl import logging\nimport cloudpickle as pickle\nimport gin\nimport gym\nfrom jax import grad\nfrom jax import jit\nfrom jax import lax\nfrom jax import numpy as np\nfrom jax import random as jax_random\nimport numpy as onp\nfrom tensor2tensor.envs import env_problem\nfrom tensor2tensor.envs import env_problem_utils\nfrom tensor2tensor.trax import jaxboard\nfrom tensor2tensor.trax import layers as tl\nfrom tensor2tensor.trax import optimizers as trax_opt\nfrom tensor2tensor.trax import trax\nfrom tensorflow.io import gfile\n\nDEBUG_LOGGING = False\nGAMMA = 0.99\nLAMBDA = 0.95\nEPSILON = 0.1\nEPOCHS = 50 # 100\nN_OPTIMIZER_STEPS = 100\nPRINT_EVERY_OPTIMIZER_STEP = 20\nBATCH_TRAJECTORIES = 32\n\n\ndef policy_and_value_net(rng_key,\n batch_observations_shape,\n observations_dtype,\n n_actions,\n bottom_layers_fn=(),\n two_towers=True):\n \"\"\"A policy and value net function.\"\"\"\n\n # Layers.\n\n # Now, with the current logits, one head computes action probabilities and the\n # other computes the value function.\n # NOTE: The LogSoftmax instead of the Softmax because of numerical stability.\n\n if two_towers:\n layers = [\n tl.Dup(),\n tl.Parallel(\n [bottom_layers_fn(), tl.Dense(n_actions), tl.LogSoftmax()],\n [bottom_layers_fn(), tl.Dense(1)],\n )\n ]\n else:\n layers = [\n bottom_layers_fn(),\n tl.Dup(),\n tl.Parallel(\n [tl.Dense(n_actions), tl.LogSoftmax()],\n [tl.Dense(1)],\n )\n ]\n net = tl.Model(layers)\n params = net.initialize(batch_observations_shape, observations_dtype, rng_key)\n return params, net\n\n\ndef optimizer_fn(net_params, step_size=1e-3):\n opt = trax_opt.Adam(step_size=step_size, b1=0.9, 
b2=0.999, eps=1e-08)\n opt_init = lambda x: (x, opt.tree_init(x))\n opt_update = lambda i, g, s: opt.tree_update(i, g, s[0], s[1])\n get_params = lambda x: x[0]\n opt_state = opt_init(net_params)\n return opt_state, opt_update, get_params\n\n\n# Should this be collect 'n' trajectories, or\n# Run the env for 'n' steps and take completed trajectories, or\n# Any other option?\ndef collect_trajectories(env,\n policy_fn,\n n_trajectories=1,\n policy=env_problem_utils.GUMBEL_SAMPLING,\n max_timestep=None,\n epsilon=0.1,\n reset=True,\n len_history_for_policy=32,\n rng=None):\n \"\"\"Collect trajectories with the given policy net and behaviour.\n\n Args:\n env: A gym env interface, for now this is not-batched.\n policy_fn: observations(B,T+1) -> log-probabs(B,T+1, A) callable.\n n_trajectories: int, number of trajectories.\n policy: string, \"greedy\", \"epsilon-greedy\", or \"categorical-sampling\" i.e.\n how to use the policy_fn to return an action.\n max_timestep: int or None, the index of the maximum time-step at which we\n return the trajectory, None for ending a trajectory only when env returns\n done.\n epsilon: float, the epsilon for `epsilon-greedy` policy.\n reset: bool, true if we want to reset the envs. The envs are also reset if\n max_max_timestep is None or < 0\n len_history_for_policy: int, the maximum history to keep for applying the\n policy on.\n rng: jax rng, splittable.\n\n Returns:\n A tuple (trajectory, number of trajectories that are done)\n trajectory: list of (observation, action, reward) tuples, where each element\n `i` is a tuple of numpy arrays with shapes as follows:\n observation[i] = (B, T_i + 1)\n action[i] = (B, T_i)\n reward[i] = (B, T_i)\n \"\"\"\n\n assert isinstance(env, env_problem.EnvProblem)\n # This is an env_problem, run its collect function.\n trajs, n_done, timing_info = env_problem_utils.play_env_problem_with_policy(\n env,\n policy_fn,\n num_trajectories=n_trajectories,\n max_timestep=max_timestep,\n policy_sampling=policy,\n eps=epsilon,\n reset=reset,\n len_history_for_policy=len_history_for_policy,\n rng=rng)\n # Skip returning raw_rewards here, since they aren't used.\n\n # t is the return value of Trajectory.as_numpy, so:\n # (observation, action, processed_reward, raw_reward, infos)\n return [(t[0], t[1], t[2], t[4]) for t in trajs], n_done, timing_info\n\n\n# This function can probably be simplified, ask how?\n# Can we do something much simpler than lax.pad, maybe np.pad?\n# Others?\n\n\ndef get_padding_value(dtype):\n \"\"\"Returns the padding value given a dtype.\"\"\"\n padding_value = None\n if dtype == np.uint8:\n padding_value = np.uint8(0)\n elif dtype == np.uint16:\n padding_value = np.uint16(0)\n elif dtype == np.float32 or dtype == np.float64:\n padding_value = 0.0\n else:\n padding_value = 0\n assert padding_value is not None\n return padding_value\n\n\n# TODO(afrozm): Use np.pad instead and make jittable?\ndef pad_trajectories(trajectories, boundary=20):\n \"\"\"Pad trajectories to a bucket length that is a multiple of boundary.\n\n Args:\n trajectories: list[(observation, actions, rewards)], where each observation\n is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the\n length of the list being B (batch size).\n boundary: int, bucket length, the actions and rewards are padded to integer\n multiples of boundary.\n\n Returns:\n tuple: (padding lengths, reward_mask, padded_observations, padded_actions,\n padded_rewards) where padded_observations is shaped (B, T+1) + OBS and\n padded_actions, padded_rewards & 
reward_mask are shaped (B, T).\n Where T is max(t) rounded up to an integer multiple of boundary.\n padded_length is how much padding we've added and\n reward_mask is 1s for actual rewards and 0s for the padding.\n \"\"\"\n\n # Let's compute max(t) over all trajectories.\n t_max = max(r.shape[0] for (_, _, r, _) in trajectories)\n\n # t_max is rounded to the next multiple of `boundary`\n boundary = int(boundary)\n bucket_length = boundary * int(np.ceil(float(t_max) / boundary))\n\n # So all obs will be padded to t_max + 1 and actions and rewards to t_max.\n padded_observations = []\n padded_actions = []\n padded_rewards = []\n padded_infos = collections.defaultdict(list)\n padded_lengths = []\n reward_masks = []\n\n for (o, a, r, i) in trajectories:\n # Determine the amount to pad, this holds true for obs, actions and rewards.\n num_to_pad = bucket_length + 1 - o.shape[0]\n padded_lengths.append(num_to_pad)\n if num_to_pad == 0:\n padded_observations.append(o)\n padded_actions.append(a)\n padded_rewards.append(r)\n reward_masks.append(onp.ones_like(r, dtype=np.int32))\n if i:\n for k, v in i.items():\n padded_infos[k].append(v)\n continue\n\n # First pad observations.\n padding_config = tuple([(0, num_to_pad, 0)] + [(0, 0, 0)] * (o.ndim - 1))\n\n padding_value = get_padding_value(o.dtype)\n action_padding_value = get_padding_value(a.dtype)\n reward_padding_value = get_padding_value(r.dtype)\n\n padded_obs = lax.pad(o, padding_value, padding_config)\n padded_observations.append(padded_obs)\n\n # Now pad actions and rewards.\n assert a.ndim == 1 and r.ndim == 1\n padding_config = ((0, num_to_pad, 0),)\n\n padded_action = lax.pad(a, action_padding_value, padding_config)\n padded_actions.append(padded_action)\n padded_reward = lax.pad(r, reward_padding_value, padding_config)\n padded_rewards.append(padded_reward)\n\n # Also create the mask to use later.\n reward_mask = onp.ones_like(r, dtype=np.int32)\n reward_masks.append(lax.pad(reward_mask, 0, padding_config))\n\n if i:\n for k, v in i.items():\n # Create a padding configuration for this value.\n padding_config = [(0, num_to_pad, 0)] + [(0, 0, 0)] * (v.ndim - 1)\n padded_infos[k].append(lax.pad(v, 0.0, tuple(padding_config)))\n\n # Now stack these padded_infos if they exist.\n stacked_padded_infos = None\n if padded_infos:\n stacked_padded_infos = {k: np.stack(v) for k, v in padded_infos.items()}\n\n return padded_lengths, np.stack(reward_masks), np.stack(\n padded_observations), np.stack(padded_actions), np.stack(\n padded_rewards), stacked_padded_infos\n\n\ndef rewards_to_go(rewards, mask, gamma=0.99):\n r\"\"\"Computes rewards to go.\n\n Reward to go is defined as follows, the discounted reward that we have to\n yet collect, going forward from this point, i.e.:\n\n r2g_t = \\sum_{l=0}^{\\infty} (\\gamma^{l} * reward_{t+l})\n\n Args:\n rewards: np.ndarray of shape (B, T) of rewards.\n mask: np.ndarray of shape (B, T) of mask for the rewards.\n gamma: float, discount factor.\n\n Returns:\n rewards to go, np.ndarray of shape (B, T).\n \"\"\"\n B, T = rewards.shape # pylint: disable=invalid-name,unused-variable\n\n masked_rewards = rewards * mask # (B, T)\n\n # The lax.scan version of this is slow, but we still show it here for\n # completeness.\n # rewards_rev = np.flip(masked_rewards, axis=1) # (B, T) flipped on time.\n # rrt = np.transpose(rewards_rev) # (T, B) transpose to scan over time.\n #\n # def discounting_add(carry, reward):\n # x = reward + (gamma * carry)\n # return x, x\n #\n # _, ys = lax.scan(discounting_add,\n # 
np.zeros_like(rrt[0], dtype=np.float32),\n # rrt.astype(np.float32))\n #\n # # ys is (T, B) and T is in reverse order.\n # return np.flip(np.transpose(ys), axis=1)\n\n # We use the following recurrence relation, derived from the equation above:\n #\n # r2g[t+1] = (r2g[t] - r[t]) / gamma\n #\n # This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..\n #\n # **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0\n # and gamma < 1.0, so the division keeps increasing.\n #\n # So we just run the recurrence in reverse, i.e.\n #\n # r2g[t] = r[t] + (gamma*r2g[t+1])\n #\n # This is much better, but might have lost updates since the (small) rewards\n # at earlier time-steps may get added to a (very?) large sum.\n\n # Compute r2g_{T-1} at the start and then compute backwards in time.\n r2gs = [masked_rewards[:, -1]]\n\n # Go from T-2 down to 0.\n for t in reversed(range(T - 1)):\n r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))\n\n # The list should have length T.\n assert T == len(r2gs)\n\n # First we stack them in the correct way to make it (B, T), but these are\n # still from newest (T-1) to oldest (0), so then we flip it on time axis.\n return np.flip(np.stack(r2gs, axis=1), axis=1)\n\n\n@jit\ndef value_loss_given_predictions(value_prediction,\n rewards,\n reward_mask,\n gamma=0.99,\n epsilon=0.2,\n value_prediction_old=None):\n \"\"\"Computes the value loss given the prediction of the value function.\n\n Args:\n value_prediction: np.ndarray of shape (B, T+1, 1)\n rewards: np.ndarray of shape (B, T) of rewards.\n reward_mask: np.ndarray of shape (B, T), the mask over rewards.\n gamma: float, discount factor.\n epsilon: float, clip-fraction, used if value_value_prediction_old isn't None\n value_prediction_old: np.ndarray of shape (B, T+1, 1) of value predictions\n using the old parameters. If provided, we incorporate this in the loss as\n well. This is from the OpenAI baselines implementation.\n\n Returns:\n The average L2 value loss, averaged over instances where reward_mask is 1.\n \"\"\"\n\n B, T = rewards.shape # pylint: disable=invalid-name\n assert (B, T) == reward_mask.shape\n assert (B, T + 1, 1) == value_prediction.shape\n\n value_prediction = np.squeeze(value_prediction, axis=2) # (B, T+1)\n value_prediction = value_prediction[:, :-1] * reward_mask # (B, T)\n r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, T)\n loss = (value_prediction - r2g)**2\n\n # From the baselines implementation.\n if value_prediction_old is not None:\n value_prediction_old = np.squeeze(value_prediction_old, axis=2) # (B, T+1)\n value_prediction_old = value_prediction_old[:, :-1] * reward_mask # (B, T)\n\n v_clipped = value_prediction_old + np.clip(\n value_prediction - value_prediction_old, -epsilon, epsilon)\n v_clipped_loss = (v_clipped - r2g)**2\n loss = np.maximum(v_clipped_loss, loss)\n\n # Take an average on only the points where mask != 0.\n return np.sum(loss) / np.sum(reward_mask)\n\n\ndef deltas(predicted_values, rewards, mask, gamma=0.99):\n r\"\"\"Computes TD-residuals from V(s) and rewards.\n\n Where a `delta`, i.e. a td-residual is defined as:\n\n delta_{b,t} = r_{b,t} + \\gamma * v_{b,t+1} - v_{b,t}.\n\n Args:\n predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was\n squeezed. 
These represent V(s_bt) for b < B and t < T+1\n rewards: ndarray of shape (B, T) of rewards.\n mask: ndarray of shape (B, T) of mask for rewards.\n gamma: float, discount factor.\n\n Returns:\n ndarray of shape (B, T) of one-step TD-residuals.\n \"\"\"\n\n # Predicted values at time t, cutting off the last to have shape (B, T).\n predicted_values_bt = predicted_values[:, :-1]\n # Predicted values at time t+1, by cutting off the first to have shape (B, T)\n predicted_values_btplus1 = predicted_values[:, 1:]\n # Return the deltas as defined above.\n return (rewards +\n (gamma * predicted_values_btplus1) - predicted_values_bt) * mask\n\n\ndef gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):\n r\"\"\"Computes the GAE advantages given the one step TD-residuals.\n\n The formula for a GAE advantage estimator is as follows:\n\n A_{bt} = \\sum_{l=0}^{\\infty}(\\gamma * \\lambda)^{l}(\\delta_{b,t+l}).\n\n Internally we just call rewards_to_go, since it is the same computation.\n\n Args:\n td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.\n mask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the\n case that the `td_deltas` are already masked correctly since they are\n produced by `deltas(...)`\n lambda_: float, lambda parameter for GAE estimators.\n gamma: float, lambda parameter for GAE estimators.\n\n Returns:\n GAE advantage estimates.\n \"\"\"\n\n return rewards_to_go(td_deltas, mask, lambda_ * gamma)\n\n\ndef chosen_probabs(probab_observations, actions):\n \"\"\"Picks out the probabilities of the actions along batch and time-steps.\n\n Args:\n probab_observations: ndarray of shape `[B, T+1, A]`, where\n probab_observations[b, t, i] contains the log-probability of action = i at\n the t^th time-step in the b^th trajectory.\n actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which\n action was chosen in the b^th trajectory's t^th time-step.\n\n Returns:\n `[B, T]` ndarray with the log-probabilities of the chosen actions.\n \"\"\"\n B, T = actions.shape # pylint: disable=invalid-name\n assert (B, T + 1) == probab_observations.shape[:2]\n return probab_observations[np.arange(B)[:, None], np.arange(T), actions]\n\n\ndef compute_probab_ratios(p_new, p_old, actions, reward_mask):\n \"\"\"Computes the probability ratios for each time-step in a trajectory.\n\n Args:\n p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy\n network assigns to all the actions at each time-step in each batch using\n the old parameters.\n p_old: ndarray of shape [B, T+1, A], same as above, but using old policy\n network parameters.\n actions: ndarray of shape [B, T] where each element is from [0, A).\n reward_mask: ndarray of shape [B, T] masking over probabilities.\n\n Returns:\n probab_ratios: ndarray of shape [B, T], where\n probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}\n \"\"\"\n\n B, T = actions.shape # pylint: disable=invalid-name\n assert (B, T + 1) == p_old.shape[:2]\n assert (B, T + 1) == p_new.shape[:2]\n\n logp_old = chosen_probabs(p_old, actions)\n logp_new = chosen_probabs(p_new, actions)\n\n assert (B, T) == logp_old.shape\n assert (B, T) == logp_new.shape\n\n # Since these are log-probabilities, we just subtract them.\n probab_ratios = np.exp(logp_new - logp_old) * reward_mask\n assert (B, T) == probab_ratios.shape\n return probab_ratios\n\n\ndef clipped_probab_ratios(probab_ratios, epsilon=0.2):\n return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon)\n\n\ndef clipped_objective(probab_ratios, 
advantages, reward_mask, epsilon=0.2):\n return np.minimum(\n probab_ratios * advantages,\n clipped_probab_ratios(probab_ratios, epsilon=epsilon) *\n advantages) * reward_mask\n\n\n@jit\ndef ppo_loss_given_predictions(log_probab_actions_new,\n log_probab_actions_old,\n value_predictions_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.2):\n \"\"\"PPO objective, with an eventual minus sign, given predictions.\"\"\"\n B, T = padded_rewards.shape # pylint: disable=invalid-name\n assert (B, T) == padded_actions.shape\n assert (B, T) == reward_mask.shape\n\n _, _, A = log_probab_actions_old.shape # pylint: disable=invalid-name\n assert (B, T + 1, 1) == value_predictions_old.shape\n assert (B, T + 1, A) == log_probab_actions_old.shape\n assert (B, T + 1, A) == log_probab_actions_new.shape\n\n # (B, T)\n td_deltas = deltas(\n np.squeeze(value_predictions_old, axis=2), # (B, T+1)\n padded_rewards,\n reward_mask,\n gamma=gamma)\n\n # (B, T)\n advantages = gae_advantages(\n td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)\n\n # Normalize the advantages.\n advantages = (advantages - np.mean(advantages)) / np.std(advantages)\n\n # (B, T)\n ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old,\n padded_actions, reward_mask)\n assert (B, T) == ratios.shape\n\n # (B, T)\n objective = clipped_objective(\n ratios, advantages, reward_mask, epsilon=epsilon)\n assert (B, T) == objective.shape\n\n # ()\n average_objective = np.sum(objective) / np.sum(reward_mask)\n\n # Loss is negative objective.\n return -average_objective\n\n\n@jit\ndef combined_loss_given_predictions(log_probab_actions_new,\n log_probab_actions_old,\n value_prediction_new,\n value_prediction_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.2,\n c1=1.0,\n c2=0.01):\n \"\"\"Computes the combined (clipped loss + value loss) given predictions.\"\"\"\n loss_value = value_loss_given_predictions(\n value_prediction_new,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n value_prediction_old=value_prediction_old,\n epsilon=epsilon)\n loss_ppo = ppo_loss_given_predictions(\n log_probab_actions_new,\n log_probab_actions_old,\n value_prediction_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon)\n entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask)\n return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo,\n loss_value, entropy_bonus)\n\n\[email protected](jit, static_argnums=(3,))\ndef combined_loss(new_params,\n log_probab_actions_old,\n value_predictions_old,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.2,\n c1=1.0,\n c2=0.01,\n rng=None):\n \"\"\"Computes the combined (clipped loss + value loss) given observations.\"\"\"\n log_probab_actions_new, value_predictions_new = policy_and_value_net_apply(\n padded_observations, new_params, rng=rng)\n\n # (combined_loss, ppo_loss, value_loss, entropy_bonus)\n return combined_loss_given_predictions(\n log_probab_actions_new,\n log_probab_actions_old,\n value_predictions_new,\n value_predictions_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon,\n c1=c1,\n c2=c2)\n\n\[email protected](jit, static_argnums=(2, 3, 4))\ndef policy_and_value_opt_step(i,\n opt_state,\n opt_update,\n get_params,\n policy_and_value_net_apply,\n 
log_probab_actions_old,\n value_predictions_old,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n c1=1.0,\n c2=0.01,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.1,\n rng=None):\n \"\"\"Policy and Value optimizer step.\"\"\"\n\n # Combined loss function given the new params.\n def policy_and_value_loss(params):\n \"\"\"Returns the combined loss given just parameters.\"\"\"\n (loss, _, _, _) = combined_loss(\n params,\n log_probab_actions_old,\n value_predictions_old,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n c1=c1,\n c2=c2,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon,\n rng=rng)\n return loss\n\n new_params = get_params(opt_state)\n g = grad(policy_and_value_loss)(new_params)\n # TODO(afrozm): Maybe clip gradients?\n return opt_update(i, g, opt_state)\n\n\ndef get_time(t1, t2=None):\n if t2 is None:\n t2 = time.time()\n return round((t2 - t1) * 1000, 2)\n\n\ndef approximate_kl(log_prob_new, log_prob_old, mask):\n \"\"\"Computes the approximate KL divergence between the old and new log-probs.\n\n Args:\n log_prob_new: (B, T+1, A) log probs new\n log_prob_old: (B, T+1, A) log probs old\n mask: (B, T)\n\n Returns:\n Approximate KL.\n \"\"\"\n diff = log_prob_old - log_prob_new\n # Cut the last time-step out.\n diff = diff[:, :-1]\n # Mask out the irrelevant part.\n diff *= mask[:, :, np.newaxis] # make mask (B, T, 1)\n # Average on non-masked part.\n return np.sum(diff) / np.sum(mask)\n\n\ndef masked_entropy(log_probs, mask):\n \"\"\"Computes the entropy for the given log-probs.\n\n Args:\n log_probs: (B, T+1, A) log probs\n mask: (B, T) mask.\n\n Returns:\n Entropy.\n \"\"\"\n # Cut the last time-step out.\n lp = log_probs[:, :-1]\n # Mask out the irrelevant part.\n lp *= mask[:, :, np.newaxis] # make mask (B, T, 1)\n p = np.exp(lp) * mask[:, :, np.newaxis] # (B, T, 1)\n # Average on non-masked part and take negative.\n return -(np.sum(lp * p) / np.sum(mask))\n\n\ndef evaluate_policy(eval_env,\n get_predictions,\n temperatures,\n max_timestep=20000,\n n_evals=1,\n len_history_for_policy=32,\n rng=None):\n \"\"\"Evaluate the policy.\"\"\"\n\n processed_reward_sums = collections.defaultdict(list)\n raw_reward_sums = collections.defaultdict(list)\n for eval_rng in jax_random.split(rng, num=n_evals):\n for temperature in temperatures:\n trajs, _, _ = env_problem_utils.play_env_problem_with_policy(\n eval_env,\n get_predictions,\n num_trajectories=eval_env.batch_size,\n max_timestep=max_timestep,\n reset=True,\n policy_sampling=env_problem_utils.GUMBEL_SAMPLING,\n temperature=temperature,\n rng=eval_rng,\n len_history_for_policy=len_history_for_policy)\n processed_reward_sums[temperature].extend(sum(traj[2]) for traj in trajs)\n raw_reward_sums[temperature].extend(sum(traj[3]) for traj in trajs)\n\n # Return the mean and standard deviation for each temperature.\n def compute_stats(reward_dict):\n return {\n temperature: {\"mean\": onp.mean(rewards), \"std\": onp.std(rewards)}\n for (temperature, rewards) in reward_dict.items()\n }\n return {\n \"processed\": compute_stats(processed_reward_sums),\n \"raw\": compute_stats(raw_reward_sums),\n }\n\n\ndef maybe_restore_params(output_dir, policy_and_value_net_params):\n \"\"\"Maybe restore the params from the checkpoint dir.\n\n Args:\n output_dir: Directory where saved model checkpoints are stored.\n policy_and_value_net_params: Default params, returned if model is'nt found.\n\n Returns:\n triple (restore (bool), params, iter(int)) where iter is the epoch 
from\n which we restored the params, 0 is restore = False.\n \"\"\"\n model_files = gfile.glob(os.path.join(output_dir, \"model-??????.pkl\"))\n for model_file in reversed(sorted(model_files)):\n logging.info(\"Trying to restore model from %s\", model_file)\n try:\n with gfile.GFile(model_file, \"rb\") as f:\n loaded_policy_and_value_net_params = pickle.load(f)\n policy_and_value_net_params = loaded_policy_and_value_net_params\n model_file_basename = os.path.basename(model_file) # model-??????.pkl\n i = int(filter(str.isdigit, model_file_basename))\n return True, policy_and_value_net_params, i\n except EOFError as e:\n logging.error(\"Unable to load model from: %s with %s\", model_file, e)\n # Try an older version.\n continue\n return False, policy_and_value_net_params, 0\n\n\ndef write_eval_reward_summaries(reward_stats_by_mode, summary_writer, epoch):\n \"\"\"Writes evaluation reward statistics to summary and logs them.\n\n Args:\n reward_stats_by_mode: Nested dict of structure:\n {\n \"raw\": {\n <temperature 1>: {\n \"mean\": <reward mean>,\n \"std\": <reward std>,\n },\n <temperature 2>: ...\n },\n \"processed\": ...\n }\n summary_writer: jaxboard.SummaryWriter.\n epoch: Current epoch number.\n \"\"\"\n for (reward_mode, reward_stats_by_temp) in reward_stats_by_mode.items():\n for (temperature, reward_stats) in reward_stats_by_temp.items():\n for (stat_name, stat) in reward_stats.items():\n summary_writer.scalar(\n \"eval/{reward_mode}_reward_{stat_name}/\"\n \"temperature_{temperature}\".format(reward_mode=reward_mode,\n stat_name=stat_name,\n temperature=temperature),\n stat, step=epoch)\n logging.info(\"Epoch [% 6d] Policy Evaluation (%s reward) \"\n \"[temperature %.2f] = %10.2f (+/- %.2f)\",\n epoch, reward_mode, temperature,\n reward_stats[\"mean\"], reward_stats[\"std\"])\n\n\[email protected](blacklist=[\"output_dir\"])\ndef training_loop(\n env,\n eval_env,\n env_name,\n policy_and_value_net_fn,\n policy_and_value_optimizer_fn,\n output_dir,\n epochs=EPOCHS,\n n_optimizer_steps=N_OPTIMIZER_STEPS,\n print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,\n target_kl=0.01,\n boundary=20,\n max_timestep=None,\n max_timestep_eval=20000,\n random_seed=None,\n gamma=GAMMA,\n lambda_=LAMBDA,\n epsilon=EPSILON,\n c1=1.0,\n c2=0.01,\n eval_every_n=1000,\n done_frac_for_policy_save=0.5,\n enable_early_stopping=True,\n n_evals=1,\n len_history_for_policy=4,\n eval_temperatures=(1.0, 0.5),\n):\n \"\"\"Runs the training loop for PPO, with fixed policy and value nets.\n\n Args:\n env: gym.Env to use for training.\n eval_env: gym.Env to use for evaluation.\n env_name: Name of the environment.\n policy_and_value_net_fn: Function defining the policy and value network.\n policy_and_value_optimizer_fn: Function defining the optimizer.\n output_dir: Output dir.\n epochs: Number of epochs to run for.\n n_optimizer_steps: Number of optimizer steps.\n print_every_optimizer_steps: How often to log during the policy optimization\n process.\n target_kl: Policy iteration early stopping.\n boundary: We pad trajectories at integer multiples of this number.\n max_timestep: If set to an integer, maximum number of time-steps in\n a trajectory. Used in the collect procedure.\n max_timestep_eval: If set to an integer, maximum number of time-steps in an\n evaluation trajectory. 
Used in the collect procedure.\n random_seed: Random seed.\n gamma: Reward discount factor.\n lambda_: N-step TD-error discount factor in GAE.\n epsilon: Random action probability in epsilon-greedy sampling.\n c1: Value loss coefficient.\n c2: Entropy loss coefficient.\n eval_every_n: How frequently to eval the policy.\n done_frac_for_policy_save: Fraction of the trajectories that should be done\n to checkpoint the policy.\n enable_early_stopping: Whether to enable early stopping.\n n_evals: Number of times to evaluate.\n len_history_for_policy: How much of history to give to the policy.\n eval_temperatures: Sequence of temperatures to try for categorical sampling\n during evaluation.\n \"\"\"\n gfile.makedirs(output_dir)\n\n # Create summary writers and history.\n train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, \"train\"))\n timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, \"timing\"))\n eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, \"eval\"))\n\n train_sw.text(\"env_name\", env_name)\n timing_sw.text(\"env_name\", env_name)\n eval_sw.text(\"env_name\", env_name)\n\n jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)\n\n # Batch Observations Shape = [1, 1] + OBS, because we will eventually call\n # policy and value networks on shape [B, T] +_OBS\n batch_observations_shape = (1, 1) + env.observation_space.shape\n observations_dtype = env.observation_space.dtype\n\n assert isinstance(env.action_space, gym.spaces.Discrete)\n n_actions = env.action_space.n\n\n jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)\n\n # Initialize the policy and value network.\n policy_and_value_net_params, policy_and_value_net_apply = (\n policy_and_value_net_fn(key1, batch_observations_shape,\n observations_dtype, n_actions))\n\n # Maybe restore the policy params. 
If there is nothing to restore, then\n # iteration = 0 and policy_and_value_net_params are returned as is.\n restore, policy_and_value_net_params, iteration = (\n maybe_restore_params(output_dir, policy_and_value_net_params))\n\n if restore:\n logging.info(\"Restored parameters from iteration [%d]\", iteration)\n # We should start from the next iteration.\n iteration += 1\n\n policy_and_value_net_apply = jit(policy_and_value_net_apply)\n\n # Initialize the optimizers.\n policy_and_value_optimizer = (\n policy_and_value_optimizer_fn(policy_and_value_net_params))\n (policy_and_value_opt_state, policy_and_value_opt_update,\n policy_and_value_get_params) = policy_and_value_optimizer\n\n n_trajectories_done = 0\n last_saved_at = 0\n\n logging.info(\"Starting the PPO training loop.\")\n for i in range(iteration, epochs):\n epoch_start_time = time.time()\n\n # Params we'll use to collect the trajectories.\n policy_and_value_net_params = policy_and_value_get_params(\n policy_and_value_opt_state)\n\n # A function to get the policy and value predictions.\n def get_predictions(observations, rng=None):\n \"\"\"Returns log-probs, value predictions and key back.\"\"\"\n key, key1 = jax_random.split(rng, num=2)\n\n log_probs, value_preds = policy_and_value_net_apply(\n observations, policy_and_value_net_params, rng=key1)\n\n return log_probs, value_preds, key\n\n # Evaluate the policy.\n policy_eval_start_time = time.time()\n if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):\n jax_rng_key, key = jax_random.split(jax_rng_key, num=2)\n\n logging.vlog(1, \"Epoch [% 6d] evaluating policy.\", i)\n\n reward_stats = evaluate_policy(\n eval_env,\n get_predictions,\n temperatures=eval_temperatures,\n max_timestep=max_timestep_eval,\n n_evals=n_evals,\n len_history_for_policy=len_history_for_policy,\n rng=key)\n write_eval_reward_summaries(reward_stats, eval_sw, epoch=i)\n policy_eval_time = get_time(policy_eval_start_time)\n\n trajectory_collection_start_time = time.time()\n logging.vlog(1, \"Epoch [% 6d] collecting trajectories.\", i)\n jax_rng_key, key = jax_random.split(jax_rng_key)\n trajs, n_done, timing_info = collect_trajectories(\n env,\n policy_fn=get_predictions,\n n_trajectories=env.batch_size,\n max_timestep=max_timestep,\n rng=key,\n len_history_for_policy=len_history_for_policy,\n reset=(i == 0) or restore,\n epsilon=(10.0 / (i + 10.0))) # this is a different epsilon.\n trajectory_collection_time = get_time(trajectory_collection_start_time)\n\n logging.vlog(1, \"Collecting trajectories took %0.2f msec.\",\n trajectory_collection_time)\n\n avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)\n max_reward = max(np.sum(traj[2]) for traj in trajs)\n min_reward = min(np.sum(traj[2]) for traj in trajs)\n\n train_sw.scalar(\"train/reward_mean_truncated\", avg_reward, step=i)\n\n logging.vlog(1, \"Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s\",\n avg_reward, max_reward, min_reward,\n [float(np.sum(traj[2])) for traj in trajs])\n\n logging.vlog(1,\n \"Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]\",\n float(sum(len(traj[0]) for traj in trajs)) / len(trajs),\n max(len(traj[0]) for traj in trajs),\n min(len(traj[0]) for traj in trajs))\n logging.vlog(2, \"Trajectory Lengths: %s\", [len(traj[0]) for traj in trajs])\n\n padding_start_time = time.time()\n (_, reward_mask, padded_observations, padded_actions,\n padded_rewards, padded_infos) = pad_trajectories(\n trajs, boundary=boundary)\n padding_time = get_time(padding_start_time)\n\n logging.vlog(1, \"Padding 
trajectories took %0.2f msec.\",\n get_time(padding_start_time))\n logging.vlog(1, \"Padded Observations' shape [%s]\",\n str(padded_observations.shape))\n logging.vlog(1, \"Padded Actions' shape [%s]\", str(padded_actions.shape))\n logging.vlog(1, \"Padded Rewards' shape [%s]\", str(padded_rewards.shape))\n\n # Some assertions.\n B, T = padded_actions.shape # pylint: disable=invalid-name\n assert (B, T) == padded_rewards.shape\n assert (B, T) == reward_mask.shape\n assert (B, T + 1) == padded_observations.shape[:2]\n assert (B, T + 1) + env.observation_space.shape == padded_observations.shape\n\n log_prob_recompute_start_time = time.time()\n assert (\"log_prob_actions\" in padded_infos and\n \"value_predictions\" in padded_infos)\n # These are the actual log-probabs and value predictions seen while picking\n # the actions.\n actual_log_probabs_traj = padded_infos[\"log_prob_actions\"]\n actual_value_predictions_traj = padded_infos[\"value_predictions\"]\n\n assert (B, T) == actual_log_probabs_traj.shape[:2]\n A = actual_log_probabs_traj.shape[2] # pylint: disable=invalid-name\n assert (B, T, 1) == actual_value_predictions_traj.shape\n\n # TODO(afrozm): log-probabs doesn't need to be (B, T+1, A) it can do with\n # (B, T, A), so make that change throughout.\n\n # NOTE: We don't have the log-probabs and value-predictions for the last\n # observation, so we re-calculate for everything, but use the original ones\n # for all but the last time-step.\n jax_rng_key, key = jax_random.split(jax_rng_key)\n log_probabs_traj, value_predictions_traj, _ = get_predictions(\n padded_observations, rng=key)\n\n assert (B, T + 1, A) == log_probabs_traj.shape\n assert (B, T + 1, 1) == value_predictions_traj.shape\n\n # Concatenate the last time-step's log-probabs and value predictions to the\n # actual log-probabs and value predictions and use those going forward.\n log_probabs_traj = np.concatenate(\n (actual_log_probabs_traj, log_probabs_traj[:, -1:, :]), axis=1)\n value_predictions_traj = np.concatenate(\n (actual_value_predictions_traj, value_predictions_traj[:, -1:, :]),\n axis=1)\n\n log_prob_recompute_time = get_time(log_prob_recompute_start_time)\n\n # Linear annealing from 0.1 to 0.0\n # epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -\n # (i /\n # (epochs - 1)))\n\n # Constant epsilon.\n epsilon_schedule = epsilon\n\n # Compute value and ppo losses.\n jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)\n logging.vlog(2, \"Starting to compute P&V loss.\")\n loss_compute_start_time = time.time()\n cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (\n combined_loss(\n policy_and_value_net_params,\n log_probabs_traj,\n value_predictions_traj,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon_schedule,\n c1=c1,\n c2=c2,\n rng=key1))\n loss_compute_time = get_time(loss_compute_start_time)\n logging.vlog(\n 1,\n \"Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.\",\n cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,\n get_time(loss_compute_start_time))\n\n jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)\n logging.vlog(1, \"Policy and Value Optimization\")\n optimization_start_time = time.time()\n keys = jax_random.split(key1, num=n_optimizer_steps)\n for j in range(n_optimizer_steps):\n k1, k2, k3 = jax_random.split(keys[j], num=3)\n t = time.time()\n # Update the optimizer state.\n policy_and_value_opt_state = 
policy_and_value_opt_step(\n j,\n policy_and_value_opt_state,\n policy_and_value_opt_update,\n policy_and_value_get_params,\n policy_and_value_net_apply,\n log_probabs_traj,\n value_predictions_traj,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n c1=c1,\n c2=c2,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon_schedule,\n rng=k1)\n\n # Compute the approx KL for early stopping.\n new_policy_and_value_net_params = policy_and_value_get_params(\n policy_and_value_opt_state)\n\n log_probab_actions_new, _ = policy_and_value_net_apply(\n padded_observations, new_policy_and_value_net_params, rng=k2)\n\n approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,\n reward_mask)\n\n early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl\n if early_stopping:\n logging.vlog(\n 1, \"Early stopping policy and value optimization at iter: %d, \"\n \"with approx_kl: %0.2f\", j, approx_kl)\n # We don't return right-away, we want the below to execute on the last\n # iteration.\n\n t2 = time.time()\n if (((j + 1) % print_every_optimizer_steps == 0) or\n (j == n_optimizer_steps - 1) or early_stopping):\n # Compute and log the loss.\n (loss_combined, loss_ppo, loss_value, entropy_bonus) = (\n combined_loss(\n new_policy_and_value_net_params,\n log_probabs_traj,\n value_predictions_traj,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon_schedule,\n c1=c1,\n c2=c2,\n rng=k3))\n logging.vlog(1, \"One Policy and Value grad desc took: %0.2f msec\",\n get_time(t, t2))\n logging.vlog(\n 1, \"Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->\"\n \" [%10.2f(%10.2f,%10.2f,%10.2f)]\", cur_combined_loss, loss_combined,\n loss_value, loss_ppo, entropy_bonus)\n\n if early_stopping:\n break\n\n optimization_time = get_time(optimization_start_time)\n\n logging.vlog(\n 1, \"Total Combined Loss reduction [%0.2f]%%\",\n (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))\n\n # Save parameters every time we see the end of at least a fraction of batch\n # number of trajectories that are done (not completed -- completed includes\n # truncated and done).\n # Also don't save too frequently, enforce a minimum gap.\n # Or if this is the last iteration.\n policy_save_start_time = time.time()\n n_trajectories_done += n_done\n # TODO(afrozm): Refactor to trax.save_state.\n if (((n_trajectories_done >= done_frac_for_policy_save * env.batch_size) and\n (i - last_saved_at > eval_every_n) and\n (((i + 1) % eval_every_n == 0))) or (i == epochs - 1)):\n logging.vlog(1, \"Epoch [% 6d] saving model.\", i)\n old_model_files = gfile.glob(os.path.join(output_dir, \"model-??????.pkl\"))\n params_file = os.path.join(output_dir, \"model-%06d.pkl\" % i)\n with gfile.GFile(params_file, \"wb\") as f:\n pickle.dump(policy_and_value_net_params, f)\n # Remove the old model files.\n for path in old_model_files:\n gfile.remove(path)\n # Reset this number.\n n_trajectories_done = 0\n last_saved_at = i\n policy_save_time = get_time(policy_save_start_time)\n\n epoch_time = get_time(epoch_start_time)\n\n logging.info(\n \"Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined\"\n \" Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]\", i, min_reward,\n max_reward, avg_reward, loss_combined, loss_value, loss_ppo,\n entropy_bonus)\n\n timing_dict = {\n \"epoch\": epoch_time,\n \"policy_eval\": policy_eval_time,\n \"trajectory_collection\": trajectory_collection_time,\n 
\"padding\": padding_time,\n \"log_prob_recompute\": log_prob_recompute_time,\n \"loss_compute\": loss_compute_time,\n \"optimization\": optimization_time,\n \"policy_save\": policy_save_time,\n }\n\n timing_dict.update(timing_info)\n\n for k, v in timing_dict.items():\n timing_sw.scalar(\"timing/%s\" % k, v, step=i)\n\n max_key_len = max(len(k) for k in timing_dict)\n timing_info_list = [\n \"%s : % 10.2f\" % (k.rjust(max_key_len + 1), v)\n for k, v in sorted(timing_dict.items())\n ]\n logging.info(\"Epoch [% 6d], Timings: \\n%s\", i, \"\\n\".join(timing_info_list))\n\n # Reset restore.\n restore = False\n\n # Flush summary writers once in a while.\n if (i + 1) % 1000 == 0 or i == epochs - 1:\n train_sw.flush()\n timing_sw.flush()\n eval_sw.flush()\n", "import keras\nimport numpy as np\nfrom data.vocab import TextEncoder\n\n\ndef _get_pos_encoding_matrix(max_len: int, d_emb: int) -> np.array:\n pos_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)] if pos != 0 else np.zeros(d_emb) for pos in\n range(max_len)], dtype=np.float32)\n pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i\n pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1\n return pos_enc\n\n\n# NOTE that for vocab_size you should also add special_count\nclass Embedding(keras.layers.Layer):\n def __init__(self, output_dim: int = 768, dropout: float = 0.1, vocab_size: int = 30000 + TextEncoder.SPECIAL_COUNT,\n max_len: int = 512, trainable_pos_embedding: bool = True,\n use_one_dropout: bool = False, **kwargs):\n super().__init__(**kwargs)\n self.max_len = max_len\n self.use_one_dropout = use_one_dropout\n self.output_dim = output_dim\n self.dropout = dropout\n self.vocab_size = vocab_size\n self.trainable_pos_embedding = trainable_pos_embedding\n\n self.segment_emb = keras.layers.Embedding(TextEncoder.NUM_SEGMENTS, output_dim, input_length=max_len,\n name='SegmentEmbedding')\n if not trainable_pos_embedding:\n self.pos_emb = keras.layers.Embedding(max_len, output_dim, trainable=False, input_length=max_len,\n name='PositionEmbedding',\n weights=[_get_pos_encoding_matrix(max_len, output_dim)])\n else:\n self.pos_emb = keras.layers.Embedding(max_len, output_dim, input_length=max_len, name='PositionEmbedding')\n self.token_emb = keras.layers.Embedding(vocab_size, output_dim, input_length=max_len, name='TokenEmbedding')\n self.embedding_dropout = keras.layers.Dropout(dropout, name='EmbeddingDropOut')\n self.add_embeddings = keras.layers.Add(name='AddEmbeddings')\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][0], input_shape[0][1], self.output_dim\n\n def get_config(self):\n config = {\n 'max_len': self.max_len,\n 'use_one_dropout': self.use_one_dropout,\n 'output_dim': self.output_dim,\n 'dropout': self.dropout,\n 'vocab_size': self.vocab_size,\n 'trainable_pos_embedding': self.trainable_pos_embedding,\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def __call__(self, inputs, **kwargs):\n tokens, segment_ids, pos_ids = inputs\n segment_embedding = self.segment_emb(segment_ids)\n pos_embedding = self.pos_emb(pos_ids)\n token_embedding = self.token_emb(tokens)\n if self.use_one_dropout:\n return self.embedding_dropout(self.add_embeddings([segment_embedding, pos_embedding, token_embedding]))\n return self.add_embeddings([self.embedding_dropout(segment_embedding), self.embedding_dropout(pos_embedding),\n self.embedding_dropout(token_embedding)])\n" ]
[ [ "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.GFile", "tensorflow.io.gfile.remove", "numpy.ones_like", "numpy.std", "numpy.mean" ], [ "numpy.sin", "numpy.power", "numpy.cos", "numpy.zeros" ] ]
viitormiiguel/AnalysisFinancial
[ "21d19c4eb200655ffd8605d4c38ab280a4552384" ]
[ "Results/ResultUniLex.py" ]
[ "import nltk\r\nimport csv\r\nimport datetime\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nnow = datetime.datetime.now()\r\ntoday = now.strftime(\"%Y-%m-%d\")\r\n\r\ndInfoMoney = 'C:/Users/vitor/Documents/GetDataset/Infomoney/'\r\ndInvesting = 'C:/Users/vitor/Documents/GetDataset/Investing.com/'\r\ndTrading = 'C:/Users/vitor/Documents/GetDataset/TradingView/'\r\n\r\n# Resultados Investing.com\r\nr_investing = open(dInvesting + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')\r\n# r_investing = open(dInvesting + today +'/polarityUniLexNo.csv', 'r', encoding='utf8')\r\nposInv = 0 \r\nneuInv = 0 \r\nnegInv = 0\r\nfor t in r_investing.readlines():\r\n if 'Positivo' in t:\r\n posInv += 1\r\n if 'Neutro' in t:\r\n neuInv += 1\r\n if 'Negativo' in t:\r\n negInv += 1\r\nprint('Investing Pos ', posInv)\r\nprint('Investing Neu ', neuInv)\r\nprint('Investing Neg ', negInv)\r\n\r\n# Resultados InfoMoney\r\nr_infomoney = open(dInfoMoney + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')\r\n# r_infomoney = open(dInfoMoney + today +'/polarityUniLexNo.csv', 'r', encoding='utf8')\r\nposInf = 0\r\nneuInf = 0 \r\nnegInf = 0\r\nfor t in r_infomoney.readlines():\r\n if 'Positivo' in t:\r\n posInf += 1\r\n if 'Neutro' in t:\r\n neuInf += 1\r\n if 'Negativo' in t:\r\n negInf += 1\r\nprint('InfoMoney Pos ', posInf)\r\nprint('InfoMoney Neu ', neuInf)\r\nprint('InfoMoney Neg ', negInf)\r\n\r\n# Resultados TradingView\r\nr_tradingview = open(dTrading + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')\r\n# r_tradingview = open(dTrading + today +'/polarityUniLexNo.csv', 'r', encoding='utf8')\r\nposTrd = 0\r\nneuTrd = 0 \r\nnegTrd = 0\r\nfor t in r_tradingview.readlines():\r\n if 'Positivo' in t:\r\n posTrd += 1\r\n if 'Neutro' in t:\r\n neuTrd += 1\r\n if 'Negativo' in t:\r\n negTrd += 1\r\nprint('TradingView Pos ', posTrd)\r\nprint('TradingView Neu ', neuTrd)\r\nprint('TradingView Neg ', negTrd)\r\n\r\nraw_data = {'Fonte de Dados': ['Investing.com', 'InfoMoney', 'TradingView'],\r\n 'Pos': [posInv, posInf, posTrd],\r\n 'Neu': [neuInv, neuInf, neuTrd],\r\n 'Neg': [negInv, negInf, negTrd]}\r\ndf = pd.DataFrame(raw_data, columns = ['Fonte de Dados', 'Pos', 'Neu', 'Neg'])\r\ndf\r\n\r\n# Setting the positions and width for the bars\r\npos = list(range(len(df['Pos']))) \r\nwidth = 0.25 \r\nfig, ax = plt.subplots(figsize=(10,5))\r\n\r\n# Create a bar with pre_score data, # in position pos,\r\nplt.bar(pos, df['Pos'], width, alpha=0.5, color='#EE3224', label=df['Fonte de Dados'][0]) \r\n\r\n# Create a bar with mid_score data, # in position pos + some width buffer,\r\nplt.bar([p + width for p in pos], df['Neu'], width, alpha=0.5, color='#F78F1E', label=df['Fonte de Dados'][1]) \r\n\r\n# Create a bar with post_score data, # in position pos + some width buffer,\r\nplt.bar([p + width*2 for p in pos], df['Neg'], width, alpha=0.5, color='#FFC222', label=df['Fonte de Dados'][2]) \r\n\r\nax.set_title(\"OpLexicon sem Pré-Processamento\")\r\nax.set_ylabel('N° de Textos')\r\nax.set_xticks([p + 1 * width for p in pos])\r\nax.set_xticklabels(df['Fonte de Dados'])\r\n\r\nplt.xlim(min(pos)-width, max(pos)+width*4)\r\nplt.ylim([0, max(df['Pos'] + df['Neu'] + df['Neg'])] )\r\n\r\nplt.legend(['Positivo', 'Neutro', 'Negativo'], loc='upper left')\r\nplt.grid()\r\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "pandas.DataFrame", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.bar" ] ]
gruber-sciencelab/MAPP
[ "81563f676b284c5b283a193a698ce618c044d3b5" ]
[ "modules/REPORT_RESULTS/scripts/select-motifs.py" ]
[ "\"\"\"\n##############################################################################\n#\n# Select top N distinct motifs with highest (statistically significant)\n# activity Z-score (for every site separately)\n#\n# AUTHOR: Maciej_Bak\n# AFFILIATION: University_of_Basel\n# AFFILIATION: Swiss_Institute_of_Bioinformatics\n# CONTACT: [email protected]\n# CREATED: 04-06-2020\n# LICENSE: Apache_2.0\n#\n##############################################################################\n\"\"\"\n\n# imports\nimport time\nimport logging\nimport logging.handlers\nfrom argparse import ArgumentParser, RawTextHelpFormatter\nimport os\nimport pandas as pd\n\n\ndef parse_arguments():\n \"\"\"Parser of the command-line arguments.\"\"\"\n parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n dest=\"verbosity\",\n choices=(\"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"CRITICAL\"),\n default=\"ERROR\",\n help=\"Verbosity/Log level. Defaults to ERROR\",\n )\n parser.add_argument(\n \"-l\", \"--logfile\", dest=\"logfile\", help=\"Store log to this file.\"\n )\n parser.add_argument(\n \"--topN-motifs\",\n dest=\"N\",\n default=1000000, # by default: effectively select all stat. sign. motifs\n required=False,\n help=\"Number of top motifs to select.\",\n )\n parser.add_argument(\n \"--infile-splicing-3ss\",\n dest=\"results_3ss\",\n required=True,\n help=\"Annotated results table (3ss).\",\n )\n parser.add_argument(\n \"--infile-splicing-5ss\",\n dest=\"results_5ss\",\n required=True,\n help=\"Annotated results table (5ss).\",\n )\n parser.add_argument(\n \"--infile-polyadenylation-pas\",\n dest=\"results_pas\",\n required=True,\n help=\"Annotated results table (pas).\",\n )\n parser.add_argument(\n \"--outfile-splicing-3ss-motifs\",\n dest=\"motifs_3ss\",\n required=True,\n help=\"Path for the text file with top motifs (3ss).\",\n )\n parser.add_argument(\n \"--outfile-splicing-5ss-motifs\",\n dest=\"motifs_5ss\",\n required=True,\n help=\"Path for the text file with top motifs (5ss).\",\n )\n parser.add_argument(\n \"--outfile-polyadenylation-pas-motifs\",\n dest=\"motifs_pas\",\n required=True,\n help=\"Path for the text file with top motifs (pas).\",\n )\n return parser\n\n\n##############################################################################\n\n\ndef main():\n \"\"\"Main body of the script.\"\"\"\n\n df = pd.read_csv(options.results_3ss, sep=\"\\t\", index_col=0)\n df = df[df[\"significance-marker\"]]\n motifs = []\n for ID, row in df.iterrows():\n if len(motifs) == int(options.N):\n break\n m = ID.split(\"|\")[-1]\n if m not in motifs:\n motifs.append(m)\n with open(options.motifs_3ss, \"w\") as f:\n for m in motifs:\n f.write(m + os.linesep)\n\n df = pd.read_csv(options.results_5ss, sep=\"\\t\", index_col=0)\n df = df[df[\"significance-marker\"]]\n motifs = []\n for ID, row in df.iterrows():\n if len(motifs) == int(options.N):\n break\n m = ID.split(\"|\")[-1]\n if m not in motifs:\n motifs.append(m)\n with open(options.motifs_5ss, \"w\") as f:\n for m in motifs:\n f.write(m + os.linesep)\n\n df = pd.read_csv(options.results_pas, sep=\"\\t\", index_col=0)\n df = df[df[\"significance-marker\"]]\n motifs = []\n for ID, row in df.iterrows():\n if len(motifs) == int(options.N):\n break\n m = ID.split(\"|\")[-1]\n if m not in motifs:\n motifs.append(m)\n with open(options.motifs_pas, \"w\") as f:\n for m in motifs:\n f.write(m + 
os.linesep)\n\n\n##############################################################################\n\nif __name__ == \"__main__\":\n\n try:\n # parse the command-line arguments\n options = parse_arguments().parse_args()\n\n # set up logging during the execution\n formatter = logging.Formatter(\n fmt=\"[%(asctime)s] %(levelname)s - %(message)s\",\n datefmt=\"%d-%b-%Y %H:%M:%S\",\n )\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n logger = logging.getLogger(\"logger\")\n logger.setLevel(logging.getLevelName(options.verbosity))\n logger.addHandler(console_handler)\n if options.logfile is not None:\n logfile_handler = logging.handlers.RotatingFileHandler(\n options.logfile, maxBytes=50000, backupCount=2\n )\n logfile_handler.setFormatter(formatter)\n logger.addHandler(logfile_handler)\n\n # execute the body of the script\n start_time = time.time()\n logger.info(\"Starting script\")\n main()\n seconds = time.time() - start_time\n\n # log the execution time\n minutes, seconds = divmod(seconds, 60)\n hours, minutes = divmod(minutes, 60)\n logger.info(\n \"Successfully finished in {hours}h:{minutes}m:{seconds}s\",\n hours=int(hours),\n minutes=int(minutes),\n seconds=int(seconds) if seconds > 1.0 else 1,\n )\n # log the exception in case it happens\n except Exception as e:\n logger.exception(str(e))\n raise e\n" ]
[ [ "pandas.read_csv" ] ]
KevBarbour/cryptobot
[ "57239c83ca5dd84d2a0e273f20782cf608ce99ba" ]
[ "top10losers/top10losers.py" ]
[ "#!/usr/bin/env python\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nfrom twython import Twython\nimport numpy as np\n\napiKey = '...'\napiSecret = '...'\naccessToken = '...'\naccessTokenSecret = '...'\n\n#BeautifulSoup scraping algorythm\nurl = 'https://coinmarketcap.com'\nsoup = BeautifulSoup(requests.get(url).text, 'lxml')\nL=[]\n#H =[\"Rank\",\"Name\",\"M Cap\",\"$/1\", \"HURR\", \"DURR\", \"24 hr\"] \nF=0\n\nfor tr in soup.select('#currencies tr'):\n if not tr.select('td'):\n continue\n\n for i, td in enumerate(tr.select('td')[:7]) :\n txt = td.text.replace('\\n',' ').replace('*', '').replace('%','').replace('.com','').replace('chain','').replace('coin','').strip()\n L.append(txt)\n \n #dictates how many lines will be read\n F=F+1 \n if F>99:\n break\n \n #reshapes array to only include necessary columns and re orders them\nA = np.reshape(L, (100,7)) \nPerm = [1,3,6,2,4,5,0]\nA = A[:, Perm]\nA = np.delete(A, (1,3,4,5,6), 1)\n\n#sorting array based on percent change\nA = sorted(A,key=lambda x: (float(x[1])))\nA = A[:10]\n\n#write table to a python file and re reads it, possibly poor method\nwith open(\"output10losers.txt\", \"w\") as txt_file:\n for line in A:\n txt_file.write(\"#\" + \" \".join(line) + \"%\" + \"\\n\" )\n\nT = open(\"output10losers.txt\", \"r\")\n\nfinaltweet = T.read()\ntweetStr = \"Top 10 #Crypto Losers 24hrs:\" + \"\\n\" + finaltweet\n\n#twitter API commands\napi = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)\napi.update_status(status=tweetStr)\nprint(\"Tweeted: \" + tweetStr)\n\n" ]
[ [ "numpy.reshape", "numpy.delete" ] ]
threefoldo/allennlp
[ "9fcc79566cc148cce9f967a7962ac03bc300f011" ]
[ "allennlp/nn/util.py" ]
[ "\"\"\"\nAssorted utilities for working with neural networks in AllenNLP.\n\"\"\"\n# pylint: disable=too-many-lines\nfrom collections import defaultdict\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar\nimport logging\nimport math\nimport warnings\n\nimport torch\n\nfrom allennlp.common.checks import ConfigurationError\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nT = TypeVar('T')\n\n\ndef has_tensor(obj) -> bool:\n \"\"\"\n Given a possibly complex data structure,\n check if it has any torch.Tensors in it.\n \"\"\"\n if isinstance(obj, torch.Tensor):\n return True\n elif isinstance(obj, dict):\n return any(has_tensor(value) for value in obj.values())\n elif isinstance(obj, (list, tuple)):\n return any(has_tensor(item) for item in obj)\n else:\n return False\n\n\ndef move_to_device(obj, cuda_device: int):\n \"\"\"\n Given a structure (possibly) containing Tensors on the CPU,\n move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).\n \"\"\"\n if cuda_device < 0 or not has_tensor(obj):\n return obj\n elif isinstance(obj, torch.Tensor):\n return obj.cuda(cuda_device)\n elif isinstance(obj, dict):\n return {key: move_to_device(value, cuda_device) for key, value in obj.items()}\n elif isinstance(obj, list):\n return [move_to_device(item, cuda_device) for item in obj]\n elif isinstance(obj, tuple):\n return tuple([move_to_device(item, cuda_device) for item in obj])\n else:\n return obj\n\n\ndef batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],\n remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:\n \"\"\"\n Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,\n and returns a single dictionary with all tensors with the same key batched together.\n\n Parameters\n ----------\n tensor_dicts : ``List[Dict[str, torch.Tensor]]``\n The list of tensor dictionaries to batch.\n remove_trailing_dimension : ``bool``\n If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being\n batched, and remove it if we find it.\n \"\"\"\n key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)\n for tensor_dict in tensor_dicts:\n for key, tensor in tensor_dict.items():\n key_to_tensors[key].append(tensor)\n batched_tensors = {}\n for key, tensor_list in key_to_tensors.items():\n batched_tensor = torch.stack(tensor_list)\n if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):\n batched_tensor = batched_tensor.squeeze(-1)\n batched_tensors[key] = batched_tensor\n return batched_tensors\n\n\ndef get_lengths_from_binary_sequence_mask(mask: torch.Tensor):\n \"\"\"\n Compute sequence lengths for each batch element in a tensor using a\n binary mask.\n\n Parameters\n ----------\n mask : torch.Tensor, required.\n A 2D binary mask of shape (batch_size, sequence_length) to\n calculate the per-batch sequence lengths from.\n\n Returns\n -------\n A torch.LongTensor of shape (batch_size,) representing the lengths\n of the sequences in the batch.\n \"\"\"\n return mask.long().sum(-1)\n\n\ndef get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:\n \"\"\"\n Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch\n element, this function returns a ``(batch_size, max_length)`` mask variable. 
For example, if\n our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return\n ``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.\n\n We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``\n because it lets us avoid finding the max, then copying that value from the GPU to the CPU so\n that we can use it to construct a new tensor.\n \"\"\"\n # (batch_size, max_length)\n ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)\n range_tensor = ones.cumsum(dim=1)\n return (sequence_lengths.unsqueeze(1) >= range_tensor).long()\n\n\ndef sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):\n \"\"\"\n Sort a batch first tensor by some specified lengths.\n\n Parameters\n ----------\n tensor : torch.FloatTensor, required.\n A batch first Pytorch tensor.\n sequence_lengths : torch.LongTensor, required.\n A tensor representing the lengths of some dimension of the tensor which\n we want to sort by.\n\n Returns\n -------\n sorted_tensor : torch.FloatTensor\n The original tensor sorted along the batch dimension with respect to sequence_lengths.\n sorted_sequence_lengths : torch.LongTensor\n The original sequence_lengths sorted by decreasing size.\n restoration_indices : torch.LongTensor\n Indices into the sorted_tensor such that\n ``sorted_tensor.index_select(0, restoration_indices) == original_tensor``\n permuation_index : torch.LongTensor\n The indices used to sort the tensor. This is useful if you want to sort many\n tensors using the same ordering.\n \"\"\"\n\n if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):\n raise ConfigurationError(\"Both the tensor and sequence lengths must be torch.Tensors.\")\n\n sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)\n sorted_tensor = tensor.index_select(0, permutation_index)\n\n index_range = sequence_lengths.new_tensor(torch.arange(0, len(sequence_lengths)))\n # This is the equivalent of zipping with index, sorting by the original\n # sequence lengths and returning the now sorted indices.\n _, reverse_mapping = permutation_index.sort(0, descending=False)\n restoration_indices = index_range.index_select(0, reverse_mapping)\n return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index\n\n\ndef get_final_encoder_states(encoder_outputs: torch.Tensor,\n mask: torch.Tensor,\n bidirectional: bool = False) -> torch.Tensor:\n \"\"\"\n Given the output from a ``Seq2SeqEncoder``, with shape ``(batch_size, sequence_length,\n encoding_dim)``, this method returns the final hidden state for each element of the batch,\n giving a tensor of shape ``(batch_size, encoding_dim)``. This is not as simple as\n ``encoder_outputs[:, -1]``, because the sequences could have different lengths. We use the\n mask (which has shape ``(batch_size, sequence_length)``) to find the final state for each batch\n instance.\n\n Additionally, if ``bidirectional`` is ``True``, we will split the final dimension of the\n ``encoder_outputs`` into two and assume that the first half is for the forward direction of the\n encoder and the second half is for the backward direction. We will concatenate the last state\n for each encoder dimension, giving ``encoder_outputs[:, -1, :encoding_dim/2]`` concated with\n ``encoder_outputs[:, 0, encoding_dim/2:]``.\n \"\"\"\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). 
We\n # are assuming sequences are right padded.\n # Shape: (batch_size,)\n last_word_indices = mask.sum(1).long() - 1\n batch_size, _, encoder_output_dim = encoder_outputs.size()\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\n # Shape: (batch_size, 1, encoder_output_dim)\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\n if bidirectional:\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\n return final_encoder_output\n\n\ndef get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):\n \"\"\"\n Computes and returns an element-wise dropout mask for a given tensor, where\n each element in the mask is dropped out with probability dropout_probability.\n Note that the mask is NOT applied to the tensor - the tensor is passed to retain\n the correct CUDA tensor type for the mask.\n\n Parameters\n ----------\n dropout_probability : float, required.\n Probability of dropping a dimension of the input.\n tensor_for_masking : torch.Tensor, required.\n\n\n Returns\n -------\n A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).\n This scaling ensures expected values and variances of the output of applying this mask\n and the original tensor are the same.\n \"\"\"\n binary_mask = tensor_for_masking.new_tensor(torch.rand(tensor_for_masking.size()) > dropout_probability)\n # Scale mask by 1/keep_prob to preserve output statistics.\n dropout_mask = binary_mask.float().div(1.0 - dropout_probability)\n return dropout_mask\n\n\ndef masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:\n \"\"\"\n ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be\n masked. This performs a softmax on just the non-masked portions of ``vector``. Passing\n ``None`` in for the mask is also acceptable; you'll just get a regular softmax.\n\n ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is\n broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will\n unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,\n do it yourself before passing the mask into this function.\n\n In the case that the input vector is completely masked, this function returns an array\n of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model\n that uses categorical cross-entropy loss.\n \"\"\"\n if mask is None:\n result = torch.nn.functional.softmax(vector, dim=dim)\n else:\n mask = mask.float()\n while mask.dim() < vector.dim():\n mask = mask.unsqueeze(1)\n # To limit numerical errors from large vector elements outside the mask, we zero these out.\n result = torch.nn.functional.softmax(vector * mask, dim=dim)\n result = result * mask\n result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)\n return result\n\n\ndef masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:\n \"\"\"\n ``torch.nn.functional.log_softmax(vector)`` does not work if some elements of ``vector`` should be\n masked. This performs a log_softmax on just the non-masked portions of ``vector``. 
Passing\n ``None`` in for the mask is also acceptable; you'll just get a regular log_softmax.\n\n ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is\n broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will\n unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,\n do it yourself before passing the mask into this function.\n\n In the case that the input vector is completely masked, the return value of this function is\n arbitrary, but not ``nan``. You should be masking the result of whatever computation comes out\n of this in that case, anyway, so the specific values returned shouldn't matter. Also, the way\n that we deal with this case relies on having single-precision floats; mixing half-precision\n floats with fully-masked vectors will likely give you ``nans``.\n\n If your logits are all extremely negative (i.e., the max value in your logit vector is -50 or\n lower), the way we handle masking here could mess you up. But if you've got logit values that\n extreme, you've got bigger problems than this.\n \"\"\"\n if mask is not None:\n mask = mask.float()\n while mask.dim() < vector.dim():\n mask = mask.unsqueeze(1)\n # vector + mask.log() is an easy way to zero out masked elements in logspace, but it\n # results in nans when the whole vector is masked. We need a very small value instead of a\n # zero in the mask for these cases. log(1 + 1e-45) is still basically 0, so we can safely\n # just add 1e-45 before calling mask.log(). We use 1e-45 because 1e-46 is so small it\n # becomes 0 - this is just the smallest value we can actually use.\n vector = vector + (mask + 1e-45).log()\n return torch.nn.functional.log_softmax(vector, dim=dim)\n\n\ndef masked_max(vector: torch.Tensor,\n mask: torch.Tensor,\n dim: int,\n keepdim: bool = False,\n min_val: float = -1e7) -> torch.Tensor:\n \"\"\"\n To calculate max along certain dimensions on masked values\n\n Parameters\n ----------\n vector : ``torch.Tensor``\n The vector to calculate max, assume unmasked parts are already zeros\n mask : ``torch.Tensor``\n The mask of the vector. It must be broadcastable with vector.\n dim : ``int``\n The dimension to calculate max\n keepdim : ``bool``\n Whether to keep dimension\n min_val : ``float``\n The minimal value for paddings\n\n Returns\n -------\n A ``torch.Tensor`` of including the maximum values.\n \"\"\"\n one_minus_mask = (1.0 - mask).byte()\n replaced_vector = vector.masked_fill(one_minus_mask, min_val)\n max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)\n return max_value\n\n\ndef masked_mean(vector: torch.Tensor,\n mask: torch.Tensor,\n dim: int,\n keepdim: bool = False,\n eps: float = 1e-8) -> torch.Tensor:\n \"\"\"\n To calculate mean along certain dimensions on masked values\n\n Parameters\n ----------\n vector : ``torch.Tensor``\n The vector to calculate mean.\n mask : ``torch.Tensor``\n The mask of the vector. 
It must be broadcastable with vector.\n dim : ``int``\n The dimension to calculate mean\n keepdim : ``bool``\n Whether to keep dimension\n eps : ``float``\n A small value to avoid zero division problem.\n\n Returns\n -------\n A ``torch.Tensor`` of including the mean values.\n \"\"\"\n one_minus_mask = (1.0 - mask).byte()\n replaced_vector = vector.masked_fill(one_minus_mask, 0.0)\n\n value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)\n value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim)\n return value_sum / value_count.clamp(min=eps)\n\n\ndef viterbi_decode(tag_sequence: torch.Tensor,\n transition_matrix: torch.Tensor,\n tag_observations: Optional[List[int]] = None):\n \"\"\"\n Perform Viterbi decoding in log space over a sequence given a transition matrix\n specifying pairwise (transition) potentials between tags and a matrix of shape\n (sequence_length, num_tags) specifying unary potentials for possible tags per\n timestep.\n\n Parameters\n ----------\n tag_sequence : torch.Tensor, required.\n A tensor of shape (sequence_length, num_tags) representing scores for\n a set of tags over a given sequence.\n transition_matrix : torch.Tensor, required.\n A tensor of shape (num_tags, num_tags) representing the binary potentials\n for transitioning between a given pair of tags.\n tag_observations : Optional[List[int]], optional, (default = None)\n A list of length ``sequence_length`` containing the class ids of observed\n elements in the sequence, with unobserved elements being set to -1. Note that\n it is possible to provide evidence which results in degenerate labellings if\n the sequences of tags you provide as evidence cannot transition between each\n other, or those transitions are extremely unlikely. In this situation we log a\n warning, but the responsibility for providing self-consistent evidence ultimately\n lies with the user.\n\n Returns\n -------\n viterbi_path : List[int]\n The tag indices of the maximum likelihood tag sequence.\n viterbi_score : torch.Tensor\n The score of the viterbi path.\n \"\"\"\n sequence_length, num_tags = list(tag_sequence.size())\n if tag_observations:\n if len(tag_observations) != sequence_length:\n raise ConfigurationError(\"Observations were provided, but they were not the same length \"\n \"as the sequence. Found sequence of length: {} and evidence: {}\"\n .format(sequence_length, tag_observations))\n else:\n tag_observations = [-1 for _ in range(sequence_length)]\n\n path_scores = []\n path_indices = []\n\n if tag_observations[0] != -1:\n one_hot = torch.zeros(num_tags)\n one_hot[tag_observations[0]] = 100000.\n path_scores.append(one_hot)\n else:\n path_scores.append(tag_sequence[0, :])\n\n # Evaluate the scores for all possible paths.\n for timestep in range(1, sequence_length):\n # Add pairwise potentials to current scores.\n summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix\n scores, paths = torch.max(summed_potentials, 0)\n\n # If we have an observation for this timestep, use it\n # instead of the distribution over tags.\n observation = tag_observations[timestep]\n # Warn the user if they have passed\n # invalid/extremely unlikely evidence.\n if tag_observations[timestep - 1] != -1:\n if transition_matrix[tag_observations[timestep - 1], observation] < -10000:\n logger.warning(\"The pairwise potential between tags you have passed as \"\n \"observations is extremely unlikely. 
Double check your evidence \"\n \"or transition potentials!\")\n if observation != -1:\n one_hot = torch.zeros(num_tags)\n one_hot[observation] = 100000.\n path_scores.append(one_hot)\n else:\n path_scores.append(tag_sequence[timestep, :] + scores.squeeze())\n path_indices.append(paths.squeeze())\n\n # Construct the most likely sequence backwards.\n viterbi_score, best_path = torch.max(path_scores[-1], 0)\n viterbi_path = [int(best_path.numpy())]\n for backward_timestep in reversed(path_indices):\n viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))\n # Reverse the backward path.\n viterbi_path.reverse()\n return viterbi_path, viterbi_score\n\n\ndef get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],\n num_wrapping_dims: int = 0) -> torch.LongTensor:\n \"\"\"\n Takes the dictionary of tensors produced by a ``TextField`` and returns a mask\n with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields``\n wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields``\n is given by ``num_wrapping_dims``.\n\n If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``.\n If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra\n dimensions, so the shape will be ``(batch_size, ..., num_tokens)``.\n\n There could be several entries in the tensor dictionary with different shapes (e.g., one for\n word ids, one for character ids). In order to get a token mask, we use the tensor in\n the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``,\n if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``,\n and use it for the mask. If instead it has three dimensions, we assume it has shape\n ``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce\n the mask. Most frequently this will be a character id tensor, but it could also be a\n featurized representation of each token, etc.\n\n If the input ``text_field_tensors`` contains the \"mask\" key, this is returned instead of inferring the mask.\n\n TODO(joelgrus): can we change this?\n NOTE: Our functions for generating masks create torch.LongTensors, because using\n torch.ByteTensors makes it easy to run into overflow errors\n when doing mask manipulation, such as summing to get the lengths of sequences - see below.\n >>> mask = torch.ones([260]).byte()\n >>> mask.sum() # equals 260.\n >>> var_mask = torch.autograd.V(mask)\n >>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.\n \"\"\"\n if \"mask\" in text_field_tensors:\n return text_field_tensors[\"mask\"]\n\n tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]\n tensor_dims.sort(key=lambda x: x[0])\n\n smallest_dim = tensor_dims[0][0] - num_wrapping_dims\n if smallest_dim == 2:\n token_tensor = tensor_dims[0][1]\n return (token_tensor != 0).long()\n elif smallest_dim == 3:\n character_tensor = tensor_dims[0][1]\n return ((character_tensor > 0).long().sum(dim=-1) > 0).long()\n else:\n raise ValueError(\"Expected a tensor with dimension 2 or 3, found {}\".format(smallest_dim))\n\n\ndef last_dim_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Takes a tensor with 3 or more dimensions and does a masked softmax over the last dimension. 
We\n assume the tensor has shape ``(batch_size, ..., sequence_length)`` and that the mask (if given)\n has shape ``(batch_size, sequence_length)``.\n\n .. deprecated:: 0.6.1\n ``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` in version\n 0.6.1. It will be removed in version 0.8.\n \"\"\"\n warnings.warn(\"``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` \"\n \"in version 0.6.1. It will be removed in version 0.8.\", DeprecationWarning)\n return masked_softmax(tensor, mask, dim=-1)\n\n\ndef last_dim_log_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Takes a tensor with 3 or more dimensions and does a masked log softmax over the last dimension.\n We assume the tensor has shape ``(batch_size, ..., sequence_length)`` and that the mask (if given)\n has shape ``(batch_size, sequence_length)``.\n\n .. deprecated:: 0.6.1\n ``last_dim_log_softmax`` was deprecated in favor of just using ``masked_log_softmax`` in\n version 0.6.1. It will be removed in version 0.8.\n \"\"\"\n warnings.warn(\"``last_dim_log_softmax`` was deprecated in favor of just using \"\n \"``masked_log_softmax`` in version 0.6.1. It will be removed in version 0.8.\",\n DeprecationWarning)\n return masked_log_softmax(tensor, mask, dim=-1)\n\n\ndef weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an\n \"attention\" vector), and returns a weighted sum of the rows in the matrix. This is the typical\n computation performed after an attention mechanism.\n\n Note that while we call this a \"matrix\" of vectors and an attention \"vector\", we also handle\n higher-order tensors. We always sum over the second-to-last dimension of the \"matrix\", and we\n assume that all dimensions in the \"matrix\" prior to the last dimension are matched in the\n \"vector\". Non-matched dimensions in the \"vector\" must be `directly after the batch dimension`.\n\n For example, say I have a \"matrix\" with dimensions ``(batch_size, num_queries, num_words,\n embedding_dim)``. The attention \"vector\" then must have at least those dimensions, and could\n have more. 
Both:\n\n - ``(batch_size, num_queries, num_words)`` (distribution over words for each query)\n - ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a\n query for each document)\n\n are valid input \"vectors\", producing tensors of shape:\n ``(batch_size, num_queries, embedding_dim)`` and\n ``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.\n \"\"\"\n # We'll special-case a few settings here, where there are efficient (but poorly-named)\n # operations in pytorch that already do the computation we need.\n if attention.dim() == 2 and matrix.dim() == 3:\n return attention.unsqueeze(1).bmm(matrix).squeeze(1)\n if attention.dim() == 3 and matrix.dim() == 3:\n return attention.bmm(matrix)\n if matrix.dim() - 1 < attention.dim():\n expanded_size = list(matrix.size())\n for i in range(attention.dim() - matrix.dim() + 1):\n matrix = matrix.unsqueeze(1)\n expanded_size.insert(i + 1, attention.size(i + 1))\n matrix = matrix.expand(*expanded_size)\n intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix\n return intermediate.sum(dim=-2)\n\n\ndef sequence_cross_entropy_with_logits(logits: torch.FloatTensor,\n targets: torch.LongTensor,\n weights: torch.FloatTensor,\n batch_average: bool = None,\n average: str = \"batch\",\n label_smoothing: float = None) -> torch.FloatTensor:\n \"\"\"\n Computes the cross entropy loss of a sequence, weighted with respect to\n some user provided weights. Note that the weighting here is not the same as\n in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting\n classes; here we are weighting the loss contribution from particular elements\n in the sequence. This allows loss computations for models which use padding.\n\n Parameters\n ----------\n logits : ``torch.FloatTensor``, required.\n A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)\n which contains the unnormalized probability for each class.\n targets : ``torch.LongTensor``, required.\n A ``torch.LongTensor`` of size (batch, sequence_length) which contains the\n index of the true class for each corresponding step.\n weights : ``torch.FloatTensor``, required.\n A ``torch.FloatTensor`` of size (batch, sequence_length)\n batch_average : bool, optional, (default = None).\n A bool indicating whether the loss should be averaged across the batch,\n or returned as a vector of losses per batch element.\n\n .. deprecated:: 0.6.2\n ``batch_average`` was deprecated and replaced with\n the more general ``average`` in version 0.6.2. It will be removed\n in version 0.8.\n\n average: str, optional (default = \"batch\")\n If \"batch\", average the loss across the batches. If \"token\", average\n the loss across each item in the input. If ``None``, return a vector\n of losses per batch element.\n label_smoothing : ``float``, optional (default = None)\n Whether or not to apply label smoothing to the cross-entropy loss.\n For example, with a label smoothing value of 0.2, a 4 class classifcation\n target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was\n the correct label.\n\n Returns\n -------\n A torch.FloatTensor representing the cross entropy loss.\n If ``average==\"batch\"`` or ``average==\"token\"``, the returned loss is a scalar.\n If ``average is None``, the returned loss is a vector of shape (batch_size,).\n\n \"\"\"\n if batch_average is not None:\n # Maintain old behavior\n if batch_average:\n warnings.warn(\"batch_average=True was deprecated and replaced \"\n \"with average='batch' in version 0.6.2. 
It will be \"\n \"removed in version 0.8.\", DeprecationWarning)\n average = \"batch\"\n else:\n warnings.warn(\"batch_average=False was deprecated and replaced \"\n \"with average=None in version 0.6.2. It will be \"\n \"removed in version 0.8.\", DeprecationWarning)\n average = None\n if average not in {None, \"token\", \"batch\"}:\n raise ValueError(\"Got average f{average}, expected one of \"\n \"None, 'token', or 'batch'\")\n\n # shape : (batch * sequence_length, num_classes)\n logits_flat = logits.view(-1, logits.size(-1))\n # shape : (batch * sequence_length, num_classes)\n log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)\n # shape : (batch * max_len, 1)\n targets_flat = targets.view(-1, 1).long()\n\n if label_smoothing is not None and label_smoothing > 0.0:\n num_classes = logits.size(-1)\n smoothing_value = label_smoothing / num_classes\n # Fill all the correct indices with 1 - smoothing value.\n one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)\n smoothed_targets = one_hot_targets + smoothing_value\n negative_log_likelihood_flat = - log_probs_flat * smoothed_targets\n negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)\n else:\n # Contribution to the negative log likelihood only comes from the exact indices\n # of the targets, as the target distributions are one-hot. Here we use torch.gather\n # to extract the indices of the num_classes dimension which contribute to the loss.\n # shape : (batch * sequence_length, 1)\n negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)\n # shape : (batch, sequence_length)\n negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())\n # shape : (batch, sequence_length)\n negative_log_likelihood = negative_log_likelihood * weights.float()\n\n if average == \"batch\":\n # shape : (batch_size,)\n per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)\n num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)\n return per_batch_loss.sum() / num_non_empty_sequences\n elif average == \"token\":\n return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)\n else:\n # shape : (batch_size,)\n per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)\n return per_batch_loss\n\n\ndef replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:\n \"\"\"\n Replaces all masked values in ``tensor`` with ``replace_with``. ``mask`` must be broadcastable\n to the same shape as ``tensor``. We require that ``tensor.dim() == mask.dim()``, as otherwise we\n won't know which dimensions of the mask to unsqueeze.\n\n This just does ``tensor.masked_fill()``, except the pytorch method fills in things with a mask\n value of 1, where we want the opposite. You can do this in your own code with\n ``tensor.masked_fill((1 - mask).byte(), replace_with)``.\n \"\"\"\n if tensor.dim() != mask.dim():\n raise ConfigurationError(\"tensor.dim() (%d) != mask.dim() (%d)\" % (tensor.dim(), mask.dim()))\n return tensor.masked_fill((1 - mask).byte(), replace_with)\n\n\ndef tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:\n \"\"\"\n A check for tensor equality (by value). We make sure that the tensors have the same shape,\n then check all of the entries in the tensor for equality. 
We additionally allow the input\n tensors to be lists or dictionaries, where we then do the above check on every position in the\n list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we\n just defer to their equality check.\n\n This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods\n easier, in a way that's really only intended to be useful for tests.\n \"\"\"\n # pylint: disable=too-many-return-statements\n if isinstance(tensor1, (list, tuple)):\n if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):\n return False\n return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])\n elif isinstance(tensor1, dict):\n if not isinstance(tensor2, dict):\n return False\n if tensor1.keys() != tensor2.keys():\n return False\n return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])\n elif isinstance(tensor1, torch.Tensor):\n if not isinstance(tensor2, torch.Tensor):\n return False\n if tensor1.size() != tensor2.size():\n return False\n return ((tensor1 - tensor2).abs().float() < tolerance).all()\n else:\n try:\n return tensor1 == tensor2\n except RuntimeError:\n print(type(tensor1), type(tensor2))\n raise\n\n\ndef device_mapping(cuda_device: int):\n \"\"\"\n In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),\n you have to supply a `map_location` function. Call this with\n the desired `cuda_device` to get the function that `torch.load()` needs.\n \"\"\"\n\n def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage: # pylint: disable=unused-argument\n if cuda_device >= 0:\n return storage.cuda(cuda_device)\n else:\n return storage\n\n return inner_device_mapping\n\n\ndef combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:\n \"\"\"\n Combines a list of tensors using element-wise operations and concatenation, specified by a\n ``combination`` string. The string refers to (1-indexed) positions in the input tensor list,\n and looks like ``\"1,2,1+2,3-1\"``.\n\n We allow the following kinds of combinations: ``x``, ``x*y``, ``x+y``, ``x-y``, and ``x/y``,\n where ``x`` and ``y`` are positive integers less than or equal to ``len(tensors)``. Each of\n the binary operations is performed elementwise. You can give as many combinations as you want\n in the ``combination`` string. For example, for the input string ``\"1,2,1*2\"``, the result\n would be ``[1;2;1*2]``, as you would expect, where ``[;]`` is concatenation along the last\n dimension.\n\n If you have a fixed, known way to combine tensors that you use in a model, you should probably\n just use something like ``torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])``. 
This\n function adds some complexity that is only necessary if you want the specific combination used\n to be `configurable`.\n\n If you want to do any element-wise operations, the tensors involved in each element-wise\n operation must have the same shape.\n\n This function also accepts ``x`` and ``y`` in place of ``1`` and ``2`` in the combination\n string.\n \"\"\"\n if len(tensors) > 9:\n raise ConfigurationError(\"Double-digit tensor lists not currently supported\")\n combination = combination.replace('x', '1').replace('y', '2')\n to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(',')]\n return torch.cat(to_concatenate, dim=-1)\n\n\ndef _rindex(sequence: Sequence[T], obj: T) -> int:\n \"\"\"\n Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a\n ValueError if there is no such item.\n\n Parameters\n ----------\n sequence : ``Sequence[T]``\n obj : ``T``\n\n Returns\n -------\n zero-based index associated to the position of the last item equal to obj\n \"\"\"\n for i in range(len(sequence) - 1, -1, -1):\n if sequence[i] == obj:\n return i\n\n raise ValueError(f\"Unable to find {obj} in sequence {sequence}.\")\n\n\ndef _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:\n if combination.isdigit():\n index = int(combination) - 1\n return tensors[index]\n else:\n if len(combination) != 3:\n raise ConfigurationError(\"Invalid combination: \" + combination)\n first_tensor = _get_combination(combination[0], tensors)\n second_tensor = _get_combination(combination[2], tensors)\n operation = combination[1]\n if operation == '*':\n return first_tensor * second_tensor\n elif operation == '/':\n return first_tensor / second_tensor\n elif operation == '+':\n return first_tensor + second_tensor\n elif operation == '-':\n return first_tensor - second_tensor\n else:\n raise ConfigurationError(\"Invalid operation: \" + operation)\n\n\ndef combine_tensors_and_multiply(combination: str,\n tensors: List[torch.Tensor],\n weights: torch.nn.Parameter) -> torch.Tensor:\n \"\"\"\n Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.\n This is a separate function from ``combine_tensors`` because we try to avoid instantiating\n large intermediate tensors during the combination, which is possible because we know that we're\n going to be multiplying by a weight vector in the end.\n\n Parameters\n ----------\n combination : ``str``\n Same as in :func:`combine_tensors`\n tensors : ``List[torch.Tensor]``\n A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)\n positions in this list of tensors. These tensors are all expected to have either three or\n four dimensions, with the final dimension being an embedding. If there are four\n dimensions, one of them must have length 1.\n weights : ``torch.nn.Parameter``\n A vector of weights to use for the combinations. 
This should have shape (combined_dim,),\n as calculated by :func:`get_combined_dim`.\n \"\"\"\n if len(tensors) > 9:\n raise ConfigurationError(\"Double-digit tensor lists not currently supported\")\n combination = combination.replace('x', '1').replace('y', '2')\n pieces = combination.split(',')\n tensor_dims = [tensor.size(-1) for tensor in tensors]\n combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]\n dims_so_far = 0\n to_sum = []\n for piece, combination_dim in zip(pieces, combination_dims):\n weight = weights[dims_so_far:(dims_so_far + combination_dim)]\n dims_so_far += combination_dim\n to_sum.append(_get_combination_and_multiply(piece, tensors, weight))\n result = to_sum[0]\n for result_piece in to_sum[1:]:\n result = result + result_piece\n return result\n\n\ndef _get_combination_and_multiply(combination: str,\n tensors: List[torch.Tensor],\n weight: torch.nn.Parameter) -> torch.Tensor:\n if combination.isdigit():\n index = int(combination) - 1\n return torch.matmul(tensors[index], weight)\n else:\n if len(combination) != 3:\n raise ConfigurationError(\"Invalid combination: \" + combination)\n first_tensor = _get_combination(combination[0], tensors)\n second_tensor = _get_combination(combination[2], tensors)\n operation = combination[1]\n if operation == '*':\n if first_tensor.dim() > 4 or second_tensor.dim() > 4:\n raise ValueError(\"Tensors with dim > 4 not currently supported\")\n if first_tensor.dim() == 4:\n expanded_dim = _rindex(first_tensor.size(), 1)\n first_tensor = first_tensor.squeeze(expanded_dim)\n if second_tensor.dim() == 4:\n expanded_dim = _rindex(second_tensor.size(), 1)\n second_tensor = second_tensor.squeeze(expanded_dim)\n intermediate = first_tensor * weight\n return torch.matmul(intermediate, second_tensor.transpose(-1, -2)).squeeze(-1)\n elif operation == '/':\n if first_tensor.dim() > 4 or second_tensor.dim() > 4:\n raise ValueError(\"Tensors with dim > 4 not currently supported\")\n if first_tensor.dim() == 4:\n expanded_dim = _rindex(first_tensor.size(), 1)\n first_tensor = first_tensor.squeeze(expanded_dim)\n if second_tensor.dim() == 4:\n expanded_dim = _rindex(second_tensor.size(), 1)\n second_tensor = second_tensor.squeeze(expanded_dim)\n intermediate = first_tensor * weight\n return torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2)).squeeze(-1)\n elif operation == '+':\n return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)\n elif operation == '-':\n return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)\n else:\n raise ConfigurationError(\"Invalid operation: \" + operation)\n\n\ndef get_combined_dim(combination: str, tensor_dims: List[int]) -> int:\n \"\"\"\n For use with :func:`combine_tensors`. This function computes the resultant dimension when\n calling ``combine_tensors(combination, tensors)``, when the tensor dimension is known. 
This is\n necessary for knowing the sizes of weight matrices when building models that use\n ``combine_tensors``.\n\n Parameters\n ----------\n combination : ``str``\n A comma-separated list of combination pieces, like ``\"1,2,1*2\"``, specified identically to\n ``combination`` in :func:`combine_tensors`.\n tensor_dims : ``List[int]``\n A list of tensor dimensions, where each dimension is from the `last axis` of the tensors\n that will be input to :func:`combine_tensors`.\n \"\"\"\n if len(tensor_dims) > 9:\n raise ConfigurationError(\"Double-digit tensor lists not currently supported\")\n combination = combination.replace('x', '1').replace('y', '2')\n return sum([_get_combination_dim(piece, tensor_dims) for piece in combination.split(',')])\n\n\ndef _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:\n if combination.isdigit():\n index = int(combination) - 1\n return tensor_dims[index]\n else:\n if len(combination) != 3:\n raise ConfigurationError(\"Invalid combination: \" + combination)\n first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)\n second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)\n operation = combination[1]\n if first_tensor_dim != second_tensor_dim:\n raise ConfigurationError(\"Tensor dims must match for operation \\\"{}\\\"\".format(operation))\n return first_tensor_dim\n\n\ndef logsumexp(tensor: torch.Tensor,\n dim: int = -1,\n keepdim: bool = False) -> torch.Tensor:\n \"\"\"\n A numerically stable computation of logsumexp. This is mathematically equivalent to\n `tensor.exp().sum(dim, keep=keepdim).log()`. This function is typically used for summing log\n probabilities.\n\n Parameters\n ----------\n tensor : torch.FloatTensor, required.\n A tensor of arbitrary size.\n dim : int, optional (default = -1)\n The dimension of the tensor to apply the logsumexp to.\n keepdim: bool, optional (default = False)\n Whether to retain a dimension of size one at the dimension we reduce over.\n \"\"\"\n max_score, _ = tensor.max(dim, keepdim=keepdim)\n if keepdim:\n stable_vec = tensor - max_score\n else:\n stable_vec = tensor - max_score.unsqueeze(dim)\n return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()\n\n\ndef get_device_of(tensor: torch.Tensor) -> int:\n \"\"\"\n Returns the device of the tensor.\n \"\"\"\n if not tensor.is_cuda:\n return -1\n else:\n return tensor.get_device()\n\n\ndef flatten_and_batch_shift_indices(indices: torch.Tensor,\n sequence_length: int) -> torch.Tensor:\n \"\"\"\n This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size\n ``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size\n ``(batch_size, sequence_length, embedding_size)``. This function returns a vector that\n correctly indexes into the flattened target. The sequence length of the target must be\n provided to compute the appropriate offsets.\n\n .. 
code-block:: python\n\n indices = torch.ones([2,3], dtype=torch.long)\n # Sequence length of the target tensor.\n sequence_length = 10\n shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)\n # Indices into the second element in the batch are correctly shifted\n # to take into account that the target tensor will be flattened before\n # the indices are applied.\n assert shifted_indices == [1, 1, 1, 11, 11, 11]\n\n Parameters\n ----------\n indices : ``torch.LongTensor``, required.\n sequence_length : ``int``, required.\n The length of the sequence the indices index into.\n This must be the second dimension of the tensor.\n\n Returns\n -------\n offset_indices : ``torch.LongTensor``\n \"\"\"\n # Shape: (batch_size)\n offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length\n for _ in range(len(indices.size()) - 1):\n offsets = offsets.unsqueeze(1)\n\n # Shape: (batch_size, d_1, ..., d_n)\n offset_indices = indices + offsets\n\n # Shape: (batch_size * d_1 * ... * d_n)\n offset_indices = offset_indices.view(-1)\n return offset_indices\n\n\ndef batched_index_select(target: torch.Tensor,\n indices: torch.LongTensor,\n flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:\n \"\"\"\n The given ``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into the sequence\n dimension (dimension 2) of the target, which has size ``(batch_size, sequence_length,\n embedding_size)``.\n\n This function returns selected values in the target with respect to the provided indices, which\n have size ``(batch_size, d_1, ..., d_n, embedding_size)``. This can use the optionally\n precomputed :func:`~flattened_indices` with size ``(batch_size * d_1 * ... * d_n)`` if given.\n\n An example use case of this function is looking up the start and end indices of spans in a\n sequence tensor. This is used in the\n :class:`~allennlp.models.coreference_resolution.CoreferenceResolver`. Model to select\n contextual word representations corresponding to the start and end indices of mentions. The key\n reason this can't be done with basic torch functions is that we want to be able to use look-up\n tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know\n a-priori how many spans we are looking up).\n\n Parameters\n ----------\n target : ``torch.Tensor``, required.\n A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).\n This is the tensor to be indexed.\n indices : ``torch.LongTensor``\n A tensor of shape (batch_size, ...), where each element is an index into the\n ``sequence_length`` dimension of the ``target`` tensor.\n flattened_indices : Optional[torch.Tensor], optional (default = None)\n An optional tensor representing the result of calling :func:~`flatten_and_batch_shift_indices`\n on ``indices``. This is helpful in the case that the indices can be flattened once and\n cached for many batch lookups.\n\n Returns\n -------\n selected_targets : ``torch.Tensor``\n A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices\n extracted from the batch flattened target tensor.\n \"\"\"\n if flattened_indices is None:\n # Shape: (batch_size * d_1 * ... * d_n)\n flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))\n\n # Shape: (batch_size * sequence_length, embedding_size)\n flattened_target = target.view(-1, target.size(-1))\n\n # Shape: (batch_size * d_1 * ... 
* d_n, embedding_size)\n flattened_selected = flattened_target.index_select(0, flattened_indices)\n selected_shape = list(indices.size()) + [target.size(-1)]\n # Shape: (batch_size, d_1, ..., d_n, embedding_size)\n selected_targets = flattened_selected.view(*selected_shape)\n return selected_targets\n\n\ndef flattened_index_select(target: torch.Tensor,\n indices: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n The given ``indices`` of size ``(set_size, subset_size)`` specifies subsets of the ``target``\n that each of the set_size rows should select. The `target` has size\n ``(batch_size, sequence_length, embedding_size)``, and the resulting selected tensor has size\n ``(batch_size, set_size, subset_size, embedding_size)``.\n\n Parameters\n ----------\n target : ``torch.Tensor``, required.\n A Tensor of shape (batch_size, sequence_length, embedding_size).\n indices : ``torch.LongTensor``, required.\n A LongTensor of shape (set_size, subset_size). All indices must be < sequence_length\n as this tensor is an index into the sequence_length dimension of the target.\n\n Returns\n -------\n selected : ``torch.Tensor``, required.\n A Tensor of shape (batch_size, set_size, subset_size, embedding_size).\n \"\"\"\n if indices.dim() != 2:\n raise ConfigurationError(\"Indices passed to flattened_index_select had shape {} but \"\n \"only 2 dimensional inputs are supported.\".format(indices.size()))\n # Shape: (batch_size, set_size * subset_size, embedding_size)\n flattened_selected = target.index_select(1, indices.view(-1))\n\n # Shape: (batch_size, set_size, subset_size, embedding_size)\n selected = flattened_selected.view(target.size(0), indices.size(0), indices.size(1), -1)\n return selected\n\n\ndef get_range_vector(size: int, device: int) -> torch.Tensor:\n \"\"\"\n Returns a range vector with the desired size, starting at 0. The CUDA implementation\n is meant to avoid copy data from CPU to GPU.\n \"\"\"\n if device > -1:\n return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1\n else:\n return torch.arange(0, size, dtype=torch.long)\n\n\ndef bucket_values(distances: torch.Tensor,\n num_identity_buckets: int = 4,\n num_total_buckets: int = 10) -> torch.Tensor:\n \"\"\"\n Places the given values (designed for distances) into ``num_total_buckets``semi-logscale\n buckets, with ``num_identity_buckets`` of these capturing single values.\n\n The default settings will bucket values into the following buckets:\n [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].\n\n Parameters\n ----------\n distances : ``torch.Tensor``, required.\n A Tensor of any size, to be bucketed.\n num_identity_buckets: int, optional (default = 4).\n The number of identity buckets (those only holding a single value).\n num_total_buckets : int, (default = 10)\n The total number of buckets to bucket values into.\n\n Returns\n -------\n A tensor of the same shape as the input, containing the indices of the buckets\n the values were placed in.\n \"\"\"\n # Chunk the values into semi-logscale buckets using .floor().\n # This is a semi-logscale bucketing because we divide by log(2) after taking the log.\n # We do this to make the buckets more granular in the initial range, where we expect\n # most values to fall. 
We then add (num_identity_buckets - 1) because we want these indices\n # to start _after_ the fixed number of buckets which we specified would only hold single values.\n logspace_index = (distances.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)\n # create a mask for values which will go into single number buckets (i.e not a range).\n use_identity_mask = (distances <= num_identity_buckets).long()\n use_buckets_mask = 1 + (-1 * use_identity_mask)\n # Use the original values if they are less than num_identity_buckets, otherwise\n # use the logspace indices.\n combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index\n # Clamp to put anything > num_total_buckets into the final bucket.\n return combined_index.clamp(0, num_total_buckets - 1)\n\n\ndef add_sentence_boundary_token_ids(tensor: torch.Tensor,\n mask: torch.Tensor,\n sentence_begin_token: Any,\n sentence_end_token: Any) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Add begin/end of sentence tokens to the batch of sentences.\n Given a batch of sentences with size ``(batch_size, timesteps)`` or\n ``(batch_size, timesteps, dim)`` this returns a tensor of shape\n ``(batch_size, timesteps + 2)`` or ``(batch_size, timesteps + 2, dim)`` respectively.\n\n Returns both the new tensor and updated mask.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps)`` or ``(batch_size, timesteps, dim)``\n mask : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps)``\n sentence_begin_token: Any (anything that can be broadcast in torch for assignment)\n For 2D input, a scalar with the <S> id. For 3D input, a tensor with length dim.\n sentence_end_token: Any (anything that can be broadcast in torch for assignment)\n For 2D input, a scalar with the </S> id. For 3D input, a tensor with length dim.\n\n Returns\n -------\n tensor_with_boundary_tokens : ``torch.Tensor``\n The tensor with the appended and prepended boundary tokens. 
If the input was 2D,\n it has shape (batch_size, timesteps + 2) and if the input was 3D, it has shape\n (batch_size, timesteps + 2, dim).\n new_mask : ``torch.Tensor``\n The new mask for the tensor, taking into account the appended tokens\n marking the beginning and end of the sentence.\n \"\"\"\n # TODO: matthewp, profile this transfer\n sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()\n tensor_shape = list(tensor.data.shape)\n new_shape = list(tensor_shape)\n new_shape[1] = tensor_shape[1] + 2\n tensor_with_boundary_tokens = tensor.new_zeros(*new_shape)\n if len(tensor_shape) == 2:\n tensor_with_boundary_tokens[:, 1:-1] = tensor\n tensor_with_boundary_tokens[:, 0] = sentence_begin_token\n for i, j in enumerate(sequence_lengths):\n tensor_with_boundary_tokens[i, j + 1] = sentence_end_token\n new_mask = (tensor_with_boundary_tokens != 0).long()\n elif len(tensor_shape) == 3:\n tensor_with_boundary_tokens[:, 1:-1, :] = tensor\n for i, j in enumerate(sequence_lengths):\n tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token\n tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token\n new_mask = ((tensor_with_boundary_tokens > 0).long().sum(dim=-1) > 0).long()\n else:\n raise ValueError(\"add_sentence_boundary_token_ids only accepts 2D and 3D input\")\n\n return tensor_with_boundary_tokens, new_mask\n\n\ndef remove_sentence_boundaries(tensor: torch.Tensor,\n mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Remove begin/end of sentence embeddings from the batch of sentences.\n Given a batch of sentences with size ``(batch_size, timesteps, dim)``\n this returns a tensor of shape ``(batch_size, timesteps - 2, dim)`` after removing\n the beginning and end sentence markers. The sentences are assumed to be padded on the right,\n with the beginning of each sentence assumed to occur at index 0 (i.e., ``mask[:, 0]`` is assumed\n to be 1).\n\n Returns both the new tensor and updated mask.\n\n This function is the inverse of ``add_sentence_boundary_token_ids``.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps, dim)``\n mask : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps)``\n\n Returns\n -------\n tensor_without_boundary_tokens : ``torch.Tensor``\n The tensor after removing the boundary tokens of shape ``(batch_size, timesteps - 2, dim)``\n new_mask : ``torch.Tensor``\n The new mask for the tensor of shape ``(batch_size, timesteps - 2)``.\n \"\"\"\n # TODO: matthewp, profile this transfer\n sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()\n tensor_shape = list(tensor.data.shape)\n new_shape = list(tensor_shape)\n new_shape[1] = tensor_shape[1] - 2\n tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)\n new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.long)\n for i, j in enumerate(sequence_lengths):\n if j > 2:\n tensor_without_boundary_tokens[i, :(j - 2), :] = tensor[i, 1:(j - 1), :]\n new_mask[i, :(j - 2)] = 1\n\n return tensor_without_boundary_tokens, new_mask\n\n\ndef add_positional_features(tensor: torch.Tensor,\n min_timescale: float = 1.0,\n max_timescale: float = 1.0e4):\n # pylint: disable=line-too-long\n \"\"\"\n Implements the frequency-based positional encoding described\n in `Attention is all you Need\n <https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .\n\n Adds sinusoids of different frequencies to a ``Tensor``. 
A sinusoid of a\n different frequency and phase is added to each dimension of the input ``Tensor``.\n This allows the attention heads to use absolute and relative positions.\n\n The number of timescales is equal to hidden_dim / 2 within the range\n (min_timescale, max_timescale). For each timescale, the two sinusoidal\n signals sin(timestep / timescale) and cos(timestep / timescale) are\n generated and concatenated along the hidden_dim dimension.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``\n a Tensor with shape (batch_size, timesteps, hidden_dim).\n min_timescale : ``float``, optional (default = 1.0)\n The smallest timescale to use.\n max_timescale : ``float``, optional (default = 1.0e4)\n The largest timescale to use.\n\n Returns\n -------\n The input tensor augmented with the sinusoidal frequencies.\n \"\"\"\n _, timesteps, hidden_dim = tensor.size()\n\n timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()\n # We're generating both cos and sin frequencies,\n # so half for each.\n num_timescales = hidden_dim // 2\n timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()\n\n log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(num_timescales - 1)\n inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)\n\n # Broadcasted multiplication - shape (timesteps, num_timescales)\n scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)\n # shape (timesteps, 2 * num_timescales)\n sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)\n if hidden_dim % 2 != 0:\n # if the number of dimensions is odd, the cos and sin\n # timescales had size (hidden_dim - 1) / 2, so we need\n # to add a row of zeros to make up the difference.\n sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)\n return tensor + sinusoids.unsqueeze(0)\n" ]
[ [ "torch.sum", "torch.nn.functional.log_softmax", "torch.stack", "torch.cos", "torch.nn.functional.softmax", "torch.zeros_like", "torch.gather", "torch.exp", "torch.sin", "torch.arange", "torch.max", "torch.zeros", "torch.cuda.LongTensor", "torch.cat", "torch.matmul" ] ]
DylanHooz/uestc_yolov3
[ "72ed60aaf68a0ab2dbc8d4dfad7bddffce826dde" ]
[ "train.py" ]
[ "\"\"\"\r\nRetrain the YOLO model for your own dataset.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport keras.backend as K\r\nfrom keras.layers import Input, Lambda\r\nfrom keras.models import Model\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\r\n\r\nfrom yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\r\nfrom yolo3.utils import get_random_data\r\n\r\n\r\ndef _main():\r\n annotation_path = '2007_trainval.txt'\r\n log_dir = 'logs/000/'\r\n classes_path = 'model_data/helmet_classes.txt'\r\n anchors_path = 'model_data/helmet_anchors.txt'\r\n class_names = get_classes(classes_path)\r\n num_classes = len(class_names)\r\n anchors = get_anchors(anchors_path)\r\n\r\n input_shape = (416,416) # multiple of 32, hw\r\n\r\n is_tiny_version = len(anchors)==6 # default setting\r\n if is_tiny_version:\r\n model = create_tiny_model(input_shape, anchors, num_classes,\r\n freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')\r\n else:\r\n model = create_model(input_shape, anchors, num_classes,\r\n freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze\r\n\r\n logging = TensorBoard(log_dir=log_dir)\r\n checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\r\n monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)\r\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)\r\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)\r\n\r\n val_split = 0.1\r\n with open(annotation_path) as f:\r\n lines = f.readlines()\r\n np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n num_val = int(len(lines)*val_split)\r\n num_train = len(lines) - num_val\r\n\r\n # Train with frozen layers first, to get a stable loss.\r\n # Adjust num epochs to your dataset. 
This step is enough to obtain a not bad model.\r\n if True:\r\n model.compile(optimizer=Adam(lr=1e-3), loss={\r\n # use custom yolo_loss Lambda layer.\r\n 'yolo_loss': lambda y_true, y_pred: y_pred})\r\n\r\n batch_size = 32\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\r\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\r\n steps_per_epoch=max(1, num_train//batch_size),\r\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\r\n validation_steps=max(1, num_val//batch_size),\r\n epochs=50,\r\n initial_epoch=0,\r\n callbacks=[logging, checkpoint])\r\n model.save_weights(log_dir + 'trained_weights_stage_1.h5')\r\n\r\n # Unfreeze and continue training, to fine-tune.\r\n # Train longer if the result is not good.\r\n if True:\r\n for i in range(len(model.layers)):\r\n model.layers[i].trainable = True\r\n model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change\r\n print('Unfreeze all of the layers.')\r\n\r\n batch_size = 16 # note that more GPU memory is required after unfreezing the body\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\r\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\r\n steps_per_epoch=max(1, num_train//batch_size),\r\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\r\n validation_steps=max(1, num_val//batch_size),\r\n epochs=100,\r\n initial_epoch=50,\r\n callbacks=[logging, checkpoint, reduce_lr, early_stopping])\r\n model.save_weights(log_dir + 'trained_weights_final.h5')\r\n\r\n # Further training if needed.\r\n\r\n\r\ndef get_classes(classes_path):\r\n '''loads the classes'''\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\ndef get_anchors(anchors_path):\r\n '''loads the anchors from a file'''\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [float(x) for x in anchors.split(',')]\r\n return np.array(anchors).reshape(-1, 2)\r\n\r\n\r\ndef create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,\r\n weights_path='model_data/yolo_weights.h5'):\r\n '''create the training model'''\r\n K.clear_session() # get a new session\r\n image_input = Input(shape=(None, None, 3))\r\n h, w = input_shape\r\n num_anchors = len(anchors)\r\n\r\n y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \\\r\n num_anchors//3, num_classes+5)) for l in range(3)]\r\n\r\n model_body = yolo_body(image_input, num_anchors//3, num_classes)\r\n print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))\r\n\r\n if load_pretrained:\r\n model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)\r\n print('Load weights {}.'.format(weights_path))\r\n if freeze_body in [1, 2]:\r\n # Freeze darknet53 body or freeze all but 3 output layers.\r\n num = (185, len(model_body.layers)-3)[freeze_body-1]\r\n for i in range(num): model_body.layers[i].trainable = False\r\n print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))\r\n\r\n model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\r\n arguments={'anchors': anchors, 
'num_classes': num_classes, 'ignore_thresh': 0.5})(\r\n [*model_body.output, *y_true])\r\n model = Model([model_body.input, *y_true], model_loss)\r\n\r\n return model\r\n\r\ndef create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,\r\n weights_path='model_data/tiny_yolo_weights.h5'):\r\n '''create the training model, for Tiny YOLOv3'''\r\n K.clear_session() # get a new session\r\n image_input = Input(shape=(None, None, 3))\r\n h, w = input_shape\r\n num_anchors = len(anchors)\r\n\r\n y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \\\r\n num_anchors//2, num_classes+5)) for l in range(2)]\r\n\r\n model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)\r\n print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))\r\n\r\n if load_pretrained:\r\n model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)\r\n print('Load weights {}.'.format(weights_path))\r\n if freeze_body in [1, 2]:\r\n # Freeze the darknet body or freeze all but 2 output layers.\r\n num = (20, len(model_body.layers)-2)[freeze_body-1]\r\n for i in range(num): model_body.layers[i].trainable = False\r\n print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))\r\n\r\n model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\r\n arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(\r\n [*model_body.output, *y_true])\r\n model = Model([model_body.input, *y_true], model_loss)\r\n\r\n return model\r\n\r\ndef data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):\r\n '''data generator for fit_generator'''\r\n n = len(annotation_lines)\r\n i = 0\r\n while True:\r\n image_data = []\r\n box_data = []\r\n for b in range(batch_size):\r\n if i==0:\r\n np.random.shuffle(annotation_lines)\r\n image, box = get_random_data(annotation_lines[i], input_shape, random=True)\r\n image_data.append(image)\r\n box_data.append(box)\r\n i = (i+1) % n\r\n image_data = np.array(image_data)\r\n box_data = np.array(box_data)\r\n y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)\r\n yield [image_data, *y_true], np.zeros(batch_size)\r\n\r\ndef data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):\r\n n = len(annotation_lines)\r\n if n==0 or batch_size<=0: return None\r\n return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)\r\n\r\nif __name__ == '__main__':\r\n _main()\r\n\r\n" ]
[ [ "numpy.array", "numpy.random.shuffle", "numpy.random.seed", "numpy.zeros" ] ]
tyburam/python-machine-learning
[ "7cb346c99d24e959c1af63532603dd118558b16f" ]
[ "sigmoid.py" ]
[ "#!/usr/bin/python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef sigmoid(z):\n return 1.0 / (1.0 + np.exp(-z))\n\nz = np.arange(-7, 7, 0.01)\nphi_z = sigmoid(z)\n\nplt.plot(z, phi_z)\nplt.axvline(0.0, color = 'k')\nplt.axhspan(0.0, 1.0, facecolor = '1.0', alpha = 1.0, ls = 'dotted')\nplt.axhline(0.5, ls = 'dotted', color = 'k')\nplt.yticks([0.0, 0.5, 1.0])\nplt.ylim(-0.1, 1.1)\nplt.xlabel('z')\nplt.ylabel('$\\phi (z)$')\nplt.show()" ]
[ [ "matplotlib.pyplot.axvline", "matplotlib.pyplot.axhline", "numpy.exp", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.axhspan" ] ]
kalyanvasudev/pyrobot
[ "839ab89a5b3cdd6af9b1e884fa8e8f0007497e32" ]
[ "src/pyrobot/locobot/camera.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport rospkg\nimport threading\nimport yaml\nfrom copy import deepcopy\n\nimport message_filters\nimport numpy as np\nimport pyrobot.util as prutil\nimport rospy\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom pyrobot.core import Camera\nfrom sensor_msgs.msg import CameraInfo\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import JointState\nfrom std_msgs.msg import Float64\nfrom tf import TransformListener\n\n\ndef constrain_within_range(value, MIN, MAX):\n return min(max(value, MIN), MAX)\n\n\ndef is_within_range(value, MIN, MAX):\n return (value <= MAX) and (value >= MIN)\n\n\nclass SimpleCamera(Camera):\n \"\"\"\n This is camera class that interfaces with the Realsense\n camera on the locobot and locobot-lite.\n This class does not have the pan and tilt actuation\n capabilities for the camera.\n \"\"\"\n\n def __init__(self, configs):\n \"\"\"\n Constructor of the SimpleCamera class.\n\n :param configs: Camera specific configuration object\n\n :type configs: YACS CfgNode\n \"\"\"\n super(SimpleCamera, self).__init__(configs=configs)\n self.cv_bridge = CvBridge()\n self.camera_info_lock = threading.RLock()\n self.camera_img_lock = threading.RLock()\n self._tf_listener = TransformListener()\n self.rgb_img = None\n self.depth_img = None\n self.camera_info = None\n self.camera_P = None\n rospy.Subscriber(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,\n CameraInfo,\n self._camera_info_callback)\n\n rgb_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_RGB_STREAM\n self.rgb_sub = message_filters.Subscriber(rgb_topic, Image)\n depth_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_DEPTH_STREAM\n self.depth_sub = message_filters.Subscriber(depth_topic, Image)\n img_subs = [self.rgb_sub, self.depth_sub]\n self.sync = message_filters.ApproximateTimeSynchronizer(img_subs,\n queue_size=10,\n slop=0.2)\n self.sync.registerCallback(self._sync_callback)\n depth_threshold = (self.configs.BASE.VSLAM.DEPTH_MIN,\n self.configs.BASE.VSLAM.DEPTH_MAX)\n cfg_filename = self.configs.BASE.VSLAM.CFG_FILENAME\n self.depth_cam = DepthImgProcessor(subsample_pixs=1,\n depth_threshold=depth_threshold,\n cfg_filename=cfg_filename)\n self.cam_cf = self.configs.BASE.VSLAM.RGB_CAMERA_CENTER_FRAME\n self.base_f = self.configs.BASE.VSLAM.VSLAM_BASE_FRAME\n\n def _sync_callback(self, rgb, depth):\n self.camera_img_lock.acquire()\n try:\n self.rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb, \"bgr8\")\n self.rgb_img = self.rgb_img[:, :, ::-1]\n self.depth_img = self.cv_bridge.imgmsg_to_cv2(depth, \"passthrough\")\n except CvBridgeError as e:\n rospy.logerr(e)\n self.camera_img_lock.release()\n\n def _camera_info_callback(self, msg):\n self.camera_info_lock.acquire()\n self.camera_info = msg\n self.camera_P = np.array(msg.P).reshape((3, 4))\n self.camera_info_lock.release()\n\n def get_rgb(self):\n '''\n This function returns the RGB image perceived by the camera.\n\n :rtype: np.ndarray or None\n '''\n self.camera_img_lock.acquire()\n rgb = deepcopy(self.rgb_img)\n self.camera_img_lock.release()\n return rgb\n\n def get_depth(self):\n '''\n This function returns the depth image perceived by the camera.\n\n :rtype: np.ndarray or None\n '''\n self.camera_img_lock.acquire()\n depth = deepcopy(self.depth_img)\n self.camera_img_lock.release()\n return depth\n\n def get_rgb_depth(self):\n '''\n This function returns both the RGB and 
depth\n images perceived by the camera.\n\n :rtype: np.ndarray or None\n '''\n self.camera_img_lock.acquire()\n rgb = deepcopy(self.rgb_img)\n depth = deepcopy(self.depth_img)\n self.camera_img_lock.release()\n return rgb, depth\n\n def get_intrinsics(self):\n \"\"\"\n This function returns the camera intrinsics.\n\n :rtype: np.ndarray\n \"\"\"\n if self.camera_P is None:\n return self.camera_P\n self.camera_info_lock.acquire()\n P = deepcopy(self.camera_P)\n self.camera_info_lock.release()\n return P[:3, :3]\n\n def get_current_pcd(self, in_cam=True):\n \"\"\"\n Return the point cloud at current time step (one frame only)\n\n :param in_cam: return points in camera frame,\n otherwise, return points in base frame\n\n :type in_cam: bool\n :returns: tuple (pts, colors)\n\n pts: point coordinates in world frame (shape: :math:`[N, 3]`)\n\n colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)\n :rtype: tuple(np.ndarray, np.ndarray)\n \"\"\"\n trans, rot, T = self.get_link_transform(self.cam_cf,\n self.base_f)\n base2cam_trans = np.array(trans).reshape(-1, 1)\n base2cam_rot = np.array(rot)\n rgb_im, depth_im = self.get_rgb_depth()\n pcd_in_cam, colors = self.depth_cam.get_pcd_ic(depth_im=depth_im,\n rgb_im=rgb_im)\n pts = pcd_in_cam[:3, :].T\n if in_cam:\n return pts, colors\n pts = np.dot(pts, base2cam_rot.T)\n pts = pts + base2cam_trans.T\n return pts, colors\n\n def pix_to_3dpt(self, rs, cs, in_cam=False):\n \"\"\"\n Get the 3D points of the pixels in RGB images.\n\n :param rs: rows of interest in the RGB image.\n It can be a list or 1D numpy array\n which contains the row indices.\n The default value is None,\n which means all rows.\n :param cs: columns of interest in the RGB image.\n It can be a list or 1D numpy array\n which contains the column indices.\n The default value is None,\n which means all columns.\n :param in_cam: return points in camera frame,\n otherwise, return points in base frame\n\n :type rs: list or np.ndarray\n :type cs: list or np.ndarray\n :type in_cam: bool\n\n :returns: tuple (pts, colors)\n\n pts: point coordinates in world frame\n (shape: :math:`[N, 3]`)\n\n colors: rgb values for pts_in_cam\n (shape: :math:`[N, 3]`)\n\n :rtype: tuple(np.ndarray, np.ndarray)\n \"\"\"\n trans, rot, T = self.get_link_transform(self.cam_cf,\n self.base_f)\n base2cam_trans = np.array(trans).reshape(-1, 1)\n base2cam_rot = np.array(rot)\n rgb_im, depth_im = self.get_rgb_depth()\n pcd_in_cam = self.depth_cam.get_pix_3dpt(depth_im=depth_im,\n rs=rs,\n cs=cs)\n pts = pcd_in_cam[:3, :].T\n colors = rgb_im[rs, cs].reshape(-1, 3)\n if in_cam:\n return pts, colors\n pts = np.dot(pts, base2cam_rot.T)\n pts = pts + base2cam_trans.T\n return pts, colors\n\n def get_link_transform(self, src, tgt):\n \"\"\"\n Returns the latest transformation from the\n target_frame to the source frame,\n i.e., the transform of source frame w.r.t\n target frame. 
If the returned\n transform is applied to data, it will transform\n data in the source_frame into\n the target_frame\n\n For more information, please refer to\n http://wiki.ros.org/tf/Overview/Using%20Published%20Transforms\n\n :param src: source frame\n :param tgt: target frame\n :type src: string\n :type tgt: string\n\n :returns: tuple(trans, rot, T)\n\n trans: translational vector (shape: :math:`[3,]`)\n\n rot: rotation matrix (shape: :math:`[3, 3]`)\n\n T: transofrmation matrix (shape: :math:`[4, 4]`)\n :rtype: tuple(np.ndarray, np.ndarray, np.ndarray)\n \"\"\"\n trans, quat = prutil.get_tf_transform(self._tf_listener,\n tgt,\n src)\n rot = prutil.quat_to_rot_mat(quat)\n T = np.eye(4)\n T[:3, :3] = rot\n T[:3, 3] = trans\n return trans, rot, T\n\n\nclass LoCoBotCamera(SimpleCamera):\n \"\"\"\n This is camera class that interfaces with the Realsense\n camera and the pan and tilt joints on the robot.\n \"\"\"\n\n def __init__(self, configs):\n \"\"\"\n Constructor of the LoCoBotCamera class.\n\n :param configs: Object containing configurations for camera,\n pan joint and tilt joint.\n\n :type configs: YACS CfgNode\n \"\"\"\n use_camera = rospy.get_param('use_camera', False)\n use_sim = rospy.get_param('use_sim', False)\n use_camera = use_camera or use_sim\n if not use_camera:\n rospy.logwarn('Neither use_camera, nor use_sim, is not set'\n ' to True when the LoCoBot driver is launched.'\n 'You may not be able to command the camera'\n ' correctly using PyRobot!!!')\n return\n super(LoCoBotCamera, self).__init__(configs=configs)\n\n rospy.Subscriber(self.configs.ARM.ROSTOPIC_JOINT_STATES,\n JointState,\n self._camera_pose_callback)\n\n self.set_pan_pub = rospy.Publisher(\n self.configs.CAMERA.ROSTOPIC_SET_PAN, Float64, queue_size=1)\n self.set_tilt_pub = rospy.Publisher(\n self.configs.CAMERA.ROSTOPIC_SET_TILT, Float64, queue_size=1)\n self.pan = None\n self.tilt = None\n self.tol = 0.01\n\n def _camera_pose_callback(self, msg):\n if 'head_pan_joint' in msg.name:\n pan_id = msg.name.index('head_pan_joint')\n self.pan = msg.position[pan_id]\n if 'head_tilt_joint' in msg.name:\n tilt_id = msg.name.index('head_tilt_joint')\n self.tilt = msg.position[tilt_id]\n\n @property\n def state(self):\n \"\"\"\n Return the current pan and tilt joint angles of the robot camera.\n\n :return:\n pan_tilt: A list the form [pan angle, tilt angle]\n :rtype: list\n \"\"\"\n return self.get_state()\n\n def get_state(self):\n \"\"\"\n Return the current pan and tilt joint angles of the robot camera.\n\n :return:\n pan_tilt: A list the form [pan angle, tilt angle]\n :rtype: list\n \"\"\"\n return [self.pan, self.tilt]\n\n def get_pan(self):\n \"\"\"\n Return the current pan joint angle of the robot camera.\n\n :return:\n pan: Pan joint angle\n :rtype: float\n \"\"\"\n return self.pan\n\n def get_tilt(self):\n \"\"\"\n Return the current tilt joint angle of the robot camera.\n\n :return:\n tilt: Tilt joint angle\n :rtype: float\n \"\"\"\n return self.tilt\n\n def set_pan(self, pan, wait=True):\n \"\"\"\n Sets the pan joint angle to the specified value.\n\n :param pan: value to be set for pan joint\n :param wait: wait until the pan angle is set to\n the target angle.\n\n :type pan: float\n :type wait: bool\n \"\"\"\n pan = constrain_within_range(np.mod(pan + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_PAN,\n self.configs.CAMERA.MAX_PAN)\n self.set_pan_pub.publish(pan)\n if wait:\n for i in range(30):\n rospy.sleep(0.1)\n if np.fabs(self.get_pan() - pan) < self.tol:\n break\n\n def set_tilt(self, tilt, 
wait=True):\n \"\"\"\n Sets the tilt joint angle to the specified value.\n\n :param tilt: value to be set for the tilt joint\n :param wait: wait until the tilt angle is set to\n the target angle.\n\n :type tilt: float\n :type wait: bool\n \"\"\"\n tilt = constrain_within_range(np.mod(tilt + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_TILT,\n self.configs.CAMERA.MAX_TILT)\n self.set_tilt_pub.publish(tilt)\n if wait:\n for i in range(30):\n rospy.sleep(0.1)\n if np.fabs(self.get_tilt() - tilt) < self.tol:\n break\n\n def set_pan_tilt(self, pan, tilt, wait=True):\n \"\"\"\n Sets both the pan and tilt joint angles to the specified values.\n\n :param pan: value to be set for pan joint\n :param tilt: value to be set for the tilt joint\n :param wait: wait until the pan and tilt angles are set to\n the target angles.\n\n :type pan: float\n :type tilt: float\n :type wait: bool\n \"\"\"\n pan = constrain_within_range(np.mod(pan + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_PAN,\n self.configs.CAMERA.MAX_PAN)\n tilt = constrain_within_range(np.mod(tilt + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_TILT,\n self.configs.CAMERA.MAX_TILT)\n self.set_pan_pub.publish(pan)\n self.set_tilt_pub.publish(tilt)\n if wait:\n for i in range(30):\n rospy.sleep(0.1)\n if np.fabs(self.get_pan() - pan) < self.tol and \\\n np.fabs(self.get_tilt() - tilt) < self.tol:\n break\n\n def reset(self):\n \"\"\"\n This function resets the pan and tilt joints by actuating\n them to their home configuration.\n \"\"\"\n self.set_pan_tilt(self.configs.CAMERA.RESET_PAN,\n self.configs.CAMERA.RESET_TILT)\n\n\nclass DepthImgProcessor:\n \"\"\"\n This class transforms the depth image and rgb image to point cloud\n \"\"\"\n\n def __init__(self, subsample_pixs=1, depth_threshold=(0, 1.5),\n cfg_filename='realsense_d435.yaml'):\n \"\"\"\n The constructor for :class:`DepthImgProcessor` class.\n\n :param subsample_pixs: sample rows and columns for the images\n :param depth_threshold: minimum and maximum of valid depth values\n :param cfg_filename: configuration file name for ORB-SLAM2\n\n :type subsample_pixs: int\n :type depth_threshold: tuple\n :type cfg_filename: string\n \"\"\"\n assert (type(depth_threshold) is tuple and\n 0 < len(depth_threshold) < 3) or \\\n (depth_threshold is None)\n self.subsample_pixs = subsample_pixs\n self.depth_threshold = depth_threshold\n self.cfg_data = self.read_cfg(cfg_filename)\n self.intrinsic_mat = self.get_intrinsic()\n self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)\n\n img_pixs = np.mgrid[0: self.cfg_data['Camera.height']: subsample_pixs,\n 0: self.cfg_data['Camera.width']: subsample_pixs]\n img_pixs = img_pixs.reshape(2, -1)\n img_pixs[[0, 1], :] = img_pixs[[1, 0], :]\n self.uv_one = np.concatenate((img_pixs,\n np.ones((1, img_pixs.shape[1]))))\n self.uv_one_in_cam = np.dot(self.intrinsic_mat_inv, self.uv_one)\n\n def get_pix_3dpt(self, depth_im, rs, cs):\n \"\"\"\n :param depth_im: depth image (shape: :math:`[H, W]`)\n :param rs: rows of interest. It can be a list or 1D numpy array\n which contains the row indices. The default value is None,\n which means all rows.\n :param cs: columns of interest. 
It can be a list or 1D numpy array\n which contains the column indices.\n The default value is None,\n which means all columns.\n :type depth_im: np.ndarray\n :type rs: list or np.ndarray\n :type cs: list or np.ndarray\n\n :return: 3D point coordinates of the pixels in\n camera frame (shape: :math:`[4, N]`)\n :rtype np.ndarray\n \"\"\"\n assert isinstance(rs,\n int) or isinstance(rs,\n list) or isinstance(rs,\n np.ndarray)\n assert isinstance(cs,\n int) or isinstance(cs,\n list) or isinstance(cs,\n np.ndarray)\n if isinstance(rs, int):\n rs = [rs]\n if isinstance(cs, int):\n cs = [cs]\n if isinstance(rs, np.ndarray):\n rs = rs.flatten()\n if isinstance(cs, np.ndarray):\n cs = cs.flatten()\n depth_im = depth_im[rs, cs]\n depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])\n img_pixs = np.stack((rs, cs)).reshape(2, -1)\n img_pixs[[0, 1], :] = img_pixs[[1, 0], :]\n uv_one = np.concatenate((img_pixs,\n np.ones((1, img_pixs.shape[1]))))\n uv_one_in_cam = np.dot(self.intrinsic_mat_inv, uv_one)\n pts_in_cam = np.multiply(uv_one_in_cam, depth)\n pts_in_cam = np.concatenate((pts_in_cam,\n np.ones((1, pts_in_cam.shape[1]))),\n axis=0)\n return pts_in_cam\n\n def get_pcd_ic(self, depth_im, rgb_im=None):\n \"\"\"\n Returns the point cloud (filtered by minimum\n and maximum depth threshold)\n in camera's coordinate frame\n\n :param depth_im: depth image (shape: :math:`[H, W]`)\n :param rgb_im: rgb image (shape: :math:`[H, W, 3]`)\n\n :type depth_im: np.ndarray\n :type rgb_im: np.ndarray\n\n :returns: tuple (pts_in_cam, rgb_im)\n\n pts_in_cam: point coordinates in\n camera frame (shape: :math:`[4, N]`)\n\n rgb: rgb values for pts_in_cam (shape: :math:`[N, 3]`)\n :rtype tuple(np.ndarray, np.ndarray)\n \"\"\"\n # pcd in camera from depth\n depth_im = depth_im[0::self.subsample_pixs, 0::self.subsample_pixs]\n rgb_im = rgb_im[0::self.subsample_pixs, 0::self.subsample_pixs]\n depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])\n rgb = None\n if rgb_im is not None:\n rgb = rgb_im.reshape(-1, 3)\n if self.depth_threshold is not None:\n valid = depth > self.depth_threshold[0]\n if len(self.depth_threshold) > 1:\n valid = np.logical_and(valid,\n depth < self.depth_threshold[1])\n uv_one_in_cam = self.uv_one_in_cam[:, valid]\n depth = depth[valid]\n rgb = rgb[valid]\n else:\n uv_one_in_cam = self.uv_one_in_cam\n pts_in_cam = np.multiply(uv_one_in_cam, depth)\n pts_in_cam = np.concatenate((pts_in_cam,\n np.ones((1, pts_in_cam.shape[1]))),\n axis=0)\n return pts_in_cam, rgb\n\n def get_pcd_iw(self, pts_in_cam, extrinsic_mat):\n \"\"\"\n Returns the point cloud in the world coordinate frame\n\n :param pts_in_cam: point coordinates in\n camera frame (shape: :math:`[4, N]`)\n :param extrinsic_mat: extrinsic matrix for\n the camera (shape: :math:`[4, 4]`)\n\n :type pts_in_cam: np.ndarray\n :type extrinsic_mat: np.ndarray\n\n :return: point coordinates in\n ORB-SLAM2's world frame (shape: :math:`[N, 3]`)\n :rtype: np.ndarray\n \"\"\"\n # pcd in world\n pts_in_world = np.dot(extrinsic_mat, pts_in_cam)\n pts_in_world = pts_in_world[:3, :].T\n return pts_in_world\n\n def read_cfg(self, cfg_filename):\n \"\"\"\n Reads the configuration file\n\n :param cfg_filename: configuration file name for ORB-SLAM2\n\n :type cfg_filename: string\n\n :return: configurations in the configuration file\n :rtype: dict\n \"\"\"\n rospack = rospkg.RosPack()\n slam_pkg_path = rospack.get_path('orb_slam2_ros')\n cfg_path = os.path.join(slam_pkg_path,\n 'cfg',\n cfg_filename)\n with open(cfg_path, 'r') as f:\n 
for i in range(1):\n f.readline()\n cfg_data = yaml.load(f)\n return cfg_data\n\n def get_intrinsic(self):\n \"\"\"\n Returns the instrinsic matrix of the camera\n\n :return: the intrinsic matrix (shape: :math:`[3, 3]`)\n :rtype: np.ndarray\n \"\"\"\n fx = self.cfg_data['Camera.fx']\n fy = self.cfg_data['Camera.fy']\n cx = self.cfg_data['Camera.cx']\n cy = self.cfg_data['Camera.cy']\n Itc = np.array([[fx, 0, cx],\n [0, fy, cy],\n [0, 0, 1]])\n return Itc\n" ]
[ [ "numpy.eye", "numpy.multiply", "numpy.ones", "numpy.linalg.inv", "numpy.stack", "numpy.logical_and", "numpy.mod", "numpy.array", "numpy.dot" ] ]
j-varun/enas
[ "1a19ccbd7c06168ae51e0de2986b30ea01cce070" ]
[ "enas/cifar10/data_utils.py" ]
[ "import os\nimport sys\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import _pickle as pickle\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef _read_data(data_path, train_files):\n \"\"\"Reads CIFAR-10 format data. Always returns NHWC format.\n\n Returns:\n images: np tensor of size [N, H, W, C]\n labels: np tensor of size [N]\n \"\"\"\n images, labels = [], []\n for file_name in train_files:\n print(file_name)\n full_name = os.path.join(data_path, file_name)\n with open(full_name, 'rb') as finp:\n data = pickle.load(finp, encoding='bytes')\n batch_images = data[b\"data\"].astype(np.float32) / 255.0\n batch_labels = np.array(data[b\"labels\"], dtype=np.int32)\n images.append(batch_images)\n labels.append(batch_labels)\n images = np.concatenate(images, axis=0)\n labels = np.concatenate(labels, axis=0)\n images = np.reshape(images, [-1, 3, 32, 32])\n images = np.transpose(images, [0, 2, 3, 1])\n\n return images, labels\n\ndef _read_fmnist_data(data_path):\n \"\"\"Reads Fashion-Mnist data. Returns NHWC format.\n\n Returns:\n images: np tensor of size [N, H, W, C]\n labels: np tensor of size [N]\n \"\"\"\n images, labels = {},{}\n data = input_data.read_data_sets(data_path)\n images[\"train\"] = data.train.images.reshape(-1, 1, 28, 28) / 255.0\n images[\"test\"] = data.test.images.reshape(-1, 1, 28, 28) / 255.0\n\n images[\"train\"] = np.transpose(images[\"train\"], [0, 2, 3, 1])\n images[\"test\"] = np.transpose(images[\"test\"], [0, 2, 3, 1])\n\n labels[\"train\"] = np.array(data.train.labels, dtype = np.int32)\n labels[\"test\"] = np.array(data.test.labels, dtype = np.int32)\n print(\"Read and processed data..\")\n print(labels[\"test\"])\n\n return images, labels\n\n\ndef valid_split_data(images, labels, num_valids=5000):\n if num_valids:\n images[\"valid\"] = images[\"train\"][-num_valids:]\n labels[\"valid\"] = labels[\"train\"][-num_valids:]\n\n images[\"train\"] = images[\"train\"][:-num_valids]\n labels[\"train\"] = labels[\"train\"][:-num_valids]\n else:\n images[\"valid\"], labels[\"valid\"] = None, None\n return images, labels\n\ndef read_data(data_path, num_valids=5000, dataset = \"cifar\"):\n print(\"-\" * 80)\n print(\"Reading data\")\n print(os.getcwd())\n\n images, labels = {}, {}\n if(dataset == \"fmnist\"):\n print(\"Fashion-Mnist\")\n images, labels = _read_fmnist_data(data_path)\n images, labels = valid_split_data(images, labels, num_valids)\n return images, labels\n\n if dataset == \"stacking\":\n images[\"path\"] = data_path\n return images, labels\n else:\n train_files = [\n \"data_batch_1\",\n \"data_batch_2\",\n \"data_batch_3\",\n \"data_batch_4\",\n \"data_batch_5\",\n ]\n test_file = [\n \"test_batch\",\n ]\n images[\"train\"], labels[\"train\"] = _read_data(data_path, train_files)\n\n images, labels = valid_split_data(images, labels, num_valids)\n\n images[\"test\"], labels[\"test\"] = _read_data(data_path, test_file)\n\n print(\"Prepropcess: [subtract mean], [divide std]\")\n mean = np.mean(images[\"train\"], axis=(0, 1, 2), keepdims=True)\n std = np.std(images[\"train\"], axis=(0, 1, 2), keepdims=True)\n\n print(\"mean: {}\".format(np.reshape(mean * 255.0, [-1])))\n print(\"std: {}\".format(np.reshape(std * 255.0, [-1])))\n\n images[\"train\"] = (images[\"train\"] - mean) / std\n if num_valids:\n images[\"valid\"] = (images[\"valid\"] - mean) / std\n images[\"test\"] = (images[\"test\"] - mean) / std\n\n return images, labels\n\n" ]
[ [ "numpy.transpose", "numpy.reshape", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "numpy.array", "numpy.std", "numpy.concatenate", "numpy.mean" ] ]
minhhoangbui/PICK-pytorch
[ "c74d2d1e5d1f8c7e837ea9776146bc84a7ecf30a" ]
[ "src/runner/trainer.py" ]
[ "# -*- coding: utf-8 -*-\n# @Author: Wenwen Yu\n# @Created Time: 7/12/2020 9:50 PM\n\nimport os\nimport numpy as np\nfrom numpy import inf\n\nimport torch\nimport torch.distributed as dist\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom src.utils import inf_loop\nfrom src.utils.metrics import MetricTracker, SpanBasedF1MetricTracker\nfrom torch.utils.tensorboard import SummaryWriter\n# from src.logger import TensorboardWriter\nfrom src.utils.utils import to_union\n\n\nclass Trainer:\n \"\"\"\n Trainer class\n \"\"\"\n\n def __init__(self, model, optimizer, config, data_loader, iob_labels_vocab_cls,\n valid_data_loader=None, lr_scheduler=None, max_len_step=None):\n \"\"\"\n :param model:\n :param optimizer:\n :param config:\n :param data_loader:\n :param iob_labels_vocab_cls\n :param valid_data_loader:\n :param lr_scheduler:\n :param max_len_step: controls number of batches(steps) in each epoch.\n \"\"\"\n self.config = config\n self.iob_labels_vocab_cls = iob_labels_vocab_cls\n self.distributed = config['distributed']\n if self.distributed:\n self.local_master = (config['local_rank'] == 0)\n self.global_master = (dist.get_rank() == 0)\n else:\n self.local_master = True\n self.global_master = True\n self.logger = config.get_logger('trainer', config['trainer']['log_verbosity']) if self.local_master else None\n\n # setup GPU device if available, move model into configured device\n self.device, self.device_ids = self._prepare_device(config['local_rank'], config['local_world_size'])\n self.model = model.to(self.device)\n\n self.optimizer = optimizer\n\n cfg_trainer = config['trainer']\n self.epochs = cfg_trainer['epochs']\n self.save_period = cfg_trainer['save_period']\n monitor_open = cfg_trainer['monitor_open']\n if monitor_open:\n self.monitor = cfg_trainer.get('monitor', 'off')\n else:\n self.monitor = 'off'\n\n # configuration to monitor model performance and save best\n if self.monitor == 'off':\n self.monitor_mode = 'off'\n self.monitor_best = 0\n else:\n self.monitor_mode, self.monitor_metric = self.monitor.split()\n assert self.monitor_mode in ['min', 'max']\n\n self.monitor_best = inf if self.monitor_mode == 'min' else -inf\n self.early_stop = cfg_trainer.get('early_stop', inf)\n self.early_stop = inf if self.early_stop == -1 else self.early_stop\n\n self.start_epoch = 1\n\n if self.local_master:\n self.checkpoint_dir = config.save_dir\n # setup visualization writer instance\n # self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])\n self.writer = SummaryWriter(config.tensorboard_dir)\n # load checkpoint for resume training\n if config.resume is not None:\n self._resume_checkpoint(config.resume)\n\n # load checkpoint following load to multi-gpu, avoid 'module.' 
prefix\n if self.config['trainer']['sync_batch_norm'] and self.distributed:\n self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)\n\n if self.distributed:\n self.model = DDP(self.model, device_ids=self.device_ids, output_device=self.device_ids[0],\n find_unused_parameters=True)\n\n self.data_loader = data_loader\n if max_len_step is None: # max length of iteration step of every epoch\n # epoch-based training\n self.len_step = len(self.data_loader)\n else:\n # iteration-based training\n self.data_loader = inf_loop(data_loader)\n self.len_step = max_len_step\n self.valid_data_loader = valid_data_loader\n self.do_validation = self.valid_data_loader is not None\n self.lr_scheduler = lr_scheduler\n\n log_step = self.config['trainer']['log_step_interval']\n self.log_step = log_step if log_step != -1 and 0 < log_step < self.len_step else int(\n np.sqrt(data_loader.batch_size))\n\n self.val_epoch_interval = self.config['trainer']['val_epoch_interval']\n\n self.gl_loss_lambda = self.config['trainer']['gl_loss_lambda']\n\n self.train_loss_metrics = MetricTracker('loss', 'gl_loss', 'crf_loss',\n writer=self.writer if self.local_master else None)\n self.valid_f1_metrics = SpanBasedF1MetricTracker(iob_labels_vocab_cls)\n\n def train(self):\n \"\"\"\n Full training logic, including train and validation.\n \"\"\"\n\n if self.distributed:\n dist.barrier() # Syncing machines before training\n\n not_improved_count = 0\n val_result_dict = None\n if self.config['evaluate_only']:\n print(\"------Evaluation only------\")\n val_result_dict = self._valid_epoch(0)\n val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)\n\n self.logger_info('[Step Validation] Epoch:[{}/{}]] \\n{}'.\n format(0, self.epochs, val_res))\n return\n for epoch in range(self.start_epoch, self.epochs + 1):\n\n # ensure distribute worker sample different data,\n # set different random seed by passing epoch to sampler\n if self.distributed:\n self.data_loader.sampler.set_epoch(epoch)\n result_dict = self._train_epoch(epoch)\n\n # print logged information to the screen\n if self.do_validation:\n val_result_dict = result_dict['val_result_dict']\n val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)\n else:\n val_res = ''\n # every epoch log information\n self.logger_info('[Epoch Validation] Epoch:[{}/{}] Total Loss: {:.6f} '\n 'GL_Loss: {:.6f} CRF_Loss: {:.6f} \\n{}'.\n format(epoch, self.epochs, result_dict['loss'],\n result_dict['gl_loss'] * self.gl_loss_lambda,\n result_dict['crf_loss'], val_res))\n\n # evaluate model performance according to configured metric, check early stop, and\n # save best checkpoint as model_best\n best = False\n if self.monitor_mode != 'off' and self.do_validation:\n best, not_improved_count = self._is_best_monitor_metric(best, not_improved_count, val_result_dict)\n if not_improved_count > self.early_stop:\n self.logger_info(\"Validation performance didn't improve for {} epochs. 
\"\n \"Training stops.\".format(self.early_stop))\n break\n\n if epoch % self.save_period == 0:\n self._save_checkpoint(epoch, save_best=best)\n\n def _is_best_monitor_metric(self, best, not_improved_count, val_result_dict):\n \"\"\"\n monitor metric\n :param best:\n :param not_improved_count:\n :param val_result_dict:\n :return:\n \"\"\"\n entity_name, metric = self.monitor_metric.split('-')\n val_monitor_metric_res = val_result_dict[entity_name][metric]\n try:\n # check whether model performance improved or not, according to specified metric(monitor_metric)\n improved = (self.monitor_mode == 'min' and val_monitor_metric_res <= self.monitor_best) or \\\n (self.monitor_mode == 'max' and val_monitor_metric_res >= self.monitor_best)\n except KeyError:\n self.logger_warning(\"Warning: Metric '{}' is not found. \"\n \"Model performance monitoring is disabled.\".format(self.monitor_metric))\n self.monitor_mode = 'off'\n improved = False\n if improved:\n self.monitor_best = val_monitor_metric_res\n not_improved_count = 0\n best = True\n else:\n not_improved_count += 1\n return best, not_improved_count\n\n def _train_epoch(self, epoch):\n \"\"\"\n Training logic for an epoch\n :param epoch: Integer, current training epoch.\n :return: A log dict that contains average loss and metric in this epoch.\n \"\"\"\n self.model.train()\n self.train_loss_metrics.reset()\n # step iteration start ##\n for step_idx, input_data_item in enumerate(self.data_loader):\n step_idx += 1\n\n for key, input_value in input_data_item.items():\n if input_value is not None and isinstance(input_value, torch.Tensor):\n input_data_item[key] = input_value.to(self.device, non_blocking=True)\n if self.config['trainer']['anomaly_detection']:\n # This mode will increase the runtime and should only be enabled for debugging\n with torch.autograd.detect_anomaly():\n self.optimizer.zero_grad()\n # model forward\n output = self.model(**input_data_item)\n # calculate loss\n gl_loss = output['gl_loss']\n crf_loss = output['crf_loss']\n total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)\n # backward\n total_loss.backward()\n # self.average_gradients(self.model)\n self.optimizer.step()\n else:\n self.optimizer.zero_grad()\n # model forward\n output = self.model(**input_data_item)\n # calculate loss\n gl_loss = output['gl_loss']\n crf_loss = output['crf_loss']\n total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)\n # backward\n total_loss.backward()\n # self.average_gradients(self.model)\n self.optimizer.step()\n\n # Use a barrier() to make sure that all process have finished forward and backward\n if self.distributed:\n dist.barrier()\n # obtain the sum of all total_loss at all processes\n dist.all_reduce(total_loss, op=dist.reduce_op.SUM)\n\n size = dist.get_world_size()\n else:\n size = 1\n gl_loss /= size # averages gl_loss across the whole world\n crf_loss /= size # averages crf_loss across the whole world\n\n # calculate average loss across the batch size\n avg_gl_loss = torch.mean(gl_loss)\n avg_crf_loss = torch.mean(crf_loss)\n avg_loss = avg_crf_loss + self.gl_loss_lambda * avg_gl_loss\n # update metrics\n # self.writer.set_step((epoch - 1) * self.len_step + step_idx - 1) if self.local_master else None\n self.train_loss_metrics.update('loss', avg_loss.item(), epoch)\n self.train_loss_metrics.update('gl_loss', avg_gl_loss.item() * self.gl_loss_lambda, epoch)\n self.train_loss_metrics.update('crf_loss', avg_crf_loss.item(), epoch)\n\n # log messages\n if step_idx % self.log_step == 0:\n 
self.logger_info('Train Epoch:[{}/{}] Step:[{}/{}] Total Loss: {:.6f} GL_Loss: {:.6f} CRF_Loss: {:.6f}'.\n format(epoch, self.epochs, step_idx, self.len_step,\n avg_loss.item(), avg_gl_loss.item() * self.gl_loss_lambda, avg_crf_loss.item()))\n\n # decide whether continue iter\n if step_idx == self.len_step + 1:\n break\n\n # step iteration end ##\n\n # do validation after val_step_interval iteration\n if self.do_validation and epoch % self.val_epoch_interval == 0:\n val_result_dict = self._valid_epoch(epoch)\n self.logger_info('[Step Validation] Epoch:[{}/{}]] \\n{}'.\n format(epoch, self.epochs, self.len_step,\n SpanBasedF1MetricTracker.dict2str(val_result_dict)))\n\n # check if best metric, if true, then save as model_best checkpoint.\n best, not_improved_count = self._is_best_monitor_metric(False, 0, val_result_dict)\n if best:\n self._save_checkpoint(epoch, best)\n\n # {'loss': avg_loss, 'gl_loss': avg_gl_loss, 'crf_loss': avg_crf_loss}\n log = self.train_loss_metrics.result()\n\n # do validation after training an epoch\n if self.do_validation:\n val_result_dict = self._valid_epoch(epoch)\n log['val_result_dict'] = val_result_dict\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n self.model.train()\n\n return log\n\n def _valid_epoch(self, epoch):\n \"\"\"\n Validate after training an epoch or regular step, this is a time-consuming procedure if validation data is big.\n :param epoch: Integer, current training epoch.\n :return: A dict that contains information about validation\n \"\"\"\n\n self.model.eval()\n self.valid_f1_metrics.reset()\n with torch.no_grad():\n for step_idx, input_data_item in enumerate(self.valid_data_loader):\n for key, input_value in input_data_item.items():\n if input_value is not None and isinstance(input_value, torch.Tensor):\n input_data_item[key] = input_value.to(self.device, non_blocking=True)\n\n output = self.model(**input_data_item)\n logits = output['logits']\n new_mask = output['new_mask']\n if hasattr(self.model, 'module'):\n # List[(List[int], torch.Tensor)] contain the tag indices of the maximum likelihood tag sequence.\n # and the score of the viterbi path.\n best_paths = self.model.module.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,\n logits_batch_first=True)\n else:\n best_paths = self.model.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,\n logits_batch_first=True)\n\n predicted_tags = []\n for path, score in best_paths:\n predicted_tags.append(path)\n\n # self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + step_idx, 'valid') \\\n # if self.local_master else None\n\n # calculate and update f1 metrics\n # (B, N*T, out_dim)\n predicted_tags_hard_prob = logits * 0\n for i, instance_tags in enumerate(predicted_tags):\n for j, tag_id in enumerate(instance_tags):\n predicted_tags_hard_prob[i, j, tag_id] = 1\n\n golden_tags = input_data_item['iob_tags_label']\n mask = input_data_item['mask']\n\n union_iob_tags = to_union(golden_tags, mask, self.iob_labels_vocab_cls)\n\n if self.distributed:\n dist.barrier() #\n self.valid_f1_metrics.update(predicted_tags_hard_prob.long(), union_iob_tags, new_mask)\n\n # add histogram of model parameters to the tensorboard\n # for name, p in self.model.named_parameters():\n # self.writer.add_histogram(name, p, bins='auto')\n\n f1_result_dict = self.valid_f1_metrics.result()\n\n overall_dict = f1_result_dict['overall']\n if self.local_master:\n for key, value in overall_dict.items():\n self.writer.add_scalar(key, value, epoch)\n\n return f1_result_dict\n\n @staticmethod\n def 
average_gradients(model):\n \"\"\"\n Gradient averaging\n :param model:\n :return:\n \"\"\"\n size = float(dist.get_world_size())\n for param in model.parameters():\n dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)\n param.grad.data /= size\n\n def logger_info(self, msg):\n self.logger.info(msg) if self.local_master else None\n\n def logger_warning(self, msg):\n self.logger.warning(msg) if self.local_master else None\n\n def _prepare_device(self, local_rank, local_world_size):\n \"\"\"\n setup GPU device if available, move model into configured device\n :param local_rank:\n :param local_world_size:\n :return:\n \"\"\"\n if self.distributed:\n n_gpu_per_process = torch.cuda.device_count() // local_world_size\n device_ids = list(range(local_rank * n_gpu_per_process, (local_rank + 1) * n_gpu_per_process))\n\n if torch.cuda.is_available() and local_rank != -1:\n torch.cuda.set_device(device_ids[0]) # device_ids[0] =local_rank if local_world_size = n_gpu per node\n device = 'cuda'\n self.logger_info(\n f\"[Process {os.getpid()}] world_size = {dist.get_world_size()}, \"\n + f\"rank = {dist.get_rank()}, n_gpu/process = {n_gpu_per_process}, device_ids = {device_ids}\"\n )\n else:\n self.logger_warning('Training will be using CPU!')\n device = 'cpu'\n device = torch.device(device)\n return device, device_ids\n else:\n n_gpu = torch.cuda.device_count()\n n_gpu_use = local_world_size\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger_warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger_warning(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n\n list_ids = list(range(n_gpu_use))\n if n_gpu_use > 0:\n torch.cuda.set_device(list_ids[0]) # only use first available gpu as devices\n self.logger_warning(f'Training is using GPU {list_ids[0]}!')\n device = 'cuda'\n else:\n self.logger_warning('Training is using CPU!')\n device = 'cpu'\n device = torch.device(device)\n return device, list_ids\n\n def _save_checkpoint(self, epoch, save_best=False):\n \"\"\"\n Saving checkpoints\n :param epoch: current epoch number\n :param save_best: if True, rename the saved checkpoint to 'model_best.pth'\n :return:\n \"\"\"\n # only local master process do save model\n if not self.local_master:\n return\n\n if hasattr(self.model, 'module'):\n arch = type(self.model.module).__name__\n state_dict = self.model.module.state_dict()\n else:\n arch = type(self.model).__name__\n state_dict = self.model.state_dict()\n state = {\n 'arch': arch,\n 'epoch': epoch,\n 'state_dict': state_dict,\n 'optimizer': self.optimizer.state_dict(),\n 'monitor_best': self.monitor_best,\n 'config': self.config\n }\n if save_best:\n best_path = str(self.checkpoint_dir / 'model_best.pth')\n torch.save(state, best_path)\n self.logger_info(\"Saving current best: model_best.pth ...\")\n else:\n filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))\n torch.save(state, filename)\n self.logger_info(\"Saving checkpoint: {} ...\".format(filename))\n\n def _resume_checkpoint(self, resume_path):\n \"\"\"\n Resume from saved checkpoints\n :param resume_path: Checkpoint path to be resumed\n :return:\n \"\"\"\n resume_path = str(resume_path)\n self.logger_info(\"Loading checkpoint: {} ...\".format(resume_path))\n # map_location = {'cuda:%d' % 0: 'cuda:%d' % self.config['local_rank']}\n checkpoint = torch.load(resume_path, 
map_location=self.device)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n\n # load architecture params from checkpoint.\n if checkpoint['config']['model_arch'] != self.config['model_arch']:\n self.logger_warning(\"Warning: Architecture configuration given in config file is different from that of \"\n \"checkpoint. This may yield an exception while state_dict is being loaded.\")\n self.model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger_warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n \"Optimizer parameters not being resumed.\")\n else:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger_info(\"Checkpoint loaded. Resume training from epoch {}\".format(self.start_epoch))\n" ]
[ [ "numpy.sqrt", "torch.sum", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.load", "torch.distributed.get_world_size", "torch.distributed.get_rank", "torch.autograd.detect_anomaly", "torch.no_grad", "torch.save", "torch.cuda.device_count", "torch.distributed.barrier", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.is_available", "torch.distributed.all_reduce", "torch.utils.tensorboard.SummaryWriter", "torch.device", "torch.mean", "torch.cuda.set_device" ] ]
Toulik1729231/WebScraping1-Using-Python
[ "42562c66c905f925ea0848b8ae7dfbca6b5a1afd" ]
[ "scrap_players.py" ]
[ "import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom logger_impl import *\r\nimport MongoDao\r\nimport pandas as pd\r\nimport time\r\n\r\npayload = {'key': 'ac9e8cf2dec81949d9ee1235ed6ae3fb', 'url':\r\n'https://httpbin.org/ip'}\r\n\r\n\r\n\r\ndef scrapData(scorecardSoup, matchId, matchDesc, matchTypeText, pageUrl, season, Date, venue):\r\n\r\n #pageUrl = \"http://www.espncricinfo.com/series/11422/scorecard/858491/bangladesh-vs-pakistan-only-t20i-pakistan-tour-of-bangladesh-2015\"\r\n try:\r\n \"\"\"page = urllib.request.urlopen(pageUrl)\r\n\r\n ## get match-id and match-name from url\r\n pageUrlArr = pageUrl.split('/')\r\n matchId = pageUrlArr[len(pageUrlArr ) - 2]\r\n matchDesc = pageUrlArr[len(pageUrlArr ) - 1] \"\"\"\r\n #soup = BeautifulSoup(page, 'html.parser')\r\n soup = scorecardSoup\r\n\r\n #print(\"page html: \", soup.prettify())\r\n scorecardDiv = soup.find_all('article', class_='sub-module scorecard')\r\n playerBatsmanDict = {}\r\n playerBowlerDict = {}\r\n batsmanScorecardParam = ['run_scored', 'balls_faced','M', '4s', '6s', 'strike_rate']\r\n bowlerScorecardParam = ['O', 'M', 'R', 'W', 'Econ', 'WD', 'NB']\r\n\r\n teamList = []\r\n teamIDList = []\r\n inningsTeam = []\r\n## print(len(scorecardDiv))\r\n #creating playing team list\r\n for scorecardVal in scorecardDiv:\r\n #print(scorecardVal)\r\n team = scorecardVal.find('h2').get_text()\r\n if matchTypeText == 'Tests':\r\n team = str(team).replace('1st Innings', '').replace('2nd Innings', '')\r\n else:\r\n team = str(team).replace('Innings', '')\r\n if team.strip() in teamList:\r\n break\r\n teamList.append(team.strip())\r\n count = {teamList[0]:0,teamList[1]:0}\r\n\r\n for team in teamList:\r\n word = team.split(' ')\r\n if len(word) == 1:\r\n id_ = team[:3]\r\n teamIDList.append(id_)\r\n else:\r\n id_ = ''\r\n for x in word:\r\n id_ = id_ + x[0]\r\n teamIDList.append(id_)\r\n\r\n for scorecardVal in scorecardDiv:\r\n team = scorecardVal.find('h2').get_text()\r\n inn = ''\r\n if matchTypeText == 'Tests':\r\n inn = ' '.join(str(team).split(' ')[-2:])\r\n team = str(team).replace('1st Innings', '').replace('2nd Innings', '')\r\n else:\r\n team = str(team).replace('Innings', '')\r\n team = team.strip()\r\n count[team] += 1\r\n## print(count)\r\n logger.info(\"team: \" + team)\r\n #print(\"batsman div: \", scorecardVal)\r\n batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')\r\n batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')\r\n## for bt in batsmanListNotBatted:\r\n## print(bt.get('href'))\r\n## print(bt.get_text())\r\n for batsman in batsmanList:\r\n batsmanDict = {}\r\n #print(\"batsman data: \", batsman)\r\n batsmanAnchor = batsman.find('div', class_=\"cell batsmen\").find('a')\r\n batsmanLink = batsmanAnchor.get('href')\r\n batsmanName = batsmanAnchor.get_text()\r\n\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n #print(\"batsman Name: \", batsmanName, \" batsmanId: \", cricInfoBatsmanId)\r\n batsmanDict['short_name'] = batsmanName\r\n batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n \r\n\r\n #print(\"batsmanDiv: \", batsmanDiv.get_text())\r\n try:\r\n commentry = batsman.find('div', class_=\"cell commentary\").find('a').get_text()\r\n batsmanDict['commentry'] = commentry\r\n except AttributeError as ae:\r\n batsmanDict['commentry'] = ''\r\n\r\n #print(\"batsman commentry: \", commentry)\r\n 
#print(\"commentryDiv: \", commentryDiv.get_text())\r\n batsmanStatsList = batsman.find_all('div', class_=\"cell runs\")\r\n ctr = 0\r\n tempList = []\r\n for batsmanStats in batsmanStatsList:\r\n #print(\"anchor: \", batsmanStats.get_text())\r\n #param = batsmanScorecardParam[ctr]\r\n #ctr += 1\r\n #batsmanDict[param] = batsmanStats.get_text()\r\n tempList.append(batsmanStats.get_text())\r\n \r\n if len(tempList) == 6:\r\n batsmanDict['run_scored'] = tempList[0]\r\n batsmanDict['balls_faced'] = tempList[1]\r\n batsmanDict['M'] = tempList[2]\r\n batsmanDict['4s'] = tempList[3]\r\n batsmanDict['6s'] = tempList[4]\r\n batsmanDict['strike_rate'] = tempList[5]\r\n else:\r\n batsmanDict['run_scored'] = tempList[0]\r\n batsmanDict['balls_faced'] = tempList[1]\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = tempList[2]\r\n batsmanDict['6s'] = tempList[3]\r\n batsmanDict['strike_rate'] = tempList[4]\r\n\r\n \r\n \r\n batsmanDict['innings'] = inn\r\n key = cricInfoBatsmanId# + \"_\" + team\r\n if matchTypeText == 'Tests':\r\n key = key + inn[0]\r\n playerBatsmanDict[key] = batsmanDict\r\n \r\n #break\r\n## print(batsmanListNotBatted)\r\n\r\n for batsmen in batsmanListNotBatted:\r\n batsmanDict={}\r\n batsmanLink = batsmen.get('href')\r\n batsmanName = batsmen.get_text()\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n batsmanDict['short_name'] = batsmanName\r\n batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n batsmanDict['run_scored'] = '-'\r\n batsmanDict['balls_faced'] = '-'\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = '-'\r\n batsmanDict['6s'] = '-'\r\n batsmanDict['strike_rate'] = '-'\r\n batsmanDict['innings'] = inn\r\n key = cricInfoBatsmanId# + \"_\" + team\r\n #print('id : ',cricInfoBatsmanId)\r\n #print('key : ',key)\r\n #print(batsmanDict)\r\n if matchTypeText == 'Tests':\r\n key = key+inn[0]\r\n playerBatsmanDict[key] = batsmanDict\r\n #print('Dict added : ',playerBatsmanDict[key])\r\n\r\n bowlersTR = scorecardVal.find('tbody').find_all('tr')\r\n #print(\"bowler section: \", bowlersTR)\r\n for bowlerRow in bowlersTR:\r\n bowlersTD = bowlerRow.find_all('td')\r\n bowlerAnchor = bowlersTD[0].find('a')\r\n bowlerLink = bowlerAnchor.get('href')\r\n bowlerName = bowlerAnchor.get_text()\r\n #print(\"bowler name: \", bowlerName, \" link: \", bowlerLink)\r\n bowlerLinkArr = str(bowlerLink).split('/')\r\n cricInfoBowlerId = bowlerLinkArr[len(bowlerLinkArr) - 1]\r\n cricInfoBowlerId = str(cricInfoBowlerId).replace('.html', '')\r\n logger.info(\"bowlersTD: \" + str(bowlersTD))\r\n logger.info(\"length bowlersTD: \" + str(len(bowlersTD)))\r\n if len(bowlersTD) == 13:\r\n overs = bowlersTD[2].find(text=True)\r\n maidens = bowlersTD[3].find(text=True)\r\n runs = bowlersTD[4].find(text=True)\r\n wickets = bowlersTD[5].find(text=True)\r\n economy = bowlersTD[6].find(text=True)\r\n dotBalls = bowlersTD[7].find(text=True)\r\n ballerFours = bowlersTD[8].find(text=True)\r\n ballerSixes = bowlersTD[9].find(text=True)\r\n wideBalls = bowlersTD[10].find(text=True)\r\n noBalls = bowlersTD[11].find(text=True)\r\n \r\n else:\r\n overs = bowlersTD[2].find(text=True)\r\n maidens = bowlersTD[3].find(text=True)\r\n runs = bowlersTD[4].find(text=True)\r\n wickets = bowlersTD[5].find(text=True)\r\n economy = bowlersTD[6].find(text=True)\r\n dotBalls = 0\r\n ballerFours = 0\r\n ballerSixes = 0\r\n wideBalls = bowlersTD[7].find(text=True)\r\n 
noBalls = bowlersTD[8].find(text=True)\r\n \r\n## print('o'+overs)\r\n## print(maidens)\r\n## print(runs)\r\n## print(wickets)\r\n## print(economy)\r\n## print(dotBalls)\r\n## print(ballerFours)\r\n## print(ballerSixes)\r\n## print(wideBalls)\r\n## print(noBalls) \r\n \r\n \r\n #['O', 'M', 'R', 'W', 'Econ', 'WD', 'NB']\r\n bowlerDict = {}\r\n bowlerDict['short_name'] = bowlerName\r\n bowlerDict['player_cric_info_link'] = bowlerLink\r\n if '.' in overs:\r\n oversArr = overs.split('.')\r\n totalBalls: int = int(oversArr[0]) * 6\r\n totalBalls += int(oversArr[1])\r\n else:\r\n totalBalls: int = int(overs) * 6\r\n\r\n # getting the bowling team name\r\n if team == teamList[0]:\r\n bowlingTeam = teamList[1]\r\n else:\r\n bowlingTeam = teamList[0]\r\n\r\n bowlerDict['team'] = bowlingTeam\r\n bowlerDict['balls_bowled'] = totalBalls\r\n bowlerDict['maiden_overs'] = maidens\r\n bowlerDict['runs_given'] = runs\r\n bowlerDict['wicket'] = wickets\r\n bowlerDict['econ'] = economy\r\n bowlerDict['dot_delivery'] = dotBalls\r\n bowlerDict['four_delivery'] = ballerFours\r\n bowlerDict['six_delivery'] = ballerSixes\r\n bowlerDict['wide_balls'] = wideBalls\r\n bowlerDict['no_balls'] = noBalls\r\n bowlerDict['innings'] = inn\r\n #print(overs, maidens, runs, wickets, economy, wideBalls, noBalls)\r\n key = cricInfoBowlerId# + \"_\" + team\r\n if matchTypeText == 'Tests':\r\n key = key+inn[0]\r\n playerBowlerDict[key] = bowlerDict\r\n\r\n #print(\"batsmanDict: \", playerBatsmanDict)\r\n #print(\"bowlerDict: \", playerBowlerDict)\r\n\r\n if matchTypeText == 'Tests' and ((count[teamList[0]] == 2 and count[teamList[1]] == 1) or (count[teamList[0]] == 1 and count[teamList[1]] == 2)):\r\n # if \r\n missing = ''\r\n if count[teamList[0]] == 1:\r\n missing = teamList[0]\r\n elif count[teamList[1]] == 1:\r\n missing = teamList[1]\r\n\r\n for scorecardVal in scorecardDiv:\r\n team = scorecardVal.find('h2').get_text()\r\n inn = ' '.join(str(team).split(' ')[-2:])\r\n team = str(team).replace('1st Innings', '').replace('2nd Innings', '')\r\n team = team.strip()\r\n if team == missing:\r\n batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')\r\n batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')\r\n for batsman in batsmanList:\r\n batsmanDict = {}\r\n batsmanAnchor = batsman.find('div', class_=\"cell batsmen\").find('a')\r\n batsmanLink = batsmanAnchor.get('href')\r\n batsmanName = batsmanAnchor.get_text()\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n batsmanDict['short_name'] = batsmanName\r\n batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n batsmanDict['run_scored'] = '-'\r\n batsmanDict['balls_faced'] = '-'\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = '-'\r\n batsmanDict['6s'] = '-'\r\n batsmanDict['strike_rate'] = '-'\r\n batsmanDict['innings'] = '2nd Innings'\r\n## print(batsmanList)\r\n key = cricInfoBatsmanId\r\n batsmanDict['commentry'] = '-'\r\n if matchTypeText == 'Tests':\r\n key = key+'2'\r\n playerBatsmanDict[key] = batsmanDict\r\n\r\n for batsmen in batsmanListNotBatted:\r\n batsmanLink = batsmen.get('href')\r\n batsmanName = batsmen.get_text()\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n batsmanDict['short_name'] = batsmanName\r\n 
batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n batsmanDict['run_scored'] = '-'\r\n batsmanDict['balls_faced'] = '-'\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = '-'\r\n batsmanDict['6s'] = '-'\r\n batsmanDict['strike_rate'] = '-'\r\n batsmanDict['innings'] = '2nd Innings'\r\n key = cricInfoBatsmanId# + \"_\" + team\r\n if matchTypeText == 'Tests':\r\n key = key+'2'\r\n playerBatsmanDict[key] = batsmanDict\r\n \r\n # checking batsman in bowler map, if found add them in playerBatsmanDict\r\n if matchTypeText == 'Tests':\r\n for batsmanKey, batsmanValue in playerBatsmanDict.items():\r\n if batsmanKey in playerBowlerDict:\r\n if playerBatsmanDict[batsmanKey]['innings'] == playerBowlerDict[batsmanKey]['innings']:\r\n bowlerData = playerBowlerDict[batsmanKey]\r\n fianlDict = {**batsmanValue, **bowlerData}\r\n playerBatsmanDict[batsmanKey] = fianlDict\r\n del playerBowlerDict[batsmanKey]\r\n else: \r\n for batsmanKey, batsmanValue in playerBatsmanDict.items():\r\n if batsmanKey in playerBowlerDict:\r\n bowlerData = playerBowlerDict[batsmanKey]\r\n fianlDict = {**batsmanValue, **bowlerData}\r\n playerBatsmanDict[batsmanKey] = fianlDict\r\n del playerBowlerDict[batsmanKey]\r\n\r\n## print(\"after merging batsmanDict: \", playerBatsmanDict)\r\n## print(\"after merging bowlerDict: \", playerBowlerDict)\r\n playerFinalDict = {**playerBatsmanDict, **playerBowlerDict}\r\n\r\n## \r\n## print(\"Player final dict: \", playerFinalDict)\r\n \r\n ##TODO mark player as 'Batsman', 'Bowler', 'WicketKeeper', 'All rounder'\r\n pno = 0\r\n for playerKey, playerValue in playerFinalDict.items():\r\n flag = True\r\n while flag:\r\n try:\r\n pno+=1\r\n if pno <= 5:\r\n shortName = playerValue['short_name']\r\n playerDict = playerFinalDict[playerKey] \r\n if '†' in shortName:\r\n #checking for WicketKeeper positio\r\n playerDict['Position'] = \"WK\"\r\n elif 'econ' in playerDict:\r\n playerDict['Position'] = \"Bowler\"\r\n else:\r\n playerDict['Position'] = \"Batsman\"\r\n #print('Pno : ' + str(pno))\r\n playerDict['match_id'] = matchId + '_' + playerDict['innings'][:2]\r\n playerDict['match_desc'] = matchDesc\r\n playerDict['match_type_text'] = matchTypeText +' '+ playerDict['innings']\r\n playerDict['season'] = season\r\n playerDict['MatchURL'] = pageUrl\r\n playerDict['Match_start_Date'] = Date\r\n playerDict['Venue'] = venue\r\n if playerDict['team'] == teamList[0]:\r\n playerDict['TeamID'] = teamIDList[0]\r\n playerDict['OpponentID'] = teamIDList[1]\r\n else:\r\n playerDict['TeamID'] = teamIDList[1]\r\n playerDict['OpponentID'] = teamIDList[0]\r\n url = playerDict['player_cric_info_link']\r\n page = requests.get(url,params = payload).text\r\n soup = BeautifulSoup(page,'html.parser')\r\n pees = soup.find_all('p',class_='ciPlayerinformationtxt')\r\n val = []\r\n key = []\r\n for pee in pees:\r\n key.append(pee.find('b').get_text())\r\n val.append(pee.find('span').get_text())\r\n if \"Full name\" in key:\r\n playerDict['Player_Full_Name'] = val[key.index(\"Full name\")]\r\n else:\r\n playerDict['Player_Full_Name'] = '-'\r\n if 'Born' in key:\r\n playerDict['date,place_of_birth'] = val[key.index('Born')].replace('\\n','').strip()\r\n else:\r\n playerDict['date,place_of_birth'] = '-'\r\n if 'Nickname' in key:\r\n playerDict['Player_Nickname'] = val[key.index('Nickname')]\r\n else:\r\n playerDict['Player_Nickname'] = '-'\r\n \r\n \r\n ## playerDict['Player_Full_Name'] = data[0]\r\n ## playerDict['data,place_of_birth'] = data[1][1:]\r\n ## if data[4] == None:\r\n ## 
playerDict['Player_Nickname'] = '-'\r\n ## else:\r\n ## playerDict['Player_Nickname'] = data[4]\r\n \r\n\r\n #DOB_PlaceOB = soup.fin_next('p',class_='ciPlayerinformationtxt').find('span').get_text()\r\n \r\n \r\n \r\n # below adding missed parameters in player's dict with default 0 value\r\n if not 'run_scored' in playerDict:\r\n playerDict['run_scored'] = \"-\"\r\n\r\n if not 'balls_faced' in playerDict:\r\n playerDict['balls_faced'] = \"-\"\r\n\r\n if not 'strike_rate' in playerDict:\r\n playerDict['strike_rate'] = \"-\"\r\n\r\n if not 'balls_bowled' in playerDict:\r\n playerDict['balls_bowled'] = \"-\"\r\n\r\n if not 'maiden_overs' in playerDict:\r\n playerDict['maiden_overs'] = \"-\"\r\n if not 'runs_given' in playerDict:\r\n playerDict['runs_given'] = \"-\"\r\n if not 'wicket' in playerDict:\r\n playerDict['wicket'] = \"-\"\r\n if not 'econ' in playerDict:\r\n playerDict['econ'] = \"-\"\r\n if not 'wide_balls' in playerDict:\r\n playerDict['wide_balls'] = \"-\"\r\n if not 'no_balls' in playerDict:\r\n playerDict['no_balls'] = \"-\"\r\n flag = False\r\n else:\r\n pno = 0\r\n time.sleep(10)\r\n \r\n \r\n except Exception as e:\r\n print('pausing scrapping for 5 mins : '+str(e))\r\n time.sleep(300)\r\n flag = True\r\n \r\n\r\n \r\n # print(\"Player final dict 2: \", playerFinalDict)\r\n\r\n for key, val in playerFinalDict.items():\r\n val['cric_info_id'] = key\r\n val['_id'] = key + \"-\" + matchId\r\n #print(key)\r\n #MongoDao.insertToPlayerStats(val)\r\n \r\n\r\n logger.info(\"players inserted successfully for url: \" + pageUrl)\r\n #MongoDao.insertToProcessedUrls(pageUrl)\r\n #print(playerFinalDict.key())\r\n df = pd.DataFrame(playerFinalDict)\r\n return df\r\n\r\n except Exception as e:\r\n logger.error(\"ERROR while processing URL: \" + pageUrl)\r\n logger.exception(\"message\")\r\n print(\"Scrapping : \"+str(e))\r\n #print((\"ERROR while processing URL: \" + pageUrl))\r\n\r\n\r\n\r\n#scrapODI_T20Data('', '', '', \"T20\", '', '')\r\n" ]
[ [ "pandas.DataFrame" ] ]
portugueslab/scikit-image
[ "0fa3bcb118bb208a0cc7d3e8b96cd96c1ce7a75b" ]
[ "skimage/future/graph/graph_cut.py" ]
[ "try:\n import networkx as nx\nexcept ImportError:\n from ..._shared.utils import warn\n warn('RAGs require networkx')\nimport numpy as np\nfrom . import _ncut\nfrom . import _ncut_cy\nfrom scipy.sparse import linalg\n\n\ndef cut_threshold(labels, rag, thresh, in_place=True):\n \"\"\"Combine regions separated by weight less than threshold.\n\n Given an image's labels and its RAG, output new labels by\n combining regions whose nodes are separated by a weight less\n than the given threshold.\n\n Parameters\n ----------\n labels : ndarray\n The array of labels.\n rag : RAG\n The region adjacency graph.\n thresh : float\n The threshold. Regions connected by edges with smaller weights are\n combined.\n in_place : bool\n If set, modifies `rag` in place. The function will remove the edges\n with weights less that `thresh`. If set to `False` the function\n makes a copy of `rag` before proceeding.\n\n Returns\n -------\n out : ndarray\n The new labelled array.\n\n Examples\n --------\n >>> from skimage import data, segmentation\n >>> from skimage.future import graph\n >>> img = data.astronaut()\n >>> labels = segmentation.slic(img)\n >>> rag = graph.rag_mean_color(img, labels)\n >>> new_labels = graph.cut_threshold(labels, rag, 10)\n\n References\n ----------\n .. [1] Alain Tremeau and Philippe Colantoni\n \"Regions Adjacency Graph Applied To Color Image Segmentation\"\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274\n\n \"\"\"\n if not in_place:\n rag = rag.copy()\n\n # Because deleting edges while iterating through them produces an error.\n to_remove = [(x, y) for x, y, d in rag.edges(data=True)\n if d['weight'] >= thresh]\n rag.remove_edges_from(to_remove)\n\n comps = nx.connected_components(rag)\n\n # We construct an array which can map old labels to the new ones.\n # All the labels within a connected component are assigned to a single\n # label in the output.\n map_array = np.arange(labels.max() + 1, dtype=labels.dtype)\n for i, nodes in enumerate(comps):\n for node in nodes:\n for label in rag.node[node]['labels']:\n map_array[label] = i\n\n return map_array[labels]\n\n\ndef cut_normalized(labels, rag, thresh=0.001, num_cuts=10, in_place=True,\n max_edge=1.0):\n \"\"\"Perform Normalized Graph cut on the Region Adjacency Graph.\n\n Given an image's labels and its similarity RAG, recursively perform\n a 2-way normalized cut on it. All nodes belonging to a subgraph\n that cannot be cut further are assigned a unique label in the\n output.\n\n Parameters\n ----------\n labels : ndarray\n The array of labels.\n rag : RAG\n The region adjacency graph.\n thresh : float\n The threshold. A subgraph won't be further subdivided if the\n value of the N-cut exceeds `thresh`.\n num_cuts : int\n The number or N-cuts to perform before determining the optimal one.\n in_place : bool\n If set, modifies `rag` in place. For each node `n` the function will\n set a new attribute ``rag.node[n]['ncut label']``.\n max_edge : float, optional\n The maximum possible value of an edge in the RAG. This corresponds to\n an edge between identical regions. This is used to put self\n edges in the RAG.\n\n Returns\n -------\n out : ndarray\n The new labeled array.\n\n Examples\n --------\n >>> from skimage import data, segmentation\n >>> from skimage.future import graph\n >>> img = data.astronaut()\n >>> labels = segmentation.slic(img)\n >>> rag = graph.rag_mean_color(img, labels, mode='similarity')\n >>> new_labels = graph.cut_normalized(labels, rag)\n\n References\n ----------\n .. 
[1] Shi, J.; Malik, J., \"Normalized cuts and image segmentation\",\n Pattern Analysis and Machine Intelligence,\n IEEE Transactions on, vol. 22, no. 8, pp. 888-905, August 2000.\n\n \"\"\"\n if not in_place:\n rag = rag.copy()\n\n for node in rag.nodes():\n rag.add_edge(node, node, weight=max_edge)\n\n _ncut_relabel(rag, thresh, num_cuts)\n\n map_array = np.zeros(labels.max() + 1, dtype=labels.dtype)\n # Mapping from old labels to new\n for n, d in rag.nodes(data=True):\n map_array[d['labels']] = d['ncut label']\n\n return map_array[labels]\n\n\ndef partition_by_cut(cut, rag):\n \"\"\"Compute resulting subgraphs from given bi-parition.\n\n Parameters\n ----------\n cut : array\n A array of booleans. Elements set to `True` belong to one\n set.\n rag : RAG\n The Region Adjacency Graph.\n\n Returns\n -------\n sub1, sub2 : RAG\n The two resulting subgraphs from the bi-partition.\n \"\"\"\n # `cut` is derived from `D` and `W` matrices, which also follow the\n # ordering returned by `rag.nodes()` because we use\n # nx.to_scipy_sparse_matrix.\n\n # Example\n # rag.nodes() = [3, 7, 9, 13]\n # cut = [True, False, True, False]\n # nodes1 = [3, 9]\n # nodes2 = [7, 10]\n\n nodes1 = [n for i, n in enumerate(rag.nodes()) if cut[i]]\n nodes2 = [n for i, n in enumerate(rag.nodes()) if not cut[i]]\n\n sub1 = rag.subgraph(nodes1)\n sub2 = rag.subgraph(nodes2)\n\n return sub1, sub2\n\n\ndef get_min_ncut(ev, d, w, num_cuts):\n \"\"\"Threshold an eigenvector evenly, to determine minimum ncut.\n\n Parameters\n ----------\n ev : array\n The eigenvector to threshold.\n d : ndarray\n The diagonal matrix of the graph.\n w : ndarray\n The weight matrix of the graph.\n num_cuts : int\n The number of evenly spaced thresholds to check for.\n\n Returns\n -------\n mask : array\n The array of booleans which denotes the bi-partition.\n mcut : float\n The value of the minimum ncut.\n \"\"\"\n mcut = np.inf\n mn = ev.min()\n mx = ev.max()\n\n # If all values in `ev` are equal, it implies that the graph can't be\n # further sub-divided. In this case the bi-partition is the the graph\n # itself and an empty set.\n min_mask = np.zeros_like(ev, dtype=np.bool)\n if np.allclose(mn, mx):\n return min_mask, mcut\n\n # Refer Shi & Malik 2001, Section 3.1.3, Page 892\n # Perform evenly spaced n-cuts and determine the optimal one.\n for t in np.linspace(mn, mx, num_cuts, endpoint=False):\n mask = ev > t\n cost = _ncut.ncut_cost(mask, d, w)\n if cost < mcut:\n min_mask = mask\n mcut = cost\n\n return min_mask, mcut\n\n\ndef _label_all(rag, attr_name):\n \"\"\"Assign a unique integer to the given attribute in the RAG.\n\n This function assumes that all labels in `rag` are unique. It\n picks up a random label from them and assigns it to the `attr_name`\n attribute of all the nodes.\n\n rag : RAG\n The Region Adjacency Graph.\n attr_name : string\n The attribute to which a unique integer is assigned.\n \"\"\"\n node = min(rag.nodes())\n new_label = rag.node[node]['labels'][0]\n for n, d in rag.nodes(data=True):\n d[attr_name] = new_label\n\n\ndef _ncut_relabel(rag, thresh, num_cuts):\n \"\"\"Perform Normalized Graph cut on the Region Adjacency Graph.\n\n Recursively partition the graph into 2, until further subdivision\n yields a cut greater than `thresh` or such a cut cannot be computed.\n For such a subgraph, indices to labels of all its nodes map to a single\n unique value.\n\n Parameters\n ----------\n labels : ndarray\n The array of labels.\n rag : RAG\n The region adjacency graph.\n thresh : float\n The threshold. 
A subgraph won't be further subdivided if the\n value of the N-cut exceeds `thresh`.\n num_cuts : int\n The number or N-cuts to perform before determining the optimal one.\n map_array : array\n The array which maps old labels to new ones. This is modified inside\n the function.\n \"\"\"\n d, w = _ncut.DW_matrices(rag)\n m = w.shape[0]\n\n if m > 2:\n d2 = d.copy()\n # Since d is diagonal, we can directly operate on its data\n # the inverse of the square root\n d2.data = np.reciprocal(np.sqrt(d2.data, out=d2.data), out=d2.data)\n\n # Refer Shi & Malik 2001, Equation 7, Page 891\n vals, vectors = linalg.eigsh(d2 * (d - w) * d2, which='SM',\n k=min(100, m - 2))\n\n # Pick second smallest eigenvector.\n # Refer Shi & Malik 2001, Section 3.2.3, Page 893\n vals, vectors = np.real(vals), np.real(vectors)\n index2 = _ncut_cy.argmin2(vals)\n ev = vectors[:, index2]\n\n cut_mask, mcut = get_min_ncut(ev, d, w, num_cuts)\n if (mcut < thresh):\n # Sub divide and perform N-cut again\n # Refer Shi & Malik 2001, Section 3.2.5, Page 893\n sub1, sub2 = partition_by_cut(cut_mask, rag)\n\n _ncut_relabel(sub1, thresh, num_cuts)\n _ncut_relabel(sub2, thresh, num_cuts)\n return\n\n # The N-cut wasn't small enough, or could not be computed.\n # The remaining graph is a region.\n # Assign `ncut label` by picking any label from the existing nodes, since\n # `labels` are unique, `new_label` is also unique.\n _label_all(rag, 'ncut label')\n" ]
[ [ "numpy.zeros_like", "numpy.allclose", "numpy.sqrt", "numpy.linspace", "numpy.real" ] ]
ibianka/HARK
[ "8678dbab0a0ace1520ac8f7ff5b33765122619f4" ]
[ "DCT-Copula-Illustration.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Dimensionality Reduction in [Bayer and Luetticke (2018)](https://cepr.org/active/publications/discussion_papers/dp.php?dpno=13071)\n#\n# [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/econ-ark/HARK/BayerLuetticke?filepath=HARK%2FBayerLuetticke%2FDCT-Copula-Illustration.ipynb)\n#\n# This companion to the [main notebook](TwoAsset.ipynb) explains in more detail how the authors reduce the dimensionality of their problem\n#\n# - Based on original slides by Christian Bayer and Ralph Luetticke \n# - Original Jupyter notebook by Seungcheol Lee \n# - Further edits by Chris Carroll, Tao Wang \n#\n\n# %% [markdown]\n# ### Preliminaries\n#\n# In Steady-state Equilibrium (StE) in the model, in any given period, a consumer in state $s$ (which comprises liquid assets $m$, illiquid assets $k$, and human capital $\\newcommand{hLev}{h}\\hLev$) has two key choices:\n# 1. To adjust ('a') or not adjust ('n') their holdings of illiquid assets $k$\n# 1. Contingent on that choice, decide the level of consumption, yielding consumption functions:\n# * $c_n(s)$ - nonadjusters\n# * $c_a(s)$ - adjusters\n#\n# The usual envelope theorem applies here, so marginal value wrt the liquid asset equals marginal utility with respect to consumption:\n# $[\\frac{d v}{d m} = \\frac{d u}{d c}]$.\n# In practice, the authors solve their problem using the marginal value of money $\\texttt{Vm} = dv/dm$, but because the marginal utility function is invertible it is trivial to recover $\\texttt{c}$ from $(u^{\\prime})^{-1}(\\texttt{Vm} )$. 
The consumption function is therefore computed from the $\\texttt{Vm}$ function\n\n# %% {\"code_folding\": [0]}\n# Setup stuff\n\n# This is a jupytext paired notebook that autogenerates a corresponding .py file\n# which can be executed from a terminal command line via \"ipython [name].py\"\n# But a terminal does not permit inline figures, so we need to test jupyter vs terminal\n# Google \"how can I check if code is executed in the ipython notebook\"\ndef in_ipynb():\n try:\n if str(type(get_ipython())) == \"<class 'ipykernel.zmqshell.ZMQInteractiveShell'>\":\n return True\n else:\n return False\n except NameError:\n return False\n\n# Determine whether to make the figures inline (for spyder or jupyter)\n# vs whatever is the automatic setting that will apply if run from the terminal\nif in_ipynb():\n # %matplotlib inline generates a syntax error when run from the shell\n # so do this instead\n get_ipython().run_line_magic('matplotlib', 'inline') \nelse:\n get_ipython().run_line_magic('matplotlib', 'auto') \n \n# The tools for navigating the filesystem\nimport sys\nimport os\n\n# Find pathname to this file:\nmy_file_path = os.path.dirname(os.path.abspath(\"TwoAsset.ipynb\"))\n\n# Relative directory for pickled code\ncode_dir = os.path.join(my_file_path, \"BayerLuetticke_code/TwoAssetCode\") \n\nsys.path.insert(0, code_dir)\nsys.path.insert(0, my_file_path)\n\n# %% {\"code_folding\": []}\n# Load precalculated Stationary Equilibrium (StE) object EX3SS\n\nimport pickle\nos.chdir(code_dir) # Go to the directory with pickled code\n\n## EX3SS_20.p is the information in the stationary equilibrium \n## (20: the number of illiquid and liquid weath gridpoints)\n### The comments above are original, but it seems that there are 30 not 20 points now\n\nEX3SS=pickle.load(open(\"EX3SS_20.p\", \"rb\"))\n\n# %% [markdown]\n# ### Dimensions\n#\n# The imported StE solution to the problem represents the functions at a set of gridpoints of\n# * liquid assets ($n_m$ points), illiquid assets ($n_k$), and human capital ($n_h$)\n# * In the code these are $\\{\\texttt{nm,nk,nh}\\}$\n#\n# So even if the grids are fairly sparse for each state variable, the total number of combinations of the idiosyncratic state gridpoints is large: $n = n_m \\times n_k \\times n_h$. So, e.g., $\\bar{c}$ is a set of size $n$ containing the level of consumption at each possible _combination_ of gridpoints.\n#\n# In the \"real\" micro problem, it would almost never happen that a continuous variable like $m$ would end up being exactly equal to one of the prespecified gridpoints. But the functions need to be evaluated at such non-grid points. This is addressed by linear interpolation. 
That is, if, say, the grid had $m_{8} = 40$ and $m_{9} = 50$ then and a consumer ended up with $m = 45$ then the approximation is that $\\tilde{c}(45) = 0.5 \\bar{c}_{8} + 0.5 \\bar{c}_{9}$.\n#\n\n# %% {\"code_folding\": []}\n# Show dimensions of the consumer's problem (state space)\n\nprint('c_n is of dimension: ' + str(EX3SS['mutil_c_n'].shape))\nprint('c_a is of dimension: ' + str(EX3SS['mutil_c_a'].shape))\n\nprint('Vk is of dimension:' + str(EX3SS['Vk'].shape))\nprint('Vm is of dimension:' + str(EX3SS['Vm'].shape))\n\nprint('For convenience, these are all constructed from the same exogenous grids:')\nprint(str(len(EX3SS['grid']['m']))+' gridpoints for liquid assets;')\nprint(str(len(EX3SS['grid']['k']))+' gridpoints for illiquid assets;')\nprint(str(len(EX3SS['grid']['h']))+' gridpoints for individual productivity.')\nprint('')\nprint('Therefore, the joint distribution is of size: ')\nprint(str(EX3SS['mpar']['nm'])+\n ' * '+str(EX3SS['mpar']['nk'])+\n ' * '+str(EX3SS['mpar']['nh'])+\n ' = '+ str(EX3SS['mpar']['nm']*EX3SS['mpar']['nk']*EX3SS['mpar']['nh']))\n\n\n# %% [markdown]\n# ### Dimension Reduction\n#\n# The authors use different dimensionality reduction methods for the consumer's problem and the distribution across idiosyncratic states\n\n# %% [markdown]\n# #### Representing the consumer's problem with Basis Functions\n#\n# The idea is to find an efficient \"compressed\" representation of our functions (e.g., the consumption function), which BL do using tools originally developed for image compression. The analogy to image compression is that nearby pixels are likely to have identical or very similar colors, so we need only to find an efficient way to represent how the colors _change_ from one pixel to nearby ones. Similarly, consumption at a given point $s_{i}$ is likely to be close to consumption point at another point $s_{j}$ that is \"close\" in the state space (similar wealth, income, etc), so a function that captures that similarity efficiently can preserve most of the information without keeping all of the points.\n#\n# Like linear interpolation, the [DCT transformation](https://en.wikipedia.org/wiki/Discrete_cosine_transform) is a method of representing a continuous function using a finite set of numbers. It uses a set of independent [basis functions](https://en.wikipedia.org/wiki/Basis_function) to do this.\n#\n# But it turns out that some of those basis functions are much more important than others in representing the steady-state functions. Dimension reduction is accomplished by basically ignoring all basis functions that make \"small enough\" contributions to the representation of the function. \n#\n# ##### When might this go wrong?\n#\n# Suppose the consumption function changes in a recession in ways that change behavior radically at some states. Like, suppose unemployment almost never happens in steady state, but it can happen in temporary recessions. Suppose further that, even for employed people, in a recession, _worries_ about unemployment cause many of them to prudently withdraw some of their illiquid assets -- behavior opposite of what people in the same state would be doing during expansions. 
In that case, the basis functions that represented the steady state function would have had no incentive to be able to represent well the part of the space that is never seen in steady state, so any functions that might help do so might well have been dropped in the dimension reduction stage.\n#\n# On the whole, it seems unlikely that this kind of thing is a major problem, because the vast majority of the variation that people experience is idiosyncratic. There is always unemployment, for example; it just moves up and down a bit with aggregate shocks, but since the experience of unemployment is in fact well represented in the steady state the method should have no trouble capturing it.\n#\n# Where the method might have more trouble is in representing economies in which there are multiple equilibria in which behavior is quite different.\n\n# %% [markdown]\n# #### For the distribution of agents across states: Copula\n#\n# The other tool the authors use is the [\"copula\"](https://en.wikipedia.org/wiki/Copula_(probability_theory)), which allows us to represent the distribution of people across idiosyncratic states efficiently\n#\n# The copula is computed from the joint distribution of states in StE and will be used to transform the [marginal distributions](https://en.wikipedia.org/wiki/Marginal_distribution) back to joint distributions. (For an illustration of how the assumptions used when modeling asset price distributions using copulas can fail see [Salmon](https://www.wired.com/2009/02/wp-quant/))\n#\n# * A copula is a representation of the joint distribution expressed using a mapping between the uniform joint CDF and the marginal distributions of the variables\n# \n# * The crucial assumption is that what aggregate shocks do is to squeeze or distort the steady state distribution, but leave the rank structure of the distribution the same\n# * An example of when this might not hold is the following. Suppose that in expansions, the people at the top of the distribution of illiquid assets (the top 1 percent, say) are also at the top 1 percent of liquid assets. But in recessions the bottom 99 percent get angry at the top 1 percent of illiquid asset holders and confiscate part of their liquid assets (the illiquid assets can't be confiscated quickly because they are illiquid). Now the people in the top 99 percent of illiquid assets might be in the _bottom_ 1 percent of liquid assets.\n# \n# - In this case we just need to represent how the mapping from ranks into levels of assets\n#\n# - This reduces the number of points for which we need to track transitions from $3600 = 30 \\times 30 \\times 4$ to $64 = 30+30+4$. Or the total number of points we need to contemplate goes from $3600^2 \\approx 13 $million to $64^2=4096$. 
\n\n# %% {\"code_folding\": []}\n# Get some specs about the copula, which is precomputed in the EX3SS object\n\nprint('The copula consists of two parts: gridpoints and values at those gridpoints:'+ \\\n '\\n gridpoints have dimensionality of '+str(EX3SS['Copula']['grid'].shape) + \\\n '\\n where the first element is total number of gridpoints' + \\\n '\\n and the second element is number of idiosyncratic state variables' + \\\n '\\n whose values also are of dimension of '+str(EX3SS['Copula']['value'].shape[0]) + \\\n '\\n each entry of which is the probability that all three of the'\n '\\n state variables are below the corresponding point.')\n\n\n# %% {\"code_folding\": []}\n## Import necessary libraries\n\nfrom __future__ import print_function\nimport sys \nsys.path.insert(0,'../')\n\nimport numpy as np\nfrom numpy.linalg import matrix_rank\nimport scipy as sc\nfrom scipy.stats import norm \nfrom scipy.interpolate import interp1d, interp2d, griddata, RegularGridInterpolator, interpn\nimport multiprocessing as mp\nfrom multiprocessing import Pool, cpu_count, Process\nfrom math import ceil\nimport math as mt\nfrom scipy import sparse as sp # used to work with sparse matrices\nfrom scipy import linalg #linear algebra \nfrom math import log, cos, pi, sqrt\nimport time\nfrom SharedFunc3 import Transition, ExTransitions, GenWeight, MakeGridkm, Tauchen, Fastroot\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport scipy.io #scipy input and output\nimport scipy.fftpack as sf # scipy discrete fourier transforms\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom matplotlib import cm\n\nimport seaborn as sns\n\nimport copy as cp\n\n\n# %% {\"code_folding\": []}\n## State reduction and discrete cosine transformation\n\nclass StateReduc_Dct:\n \n def __init__(self, par, mpar, grid, Output, targets, Vm, Vk, \n joint_distr, Copula, c_n_guess, c_a_guess, psi_guess,\n m_n_star, m_a_star, cap_a_star, mutil_c_n, mutil_c_a,mutil_c, P_H):\n \n self.par = par # Parameters of the theoretical model\n self.mpar = mpar # Parameters of the numerical representation\n self.grid = grid # Discrete grid\n self.Output = Output # Results of the calculations\n self.targets = targets # Like, debt-to-GDP ratio or other desiderata\n self.Vm = Vm # Marginal value from liquid cash-on-hand\n self.Vk = Vk # Marginal value of capital\n self.joint_distr = joint_distr # Multidimensional histogram\n self.Copula = Copula # Encodes rank marginal correlation of joint distribution\n self.mutil_c = mutil_c # Marginal utility of consumption\n self.P_H = P_H # Transition matrix for macro states (not including distribution)\n \n \n def StateReduc(self):\n \"\"\"\n input\n -----\n self: dict, stored results from a StE \n \n output\n ------\n Newly generated\n ===============\n X_ss: ndarray, stacked states, including \n Y_ss: ndarray, controls \n Gamma_state: ndarray, marginal distributions of individual states \n grid: ndarray, discrete grids\n targets: ndarray, debt-to-GDP ratio or other desiderata\n P_H: transition probability of\n indexMUdct: ndarray, indices selected after dct operation on marginal utility of consumption\n indexVKdct: ndarray, indices selected after dct operation on marginal value of capital\n State: ndarray, dimension equal to reduced states\n State_m: ndarray, dimension equal to reduced states\n Contr: ndarray, dimension equal to reduced controls\n Contr_m: ndarray, dimension equal to reduced controls\n \n Passed down from the 
input\n ==========================\n Copula: dict, grids and values\n joint_distr: ndarray, nk x nm x nh\n Output: dict, outputs from the model \n par: dict, parameters of the theoretical model\n mpar:dict, parameters of the numerical representation\n aggrshock: string, type of aggregate shock used to purturb the StE \n \"\"\"\n \n # Inverse of CRRA on x for utility and marginal utility\n invutil = lambda x : ((1-self.par['xi'])*x)**(1./(1-self.par['xi'])) \n invmutil = lambda x : (1./x)**(1./self.par['xi']) \n \n # X=States\n # Marg dist of liquid assets summing over pty and illiquid assets k\n Xss=np.asmatrix(np.concatenate((np.sum(np.sum(self.joint_distr.copy(),axis=1),axis =1), \n np.transpose(np.sum(np.sum(self.joint_distr.copy(),axis=0),axis=1)),# marg dist k\n np.sum(np.sum(self.joint_distr.copy(),axis=1),axis=0), # marg dist pty (\\approx income)\n [np.log(self.par['RB'])],[ 0.]))).T # Given the constant interest rate\n \n # Y=\"controls\" (according to this literature's odd terminology)\n # c = invmarg(marg(c)), so first bit gets consumption policy function\n Yss=np.asmatrix(np.concatenate((invmutil(self.mutil_c.copy().flatten(order = 'F')),\\\n invmutil(self.Vk.copy().flatten(order = 'F')),\n [np.log(self.par['Q'])], # Question: Price of the illiquid asset, right?\n [ np.log(self.par['PI'])], # Inflation\n [ np.log(self.Output)], \n [np.log(self.par['G'])], # Gov spending\n [np.log(self.par['W'])], # Wage\n [np.log(self.par['R'])], # Nominal R\n [np.log(self.par['PROFITS'])], \n [np.log(self.par['N'])], # Hours worked\n [np.log(self.targets['T'])], # Taxes\n [np.log(self.grid['K'])], # Kapital\n [np.log(self.targets['B'])]))).T # Government debt\n \n # Mapping for Histogram\n # Gamma_state matrix reduced set of states\n # nm = number of gridpoints for liquid assets\n # nk = number of gridpoints for illiquid assets\n # nh = number of gridpoints for human capital (pty)\n Gamma_state = np.zeros( # Create zero matrix of size [nm + nk + nh,nm + nk + nh - 4]\n (self.mpar['nm']+self.mpar['nk']+self.mpar['nh'],\n self.mpar['nm']+self.mpar['nk']+self.mpar['nh'] - 4)) \n # Question: Why 4? 4 = 3+1, 3: sum to 1 for m, k, h and 1: for entrepreneurs \n\n # Impose adding-up conditions: \n # In each of the block matrices, probabilities must add to 1\n \n for j in range(self.mpar['nm']-1): # np.squeeze reduces one-dimensional matrix to vector\n Gamma_state[0:self.mpar['nm'],j] = -np.squeeze(Xss[0:self.mpar['nm']])\n Gamma_state[j,j]=1. - Xss[j] # \n Gamma_state[j,j]=Gamma_state[j,j] - np.sum(Gamma_state[0:self.mpar['nm'],j])\n bb = self.mpar['nm'] # Question: bb='bottom base'? because bb shorter to type than self.mpar['nm'] everywhere\n\n for j in range(self.mpar['nk']-1):\n Gamma_state[bb+np.arange(0,self.mpar['nk'],1), bb+j-1] = -np.squeeze(Xss[bb+np.arange(0,self.mpar['nk'],1)])\n Gamma_state[bb+j,bb-1+j] = 1. - Xss[bb+j] \n Gamma_state[bb+j,bb-1+j] = (Gamma_state[bb+j,bb-1+j] - \n np.sum(Gamma_state[bb+np.arange(0,self.mpar['nk']),bb-1+j]))\n bb = self.mpar['nm'] + self.mpar['nk']\n\n for j in range(self.mpar['nh']-2): \n # Question: Why -2? 1 for h sum to 1 and 1 for entrepreneur Some other symmetry/adding-up condition.\n Gamma_state[bb+np.arange(0,self.mpar['nh']-1,1), bb+j-2] = -np.squeeze(Xss[bb+np.arange(0,self.mpar['nh']-1,1)])\n Gamma_state[bb+j,bb-2+j] = 1. 
- Xss[bb+j]\n Gamma_state[bb+j,bb-2+j] = Gamma_state[bb+j,bb-2+j] - np.sum(Gamma_state[bb+np.arange(0,self.mpar['nh']-1,1),bb-2+j])\n\n # Number of other state variables not including the gridded -- here, just the interest rate \n self.mpar['os'] = len(Xss) - (self.mpar['nm']+self.mpar['nk']+self.mpar['nh'])\n # For each gridpoint there are two \"regular\" controls: consumption and illiquid saving\n # Counts the number of \"other\" controls (PROFITS, Q, etc)\n self.mpar['oc'] = len(Yss) - 2*(self.mpar['nm']*self.mpar['nk']*self.mpar['nh'])\n \n aggrshock = self.par['aggrshock']\n accuracy = self.par['accuracy']\n \n # Do the dct on the steady state marginal utility\n # Returns an array of indices for the used basis vectors\n indexMUdct = self.do_dct(invmutil(self.mutil_c.copy().flatten(order='F')),\n self.mpar,accuracy)\n\n # Do the dct on the steady state marginal value of capital\n # Returns an array of indices for the used basis vectors\n indexVKdct = self.do_dct(invmutil(self.Vk.copy()),self.mpar,accuracy)\n \n # Calculate the numbers of states and controls\n aux = np.shape(Gamma_state)\n self.mpar['numstates'] = np.int64(aux[1] + self.mpar['os'])\n self.mpar['numcontrols'] = np.int64(len(indexMUdct) + \n len(indexVKdct) + \n self.mpar['oc'])\n \n # Size of the reduced matrices to be used in the Fsys\n # Set to zero because in steady state they are zero\n State = np.zeros((self.mpar['numstates'],1))\n State_m = State\n Contr = np.zeros((self.mpar['numcontrols'],1))\n Contr_m = Contr\n \n return {'Xss': Xss, 'Yss':Yss, 'Gamma_state': Gamma_state, \n 'par':self.par, 'mpar':self.mpar, 'aggrshock':aggrshock,\n 'Copula':self.Copula,'grid':self.grid,'targets':self.targets,'P_H':self.P_H, \n 'joint_distr': self.joint_distr, 'Output': self.Output, 'indexMUdct':indexMUdct, 'indexVKdct':indexVKdct,\n 'State':State, 'State_m':State_m, 'Contr':Contr, 'Contr_m':Contr_m}\n\n # Discrete cosine transformation magic happens here\n # sf is scipy.fftpack tool\n def do_dct(self, obj, mpar, level):\n \"\"\"\n input\n -----\n obj: ndarray nm x nk x nh \n dimension of states before dct \n mpar: dict\n parameters in the numerical representaion of the model, e.g. 
nm, nk and nh\n level: float \n accuracy level for dct \n output\n ------\n index_reduced: ndarray n_dct x 1 \n an array of indices that select the needed grids after dct\n \n \"\"\"\n obj = np.reshape(obj.copy(),(mpar['nm'],mpar['nk'],mpar['nh']),order='F')\n X1 = sf.dct(obj,norm='ortho',axis=0) # dct is operated along three dimensions axis=0/1/2\n X2 = sf.dct(X1.copy(),norm='ortho',axis=1)\n X3 = sf.dct(X2.copy(),norm='ortho',axis=2)\n\n # Pick the coefficients that are big\n XX = X3.flatten(order='F') \n ind = np.argsort(abs(XX.copy()))[::-1]\n # i will \n i = 1 \n # Sort from smallest (=best) to biggest (=worst)\n # and count how many are 'good enough to keep'\n while linalg.norm(XX[ind[:i]].copy())/linalg.norm(XX) < level:\n i += 1 \n \n needed = i # Question:Isn't this counting the ones that are NOT needed?\n \n index_reduced = np.sort(ind[:i]) # Retrieve the good \n \n return index_reduced\n\n# %% {\"code_folding\": []}\n## Choose an aggregate shock to perturb(one of three shocks: MP, TFP, Uncertainty)\n\nEX3SS['par']['aggrshock'] = 'MP'\nEX3SS['par']['rhoS'] = 0.0 # Persistence of variance\nEX3SS['par']['sigmaS'] = 0.001 # STD of variance shocks\n\n#EX3SS['par']['aggrshock'] = 'TFP'\n#EX3SS['par']['rhoS'] = 0.95\n#EX3SS['par']['sigmaS'] = 0.0075\n \n#EX3SS['par']['aggrshock'] = 'Uncertainty'\n#EX3SS['par']['rhoS'] = 0.84 # Persistence of variance\n#EX3SS['par']['sigmaS'] = 0.54 # STD of variance shocks\n\n# %% {\"code_folding\": []}\n## Choose an accuracy of approximation with DCT\n### Determines number of basis functions chosen -- enough to match this accuracy\n### EX3SS is precomputed steady-state pulled in above\nEX3SS['par']['accuracy'] = 0.99999 \n\n# %% {\"code_folding\": []}\n## Implement state reduction and DCT\n### Do state reduction on steady state\nEX3SR=StateReduc_Dct(**EX3SS) # Takes StE result as input and get ready to invoke state reduction operation\nSR=EX3SR.StateReduc() # StateReduc is operated \n\n# %% {\"code_folding\": [0]}\n# Measuring the effectiveness of the state reduction\n\nprint('What are the results from the state reduction?')\n#print('Newly added attributes after the operation include \\n'+str(set(SR.keys())-set(EX3SS.keys())))\n\nprint('\\n')\n\nprint('To achieve an accuracy of '+str(EX3SS['par']['accuracy'])+'\\n') \n\nprint('The dimension of the policy functions is reduced to '+str(SR['indexMUdct'].shape[0]) \\\n +' from '+str(EX3SS['mpar']['nm']*EX3SS['mpar']['nk']*EX3SS['mpar']['nh'])\n )\nprint('The dimension of the marginal value functions is reduced to '+str(SR['indexVKdct'].shape[0]) \\\n + ' from ' + str(EX3SS['Vk'].shape))\nprint('The total number of control variables is '+str(SR['Contr'].shape[0])+'='+str(SR['indexMUdct'].shape[0]) + \\\n '+'+str(SR['indexVKdct'].shape[0])+'+ # of other macro controls')\nprint('\\n')\nprint('The copula represents the joint distribution with a vector of size '+str(SR['Gamma_state'].shape) )\nprint('The dimension of states including exogenous state, is ' +str(SR['Xss'].shape[0]))\n\nprint('It simply stacks all grids of different\\\n \\n state variables regardless of their joint distributions.\\\n \\n This is due to the assumption that the rank order remains the same.')\nprint('The total number of state variables is '+str(SR['State'].shape[0]) + '='+\\\n str(SR['Gamma_state'].shape[1])+'+ the number of macro states (like the interest rate)')\n\n\n# %% [markdown]\n# ### Graphical Illustration\n#\n# #### Policy/value functions\n#\n# Taking the consumption function as an example, we plot consumption by adjusters 
and non-adjusters over a range of $k$ and $m$ that encompasses x percent of the mass of the distribution function. \n#\n# We plot the functions for the top and bottom values of the wage $h$ distribution\n#\n\n# %% {\"code_folding\": []}\n## Graphical illustration\n\nxi = EX3SS['par']['xi']\ninvmutil = lambda x : (1./x)**(1./xi) \n\n### convert marginal utilities back to consumption function\nmut_StE = EX3SS['mutil_c']\nmut_n_StE = EX3SS['mutil_c_n'] # marginal utility of non-adjusters\nmut_a_StE = EX3SS['mutil_c_a'] # marginal utility of adjusters \n\nc_StE = invmutil(mut_StE)\ncn_StE = invmutil(mut_n_StE)\nca_StE = invmutil(mut_a_StE)\n\n\n### grid values \ndim_StE = mut_StE.shape\nmgrid = EX3SS['grid']['m']\nkgrid = EX3SS['grid']['k']\nhgrid = EX3SS['grid']['h']\n\n\n# %% {\"code_folding\": []}\n## define some functions to be used next\n\ndef dct3d(x):\n x0=sf.dct(x.copy(),axis=0,norm='ortho')\n x1=sf.dct(x0.copy(),axis=1,norm='ortho')\n x2=sf.dct(x1.copy(),axis=2,norm='ortho')\n return x2\n\ndef idct3d(x):\n x2 = sf.idct(x.copy(),axis=2,norm='ortho')\n x1 = sf.idct(x2.copy(),axis=1,norm='ortho')\n x0 = sf.idct(x1.copy(),axis=0,norm='ortho') \n return x0\n\ndef DCTApprox(fullgrids,dct_index):\n dim=fullgrids.shape\n dctcoefs = dct3d(fullgrids)\n dctcoefs_rdc = np.zeros(dim)\n dctcoefs_rdc[dct_index]=dctcoefs[dct_index]\n approxgrids = idct3d(dctcoefs_rdc)\n return approxgrids\n\n# %% [markdown]\n# Depending on the accuracy level, the DCT operation choses the necessary number of basis functions used to approximate consumption function at the full grids. This is illustrated in the p31-p34 in this [slides](https://www.dropbox.com/s/46fdxh0aphazm71/presentation_method.pdf?dl=0). We show this for both 1-dimensional (m or k) or 2-dimenstional grids (m and k) in the following. 
\n\n# %% {\"code_folding\": []}\n## 2D graph of consumption function: c(m) fixing k and h\n\n\n## list of accuracy levels \nAccuracy_BL = 0.99999 # From BL\nAccuracy_Less0 = 0.999\nAccuracy_Less1 = 0.99\nAccuracy_Less2 = 0.95\n\nacc_lst = np.array([Accuracy_BL,Accuracy_Less0,Accuracy_Less1,Accuracy_Less2])\n\n## c(m) fixing k and h\nfig = plt.figure(figsize=(8,8))\nfig.suptitle('c at full grids and c approximated by DCT in different accuracy levels' \n '\\n non-adjusters, fixing k and h',\n fontsize=(13))\nfig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)\n\nfor idx in range(len(acc_lst)):\n EX3SS_cp =cp.deepcopy(EX3SS) \n EX3SS_cp['par']['accuracy'] = acc_lst[idx]\n EX3SR_cp=StateReduc_Dct(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation\n SR_cp=EX3SR_cp.StateReduc()\n mut_rdc_idx_flt_cp = SR_cp['indexMUdct']\n mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')\n nb_bf_cp = len(mut_rdc_idx_cp[0])\n print(str(nb_bf_cp) +\" basis functions used.\")\n c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)\n c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)\n cn_diff_cp = c_n_approx_cp-cn_StE\n \n # choose the fix grid of h and k\n hgrid_fix=2 # fix level of h as an example \n kgrid_fix=10 # fix level of k as an example\n \n # get the corresponding c function approximated by dct\n cVec = c_a_approx_cp[:,kgrid_fix,hgrid_fix]\n \n ## plots \n ax = fig.add_subplot(2,2,idx+1)\n ax.plot(mgrid,cVec,label='c approximated by DCT')\n ax.plot(mgrid,ca_StE[:,kgrid_fix,hgrid_fix],'--',label='c at full grids')\n ax.plot(mgrid,cVec,'r*')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel(r'$c(m)$',fontsize=13)\n ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))\n ax.legend(loc=0)\n\n# %% {\"code_folding\": []}\n## 2D graph of consumption function: c(k) fixing m and h\n\nfig = plt.figure(figsize=(8,8))\nfig.suptitle('c at full grids and c approximated by DCT in different accuracy levels' \n '\\n non-adjusters, fixing m and h',\n fontsize=(13))\nfig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)\n\nfor idx in range(len(acc_lst)):\n EX3SS_cp =cp.deepcopy(EX3SS)\n EX3SS_cp['par']['accuracy'] = acc_lst[idx]\n EX3SR_cp=StateReduc_Dct(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation\n SR_cp=EX3SR_cp.StateReduc()\n mut_rdc_idx_flt_cp= SR_cp['indexMUdct']\n mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')\n nb_bf_cp = len(mut_rdc_idx_cp[0])\n print(str(nb_bf_cp) +\" basis functions used.\")\n c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)\n c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)\n cn_diff_cp = c_n_approx_cp-cn_StE\n \n # choose the fix grid of h and m \n hgrid_fix=2 # fix level of h as an example \n mgrid_fix=10 # fix level of k as an example\n \n # get the corresponding c function approximated by dct\n cVec = c_n_approx_cp[mgrid_fix,:,hgrid_fix]\n\n ## plots \n ax = fig.add_subplot(2,2,idx+1)\n ax.plot(kgrid,cVec,label='c approximated by DCT')\n ax.plot(kgrid,cn_StE[mgrid_fix,:,hgrid_fix],'--',label='c at full grids')\n ax.plot(kgrid,cVec,'r*')\n ax.set_xlabel('k',fontsize=13)\n ax.set_ylabel(r'$c(k)$',fontsize=13)\n ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))\n ax.legend(loc=0)\n\n# %% {\"code_folding\": []}\n# Restore the solution corresponding to the original BL accuracy\n\nEX3SS['par']['accuracy'] = Accuracy_BL \nEX3SR=StateReduc_Dct(**EX3SS) # Takes StE result as input and get ready to invoke 
state reduction operation\nSR=EX3SR.StateReduc() # StateReduc is operated \n\n\n## indexMUdct is one dimension, needs to be unraveled to 3 dimensions\nmut_rdc_idx_flt = SR['indexMUdct']\nmut_rdc_idx = np.unravel_index(mut_rdc_idx_flt,dim_StE,order='F')\n\nnb_dct = len(mut_StE.flatten()) \nmut_rdc_bool = np.zeros(nb_dct) # boolean array of 30 x 30 x 4 \nfor i in range(nb_dct):\n mut_rdc_bool[i]=i in list(SR['indexMUdct'])\nmut_rdc_bool_3d = (mut_rdc_bool==1).reshape(dim_StE)\nmut_rdc_mask_3d = (mut_rdc_bool).reshape(dim_StE)\n\n# Get the joint distribution calculated elsewhere\n\njoint_distr = EX3SS['joint_distr']\nmarginal_mk = EX3SS['joint_distr'].sum(axis=2)\n\n# Location at which to cut off the topmost part of the distributions\n\nmass_pct = 0.9\n\n## Again, for BL accuracy level, get dct compressed c functions at all grids \n\nc_n_approx = DCTApprox(cn_StE,mut_rdc_idx)\nc_a_approx = DCTApprox(ca_StE,mut_rdc_idx)\n\n# %% {\"code_folding\": []}\n# 3D surface plots of consumption function at full grids and approximated by DCT\n## at all grids and grids after dct first for non-adjusters and then for adjusters\n\n## for non-adjusters\n\n## full grids now \n## WangTao: \n## After plotting for the entire set of gridpoints, next plot only for the bottom mass_pct of the distributions\n\nmmgrid,kkgrid = np.meshgrid(mgrid,kgrid)\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of non-adjusters at grid points of m and k (for each h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.scatter(mmgrid,kkgrid,c_n_approx[:,:,hgrid_fix],marker='v',color='red',\n label='StE(after dct):non-adjuster')\n ax.plot_surface(mmgrid,kkgrid,cn_StE[:,:,hgrid_fix],cmap='Blues',\n label='StE(before dct): non-adjuster')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_n(m,k)$',fontsize=13)\n plt.gca().invert_yaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n ax.view_init(20, 100)\n\n# %% {\"code_folding\": []}\n## Same thing in a different way: image plots of c functions at full grids and c approximated by DCT\n\n\n## for non-adjusters\n\n## full grids \nmmgrid,kkgrid = np.meshgrid(mgrid,kgrid)\n\n### for adjusters \nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of non-adjusters at grid points of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id\n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1)\n ax.imshow(np.hstack((cn_StE[:,:,hgrid_fix],c_n_approx[:,:,hgrid_fix])))\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n\n# %% {\"code_folding\": []}\n## 3D scatter plots of the difference of full-grid c and approximated c\n\n## for non-adjusters\n\n## full grids \nmmgrid,kkgrid = np.meshgrid(mgrid,kgrid)\n\n### for adjusters \nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of non-adjusters at grid points of m and k (for each h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n cn_diff = c_n_approx-cn_StE\n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,cn_diff[:,:,hgrid_fix], rstride=1, \n cstride=1,cmap=cm.coolwarm, edgecolor='none',\n label='Difference of full-grid and 
approximated consumption function')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_a(m,k)$',fontsize=13)\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n ax.view_init(20, 40)\n\n# %% {\"code_folding\": []}\n# Difference of full-grid c and DCT compressed c for difference levels of accuracy\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Differences of c at full grids and c approximated by DCT in different accuracy levels(non-adjusters)',\n fontsize=(13))\n\nfor idx in range(len(acc_lst)):\n EX3SS_cp =cp.deepcopy(EX3SS)\n EX3SS_cp['par']['accuracy'] = acc_lst[idx]\n EX3SR_cp=StateReduc_Dct(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation\n SR_cp=EX3SR_cp.StateReduc()\n mut_rdc_idx_flt_cp = SR_cp['indexMUdct']\n mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')\n nb_bf_cp = len(mut_rdc_idx_cp[0])\n print(str(nb_bf_cp) +\" basis functions used.\")\n c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)\n c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)\n cn_diff_cp = c_n_approx_cp-cn_StE\n \n hgrid_fix=1 # fix level of h as an example \n \n ## plots \n ax = fig.add_subplot(2,2,idx+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,cn_diff_cp[:,:,hgrid_fix], rstride=1, \n cstride=1,cmap=cm.summer, edgecolor='none',\n label='Difference of full-grid and approximated consumption functions')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel('Difference of c functions',fontsize=13)\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_zlim([-8,2])\n ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))\n ax.view_init(10, 60)\n\n# %% {\"code_folding\": []}\n# for adjusters \n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of adjusters at grid points of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.scatter(mmgrid,kkgrid,c_a_approx[:,:,hgrid_fix],marker='v',color='red',\n label='StE(after dct):adjuster')\n ax.plot_surface(mmgrid,kkgrid,ca_StE[:,:,hgrid_fix],cmap='Blues',\n label='StE(before dct): adjuster')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_a(m,k)$',fontsize=13)\n plt.gca().invert_yaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n ax.view_init(20, 150)\n\n# %% {\"code_folding\": []}\n# Compare consumption functions of adjusters and non-adjusters approximated by DCT\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of adjusters (yellow)/non-adjusters (blue) at grid points of m and k (for each h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,c_n_approx[:,:,hgrid_fix],cmap=cm.winter,\n label='StE(after dct):non-adjuster')\n ax.plot_surface(mmgrid,kkgrid,c_a_approx[:,:,hgrid_fix],cmap=cm.autumn,\n label='StE(after dct):adjuster')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_a(m,k)$',fontsize=13)\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis() \n 
#ax.set_xlim(0,mmax)\n #ax.set_ylim(0,kmax)\n ax.view_init(20, 60)\n\n# %% {\"code_folding\": []}\n## the differences of c functions of adjusters and non-adjusters approximated by DCT.\n\nc_diff_approx=c_n_approx-c_a_approx\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of adjusters/non-adjusters at grid points of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,c_diff_approx[:,:,hgrid_fix],cmap=cm.coolwarm,\n label='StE(after dct):difference of non-adjuster and adjusters')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_n(m,k)-c_a(m,k)$',fontsize=12)\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim(0,mmax)\n #ax.set_ylim(0,kmax)\n ax.view_init(20, 80)\n\n# %% [markdown]\n# ##### Observation\n#\n# - For a given grid value of productivity, the remaining grid points after DCT to represent the whole consumption function are concentrated in low values of $k$ and $m$. This is because the slopes of the surfaces of marginal utility are changing the most in these regions. For larger values of $k$ and $m$ the functions become smooth and only slightly concave, so they can be represented by many fewer points\n# - For different grid values of productivity (2 sub plots), the numbers of grid points in the DCT operation differ. From the lowest to highest values of productivity, there are 78, 33, 25 and 18 grid points, respectively. They add up to the total number of gridpoints of 154 after DCT operation, as we noted above for marginal utility function. \n\n# %% [markdown]\n# #### Distribution of states \n#\n# - We first plot the distribution of $k$ fixing $m$ and $h$. Next, we plot the joint distribution of $m$ and $k$ only fixing $h$ in 3-dimenstional space. \n# - The joint-distribution can be represented by marginal distributions of $m$, $k$ and $h$ and a copula that describes the correlation between the three states. The former is straightfoward. We plot the copula only. The copula is essentially a multivariate cummulative distribution function where each marginal is uniform. 
(Translation from the uniform to the appropriate nonuniform distribution is handled at a separate stage).\n#\n\n# %% {\"code_folding\": []}\n### Marginalize along h grids\n\njoint_distr = EX3SS['joint_distr']\njoint_distr_km = EX3SS['joint_distr'].sum(axis=2)\n\n### Plot distributions in 2 dimensional graph \n\nfig = plt.figure(figsize=(10,10))\nplt.suptitle('Marginal distribution of k at different m')\n\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ax = plt.subplot(2,2,hgrid_id+1)\n ax.set_title(r'$h({})$'.format(hgrid_id))\n ax.set_xlabel('k',size=12)\n for id in range(EX3SS['mpar']['nm']): \n ax.plot(kgrid,joint_distr[id,:,hgrid_id])\n\n# %% {\"code_folding\": []}\n## Plot joint distribution of k and m in 3d graph\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Joint distribution of m and k(for different h)',\n fontsize=(13))\n\n\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,joint_distr[:,:,hgrid_fix], rstride=1, cstride=1,\n cmap='viridis', edgecolor='none')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n plt.gca().invert_yaxis()\n #ax.set_zlabel(r'$p(m,k)$',fontsize=10)\n ax.set_title(r'$h({})$'.format(hgrid_id))\n ax.set_xlim(0,400)\n ax.view_init(20, 40)\n \n\n\n# %% [markdown]\n# Notice the CDFs in StE copula have 4 modes, corresponding to the number of $h$ gridpoints. Each of the four parts of the cdf is a joint-distribution of $m$ and $k$. It can be presented in 3-dimensional graph as below. \n\n# %% {\"code_folding\": []}\n## Plot the copula \n\ncdf=EX3SS['Copula']['value'].reshape(4,30,30) # important: 4,30,30 not 30,30,4? \n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Copula of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,cdf[hgrid_id,:,:], rstride=1, cstride=1,\n cmap='viridis', edgecolor='None')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_title(r'$h({})$'.format(hgrid_id))\n \n ## for each h grid, take the 95% mass of m and k as the maximum of the m and k axis \n \n marginal_mk = joint_distr[:,:,hgrid_id]\n marginal_m = marginal_mk.sum(axis=0)\n marginal_k = marginal_mk.sum(axis=1)\n mmax = mgrid[(np.abs(marginal_m.cumsum()-mass_pct*marginal_m.cumsum().max())).argmin()]\n kmax = kgrid[(np.abs(marginal_k.cumsum()-mass_pct*marginal_k.cumsum().max())).argmin()]\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim(0,mmax)\n #ax.set_ylim(0,kmax)\n ax.view_init(30, 60)\n\n# %% [markdown]\n# # To Do:\n#\n# 1. Plot the _difference_ in the _approximation errors_ for adjusters and nonadjusters\n# 1. Make color or transparency be determined by the population density from the copula\n# 1. Make extra versions of the figures where the color is determined by the population density at that location (given by the copula)\n# 1. Differences _between_ adjusters and nonadjusters in consumption are not interesting and should be deleted\n# 1. Eliminate \"magic numbers\"\n# 1. Improve comments so a new reader can understand what is being done\n\n# %% [markdown]\n# Given the assumption that the copula remains the same after aggregate risk is introduced, we can use the same copula and the marginal distributions to recover the full joint-distribution of the states. 
\n\n# %% [markdown]\n# ### Summary: what do we achieve after the transformation?\n#\n# - Using the DCT, the dimension of the policy and value functions are reduced from 3600 to 154 and 94, respectively.\n# - By marginalizing the joint distribution with the fixed copula assumption, the marginal distribution is of dimension 64 compared to its joint distribution of a dimension of 3600.\n#\n#\n#\n" ]
[ [ "numpy.sum", "numpy.sort", "scipy.linalg.norm", "numpy.zeros", "numpy.squeeze", "scipy.fftpack.dct", "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "numpy.int64", "numpy.arange", "matplotlib.pyplot.subplot", "numpy.hstack", "numpy.shape", "numpy.log", "matplotlib.pyplot.suptitle", "numpy.array", "numpy.meshgrid", "numpy.unravel_index" ] ]
arceushui/Keyword-Spotting-Alibaba
[ "10e718491075dee8f875c7860385bc4eef22a790" ]
[ "espnet2/bin/enh_inference.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nimport logging\nfrom pathlib import Path\nimport sys\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nimport humanfriendly\nimport numpy as np\nimport torch\nfrom tqdm import trange\nfrom typeguard import check_argument_types\n\nfrom espnet.utils.cli_utils import get_commandline_args\nfrom espnet2.fileio.sound_scp import SoundScpWriter\nfrom espnet2.tasks.enh import EnhancementTask\nfrom espnet2.torch_utils.device_funcs import to_device\nfrom espnet2.torch_utils.set_all_random_seed import set_all_random_seed\nfrom espnet2.utils import config_argparse\nfrom espnet2.utils.types import str2bool\nfrom espnet2.utils.types import str2triple_str\nfrom espnet2.utils.types import str_or_none\n\n\nEPS = torch.finfo(torch.get_default_dtype()).eps\n\n\nclass SeparateSpeech:\n \"\"\"SeparateSpeech class\n\n Examples:\n >>> import soundfile\n >>> separate_speech = SeparateSpeech(\"enh_config.yml\", \"enh.pth\")\n >>> audio, rate = soundfile.read(\"speech.wav\")\n >>> separate_speech(audio)\n [separated_audio1, separated_audio2, ...]\n\n \"\"\"\n\n def __init__(\n self,\n enh_train_config: Union[Path, str],\n enh_model_file: Union[Path, str] = None,\n segment_size: Optional[float] = None,\n hop_size: Optional[float] = None,\n normalize_segment_scale: bool = False,\n show_progressbar: bool = False,\n ref_channel: Optional[int] = None,\n normalize_output_wav: bool = False,\n device: str = \"cpu\",\n dtype: str = \"float32\",\n ):\n assert check_argument_types()\n\n # 1. Build Enh model\n enh_model, enh_train_args = EnhancementTask.build_model_from_file(\n enh_train_config, enh_model_file, device\n )\n enh_model.to(dtype=getattr(torch, dtype)).eval()\n\n self.device = device\n self.dtype = dtype\n self.enh_train_args = enh_train_args\n self.enh_model = enh_model\n\n # only used when processing long speech, i.e.\n # segment_size is not None and hop_size is not None\n self.segment_size = segment_size\n self.hop_size = hop_size\n self.normalize_segment_scale = normalize_segment_scale\n self.normalize_output_wav = normalize_output_wav\n self.show_progressbar = show_progressbar\n\n self.num_spk = enh_model.num_spk\n task = \"enhancement\" if self.num_spk == 1 else \"separation\"\n\n # reference channel for processing multi-channel speech\n if ref_channel is not None:\n logging.info(\n \"Overwrite enh_model.separator.ref_channel with {}\".format(ref_channel)\n )\n enh_model.separator.ref_channel = ref_channel\n self.ref_channel = ref_channel\n else:\n self.ref_channel = enh_model.ref_channel\n\n self.segmenting = segment_size is not None and hop_size is not None\n if self.segmenting:\n logging.info(\"Perform segment-wise speech %s\" % task)\n logging.info(\n \"Segment length = {} sec, hop length = {} sec\".format(\n segment_size, hop_size\n )\n )\n else:\n logging.info(\"Perform direct speech %s on the input\" % task)\n\n @torch.no_grad()\n def __call__(\n self, speech_mix: Union[torch.Tensor, np.ndarray], fs: int = 8000\n ) -> List[torch.Tensor]:\n \"\"\"Inference\n\n Args:\n speech_mix: Input speech data (Batch, Nsamples [, Channels])\n fs: sample rate\n Returns:\n [separated_audio1, separated_audio2, ...]\n\n \"\"\"\n assert check_argument_types()\n\n # Input as audio signal\n if isinstance(speech_mix, np.ndarray):\n speech_mix = torch.as_tensor(speech_mix)\n\n assert speech_mix.dim() > 1, speech_mix.size()\n batch_size = speech_mix.size(0)\n speech_mix = 
speech_mix.to(getattr(torch, self.dtype))\n # lenghts: (B,)\n lengths = speech_mix.new_full(\n [batch_size], dtype=torch.long, fill_value=speech_mix.size(1)\n )\n\n # a. To device\n speech_mix = to_device(speech_mix, device=self.device)\n lengths = to_device(lengths, device=self.device)\n\n if self.segmenting and lengths[0] > self.segment_size * fs:\n # Segment-wise speech enhancement/separation\n overlap_length = int(np.round(fs * (self.segment_size - self.hop_size)))\n num_segments = int(\n np.ceil((speech_mix.size(1) - overlap_length) / (self.hop_size * fs))\n )\n t = T = int(self.segment_size * fs)\n pad_shape = speech_mix[:, :T].shape\n enh_waves = []\n range_ = trange if self.show_progressbar else range\n for i in range_(num_segments):\n st = int(i * self.hop_size * fs)\n en = st + T\n if en >= lengths[0]:\n # en - st < T (last segment)\n en = lengths[0]\n speech_seg = speech_mix.new_zeros(pad_shape)\n t = en - st\n speech_seg[:, :t] = speech_mix[:, st:en]\n else:\n t = T\n speech_seg = speech_mix[:, st:en] # B x T [x C]\n\n lengths_seg = speech_mix.new_full(\n [batch_size], dtype=torch.long, fill_value=T\n )\n # b. Enhancement/Separation Forward\n feats, f_lens = self.enh_model.encoder(speech_seg, lengths_seg)\n feats, _, _ = self.enh_model.separator(feats, f_lens)\n processed_wav = [\n self.enh_model.decoder(f, lengths_seg)[0] for f in feats\n ]\n if speech_seg.dim() > 2:\n # multi-channel speech\n speech_seg_ = speech_seg[:, self.ref_channel]\n else:\n speech_seg_ = speech_seg\n\n if self.normalize_segment_scale:\n # normalize the energy of each separated stream\n # to match the input energy\n processed_wav = [\n self.normalize_scale(w, speech_seg_) for w in processed_wav\n ]\n # List[torch.Tensor(num_spk, B, T)]\n enh_waves.append(torch.stack(processed_wav, dim=0))\n\n # c. Stitch the enhanced segments together\n waves = enh_waves[0]\n for i in range(1, num_segments):\n # permutation between separated streams in last and current segments\n perm = self.cal_permumation(\n waves[:, :, -overlap_length:],\n enh_waves[i][:, :, :overlap_length],\n criterion=\"si_snr\",\n )\n # repermute separated streams in current segment\n for batch in range(batch_size):\n enh_waves[i][:, batch] = enh_waves[i][perm[batch], batch]\n\n if i == num_segments - 1:\n enh_waves[i][:, :, t:] = 0\n enh_waves_res_i = enh_waves[i][:, :, overlap_length:t]\n else:\n enh_waves_res_i = enh_waves[i][:, :, overlap_length:]\n\n # overlap-and-add (average over the overlapped part)\n waves[:, :, -overlap_length:] = (\n waves[:, :, -overlap_length:] + enh_waves[i][:, :, :overlap_length]\n ) / 2\n # concatenate the residual parts of the later segment\n waves = torch.cat([waves, enh_waves_res_i], dim=2)\n # ensure the stitched length is same as input\n assert waves.size(2) == speech_mix.size(1), (waves.shape, speech_mix.shape)\n waves = torch.unbind(waves, dim=0)\n else:\n # b. 
Enhancement/Separation Forward\n feats, f_lens = self.enh_model.encoder(speech_mix, lengths)\n feats, _, _ = self.enh_model.separator(feats, f_lens)\n waves = [self.enh_model.decoder(f, lengths)[0] for f in feats]\n\n assert len(waves) == self.num_spk, len(waves) == self.num_spk\n assert len(waves[0]) == batch_size, (len(waves[0]), batch_size)\n if self.normalize_output_wav:\n waves = [\n (w / abs(w).max(dim=1, keepdim=True)[0] * 0.9).cpu().numpy()\n for w in waves\n ] # list[(batch, sample)]\n else:\n waves = [w.cpu().numpy() for w in waves]\n\n return waves\n\n @staticmethod\n @torch.no_grad()\n def normalize_scale(enh_wav, ref_ch_wav):\n \"\"\"Normalize the energy of enh_wav to match that of ref_ch_wav.\n\n Args:\n enh_wav (torch.Tensor): (B, Nsamples)\n ref_ch_wav (torch.Tensor): (B, Nsamples)\n Returns:\n enh_wav (torch.Tensor): (B, Nsamples)\n \"\"\"\n ref_energy = torch.sqrt(torch.mean(ref_ch_wav.pow(2), dim=1))\n enh_energy = torch.sqrt(torch.mean(enh_wav.pow(2), dim=1))\n return enh_wav * (ref_energy / enh_energy)[:, None]\n\n @torch.no_grad()\n def cal_permumation(self, ref_wavs, enh_wavs, criterion=\"si_snr\"):\n \"\"\"Calculate the permutation between seaprated streams in two adjacent segments.\n\n Args:\n ref_wavs (List[torch.Tensor]): [(Batch, Nsamples)]\n enh_wavs (List[torch.Tensor]): [(Batch, Nsamples)]\n criterion (str): one of (\"si_snr\", \"mse\", \"corr)\n Returns:\n perm (torch.Tensor): permutation for enh_wavs (Batch, num_spk)\n \"\"\"\n loss_func = {\n \"si_snr\": self.enh_model.si_snr_loss,\n \"mse\": lambda enh, ref: torch.mean((enh - ref).pow(2), dim=1),\n \"corr\": lambda enh, ref: (\n (enh * ref).sum(dim=1)\n / (enh.pow(2).sum(dim=1) * ref.pow(2).sum(dim=1) + EPS)\n ).clamp(min=EPS, max=1 - EPS),\n }[criterion]\n\n _, perm = self.enh_model._permutation_loss(ref_wavs, enh_wavs, loss_func)\n return perm\n\n\ndef humanfriendly_or_none(value: str):\n if value in (\"none\", \"None\", \"NONE\"):\n return None\n return humanfriendly.parse_size(value)\n\n\ndef inference(\n output_dir: str,\n batch_size: int,\n dtype: str,\n fs: int,\n ngpu: int,\n seed: int,\n num_workers: int,\n log_level: Union[int, str],\n data_path_and_name_and_type: Sequence[Tuple[str, str, str]],\n key_file: Optional[str],\n enh_train_config: str,\n enh_model_file: str,\n allow_variable_data_keys: bool,\n segment_size: Optional[float],\n hop_size: Optional[float],\n normalize_segment_scale: bool,\n show_progressbar: bool,\n ref_channel: Optional[int],\n normalize_output_wav: bool,\n):\n assert check_argument_types()\n if batch_size > 1:\n raise NotImplementedError(\"batch decoding is not implemented\")\n if ngpu > 1:\n raise NotImplementedError(\"only single GPU decoding is supported\")\n\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n\n if ngpu >= 1:\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n # 1. Set random-seed\n set_all_random_seed(seed)\n\n # 2. Build separate_speech\n separate_speech = SeparateSpeech(\n enh_train_config=enh_train_config,\n enh_model_file=enh_model_file,\n segment_size=segment_size,\n hop_size=hop_size,\n normalize_segment_scale=normalize_segment_scale,\n show_progressbar=show_progressbar,\n ref_channel=ref_channel,\n normalize_output_wav=normalize_output_wav,\n device=device,\n dtype=dtype,\n )\n\n # 3. 
Build data-iterator\n loader = EnhancementTask.build_streaming_iterator(\n data_path_and_name_and_type,\n dtype=dtype,\n batch_size=batch_size,\n key_file=key_file,\n num_workers=num_workers,\n preprocess_fn=EnhancementTask.build_preprocess_fn(\n separate_speech.enh_train_args, False\n ),\n collate_fn=EnhancementTask.build_collate_fn(\n separate_speech.enh_train_args, False\n ),\n allow_variable_data_keys=allow_variable_data_keys,\n inference=True,\n )\n\n # 4. Start for-loop\n writers = []\n for i in range(separate_speech.num_spk):\n writers.append(\n SoundScpWriter(f\"{output_dir}/wavs/{i + 1}\", f\"{output_dir}/spk{i + 1}.scp\")\n )\n\n for keys, batch in loader:\n assert isinstance(batch, dict), type(batch)\n assert all(isinstance(s, str) for s in keys), keys\n _bs = len(next(iter(batch.values())))\n assert len(keys) == _bs, f\"{len(keys)} != {_bs}\"\n batch = {k: v for k, v in batch.items() if not k.endswith(\"_lengths\")}\n\n waves = separate_speech(**batch)\n for (spk, w) in enumerate(waves):\n for b in range(batch_size):\n writers[spk][keys[b]] = fs, w[b]\n print(w[b],file=sys.stderr)\n for writer in writers:\n writer.close()\n\n\ndef get_parser():\n parser = config_argparse.ArgumentParser(\n description=\"Frontend inference\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Note(kamo): Use '_' instead of '-' as separator.\n # '-' is confusing if written in yaml.\n parser.add_argument(\n \"--log_level\",\n type=lambda x: x.upper(),\n default=\"INFO\",\n choices=(\"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"NOTSET\"),\n help=\"The verbose level of logging\",\n )\n\n parser.add_argument(\"--output_dir\", type=str, required=True)\n parser.add_argument(\n \"--ngpu\",\n type=int,\n default=0,\n help=\"The number of gpus. 
0 indicates CPU mode\",\n )\n parser.add_argument(\"--seed\", type=int, default=0, help=\"Random seed\")\n parser.add_argument(\n \"--dtype\",\n default=\"float32\",\n choices=[\"float16\", \"float32\", \"float64\"],\n help=\"Data type\",\n )\n parser.add_argument(\n \"--fs\", type=humanfriendly_or_none, default=8000, help=\"Sampling rate\"\n )\n parser.add_argument(\n \"--num_workers\",\n type=int,\n default=1,\n help=\"The number of workers used for DataLoader\",\n )\n\n group = parser.add_argument_group(\"Input data related\")\n group.add_argument(\n \"--data_path_and_name_and_type\",\n type=str2triple_str,\n required=True,\n action=\"append\",\n )\n group.add_argument(\"--key_file\", type=str_or_none)\n group.add_argument(\"--allow_variable_data_keys\", type=str2bool, default=False)\n\n group = parser.add_argument_group(\"Output data related\")\n group.add_argument(\n \"--normalize_output_wav\",\n type=str2bool,\n default=False,\n help=\"Whether to normalize the predicted wav to [-1~1]\",\n )\n\n group = parser.add_argument_group(\"The model configuration related\")\n group.add_argument(\"--enh_train_config\", type=str, required=True)\n group.add_argument(\"--enh_model_file\", type=str, required=True)\n\n group = parser.add_argument_group(\"Data loading related\")\n group.add_argument(\n \"--batch_size\",\n type=int,\n default=1,\n help=\"The batch size for inference\",\n )\n group = parser.add_argument_group(\"SeparateSpeech related\")\n group.add_argument(\n \"--segment_size\",\n type=float,\n default=None,\n help=\"Segment length in seconds for segment-wise speech enhancement/separation\",\n )\n group.add_argument(\n \"--hop_size\",\n type=float,\n default=None,\n help=\"Hop length in seconds for segment-wise speech enhancement/separation\",\n )\n group.add_argument(\n \"--normalize_segment_scale\",\n type=str2bool,\n default=False,\n help=\"Whether to normalize the energy of the separated streams in each segment\",\n )\n group.add_argument(\n \"--show_progressbar\",\n type=str2bool,\n default=False,\n help=\"Whether to show a progress bar when performing segment-wise speech \"\n \"enhancement/separation\",\n )\n group.add_argument(\n \"--ref_channel\",\n type=int,\n default=None,\n help=\"If not None, this will overwrite the ref_channel defined in the \"\n \"separator module (for multi-channel speech processing)\",\n )\n\n return parser\n\n\ndef main(cmd=None):\n print(get_commandline_args(), file=sys.stderr)\n parser = get_parser()\n args = parser.parse_args(cmd)\n kwargs = vars(args)\n kwargs.pop(\"config\", None)\n inference(**kwargs)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.unbind", "torch.stack", "torch.get_default_dtype", "torch.as_tensor", "torch.no_grad", "numpy.round", "torch.cat" ] ]
RosettaCommons/jade2
[ "40affc7c4e0f1f6ee07030e72de284e3484946e7" ]
[ "jade2/basic/structure/PythonPDB2.py" ]
[ "\n## @author Jared Adolf-Bryfogle ([email protected])\n\n#Python Imports\nimport copy\nimport pandas\nimport re, logging\nfrom collections import defaultdict\nfrom typing import Union, DefaultDict, List, Any, Dict\nfrom pathlib import Path\n\nfrom jade2.basic.path import *\n\nclass PythonPDB2:\n def __init__(self, pdb_file_path: Union[str, Path] = \"\"):\n \"\"\"\n \n Lightweight PDB class specifically for manipulating pdbs in scripts and simple apps as well as obtaining subsets of data in the PDB.\n 2.0 Uses a vector of dictionaries as main pdb_map for easier manipulation of the pdb_map.\n Notes:\n Not Meant to be fast - written for ease of use!\n ALL elements of the pdb_map data are stored as strings!\n\n \"\"\"\n\n self.elements = (\"id\", \"atom_number\", \"atom_name\", \"alternate_location\", \\\n \"three_letter_code\", \"chain\", \"residue_number\", \"i_code\", \"x\", \"y\", \"z\", \\\n \"occupancy\", \"b_factor\", \"element\", \"charge\")\n self.pdb_file_path = str(pdb_file_path)\n\n self.pdb_map: List[DefaultDict[str, str]] = [] #[int line]:[string element]:[string value]\n\n self.header: List[str] = [] #Unparsed header, but the data is held here as a list of strings. - Everything NOT ATOM or HETATM is here\n self.remarks: List[str] = [] #Only REMARK lines as strings\n\n if pdb_file_path:\n self.read_pdb_into_map()\n else:\n logging.info(\"Loading blank PythonPDB\")\n\n\n def set_pdb_map(self, pdb_map: List[DefaultDict[str, str]]):\n self.pdb_map = pdb_map\n\n ####################################################################\n # Getters + PDB_Map Subsets\n #\n #\n\n def get_pdb_map(self) -> List[DefaultDict[str, str]]:\n return self.pdb_map\n\n def get_dataframe(self) -> pandas.DataFrame:\n \"\"\"\n Get the PDB Map as a dataframe dataframe\n \"\"\"\n return pandas.DataFrame(self.pdb_map)\n\n def get_header(self) -> List[str]:\n \"\"\"\n Get 'header' of PDB as list of strings\n \"\"\"\n return self.header\n\n def get_remarks(self) -> List[str]:\n \"\"\"\n Get 'REMARK' lines of PDB as a list of strings\n \"\"\"\n return self.remarks\n\n def add_remark(self, remark: str):\n remark = \"REMARK \"+remark\n self.remarks.append(remark)\n\n def get_chain(self, chain) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get Chain data as pdb_map subset\n \"\"\"\n chain_data = []\n for dat in self.pdb_map:\n if dat[\"chain\"] == chain:\n chain_data.append(dat)\n return chain_data\n\n def rename_chain(self, old_chain, new_chain):\n for i in range(0, len(self.pdb_map) ):\n #print(\"CHAIN :\",self.pdb_map[i][\"chain\"],\":\")\n if self.pdb_map[i][\"chain\"] == old_chain:\n #print(\"Chain found. 
Attempting to change\")\n self.pdb_map[i][\"chain\"] = new_chain\n\n def get_waters(self) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get water data as pdb_map subset\n \"\"\"\n water_data = []\n for dat in self.pdb_map:\n if dat[\"three_letter_code\"] in [\"HOH\",\"TP3\",\"TP5\",\"TIP3\",\"TIP5\"]:\n water_data.append(dat)\n return water_data\n\n def get_hetatms(self) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get hetatm data as pdb_map subset\n \"\"\"\n het_data = []\n for dat in self.pdb_map:\n if dat[\"id\"] == \"HETATM\":\n het_data.append(dat)\n return het_data\n\n def get_bb_data(self) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get pdb_map subset of only N, CA, and C atoms\n \"\"\"\n bb_data = []\n for dat in self.pdb_map:\n if dat[\"atom_name\"] in [\"N\", \"CA\", \"C\"]:\n bb_data.append(dat)\n return bb_data\n\n def get_all_residues_of_type(self, name3: str) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get PDB_Map subset of all residues of specific type\n \"\"\"\n res_data = []\n for dat in self.pdb_map:\n if dat[\"three_letter_code\"] == name3:\n res_data.append(dat)\n return res_data\n\n def get_residue(self, resnum: int, chain: str, icode: str= \"\") -> List[DefaultDict[str, str]]:\n \"\"\"\n Get PDB_Map subset of a specific residue\n \"\"\"\n residue = []\n for dat in self.pdb_map:\n if dat[\"residue_number\"] == str(resnum) and dat[\"chain\"] == chain and dat[\"icode\"] == \"\":\n residue.append(dat)\n return residue\n\n\n\n ####################################################################\n # Main\n #\n #\n\n def read_pdb_into_map(self):\n \"\"\"\n Reads PDB file path into a basic PDB map. All data is held as strings.\n \"\"\"\n \n FILE = open_file(self.pdb_file_path, 'r')\n i = 0\n for line in FILE:\n line = line.strip()\n line = line.strip('\\n')\n\n if not line: continue\n\n if re.search(\"REMARK\", line[0:6]):\n self.remarks.append(line)\n\n elif (re.search(\"END\", line[0:6]) or re.search(\"TER\", line[0:6])):\n #We ignore END and TER for now.\n pass\n\n elif (re.search(\"ATOM\", line[0:6]) or re.search(\"HETATM\", line[0:6])):\n\n self.pdb_map.append(defaultdict())\n\n self.pdb_map[i][\"id\"]=line[0:6].strip()\n self.pdb_map[i][\"atom_number\"]=line[6:11].strip(); self.pdb_map[i][\"atom_name\"] = line[12:16]\n self.pdb_map[i][\"alternate_location\"]=line[16]; self.pdb_map[i][\"three_letter_code\"] = line[17:21].strip()\n self.pdb_map[i][\"chain\"] = line[21]; self.pdb_map[i][\"residue_number\"]= line[22:26].strip()\n self.pdb_map[i][\"i_code\"] = line[26]; self.pdb_map[i][\"x\"] = line[27:38].strip()\n self.pdb_map[i][\"y\"]= line[38:46].strip(); self.pdb_map[i][\"z\"]= line[46:54].strip()\n self.pdb_map[i][\"occupancy\"] = line[54:60].strip(); self.pdb_map[i][\"b_factor\"]=line[60:66].strip()\n self.pdb_map[i][\"element\"]=line[66:78].strip(); self.pdb_map[i][\"charge\"]=line[78:79].strip()\n\n i +=1\n\n elif (re.search(\"REMARK\", line[0:6])):\n self.remarks.append(line)\n\n else:\n self.header.append(line)\n\n FILE.close()\n\n def save_PDB(self, filename: Union[Path, str], output_remarks: bool = True, output_header: bool= True) -> Union[Path, str]:\n \"\"\"\n Uses a the pdb_map to save the data as a PDB file.\n Returns the filename\n \"\"\"\n\n #global_variables.current_directory = os.path.dirname(filename)\n\n FILE = open_file(filename, 'w')\n if output_remarks:\n for line in self.remarks:\n FILE.write(line+\"\\n\")\n\n if output_header:\n for line in self.header:\n FILE.write(line+\"\\n\")\n\n for entry in self.pdb_map:\n line = 
self.morph_line_in_pdb_map_to_pdb_line(entry)\n FILE.write(line+\"\\n\")\n FILE.close()\n print(\"PDB File Written...\")\n return filename\n\n def morph_line_in_pdb_map_to_pdb_line(self, entry: DefaultDict[str, str]) -> str:\n \"\"\"\n Oh What fun. ;)\n Magic Numbers?: (6,5,4,3,1,4,8,8,8,4,5);\n \"\"\"\n\n\n #Here we fix the formating of atom name. If we stripped the atom name.\n \"\"\"\n atom_name = self.pdb_map[line_num]['atom_name']\n if len(atom_name)==1:\n atom_name=' '+atom_name+' '\n elif len(atom_name)==2:\n #Note that 2 letter elements like CA (calcium) differ from CA (C-Alpha)\n #If calcium, would go @column 13. if C-Alpha, column 14.\n atom_name=' '+atom_name+' '\n elif len(atom_name)==3:\n atom_name=' '+atom_name\n elif len(atom_name)==4:\n atom_name=atom_name\n else:\n print \"Atom Name missing. Inserting spaces.\"\n atom_name = ' '\n \"\"\"\n\n #Create the PDB line.\n line = (entry['id']).ljust(6)+ (entry['atom_number']).rjust(5)+\" \"+ entry['atom_name']+ \\\n (entry['alternate_location'])+ ((entry['three_letter_code']).rjust(3)).ljust(4)+ (entry['chain'])+ \\\n (entry['residue_number']).rjust(4)+ (entry['i_code']) + \\\n (entry['x']).rjust(11)+ (entry['y']).rjust(8)+ (entry['z']).rjust(8) + \\\n (entry['occupancy']).rjust(6)+ (entry['b_factor']).rjust(6)\n\n #Note three letter code is wonky due to DA residues. ljust(4) was not working.\n return line\n\n\n ##################\n # Addition\n #\n #\n\n def add_ca_residue(self, x: str, y: str, z: str, restype: str = \"ALA\", b_fac: float = 0, chain=\"X\"):\n \"\"\"\n Add a residue to the map that is only CA\n :param x:\n :param y:\n :param z:\n :param restype:\n :param b_fac:\n :return: None\n \"\"\"\n pass\n\n ####################################################################\n # Removal\n #\n #\n\n def remove_antigen(self):\n \"\"\"\n Remove Antigen from an LH only PDB\n \"\"\"\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"chain\"] not in ['L', 'H']:\n self.pdb_map.remove(dat)\n\n def remove_chain(self, chain: str):\n \"\"\"\n Removes chain from pdb_map\n \"\"\"\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"chain\"]==chain:\n self.pdb_map.remove(dat)\n\n def remove_residue_type(self, name3: str):\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"three_letter_code\"]==name3:\n self.pdb_map.remove(dat)\n\n def remove_hetatm_atoms(self):\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"id\"]==\"HETATM\":\n self.pdb_map.remove(dat)\n \n \n def remove_element_column(self):\n \"\"\"\n Removes the extra stuff in the element column, but not the element itself.\n \"\"\"\n for i in range(0, len(self.pdb_map)):\n ele = self.pdb_map[i][\"element\"]\n e = ele[11]\n self.pdb_map[i][\"element\"]=\" \"+e\n print(\"Extra stuff in Element Columns Removed\")\n return self.pdb_map\n \n def remove_waters(self):\n \"\"\"\n Removes waters from pdb_map\n \"\"\"\n #codes = [\"HOH\",\"TP3\",\"TP5\",\"TIP3\",\"TIP5\"]\n temp_pdb_map = copy.deepcopy(self.pdb_map) #This is to pop elements\n for dat in temp_pdb_map:\n if dat[\"three_letter_code\"] in [\"HOH\",\"TP3\",\"TP5\",\"TIP3\",\"TIP5\"]:\n #self.pdb_map.pop(num)\n self.pdb_map.remove(dat)\n \n def remove_alternate_residues(self):\n \"\"\"\n Removes any alternate residue codes and renumbers by renumbering from 1 and integrating any inserts. 
\n \"\"\"\n \n def get_residue_num(num): return int(self.pdb_map_copy[num][\"residue_number\"])\n def set_residue_num(num, resnum): self.pdb_map[num][\"residue_number\"]=str(resnum)\n def get_chain(num):return self.pdb_map_copy[num][\"chain\"]\n def get_i_code(num):return self.pdb_map_copy[num][\"i_code\"]\n \n def check_id(num):\n if self.pdb_map_copy[num]['id']==\"ATOM\":\n return True\n else:\n return False\n \n def check_new_residue(old_num, num, insert_residue=False, pdb_map = False):\n if insert_residue:\n if get_i_code(old_num)==get_i_code(num):\n return False\n else:\n return True\n else:\n if get_residue_num(old_num)==get_residue_num(num):\n return False\n else:\n return True\n \n def check_new_chain(old_num, num):\n if get_chain(old_num)==get_chain(num):\n return False\n else:\n return True\n \n def check_insertion(num):\n if not get_i_code(num)==\" \":\n return True\n else:\n return False\n \n def renumber_from_one(chain_only, start_num):\n resnum = 1\n for num in sorted(chain_only):\n \n insert = check_insertion(num)\n \n #print repr(get_residue_num(num))+\":\"+repr(insert)\n \n #This is so we don't check if it's a new residue with num-1 - Which won't actually be part of the chain!\n if num==start_num:\n set_residue_num(num, resnum)\n\n \n \n #Iterate resnum if new residue\n elif check_new_residue(num-1, num, insert):\n resnum+=1\n set_residue_num(num, resnum)\n\n else:\n set_residue_num(num, resnum)\n \n #Set i code at the end, so we can tell if we have new residues or not.\n for num in sorted(chain_only):\n self.pdb_map[num][\"i_code\"]=\" \"\n \n def renumber_from_insert(chain_only, start_num):\n pass\n \n self.pdb_map_copy = copy.deepcopy(self.pdb_map)\n \n #Get chains with insertion codes - Now renumbers all chains. Will be an option later.\n chains_with_inserts = dict(); \n for num in range(0, len(self.pdb_map)):\n #if get_i_code(num)==\" \":\n chains_with_inserts[get_chain(num)]=True\n\n \n #Iterate through all lines/atoms\n #Initialize for scope\n start_residue=0;\n new_start=False\n for chain in chains_with_inserts:\n print(\"Renumbering chain \"+chain)\n chain_only=dict()\n for num in range(0, len(self.pdb_map)):\n if chain == get_chain(num) and check_id(num):\n chain_only[num]=self.pdb_map[num]\n lines = sorted(chain_only)\n res_start = get_residue_num(lines[0])\n \n renumber_from_one(chain_only, lines[0])\n \n #For now, we only renumber from one.\n #else:\n #chain_only = renumber_from_insert(chain_only, lines[0]) \n \n\n\n ####################################################################\n # General Manipulation\n #\n #\n\n def change_occupancy(self):\n \"\"\"\n Changes ALL occupancies in a PDB dictionary to 1.00\n Returns PDB Dictionary.\n \"\"\"\n \n check = 0\n for key in range(0, len(self.pdb_map)):\n if self.pdb_map[key][\"occupancy\"].rfind(\"0.00\")!=-1:\n print(\"Changing occupancy of residue \" + self.pdb_map[key][\"residue_number\"] + \"To 1.00\")\n check =1\n self.pdb_map[key][\"occupancy\"] = \" 1.00\"\n if check ==1:\n print(\"Occupancy Column OK for PyRosetta...\")\n\n\n def combine_pdb(self, py_pdb: 'PythonPDB2'):\n \"\"\"\n Combines pdb_map from instance of PyPDB to this one. Does not do any checks.\n \"\"\"\n m = py_pdb.get_pdb_map()\n for dat in m:\n self.pdb_map.append(dat)\n\n def copy_chain_into_pdb_map(self, py_pdb: 'PythonPDB2', chain: str):\n \"\"\"\n Copies all data from one pdb_map of a py_pdb of a chain into the one held in this class. 
Useful for reordering chains.\n \"\"\"\n m = py_pdb.get_pdb_map()\n for dat in m:\n if dat[\"chain\"] == chain:\n self.pdb_map.append(dat)\n\n def copy_all_but_chains_into_pdb_map(self, py_pdb:'PythonPDB2', chains):\n \"\"\"\n Copies all data from one pdb_map of a py_pdb of all data except the specified chains into this one. Useful for reordering chains.\n \"\"\"\n m = py_pdb.get_pdb_map()\n for dat in m:\n if not dat[\"chain\"] in chains:\n self.pdb_map.append(dat)\n\n def combine_pdb_map(self, pdb_map: List[DefaultDict[str, str]]):\n \"\"\"\n Combines pdb_map passed with the PythonPDBs map\n \"\"\"\n for dat in pdb_map:\n self.pdb_map.append(dat)\n\n def pdb_alias(self, pairs: Dict[Any, Any], element: str):\n \"\"\"\n Replaces ALL occurances of old element with new from pair.\n pair is a dictionary. In C++ it would be an array of pairs. [string old]:[string new]\n For Specific functions, please see below.\n \"\"\"\n for num in range(0, len(self.pdb_map)):\n for old in pairs:\n if self.pdb_map[num][element] == old:\n self.pdb_map[num][element] = pairs[old]\n\n def pdb_atom_alias(self, line_num: int, pair: Dict[Any, Any]):\n \"\"\"\n Replaces atom_names with ones Rosetta is happy with.\n pair is a dictionary. In C++ it would be an array of pairs. [string MD atom_name]:[string rosetta atom_name]\n \"\"\"\n for start in pair:\n if self.pdb_map[line_num][\"atom_name\"] == start:\n print(self.pdb_map[line_num][\"three_letter_code\"] + \":\" + self.pdb_map[line_num][\"atom_name\"] + \":\" +\n pair[start])\n self.pdb_map[line_num][\"atom_name\"] = pair[start]\n\n def pdb_residue_alias(self, pairs: Dict[Any, Any]):\n \"\"\"\n Replaces ALL occurances of old residue with new residue.\n pair is a dictionary. In C++ it would be an array of pairs. [string old residue_name]:[string new residue_name]\n \"\"\"\n for num in range(0, len(self.pdb_map)):\n for old in pairs:\n if self.pdb_map[num][\"residue_name\"] == old:\n self.pdb_map[num][\"residue_name\"] = pairs[old]\n\n def pdb_chain_alias(self, pairs: Dict[Any, Any]):\n \"\"\"\n Replaces ALL occurances of old chain with new chain.\n pair is a dictionary. In C++ it would be an array of pairs. 
[string old chain]:[string new chain]\n \"\"\"\n for num in range(0, len(self.pdb_map)):\n for old in pairs:\n if self.pdb_map[num][\"chain\"] == old:\n self.pdb_map[num][\"chain\"] = pairs[old]\n\n def clean_PDB(self):\n \"\"\"\n Removes HSD, Waters: Tries to fix atom and residue name inconsistencies.\n HAS worked for changing a single MD pdb (NAMD) frame to Rosetta file.\n PLEASE Expand if possible to alias all residues for Rosetta compatability.\n NOT gaurenteed, but SHOULD work ok.\n \"\"\"\n\n self.RESIDUES_aliased = False; self.WATER_aliased=False; self.IONS_aliased=False; self.DNA_aliased = False\n\n waters: List[DefaultDict[str, str]] = [] #List of data that have waters\n print(\"Attempting to change residue names, atom names, and water\")\n for n in range(0, len(self.pdb_map)):\n dat = self.pdb_map[n]\n\n #print self.pdb_map[key][\"three_letter_code\"]\n def alias_dna():\n if dat[\"three_letter_code\"]==\"DA\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"A\"\n\n elif dat[\"three_letter_code\"]==\"DT\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"T\"\n\n elif dat[\"three_letter_code\"]==\"DC\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"C\"\n\n elif dat[\"three_letter_code\"]==\"DG\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"G\"\n\n else:\n return\n\n def alias_water():\n if dat[\"three_letter_code\"] in [\"HOH\", \"TIP3\", \"WAT\", \"TIP5\"]:\n self.WATER_aliased=True\n dat[\"three_letter_code\"]=\"TP3\" #IO_STRING for TP3 is WAT...Buy still reads TP#?\n dat[\"id\"]=\"HETATM\"\n waters.append(dat)\n\n #def alias_ions():\n #if self.pdb_map[key][\"chain\"]==\"I\":\n #IONS_aliased= True\n #self.pdb_map[key][\"id\"]=\"HETATM\"\n\n def alias_residues():\n if dat[\"three_letter_code\"] == \"HSD\":\n self.RESIDUES_aliased = True\n dat[\"three_letter_code\"]=\"HIS\"\n\n def alias_atoms():\n if dat[\"three_letter_code\"]== \"SER \":\n atom_pairs = {\" HG1\":\" HG \"}\n\n elif dat[\"three_letter_code\"]==\"ILE \":\n atom_pairs = {\" CD \":\" CD1\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"LEU \":\n atom_pairs = {\" OT1\":\" O \", \" OT2\":\" OXT\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"VAL \":\n atom_pairs = {\" OT1\":\" O \", \" OT2\":\" OXT\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"LYS \":\n atom_pairs = {\" HZ1\":\" 1HZ\", \" HZ2\":\" 2HZ\", \" HZ3\":\" 3HZ\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"ARG \":\n atom_pairs = {\" HH11\":\" 1HH1\", \" HH12\":\" 2HH1\", \" HH21\":\" 1HH2\", \" HH22\":\" 2HH2\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"ASN \":\n atom_pairs = {\"HD21\":\"1HD2\", \"HD22\":\"2HD2\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"PRO \":\n atom_pairs = {\" OT1\":\" O \", \" OT2\":\" OXT\", \" HD1\":\" 1HD\", \" HD2\":\" 2HD\", \" HB1\":\" 1HB\", \" HG1\":\" 1HG\", \" HG2\":\" 2HG\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n\n #Unnessessary, but organized.\n alias_water()\n #alias_ions()\n #alias_residues()\n alias_atoms()\n alias_dna()\n\n #Removes Waters. Keeps Ions.\n #for key in waters:\n #self.pdb_map.pop(key)\n\n #Outputs what was found:\n if self.RESIDUES_aliased:\n print(\"Residues Changed\")\n\n if self.WATER_aliased:\n print(\"Water found...changed to TP3. 
Remove to decrease calculation time.\")\n\n if self.IONS_aliased:\n print(\"Ions found. Most are able to be read into Rosetta\")\n\n if self.DNA_aliased:\n print(\"DNA found, changed to single letter code.\")\n\n\n\n\n ####################################################################\n # B Factor Replacements\n #\n #\n\n def read_file_and_replace_b_factors(self, deliminator: str, filename: str, resnum_column: int=1, chain_column:int=2, data_column: int=3, atomname_column=False):\n \"\"\"\n This function reads a deliminated file with data and inserts the data into the BFactor column. Used to visualize arbitrary data.\n Use function options to control which column the data is in as well as where your resnums and chains are located.\n If atomname column is given, will insert by atom instead of by residue\n \"\"\"\n \n INFILE = open_file(filename, 'r')\n for line in INFILE:\n if line[0] == \"#\":continue\n line = line.strip()\n lineSP = line.split(deliminator)\n if len(lineSP)<3:\n print(\"Could not read line. Must have resnum, chain, and data columns\")\n continue\n if not atomname_column:\n self.replace_residue_b_factor(lineSP[resnum_column-1], lineSP[chain_column-1], lineSP[data_column-1])\n else:\n if len(lineSP)<4:\n print(\"Could not read line. Must have resnum, chain, atomname, and data columns\")\n continue\n self.replace_atom_b_factor(lineSP[resnum_column-1], lineSP[chain_column-1], lineSP[atomname_column-1], lineSP[data_column-1])\n INFILE.close()\n \n def replace_residue_b_factor(self, resnum: int, chain: str, data: float):\n \"\"\"\n Replaces the b factor of each atom in the residue with data.\n Can be all string representations or not.\n \"\"\"\n \n if type(resnum)!=str:\n resnum = str(resnum)\n if type(data)!=float:\n data=float(data) #In case data is an integer.\n \n #Need to make sure Bfactor column is adjusted correctly.\n \n for line in range(0, len(self.pdb_map)):\n if ((self.pdb_map[line]['residue_number']==resnum) and (self.pdb_map[line]['chain']==chain)):\n self.pdb_map[line]['b_factor']=\"%.2f\"%data\n else:\n continue\n \n \n \n def replace_atom_b_factor(self, resnum: int, chain: str, atomname: str, data: float):\n \"\"\"\n Replaces the b factor of an atom.\n Can be all string representations or not.\n \"\"\"\n \n if type(resnum)!=str:\n resnum = str(resnum)\n if type(data)!=float:\n data=float(data)\n \n #Need to make sure Bfactor column is adjusted correctly.\n \n for line in range(0, len(self.pdb_map)):\n if ((self.pdb_map[line]['residue_number']==resnum) and (self.pdb_map[line]['chain']==chain) and (self.pdb_map[line][\"atom_name\"]==atomname)):\n self.pdb_map[line]['b_factor']=\"%.2f\"%data\n else:\n continue\n\n \n " ]
[ [ "pandas.DataFrame" ] ]
SimiaCryptus/models
[ "c652a23a650070b71e286f1ded93726670161940", "c652a23a650070b71e286f1ded93726670161940", "c652a23a650070b71e286f1ded93726670161940", "c652a23a650070b71e286f1ded93726670161940", "c652a23a650070b71e286f1ded93726670161940" ]
[ "research/audioset/vggish/vggish_train_demo.py", "official/mnist/mnist_eager_test.py", "tutorials/rnn/quickdraw/train_model.py", "research/syntaxnet/dragnn/tools/conll_checkpoint_converter.py", "research/gan/progressive_gan/data_provider_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"A simple demonstration of running VGGish in training mode.\n\nThis is intended as a toy example that demonstrates how to use the VGGish model\ndefinition within a larger model that adds more layers on top, and then train\nthe larger model. If you let VGGish train as well, then this allows you to\nfine-tune the VGGish model parameters for your application. If you don't let\nVGGish train, then you use VGGish as a feature extractor for the layers above\nit.\n\nFor this toy task, we are training a classifier to distinguish between three\nclasses: sine waves, constant signals, and white noise. We generate synthetic\nwaveforms from each of these classes, convert into shuffled batches of log mel\nspectrogram examples with associated labels, and feed the batches into a model\nthat includes VGGish at the bottom and a couple of additional layers on top. We\nalso plumb in labels that are associated with the examples, which feed a label\nloss used for training.\n\nUsage:\n # Run training for 100 steps using a model checkpoint in the default\n # location (vggish_model.ckpt in the current directory). Allow VGGish\n # to get fine-tuned.\n $ python vggish_train_demo.py --num_batches 100\n\n # Same as before but run for fewer steps and don't change VGGish parameters\n # and use a checkpoint in a different location\n $ python vggish_train_demo.py --num_batches 50 \\\n --train_vggish=False \\\n --checkpoint /path/to/model/checkpoint\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom random import shuffle\n\nimport numpy as np\nimport tensorflow as tf\nimport vggish_input\nimport vggish_params\nimport vggish_slim\n\nflags = tf.app.flags\nslim = tf.contrib.slim\n\nflags.DEFINE_integer(\n 'num_batches', 30,\n 'Number of batches of examples to feed into the model. Each batch is of '\n 'variable size and contains shuffled examples of each class of audio.')\n\nflags.DEFINE_boolean(\n 'train_vggish', True,\n 'If True, allow VGGish parameters to change during training, thus '\n 'fine-tuning VGGish. 
If False, VGGish parameters are fixed, thus using '\n 'VGGish as a fixed feature extractor.')\n\nflags.DEFINE_string(\n 'checkpoint', 'vggish_model.ckpt',\n 'Path to the VGGish checkpoint file.')\n\nFLAGS = flags.FLAGS\n\n_NUM_CLASSES = 3\n\n\ndef _get_examples_batch():\n \"\"\"Returns a shuffled batch of examples of all audio classes.\n\n Note that this is just a toy function because this is a simple demo intended\n to illustrate how the training code might work.\n\n Returns:\n a tuple (features, labels) where features is a NumPy array of shape\n [batch_size, num_frames, num_bands] where the batch_size is variable and\n each row is a log mel spectrogram patch of shape [num_frames, num_bands]\n suitable for feeding VGGish, while labels is a NumPy array of shape\n [batch_size, num_classes] where each row is a multi-hot label vector that\n provides the labels for corresponding rows in features.\n \"\"\"\n # Make a waveform for each class.\n num_seconds = 5\n sr = 44100 # Sampling rate.\n t = np.linspace(0, num_seconds, int(num_seconds * sr)) # Time axis.\n # Random sine wave.\n freq = np.random.uniform(100, 1000)\n sine = np.sin(2 * np.pi * freq * t)\n # Random constant signal.\n magnitude = np.random.uniform(-1, 1)\n const = magnitude * t\n # White noise.\n noise = np.random.normal(-1, 1, size=t.shape)\n\n # Make examples of each signal and corresponding labels.\n # Sine is class index 0, Const class index 1, Noise class index 2.\n sine_examples = vggish_input.waveform_to_examples(sine, sr)\n sine_labels = np.array([[1, 0, 0]] * sine_examples.shape[0])\n const_examples = vggish_input.waveform_to_examples(const, sr)\n const_labels = np.array([[0, 1, 0]] * const_examples.shape[0])\n noise_examples = vggish_input.waveform_to_examples(noise, sr)\n noise_labels = np.array([[0, 0, 1]] * noise_examples.shape[0])\n\n # Shuffle (example, label) pairs across all classes.\n all_examples = np.concatenate((sine_examples, const_examples, noise_examples))\n all_labels = np.concatenate((sine_labels, const_labels, noise_labels))\n labeled_examples = list(zip(all_examples, all_labels))\n shuffle(labeled_examples)\n\n # Separate and return the features and labels.\n features = [example for (example, _) in labeled_examples]\n labels = [label for (_, label) in labeled_examples]\n return (features, labels)\n\n\ndef main(_):\n with tf.Graph().as_default(), tf.Session() as sess:\n # Define VGGish.\n embeddings = vggish_slim.define_vggish_slim(FLAGS.train_vggish)\n\n # Define a shallow classification model and associated training ops on top\n # of VGGish.\n with tf.variable_scope('mymodel'):\n # Add a fully connected layer with 100 units.\n num_units = 100\n fc = slim.fully_connected(embeddings, num_units)\n\n # Add a classifier layer at the end, consisting of parallel logistic\n # classifiers, one per class. 
This allows for multi-class tasks.\n logits = slim.fully_connected(\n fc, _NUM_CLASSES, activation_fn=None, scope='logits')\n tf.sigmoid(logits, name='prediction')\n\n # Add training ops.\n with tf.variable_scope('train'):\n global_step = tf.Variable(\n 0, name='global_step', trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES,\n tf.GraphKeys.GLOBAL_STEP])\n\n # Labels are assumed to be fed as a batch multi-hot vectors, with\n # a 1 in the position of each positive class label, and 0 elsewhere.\n labels = tf.placeholder(\n tf.float32, shape=(None, _NUM_CLASSES), name='labels')\n\n # Cross-entropy label loss.\n xent = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=labels, name='xent')\n loss = tf.reduce_mean(xent, name='loss_op')\n tf.summary.scalar('loss', loss)\n\n # We use the same optimizer and hyperparameters as used to train VGGish.\n optimizer = tf.train.AdamOptimizer(\n learning_rate=vggish_params.LEARNING_RATE,\n epsilon=vggish_params.ADAM_EPSILON)\n optimizer.minimize(loss, global_step=global_step, name='train_op')\n\n # Initialize all variables in the model, and then load the pre-trained\n # VGGish checkpoint.\n sess.run(tf.global_variables_initializer())\n vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)\n\n # Locate all the tensors and ops we need for the training loop.\n features_tensor = sess.graph.get_tensor_by_name(\n vggish_params.INPUT_TENSOR_NAME)\n labels_tensor = sess.graph.get_tensor_by_name('mymodel/train/labels:0')\n global_step_tensor = sess.graph.get_tensor_by_name(\n 'mymodel/train/global_step:0')\n loss_tensor = sess.graph.get_tensor_by_name('mymodel/train/loss_op:0')\n train_op = sess.graph.get_operation_by_name('mymodel/train/train_op')\n\n # The training loop.\n for _ in range(FLAGS.num_batches):\n (features, labels) = _get_examples_batch()\n [num_steps, loss, _] = sess.run(\n [global_step_tensor, loss_tensor, train_op],\n feed_dict={features_tensor: features, labels_tensor: labels})\n print('Step %d: loss %g' % (num_steps, loss))\n\nif __name__ == '__main__':\n tf.app.run()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\nimport tensorflow.contrib.eager as tfe # pylint: disable=g-bad-import-order\nfrom official.mnist import mnist\nfrom official.mnist import mnist_eager\nfrom official.utils.misc import keras_utils\n\n\ndef device():\n return \"/device:GPU:0\" if tfe.num_gpus() else \"/device:CPU:0\"\n\n\ndef data_format():\n return \"channels_first\" if tfe.num_gpus() else \"channels_last\"\n\n\ndef random_dataset():\n batch_size = 64\n images = tf.random_normal([batch_size, 784])\n labels = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32)\n return tf.data.Dataset.from_tensors((images, labels))\n\n\ndef train(defun=False):\n model = mnist.create_model(data_format())\n if defun:\n model.call = tfe.defun(model.call)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n dataset = random_dataset()\n with tf.device(device()):\n mnist_eager.train(model, optimizer, dataset,\n step_counter=tf.train.get_or_create_global_step())\n\n\ndef evaluate(defun=False):\n model = mnist.create_model(data_format())\n dataset = random_dataset()\n if defun:\n model.call = tfe.defun(model.call)\n with tf.device(device()):\n mnist_eager.test(model, dataset)\n\n\nclass MNISTTest(tf.test.TestCase):\n \"\"\"Run tests for MNIST eager loop.\"\"\"\n\n def setUp(self):\n if not keras_utils.is_v2_0():\n tf.compat.v1.enable_v2_behavior()\n super(MNISTTest, self).setUp()\n\n def test_train(self):\n train(defun=False)\n\n def test_evaluate(self):\n evaluate(defun=False)\n\n def test_train_with_defun(self):\n train(defun=True)\n\n def test_evaluate_with_defun(self):\n evaluate(defun=True)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"Binary for training a RNN-based classifier for the Quick, Draw! 
data.\n\npython train_model.py \\\n --training_data train_data \\\n --eval_data eval_data \\\n --model_dir /tmp/quickdraw_model/ \\\n --cell_type cudnn_lstm\n\nWhen running on GPUs using --cell_type cudnn_lstm is much faster.\n\nThe expected performance is ~75% in 1.5M steps with the default configuration.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport ast\nimport functools\nimport sys\n\nimport tensorflow as tf\n\n\ndef get_num_classes():\n classes = []\n with tf.gfile.GFile(FLAGS.classes_file, \"r\") as f:\n classes = [x for x in f]\n num_classes = len(classes)\n return num_classes\n\n\ndef get_input_fn(mode, tfrecord_pattern, batch_size):\n \"\"\"Creates an input_fn that stores all the data in memory.\n\n Args:\n mode: one of tf.contrib.learn.ModeKeys.{TRAIN, INFER, EVAL}\n tfrecord_pattern: path to a TF record file created using create_dataset.py.\n batch_size: the batch size to output.\n\n Returns:\n A valid input_fn for the model estimator.\n \"\"\"\n\n def _parse_tfexample_fn(example_proto, mode):\n \"\"\"Parse a single record which is expected to be a tensorflow.Example.\"\"\"\n feature_to_type = {\n \"ink\": tf.VarLenFeature(dtype=tf.float32),\n \"shape\": tf.FixedLenFeature([2], dtype=tf.int64)\n }\n if mode != tf.estimator.ModeKeys.PREDICT:\n # The labels won't be available at inference time, so don't add them\n # to the list of feature_columns to be read.\n feature_to_type[\"class_index\"] = tf.FixedLenFeature([1], dtype=tf.int64)\n\n parsed_features = tf.parse_single_example(example_proto, feature_to_type)\n labels = None\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = parsed_features[\"class_index\"]\n parsed_features[\"ink\"] = tf.sparse_tensor_to_dense(parsed_features[\"ink\"])\n return parsed_features, labels\n\n def _input_fn():\n \"\"\"Estimator `input_fn`.\n\n Returns:\n A tuple of:\n - Dictionary of string feature name to `Tensor`.\n - `Tensor` of target labels.\n \"\"\"\n dataset = tf.data.TFRecordDataset.list_files(tfrecord_pattern)\n if mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=10)\n dataset = dataset.repeat()\n # Preprocesses 10 files concurrently and interleaves records from each file.\n dataset = dataset.interleave(\n tf.data.TFRecordDataset,\n cycle_length=10,\n block_length=1)\n dataset = dataset.map(\n functools.partial(_parse_tfexample_fn, mode=mode),\n num_parallel_calls=10)\n dataset = dataset.prefetch(10000)\n if mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=1000000)\n # Our inputs are variable length, so pad them.\n dataset = dataset.padded_batch(\n batch_size, padded_shapes=dataset.output_shapes)\n features, labels = dataset.make_one_shot_iterator().get_next()\n return features, labels\n\n return _input_fn\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"Model function for RNN classifier.\n\n This function sets up a neural network which applies convolutional layers (as\n configured with params.num_conv and params.conv_len) to the input.\n The output of the convolutional layers is given to LSTM layers (as configured\n with params.num_layers and params.num_nodes).\n The final state of the all LSTM layers are concatenated and fed to a fully\n connected layer to obtain the final classification scores.\n\n Args:\n features: dictionary with keys: inks, lengths.\n labels: one hot encoded classes\n mode: one of tf.estimator.ModeKeys.{TRAIN, INFER, EVAL}\n params: a parameter 
dictionary with the following keys: num_layers,\n num_nodes, batch_size, num_conv, conv_len, num_classes, learning_rate.\n\n Returns:\n ModelFnOps for Estimator API.\n \"\"\"\n\n def _get_input_tensors(features, labels):\n \"\"\"Converts the input dict into inks, lengths, and labels tensors.\"\"\"\n # features[ink] is a sparse tensor that is [8, batch_maxlen, 3]\n # inks will be a dense tensor of [8, maxlen, 3]\n # shapes is [batchsize, 2]\n shapes = features[\"shape\"]\n # lengths will be [batch_size]\n lengths = tf.squeeze(\n tf.slice(shapes, begin=[0, 0], size=[params.batch_size, 1]))\n inks = tf.reshape(features[\"ink\"], [params.batch_size, -1, 3])\n if labels is not None:\n labels = tf.squeeze(labels)\n return inks, lengths, labels\n\n def _add_conv_layers(inks, lengths):\n \"\"\"Adds convolution layers.\"\"\"\n convolved = inks\n for i in range(len(params.num_conv)):\n convolved_input = convolved\n if params.batch_norm:\n convolved_input = tf.layers.batch_normalization(\n convolved_input,\n training=(mode == tf.estimator.ModeKeys.TRAIN))\n # Add dropout layer if enabled and not first convolution layer.\n if i > 0 and params.dropout:\n convolved_input = tf.layers.dropout(\n convolved_input,\n rate=params.dropout,\n training=(mode == tf.estimator.ModeKeys.TRAIN))\n convolved = tf.layers.conv1d(\n convolved_input,\n filters=params.num_conv[i],\n kernel_size=params.conv_len[i],\n activation=None,\n strides=1,\n padding=\"same\",\n name=\"conv1d_%d\" % i)\n return convolved, lengths\n\n def _add_regular_rnn_layers(convolved, lengths):\n \"\"\"Adds RNN layers.\"\"\"\n if params.cell_type == \"lstm\":\n cell = tf.nn.rnn_cell.BasicLSTMCell\n elif params.cell_type == \"block_lstm\":\n cell = tf.contrib.rnn.LSTMBlockCell\n cells_fw = [cell(params.num_nodes) for _ in range(params.num_layers)]\n cells_bw = [cell(params.num_nodes) for _ in range(params.num_layers)]\n if params.dropout > 0.0:\n cells_fw = [tf.contrib.rnn.DropoutWrapper(cell) for cell in cells_fw]\n cells_bw = [tf.contrib.rnn.DropoutWrapper(cell) for cell in cells_bw]\n outputs, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n cells_fw=cells_fw,\n cells_bw=cells_bw,\n inputs=convolved,\n sequence_length=lengths,\n dtype=tf.float32,\n scope=\"rnn_classification\")\n return outputs\n\n def _add_cudnn_rnn_layers(convolved):\n \"\"\"Adds CUDNN LSTM layers.\"\"\"\n # Convolutions output [B, L, Ch], while CudnnLSTM is time-major.\n convolved = tf.transpose(convolved, [1, 0, 2])\n lstm = tf.contrib.cudnn_rnn.CudnnLSTM(\n num_layers=params.num_layers,\n num_units=params.num_nodes,\n dropout=params.dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0,\n direction=\"bidirectional\")\n outputs, _ = lstm(convolved)\n # Convert back from time-major outputs to batch-major outputs.\n outputs = tf.transpose(outputs, [1, 0, 2])\n return outputs\n\n def _add_rnn_layers(convolved, lengths):\n \"\"\"Adds recurrent neural network layers depending on the cell type.\"\"\"\n if params.cell_type != \"cudnn_lstm\":\n outputs = _add_regular_rnn_layers(convolved, lengths)\n else:\n outputs = _add_cudnn_rnn_layers(convolved)\n # outputs is [batch_size, L, N] where L is the maximal sequence length and N\n # the number of nodes in the last layer.\n mask = tf.tile(\n tf.expand_dims(tf.sequence_mask(lengths, tf.shape(outputs)[1]), 2),\n [1, 1, tf.shape(outputs)[2]])\n zero_outside = tf.where(mask, outputs, tf.zeros_like(outputs))\n outputs = tf.reduce_sum(zero_outside, axis=1)\n return outputs\n\n def _add_fc_layers(final_state):\n \"\"\"Adds a fully 
connected layer.\"\"\"\n return tf.layers.dense(final_state, params.num_classes)\n\n # Build the model.\n inks, lengths, labels = _get_input_tensors(features, labels)\n convolved, lengths = _add_conv_layers(inks, lengths)\n final_state = _add_rnn_layers(convolved, lengths)\n logits = _add_fc_layers(final_state)\n # Add the loss.\n cross_entropy = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits))\n # Add the optimizer.\n train_op = tf.contrib.layers.optimize_loss(\n loss=cross_entropy,\n global_step=tf.train.get_global_step(),\n learning_rate=params.learning_rate,\n optimizer=\"Adam\",\n # some gradient clipping stabilizes training in the beginning.\n clip_gradients=params.gradient_clipping_norm,\n summaries=[\"learning_rate\", \"loss\", \"gradients\", \"gradient_norm\"])\n # Compute current predictions.\n predictions = tf.argmax(logits, axis=1)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"logits\": logits, \"predictions\": predictions},\n loss=cross_entropy,\n train_op=train_op,\n eval_metric_ops={\"accuracy\": tf.metrics.accuracy(labels, predictions)})\n\n\ndef create_estimator_and_specs(run_config):\n \"\"\"Creates an Experiment configuration based on the estimator and input fn.\"\"\"\n model_params = tf.contrib.training.HParams(\n num_layers=FLAGS.num_layers,\n num_nodes=FLAGS.num_nodes,\n batch_size=FLAGS.batch_size,\n num_conv=ast.literal_eval(FLAGS.num_conv),\n conv_len=ast.literal_eval(FLAGS.conv_len),\n num_classes=get_num_classes(),\n learning_rate=FLAGS.learning_rate,\n gradient_clipping_norm=FLAGS.gradient_clipping_norm,\n cell_type=FLAGS.cell_type,\n batch_norm=FLAGS.batch_norm,\n dropout=FLAGS.dropout)\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params=model_params)\n\n train_spec = tf.estimator.TrainSpec(input_fn=get_input_fn(\n mode=tf.estimator.ModeKeys.TRAIN,\n tfrecord_pattern=FLAGS.training_data,\n batch_size=FLAGS.batch_size), max_steps=FLAGS.steps)\n\n eval_spec = tf.estimator.EvalSpec(input_fn=get_input_fn(\n mode=tf.estimator.ModeKeys.EVAL,\n tfrecord_pattern=FLAGS.eval_data,\n batch_size=FLAGS.batch_size))\n\n return estimator, train_spec, eval_spec\n\n\ndef main(unused_args):\n estimator, train_spec, eval_spec = create_estimator_and_specs(\n run_config=tf.estimator.RunConfig(\n model_dir=FLAGS.model_dir,\n save_checkpoints_secs=300,\n save_summary_steps=100))\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n parser.add_argument(\n \"--training_data\",\n type=str,\n default=\"\",\n help=\"Path to training data (tf.Example in TFRecord format)\")\n parser.add_argument(\n \"--eval_data\",\n type=str,\n default=\"\",\n help=\"Path to evaluation data (tf.Example in TFRecord format)\")\n parser.add_argument(\n \"--classes_file\",\n type=str,\n default=\"\",\n help=\"Path to a file with the classes - one class per line\")\n parser.add_argument(\n \"--num_layers\",\n type=int,\n default=3,\n help=\"Number of recurrent neural network layers.\")\n parser.add_argument(\n \"--num_nodes\",\n type=int,\n default=128,\n help=\"Number of node per recurrent network layer.\")\n parser.add_argument(\n \"--num_conv\",\n type=str,\n default=\"[48, 64, 96]\",\n help=\"Number of conv layers along with number of filters per layer.\")\n parser.add_argument(\n \"--conv_len\",\n type=str,\n default=\"[5, 5, 3]\",\n 
help=\"Length of the convolution filters.\")\n parser.add_argument(\n \"--cell_type\",\n type=str,\n default=\"lstm\",\n help=\"Cell type used for rnn layers: cudnn_lstm, lstm or block_lstm.\")\n parser.add_argument(\n \"--batch_norm\",\n type=\"bool\",\n default=\"False\",\n help=\"Whether to enable batch normalization or not.\")\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=0.0001,\n help=\"Learning rate used for training.\")\n parser.add_argument(\n \"--gradient_clipping_norm\",\n type=float,\n default=9.0,\n help=\"Gradient clipping norm used during training.\")\n parser.add_argument(\n \"--dropout\",\n type=float,\n default=0.3,\n help=\"Dropout used for convolutions and bidi lstm layers.\")\n parser.add_argument(\n \"--steps\",\n type=int,\n default=100000,\n help=\"Number of training steps.\")\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=8,\n help=\"Batch size to use for training/evaluation.\")\n parser.add_argument(\n \"--model_dir\",\n type=str,\n default=\"\",\n help=\"Path for storing the model checkpoints.\")\n parser.add_argument(\n \"--self_test\",\n type=\"bool\",\n default=\"False\",\n help=\"Whether to enable batch normalization or not.\")\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n", "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Conversion script for CoNLL checkpoints to DRAGNN SavedModel format.\n\nThis script loads and finishes a CoNLL checkpoint, then exports it as a\nSavedModel. It expects that the CoNLL RNN cells have been updated using the\nRNN update script.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\nfrom absl import flags\nfrom dragnn.protos import spec_pb2\nfrom dragnn.python import dragnn_model_saver_lib as saver_lib\nfrom dragnn.python import spec_builder\nfrom google.protobuf import text_format\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('master_spec', None, 'Path to task context with '\n 'inputs and parameters for feature extractors.')\nflags.DEFINE_string('params_path', None, 'Path to trained model parameters.')\nflags.DEFINE_string('export_path', '', 'Output path for exported servo model.')\nflags.DEFINE_string('resource_path', '',\n 'Base directory for resources in the master spec.')\nflags.DEFINE_bool('export_moving_averages', True,\n 'Whether to export the moving average parameters.')\n\n\ndef export(master_spec_path, params_path, resource_path, export_path,\n export_moving_averages):\n \"\"\"Restores a model and exports it in SavedModel form.\n\n This method loads a graph specified by the spec at master_spec_path and the\n params in params_path. 
It then saves the model in SavedModel format to the\n location specified in export_path.\n\n Args:\n master_spec_path: Path to a proto-text master spec.\n params_path: Path to the parameters file to export.\n resource_path: Path to resources in the master spec.\n export_path: Path to export the SavedModel to.\n export_moving_averages: Whether to export the moving average parameters.\n \"\"\"\n # Old CoNLL checkpoints did not need a known-word-map. Create a temporary if\n # that file is missing.\n if not tf.gfile.Exists(os.path.join(resource_path, 'known-word-map')):\n with tf.gfile.FastGFile(os.path.join(resource_path, 'known-word-map'),\n 'w') as out_file:\n out_file.write('This file intentionally left blank.')\n\n graph = tf.Graph()\n master_spec = spec_pb2.MasterSpec()\n with tf.gfile.FastGFile(master_spec_path) as fin:\n text_format.Parse(fin.read(), master_spec)\n\n # This is a workaround for an issue where the segmenter master-spec had a\n # spurious resource in it; this resource was not respected in the spec-builder\n # and ended up crashing the saver (since it didn't really exist).\n for component in master_spec.component:\n del component.resource[:]\n\n spec_builder.complete_master_spec(master_spec, None, resource_path)\n\n # Remove '/' if it exists at the end of the export path, ensuring that\n # path utils work correctly.\n stripped_path = export_path.rstrip('/')\n saver_lib.clean_output_paths(stripped_path)\n\n short_to_original = saver_lib.shorten_resource_paths(master_spec)\n saver_lib.export_master_spec(master_spec, graph)\n saver_lib.export_to_graph(master_spec, params_path, stripped_path, graph,\n export_moving_averages)\n saver_lib.export_assets(master_spec, short_to_original, stripped_path)\n\n\ndef main(unused_argv):\n # Run the exporter.\n export(FLAGS.master_spec, FLAGS.params_path, FLAGS.resource_path,\n FLAGS.export_path, FLAGS.export_moving_averages)\n tf.logging.info('Export complete.')\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport data_provider\nimport numpy as np\nimport tensorflow as tf\nfrom absl import flags\n\n\nclass DataProviderTest(tf.test.TestCase):\n\n def setUp(self):\n super(DataProviderTest, self).setUp()\n self.testdata_dir = os.path.join(\n flags.FLAGS.test_srcdir,\n 'google3/third_party/tensorflow_models/gan/progressive_gan/testdata/')\n\n def test_normalize_image(self):\n image_np = np.asarray([0, 255, 210], dtype=np.uint8)\n normalized_image = data_provider.normalize_image(tf.constant(image_np))\n self.assertEqual(normalized_image.dtype, tf.float32)\n self.assertEqual(normalized_image.shape.as_list(), [3])\n with self.test_session(use_gpu=True) as sess:\n normalized_image_np = sess.run(normalized_image)\n self.assertNDArrayNear(normalized_image_np, [-1, 1, 0.6470588235], 1.0e-6)\n\n def test_sample_patch_large_patch_returns_upscaled_image(self):\n image_np = np.reshape(np.arange(2 * 2), [2, 2, 1])\n image = tf.constant(image_np, dtype=tf.float32)\n image_patch = data_provider.sample_patch(\n image, patch_height=3, patch_width=3, colors=1)\n with self.test_session(use_gpu=True) as sess:\n image_patch_np = sess.run(image_patch)\n expected_np = np.asarray([[[0.], [0.66666669], [1.]], [[1.33333337], [2.],\n [2.33333349]],\n [[2.], [2.66666675], [3.]]])\n self.assertNDArrayNear(image_patch_np, expected_np, 1.0e-6)\n\n def test_sample_patch_small_patch_returns_downscaled_image(self):\n image_np = np.reshape(np.arange(3 * 3), [3, 3, 1])\n image = tf.constant(image_np, dtype=tf.float32)\n image_patch = data_provider.sample_patch(\n image, patch_height=2, patch_width=2, colors=1)\n with self.test_session(use_gpu=True) as sess:\n image_patch_np = sess.run(image_patch)\n expected_np = np.asarray([[[0.], [1.5]], [[4.5], [6.]]])\n self.assertNDArrayNear(image_patch_np, expected_np, 1.0e-6)\n\n def test_batch_images(self):\n image_np = np.reshape(np.arange(3 * 3), [3, 3, 1])\n image = tf.constant(image_np, dtype=tf.float32)\n images = data_provider.batch_images(\n image,\n patch_height=2,\n patch_width=2,\n colors=1,\n batch_size=2,\n shuffle=False,\n num_threads=1)\n with self.test_session(use_gpu=True) as sess:\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n expected_np = np.asarray([[[[0.], [1.5]], [[4.5], [6.]]], [[[0.], [1.5]],\n [[4.5], [6.]]]])\n self.assertNDArrayNear(images_np, expected_np, 1.0e-6)\n\n def test_provide_data(self):\n images = data_provider.provide_data(\n 'mnist',\n 'train',\n dataset_dir=self.testdata_dir,\n batch_size=2,\n shuffle=False,\n patch_height=3,\n patch_width=3,\n colors=1)\n self.assertEqual(images.shape.as_list(), [2, 3, 3, 1])\n with self.test_session(use_gpu=True) as sess:\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n 
self.assertEqual(images_np.shape, (2, 3, 3, 1))\n\n def test_provide_data_from_image_files_a_single_pattern(self):\n file_pattern = os.path.join(self.testdata_dir, '*.jpg')\n images = data_provider.provide_data_from_image_files(\n file_pattern,\n batch_size=2,\n shuffle=False,\n patch_height=3,\n patch_width=3,\n colors=1)\n self.assertEqual(images.shape.as_list(), [2, 3, 3, 1])\n with self.test_session(use_gpu=True) as sess:\n sess.run(tf.local_variables_initializer())\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n self.assertEqual(images_np.shape, (2, 3, 3, 1))\n\n def test_provide_data_from_image_files_a_list_of_patterns(self):\n file_pattern = [os.path.join(self.testdata_dir, '*.jpg')]\n images = data_provider.provide_data_from_image_files(\n file_pattern,\n batch_size=2,\n shuffle=False,\n patch_height=3,\n patch_width=3,\n colors=1)\n self.assertEqual(images.shape.as_list(), [2, 3, 3, 1])\n with self.test_session(use_gpu=True) as sess:\n sess.run(tf.local_variables_initializer())\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n self.assertEqual(images_np.shape, (2, 3, 3, 1))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.random.uniform", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.placeholder", "tensorflow.summary.scalar", "tensorflow.app.run", "tensorflow.global_variables_initializer", "tensorflow.sigmoid", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.variable_scope", "tensorflow.Graph", "tensorflow.Session", "tensorflow.Variable", "numpy.random.normal", "numpy.sin", "numpy.concatenate", "numpy.array" ], [ "tensorflow.contrib.eager.defun", "tensorflow.random_uniform", "tensorflow.data.Dataset.from_tensors", "tensorflow.train.GradientDescentOptimizer", "tensorflow.train.get_or_create_global_step", "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.contrib.eager.num_gpus", "tensorflow.random_normal", "tensorflow.test.main" ], [ "tensorflow.sparse_tensor_to_dense", "tensorflow.VarLenFeature", "tensorflow.reshape", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.squeeze", "tensorflow.slice", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.train.get_global_step", "tensorflow.reduce_sum", "tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn", "tensorflow.gfile.GFile", "tensorflow.estimator.RunConfig", "tensorflow.FixedLenFeature", "tensorflow.transpose", "tensorflow.layers.conv1d", "tensorflow.shape", "tensorflow.app.run", "tensorflow.parse_single_example", "tensorflow.layers.batch_normalization", "tensorflow.data.TFRecordDataset.list_files", "tensorflow.contrib.cudnn_rnn.CudnnLSTM", "tensorflow.zeros_like", "tensorflow.layers.dense", "tensorflow.layers.dropout", "tensorflow.estimator.Estimator", "tensorflow.metrics.accuracy", "tensorflow.argmax", "tensorflow.estimator.train_and_evaluate" ], [ "tensorflow.Graph", "tensorflow.gfile.FastGFile", "tensorflow.logging.info", "tensorflow.app.run" ], [ "tensorflow.contrib.slim.queues.QueueRunners", "numpy.asarray", "numpy.arange", "tensorflow.constant", "tensorflow.local_variables_initializer", "tensorflow.test.main" ] ]
samuelru/session-knn-ae
[ "c6232667dbe57f82391d487875b52f651ca08a21" ]
[ "ipython/3_Training_Predicting/prnn_cb12_train_predict.py" ]
[ "from keras.layers import Input, Dense, concatenate\nfrom keras.layers.recurrent import GRU\nfrom keras.utils import plot_model\nfrom keras.models import Model, load_model\nfrom keras.callbacks import ModelCheckpoint\nimport keras\nimport pandas as pd\nimport numpy as np\nimport keras.backend as K\nfrom keras.utils import to_categorical\nfrom keras.losses import categorical_crossentropy\nfrom multiprocessing import Pool, cpu_count\nimport pickle\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport os\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n\n\ndataset = \"cb12/\"\npath = \"../../data/\"\ninterim_path = path + dataset + \"interim/\"\nprocessed_path = path + dataset + \"processed/\"\nmodel_path = \"models/\"\nmodel_path_valid = \"models/valid/\"\n\n\n\ndef TOP1(y_true, y_pred):\n y1 = y_pred * y_true\n y2 = K.sum(y1, axis=1)[:, np.newaxis]\n y3 = y_true - y1\n return (K.sum(K.sigmoid(y_pred - y2)) + y3 * y3) / tf.cast(tf.shape(y_true)[0], tf.float32)\n\nloss = TOP1\n\ndef create_prnn_model(left_input_size, right_input_size, batch_size = 512, hidden_units = 100, o_activation='softmax', lr = 0.001): \n emb_size = 50\n size = emb_size\n\n # left input - item vector\n input_left = Input(batch_shape=(batch_size, 1, left_input_size), name='input_left')\n gru_left, gru_left_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_left')(input_left)\n\n # right input - feature vector\n input_right = Input(batch_shape=(batch_size, 1, right_input_size), name='input_right')\n gru_right, gru_right_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_right')(input_right)\n \n # merging both layers and creating the model\n merged = concatenate([gru_left, gru_right])\n #change softmax per another activation funciton?\n output = Dense(left_input_size, activation=o_activation, name='output')(merged)\n model = Model(inputs=[input_left, input_right], outputs=output, name='gru4rec')\n \n encoder = Model(inputs=[input_left, input_right], outputs=merged)\n\n # define model's optimizer\n #optimizer = optim.Optimizer(optimizer=self.optimizer, lr=self.lr)\n #opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n opt = keras.optimizers.Adagrad(lr=lr)\n \n # define model's loss function --> implement here the top1 loss function\n# loss_function = loss.LossFunction(loss_type=self.loss_function)\n #model.compile(loss=loss_function, optimizer=opt, metrics=['accuracy'])\n \n model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])\n filepath = model_path_valid + 'prnn_cb12_checkpoint.h5'\n checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=2, save_best_only=True, mode='min')\n callbacks_list = []\n model.summary()\n #plot_model(model, show_shapes=True, to_file='rnn-structure.png')\n return model, encoder\n\ndef get_states(model):\n #return the actual states of the layers\n return [K.get_value(s) for s,_ in model.state_updates]\n\n\ndef freeze_layer(model, layer_name, lr):\n if layer_name == 'gru_left':\n # gru left layer will not be trained this mini batch\n model.get_layer(layer_name).trainable = False\n # but gru right will\n model.get_layer('gru_right').trainable = True\n elif layer_name == 'gru_right':\n # gru right layer will not be trained this mini batch\n model.get_layer(layer_name).trainable = False\n # but gru left will\n model.get_layer('gru_left').trainable = True\n else:\n raise NotImplementedError\n \n # opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, 
decay=0.0, amsgrad=False)\n opt = keras.optimizers.Adagrad(lr=lr)\n model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])\n return model\n\n\nclass SessionDataset:\n \"\"\"Credit to yhs-968/pyGRU4REC.\"\"\" \n def __init__(self, data, sep='\\t', session_key='session_id', item_key='item_id', time_key='created_at', n_samples=-1, itemmap=None, time_sort=False):\n \"\"\"\n Args:\n path: path of the csv file\n sep: separator for the csv\n session_key, item_key, time_key: name of the fields corresponding to the sessions, items, time\n n_samples: the number of samples to use. If -1, use the whole dataset.\n itemmap: mapping between item IDs and item indices\n time_sort: whether to sort the sessions by time or not\n \"\"\"\n self.df = data\n self.session_key = session_key\n self.item_key = item_key\n self.time_key = time_key\n self.time_sort = time_sort\n self.add_item_indices(itemmap=itemmap)\n self.df.sort_values([session_key, time_key], inplace=True)\n\n # Sort the df by time, and then by session ID. That is, df is sorted by session ID and\n # clicks within a session are next to each other, where the clicks within a session are time-ordered.\n\n self.click_offsets = self.get_click_offsets() \n #array of the positions where there is a change of session. \n #len = len(session_idx_arr) + 1\n \n self.session_idx_arr = self.order_session_idx() \n #array of sessions [0 1 2 3 4 .... n-1]\n \n def get_click_offsets(self):\n \"\"\"\n Return the offsets of the beginning clicks of each session IDs,\n where the offset is calculated against the first click of the first session ID.\n \"\"\"\n offsets = np.zeros(self.df[self.session_key].nunique() + 1, dtype=np.int32)\n # group & sort the df by session_key and get the offset values\n offsets[1:] = self.df.groupby(self.session_key).size().cumsum()\n return offsets\n\n def order_session_idx(self):\n \"\"\" Order the session indices \"\"\"\n if self.time_sort:\n # starting time for each sessions, sorted by session IDs\n sessions_start_time = self.df.groupby(self.session_key)[self.time_key].min().values\n # order the session indices by session starting times\n session_idx_arr = np.argsort(sessions_start_time)\n else:\n session_idx_arr = np.arange(self.df[self.session_key].nunique())\n return session_idx_arr\n \n def add_item_indices(self, itemmap=None):\n \"\"\" \n Add item index column named \"item_idx\" to the df\n Args:\n itemmap (pd.DataFrame): mapping between the item Ids and indices\n \"\"\"\n if itemmap is None:\n item_ids = self.df[self.item_key].unique() # unique item ids\n item2idx = pd.Series(data=np.arange(len(item_ids)),\n index=item_ids)\n itemmap = pd.DataFrame({self.item_key:item_ids,\n 'item_idx':item2idx[item_ids].values})\n \n self.itemmap = itemmap\n self.df = pd.merge(self.df, self.itemmap, on=self.item_key, how='inner')\n \n @property \n def items(self):\n return self.itemmap.item_id.unique()\n\n\n\n\nclass SessionDataLoader:\n \"\"\"Credit to yhs-968/pyGRU4REC.\"\"\" \n def __init__(self, dataset, batch_size):\n \"\"\"\n A class for creating session-parallel mini-batches.\n Args:\n dataset (SessionDataset): the session dataset to generate the batches from\n batch_size (int): size of the batch\n \"\"\"\n self.dataset = dataset\n self.batch_size = batch_size\n self.done_sessions_counter = 0\n \n def __iter__(self):\n \"\"\" Returns the iterator for producing session-parallel training mini-batches.\n Yields:\n input (B,): Item indices that will be encoded as one-hot vectors later.\n target (B,): a Variable that stores the target item 
indices\n masks: Numpy array indicating the positions of the sessions to be terminated\n \"\"\"\n\n df = self.dataset.df\n \n session_key='session_id'\n item_key='item_id'\n time_key='created_at'\n self.n_items = df[item_key].nunique()\n click_offsets = self.dataset.click_offsets\n #print(click_offsets)\n session_idx_arr = self.dataset.session_idx_arr\n #print(session_idx_arr)\n \n iters = np.arange(self.batch_size)\n #iters = np.arange(1)\n\n maxiter = iters.max()\n \n start = click_offsets[session_idx_arr[iters]]\n end = click_offsets[session_idx_arr[iters] + 1]\n #print(start)\n #print(end)\n mask = [] # indicator for the sessions to be terminated\n finished = False \n\n while not finished:\n #minimum lenght of all the sessions\n minlen = (end - start).min()\n # Item indices (for embedding) for clicks where the first sessions start\n idx_target = df.item_idx.values[start]\n for i in range(minlen - 1):\n # Build inputs & targets\n idx_input = idx_target\n idx_target = df.item_idx.values[start + i + 1]\n inp = idx_input\n target = idx_target\n yield inp, target, mask\n \n # click indices where a particular session meets second-to-last element\n start = start + (minlen - 1)\n # see if how many sessions should terminate\n mask = np.arange(len(iters))[(end - start) <= 1]\n self.done_sessions_counter = len(mask)\n for idx in mask:\n maxiter += 1\n if maxiter >= len(click_offsets) - 1:\n finished = True\n break\n # update the next starting/ending point\n iters[idx] = maxiter\n start[idx] = click_offsets[session_idx_arr[maxiter]]\n end[idx] = click_offsets[session_idx_arr[maxiter] + 1]\n \n\n\n\ndef train_prnn(model, lr, loader, layer_freezing_enabled = False, num_epochs = 10):\n for epoch in range(0, num_epochs):\n print(\"Epoch: \" + str(epoch+1))\n epoch_loss = 0 \n\n i = 0\n for feat, target, mask in loader:\n #feat = np array size BATCH_SIZE with the item indexes of the first items of the first BATCH_SIZE sessions\n #comvert feat to an array size (BATCH_SIZE, 26723) of one hot encoding the indes with loader.n_items\n\n input_oh = to_categorical(feat, num_classes=loader.n_items)\n #convert from shape (BATCH_SIZE, 26723) to (BATCH_SIZE, 1, 26723)\n input_oh = np.expand_dims(input_oh, axis=1) \n\n # with the argmax function you get back again the feat/target np array (arg_input = feat)\n ### arg_input = np.argmax(to_categorical(feat, num_classes=loader.n_items), axis=1)\n ### arg_output = np.argmax(to_categorical(target, num_classes=loader.n_items), axis=1)\n input_feature = np.array([])\n\n for line in feat:\n #result = int(mapitem[(mapitem.item_idx == line)].item_id.values)\n result = str(mapitem[(mapitem.item_idx == line)].item_id.values[0])\n #print(result)\n \n # use empty feature vec if missing\n feature_vector = empty_feature_vec\n if result in item_encodings.keys():\n feature_vector = item_encodings[result]\n \n input_feature = np.append(input_feature, feature_vector)\n\n input_feature = input_feature.reshape(batch_size, 1, feature_size)\n\n #target = np array size BATCH_SIZE with the item indexes of the TARGET items of the feat array items\n target_oh = to_categorical(target, num_classes=loader.n_items)\n\n #calculate the loss between the input and the expected output\n\n if layer_freezing_enabled:\n if i % 2 is 0:\n model = freeze_layer(model, 'gru_left', lr = lr)\n else:\n model = freeze_layer(model, 'gru_right', lr = lr)\n\n tr_loss = model.train_on_batch([input_oh, input_feature], target_oh)\n epoch_loss += tr_loss[0]\n\n i = i + 1\n print(\"Epoch loss: \" + str(epoch_loss))\n 
return model\n\n\n\n# # Set data for final training\n\n# set data\n\ntrain_path = '../../data/' + dataset + 'processed/train_14d.csv'\ntrain = pd.read_csv(train_path, sep='\\t')[['session_id', 'item_id', 'created_at']]\n\ninteractions = pd.read_csv('../../data/' + dataset + 'interim/interactions.csv', header=0, sep='\\t')\nitems = pd.read_csv('../../data/' + dataset + 'interim/items.csv', header=0, sep='\\t')\nview_fields = [\"item_id\", \"state\", \"ReqTopic\", \"DescTopic\", \"TitTopic\"]\ncommon_items = items.merge(interactions, on=['item_id'])[view_fields].drop_duplicates()\n\nitem_count = len(train['item_id'].unique())\nprint(item_count)\nsession_count = len(train['created_at'].unique())\nprint(len(common_items))\n\n# CB12 items need to be converted to dummies\n\ncommon = common_items\n\n\ncommon[\"item_id\"] = common[\"item_id\"].astype('str')\ncommon[\"DescTopic\"] = common[\"DescTopic\"].astype('str')\ncommon[\"TitTopic\"] = common[\"TitTopic\"].astype('str')\ncommon[\"ReqTopic\"] = common[\"ReqTopic\"].astype('str')\n\ndf2 = pd.DataFrame(index=common.index)\ns1 = pd.get_dummies(common[\"state\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"state\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\ns1 = pd.get_dummies(common[\"ReqTopic\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"ReqTopic\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\ndf2 = df2.drop([\"state_\", \"ReqTopic_\"], axis=1, errors=\"ignore\")\n\ns1 = pd.get_dummies(common[\"DescTopic\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"DescTopic\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\n\ns1 = pd.get_dummies(common[\"TitTopic\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"TitTopic\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\n\ndf2 = df2.drop([\"DescTopic_\", \"TitTopic_\"], axis=1, errors=\"ignore\")\n\n\ncommon = common.drop([\"state\", \"ReqTopic\", \"DescTopic\", \"TitTopic\"], axis=1)\ndf2 = pd.concat([common, df2], axis=1)\n\none_hot = df2\nprint(one_hot.shape)\n# number of content features per item\nfeature_size = one_hot.shape[1] - 1\n\nitem_encodings = {}\nfor index, row in one_hot.iterrows():\n item_id = row[\"item_id\"]\n item_encodings[item_id] = row.values[1:]\n\nprint(len(item_encodings))\n\nempty_feature_vec = np.zeros(feature_size, dtype=int)\n\n# load data\n\nbatch_size = 512\n\ntrain_dataset = SessionDataset(train)\nloader = SessionDataLoader(train_dataset, batch_size=batch_size)\nmapitem = loader.dataset.itemmap\n\n\n# # Train final model\n\n\n# In[ ]:\n\n\n# use best params\nls = 1000\nact = \"softmax\"\nlr = 0.001\n# define model\nmodel, encoder = create_prnn_model(item_count, feature_size, batch_size=batch_size, hidden_units = ls, o_activation = act, lr = lr)\n\n# train model\nmodel_name = \"cb12_prnn_a_\" + act + \"_ls_\" + str(ls) + \"_lr_\" + str(lr) + \".model2\"\nprint(\"Starting to train: \" + model_name)\n\nmodel = train_prnn(model, lr, loader)\n\npickle.dump(model, open(model_path + model_name, 'wb'), protocol=4)\nprint(\"Stored model in: \" + model_path + model_name)\n\n\n# # Generate predictions\n\ndef predict_function(sid, test_session, pr, item_idx_map, idx_item_map, cut_off=20, \n session_key='session_id', item_key='item_id', time_key='created_at'):\n test_session.sort_values([time_key], inplace=True)\n # get first and only session_id (as we grouped it before calling this method)\n session_id = test_session[session_key].unique()[0]\n\n log_columns = [\"session_id\", \"input_items\", 
\"input_count\", \"position\", \"remaining_items\", \"remaining_count\", \"predictions\"]\n log_df = pd.DataFrame(columns = log_columns)\n\n session_length = len(test_session)\n il = a = np.zeros((batch_size, 1, len(item_idx_map)))\n ir = a = np.zeros((batch_size, 1, 115))\n \n for i in range(session_length -1):\n # use current item as reference point (rest is for testing)\n current_item_id = test_session[item_key].values[i]\n\n item_vec = np.zeros(len(item_idx_map), dtype=int)\n item_idx = item_idx_map[current_item_id]\n item_vec[item_idx] = 1\n # set vector in batch input\n il[i, 0] = item_vec\n \n #item_features = item_encodings[current_item_id]\n \n # use empty feature vec if missing\n item_features = empty_feature_vec\n if current_item_id in item_encodings.keys():\n item_features = item_encodings[result]\n \n #item_features = item_features.reshape(1,1, len(item_features))\n ir[i, 0] = item_features\n \n # do batch prediction\n pred = model.predict([il, ir], batch_size=batch_size)\n \n # for every subsession prediction\n for i in range(session_length-1):\n preds = pred[i]\n topn_idx_preds = preds.argsort()[-cut_off:][::-1]\n \n predictions = []\n # for every recommended item index\n for item_idx in topn_idx_preds:\n pred_item = idx_item_map[item_idx]\n predictions.append(pred_item)\n \n current_input_set = test_session[item_key].values[:i+1]\n remaining_test_set = test_session[item_key].values[i+1:]\n \n position = \"MID\"\n if i == 0:\n position = \"FIRST\"\n if len(remaining_test_set) == 1:\n position = \"LAST\"\n \n log_df = log_df.append({\n \"session_id\": sid,\n \"input_items\": ','.join(map(str, current_input_set)),\n \"input_count\": len(current_input_set),\n \"position\": position,\n \"remaining_items\": ','.join(map(str, remaining_test_set)),\n \"remaining_count\": len(remaining_test_set),\n \"predictions\": ','.join(map(str, predictions))\n }, ignore_index=True) \n \n \n log_df['input_count'] = log_df['input_count'].astype(int)\n log_df['remaining_count'] = log_df['remaining_count'].astype(int)\n \n return log_df\n\n# In[ ]:\n\n\nimport keras.losses\nkeras.losses.TOP1 = TOP1\n\nprint(\"Preparing train data...\")\ntrain_dataset = SessionDataset(train)\nloader = SessionDataLoader(train_dataset, batch_size=batch_size)\n \n\ntest_path = '../../data/' + dataset + 'processed/test_14d.csv'\ntest = pd.read_csv(test_path, sep='\\t')[['session_id', 'item_id', 'created_at']]\ntest_dataset = SessionDataset(test)\ntest_generator = SessionDataLoader(test_dataset, batch_size=batch_size)\n\nsession_groups = test.groupby(\"session_id\")\nmapitem = loader.dataset.itemmap\n\nitem_idx_map = {}\nidx_item_map = {}\nfor index, row in mapitem.iterrows():\n item_id = row[\"item_id\"]\n item_idx = row[\"item_idx\"]\n item_idx_map[item_id] = item_idx\n idx_item_map[item_idx] = item_id\n\n \npredict_path = \"../../data/cb12/interim/predict/base/\"\n\n\nmodel_name = \"cb12_prnn_a_\" + act + \"_ls_\" + str(ls) + \"_lr_\" + str(lr) + \".model2\"\nmodel = pickle.load(open(model_path + model_name, 'rb'))\nprint(\"Loaded: \" + model_name)\nres_list = []\n# predict\nreport_freq = len(session_groups) // 5 \ncount = 0\nfor sid, session in session_groups:\n pred_df = predict_function(sid, session, model, item_idx_map, idx_item_map)\n res_list.append(pred_df)\n # reset states\n model.get_layer('gru_left').reset_states()\n model.get_layer('gru_right').reset_states()\n # print progress\n count += 1\n if count % report_freq == 0:\n print(\"Predicted for \" + str(count) + \" sessions. 
\" + str(len(session_groups) - count) + \" sessions to go.\" )\n# concat results\nres = pd.concat(res_list)\nres = res.reindex(columns = [\"session_id\", \"input_items\", \"input_count\", \"position\", \"remaining_items\", \"remaining_count\", \"predictions\"])\n\nres.to_csv(predict_path + \"test_14d_prnn2.csv\", sep='\\t')\n \nprint(\"Stored predictions: \" + predict_path + \"test_14d_prnn2.csv\")\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "tensorflow.compat.v1.shape", "numpy.zeros", "numpy.append", "pandas.read_csv", "pandas.DataFrame", "numpy.argsort", "numpy.arange", "numpy.expand_dims", "pandas.merge", "pandas.concat", "numpy.array", "tensorflow.compat.v1.disable_v2_behavior" ] ]
MarcosVs98/candlestick-indicators
[ "5423b56751eead43569b15917d29519b4dd6f0e3" ]
[ "CandlestickIndicators.py" ]
[ "import logging\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nclass ChartIndicatorException(Exception):\n\tpass\n\n\nclass PlottingExeception(ChartIndicatorException):\n\tpass\n\n\nclass TraceCandlesException(ChartIndicatorException):\n\tpass\n\n\nclass ErrorImplementingIndicator(ChartIndicatorException):\n\tpass\n\n\nlog = logging.getLogger(\"candlestick-chart-indicator\")\n\n\nclass CandlestickChartIndicator(ABC):\n\t\"\"\"\n\tBase class responsible for the implementation of candlestick graphics, and their data.\n\n\tdetail:\n\t\tThis class implements a \"Chain of Responsibility\" design pattern.\n\t\thttps://en.wikipedia.org/wiki/Chain-of-responsibility_pattern.\n\t\"\"\"\n\[email protected]\n\tdef inicate(self):\n\t\tpass\n\n\nclass MA(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing a simple Moving Average that stops\n\tfilter out price fluctuations helping to identify trends.\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tma = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).mean()\n\t\t\ttrace_avg = go.Scatter(x=ma.index, y=MA, name='MA', line=dict(color='#BEBECF'), opacity=0.8)\n\t\t\tdata.append(trace_avg)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'ma' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass EMA(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing an exponential moving average\n\tEMA = Price today * K + EMA yesterday x (1-k) where K = 2 /(N+1)\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tk = (2 / (kwargs.get(\"days\", 21) + 1))\n\t\t\tma = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).mean()\n\t\t\tema_data = pd.DataFrame(index=ma.index)\n\t\t\tema_data['PRICE'] = data_frame['close']\n\t\t\tema_data['MA'] = ma\n\t\t\tema_data['EMA'] = np.NaN\n\t\t\tema_data['EMA'][0] = ema_data['MA'][1]\n\t\t\tfor i in range(1, len(ema_data)):\n\t\t\t\tema_data['EMA'][i] = (ema_data['PRICE'][i] * k) + ((1-k) * ema_data['EMA'][i-1])\n\t\t\ttrace_ema = go.Scatter(\n\t\t\t\tx=ema_data.index, y=ema_data['MA'], name='EMA', line=dict(color='#17BECF'), opacity=0.8)\n\t\t\tdata.append(trace_ema)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'ema' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass CrossingMovingAvarege(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing the crossing of moving averages that consists of indicating\n\tbuying and selling an asset whenever the averages cross.\n\n\tdetail:\n\t\tThis indicator consists of 2 sets of simple moving averages. 
an acquaintance\n\t\tas short average or short and another known as long average or long whenever short crosses\n\t\tthe long down we make a sale, whenever the long crosses the short up we buy.\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tshort_rolling = data_frame['close'].rolling(window=kwargs.get(\"short_rolling\", 9)).mean()\n\t\t\tlong_rolling = data_frame['close'].rolling(window=kwargs.get(\"long_rolling\", 21)).mean()\n\t\t\ttrace_short_rolling = go.Scatter(\n\t\t\t\tx=short_rolling.index, y=short_rolling, name='SHORT', line=dict(color='#17BECF'), opacity=0.5)\n\t\t\ttrace_long_rolling = go.Scatter(\n\t\t\t\tx=long_rolling.index, y=long_rolling, name='LONG', line=dict(color='#17becf'), opacity=0.5)\n\t\t\tdata.append(trace_short_rolling)\n\t\t\tdata.append(trace_long_rolling)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'crossing moving avarege' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass MACD(CandlestickChartIndicator):\n\n\t\"\"\"\n\tClass responsible for implementing a MACD -> Convergence - Divergence\n\tof the moving average, which uses 3 exponential moving averages.\n\t\"\"\"\n\tdef indicator(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\thigh_average = data_frame['max'].rolling(window=kwargs.get(\"high\", 8)).mean()\n\t\t\tlow_average = data_frame['min'].rolling(window=kwargs.get(\"low\", 8)).mean()\n\t\t\thilo_high = pd.DataFrame(index=data_frame.index)\n\t\t\thilo_low = pd.DataFrame(index=data_frame.index)\n\t\t\thilo_high['max'] = np.where(data_frame['close'] > high_average, low_average, np.NaN)\n\t\t\thilo_low['min'] = np.where(data_frame['close'] < low_average, high_average, np.NaN)\n\t\t\ttrace_high = go.Scatter(x=hilo_high.index, y=hilo_high, line=dict(color='#17BECF'), opacity=1)\n\t\t\ttrace_low = go.Scatter(x=hilo_low.index, y=hilo_low, line=dict(color='#B22222'), opacity=1)\n\t\t\tdata.append(trace_high)\n\t\t\tdata.append(trace_low)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'macd' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass BollingerBands(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing boolinger bands based on variations\n\tprices at standard deviation levels.\n\n\tdetail:\n\tThis indicator is able to measure price volatility.\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tdf_avg = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).mean().dropna()\n\t\t\tdf_std = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).std().dropna()\n\t\t\tdf_bollinger = pd.DataFrame(index=df_avg.index)\n\n\t\t\tdf_bollinger['mband'] = df_avg\n\t\t\tdf_bollinger['uband'] = df_avg + df_std.apply(lambda x: (x * 2))\n\t\t\tdf_bollinger['iband'] = df_avg - df_std.apply(lambda x: (x * 2))\n\t\t\tdf_price = data_frame[df_bollinger.index[0]:]\n\n\t\t\ttrace_prices = go.Candlestick(\n\t\t\t x = df_price.index,\n\t\t\t open = df_price['open'],\n\t\t\t high = df_price['max'],\n\t\t\t low = df_price['min'],\n\t\t\t close = df_price['close'],\n\t\t\t name='prices')\n\t\t\tuband = go.Scatter(\n\t\t\t\tx=df_bollinger.index, y=df_bollinger['uband'], name='Upper Band',\n\t\t\t\tline=dict(color='#17BECF'), opacity=0.8)\n\t\t\tmband = go.Scatter(\n\t\t\t\tx=df_bollinger.index, y=df_bollinger['mband'], name='Moving Band',\n\t\t\t\tline=dict(color='#B22222'), opacity=0.5)\n\t\t\tiband = go.Scatter(\n\t\t\t\tx=df_bollinger.index, 
y=df_bollinger['iband'], name='Lower Band',\n\t\t\t\tline=dict(color='#17BECF'), opacity=0.8)\n\t\t\tdata.append(uband)\n\t\t\tdata.append(mband)\n\t\t\tdata.append(iband)\n\t\t\tdata.append(trace_prices)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'bollinger bands' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\n# end-of-file" ]
[ [ "numpy.where", "pandas.DataFrame" ] ]
nr-patel/NP-SDC-T3-P4-Capstone-Project
[ "d20b4cb009c72f9d1b6fd8f36aca2af4c7bffb08" ]
[ "ros/src/tl_detector/tl_detector.py" ]
[ "#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32, Float32MultiArray\nfrom std_msgs.msg import MultiArrayDimension, MultiArrayDimension\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nimport math\nimport numpy as np\n\n# For now state is ground truth, so no need to have a cnt threshold\nSTATE_COUNT_THRESHOLD = 0\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = None\n self.has_image = False # we don't have image yet\n\n self.pose_wp_idx = None\n self.tl_wp_idx = [] # Waypoint indices of traffic lights\n self.tl_xy = [] # Stop line positions of traffic lights\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.bridge = CvBridge()\n self.use_simulator_classifier = rospy.get_param('~on_simulator')\n rospy.loginfo(\"Is on simulator? %s\" , self.use_simulator_classifier)\n self.light_classifier = TLClassifier(isSimulator = self.use_simulator_classifier)\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.state_count = 0\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Float32MultiArray, queue_size=15)\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n # Add closest waypoint subscriber to receive current closest waypoint from waypoint WaypointUpdater\n sub4 = rospy.Subscriber('/closest_waypoint', Int32, self.closest_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. 
You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints.waypoints\n N = len(self.waypoints)\n # Waypoints are only loaded once so at boot find closest waypoint idx of each traffic light stop line\n\n for x, y in self.config['stop_line_positions']:\n ds = []\n [ds.append(math.sqrt((x-self.waypoints[i].pose.pose.position.x)**2 + (y-self.waypoints[i].pose.pose.position.y)**2)) for i in range(N)]\n best_idx = np.argmin(ds)\n self.tl_wp_idx.append(best_idx)\n self.tl_xy.append([x, y])\n\n def closest_cb(self, msg):\n self.pose_wp_idx = msg.data\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n \n # Every time waypoint updater finds new closest waypoint, re-calculate location\n # of nearest stop line, waypoint closest to nearest stop line, and state of nearest light\n closest_tl_xy, light_wp, state = self.process_traffic_lights()\n\n if state == TrafficLight.GREEN:\n light_wp = -1\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. Otherwise the previous stable state is\n used.\n '''\n # Publish nearest waypoint and x-y coords of stop line so waypoint updater can slow if necessary\n red_light_pub = Float32MultiArray()\n red_light_pub.layout.dim.append(MultiArrayDimension())\n red_light_pub.layout.dim[0].label = \"length\"\n red_light_pub.layout.dim[0].size = 3\n red_light_pub.layout.dim[0].stride = 3\n red_light_pub.layout.data_offset = 0\n red_light_pub.data = [light_wp, closest_tl_xy[0], closest_tl_xy[1]]\n self.upcoming_red_light_pub.publish(red_light_pub)\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n image = np.asanyarray(cv_image)\n\n # Get classification\n return self.light_classifier.get_classification(image)\n\n def get_state_string(self, state):\n if (state == 0):\n state_s = \"RED\"\n elif (state == 1):\n state_s = \"YELLOW\"\n elif (state == 2):\n state_s = \"GREEN\"\n else:\n state_s = \"UNKNOWN\"\n\n return state_s\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n list (float): x,y coordinates of nearest traffic light stopline\n\n \"\"\"\n closest_tl_wp_idx = 0\n\n # This assumes ego always travels around loop in start direction. 
Should be fixed to use Yuri's calculation from waypoint_updater.py.\n closest_tl_wp_idx = min(self.tl_wp_idx)\n closest_tl_xy = self.tl_xy[np.argmin(self.tl_wp_idx)]\n if (self.pose_wp_idx):\n for i in range(len(self.tl_wp_idx)):\n if self.tl_wp_idx[i] > self.pose_wp_idx:\n closest_tl_wp_idx = self.tl_wp_idx[i]\n closest_tl_xy = self.tl_xy[i]\n break\n\n # We now have x,y position of stopline of closest traffic light.\n # Initially, rather than use camera img and classifier, we can get ground truth state of that light from the simulator.\n stop_x = closest_tl_xy[0]\n stop_y = closest_tl_xy[1]\n state = TrafficLight.UNKNOWN\n if (self.lights):\n n_lights = len(self.lights)\n ds = []\n [ds.append(math.sqrt((stop_x - self.lights[i].pose.pose.position.x)**2 + (stop_y - self.lights[i].pose.pose.position.y)**2)) for i in range(n_lights)]\n if (self.use_simulator_classifier):\n groundtruth = self.lights[np.argmin(ds)].state\n rospy.loginfo('groundtruth is {}'.format(self.get_state_string(groundtruth)))\n \n state = self.get_light_state(self.lights[np.argmin(ds)])\n rospy.loginfo('state is {}'.format(self.get_state_string(state)))\n\n return closest_tl_xy, closest_tl_wp_idx, state\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n" ]
[ [ "numpy.argmin", "numpy.asanyarray" ] ]
Aeon1/XlsxWriter
[ "6871b6c3fe6c294632054ea91f23d9e27068bcc1" ]
[ "examples/pandas_chart_columns.py" ]
[ "##############################################################################\n#\n# An example of converting a Pandas dataframe to an xlsx file with a grouped\n# column chart using Pandas and XlsxWriter.\n#\n# Copyright 2013-2019, John McNamara, [email protected]\n#\n\nimport pandas as pd\n\n# Some sample data to plot.\nfarm_1 = {'Apples': 10, 'Berries': 32, 'Squash': 21, 'Melons': 13, 'Corn': 18}\nfarm_2 = {'Apples': 15, 'Berries': 43, 'Squash': 17, 'Melons': 10, 'Corn': 22}\nfarm_3 = {'Apples': 6, 'Berries': 24, 'Squash': 22, 'Melons': 16, 'Corn': 30}\nfarm_4 = {'Apples': 12, 'Berries': 30, 'Squash': 15, 'Melons': 9, 'Corn': 15}\n\ndata = [farm_1, farm_2, farm_3, farm_4]\nindex = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']\n\n# Create a Pandas dataframe from the data.\ndf = pd.DataFrame(data, index=index)\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nsheet_name = 'Sheet1'\nwriter = pd.ExcelWriter('pandas_chart_columns.xlsx', engine='xlsxwriter')\ndf.to_excel(writer, sheet_name=sheet_name)\n\n# Access the XlsxWriter workbook and worksheet objects from the dataframe.\nworkbook = writer.book\nworksheet = writer.sheets[sheet_name]\n\n# Create a chart object.\nchart = workbook.add_chart({'type': 'column'})\n\n# Some alternative colors for the chart.\ncolors = ['#E41A1C', '#377EB8', '#4DAF4A', '#984EA3', '#FF7F00']\n\n# Configure the series of the chart from the dataframe data.\nfor col_num in range(1, len(farm_1) + 1):\n chart.add_series({\n 'name': ['Sheet1', 0, col_num],\n 'categories': ['Sheet1', 1, 0, 4, 0],\n 'values': ['Sheet1', 1, col_num, 4, col_num],\n 'fill': {'color': colors[col_num - 1]},\n 'overlap': -10,\n })\n\n# Configure the chart axes.\nchart.set_x_axis({'name': 'Total Produce'})\nchart.set_y_axis({'name': 'Farms', 'major_gridlines': {'visible': False}})\n\n# Insert the chart into the worksheet.\nworksheet.insert_chart('H2', chart)\n\n# Close the Pandas Excel writer and output the Excel file.\nwriter.save()\n" ]
[ [ "pandas.ExcelWriter", "pandas.DataFrame" ] ]
lucamarx/pyAutoSpec
[ "d57efb6ff4c37ede1377351fd3dd3a6ce362b551" ]
[ "pyautospec/function_mps.py" ]
[ "\"\"\"\nMps based function compression algorithm\n\"\"\"\nimport numpy as np\nimport itertools\n\nfrom typing import List\n\nfrom .mps import Mps\nfrom .plots import function_wfa_comparison_chart\n\n\ndef word2real(s : List[int], x0 : float = 0.0, x1 : float = 1.0) -> float:\n \"\"\"\n Convert the binary representation s of xϵ[x0,x1) into the number itself\n \"\"\"\n s = [0] + s\n return x0 + sum([s[i] * 2**(-i) for i in range(len(s))]) * (x1-x0)\n\n\ndef real2word(r : float, l : int = 8, x0 : float = 0.0, x1 : float = 1.0) -> List[int]:\n \"\"\"\n Convert a real number xϵ[x0,x1) into its binary representation (with\n maximum length l)\n \"\"\"\n if r < x0 or r >= x1:\n raise Exception(\"out of bounds\")\n\n r = (r - x0) / (x1 - x0)\n w = []\n for _ in range(0,l+1):\n d = 1 if r >= 1 else 0\n w.append(d)\n r = (r-d)*2\n return w[1:]\n\n\nclass FunctionMps():\n \"\"\"\n Mps based real function model\n \"\"\"\n\n def __init__(self, sequence_length : int = 8, max_bond_dim : int = 20):\n \"\"\"\n Intialize a model of a real function f: [x0,x1) → R\n\n Parameters:\n -----------\n\n sequence_length : int\n the underlying MPS length\n\n max_bond_dim : int\n the underlying MPS maximum bond dimension\n \"\"\"\n self.f, self.x0, self.x1 = None, None, None\n\n self.model = Mps(sequence_length, 2, max_bond_dim)\n\n\n def __repr__(self) -> str:\n if self.f is None:\n return \" FunctionMps(N={}) <?>: [<?>,<?>] → R\\n{}\".format(len(self.model), self.model.__repr__())\n else:\n return \" FunctionMps(N={}) {}: [{:.2f},{:.2f}] → R\\n{}\".format(len(self.model), self.f.__repr__(), self.x0, self.x1, self.model.__repr__())\n\n\n def _one_hot(self, X : List[List[int]]) -> np.ndarray:\n \"\"\"\n Perform one-hot encoding\n \"\"\"\n idxs = np.array(X).reshape(-1)\n return np.eye(self.model.part_d)[idxs].reshape((-1, len(self.model), self.model.part_d))\n\n\n def __call__(self, x : float) -> float:\n \"\"\"\n Evaluate learned function at x\n\n Parameters:\n -----------\n\n x : float\n a point in [x0,x1)\n\n Returns:\n --------\n\n the value of the function at x\n \"\"\"\n return self.model(self._one_hot([real2word(x, l=len(self.model), x0=self.x0, x1=self.x1)]))[0]\n\n\n def comparison_chart(self, n_points : int = 50):\n \"\"\"\n Compare the two functions\n\n Parameters:\n -----------\n\n n_points : int\n the number of points in the plot\n \"\"\"\n function_wfa_comparison_chart(self, n_points, None, plot_derivative = False)\n\n\n def fit(self, f, x0 : float = 0.0, x1 : float = 1.0, learn_rate : float = 0.1, batch_size : int = 32, epochs : int = 10):\n \"\"\"\n Fit the model to the function f defined on the interval [x0,x1)\n\n Parameters:\n -----------\n\n f : function\n the function to be fitted\n\n x0 : float\n x1 : float\n the interval the function is defined on\n\n learn_rate : float\n the learning rate\n\n batch_size : int\n the batch size used at each step\n\n epochs : int\n the number of epochs\n\n Returns:\n --------\n\n The object itself\n \"\"\"\n self.f = f\n self.x0 = x0\n self.x1 = x1\n\n data = [(list(x), f(word2real(list(x), x0=x0, x1=x1))) for x in itertools.product(*([[0,1]] * len(self.model)))]\n\n self.model.fit(self._one_hot(np.array([t[0] for t in data])), np.array([t[1] for t in data]), learn_rate=learn_rate, batch_size=batch_size, epochs=epochs)\n\n return self\n" ]
[ [ "numpy.array", "numpy.eye" ] ]
Cheese229/DataAssignmentCIS
[ "7e31892721aa2b3845df3e76296af500f29c9196" ]
[ "Simulation_virus_BB.py" ]
[ "\"\"\"\r\n Bigger scale simulation of a virus spread in a city.\r\n This would have been the better opt for the project, as it uses geospatial visualisation (which is not in this code)\r\n and data gathered from a a ride share, a specific city, their population, and their public transport data.\r\n\r\n I still don't understand how geospatial visualisation works (I think I will look more into it in the holidays)\r\n The simulation uses mathematical equations on how a virus would spread and includes its recovery rates.\r\n\r\n Technically this code works (I think)...\r\n It is just missing it's data and its visuals\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom collections import namedtuple\r\n\r\nParam = namedtuple('Param', 'R0 DE DI I0 HopitalisationRate HospitalIters')\r\n# I0 is the distribution of infected people at time t=0, if None then randomly choose inf number of people\r\n\r\n# flow is a 3D matrix of dimentions r x n x n (i.e., 84 x 549 x 549),\r\n# flow[t mod r] is the desired OD matrix at time t.\r\n\r\ndef seir(par, distr, flow, alpha, iterations, inf):\r\n\r\n r = flow.shape[0]\r\n n = flow.shape[1]\r\n N = distr[0].sum() #total population, we assume that N = sum(flow)\r\n Svec = distr[0].copy()\r\n Evec = np.zeros(n)\r\n Ivec = np.zeros(n)\r\n Rvec = np.zeros(n)\r\n\r\n if par.I0 is None:\r\n initial = np.zeros(n)\r\n # randomly choose inf infections\r\n for i in range(inf):\r\n loc = np.random.randint(n)\r\n if (Svec[loc] > initial[loc]):\r\n initial[loc] += 1.0\r\n\r\n else:\r\n initial = par.I0\r\n assert ((Svec < initial).sum() == 0)\r\n\r\n Svec -= initial\r\n Ivec += initial\r\n \r\n res = np.zeros((iterations, 5))\r\n res[0,:] = [Svec.sum(), Evec.sum(), Ivec.sum(), Rvec.sum(), 0]\r\n\r\n realflow = flow.copy()\r\n\r\n realflow = realflow / realflow.sum(axis=2)[:,:, np.newaxis]\r\n realflow = alpha * realflow\r\n\r\n history = np.zeros((iterations, 5, n))\r\n history[0,0,:] = Svec\r\n history[0,1,:] = Evec\r\n history[0,2,:] = Ivec\r\n history[0,3,:] = Rvec\r\n\r\n eachIter = np.zeros(iterations + 1)\r\n\r\n # run simulation\r\n for iter in range(0, iterations - 1):\r\n realOD = realflow[iter % r]\r\n\r\n d = distr[iter % r] + 1\r\n\r\n if ((d>N+1).any()):\r\n print(\"Houston, we have a problem!\")\r\n return res, history\r\n # N = S + E + I + R\r\n\r\n newE = Svec * Ivec / d * (par.R0 / par.DI)\r\n newI = Evec / par.DE\r\n newR = Ivec / par.DI\r\n\r\n Svec -= newE\r\n Svec = (Svec + np.matmul(Svec.reshape(1,n), realOD) - Svec * realOD.sum(axis=1))\r\n Evec = Evec + newE - newI\r\n Evec = (Evec + np.matmul(Evec.reshape(1,n), realOD) - Evec * realOD.sum(axis=1))\r\n Ivec = Ivec + newI - newR\r\n Ivec = (Ivec + np.matmul(Ivec.reshape(1,n), realOD) - Ivec * realOD.sum(axis=1))\r\n Rvec += newR\r\n Rvec = (Rvec + np.matmul(Rvec.reshape(1,n), realOD) - Rvec * realOD.sum(axis=1))\r\n\r\n res[iter + 1,:] = [Svec.sum(), Evec.sum(), Ivec.sum(), Rvec.sum(), 0]\r\n eachIter[iter + 1] = newI.sum()\r\n res[iter + 1, 4] = eachIter[max(0, iter - par.HospitalIters) : iter].sum() * par.HospitalisationRate\r\n\r\n history[iter + 1,0,:] = Svec\r\n history[iter + 1,1,:] = Evec\r\n history[iter + 1,2,:] = Ivec\r\n history[iter + 1,3,:] = Rvec\r\n\r\n\r\n return res, history" ]
[ [ "numpy.random.randint", "numpy.zeros" ] ]
soumitri2001/EvoCluster
[ "001dfb4c1f00db84ad1c2f2228eed6112d7e65b1" ]
[ "EvoCluster/_objectives.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 9 18:12:29 2019\n\n@author: Raneem\n\"\"\"\n\nfrom sklearn import cluster, metrics\nfrom scipy.spatial.distance import pdist, cdist\nimport numpy\nimport sys\n\ndef getLabelsPred(startpts, points, k):\n labelsPred = [-1] * len(points)\n \n for i in range(len(points)):\n distances = numpy.linalg.norm(points[i]-startpts, axis = 1)\n labelsPred[i] = numpy.argmin(distances)\n \n return labelsPred\n \n\ndef SSE(startpts, points, k, metric):\n labelsPred = getLabelsPred(startpts, points, k)\n fitness = 0\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n centroidsForPoints = startpts[labelsPred]\n fitness = 0\n for i in range(k):\n indexes = [n for n,x in enumerate(labelsPred) if x==i]\n fit = cdist(points[indexes], centroidsForPoints[indexes], metric)**2\n fit = sum(fit)[0]\n fitness += fit\n return fitness, labelsPred\n\n\ndef TWCV(startpts, points, k):\n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n sumAllFeatures = sum(sum(numpy.power(points,2)))\n sumAllPairPointsCluster = 0\n for clusterId in range(k):\n indices = numpy.where(numpy.array(labelsPred) == clusterId)[0]\n pointsInCluster = points[numpy.array(indices)]\n sumPairPointsCluster = sum(pointsInCluster)\n sumPairPointsCluster = numpy.power(sumPairPointsCluster,2)\n sumPairPointsCluster = sum(sumPairPointsCluster)\n sumPairPointsCluster = sumPairPointsCluster/len(pointsInCluster)\n \n sumAllPairPointsCluster += sumPairPointsCluster\n fitness = (sumAllFeatures - sumAllPairPointsCluster)\n return fitness, labelsPred\n\n\ndef SC(startpts, points, k, metric): \n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n silhouette = metrics.silhouette_score(points, labelsPred, metric=metric)\n #silhouette = (silhouette - (-1)) / (1 - (-1))\n silhouette = (silhouette + 1) / 2\n fitness = 1 - silhouette\n return fitness, labelsPred\n\n\ndef DB(startpts, points, k):\n labelsPred = getLabelsPred(startpts, points, k)\n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n fitness = metrics.davies_bouldin_score(points, labelsPred)\n return fitness, labelsPred\n\ndef CH(startpts, points, k):\n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n ch = metrics.calinski_harabaz_score(points, labelsPred)\n fitness = 1 / ch\n return fitness, labelsPred\n\n\ndef delta_fast(ck, cl, distances):\n values = distances[numpy.where(ck)][:, numpy.where(cl)]\n values = values[numpy.nonzero(values)]\n\n return numpy.min(values)\n \ndef big_delta_fast(ci, distances):\n values = distances[numpy.where(ci)][:, numpy.where(ci)]\n #values = values[numpy.nonzero(values)]\n \n return numpy.max(values)\n\ndef dunn_fast(points, labels, metric):\n v = pdist(points, metric)\n size_X = len(points)\n X = numpy.zeros((size_X,size_X))\n X[numpy.triu_indices(X.shape[0], k = 1)] = v\n distances = X + X.T\n ks = numpy.sort(numpy.unique(labels))\n \n deltas = numpy.ones([len(ks), len(ks)])*1000000\n big_deltas = numpy.zeros([len(ks), 1])\n \n l_range = list(range(0, len(ks)))\n \n for k in l_range:\n for l in (l_range[0:k]+l_range[k+1:]):\n deltas[k, l] = delta_fast((labels == ks[k]), (labels == ks[l]), distances)\n \n big_deltas[k] = big_delta_fast((labels == ks[k]), distances)\n\n di = numpy.min(deltas)/numpy.max(big_deltas)\n return 
di\n \n\ndef DI(startpts, points, k, metric):\n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n dunn = dunn_fast(points, labelsPred, metric)\n if(dunn < 0):\n dunn = 0\n fitness = 1 - dunn\n return fitness, labelsPred\n\n\ndef getFunctionDetails(a): \n # [name, lb, ub]\n param = { 0: [\"SSE\",0,1],\n 1: [\"TWCV\",0,1],\n 2: [\"SC\",0,1],\n 3: [\"DB\",0,1],\n #4: [\"CH\",0,1],\n 4: [\"DI\",0,1]\n }\n return param.get(a, \"nothing\")" ]
[ [ "scipy.spatial.distance.pdist", "scipy.spatial.distance.cdist", "numpy.zeros", "sklearn.metrics.calinski_harabaz_score", "numpy.argmin", "numpy.triu_indices", "numpy.where", "numpy.max", "numpy.power", "numpy.min", "sklearn.metrics.silhouette_score", "numpy.nonzero", "numpy.array", "sklearn.metrics.davies_bouldin_score", "numpy.linalg.norm", "numpy.unique" ] ]
shfshf/seq2annotation
[ "a824520d46f0b3d70268fae422976a5ce1b3f4ce" ]
[ "loadmodel.py" ]
[ "import tensorflow as tf\r\nfrom tensorflow.python.platform import gfile\r\n\r\n# only for bugfix\r\ntf.contrib.rnn\r\n\r\noutput_graph_path = './model.pb'\r\n\r\ngraph = tf.Graph()\r\n\r\nwith gfile.FastGFile(output_graph_path, 'rb') as f:\r\n output_graph_def = tf.GraphDef()\r\n output_graph_def.ParseFromString(f.read())\r\n\r\nwith graph.as_default():\r\n tf.import_graph_def(output_graph_def, name=\"\")\r\n\r\n with tf.Session(graph=graph) as sess:\r\n init_all_tables = graph.get_operation_by_name('init_all_tables')\r\n sess.run(init_all_tables)\r\n # sess.run(tf.global_variables_initializer())\r\n # sess.run(tf.local_variables_initializer())\r\n # 得到当前图有几个操作节点\r\n print(\"%d ops in the final graph.\" % len(output_graph_def.node))\r\n\r\n tensor_name = [tensor.name for tensor in output_graph_def.node]\r\n print(tensor_name)\r\n print('---------------------------')\r\n\r\n Placeholder = sess.graph.get_tensor_by_name('Placeholder:0')\r\n Placeholder_1 = sess.graph.get_tensor_by_name('Placeholder_1:0')\r\n # embedding层的输出\r\n embedding_out = sess.graph.get_tensor_by_name('embedding_lookup:0')\r\n enbedding_transpose = sess.graph.get_tensor_by_name('transpose:0')\r\n # BiLSTM层的输出\r\n BiLSTM_out = sess.graph.get_tensor_by_name('concat:0')\r\n BiLSTM_transpose_1 = sess.graph.get_tensor_by_name('transpose_1:0')\r\n\r\n a = sess.graph.get_tensor_by_name('Variable_1:0')\r\n a_array = a.eval(session=sess)\r\n for i in a_array[:1]:\r\n print(i)\r\n print('#####################')\r\n\r\n input_words = [['唱', '一', '首', '不', '消', '失', '的', '回', '忆']]\r\n input_words_len = [9]\r\n\r\n b = sess.graph.get_tensor_by_name('hash_table_Lookup/hash_table_Lookup/LookupTableFindV2:0')\r\n b = sess.run(b, {Placeholder: input_words, Placeholder_1: input_words_len})\r\n\r\n for i in b:\r\n print(i)" ]
[ [ "tensorflow.Graph", "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.GraphDef" ] ]
GaoX2015/intro_ds
[ "886e678e5353e9b4c0d4f3da83a00d6b9a2f06a5" ]
[ "ch06-sgd/utils.py" ]
[ "# -*- coding: UTF-8 -*-\n\"\"\"\n此脚本用于随机生成线性模型数据、定义模型以及其他工具\n\"\"\"\n\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef generateLinearData(dimension, num):\n \"\"\"\n 随机产生线性模型数据\n\n 参数\n ----\n dimension :int,自变量个数\n\n num :int,数据个数\n\n 返回\n ----\n x :np.array,自变量\n\n y :np.array,因变量\n \"\"\"\n np.random.seed(1024)\n beta = np.array(range(dimension)) + 1\n x = np.random.random((num, dimension))\n epsilon = np.random.random((num, 1))\n # 将被预测值写成矩阵形式,会极大加快速度\n y = x.dot(beta).reshape((-1, 1)) + epsilon\n return x, y\n\n\ndef createLinearModel(dimension):\n \"\"\"\n 搭建模型,包括数据中的自变量,应变量和损失函数\n\n 参数\n ----\n dimension : int,自变量的个数\n\n 返回\n ----\n model :dict,里面包含模型的参数,损失函数,自变量,应变量\n \"\"\"\n np.random.seed(1024)\n # 定义自变量和应变量\n x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')\n ## 将被预测值写成矩阵形式,会极大加快速度\n y = tf.placeholder(tf.float64, shape=[None, 1], name=\"y\")\n # 定义参数估计值和预测值\n betaPred = tf.Variable(np.random.random([dimension, 1]))\n yPred = tf.matmul(x, betaPred, name=\"y_pred\")\n # 定义损失函数\n loss = tf.reduce_mean(tf.square(yPred - y))\n model = {\"loss_function\": loss, \"independent_variable\": x,\n \"dependent_variable\": y, \"prediction\": yPred, \"model_params\": betaPred}\n return model\n\n\ndef createSummaryWriter(logPath):\n \"\"\"\n 检查所给路径是否已存在,如果存在删除原有日志。并创建日志写入对象\n\n 参数\n ----\n logPath :string,日志存储路径\n\n 返回\n ----\n summaryWriter :FileWriter,日志写入器\n \"\"\"\n if tf.gfile.Exists(logPath):\n tf.gfile.DeleteRecursively(logPath)\n summaryWriter = tf.summary.FileWriter(logPath, graph=tf.get_default_graph())\n return summaryWriter\n" ]
[ [ "tensorflow.placeholder", "numpy.random.seed", "tensorflow.matmul", "numpy.random.random", "tensorflow.square", "tensorflow.get_default_graph", "tensorflow.gfile.Exists", "tensorflow.gfile.DeleteRecursively" ] ]
HarliWu/From-Deterioration-to-Acceleration-A-Calibration-Approach-to-Rehabilitating-Step-Asynchronism-in-Fe
[ "3a2f7196a2ca0446ce7ff7c8d15a0fa56a1d91d4" ]
[ "federated_learning/FedaGrac/param_server.py" ]
[ "import time, os, json, time\nimport numpy as np\n\nimport torch\nfrom torch._C import device\nimport torch.distributed as dist\nfrom torch.autograd import Variable\n\ndef test_model(model, test_data, dev):\n correct, total = 0, 0\n model.eval()\n\n with torch.no_grad():\n for data, target in test_data:\n data, target = Variable(data).cuda(dev), Variable(target).cuda(dev)\n output = model(data)\n # get the index of the max log-probability\n _, predictions = output.max(1)\n total += predictions.size(0)\n correct += torch.sum(predictions == target.data).float()\n\n acc = correct / total\n return acc.item()\n\ndef update_model(model, global_mu, size, cpu, gpu, args):\n # all_param = model.state_dict()\n\n # receive the parameter variance from workers \n for param in model.parameters():\n tensor = torch.zeros_like(param.data, device=cpu)\n gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]\n dist.gather(tensor=tensor, gather_list=gather_list, dst=0)\n param.data = torch.zeros_like(param.data, device=gpu)\n for w in range(size):\n # Suppose the model received from clients are well processed \n param.data = param.data + gather_list[w].clone().detach().to(gpu)\n\n # receive averaged K from workers\n avg_k_list = [torch.tensor(0.0) for _ in range(size)]\n dist.gather(tensor=torch.tensor(0.0), gather_list=avg_k_list, dst=0)\n avg_k = sum(avg_k_list)\n print('Averaged K:', avg_k)\n # send averaged K to workers \n avg_k_list = [avg_k if args.avg_k==-1 else torch.tensor(float(args.avg_k)) for _ in range(size)]\n dist.scatter(tensor=avg_k, scatter_list=avg_k_list)\n\n # receive the mu from clients\n for idx, param in enumerate(global_mu):\n tensor = torch.zeros_like(param.data, device=cpu)\n gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]\n dist.gather(tensor=tensor, gather_list=gather_list, dst=0)\n global_mu[idx] = torch.zeros_like(param.data, device=gpu)\n for w in range(size):\n # Suppose the model received from clients are well processed \n global_mu[idx] = global_mu[idx] + gather_list[w].clone().detach().to(gpu)\n\n # send the parameters to workers \n for param in model.parameters():\n tmp_p = param.clone().detach().to(cpu)\n scatter_p_list = [tmp_p for _ in range(size)]\n dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)\n if torch.sum(torch.isnan(tmp_p)) > 0:\n print(\"NaN occurs. Terminate. 
\")\n exit(-1)\n\n # send global_mu to workers\n for param in global_mu:\n tmp_p = param.clone().detach().to(cpu)\n scatter_p_list = [tmp_p for _ in range(size)]\n dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)\n\n # model.load_state_dict(all_param)\n\ndef run(size, model, args, test_data, f_result, cpu, gpu):\n # Receive the weights from all clients \n temp_w = torch.tensor([0.0 for _ in range(args.num_workers+1)])\n weights = [torch.tensor([0.0 for _ in range(args.num_workers+1)]) for _ in range(size)]\n dist.gather(tensor=temp_w, gather_list=weights, dst=0)\n weights = sum(weights)\n weights = weights / torch.sum(weights)\n print('weights:', weights)\n\n # send weights to clients\n weights_list = [weights.clone().detach().to(cpu) for _ in range(size)]\n dist.scatter(tensor=temp_w, scatter_list=weights_list)\n \n start = time.time()\n model = model.cuda(gpu)\n\n for p in model.parameters():\n tmp_p = p.clone().detach().to(cpu)\n scatter_p_list = [tmp_p for _ in range(size)]\n # dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list, group=group)\n dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)\n\n global_mu = [torch.zeros_like(param.data, device=gpu) for param in model.parameters()]\n\n print('Model has sent to all nodes! ')\n print('Begin!') \n\n np.random.seed(42)\n\n for t in range(args.T):\n model.train()\n # send participants to all clients \n participants = np.random.choice(np.arange(len(weights)), size=args.num_part, replace=True, p=weights.numpy()) if args.partial else np.arange(len(weights))\n print('Participants list:', list(participants))\n participants = torch.tensor(participants).to(cpu)\n part_list = [participants for _ in range(size)]\n dist.scatter(tensor=participants, scatter_list=part_list)\n\n # receive the list of train loss from workers\n info_list = [torch.tensor(0.0) for _ in range(size)]\n # dist.gather(tensor=torch.tensor([0.0]), gather_list=info_list, group=group)\n dist.gather(tensor=torch.tensor(0.0), gather_list=info_list, dst=0)\n # info_list = np.concatenate([list(a) for a in info_list])\n # train_loss = sum(info_list).item() / args.num_part if args.partial else sum(info_list * weights).item()\n train_loss = sum(info_list).item()\n\n # if args.partial:\n # update_model_partial(model, size, cpu, gpu, args.num_part)\n # else:\n # update_model_full(model, size, cpu, gpu, weights)\n update_model(model, global_mu, size, cpu, gpu, args)\n\n timestamp = time.time() - start\n test_acc = test_model(model, test_data, gpu)\n print(\"Epoch: {}\\t\\tLoss: {}\\t\\tAccuracy: {}\".format(t, train_loss, test_acc))\n f_result.write(str(t) + \"\\t\" + str(timestamp) + \"\\t\" + str(train_loss) + \"\\t\" + str(test_acc) + \"\\n\")\n f_result.flush()\n\ndef init_processes(rank, size, model, args, test_data, cpu, gpu, backend='mpi'):\n if backend == 'mpi':\n dist.init_process_group(backend)\n elif backend == 'gloo':\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29500'\n dist.init_process_group(backend, rank=rank, world_size=size)\n if not os.path.exists(args.result):\n os.makedirs(args.result)\n result_file = os.path.join(args.result, '{}.txt'.format(len(os.listdir(args.result))))\n f_result = open(result_file, 'w')\n f_result.write(json.dumps(vars(args)) + '\\n')\n run(size, model, args, test_data, f_result, cpu, gpu)" ]
[ [ "torch.sum", "torch.distributed.gather", "torch.zeros_like", "torch.no_grad", "numpy.random.seed", "torch.tensor", "torch.distributed.init_process_group", "torch.autograd.Variable", "torch.isnan", "torch.distributed.scatter" ] ]
balintmaci/drone_intro_exercises
[ "1d8b839fecd6b0c5e33210b9a88fd741a71034cc" ]
[ "ex1/daniel/imu_exercise_kalman.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# IMU exercise\n# Copyright (c) 2015-2020 Kjeld Jensen [email protected] [email protected]\n\n# import libraries\nfrom math import pi, sqrt, atan2\nimport matplotlib.pyplot as plt\nfrom pylab import ion\n\n# name of the file to read ##\nfileName = 'imu_razor_data_pitch_55deg.txt'\n\n## IMU type\n#imuType = 'vectornav_vn100'\nimuType = 'sparkfun_razor'\n\n# other parameters\nshowPlot = True\nshow3DLiveView = False\nshow3DLiveViewInterval = 3\n\nif show3DLiveView == True:\n\tfrom imu_box3d import imu_visualize\n\n\n##### Insert initialize code below ###################\n\n# approx. bias values determined by averaging over static measurements\nbias_gyro_x = 3.95*3.14/180 # [rad/measurement]\nbias_gyro_y = 2.85*3.14/180 # [rad/measurement]\nbias_gyro_z = 0.41*3.14/180 # [rad/measurement]\n\n# variances\ngyroVar = 0.02\npitchVar = 0.01\n\n# Kalman filter start guess\nestAngle = 0\nestVar = 3.14\n\n# Kalman filter housekeeping variables\ngyroVarAcc = 0.0\n\n######################################################\n\n## Variables for plotting ##\nplotDataGyro = []\nplotDataAcc = []\nplotDataKalman = []\n\n## Initialize your variables here ##\ngyro_x_rel = 0.0\ngyro_y_rel = 0.0\ngyro_z_rel = 0.0\n\n# open the imu data file\nf = open (fileName, \"r\")\n\n# initialize variables\ncount = 0\n\n# initialize 3D liveview\nif show3DLiveView == True:\n\timuview = imu_visualize()\n\timuview.set_axis (0, 0, 0)\n\timuview.update()\n\n# looping through file\nfor line in f:\n\tcount += 1\n\n\t# split the line into CSV formatted data\n\tline = line.replace ('*',',') # make the checkum another csv value\n\tcsv = line.split(',')\n\n\t# keep track of the timestamps \n\tts_recv = float(csv[0])\n\tif count == 1: \n\t\tts_now = ts_recv # only the first time\n\tts_prev = ts_now\n\tts_now = ts_recv\n\n\tif imuType == 'sparkfun_razor': \n\t\t# import data from a SparkFun Razor IMU (SDU firmware)\n\t\t# outputs ENU reference system\n\t\tacc_x = int(csv[2]) / 1000.0 * 4 * 9.82;\n\t\tacc_y = int(csv[3]) / 1000.0 * 4 * 9.82;\n\t\tacc_z = int(csv[4]) / 1000.0 * 4 * 9.82;\n\t\tgyro_x = int(csv[5]) * 1/14.375 * pi/180.0;\n\t\tgyro_y = int(csv[6]) * 1/14.375 * pi/180.0;\n\t\tgyro_z = int(csv[7]) * 1/14.375 * pi/180.0;\n\n\telif imuType == 'vectornav_vn100': \n\t\t# import data from a VectorNav VN-100 configured to output $VNQMR\n\t\t# outputs NED reference system (therefore converted to ENU)\n\t\tacc_y = float(csv[9])\n\t\tacc_x = float(csv[10])\n\t\tacc_z = -float(csv[11])\n\t\tgyro_y = float(csv[12])\n\t\tgyro_x = float(csv[13])\n\t\tgyro_z = -float(csv[14])\n\n\t# subtract defined static bias for each gyro\t\t\n\tgyro_x -= bias_gyro_x\n\tgyro_y -= bias_gyro_y\n\tgyro_z -= bias_gyro_z\n\n\t##### Insert loop code below #########################\n\n\t# Variables available\n\t# ----------------------------------------------------\n\t# count\t\tCurrent number of updates\t\t\n\t# ts_prev\tTime stamp at the previous update\n\t# ts_now\tTime stamp at this update\n\t# acc_x\t\tAcceleration measured along the x axis\n\t# acc_y\t\tAcceleration measured along the y axis\n\t# acc_z\t\tAcceleration measured along the z axis\n\t# gyro_x\tAngular velocity measured about the x axis\n\t# gyro_y\tAngular velocity measured about the y axis\n\t# gyro_z\tAngular velocity measured about the z axis\n\n\t## Insert your code here ##\n\t# calculate pitch (x-axis) and roll (y-axis) angles\n\tpitch = atan2(acc_y,sqrt(acc_x*acc_x + acc_z*acc_z))\n\troll = atan2(-acc_x, acc_z)\n\n\t# integrate gyro velocities 
to releative angles\n\tgyro_x_rel += gyro_x*(ts_now-ts_prev)\n\tgyro_y_rel += gyro_y*(ts_now-ts_prev)\n\tgyro_z_rel += gyro_z*(ts_now-ts_prev)\n\n\t# Kalman prediction step (we have new data in each iteration)\n\tgyroVarAcc += gyroVar\n\testAngle += gyro_y*(ts_now-ts_prev)\n\testVar += gyroVarAcc*(ts_now-ts_prev)\n\n\t# Kalman correction step (we have new data in each iteration)\n\tK = estVar/(estVar+pitchVar)\n\testAngle += K*(roll-estAngle)\n\testVar *= (1-K)\n\tgyroVarAcc = 0\n\n\t# define which value to plot as the Kalman filter estimate\n\tkalman_estimate = estAngle\n\n\t# define which value to plot as the absolute value (pitch/roll)\n\tpitch_roll_plot = roll\n\n\t# define which value to plot as the relative gyro value\n\tgyro_rel_plot = gyro_y_rel\n\n\t######################################################\n\n\t# if 3D liveview is enabled\n\tif show3DLiveView == True and count % show3DLiveViewInterval == 0:\n\n\t\t# determine what variables to liveview\n\t\troll_view = 0.0\n\t\tyaw_view = 0.0\n\t\tpitch_view = kalman_estimate\n\n\t\timuview.set_axis (-pitch_view, -yaw_view, roll_view)\n\t\timuview.update()\n\n\t# if plotting is enabled\n\tif showPlot == True:\n\t\tplotDataGyro.append(gyro_rel_plot*180.0/pi)\n\t\tplotDataAcc.append(pitch_roll_plot*180.0/pi)\n\t\tplotDataKalman.append(kalman_estimate*180.0/pi)\n\n# closing the file\t\nf.close()\n\n# show the plot\nif showPlot == True:\n\tion()\n\tplt.figure(1)\n\tplt.title('Gyro integrated (relative) angle')\n\tplt.plot(plotDataGyro)\n\tplt.savefig('imu_exercise_gyro.png')\n\n\tplt.figure(2)\n\tplt.title('Accelerometer (blue) & Kalman estimation (red) angles')\n\tplt.plot(plotDataAcc,'blue')\n\tplt.plot(plotDataKalman,'red')\n\tplt.savefig('imu_exercise_acc_kalman.png')\n\tplt.draw()\n\tprint ('Press enter to quit')\n\treal_raw_input = vars(__builtins__).get('raw_input',input)\n\treal_raw_input()\n\n\n" ]
[ [ "matplotlib.pyplot.draw", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.plot" ] ]
CanyellWang/MachineLearning_Python_wchy
[ "7eac77f7446a0c69bfb1a8be7da405895409d131" ]
[ "NeuralNetwok/NeuralNetwork.py" ]
[ "#-*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy import io as spio\nfrom matplotlib import pyplot as plt\nfrom scipy import optimize\nfrom matplotlib.font_manager import FontProperties\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=14) # 解决windows环境下画图汉字乱码问题\n\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nimport time\n\ndef neuralNetwork(input_layer_size,hidden_layer_size,out_put_layer):\n data_img = loadmat_data(\"data_digits.mat\")\n X = data_img['X']\n y = data_img['y']\n\n '''scaler = StandardScaler()\n scaler.fit(X)\n X = scaler.transform(X)''' \n \n m,n = X.shape\n \"\"\"digits = datasets.load_digits()\n X = digits.data\n y = digits.target\n m,n = X.shape\n \n scaler = StandardScaler()\n scaler.fit(X)\n X = scaler.transform(X)\"\"\"\n \n ## 随机显示几行数据\n rand_indices = [t for t in [np.random.randint(x-x, m) for x in range(100)]] # 生成100个0-m的随机数\n display_data(X[rand_indices,:]) # 显示100个数字 \n \n #nn_params = np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1)))\n \n Lambda = 1\n \n initial_Theta1 = randInitializeWeights(input_layer_size,hidden_layer_size); \n initial_Theta2 = randInitializeWeights(hidden_layer_size,out_put_layer)\n \n initial_nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) #展开theta \n #np.savetxt(\"testTheta.csv\",initial_nn_params,delimiter=\",\")\n start = time.time()\n result = optimize.fmin_cg(nnCostFunction, initial_nn_params, fprime=nnGradient, args=(input_layer_size,hidden_layer_size,out_put_layer,X,y,Lambda), maxiter=100)\n print (u'执行时间:',time.time()-start)\n print (result)\n '''可视化 Theta1'''\n length = result.shape[0]\n Theta1 = result[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)\n Theta2 = result[hidden_layer_size*(input_layer_size+1):length].reshape(out_put_layer,hidden_layer_size+1) \n display_data(Theta1[:,1:length])\n display_data(Theta2[:,1:length])\n '''预测'''\n p = predict(Theta1,Theta2,X)\n print (u\"预测准确度为:%f%%\"%np.mean(np.float64(p == y.reshape(-1,1))*100)) \n res = np.hstack((p,y.reshape(-1,1)))\n np.savetxt(\"predict.csv\", res, delimiter=',')\n \n\n# 加载mat文件\ndef loadmat_data(fileName):\n return spio.loadmat(fileName)\n\n# 显示100个数字\ndef display_data(imgData):\n sum = 0\n '''\n 显示100个数(若是一个一个绘制将会非常慢,可以将要画的数字整理好,放到一个矩阵中,显示这个矩阵即可)\n - 初始化一个二维数组\n - 将每行的数据调整成图像的矩阵,放进二维数组\n - 显示即可\n '''\n m,n = imgData.shape\n width = np.int32(np.round(np.sqrt(n)))\n height = np.int32(n/width);\n rows_count = np.int32(np.floor(np.sqrt(m)))\n cols_count = np.int32(np.ceil(m/rows_count))\n pad = 1\n display_array = -np.ones((pad+rows_count*(height+pad),pad+cols_count*(width+pad)))\n for i in range(rows_count):\n for j in range(cols_count):\n if sum >= m: #超过了行数,退出当前循环\n break;\n display_array[pad+i*(height+pad):pad+i*(height+pad)+height,pad+j*(width+pad):pad+j*(width+pad)+width] = imgData[sum,:].reshape(height,width,order=\"F\") # order=F指定以列优先,在matlab中是这样的,python中需要指定,默认以行\n sum += 1\n if sum >= m: #超过了行数,退出当前循环\n break;\n \n plt.imshow(display_array,cmap='gray') #显示灰度图像\n plt.axis('off')\n plt.show()\n\n# 代价函数\ndef nnCostFunction(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):\n length = nn_params.shape[0] # theta的中长度\n # 还原theta1和theta2\n Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)\n Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1)\n \n # 
np.savetxt(\"Theta1.csv\",Theta1,delimiter=',')\n \n m = X.shape[0]\n class_y = np.zeros((m,num_labels)) # 数据的y对应0-9,需要映射为0/1的关系\n # 映射y\n for i in range(num_labels):\n class_y[:,i] = np.int32(y==i).reshape(1,-1) # 注意reshape(1,-1)才可以赋值\n \n '''去掉theta1和theta2的第一列,因为正则化时从1开始''' \n Theta1_colCount = Theta1.shape[1] \n Theta1_x = Theta1[:,1:Theta1_colCount]\n Theta2_colCount = Theta2.shape[1] \n Theta2_x = Theta2[:,1:Theta2_colCount]\n # 正则化向theta^2\n term = np.dot(np.transpose(np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1)))),np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1))))\n \n '''正向传播,每次需要补上一列1的偏置bias'''\n a1 = np.hstack((np.ones((m,1)),X)) \n z2 = np.dot(a1,np.transpose(Theta1)) \n a2 = sigmoid(z2)\n a2 = np.hstack((np.ones((m,1)),a2))\n z3 = np.dot(a2,np.transpose(Theta2))\n h = sigmoid(z3) \n '''代价''' \n J = -(np.dot(np.transpose(class_y.reshape(-1,1)),np.log(h.reshape(-1,1)))+np.dot(np.transpose(1-class_y.reshape(-1,1)),np.log(1-h.reshape(-1,1)))-Lambda*term/2)/m \n #temp1 = (h.reshape(-1,1)-class_y.reshape(-1,1))\n #temp2 = (temp1**2).sum()\n #J = 1/(2*m)*temp2\n return np.ravel(J)\n\n# 梯度\ndef nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):\n length = nn_params.shape[0]\n Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1).copy() # 这里使用copy函数,否则下面修改Theta的值,nn_params也会一起修改\n Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1).copy()\n m = X.shape[0]\n class_y = np.zeros((m,num_labels)) # 数据的y对应0-9,需要映射为0/1的关系 \n # 映射y\n for i in range(num_labels):\n class_y[:,i] = np.int32(y==i).reshape(1,-1) # 注意reshape(1,-1)才可以赋值\n \n '''去掉theta1和theta2的第一列,因为正则化时从1开始'''\n Theta1_colCount = Theta1.shape[1] \n Theta1_x = Theta1[:,1:Theta1_colCount]\n Theta2_colCount = Theta2.shape[1] \n Theta2_x = Theta2[:,1:Theta2_colCount]\n \n Theta1_grad = np.zeros((Theta1.shape)) #第一层到第二层的权重\n Theta2_grad = np.zeros((Theta2.shape)) #第二层到第三层的权重\n \n \n '''正向传播,每次需要补上一列1的偏置bias'''\n a1 = np.hstack((np.ones((m,1)),X))\n z2 = np.dot(a1,np.transpose(Theta1))\n a2 = sigmoid(z2)\n a2 = np.hstack((np.ones((m,1)),a2))\n z3 = np.dot(a2,np.transpose(Theta2))\n h = sigmoid(z3)\n \n \n '''反向传播,delta为误差,'''\n delta3 = np.zeros((m,num_labels))\n delta2 = np.zeros((m,hidden_layer_size))\n for i in range(m):\n #delta3[i,:] = (h[i,:]-class_y[i,:])*sigmoidGradient(z3[i,:]) # 均方误差的误差率\n delta3[i,:] = h[i,:]-class_y[i,:] # 交叉熵误差率\n Theta2_grad = Theta2_grad+np.dot(np.transpose(delta3[i,:].reshape(1,-1)),a2[i,:].reshape(1,-1))\n delta2[i,:] = np.dot(delta3[i,:].reshape(1,-1),Theta2_x)*sigmoidGradient(z2[i,:])\n Theta1_grad = Theta1_grad+np.dot(np.transpose(delta2[i,:].reshape(1,-1)),a1[i,:].reshape(1,-1))\n \n Theta1[:,0] = 0\n Theta2[:,0] = 0 \n '''梯度'''\n grad = (np.vstack((Theta1_grad.reshape(-1,1),Theta2_grad.reshape(-1,1)))+Lambda*np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1))))/m\n return np.ravel(grad)\n\n# S型函数 \ndef sigmoid(z):\n h = np.zeros((len(z),1)) # 初始化,与z的长度一致\n \n h = 1.0/(1.0+np.exp(-z))\n return h\n\n# S型函数导数\ndef sigmoidGradient(z):\n g = sigmoid(z)*(1-sigmoid(z))\n return g\n\n# 随机初始化权重theta\ndef randInitializeWeights(L_in,L_out):\n W = np.zeros((L_out,1+L_in)) # 对应theta的权重\n epsilon_init = (6.0/(L_out+L_in))**0.5\n W = np.random.rand(L_out,1+L_in)*2*epsilon_init-epsilon_init # np.random.rand(L_out,1+L_in)产生L_out*(1+L_in)大小的随机矩阵\n return W\n\n\n# 检验梯度是否计算正确\ndef checkGradient(Lambda = 0):\n '''构造一个小型的神经网络验证,因为数值法计算梯度很浪费时间,而且验证正确后之后就不再需要验证了'''\n 
input_layer_size = 3\n hidden_layer_size = 5\n num_labels = 3\n m = 5\n initial_Theta1 = debugInitializeWeights(input_layer_size,hidden_layer_size); \n initial_Theta2 = debugInitializeWeights(hidden_layer_size,num_labels)\n X = debugInitializeWeights(input_layer_size-1,m)\n y = 1+np.transpose(np.mod(np.arange(1,m+1), num_labels))# 初始化y\n \n y = y.reshape(-1,1)\n nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) #展开theta \n '''BP求出梯度'''\n grad = nnGradient(nn_params, input_layer_size, hidden_layer_size, \n num_labels, X, y, Lambda) \n '''使用数值法计算梯度'''\n num_grad = np.zeros((nn_params.shape[0]))\n step = np.zeros((nn_params.shape[0]))\n e = 1e-4\n for i in range(nn_params.shape[0]):\n step[i] = e\n loss1 = nnCostFunction(nn_params-step.reshape(-1,1), input_layer_size, hidden_layer_size, \n num_labels, X, y, \n Lambda)\n loss2 = nnCostFunction(nn_params+step.reshape(-1,1), input_layer_size, hidden_layer_size, \n num_labels, X, y, \n Lambda)\n num_grad[i] = (loss2-loss1)/(2*e)\n step[i]=0\n # 显示两列比较\n res = np.hstack((num_grad.reshape(-1,1),grad.reshape(-1,1)))\n print(\"检查梯度的结果,第一列为数值法计算得到的,第二列为BP得到的:\")\n print (res)\n\n# 初始化调试的theta权重\ndef debugInitializeWeights(fan_in,fan_out):\n W = np.zeros((fan_out,fan_in+1))\n x = np.arange(1,fan_out*(fan_in+1)+1)\n W = np.sin(x).reshape(W.shape)/10\n return W\n\n# 预测\ndef predict(Theta1,Theta2,X):\n m = X.shape[0]\n num_labels = Theta2.shape[0]\n #p = np.zeros((m,1))\n '''正向传播,预测结果'''\n X = np.hstack((np.ones((m,1)),X))\n h1 = sigmoid(np.dot(X,np.transpose(Theta1)))\n h1 = np.hstack((np.ones((m,1)),h1))\n h2 = sigmoid(np.dot(h1,np.transpose(Theta2)))\n \n '''\n 返回h中每一行最大值所在的列号\n - np.max(h, axis=1)返回h中每一行的最大值(是某个数字的最大概率)\n - 最后where找到的最大概率所在的列号(列号即是对应的数字)\n '''\n #np.savetxt(\"h2.csv\",h2,delimiter=',')\n p = np.array(np.where(h2[0,:] == np.max(h2, axis=1)[0])) \n for i in np.arange(1, m):\n t = np.array(np.where(h2[i,:] == np.max(h2, axis=1)[i]))\n p = np.vstack((p,t))\n return p \n\nif __name__ == \"__main__\":\n checkGradient()\n neuralNetwork(400, 25, 10)" ]
[ [ "numpy.ones", "numpy.savetxt", "matplotlib.pyplot.imshow", "numpy.vstack", "numpy.transpose", "matplotlib.font_manager.FontProperties", "numpy.random.rand", "scipy.io.loadmat", "numpy.ceil", "numpy.zeros", "matplotlib.pyplot.axis", "numpy.int32", "numpy.arange", "numpy.max", "numpy.exp", "numpy.ravel", "matplotlib.pyplot.show", "numpy.sqrt", "numpy.sin", "scipy.optimize.fmin_cg", "numpy.random.randint" ] ]
bayesianbrad/pyprob
[ "a426fc51c1d6da13052979c21af447f9c4023642" ]
[ "pyprob/nn/dataset.py" ]
[ "import torch\nfrom torch.utils.data import Dataset, ConcatDataset, Sampler\nimport torch.distributed as dist\nimport math\nimport os\nimport sys\nimport shelve\nfrom glob import glob\nimport numpy as np\nimport uuid\nfrom termcolor import colored\nfrom collections import Counter, OrderedDict\nimport random\n\nfrom .. import util\nfrom ..util import TraceMode, PriorInflation\nfrom ..concurrency import ConcurrentShelf\n\n\nclass Batch():\n def __init__(self, traces):\n self.traces = traces\n self.size = len(traces)\n sub_batches = {}\n total_length_controlled = 0\n for trace in traces:\n tl = trace.length_controlled\n if tl == 0:\n raise ValueError('Trace of length zero.')\n total_length_controlled += tl\n trace_hash = ''.join([variable.address for variable in trace.variables_controlled])\n if trace_hash not in sub_batches:\n sub_batches[trace_hash] = []\n sub_batches[trace_hash].append(trace)\n self.sub_batches = list(sub_batches.values())\n self.mean_length_controlled = total_length_controlled / self.size\n\n def __len__(self):\n return len(self.traces)\n\n def __getitem__(self, key):\n return self.traces[key]\n\n def to(self, device):\n for trace in self.traces:\n trace.to(device=device)\n\n\nclass OnlineDataset(Dataset):\n def __init__(self, model, length=None, prior_inflation=PriorInflation.DISABLED):\n self._model = model\n if length is None:\n length = int(1e6)\n self._length = length\n self._prior_inflation = prior_inflation\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, idx):\n return next(self._model._trace_generator(trace_mode=TraceMode.PRIOR_FOR_INFERENCE_NETWORK, prior_inflation=self._prior_inflation))\n\n @staticmethod\n def _prune_trace(trace):\n del(trace.variables)\n # trace.variables_controlled = []\n del(trace.variables_uncontrolled)\n del(trace.variables_replaced)\n del(trace.variables_observed)\n del(trace.variables_observable)\n del(trace.variables_tagged)\n del(trace.variables_dict_address)\n del(trace.variables_dict_address_base)\n # trace.named_variables = {}\n del(trace.result)\n del(trace.log_prob)\n del(trace.log_prob_observed)\n # del(trace.log_importance_weight)\n # trace.length = 0\n # trace.length_controlled = 0\n del(trace.execution_time_sec)\n for variable in trace.variables_controlled:\n # variable.distribution = distribution\n # if value is None:\n # variable.value = None\n # else:\n # variable.value = util.to_tensor(value)\n del(variable.address_base)\n # variable.address = address\n del(variable.instance)\n del(variable.log_prob)\n del(variable.control)\n del(variable.replace)\n del(variable.name)\n del(variable.observable)\n del(variable.observed)\n del(variable.reused)\n del(variable.tagged)\n for _, variable in trace.named_variables.items():\n controlled = False\n for v in trace.variables_controlled:\n if variable is v: # Needs to be implemented this way to compare object references instead of object hashes (which change as a result of potentially deleted fields)\n controlled = True\n break\n if not controlled:\n del(variable.distribution)\n # if value is None:\n # variable.value = None\n # else:\n # variable.value = util.to_tensor(value)\n del(variable.address_base)\n del(variable.address)\n del(variable.instance)\n del(variable.log_prob)\n del(variable.control)\n del(variable.replace)\n del(variable.name)\n del(variable.observable)\n del(variable.observed)\n del(variable.reused)\n del(variable.tagged)\n\n def save_dataset(self, dataset_dir, num_traces, num_traces_per_file, *args, **kwargs):\n num_files = 
math.ceil(num_traces / num_traces_per_file)\n util.progress_bar_init('Saving offline dataset, traces:{}, traces per file:{}, files:{}'.format(num_traces, num_traces_per_file, num_files), num_traces, 'Traces')\n i = 0\n while i < num_traces:\n i += num_traces_per_file\n file_name = os.path.join(dataset_dir, 'pyprob_traces_{}_{}'.format(num_traces_per_file, str(uuid.uuid4())))\n shelf = shelve.open(file_name, flag='c')\n for j in range(num_traces_per_file):\n trace = next(self._model._trace_generator(trace_mode=TraceMode.PRIOR, prior_inflation=self._prior_inflation, *args, **kwargs))\n self._prune_trace(trace)\n shelf[str(j)] = trace\n shelf['__length'] = j + 1\n shelf.close()\n util.progress_bar_update(i)\n util.progress_bar_end()\n\n\nclass OfflineDatasetFile(Dataset):\n cache = OrderedDict()\n cache_capacity = 8\n\n def __init__(self, file_name):\n self._file_name = file_name\n self._closed = False\n shelf = self._open()\n self._length = shelf['__length']\n\n def _open(self):\n # idea from https://www.kunxi.org/2014/05/lru-cache-in-python\n try:\n shelf = OfflineDatasetFile.cache.pop(self._file_name)\n # it was in the cache, put it back on the front\n OfflineDatasetFile.cache[self._file_name] = shelf\n return shelf\n except KeyError:\n # not in the cache\n if len(OfflineDatasetFile.cache) >= OfflineDatasetFile.cache_capacity:\n # cache is full, delete the last entry\n n, s = OfflineDatasetFile.cache.popitem(last=False)\n s.close()\n shelf = shelve.open(self._file_name, flag='r')\n OfflineDatasetFile.cache[self._file_name] = shelf\n return shelf\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, idx):\n shelf = self._open()\n return shelf[str(idx)]\n\n\nclass OfflineDataset(ConcatDataset):\n def __init__(self, dataset_dir):\n self._dataset_dir = dataset_dir\n # files = [name for name in os.listdir(self._dataset_dir)]\n files = sorted(glob(os.path.join(self._dataset_dir, 'pyprob_traces_sorted_*')))\n if len(files) > 0:\n self._sorted_on_disk = True\n else:\n self._sorted_on_disk = False\n files = sorted(glob(os.path.join(self._dataset_dir, 'pyprob_traces_*')))\n if len(files) == 0:\n raise RuntimeError('Cannot find any data set files at {}'.format(dataset_dir))\n datasets = []\n for file in files:\n try:\n dataset = OfflineDatasetFile(file)\n datasets.append(dataset)\n except Exception as e:\n print(e)\n print(colored('Warning: dataset file potentially corrupt, omitting: {}'.format(file), 'red', attrs=['bold']))\n super().__init__(datasets)\n print('OfflineDataset at: {}'.format(self._dataset_dir))\n print('Num. traces : {:,}'.format(len(self)))\n print('Sorted on disk : {}'.format(self._sorted_on_disk))\n if self._sorted_on_disk:\n self._sorted_indices = list(range(len(self)))\n else:\n file_name = os.path.join(self._dataset_dir, 'pyprob_hashes')\n try:\n hashes_file = shelve.open(file_name, 'r')\n hashes_exist = 'hashes' in hashes_file\n hashes_file.close()\n except:\n hashes_exist = False\n if hashes_exist:\n print('Using pre-computed hashes in: {}'.format(file_name))\n hashes_file = shelve.open(file_name, 'r')\n self._hashes = hashes_file['hashes']\n self._sorted_indices = hashes_file['sorted_indices']\n hashes_file.close()\n if torch.is_tensor(self._hashes):\n self._hashes = self._hashes.cpu().numpy()\n if len(self._sorted_indices) != len(self):\n raise RuntimeError('Length of pre-computed hashes ({}) and length of offline dataset ({}) do not match. Dataset files have been altered. 
Delete and re-generate pre-computed hash file: {}'.format(len(self._sorted_indices), len(self), file_name))\n else:\n print('No pre-computed hashes found, generating: {}'.format(file_name))\n hashes_file = shelve.open(file_name, 'c')\n hashes, sorted_indices = self._compute_hashes()\n hashes_file['hashes'] = hashes\n hashes_file['sorted_indices'] = sorted_indices\n hashes_file.close()\n self._sorted_indices = sorted_indices\n self._hashes = hashes\n print('Num. trace types : {:,}'.format(len(set(self._hashes))))\n hashes_and_counts = OrderedDict(sorted(Counter(self._hashes).items()))\n print('Trace hash\\tCount')\n for hash, count in hashes_and_counts.items():\n print('{:.8f}\\t{}'.format(hash, count))\n print()\n\n @staticmethod\n def _trace_hash(trace):\n h = hash(''.join([variable.address for variable in trace.variables_controlled])) + sys.maxsize + 1\n return float('{}.{}'.format(trace.length_controlled, h))\n\n def _compute_hashes(self):\n hashes = torch.zeros(len(self))\n util.progress_bar_init('Hashing offline dataset for sorting', len(self), 'Traces')\n for i in range(len(self)):\n hashes[i] = self._trace_hash(self[i])\n util.progress_bar_update(i)\n util.progress_bar_end()\n print('Sorting offline dataset')\n _, sorted_indices = torch.sort(hashes)\n print('Sorting done')\n return hashes.cpu().numpy(), sorted_indices.cpu().numpy()\n\n def save_sorted(self, sorted_dataset_dir, num_traces_per_file=None, num_files=None, begin_file_index=None, end_file_index=None):\n if num_traces_per_file is not None:\n if num_files is not None:\n raise ValueError('Expecting either num_traces_per_file or num_files')\n else:\n if num_files is None:\n raise ValueError('Expecting either num_traces_per_file or num_files')\n else:\n num_traces_per_file = math.ceil(len(self) / num_files)\n\n if os.path.exists(sorted_dataset_dir):\n if len(glob(os.path.join(sorted_dataset_dir, '*'))) > 0:\n print(colored('Warning: target directory is not empty: {})'.format(sorted_dataset_dir), 'red', attrs=['bold']))\n util.create_path(sorted_dataset_dir, directory=True)\n file_indices = list(util.chunks(list(self._sorted_indices), num_traces_per_file))\n num_traces = len(self)\n num_files = len(file_indices)\n num_files_digits = len(str(num_files))\n file_name_template = 'pyprob_traces_sorted_{{:d}}_{{:0{}d}}'.format(num_files_digits)\n file_names = list(map(lambda x: os.path.join(sorted_dataset_dir, file_name_template.format(num_traces_per_file, x)), range(num_files)))\n if begin_file_index is None:\n begin_file_index = 0\n if end_file_index is None:\n end_file_index = num_files\n if begin_file_index < 0 or begin_file_index > end_file_index or end_file_index > num_files or end_file_index < begin_file_index:\n raise ValueError('Invalid indexes begin_file_index:{} and end_file_index: {}'.format(begin_file_index, end_file_index))\n\n print('Sorted offline dataset, traces: {}, traces per file: {}, files: {} (overall)'.format(num_traces, num_traces_per_file, num_files))\n util.progress_bar_init('Saving sorted files with indices in range [{}, {}) ({} of {} files overall)'.format(begin_file_index, end_file_index, end_file_index - begin_file_index, num_files), end_file_index - begin_file_index + 1, 'Files')\n j = 0\n for i in range(begin_file_index, end_file_index):\n j += 1\n file_name = file_names[i]\n print(file_name)\n shelf = ConcurrentShelf(file_name)\n shelf.lock(write=True)\n for new_i, old_i in enumerate(file_indices[i]):\n shelf[str(new_i)] = self[old_i]\n shelf['__length'] = len(file_indices[i])\n shelf.unlock()\n 
util.progress_bar_update(j)\n util.progress_bar_end()\n\n\nclass TraceSampler(Sampler):\n def __init__(self, offline_dataset):\n if not isinstance(offline_dataset, OfflineDataset):\n raise TypeError('Expecting an OfflineDataset instance.')\n self._sorted_indices = offline_dataset._sorted_indices\n\n def __iter__(self):\n return iter(self._sorted_indices)\n\n def __len__(self):\n return len(self._offline_dataset)\n\n\nclass TraceBatchSampler(Sampler):\n def __init__(self, offline_dataset, batch_size, shuffle_batches=True):\n if not isinstance(offline_dataset, OfflineDataset):\n raise TypeError('Expecting an OfflineDataset instance.')\n self._batches = list(util.chunks(offline_dataset._sorted_indices, batch_size))\n self._shuffle_batches = shuffle_batches\n\n def __iter__(self):\n if self._shuffle_batches:\n np.random.shuffle(self._batches)\n return iter(self._batches)\n\n def __len__(self):\n return len(self._batches)\n\n\nclass DistributedTraceBatchSampler(Sampler):\n def __init__(self, offline_dataset, batch_size, shuffle_batches=True, num_buckets=None, shuffle_buckets=True):\n if not isinstance(offline_dataset, OfflineDataset):\n raise TypeError('Expecting an OfflineDataset instance.')\n if not dist.is_available():\n raise RuntimeError('Expecting distributed training.')\n self._world_size = dist.get_world_size()\n self._rank = dist.get_rank()\n # Randomly drop a number of traces so that the number of all minibatches in the whole dataset is an integer multiple of world size\n num_batches_to_drop = math.floor(len(offline_dataset._sorted_indices) / batch_size) % self._world_size\n num_traces_to_drop = num_batches_to_drop * batch_size\n # Ensure all ranks choose the same traces to drop\n st = random.getstate()\n random.seed(0)\n self._batches = list(util.chunks(util.drop_items(list(offline_dataset._sorted_indices), num_traces_to_drop), batch_size)) # List of all minibatches, where each minibatch is a list of trace indices\n random.setstate(st)\n # Discard last minibatch if it's smaller than batch_size\n if len(self._batches[-1]) < batch_size:\n del(self._batches[-1])\n if num_buckets is None:\n num_buckets = len(self._batches) / self._world_size\n self._num_buckets = num_buckets\n self._bucket_size = math.ceil(len(self._batches) / num_buckets)\n if self._bucket_size < self._world_size:\n raise RuntimeError('offline_dataset:{}, batch_size:{} and num_buckets:{} imply a bucket_size:{} smaller than world_size:{}'.format(len(offline_dataset), batch_size, num_buckets, self._bucket_size, self._world_size))\n # List of buckets, where each bucket is a list of minibatches\n self._buckets = list(util.chunks(self._batches, self._bucket_size))\n # Unify last two buckets if the last bucket is smaller than other buckets\n if len(self._buckets[-1]) < self._bucket_size:\n if len(self._buckets) < 2:\n raise RuntimeError('offline_dataset:{} too small for given batch_size:{} and num_buckets:{}'.format(len(offline_dataset), batch_size, num_buckets))\n self._buckets[-2].extend(self._buckets[-1])\n del(self._buckets[-1])\n self._shuffle_batches = shuffle_batches\n self._shuffle_buckets = shuffle_buckets\n self._epoch = 0\n self._current_bucket_id = 0\n\n print('DistributedTraceBatchSampler')\n print('OfflineDataset size : {:,}'.format(len(offline_dataset)))\n print('World size : {:,}'.format(self._world_size))\n print('Batch size : {:,}'.format(batch_size))\n print('Num. batches dropped: {:,}'.format(num_batches_to_drop))\n print('Num. 
batches : {:,}'.format(len(self._batches)))\n print('Bucket size : {:,}'.format(self._bucket_size))\n print('Num. buckets : {:,}'.format(self._num_buckets))\n\n def __iter__(self):\n self._epoch += 1\n bucket_ids = list(range(len(self._buckets)))\n if self._shuffle_buckets:\n # Shuffle the list of buckets (but not the order of minibatches inside each bucket) at the beginning of each epoch, deterministically based on the epoch number so that all nodes have the same bucket order\n # Idea from: https://github.com/pytorch/pytorch/blob/a3fb004b1829880547dd7b3e2cd9d16af657b869/torch/utils/data/distributed.py#L44\n st = np.random.get_state()\n np.random.seed(self._epoch)\n np.random.shuffle(bucket_ids)\n np.random.set_state(st)\n for bucket_id in bucket_ids:\n bucket = self._buckets[bucket_id]\n self._current_bucket_id = bucket_id\n # num_batches is needed to ensure that all nodes have the same number of minibatches (iterations) in each bucket, in cases where the bucket size is not divisible by world_size.\n num_batches = math.floor(len(bucket) / self._world_size)\n # Select a num_batches-sized subset of the current bucket for the current node\n # The part not selected by the current node will be selected by other nodes\n batches = bucket[self._rank:len(bucket):self._world_size][:num_batches]\n if self._shuffle_batches:\n # Shuffle the list of minibatches (but not the order trace indices inside each minibatch) selected for the current node\n np.random.shuffle(batches)\n for batch in batches:\n yield batch\n\n def __len__(self):\n return len(self._batches)\n" ]
[ [ "numpy.random.set_state", "numpy.random.shuffle", "torch.distributed.get_rank", "torch.distributed.get_world_size", "numpy.random.get_state", "torch.distributed.is_available", "numpy.random.seed", "torch.is_tensor", "torch.sort" ] ]
hengwei-chan/graph_network_demo
[ "542f2a59b1b9708abdc718d77db7111f3ba2df96" ]
[ "reports/configs/only_logs_dmpnn4_1/other_config.py" ]
[ "from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN4_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN4_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logs_dmpnn4_1' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n combined_dataset: bool = False\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = False\n include_logS: bool = True\n include_logP: bool = False\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'" ]
[ [ "tensorflow.keras.optimizers.Adam" ] ]
Artcs1/RotationDetection
[ "095be17345ee9984d8de8f24eb6b5a0b2d764a06" ]
[ "tools/r3det_gwd/train.py" ]
[ "# -*- coding:utf-8 -*-\n# Author: Xue Yang <[email protected]>\n#\n# License: Apache-2.0 license\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nsys.path.append(\"../../\")\n\nfrom tools.train_base import Train\nfrom libs.configs import cfgs\nfrom libs.models.detectors.r3det_gwd import build_whole_network\nfrom libs.utils.coordinate_convert import backward_convert, get_horizen_minAreaRectangle\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = cfgs.GPU_GROUP\n\n\nclass TrainR3DetGWD(Train):\n\n def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):\n return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \\\n gtboxes_and_label_r[:int(num_objects), :].astype(np.float32)\n\n def main(self):\n with tf.Graph().as_default() as graph, tf.device('/cpu:0'):\n\n num_gpu = len(cfgs.GPU_GROUP.strip().split(','))\n global_step = slim.get_or_create_global_step()\n lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu)\n tf.summary.scalar('lr', lr)\n\n optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)\n r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs,\n is_training=True)\n\n with tf.name_scope('get_batch'):\n if cfgs.IMAGE_PYRAMID:\n shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)\n shortside_len = tf.random_shuffle(shortside_len_list)[0]\n\n else:\n shortside_len = cfgs.IMG_SHORT_SIDE_LEN\n\n img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \\\n self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,\n batch_size=cfgs.BATCH_SIZE * num_gpu,\n shortside_len=shortside_len,\n is_training=True)\n\n # data processing\n inputs_list = []\n for i in range(num_gpu):\n img = tf.expand_dims(img_batch[i], axis=0)\n pretrain_zoo = PretrainModelZoo()\n if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:\n img = img / tf.constant([cfgs.PIXEL_STD])\n\n gtboxes_and_label_r = tf.py_func(backward_convert,\n inp=[gtboxes_and_label_batch[i]],\n Tout=tf.float32)\n gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])\n\n gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])\n gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])\n\n num_objects = num_objects_batch[i]\n num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)\n\n img_h = img_h_batch[i]\n img_w = img_w_batch[i]\n\n inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])\n\n tower_grads = []\n biases_regularizer = tf.no_regularizer\n weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)\n\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpu):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i):\n with slim.arg_scope(\n [slim.model_variable, slim.variable],\n device='/device:CPU:0'):\n with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,\n slim.conv2d_transpose, slim.separable_conv2d,\n slim.fully_connected],\n weights_regularizer=weights_regularizer,\n biases_regularizer=biases_regularizer,\n biases_initializer=tf.constant_initializer(0.0)):\n\n gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label,\n inp=[inputs_list[i][1],\n 
inputs_list[i][2],\n inputs_list[i][3]],\n Tout=[tf.float32, tf.float32])\n gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])\n gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])\n\n img = inputs_list[i][0]\n img_shape = inputs_list[i][-2:]\n img = tf.image.crop_to_bounding_box(image=img,\n offset_height=0,\n offset_width=0,\n target_height=tf.cast(img_shape[0], tf.int32),\n target_width=tf.cast(img_shape[1], tf.int32))\n\n outputs = r3det_gwd.build_whole_detection_network(input_img_batch=img,\n gtboxes_batch_h=gtboxes_and_label_h,\n gtboxes_batch_r=gtboxes_and_label_r,\n gpu_id=i)\n gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(img_batch=img,\n boxes=gtboxes_and_label_h[\n :, :-1],\n labels=gtboxes_and_label_h[\n :, -1],\n method=0)\n gtboxes_in_img_r = self.drawer.draw_boxes_with_categories(img_batch=img,\n boxes=gtboxes_and_label_r[\n :, :-1],\n labels=gtboxes_and_label_r[\n :, -1],\n method=1)\n tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)\n tf.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r)\n\n if cfgs.ADD_BOX_IN_TENSORBOARD:\n detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(\n img_batch=img,\n boxes=outputs[0],\n scores=outputs[1],\n labels=outputs[2],\n method=1)\n tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)\n\n loss_dict = outputs[-1]\n total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)\n\n if i == num_gpu - 1:\n regularization_losses = tf.get_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES)\n # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())\n total_losses = total_losses + tf.add_n(regularization_losses)\n\n tf.get_variable_scope().reuse_variables()\n grads = optimizer.compute_gradients(total_losses)\n if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:\n grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)\n tower_grads.append(grads)\n self.log_printer(r3det_gwd, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph)\n\nif __name__ == '__main__':\n\n trainer = TrainR3DetGWD(cfgs)\n trainer.main()" ]
[ [ "tensorflow.summary.scalar", "tensorflow.contrib.slim.learning.clip_gradient_norms", "tensorflow.reshape", "tensorflow.train.MomentumOptimizer", "tensorflow.summary.image", "tensorflow.name_scope", "tensorflow.get_variable_scope", "tensorflow.device", "tensorflow.Graph", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.constant", "tensorflow.contrib.slim.get_or_create_global_step", "tensorflow.constant_initializer", "tensorflow.add_n", "tensorflow.get_collection", "tensorflow.expand_dims", "tensorflow.random_shuffle", "tensorflow.contrib.slim.arg_scope", "tensorflow.cast", "tensorflow.py_func" ] ]
wright/OpenMDAO
[ "58f9ff47197531f4fb4ef632c6bcca11e799ccf0" ]
[ "openmdao/core/tests/test_connections.py" ]
[ "\"\"\" Tests related to connecing inputs to outputs.\"\"\"\n\nimport unittest\nimport numpy as np\n\nfrom io import StringIO\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_warning\nfrom openmdao.utils.mpi import MPI\n\ntry:\n from openmdao.vectors.petsc_vector import PETScVector\nexcept ImportError:\n PETScVector = None\n\n\nclass TestConnections(unittest.TestCase):\n\n def setUp(self):\n self.setup_model(None, None)\n\n def setup_model(self, c1meta=None, c3meta=None):\n self.p = om.Problem()\n root = self.p.model\n\n if c1meta is None:\n c1meta = {}\n\n if c3meta is None:\n c3meta = {}\n\n self.G1 = root.add_subsystem(\"G1\", om.Group())\n self.G2 = self.G1.add_subsystem(\"G2\", om.Group())\n self.C1 = self.G2.add_subsystem(\"C1\", om.ExecComp('y=x*2.0', **c1meta))\n self.C2 = self.G2.add_subsystem(\"C2\", om.IndepVarComp('x', 1.0))\n\n self.G3 = root.add_subsystem(\"G3\", om.Group())\n self.G4 = self.G3.add_subsystem(\"G4\", om.Group())\n self.C3 = self.G4.add_subsystem(\"C3\", om.ExecComp('y=x*2.0', **c3meta))\n self.C4 = self.G4.add_subsystem(\"C4\", om.ExecComp('y=x*2.0'))\n\n def test_no_conns(self):\n self.p.setup()\n\n self.p['G1.G2.C1.x'] = 111.\n self.p['G3.G4.C3.x'] = 222.\n self.p['G3.G4.C4.x'] = 333.\n\n self.p.run_model()\n\n self.assertEqual(self.C1._inputs['x'], 111.)\n self.assertEqual(self.C3._inputs['x'], 222.)\n self.assertEqual(self.C4._inputs['x'], 333.)\n\n def test_pull_size_from_source(self):\n raise unittest.SkipTest(\"setting input size based on src size not supported yet\")\n\n class Src(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x', 2.0)\n self.add_output('y1', np.zeros((3, )))\n self.add_output('y2', shape=((3, )))\n\n def solve_nonlinear(self, inputs, outputs, resids):\n x = inputs['x']\n\n outputs['y1'] = x * np.array([1.0, 2.0, 3.0])\n outputs['y2'] = x * np.array([1.0, 2.0, 3.0])\n\n class Tgt(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x1')\n self.add_input('x2')\n self.add_output('y1', 0.0)\n self.add_output('y2', 0.0)\n\n def solve_nonlinear(self, inputs, outputs, resids):\n x1 = inputs['x1']\n x2 = inputs['x2']\n\n outputs['y1'] = np.sum(x1)\n outputs['y2'] = np.sum(x2)\n\n p = om.Problem()\n p.model.add_subsystem('src', Src())\n p.model.add_subsystem('tgt', Tgt())\n\n p.model.connect('src.y1', 'tgt.x1')\n p.model.connect('src.y2', 'tgt.x2')\n\n p.setup()\n p.run_model()\n\n self.assertEqual(p['tgt.y1'], 12.0)\n self.assertEqual(p['tgt.y2'], 12.0)\n\n def test_pull_size_from_source_with_indices(self):\n raise unittest.SkipTest(\"setting input size based on src size not supported yet\")\n\n class Src(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x', 2.0)\n self.add_output('y1', np.zeros((3, )))\n self.add_output('y2', shape=((3, )))\n self.add_output('y3', 3.0)\n\n def solve_nonlinear(self, inputs, outputs, resids):\n \"\"\" counts up. \"\"\"\n\n x = inputs['x']\n\n outputs['y1'] = x * np.array([1.0, 2.0, 3.0])\n outputs['y2'] = x * np.array([1.0, 2.0, 3.0])\n outputs['y3'] = x * 4.0\n\n class Tgt(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x1')\n self.add_input('x2')\n self.add_input('x3')\n self.add_output('y1', 0.0)\n self.add_output('y2', 0.0)\n self.add_output('y3', 0.0)\n\n def solve_nonlinear(self, inputs, outputs, resids):\n \"\"\" counts up. 
\"\"\"\n\n x1 = inputs['x1']\n x2 = inputs['x2']\n x3 = inputs['x3']\n\n outputs['y1'] = np.sum(x1)\n outputs['y2'] = np.sum(x2)\n outputs['y3'] = np.sum(x3)\n\n top = om.Problem()\n top.model.add_subsystem('src', Src())\n top.model.add_subsystem('tgt', Tgt())\n\n top.model.connect('src.y1', 'tgt.x1', src_indices=(0, 1))\n top.model.connect('src.y2', 'tgt.x2', src_indices=(0, 1))\n top.model.connect('src.y3', 'tgt.x3')\n\n top.setup()\n top.run_model()\n\n self.assertEqual(top['tgt.y1'], 6.0)\n self.assertEqual(top['tgt.y2'], 6.0)\n self.assertEqual(top['tgt.y3'], 8.0)\n\n def test_inp_inp_conn_no_src(self):\n raise unittest.SkipTest(\"no setup testing yet\")\n self.p.model.connect('G3.G4.C3.x', 'G3.G4.C4.x')\n\n stream = StringIO()\n self.p.setup(out_stream=stream)\n\n self.p['G3.G4.C3.x'] = 999.\n self.assertEqual(self.p.model.G3.G4.C3._inputs['x'], 999.)\n self.assertEqual(self.p.model.G3.G4.C4._inputs['x'], 999.)\n\n content = stream.getvalue()\n self.assertTrue(\"The following parameters have no associated unknowns:\\n\"\n \"G1.G2.C1.x\\nG3.G4.C3.x\\nG3.G4.C4.x\" in content)\n self.assertTrue(\"The following components have no connections:\\n\"\n \"G1.G2.C1\\nG1.G2.C2\\nG3.G4.C3\\nG3.G4.C4\\n\" in content)\n self.assertTrue(\"No recorders have been specified, so no data will be saved.\" in content)\n\n\nclass TestConnectionsPromoted(unittest.TestCase):\n\n def test_inp_inp_promoted_w_prom_src(self):\n p = om.Problem()\n root = p.model\n\n G1 = root.add_subsystem(\"G1\", om.Group(), promotes=['x'])\n G2 = G1.add_subsystem(\"G2\", om.Group(), promotes=['x'])\n G2.add_subsystem(\"C1\", om.ExecComp('y=x*2.0'))\n G2.add_subsystem(\"C2\", om.IndepVarComp('x', 1.0), promotes=['x'])\n\n G3 = root.add_subsystem(\"G3\", om.Group(), promotes=['x'])\n G4 = G3.add_subsystem(\"G4\", om.Group(), promotes=['x'])\n C3 = G4.add_subsystem(\"C3\", om.ExecComp('y=x*2.0'), promotes=['x'])\n C4 = G4.add_subsystem(\"C4\", om.ExecComp('y=x*2.0'), promotes=['x'])\n\n p.setup()\n p.set_solver_print(level=0)\n\n # setting promoted name will set the value into the outputs, but will\n # not propagate it to the inputs. That will happen during run_model().\n p['x'] = 999.\n\n p.run_model()\n self.assertEqual(C3._inputs['x'], 999.)\n self.assertEqual(C4._inputs['x'], 999.)\n\n def test_inp_inp_promoted_w_explicit_src(self):\n p = om.Problem()\n root = p.model\n\n G1 = root.add_subsystem(\"G1\", om.Group())\n G2 = G1.add_subsystem(\"G2\", om.Group(), promotes=['x'])\n G2.add_subsystem(\"C1\", om.ExecComp('y=x*2.0'))\n G2.add_subsystem(\"C2\", om.IndepVarComp('x', 1.0), promotes=['x'])\n\n G3 = root.add_subsystem(\"G3\", om.Group())\n G4 = G3.add_subsystem(\"G4\", om.Group(), promotes=['x'])\n C3 = G4.add_subsystem(\"C3\", om.ExecComp('y=x*2.0'), promotes=['x'])\n C4 = G4.add_subsystem(\"C4\", om.ExecComp('y=x*2.0'), promotes=['x'])\n\n p.model.connect('G1.x', 'G3.x')\n p.setup()\n p.set_solver_print(level=0)\n\n # setting promoted name will set the value into the outputs, but will\n # not propagate it to the inputs. 
That will happen during run_model().\n p['G1.x'] = 999.\n\n p.run_model()\n self.assertEqual(C3._inputs['x'], 999.)\n self.assertEqual(C4._inputs['x'], 999.)\n\n def test_overlapping_system_names(self):\n # This ensures that _setup_connections does not think g1 and g1a are the same system\n prob = om.Problem()\n model = prob.model\n\n g1 = model.add_subsystem('g1', om.Group())\n g1a = model.add_subsystem('g1a', om.Group())\n\n g1.add_subsystem('c', om.ExecComp('y=x'))\n g1a.add_subsystem('c', om.ExecComp('y=x'))\n\n model.connect('g1.c.y', 'g1a.c.x')\n model.connect('g1a.c.y', 'g1.c.x')\n\n prob.setup(check=True)\n\n\nclass TestConnectionsIndices(unittest.TestCase):\n\n def setUp(self):\n class ArrayComp(om.ExplicitComponent):\n def setup(self):\n self.add_input('inp', val=np.ones((2)))\n self.add_input('inp1', val=0)\n self.add_output('out', val=np.zeros((2)))\n\n def compute(self, inputs, outputs):\n outputs['out'] = inputs['inp'] * 2.\n\n indep_var_comp = om.IndepVarComp()\n indep_var_comp.add_output('blammo', val=3.)\n indep_var_comp.add_output('arrout', val=np.ones(5))\n\n prob = om.Problem()\n prob.model.add_subsystem('idvp', indep_var_comp)\n prob.model.add_subsystem('arraycomp', ArrayComp())\n\n self.prob = prob\n\n def test_bad_shapes(self):\n # Should not be allowed because the source and target shapes do not match\n self.prob.model.connect('idvp.blammo', 'arraycomp.inp')\n\n expected = \"<model> <class Group>: The source and target shapes do not match or are \" + \\\n \"ambiguous for the connection 'idvp.blammo' to 'arraycomp.inp'. \" + \\\n \"The source shape is (1,) but the target shape is (2,).\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n def test_bad_length(self):\n # Should not be allowed because the length of src_indices is greater than\n # the shape of arraycomp.inp\n self.prob.model.connect('idvp.blammo', 'arraycomp.inp', src_indices=[0, 1, 0])\n\n expected = \"<model> <class Group>: The source indices [0 1 0] do not specify a valid shape \" + \\\n \"for the connection 'idvp.blammo' to 'arraycomp.inp'. The target shape is \" + \\\n \"(2,) but indices are (3,).\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n def test_bad_value(self):\n # Should not be allowed because the index value within src_indices is outside\n # the valid range for the source\n self.prob.model.connect('idvp.arrout', 'arraycomp.inp1', src_indices=[100000])\n\n expected = \"<model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'idvp.arrout' to 'arraycomp.inp1'. \" + \\\n \"Index '100000' is out of range for source dimension of size 5.\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n def test_bad_value_bug(self):\n # Should not be allowed because the 2nd index value within src_indices is outside\n # the valid range for the source. 
A bug prevented this from being checked.\n self.prob.model.connect('idvp.arrout', 'arraycomp.inp', src_indices=[0, 100000])\n\n expected = \"<model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'idvp.arrout' to 'arraycomp.inp'. \" + \\\n \"Index '100000' is out of range for source dimension of size 5.\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n\nclass TestShapes(unittest.TestCase):\n def test_connect_flat_array_to_row_vector(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))\n p.model.add_subsystem('C1',\n om.ExecComp('y=dot(x, A)',\n x={'value': np.zeros((1, 10))},\n A={'value': np.eye(10)},\n y={'value': np.zeros((1, 10))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], np.arange(10)[np.newaxis, :])\n\n def test_connect_flat_array_to_col_vector(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))\n p.model.add_subsystem('C1',\n om.ExecComp('y=dot(A, x)',\n x={'value': np.zeros((10, 1))},\n A={'value': np.eye(10)},\n y={'value': np.zeros((10, 1))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], np.arange(10)[:, np.newaxis])\n\n def test_connect_row_vector_to_flat_array(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[np.newaxis, :]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros(10)},\n y={'value': np.zeros(10)}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], 5 * np.arange(10))\n\n def test_connect_col_vector_to_flat_array(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[:, np.newaxis]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros(10)},\n y={'value': np.zeros(10)}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], 5 * np.arange(10))\n\n def test_connect_flat_to_3d_array(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros((1, 10, 1))},\n y={'value': np.zeros((1, 10, 1))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], 5 * np.arange(10)[np.newaxis, :, np.newaxis])\n\n def test_connect_flat_nd_to_flat_nd(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x',\n val=np.arange(10)[np.newaxis, :, np.newaxis,\n np.newaxis]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros((1, 1, 1, 10))},\n y={'value': np.zeros((1, 1, 1, 10))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'],\n 5 * np.arange(10)[np.newaxis, np.newaxis, np.newaxis, :])\n\n def test_connect_incompatible_shapes(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[np.newaxis, :,\n np.newaxis, np.newaxis]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros((5, 2))},\n y={'value': np.zeros((5, 2))}))\n p.model.connect('indep.x', 'C1.x')\n\n expected = \"<model> <class Group>: The source and 
target shapes do not match or are \" + \\\n \"ambiguous for the connection 'indep.x' to 'C1.x'. The source shape is \" + \\\n \"(1, 10, 1, 1) but the target shape is (5, 2).\"\n\n with self.assertRaises(Exception) as context:\n p.setup()\n\n self.assertEqual(str(context.exception), expected)\n\n p.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n p.setup()\n\n\nclass TestMultiConns(unittest.TestCase):\n\n def test_mult_conns(self):\n\n class SubGroup(om.Group):\n def setup(self):\n self.add_subsystem('c1', om.ExecComp('y = 2*x', x=np.ones(4), y=2*np.ones(4)),\n promotes=['y', 'x'])\n self.add_subsystem('c2', om.ExecComp('z = 2*y', y=np.ones(4), z=2*np.ones(4)),\n promotes=['z', 'y'])\n\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('x', 10*np.ones(4))\n indeps.add_output('y', np.ones(4))\n\n prob.model.add_subsystem('sub', SubGroup())\n\n prob.model.connect('x', 'sub.x')\n prob.model.connect('y', 'sub.y')\n\n expected = \"<model> <class Group>: The following inputs have multiple connections: \" + \\\n \"sub.c2.y from ['indeps.y', 'sub.c1.y']\"\n\n with self.assertRaises(Exception) as context:\n prob.setup()\n\n self.assertEqual(str(context.exception), expected)\n\n prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n prob.setup()\n\n def test_mixed_conns_same_level(self):\n\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', 10*np.ones(4))\n\n # c2.y is implicitly connected to c1.y\n prob.model.add_subsystem('c1', om.ExecComp('y = 2*x', x=np.ones(4), y=2*np.ones(4)),\n promotes=['y'])\n prob.model.add_subsystem('c2', om.ExecComp('z = 2*y', y=np.ones(4), z=2*np.ones(4)),\n promotes=['y'])\n\n # make a second, explicit, connection to y (which is c2.y promoted)\n prob.model.connect('indeps.x', 'y')\n\n expected = \"<model> <class Group>: Input 'c2.y' cannot be connected to 'indeps.x' \" + \\\n \"because it's already connected to 'c1.y'\"\n\n with self.assertRaises(Exception) as context:\n prob.setup()\n prob.final_setup()\n\n self.assertEqual(str(context.exception), expected)\n\n prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n prob.setup()\n\n def test_auto_ivc_ambiguous_with_src_indices_msg(self):\n\n class TComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options.declare('src_idx', [0, 1])\n\n def setup(self):\n src = self.options['src_idx']\n self.add_input('x', shape=2, src_indices=src, val=-2038.0)\n self.add_output('y', shape=2)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = 2.0 * inputs['x']\n\n\n prob = om.Problem()\n model = prob.model\n\n prob.model.add_subsystem('c1', TComp(src_idx=[0, 1]), promotes_inputs=['x'])\n prob.model.add_subsystem('c2', TComp(src_idx=[2, 3]), promotes_inputs=['x'])\n prob.model.add_subsystem('d1', TComp(src_idx=[0, 1]), promotes_inputs=[('x', 'zz')])\n prob.model.add_subsystem('d2', TComp(src_idx=[1, 2]), promotes_inputs=[('x', 'zz')])\n\n with self.assertRaises(RuntimeError) as context:\n prob.setup()\n\n msg = \"The following inputs ['c1.x', 'c2.x'] are defined using src_indices but the total source \"\n msg += \"size is undetermined. 
You can specify the src size by setting 'val' or 'src_shape' in a call to set_input_defaults, or by adding an IndepVarComp as the source.\"\n\n err_msg = str(context.exception).split(':')[-1]\n self.assertEqual(err_msg, msg)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestConnectionsDistrib(unittest.TestCase):\n N_PROCS = 2\n\n def test_serial_mpi_error(self):\n # Should still catch the bad index when we are running under mpi with no distributed comps.\n # A bug formerly prevented this.\n class TestComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options['distributed'] = False\n\n def setup(self):\n self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n def compute_partials(self, inputs, J):\n J['y', 'x'] = np.ones((2,))\n\n prob = om.Problem()\n model = prob.model\n model.add_subsystem('p1', om.IndepVarComp('x', np.array([1.0, 3.0])))\n model.add_subsystem('c3', TestComp())\n model.connect(\"p1.x\", \"c3.x\")\n\n rank = prob.comm.rank\n expected = f\"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'p1.x' to 'c3.x'. \" + \\\n \"Index '2' is out of range for source dimension of size 2.\"\n try:\n prob.setup()\n except Exception as err:\n self.assertEqual(str(err).splitlines()[-1], expected)\n else:\n self.fail('Exception expected.')\n\n def test_serial_mpi_error_flat(self):\n # Make sure the flat branch works too.\n class TestComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options['distributed'] = False\n\n def setup(self):\n self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0, flat_src_indices=True)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n def compute_partials(self, inputs, J):\n J['y', 'x'] = np.ones((2,))\n\n prob = om.Problem()\n model = prob.model\n model.add_subsystem('p1', om.IndepVarComp('x', np.array([1.0, 3.0])))\n model.add_subsystem('c3', TestComp())\n model.connect(\"p1.x\", \"c3.x\")\n\n rank = prob.comm.rank\n expected = f\"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'p1.x' to 'c3.x'. 
\" + \\\n \"Index '2' is out of range for source dimension of size 2.\"\n\n try:\n prob.setup()\n except Exception as err:\n self.assertEqual(str(err).splitlines()[-1], expected)\n else:\n self.fail('Exception expected.')\n\[email protected](MPI, \"MPI is required.\")\nclass TestConnectionsError(unittest.TestCase):\n N_PROCS = 2\n\n def test_incompatible_src_indices(self):\n class TestCompDist(om.ExplicitComponent):\n # this comp is distributed and forces PETScTransfer\n def initialize(self):\n self.options['distributed'] = True\n\n def setup(self):\n self.add_input('x', shape=2)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x', val=1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n class TestComp(om.ExplicitComponent):\n def initialize(self):\n self.options['distributed'] = False\n\n def setup(self):\n # read SRC_INDICES on each proc\n self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n def compute_partials(self, inputs, J):\n J['y', 'x'] = np.ones((2,))\n\n prob = om.Problem()\n model = prob.model\n\n rank = prob.comm.rank\n\n if rank == 0:\n setval = np.array([2.0, 3.0])\n else:\n setval = np.array([10.0, 20.0])\n\n # no parallel or distributed comps, so default_vector is used (local xfer only)\n model.add_subsystem('p1', om.IndepVarComp('x', setval))\n model.add_subsystem('c3', TestComp())\n model.add_subsystem('c4', TestCompDist())\n model.connect(\"p1.x\", \"c3.x\")\n model.connect(\"c3.y\", \"c4.x\")\n\n with self.assertRaises(ValueError) as context:\n prob.setup(check=False, mode='fwd')\n self.assertEqual(str(context.exception),\n f\"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index for \"\n \"the connection 'p1.x' to 'c3.x'. Index '2' is out of range for source \"\n \"dimension of size 2.\")\n\n\[email protected](MPI, \"MPI is required.\")\nclass TestConnectionsMPIBug(unittest.TestCase):\n N_PROCS = 2\n\n def test_bug_2d_src_indices(self):\n # This model gave an exception during setup.\n\n class Burn(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.arange(12))\n self.add_output('y', np.arange(12))\n\n def compute(self, inputs, outputs):\n outputs['y'] = inputs['x'] * 2.0\n\n class LinkageComp(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('in1', np.zeros((3, 2)))\n self.add_input('in2', np.zeros((3, 2)))\n self.add_output('out', np.zeros((3, 2)))\n\n def compute(self, inputs, outputs):\n outputs['out'] = 3 * inputs['in2'] - 2.5 * inputs['in1']\n\n class Phases(om.ParallelGroup):\n\n def setup(self):\n self.add_subsystem('burn1', Burn())\n self.add_subsystem('burn2', Burn())\n\n class Linkages(om.Group):\n\n def setup(self):\n self.add_subsystem('linkage', LinkageComp())\n\n class Traj(om.Group):\n\n def setup(self):\n self.add_subsystem('phases', Phases())\n self.add_subsystem('linkages', Linkages())\n\n def configure(self):\n self.connect('phases.burn1.y', 'linkages.linkage.in1', src_indices=np.array([[0, 3], [4, 6], [2, 1]]))\n self.connect('phases.burn2.y', 'linkages.linkage.in2', src_indices=np.array([[0, 3], [4, 6], [2, 1]]))\n\n prob = om.Problem(model=Traj())\n prob.setup()\n prob.run_model()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.eye", "numpy.zeros", "numpy.arange", "numpy.array" ] ]
sgascoin/extractViewAngle
[ "2ec54426714eac9628fa73b622519c88b8ab96b2" ]
[ "extractViewAngle.py" ]
[ "#!/usr/bin/python\n\n\"\"\"\nextractViewAngle.py\nScope: export points or raster of viewing incidences angles from a Theia L2A product (rasters are scaled by 100 as UInt16) \nAuthor: [email protected]\n\"\"\"\n\nimport csv\nimport gdal\nimport numpy as np\nimport ogr\nimport os\nimport osr\nimport sys\nimport xml.etree.ElementTree as ET\n\n\n# function to read points file as lon lat values delimited by tab without header line\ndef readPoints(f):\n with open(f,'r') as csvfile:\n reader = csv.reader(csvfile,delimiter=',')\n data = [r for r in reader]\n return data\n\n\n# function to write points values as csv\ndef writePoints(newPointsFn,outDictList):\n with open(newPointsFn, 'w') as csvfile:\n fieldnames = list(outDictList[0].keys())\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for ouDict in outDictList:\n writer.writerow(ouDict)\n\n\n# function to write an array to a (multiband) geotiff \ndef array2geotiff(newRasterFn,geoTransform,array,noData,outSpatialRef,dataType=gdal.GDT_Float64):\n cols = array.shape[1]\n rows = array.shape[0]\n bands = array.shape[2]\n driver = gdal.GetDriverByName('GTiff')\n outRaster = driver.Create(newRasterFn, cols, rows, bands, dataType, options=['COMPRESS=DEFLATE'])\n outRaster.SetGeoTransform(geoTransform)\n # write bands\n for i in range(bands):\n outband = outRaster.GetRasterBand(i+1) # 1-based index\n outband.WriteArray(array[:,:,i])\n outband.SetNoDataValue(noData)\n outRaster.SetProjection(outSpatialRef.ExportToWkt())\n outRaster.FlushCache()\n\n\n# function to get mask file name and bit number to test which detector was used\ndef getDetector(productFolder,root,bandId,detectorId):\n # find node containing detector metadata based on the presence of attribute \"detector_id\" in subnodes\n n = root.find(\".//Product_Organisation//*[@detector_id]/..\")\n if n is None:\n print('this product version does not provide detector mask')\n maskFn = bitNumber = None\n else:\n # get MASK_FILE element for target band and detector\n s = \"./MASK_FILE/[@band_id='{}'][@detector_id='{}']\".format(bandId,detectorId)\n element = n.find(s)\n # get detector mask file from element value \n maskFn = os.path.join(productFolder,element.text)\n # get detector bit number from element attribute\n bitNumber = int(element.attrib['bit_number'])\n return maskFn, bitNumber\n\n\n# function to test if detector was used at this point\ndef testDetector(point,maskFn,bitNumber):\n # open the raster file\n ds = gdal.Open(maskFn,gdal.GA_ReadOnly)\n if ds is None:\n print('Could not open the mask file')\n sys.exit(1)\n band = ds.GetRasterBand(1) # 1-based index\n data = band.ReadAsArray() # we could save memory and time by reading only the pixel using ReadRaster?\n geoTransform = ds.GetGeoTransform()\n # get position in array\n col,row = pix2map(point.GetX(),point.GetY(),geoTransform)\n # check if point is outside the mask\n if (col < 0 or row < 0 or col > band.XSize or row > band.YSize):\n print('Point is outside the product mask extent')\n test = False\n else:\n value = data[int(col)][int(row)]\n test = testBit(value, bitNumber)\n return test\n\n\n# function which returns True if the bit number n is 1 in an integer value of base 10.\ndef testBit(value, n):\n mask = 1 << (n - 1) # bitNumber is 1-based index\n return(value & mask > 0)\n\n\n# find position of x,y coordinates in georeferenced array with the same projection system\ndef pix2map(x,y,geoTransform):\n col = np.floor((x - geoTransform[0]) / geoTransform[1]) #x pixel\n row = np.floor((y - 
geoTransform[3]) / geoTransform[5]) #y pixel\n return col,row\n\n\n# main function\ndef main(productFolder,outputFolder,points=None):\n # scale factor to export angles\n scale = 100\n # set no data value for UInt16 export\n noDataRaster = np.iinfo(np.uint16).max\n # set no data value for csv export\n noDataCsv = -10000 \n\n # MTD angle grid always have a 5 km resolution\n colstep = 5000\n rowstep = -5000\n # MTD angle grid always have an size of 23x23\n nx = ny = 23\n\n # open metadata file\n MTDFile = os.path.join(productFolder,os.path.basename(os.path.abspath(productFolder)+'_MTD_ALL.xml'))\n tree = ET.parse(MTDFile)\n root = tree.getroot()\n \n # get product id\n productId = root.find(\".//PRODUCT_ID\").text\n # get EPSG code\n epsg = root.find(\".//HORIZONTAL_CS_CODE\").text\n # get grid corners coordinates (warning in array geometry the lower left corner is the upper left in raster geometry)\n ulx = float(root.find(\".//*[@name='upperLeft']/X\").text)\n uly = float(root.find(\".//*[@name='upperLeft']/Y\").text)\n lrx = float(root.find(\".//*[@name='lowerRight']/X\").text)\n lry = float(root.find(\".//*[@name='lowerRight']/Y\").text) \n\n # We assume that the above coordinates correspond to the *centers* of corner pixels\n # otherwise the 23x23 grid would have an extra row and column somewhere\n ulxMTD = ulx - colstep/2\n ulyMTD = uly - rowstep/2\n\n # define the affine transformation coefficients\n geoTransform = (ulxMTD, colstep, 0, ulyMTD, 0, rowstep)\n\n # create output spatial reference\n outSpatialRef = osr.SpatialReference()\n outSpatialRef.ImportFromEPSG(int(epsg))\n\n if points is not None:\n # create coordinate transformation\n inSpatialRef = osr.SpatialReference()\n inSpatialRef.ImportFromEPSG(4326)\n # keep the traditionnal GIS order even if GDAL > 3\n try:\n inSpatialRef.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n except:\n pass\n coordTransform = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)\n\n # loop through angle definition\n for angle in ('Azimuth','Zenith'):\n\n # initialize output list of dictionnaries for points\n if points is not None:\n outDictList = list()\n [outDictList.append(dict()) for i in points]\n\n # loop through bands\n for band in root.iter('Band_Viewing_Incidence_Angles_Grids_List'):\n # init stack of grids\n Zd = np.array([], dtype=float).reshape(nx,ny,0)\n # loop through detectors\n for detector in band.iter('Viewing_Incidence_Angles_Grids'):\n rows = detector.find(angle).findall('.//VALUES')\n grid = ''\n # loop through grid rows to read grid values as a string\n for row in iter(rows):\n grid = grid + row.text + '\\n'\n # array with grid values \n Z = np.fromstring(grid, dtype=float, sep=' ')\n # reshape to 2D array\n Z = Z.reshape((len(rows),-1))\n # add to the stack of detector grids\n Zd = np.dstack((Zd,Z))\n\n # display mean value for this angle and band\n bandId = band.attrib.get('band_id')\n print('{:s} {:s} mean value: {:g}'.format(bandId,angle,np.nanmean(Zd)))\n\n # export as multiband geotiff (we don't flatten the stack since the detector arrays overlap)\n if points is None:\n newRasterFn = os.path.join(\\\n outputFolder,'{:s}_{:s}_{:s}{:d}.tif'.format(productId,bandId,angle,scale))\n # scale \n Zd = scale * Zd\n # set no data\n Zd[np.isnan(Zd)] = noDataRaster\n # write to disk\n array2geotiff(newRasterFn,geoTransform,Zd,noDataRaster,outSpatialRef,gdal.GDT_UInt16)\n\n # find values at points\n else:\n for ipoint,pointCoord in enumerate(points):\n lon,lat = float(pointCoord[0]),float(pointCoord[1])\n # create a 
geometry from coordinates\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(lon, lat) # traditionnal GIS order\n # transform point\n point.Transform(coordTransform)\n # find position in array\n col,row = pix2map(point.GetX(),point.GetY(),geoTransform)\n # check if point is out of the grid\n if (col < 0 or row < 0 or col > nx or row > ny):\n v = noDataCsv\n\n # otherwise retrieve the values in all bands\n else:\n vd = Zd[int(row),int(col),:]\n # select the non-NaN value(s)\n v = vd[np.isfinite(vd)]\n\n # check if point is in no data area\n if len(v) == 0:\n v = noDataCsv\n\n # check if more than one value is found in the stack \n # this can occur because angle grids overlap due to their coarse resolution\n elif len(v) > 1:\n print('solving an ambiguity for band = ' + bandId + ' at point ' + str(pointCoord))\n detectorList = [d.attrib for d in band.iter('Viewing_Incidence_Angles_Grids')]\n # indices where are the finite values\n indexList = np.argwhere(np.isfinite(vd))\n # look into the detector mask files to find which detector has measured this point \n test = False\n for ix in indexList :\n detectorId = detectorList[int(ix)]['detector_id']\n print('testing detector = ' + detectorId)\n maskFn,bitNumber = getDetector(productFolder,root,bandId,detectorId)\n # if the detector mask file is provided then we assign the first value\n if maskFn is None :\n print('takes first detector value by default')\n test = True\n test = testDetector(point,maskFn,bitNumber)\n if test: \n print('found it!')\n v = vd[ix]\n break\n # if test always false (point outside the mask) returns no data\n if test is False: \n v = noDataCsv\n\n outDictList[ipoint]['lon'] = lon\n outDictList[ipoint]['lat'] = lat\n # add this value to the output dictionnary \n if bandId in outDictList[ipoint]:\n outDictList[ipoint][bandId].append(float(v))\n else:\n outDictList[ipoint][bandId] = float(v)\n\n # dump data to text file for this angle and band\n if points is not None:\n newPointsFn = os.path.join(\\\n outputFolder,'{:s}_{:s}.csv'.format(productId,angle))\n writePoints(newPointsFn,outDictList)\n\nif __name__ == \"__main__\":\n\n # check arguments\n if len(sys.argv) == 4:\n print(\"Point mode\")\n pointFile = sys.argv[3]\n # check if input file exists\n if not(os.path.exists(pointFile)):\n print(\"Error: input point file does not exists\")\n sys.exit(1)\n points = readPoints(pointFile) \n\n elif len(sys.argv) == 3:\n print(\"Raster mode\")\n points = None\n\n else:\n print(\"Error: missing arguments\\n\")\n print(\"usage in raster mode: extractViewAngle.py productFolder outputFolder\\n\")\n print(\"usage in point mode: extractViewAngle.py productFolder outputFolder point_table_as_lon_lat.csv\\n\")\n print(\"example: python extractViewAngle.py SENTINEL2A_20180224-103018-463_L2A_T31TGK_C_V2-2 angles\\n\")\n print(\"example: python extractViewAngle.py SENTINEL2A_20180224-103018-463_L2A_T31TGK_C_V2-2 angles points.csv\\n\")\n sys.exit(1)\n\n # check if input file exists\n productFolder = sys.argv[1]\n if not(os.path.exists(productFolder)):\n print (\"Error: input folder does not exists\")\n sys.exit(1)\n\n # check if folder can be created\n outputFolder = sys.argv[2]\n try:\n os.makedirs(outputFolder,exist_ok=True)\n except OSError:\n print (\"Error: cannot create output folder\")\n sys.exit(1)\n else:\n main(productFolder,outputFolder,points)\n \n" ]
[ [ "numpy.nanmean", "numpy.floor", "numpy.iinfo", "numpy.dstack", "numpy.isnan", "numpy.array", "numpy.fromstring", "numpy.isfinite" ] ]
yannikkellerde/TD3
[ "6101baaa38a53bdaa34e33105f4e016eb84cf5a9" ]
[ "my_replay_buffer.py" ]
[ "import numpy as np\nimport torch\nimport pickle\nimport os\n\nclass ReplayBuffer_particles(object):\n def __init__(self, obs_space, action_space, max_size=int(1e6), load_folder=None):\n self.max_size = max_size\n self.store_np = [\"state_features\",\"state_particles\",\"action\",\n \"next_state_features\",\"next_state_particles\",\"reward\",\n \"not_done\"]\n self.store_pkl = [\"ptr\",\"size\"]\n if load_folder is None:\n self.ptr = 0\n self.size = 0\n self.state_features = np.zeros((max_size,obs_space[0].shape[0]))\n self.state_particles = np.zeros((max_size, *obs_space[1].shape))\n self.action = np.zeros((max_size, action_space.shape[0]))\n self.next_state_features = np.zeros((max_size,obs_space[0].shape[0]))\n self.next_state_particles = np.zeros((max_size, *obs_space[1].shape))\n self.reward = np.zeros((max_size, 1))\n self.not_done = np.zeros((max_size, 1))\n else:\n self.load(load_folder)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def save(self,folder):\n os.makedirs(folder,exist_ok=True)\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n pickle.dump(self.__dict__[attrib],f,protocol=4)\n\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n np.save(f,self.__dict__[attrib])\n \n def load(self,folder):\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = pickle.load(f)\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = np.load(f)\n\n def add(self, state, action, next_state, reward, done):\n self.state_features[self.ptr] = state[0]\n self.state_particles[self.ptr] = state[1]\n self.action[self.ptr] = action\n self.next_state_features[self.ptr] = next_state[0]\n self.next_state_particles[self.ptr] = next_state[1]\n self.reward[self.ptr] = reward\n self.not_done[self.ptr] = 1. 
- done\n\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample(self, batch_size):\n ind = np.random.randint(0, self.size, size=batch_size)\n\n return (\n torch.FloatTensor(self.state_features[ind]).to(self.device),\n torch.FloatTensor(self.state_particles[ind]).to(self.device),\n torch.FloatTensor(self.action[ind]).to(self.device),\n torch.FloatTensor(self.next_state_features[ind]).to(self.device),\n torch.FloatTensor(self.next_state_particles[ind]).to(self.device),\n torch.FloatTensor(self.reward[ind]).to(self.device),\n torch.FloatTensor(self.not_done[ind]).to(self.device)\n )\n\n\nclass ReplayBuffer_featured(object):\n def __init__(self, obs_space, action_space, max_size=int(1e6),load_folder=None):\n self.max_size = max_size\n self.ptr = 0\n self.size = 0\n self.store_np = [\"state\",\"action\",\"next_state\",\"reward\",\"not_done\"]\n self.store_pkl = [\"ptr\",\"size\"]\n\n if load_folder is None:\n self.state = np.zeros((max_size, obs_space.shape[0]))\n self.action = np.zeros((max_size, action_space.shape[0]))\n self.next_state = np.zeros((max_size, obs_space.shape[0]))\n self.reward = np.zeros((max_size, 1))\n self.not_done = np.zeros((max_size, 1))\n else:\n self.load(load_folder)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def save(self,folder):\n os.makedirs(folder,exist_ok=True)\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n pickle.dump(self.__dict__[attrib],f,protocol=4)\n\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n np.save(f,self.__dict__[attrib])\n \n def load(self,folder):\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = pickle.load(f)\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = np.load(f)\n\n def add(self, state, action, next_state, reward, done):\n self.state[self.ptr] = state\n self.action[self.ptr] = action\n self.next_state[self.ptr] = next_state\n self.reward[self.ptr] = reward\n self.not_done[self.ptr] = 1. - done\n\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample(self, batch_size):\n ind = np.random.randint(0, self.size, size=batch_size)\n\n return (\n torch.FloatTensor(self.state[ind]).to(self.device),\n torch.FloatTensor(self.action[ind]).to(self.device),\n torch.FloatTensor(self.next_state[ind]).to(self.device),\n torch.FloatTensor(self.reward[ind]).to(self.device),\n torch.FloatTensor(self.not_done[ind]).to(self.device)\n )\n\nif __name__ == \"__main__\":\n env = gym.make(\"water_pouring:Pouring-mdp-full-v0\")\n r = ReplayBuffer(env.observation_space, env.action_space)\n r.save(\"test.pkl\")" ]
[ [ "numpy.load", "numpy.save", "torch.FloatTensor", "numpy.zeros", "torch.cuda.is_available", "numpy.random.randint" ] ]
savan77/nni
[ "510213393d9cae58c5a8cccd21f322f7bba4e0cf" ]
[ "examples/trials/cifar10_grad_match/cords/selectionstrategies/supervisedlearning/submodularselectionstrategy.py" ]
[ "import apricot\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom scipy.sparse import csr_matrix\nfrom .dataselectionstrategy import DataSelectionStrategy\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\nclass SubmodularSelectionStrategy(DataSelectionStrategy):\n \"\"\"\n This class extends :class:`selectionstrategies.supervisedlearning.dataselectionstrategy.DataSelectionStrategy`\n to include submodular optmization functions using apricot for data selection.\n\n Parameters\n ----------\n trainloader: class\n Loading the training data using pytorch DataLoader\n valloader: class\n Loading the validation data using pytorch DataLoader\n model: class\n Model architecture used for training\n loss_type: class\n The type of loss criterion\n device: str\n The device being utilized - cpu | cuda\n num_classes: int\n The number of target classes in the dataset\n linear_layer: bool\n Apply linear transformation to the data\n if_convex: bool\n If convex or not\n selection_type: str\n PerClass or Supervised\n submod_func_type: str\n The type of submodular optimization function. Must be one of\n 'facility-location', 'graph-cut', 'sum-redundancy', 'saturated-coverage' \n \"\"\"\n\n def __init__(self, trainloader, valloader, model, loss_type,\n device, num_classes, linear_layer, if_convex, selection_type, submod_func_type):\n \"\"\"\n Constructer method\n \"\"\"\n\n super().__init__(trainloader, valloader, model, num_classes, linear_layer)\n\n self.loss_type = loss_type # Make sure it has reduction='none' instead of default\n self.device = device\n self.if_convex = if_convex\n self.selection_type = selection_type\n self.submod_func_type = submod_func_type\n\n\n def distance(self, x, y, exp=2):\n \"\"\"\n Compute the distance.\n \n Parameters\n ----------\n x: Tensor\n First input tensor\n y: Tensor\n Second input tensor\n exp: float, optional\n The exponent value (default: 2)\n \n Returns\n ----------\n dist: Tensor\n Output tensor \n \"\"\"\n\n n = x.size(0)\n m = y.size(0)\n d = x.size(1)\n x = x.unsqueeze(1).expand(n, m, d)\n y = y.unsqueeze(0).expand(n, m, d)\n dist = torch.pow(x - y, exp).sum(2)\n #dist = torch.exp(-1 * torch.pow(x - y, 2).sum(2))\n return dist\n\n\n def compute_score(self, model_params, idxs):\n \"\"\"\n Compute the score of the indices.\n\n Parameters\n ----------\n model_params: OrderedDict\n Python dictionary object containing models parameters\n idxs: list\n The indices\n \"\"\"\n\n trainset = self.trainloader.sampler.data_source\n subset_loader = torch.utils.data.DataLoader(trainset, batch_size=self.trainloader.batch_size, shuffle=False,\n sampler=SubsetRandomSampler(idxs),\n pin_memory=True)\n self.model.load_state_dict(model_params)\n self.N = 0\n g_is = []\n\n with torch.no_grad():\n if self.if_convex:\n for batch_idx, (inputs, targets) in enumerate(subset_loader):\n inputs, targets = inputs, targets\n self.N += inputs.size()[0]\n g_is.append(inputs.view(inputs.size()[0], -1))\n else:\n embDim = self.model.get_embedding_dim()\n for batch_idx, (inputs, targets) in enumerate(subset_loader):\n inputs, targets = inputs.to(self.device), targets.to(self.device, non_blocking=True)\n self.N += inputs.size()[0]\n with torch.no_grad():\n out, l1 = self.model(inputs, last=True)\n data = F.softmax(out, dim=1)\n outputs = torch.zeros(len(inputs), self.num_classes).to(self.device)\n outputs.scatter_(1, targets.view(-1, 1), 1)\n l0_grads = data - outputs\n if self.linear_layer:\n l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)\n l1_grads = 
l0_expand * l1.repeat(1, self.num_classes)\n g_is.append(torch.cat((l0_grads, l1_grads), dim=1))\n else:\n g_is.append(l0_grads)\n \n self.dist_mat = torch.zeros([self.N, self.N], dtype=torch.float32)\n first_i = True\n for i, g_i in enumerate(g_is, 0):\n if first_i:\n size_b = g_i.size(0)\n first_i = False\n for j, g_j in enumerate(g_is, 0):\n self.dist_mat[i * size_b: i * size_b + g_i.size(0),\n j * size_b: j * size_b + g_j.size(0)] = self.distance(g_i, g_j)\n self.const = torch.max(self.dist_mat).item()\n self.dist_mat = (self.const - self.dist_mat).numpy()\n\n\n def compute_gamma(self, idxs):\n \"\"\"\n Compute the gamma values for the indices.\n\n Parameters\n ----------\n idxs: list\n The indices\n \n Returns\n ----------\n gamma: list\n Gradient values of the input indices \n \"\"\"\n\n if self.selection_type == 'PerClass':\n gamma = [0 for i in range(len(idxs))]\n best = self.dist_mat[idxs] # .to(self.device)\n rep = np.argmax(best, axis=0)\n for i in rep:\n gamma[i] += 1\n elif self.selection_type == 'Supervised':\n gamma = [0 for i in range(len(idxs))]\n best = self.dist_mat[idxs] # .to(self.device)\n rep = np.argmax(best, axis=0)\n for i in range(rep.shape[1]):\n gamma[rep[0, i]] += 1\n return gamma\n\n\n def get_similarity_kernel(self):\n \"\"\"\n Obtain the similarity kernel.\n\n Returns\n ----------\n kernel: ndarray\n Array of kernel values\n \"\"\"\n\n for batch_idx, (inputs, targets) in enumerate(self.trainloader):\n if batch_idx == 0:\n labels = targets\n else:\n tmp_target_i = targets\n labels = torch.cat((labels, tmp_target_i), dim=0)\n kernel = np.zeros((labels.shape[0], labels.shape[0]))\n for target in np.unique(labels):\n x = np.where(labels == target)[0]\n # prod = np.transpose([np.tile(x, len(x)), np.repeat(x, len(x))])\n for i in x:\n kernel[i, x] = 1\n return kernel\n\n\n def select(self, budget, model_params, optimizer):\n \"\"\"\n Data selection method using different submodular optimization\n functions.\n \n Parameters\n ----------\n budget: int\n The number of data points to be selected\n model_params: OrderedDict\n Python dictionary object containing models parameters\n optimizer: str\n The optimization approach for data selection. 
Must be one of\n 'random', 'modular', 'naive', 'lazy', 'approximate-lazy', 'two-stage',\n 'stochastic', 'sample', 'greedi', 'bidirectional'\n \n Returns\n ----------\n total_greedy_list: list\n List containing indices of the best datapoints \n gammas: list\n List containing gradients of datapoints present in greedySet\n \"\"\"\n\n for batch_idx, (inputs, targets) in enumerate(self.trainloader):\n if batch_idx == 0:\n x_trn, labels = inputs, targets\n else:\n tmp_inputs, tmp_target_i = inputs, targets\n labels = torch.cat((labels, tmp_target_i), dim=0)\n per_class_bud = int(budget / self.num_classes)\n total_greedy_list = []\n gammas = []\n if self.selection_type == 'PerClass':\n for i in range(self.num_classes):\n idxs = torch.where(labels == i)[0]\n self.compute_score(model_params, idxs)\n if self.submod_func_type == 'facility-location': \n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'graph-cut':\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'sum-redundancy':\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'saturated-coverage':\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n \n sim_sub = fl.fit_transform(self.dist_mat)\n greedyList = list(np.argmax(sim_sub, axis=1))\n gamma = self.compute_gamma(greedyList) \n total_greedy_list.extend(idxs[greedyList])\n gammas.extend(gamma)\n\n elif self.selection_type == 'Supervised':\n for i in range(self.num_classes):\n if i == 0:\n idxs = torch.where(labels == i)[0]\n N = len(idxs)\n self.compute_score(model_params, idxs)\n row = idxs.repeat_interleave(N)\n col = idxs.repeat(N)\n data = self.dist_mat.flatten()\n else:\n idxs = torch.where(labels == i)[0]\n N = len(idxs)\n self.compute_score(model_params, idxs)\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\n col = torch.cat((col, idxs.repeat(N)), dim=0)\n data = np.concatenate([data, self.dist_mat.flatten()], axis=0)\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\n self.dist_mat = sparse_simmat \n if self.submod_func_type == 'facility-location':\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'graph-cut':\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'sum-redundancy':\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'saturated-coverage':\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n\n sim_sub = fl.fit_transform(sparse_simmat)\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\n gammas = self.compute_gamma(total_greedy_list)\n return total_greedy_list, gammas\n" ]
[ [ "numpy.zeros", "torch.repeat_interleave", "torch.nn.functional.softmax", "torch.no_grad", "numpy.argmax", "torch.utils.data.sampler.SubsetRandomSampler", "torch.where", "torch.max", "torch.zeros", "numpy.where", "torch.cat", "numpy.unique", "torch.pow" ] ]
cloudhan/jax
[ "9781f365a1c5dbdf57bf78b98831c4390eb9ca5f" ]
[ "jax/interpreters/pxla.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementation of pmap and related functionality.\"\"\"\n\n# A ShardingSpec describes at a high level how a logical array is sharded across\n# devices (each ShardedDeviceArray has a ShardingSpec, and ShardingSpecs also\n# describe how to shard inputs to a parallel computation). spec_to_indices()\n# encodes exactly how a given ShardingSpec is translated to device buffers, i.e.\n# how the sharded array is \"laid out\" across devices. Given a sequence of\n# devices, we shard the data across the devices in row-major order, with\n# replication treated as an extra inner dimension.\n#\n# For example, given the logical data array [1, 2, 3, 4], if we were to\n# partition this array 4 ways with a replication factor of 2, for a total of 8\n# devices, the data on each device would be: [1, 1], [2, 2], [3, 3], [4, 4].\n#\n# This encoding is assumed by various parts of the system, e.g. generating\n# replica groups for collective operations.\n\nfrom contextlib import contextmanager\nfrom collections import defaultdict, OrderedDict\nimport dataclasses\nfrom functools import partial\nimport itertools as it\nimport operator as op\nimport threading\nfrom typing import (Any, Callable, Dict, List, NamedTuple, Optional,\n Sequence, Set, Tuple, Type, Union, Iterable)\nimport sys\n\nfrom absl import logging\nimport numpy as np\n\nfrom .._src.config import config\nfrom .. import core\nfrom .. import linear_util as lu\nfrom jax._src.abstract_arrays import array_types\nfrom ..core import ConcreteArray, ShapedArray\nfrom jax._src import device_array\nfrom .._src import source_info_util\nfrom .._src.util import (unzip3, prod, safe_map, safe_zip,\n extend_name_stack, wrap_name, assert_unreachable,\n tuple_insert, tuple_delete, distributed_debug_log)\nfrom ..errors import JAXTypeError\nfrom jax._src import dispatch\nfrom jax._src.lib import xla_bridge as xb\nfrom jax._src.lib import xla_client as xc\nfrom jax._src.lib import pmap_lib\nfrom ..tree_util import tree_flatten, tree_map\nfrom . import batching\nfrom . import partial_eval as pe\nfrom . import xla\nfrom . 
import ad\n\n# Built in Python lists don't support weak refs but subclasses of lists do.\nclass WeakRefList(list):\n pass\n\nif sys.version_info >= (3, 8):\n from functools import cached_property as maybe_cached_property\nelse:\n maybe_cached_property = property\n\nif sys.version_info >= (3, 9):\n OrderedDictType = OrderedDict\nelse:\n OrderedDictType = Dict\n\nxops = xc.ops\n\nunsafe_map, map = map, safe_map # type: ignore\n\nIndex = Union[int, slice, Tuple[Union[int, slice], ...]]\n\nNoSharding = pmap_lib.NoSharding\nChunked = pmap_lib.Chunked\nUnstacked = pmap_lib.Unstacked\n\nShardedAxis = pmap_lib.ShardedAxis\nReplicated = pmap_lib.Replicated\n\n_UNSHARDED_INSTANCE = NoSharding()\nAvalDimSharding = Union[Unstacked, Chunked, NoSharding]\nMeshDimAssignment = Union[ShardedAxis, Replicated]\nShardingSpec = pmap_lib.ShardingSpec\n\n\ndef sharding_spec_mesh_shape(self):\n sharded_axis_sizes = []\n for sharding in self.sharding:\n if isinstance(sharding, NoSharding):\n continue\n elif isinstance(sharding, Unstacked):\n sharded_axis_sizes.append(sharding.size)\n elif isinstance(sharding, Chunked):\n sharded_axis_sizes.extend(sharding.chunks)\n else:\n assert_unreachable(sharding)\n return tuple(sharded_axis_sizes[a.axis] if isinstance(a, ShardedAxis) else a.replicas\n for a in self.mesh_mapping)\n\ndef sharding_spec_sharding_proto(self):\n \"\"\"Converts a ShardingSpec to an OpSharding proto.\n\n See\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/xla_data.proto#L601\n for details on the OpSharding proto.\n Unfortunately the semantics are not very well described in the proto spec, but the code here might help:\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py\n \"\"\"\n mesh_shape = self.mesh_shape\n mesh = np.arange(np.prod(mesh_shape)).reshape(mesh_shape)\n\n sharded_axes = {} # maps sharded axis identifiers to mesh axis indices to which they're mapped\n replicated_maxes = [] # lists mesh axis identifiers to replicate over\n for maxis, assignment in enumerate(self.mesh_mapping):\n if isinstance(assignment, Replicated):\n replicated_maxes.append(maxis)\n elif isinstance(assignment, ShardedAxis):\n sharded_axes[assignment.axis] = maxis\n else:\n assert_unreachable(assignment)\n\n proto = xc.OpSharding()\n if len(replicated_maxes) == len(self.mesh_mapping):\n proto.type = xc.OpSharding.Type.REPLICATED\n return proto\n else:\n proto.type = xc.OpSharding.Type.OTHER\n\n mesh_permutation = []\n new_mesh_shape = []\n next_sharded_axis = 0\n for axis, sharding in enumerate(self.sharding):\n if isinstance(sharding, NoSharding):\n new_mesh_shape.append(1) # Add a dummy mesh axis we won't be sharding over\n elif isinstance(sharding, Chunked):\n for nchunks in sharding.chunks:\n maxis = sharded_axes[next_sharded_axis]\n assert mesh_shape[maxis] == nchunks\n mesh_permutation.append(maxis)\n next_sharded_axis += 1\n new_mesh_shape.append(int(np.prod(sharding.chunks)))\n elif isinstance(sharding, Unstacked):\n raise RuntimeError(\"Cannot convert unstacked sharding specs to XLA OpSharding\")\n else:\n assert_unreachable(sharding)\n\n # Create the partial sharding proto if tensor is replicated over some mesh axes\n if replicated_maxes:\n new_mesh_shape.append(-1)\n mesh_permutation.extend(replicated_maxes)\n proto.replicate_on_last_tile_dim = True\n\n proto_mesh = mesh.transpose(mesh_permutation).reshape(new_mesh_shape)\n proto.tile_assignment_dimensions = list(proto_mesh.shape)\n 
proto.tile_assignment_devices = list(proto_mesh.flat)\n return proto\n\ndef sharding_spec_indices(self, shape: Tuple[int, ...]) -> np.ndarray:\n \"\"\"Returns NumPy-style indices corresponding to a sharding spec.\n\n Args:\n shape: The shape of the logical array being sharded.\n\n Returns:\n An ndarray with the same shape as the logical mesh (as derived form\n `mesh_mapping`). Each entry is a NumPy-style index selecting the subset of\n the data array to be placed on a corresponding device. The indices can be\n ints, slice objects with step=1, or tuples of those.\n \"\"\"\n assert len(shape) == len(self.sharding), (shape, self.sharding)\n\n axis_indices: List[Sequence[Index]] = []\n shard_indices_shape = []\n for dim, sharding in enumerate(self.sharding):\n axis_size = shape[dim]\n if isinstance(sharding, NoSharding):\n axis_indices.append([slice(None)])\n # NOTE: We don't append unsharded dimensions to shard_indices_shape here,\n # because they do not appear in the mesh mapping.\n elif isinstance(sharding, Unstacked):\n assert axis_size == sharding.size, f'{axis_size} != {sharding.size}'\n axis_indices.append(range(axis_size))\n shard_indices_shape.append(axis_size)\n elif isinstance(sharding, Chunked):\n total_chunks = int(np.prod(sharding.chunks))\n shard_size, ragged = divmod(axis_size, total_chunks)\n assert not ragged, (axis_size, total_chunks, dim)\n axis_indices.append([slice(i * shard_size, (i + 1) * shard_size)\n for i in range(total_chunks)])\n shard_indices_shape.extend(sharding.chunks)\n else:\n assert_unreachable(sharding)\n\n # shard_indices is an ndarray representing the sharded axes of the logical array,\n # with each dimension having size equal to the number of shards across the corresponding\n # logical array dimension, and each element containing the multi-dimensional index that\n # is used to extract the corresponding shard of the logical array.\n shard_indices = np.empty([prod(shard_indices_shape)], dtype=np.object_)\n for i, idxs in enumerate(it.product(*axis_indices)):\n shard_indices[i] = idxs\n shard_indices = shard_indices.reshape(shard_indices_shape)\n\n # Ensure that each sharded axis is used exactly once in the mesh mapping\n num_sharded_dim = len(shard_indices_shape)\n sharded_dim_perm = [a.axis for a in self.mesh_mapping if isinstance(a, ShardedAxis)]\n assert (set(sharded_dim_perm) == set(range(num_sharded_dim)) and\n len(sharded_dim_perm) == num_sharded_dim)\n # Replicate/reorder the indices according to the mesh mapping\n replica_sizes = tuple(a.replicas for a in self.mesh_mapping if isinstance(a, Replicated))\n replica_dim, sharded_dim = it.count(0), iter(sharded_dim_perm)\n perm = [next(replica_dim) if isinstance(a, Replicated) else\n len(replica_sizes) + next(sharded_dim)\n for a in self.mesh_mapping]\n return (np.broadcast_to(shard_indices, replica_sizes + shard_indices.shape)\n .transpose(perm))\n\ndef sharding_spec_repr(self):\n return f'ShardingSpec({self.sharding}, {self.mesh_mapping})'\n\n\nShardingSpec.mesh_shape = property(sharding_spec_mesh_shape)\nShardingSpec.sharding_proto = sharding_spec_sharding_proto\nShardingSpec.indices = sharding_spec_indices\n# mypy raises: error: Cannot assign to a method [assignment]\nShardingSpec.__repr__ = sharding_spec_repr # type: ignore\n# Do not pollute the namespace\ndel sharding_spec_mesh_shape, sharding_spec_indices, sharding_spec_repr\n\ndef spec_to_indices(shape: Tuple[int, ...],\n spec: ShardingSpec) -> Tuple[Index, ...]:\n \"\"\"Returns numpy-style indices corresponding to a sharding spec.\n\n Each 
index describes a shard of the array. The order of the indices is the\n same as the device_buffers of a ShardedDeviceArray (i.e. the data is laid out\n row-major).\n\n Args:\n shape: The shape of the logical array being sharded.\n spec: Describes how the array is sharded and how the shards are assigned to\n the logical mesh.\n\n Returns:\n A tuple of length equal to the size of the mesh (inferred as the product of\n sharded dimension sizes and all replication factors). Each element is an\n int, a slice object with step=1, or a tuple thereof, to be treated as an\n index into the full logical array.\n \"\"\"\n return tuple(spec.indices(shape).flat) # type: ignore\n\n\n### util\n\ndef identity(x): return x\n\ndef _shard_arg(arg, devices, arg_indices):\n \"\"\"Returns a list of size len(devices) containing per-device buffers.\n\n For the C++ pmap path, we fallback to Python (this function) to shard\n arguments that are not supported by the C++ `ShardArg`.\n\n Arrgs:\n arg: The Python argument.\n devices: The list of devices to shard over.\n arg_indices: A list of `len(devices)` indices to use to shard the argument.\n \"\"\"\n if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:\n # The shard_arg_handlers allow an extensible set of types to be sharded, but\n # inline handling for ShardedDeviceArray as a special case for performance\n # NOTE: we compare indices instead of sharding_spec because\n # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.\n return [\n buf if buf.device() == d else buf.copy_to_device(d)\n for d, buf in zip(devices, arg.device_buffers)\n ]\n else:\n arg = xla.canonicalize_dtype(arg)\n return shard_arg_handlers[type(arg)](arg, devices, arg_indices)\n\n\n\ndef shard_args(devices: Sequence[xb.xla_client.Device],\n indices: Sequence[Sequence[Index]],\n args) -> Sequence[Sequence[xb.xla_client.Buffer]]:\n \"\"\"Shard each argument data array along its leading axis.\n\n Args:\n devices: sequence of Devices mapping replica index to a physical device.\n indices: sequence of the same length as `args` describing how each arg\n should be sharded/replicated across `devices`. Each element in `indices`\n is the same length as `devices`.\n args: a sequence of JaxTypes representing arguments to be sharded according\n to `indices` and placed on `devices`.\n\n Returns:\n A list of length matching args, containing lists of per-device buffers\n for each argument.\n \"\"\"\n return [_shard_arg(arg, devices, indices[a]) for a, arg in enumerate(args)]\n\n\nshard_arg_handlers: Dict[Any, Callable[[Any, Any, Any], Sequence[Any]]] = {}\nshard_arg_handlers[core.Unit] = \\\n lambda x, devices, _: device_put(core.unit, devices, replicate=True)\ndef _shard_array(x, devices, indices):\n return device_put([x[i] for i in indices], devices)\nfor _t in array_types:\n shard_arg_handlers[_t] = _shard_array\n\ndef _shard_device_array(x, devices, indices):\n start_indices, limit_indices, removed_dims = unzip3(\n _as_slice_indices(x, idx) for idx in indices)\n shards = x._multi_slice(start_indices, limit_indices, removed_dims)\n return device_put(shards, devices)\nfor t in device_array.device_array_types:\n shard_arg_handlers[t] = _shard_device_array\n\n\n# NOTE(skye): we could refactor to generate _multi_slice parameters directly\n# from the input ShardingSpec, rather than the indices. 
However, this would\n# require duplicating the ordering logic of spec_to_indices, which is more\n# subtle and more likely to change than the index logic we have to support here.\ndef _as_slice_indices(arr: device_array.DeviceArrayProtocol, idx: Index) -> Tuple[\n Tuple[int, ...], Tuple[int, ...], Tuple[int, ...]]:\n \"\"\"Returns start_indices, limit_indices, removed_dims\"\"\"\n start_indices = [0] * arr.ndim\n limit_indices = list(arr.shape)\n removed_dims = []\n\n tuple_idx = idx if isinstance(idx, tuple) else (idx,)\n for dim, sub_idx in enumerate(tuple_idx):\n if isinstance(sub_idx, int):\n start_indices[dim] = sub_idx\n limit_indices[dim] = sub_idx + 1\n removed_dims.append(dim)\n elif sub_idx == slice(None):\n continue\n else:\n assert isinstance(sub_idx, slice), sub_idx\n assert isinstance(sub_idx.start, int), sub_idx\n assert isinstance(sub_idx.stop, int), sub_idx\n start_indices[dim] = sub_idx.start\n limit_indices[dim] = sub_idx.stop\n\n return tuple(start_indices), tuple(limit_indices), tuple(removed_dims) # type: ignore\n\n\ndef shard_aval(size, axis: int, aval):\n try:\n return shard_aval_handlers[type(aval)](size, axis, aval)\n except KeyError as err:\n raise TypeError(f\"No shard_aval handler for type: {type(aval)}\") from err\nshard_aval_handlers: Dict[Type[core.AbstractValue], Callable[[int, int, Any], Any]] = {}\nshard_aval_handlers[core.AbstractUnit] = lambda size, axis, x: x\ndef _shard_abstract_array(size, axis: int, x):\n try:\n if x.shape[axis] != size:\n raise ValueError(f\"Axis size {size} does not match dimension {axis} of \"\n f\"shape {x.shape}\")\n except IndexError:\n raise ValueError(\"Cannot split a {x.dim}D value along axis {axis}\") from None\n return x.update(shape=tuple_delete(x.shape, axis))\nshard_aval_handlers[ShapedArray] = _shard_abstract_array\n\nMeshAxisName = Any\n\"\"\"\nArrayMapping specifies how an ndarray should map to mesh axes.\n\nNote that the ordering is crucial for the cases when this mapping is non-injective\n(i.e. when multiple mesh axes map to the same positional axis). Then, the\norder of entries of the mapping determines a major-to-minor order on mesh axes,\naccording to which chunks of the value along the repeated dimension will be assigned.\n\nFor example, consider a mapping {'x': 1, 'y': 1} and a mesh with shape {'x': 2, 'y': 3}.\nThe second dimension of the value would get chunked into 6 pieces, and assigned to the\nmesh in a way that treats 'y' as the fastest changing (minor) dimension. In this case,\nthat would mean that a flat list of chunks would get assigned to a flattened list of\nmesh devices without any modifications. 
If the mapping was {'y': 1, 'x': 1}, then the\nmesh devices ndarray would have to be transposed before flattening and assignment.\n\"\"\"\nArrayMapping = OrderedDictType[MeshAxisName, int]\n\nAxisResource = Tuple[Optional[Tuple[Any, ...]], ...]\n\ndef array_mapping_to_axis_resources(array_mapping: ArrayMapping) -> AxisResource:\n if not array_mapping:\n return tuple()\n max_index = array_mapping[max(array_mapping, key=array_mapping.get)] # type: ignore\n reverse_map = defaultdict(list)\n for axis, index in array_mapping.items():\n reverse_map[index].append(axis)\n return tuple(\n tuple(reverse_map[i]) if reverse_map[i] else None for i in range(max_index + 1)\n )\n\ndef aval_to_result_handler(\n sharding_spec: Optional[ShardingSpec],\n indices: Optional[Tuple[Index]],\n aval: core.AbstractValue,\n global_aval: Optional[ShapedArray] = None,\n out_axis_resources: Optional[AxisResource] = None,\n global_mesh = None,\n) -> Callable[[List[xb.xla_client.Buffer]], Any]:\n \"\"\"Returns a function for handling the raw buffers of a single output aval.\n\n Args:\n sharding_spec: Indicates how the output is sharded across devices, or None\n for non-array avals.\n indices: The pre-computed result of spec_to_indices, or None for non-array\n avals.\n aval: The output AbstractValue.\n global_aval: Global output AbstractValue. Used for creating GSDAs.\n out_axis_resources: A tuple specifying the sharding of outputs.\n Used for creating GSDAs.\n global_mesh: The global device mesh that generated this output. Used\n for creating GSDAs.\n\n Returns:\n A function for handling the Buffers that will eventually be produced\n for this output. The function will return an object suitable for returning\n to the user, e.g. a ShardedDeviceArray.\n \"\"\"\n try:\n return pxla_result_handlers[type(aval)](sharding_spec, indices, aval,\n global_aval, out_axis_resources, global_mesh)\n except KeyError as err:\n raise TypeError(\"No pxla_result_handler for type: {}\".format(type(aval))\n ) from err\n\nPxlaResultHandler = Callable[..., Callable[[List[xb.xla_client.Buffer]], Any]]\npxla_result_handlers: Dict[Type[core.AbstractValue], PxlaResultHandler] = {}\npxla_result_handlers[core.AbstractUnit] = lambda *_: lambda _: core.unit\n\ndef array_result_handler(sharding_spec, indices, aval: ShapedArray, global_aval,\n out_axis_resources, global_mesh):\n if config.jax_gsda_out:\n return gsda_array_result_handler(global_aval, global_mesh, out_axis_resources)\n else:\n return sda_array_result_handler(sharding_spec, indices, aval)\n\npxla_result_handlers[ShapedArray] = array_result_handler\npxla_result_handlers[ConcreteArray] = array_result_handler\n\ndef sda_array_result_handler(sharding_spec, indices, aval: ShapedArray):\n return lambda bufs: make_sharded_device_array(aval, sharding_spec, bufs,\n indices)\n\ndef gsda_array_result_handler(global_aval, global_mesh, out_axis_resources):\n from ..experimental.gsda import GlobalShardedDeviceArray\n\n return lambda bufs: GlobalShardedDeviceArray(\n global_aval.shape, global_mesh, out_axis_resources, bufs)\n\n### lazy device-memory persistence and result handling\n\n# TODO(jblespiau): Consider removing this option.\n_USE_CPP_SDA = True\n\n\ndef make_sharded_device_array(\n aval: ShapedArray,\n sharding_spec: Optional[ShardingSpec],\n # Any is for JAX extensions implementing their own buffer.\n device_buffers: List[Union[Any, xb.xla_client.Buffer]],\n indices: Optional[Tuple[Index, ...]] = None,\n):\n \"\"\"Returns a ShardedDeviceArray implementation based on arguments.\n\n Returns either a 
C++ SDA or a Python DeviceArray when the buffers are not\n JAX buffers.\n\n Args:\n aval: The `ShapedArray` for this array.\n sharding_spec: If `None`, assumes a pmap-style ShardedDeviceArrays over the\n first dimension.\n device_buffers: If a list of Jax `Buffer` objects, a C++ SDA will be\n returned (if the version is high enough). Otherwise, a Python object will\n be returned, for JAX extensions not implementing the C++ API.\n indices: For caching purposes, will be computed if `None`.\n \"\"\"\n if sharding_spec is None:\n sharded_aval = aval.update(shape=aval.shape[1:])\n sharding_spec = _pmap_sharding_spec(aval.shape[0], aval.shape[0], 1, None,\n sharded_aval, 0)\n\n if indices is None:\n indices = spec_to_indices(aval.shape, sharding_spec)\n\n if (_USE_CPP_SDA and\n (not device_buffers or\n isinstance(device_buffers[0], xb.xla_client.Buffer))):\n return pmap_lib.ShardedDeviceArray.make(\n aval, sharding_spec, device_buffers,\n indices, aval.weak_type)\n\n return _ShardedDeviceArray(aval, sharding_spec, device_buffers, indices)\n\n\nif _USE_CPP_SDA:\n ShardedDeviceArrayBase = pmap_lib.ShardedDeviceArrayBase # type: ignore\n # We want the C++ SDA to extend the DeviceArrayBase. We want this both to\n # benefit from its methods, and to have isinstance(x, DeviceArray) return true\n ShardedDeviceArrayBase.__bases__ = ((device_array.DeviceArray,) + # type: ignore\n ShardedDeviceArrayBase.__bases__)\n _SDA_BASE_CLASS = pmap_lib.ShardedDeviceArrayBase # type: ignore\nelse:\n _SDA_BASE_CLASS: Type[device_array.DeviceArray] = device_array.DeviceArray # type: ignore\n\n\nclass _ShardedDeviceArray(_SDA_BASE_CLASS): # type: ignore\n \"\"\"A ShardedDeviceArray is an ndarray sharded across devices.\n\n The purpose of a ShardedDeviceArray is to reduce the number of transfers when\n executing replicated computations, by allowing results to persist on the\n devices that produced them. That way dispatching a similarly replicated\n computation that consumes the same sharded memory layout does not incur any\n transfers.\n\n A ShardedDeviceArray represents one logical ndarray value, and simulates the\n behavior of an ndarray so that it can be treated by user code as an ndarray;\n that is, it is only an optimization to reduce transfers.\n\n Attributes:\n aval: A ShapedArray indicating the shape and dtype of this array.\n sharding_spec: describes how this array is sharded across `device_buffers`.\n device_buffers: the buffers containing the data for this array. Each buffer\n is the same shape and on a different device. Buffers are in row-major\n order, with replication treated as an extra innermost dimension.\n indices: the result of spec_to_indices(sharding_spec). Can optionally be\n precomputed for efficiency. A list the same length as\n `device_buffers`. Each index indicates what portion of the full array is\n stored in the corresponding device buffer, i.e. `array[indices[i]] ==\n device_buffers[i].to_py()`.\n \"\"\"\n __slots__ = [\n \"aval\", \"device_buffers\", \"sharding_spec\", \"indices\",\n \"_one_replica_buffer_indices\", \"_npy_value\"\n ]\n\n def __init__(self,\n aval: ShapedArray,\n sharding_spec: ShardingSpec,\n device_buffers: List[xb.xla_client.Buffer],\n indices: Optional[Tuple[Index, ...]] = None):\n super().__init__()\n\n # TODO(skye): assert invariants. 
Keep performance in mind though.\n if indices is None:\n indices = spec_to_indices(aval.shape, sharding_spec)\n\n self.aval = aval\n self.device_buffers = device_buffers\n self.sharding_spec = sharding_spec\n self.indices = indices\n self._npy_value = None\n self._one_replica_buffer_indices = None\n if config.jax_enable_checks:\n assert type(aval) is ShapedArray\n\n @property\n def shape(self):\n return self.aval.shape\n\n @property\n def dtype(self):\n return self.aval.dtype\n\n @property\n def size(self):\n return prod(self.aval.shape)\n\n @property\n def ndim(self):\n return len(self.aval.shape)\n\n def delete(self):\n if self.device_buffers is None:\n return\n for buf in self.device_buffers:\n buf.delete()\n self.device_buffers = None\n self._npy_value = None\n\n\ndef _sda_one_replica_buffer_indices(self):\n \"\"\"Indices of buffers containing one complete copy of the array data.\"\"\"\n if self._one_replica_buffer_indices is None:\n one_replica_indices = []\n seen_index_hashes = set()\n for i, index in enumerate(self.indices):\n hashed_index = _hashable_index(index)\n if hashed_index not in seen_index_hashes:\n one_replica_indices.append(i)\n seen_index_hashes.add(hashed_index)\n self._one_replica_buffer_indices = one_replica_indices\n return self._one_replica_buffer_indices\n\n\ndef _sda_copy_to_host_async(self):\n for buffer_index in self.one_replica_buffer_indices:\n self.device_buffers[buffer_index].copy_to_host_async()\n\n\ndef _sda_check_if_deleted(self):\n if self.device_buffers is None:\n raise ValueError(\"ShardedDeviceArray has been deleted.\")\n\n\ndef _sda_block_until_ready(self):\n self._check_if_deleted()\n for buf in self.device_buffers:\n buf.block_host_until_ready()\n return self\n\n\ndef _sda_value(self):\n if self._npy_value is None:\n self.copy_to_host_async()\n npy_value = np.empty(self.aval.shape, self.aval.dtype)\n for i in self.one_replica_buffer_indices:\n npy_value[self.indices[i]] = self.device_buffers[i].to_py()\n self._npy_value = npy_value\n return self._npy_value\n\n\ndef _sda__getitem__(self, idx):\n self._check_if_deleted()\n if not isinstance(idx, tuple):\n cidx = (idx,) + (slice(None),) * (len(self.aval.shape) - 1)\n else:\n cidx = idx + (slice(None),) * (len(self.aval.shape) - len(idx))\n if self._npy_value is None:\n try:\n buf_idx = self.indices.index(cidx)\n except ValueError:\n buf_idx = None\n if buf_idx is not None:\n buf = self.device_buffers[buf_idx]\n aval = ShapedArray(buf.xla_shape().dimensions(), self.aval.dtype)\n return device_array.make_device_array(aval, None, buf)\n return super(self.__class__, self).__getitem__(idx)\n\n\ndef _sda__iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (self[i] for i in range(self.shape[0]))\n\ndef _sda__reversed__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (self[i] for i in range(self.shape[0] - 1, -1, -1))\n\n\nfor sda in [_ShardedDeviceArray, pmap_lib.ShardedDeviceArray]:\n setattr(sda, \"one_replica_buffer_indices\",\n property(_sda_one_replica_buffer_indices))\n setattr(sda, \"copy_to_host_async\", _sda_copy_to_host_async)\n setattr(sda, \"_check_if_deleted\", _sda_check_if_deleted)\n setattr(sda, \"block_until_ready\", _sda_block_until_ready)\n setattr(sda, \"_value\", property(_sda_value))\n setattr(sda, \"__getitem__\", _sda__getitem__)\n setattr(sda, \"__iter__\", _sda__iter__)\n setattr(sda, \"__reversed__\", _sda__reversed__)\n\ndel 
(_sda_one_replica_buffer_indices, _sda_copy_to_host_async,\n _sda_check_if_deleted, _sda_block_until_ready, _sda_value, _sda__getitem__)\n\n\nShardedDeviceArray: Type[object]\nif _USE_CPP_SDA:\n ShardedDeviceArray = pmap_lib.ShardedDeviceArrayBase\nelse:\n ShardedDeviceArray = _ShardedDeviceArray\n\n\n\ndef _hashable_index(idx):\n return tree_map(lambda x: (x.start, x.stop) if type(x) == slice else x,\n idx)\n\n# The fast path is handled directly in shard_args().\n# TODO(skye): is there a simpler way to rewrite this using sharding_spec?\ndef _shard_sharded_device_array_slow_path(x, devices, indices):\n candidates = defaultdict(list)\n for buf, idx in safe_zip(x.device_buffers, x.indices):\n candidates[_hashable_index(idx)].append(buf)\n\n bufs = []\n for idx, device in safe_zip(indices, devices):\n # Look up all buffers that contain the correct slice of the logical array.\n candidates_list = candidates[_hashable_index(idx)]\n if not candidates_list:\n # This array isn't sharded correctly. Reshard it via host roundtrip.\n # TODO(skye): more efficient reshard?\n return shard_arg_handlers[type(x._value)](x._value, devices, indices)\n # Try to find a candidate buffer already on the correct device,\n # otherwise copy one of them.\n for buf in candidates_list:\n if buf.device() == device:\n bufs.append(buf)\n break\n else:\n bufs.append(buf.copy_to_device(device))\n return bufs\n\n\ndef _sharded_device_array_constant_handler(c, val, canonicalize_types=True):\n return xla.pyval_to_ir_constants(c, np.asarray(val),\n canonicalize_types=canonicalize_types)\n\n\ndef _register_handlers_for_sharded_device_array(sda):\n shard_arg_handlers[sda] = _shard_sharded_device_array_slow_path\n xla.register_constant_handler(sda, _sharded_device_array_constant_handler)\n\n core.pytype_aval_mappings[sda] = ConcreteArray\n dispatch.device_put_handlers[sda] = dispatch._device_put_array\n xla.pytype_aval_mappings[sda] = op.attrgetter(\"aval\")\n xla.canonicalize_dtype_handlers[sda] = identity\n\n_register_handlers_for_sharded_device_array(_ShardedDeviceArray)\n_register_handlers_for_sharded_device_array(pmap_lib.ShardedDeviceArray)\n\n### the xla_pmap primitive and its rules are comparable to xla_call in xla.py\n\ndef xla_pmap_impl(fun: lu.WrappedFun, *args,\n backend: Optional[str],\n axis_name: core.AxisName,\n axis_size: int,\n global_axis_size: Optional[int],\n devices: Optional[Sequence[Any]],\n name: str,\n in_axes: Sequence[Optional[int]],\n out_axes_thunk: Callable[[], Sequence[Optional[int]]],\n donated_invars: Sequence[bool],\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]]):\n abstract_args = unsafe_map(xla.abstractify, args)\n compiled_fun, fingerprint = parallel_callable(\n fun, backend, axis_name, axis_size, global_axis_size, devices, name,\n in_axes, out_axes_thunk, donated_invars, global_arg_shapes,\n *abstract_args)\n\n # Don't re-abstractify args unless logging is enabled for performance.\n if config.jax_distributed_debug:\n distributed_debug_log((\"Running pmapped function\", name),\n (\"python function\", fun.f),\n (\"devices\", devices),\n (\"abstract args\", map(xla.abstractify, args)),\n (\"fingerprint\", fingerprint))\n return compiled_fun(*args)\n\n\[email protected]\ndef parallel_callable(fun: lu.WrappedFun,\n backend_name: Optional[str],\n axis_name: core.AxisName,\n axis_size: int,\n global_axis_size: Optional[int],\n devices: Optional[Sequence[Any]],\n name: str,\n in_axes: Sequence[Optional[int]],\n out_axes_thunk: Callable[[], Sequence[Optional[int]]],\n donated_invars: 
Sequence[bool],\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]],\n *avals):\n pmap_computation = lower_parallel_callable(\n fun, backend_name, axis_name, axis_size, global_axis_size, devices, name,\n in_axes, out_axes_thunk, donated_invars, global_arg_shapes, avals)\n pmap_executable = pmap_computation.compile()\n return WeakRefList([pmap_executable.unsafe_call, pmap_executable.fingerprint])\n\n\[email protected](frozen=True)\nclass ParallelCallableInfo:\n backend: Any # TODO(frostig): really xla.Backend, fix xla_bridge annotations\n axis_name: core.AxisName\n axis_size: int\n global_axis_size: Optional[int]\n devices: Optional[Sequence[xla.Device]]\n in_axes: Iterable[Optional[int]]\n out_axes_thunk: Callable[[], Sequence[Optional[int]]]\n avals: Sequence[core.AbstractValue]\n\n @maybe_cached_property\n def local_devices(self):\n if self.devices:\n out = [d for d in self.devices\n if d.process_index == xb.process_index(self.backend)]\n assert len(out) > 0\n else:\n out = None # type: ignore\n return out\n\n @maybe_cached_property\n def out_axes(self):\n return self.out_axes_thunk()\n\n\nclass ShardInfo(NamedTuple):\n sharded_avals: Sequence[core.AbstractValue]\n out_sharded_avals: Sequence[core.AbstractValue]\n global_sharded_avals: Sequence[core.AbstractValue]\n num_local_shards: int\n num_global_shards: int\n\n\nclass ReplicaInfo(NamedTuple):\n jaxpr_replicas: int\n num_local_replicas: int\n num_global_replicas: int\n\n\ndef find_replicas(jaxpr, axis_size, global_axis_size):\n # TODO(skyewm): replace this with a chain of pmaps and/or sharded_jits\n jaxpr_replicas = dispatch.jaxpr_replicas(jaxpr)\n num_local_replicas = axis_size * jaxpr_replicas\n num_global_replicas = global_axis_size * jaxpr_replicas\n return ReplicaInfo(jaxpr_replicas, num_local_replicas, num_global_replicas)\n\n\ndef tuple_args(shards: ShardInfo):\n # tuplify long arg lists for TPU\n return len(shards.global_sharded_avals) > 100\n\n\ndef stage_parallel_callable(\n pci: ParallelCallableInfo,\n fun: lu.WrappedFun,\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]]):\n sharded_avals = tuple(\n shard_aval(pci.axis_size, axis, aval) if axis is not None else aval\n for axis, aval in safe_zip(pci.in_axes, pci.avals))\n if any(s is not None for s in global_arg_shapes):\n # TODO(skye): we could take this branch unconditionally if we handled\n # grad of global_arg_shapes correctly.\n global_sharded_avals = [\n aval.update(shape=shape) if shape is not None else aval\n for shape, aval in safe_zip(global_arg_shapes, sharded_avals)]\n else:\n global_sharded_avals = sharded_avals # type: ignore\n\n with core.extend_axis_env(pci.axis_name, pci.global_axis_size, None): # type: ignore\n jaxpr, out_sharded_avals, consts = pe.trace_to_jaxpr_final(\n fun, global_sharded_avals, pe.debug_info_final(fun, \"pmap\"))\n jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)\n\n assert len(out_sharded_avals) == len(pci.out_axes), (\n len(out_sharded_avals), len(pci.out_axes))\n\n # TODO(skye,mattjj): allow more collectives on multi-host as we test them, but\n # for now raise an error\n if pci.devices is not None:\n is_multi_host_pmap = len(pci.local_devices) != len(pci.devices)\n else:\n is_multi_host_pmap = xb.process_count(pci.backend) > 1\n if is_multi_host_pmap:\n check_multihost_collective_allowlist(jaxpr)\n\n replicas = find_replicas(jaxpr, pci.axis_size, pci.global_axis_size)\n parts = find_partitions(jaxpr)\n\n num_local_shards = replicas.num_local_replicas * parts.local_num_partitions\n num_global_shards = 
replicas.num_global_replicas * parts.num_partitions\n\n shards = ShardInfo(\n sharded_avals, out_sharded_avals, global_sharded_avals,\n num_local_shards, num_global_shards)\n\n return jaxpr, consts, replicas, parts, shards\n\n\ndef lower_parallel_callable(\n fun: lu.WrappedFun,\n backend_name: Optional[str],\n axis_name: core.AxisName,\n axis_size: int,\n global_axis_size: Optional[int],\n devices: Optional[Sequence[xla.Device]],\n name: str,\n in_axes: Iterable[Optional[int]],\n out_axes_thunk: Callable[[], Sequence[Optional[int]]],\n donated_invars: Sequence[bool],\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]],\n avals: Sequence[core.AbstractValue]):\n if devices is not None and len(devices) == 0:\n raise ValueError(\"'devices' argument to pmap must be non-empty, or None.\")\n\n # Determine global_axis_size for use in AxisEnv.\n # TODO(mattjj,skyewm): revive this check (inner_pmap always False now)\n # if xb.process_count() > 1 and global_axis_size is None and inner_pmap:\n # raise ValueError(\"'axis_size' must be specified for nested multi-host pmaps\")\n if (xb.process_count() == 1 and global_axis_size is not None and\n global_axis_size != axis_size):\n raise ValueError(\n f\"Specified axis_size {global_axis_size} doesn't match received \"\n f\"axis_size {axis_size}.\")\n\n if devices is not None and backend_name is None:\n backend = xb.get_device_backend(devices[0])\n else:\n backend = xb.get_backend(backend_name)\n\n must_run_on_all_devices = False\n no_nested_sharding = False\n if global_axis_size is None:\n if xb.process_count(backend) == 1:\n global_axis_size = axis_size\n elif devices:\n # This allows each host in a multi-host pmap to run on a different number\n # of devices, but precludes nested sharding (i.e. inner pmaps or\n # sharded_jits).\n global_axis_size = len(devices)\n no_nested_sharding = True\n else:\n # This assumes all hosts run on the same number of devices. We make sure\n # this assumption is true by requiring that the pmap is run on all devices\n # (and making the further assumption that each host has the same number of\n # devices). 
Nested sharding is ok in this case.\n global_axis_size = axis_size * xb.process_count(backend)\n assert all(\n len(xb.local_devices(process_index, backend)) == xb.local_device_count(backend)\n for process_index in range(xb.process_count(backend)))\n must_run_on_all_devices = True\n\n pci = ParallelCallableInfo(\n backend, axis_name, axis_size, global_axis_size, devices, in_axes,\n out_axes_thunk, avals)\n jaxpr, consts, replicas, parts, shards = stage_parallel_callable(\n pci, fun, global_arg_shapes)\n\n if logging.vlog_is_on(2):\n logging.vlog(2, \"sharded_avals: %s\", shards.sharded_avals)\n logging.vlog(2, \"global_sharded_avals: %s\", shards.global_sharded_avals)\n logging.vlog(2, \"num_replicas: %d num_local_replicas: %d\",\n replicas.num_global_replicas, replicas.num_local_replicas)\n logging.vlog(2, \"num_partitions: %d local_num_partitions: %d\",\n parts.num_partitions, parts.local_num_partitions)\n logging.vlog(2, \"arg_parts: %s\", parts.arg_parts)\n logging.vlog(2, \"local_arg_parts: %s\", parts.local_arg_parts)\n logging.vlog(2, \"out_parts: %s\", parts.out_parts)\n logging.vlog(2, \"local_out_parts: %s\", parts.local_out_parts)\n logging.vlog(2, \"devices: %s\", devices)\n logging.vlog(2, \"local_devices: %s\", pci.local_devices)\n\n if (xb.process_count(backend) > 1 and must_run_on_all_devices and\n shards.num_local_shards != xb.local_device_count(backend)):\n if shards.num_local_shards == axis_size:\n raise ValueError(\n f\"On multi-host platforms, the input to pmapped functions must have \"\n f\"leading axis size equal to the number of local devices if no \"\n f\"`devices` argument is specified. Got axis_size={axis_size}, \"\n f\"num_local_devices={xb.local_device_count(backend)}\")\n else:\n raise ValueError(\n f\"On multi-host platforms, pmapped functions must run across all \"\n f\"devices, i.e. num_replicas * num_partitions should equal the \"\n f\"number of local devices. Got \"\n f\"num_replicas={replicas.num_local_replicas}, \"\n f\"num_partitions={parts.num_partitions}, and \"\n f\"num_local_devices={xb.local_device_count(backend)}\")\n\n if no_nested_sharding and (\n replicas.jaxpr_replicas > 1 or parts.num_partitions > 1):\n raise ValueError(\n f\"On multi-host platforms, pmapped functions that both have `devices` \"\n f\"specified and contain an inner_pmap or sharded_jit must specify an \"\n f\"`axis_size` (or remove the `devices` argument). Got nested_replicas=\"\n f\"{replicas.jaxpr_replicas} and nested_partitions={parts.num_partitions}\")\n\n log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG\n logging.log(log_priority,\n \"Compiling %s (%d) for %d devices with args %s. 
(num_replicas=%d\"\n \" num_partitions=%d)\", fun.__name__, id(fun),\n shards.num_global_shards, avals, replicas.num_global_replicas,\n parts.num_partitions)\n\n axis_env = xla.AxisEnv(\n replicas.num_global_replicas, (axis_name,), (global_axis_size,))\n\n c = xc.XlaBuilder(\"pmap_{}\".format(fun.__name__))\n xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)\n replicated_args = [axis is None for axis in in_axes]\n xla_args, donated_invars = xla._xla_callable_args(\n c, shards.global_sharded_avals, tuple_args(shards),\n replicated=replicated_args,\n partitions=parts.arg_parts,\n donated_invars=donated_invars)\n with maybe_extend_axis_env(axis_name, global_axis_size, None): # type: ignore\n ctx = xla.TranslationContext(c, backend.platform, axis_env,\n extend_name_stack(wrap_name(name, 'pmap')))\n out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)\n build_out_tuple = partial(xops.Tuple, c, out_nodes)\n if parts.out_parts is not None:\n out_tuple = xb.with_sharding(c, parts.out_parts, build_out_tuple)\n else:\n out_tuple = build_out_tuple()\n\n if backend.platform in (\"gpu\", \"tpu\"):\n donated_invars = xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple),\n donated_invars, tuple_args(shards))\n built = c.Build(out_tuple)\n\n return PmapComputation(built, pci, replicas, parts, shards)\n\n\nclass PmapComputation:\n def __init__(self, hlo, *compile_args):\n self._executable = None\n self.hlo = hlo\n self.compile_args = compile_args\n\n def compile(self):\n if self._executable is None:\n self._executable = PmapExecutable.from_hlo(self.hlo, *self.compile_args)\n return self._executable\n\n\nclass PmapExecutable:\n __slots__ = ['xla_executable', 'unsafe_call', 'fingerprint', 'in_avals']\n\n def __init__(self, xla_executable, unsafe_call, fingerprint, in_avals):\n self.xla_executable = xla_executable\n self.unsafe_call = unsafe_call\n self.fingerprint = fingerprint\n self.in_avals = in_avals\n\n @staticmethod\n def from_hlo(xla_computation,\n pci: ParallelCallableInfo,\n replicas: ReplicaInfo,\n parts: 'PartitionInfo',\n shards: ShardInfo):\n devices = pci.devices\n if devices is None:\n if shards.num_global_shards > xb.device_count(pci.backend):\n msg = (\"compiling computation that requires {} logical devices, but only {} XLA \"\n \"devices are available (num_replicas={}, num_partitions={})\")\n raise ValueError(msg.format(shards.num_global_shards,\n xb.device_count(pci.backend),\n replicas.num_global_replicas,\n parts.num_partitions))\n # On a single host, we use the platform's default device assignment to\n # potentially take advantage of device locality. On multiple hosts, the\n # default device assignment may interleave different hosts' replicas,\n # violating pmap's semantics where data is sharded across replicas in\n # row-major order. 
Instead, manually create a device assignment that ensures\n # each host is responsible for a continguous set of replicas.\n if shards.num_global_shards > shards.num_local_shards:\n # TODO(skye): use a locality-aware assignment that satisfies the above\n # constraint.\n devices = [d for process_index in range(xb.process_count(pci.backend))\n for d in xb.local_devices(process_index, pci.backend)]\n else:\n devices = xb.get_backend(pci.backend).get_default_device_assignment(\n replicas.num_global_replicas, parts.num_partitions)\n else:\n if shards.num_local_shards != len(pci.local_devices):\n local_devices_str = \", \".join(map(str, pci.local_devices))\n if shards.num_local_shards == pci.axis_size:\n raise ValueError(\n f\"Leading axis size of input to pmapped function must equal the \"\n f\"number of local devices passed to pmap. Got axis_size=\"\n f\"{pci.axis_size}, num_local_devices={len(pci.local_devices)}.\\n\"\n f\"(Local devices available to pmap: {local_devices_str})\")\n else:\n raise ValueError(\n f\"pmapped function requires {shards.num_local_shards} local \"\n f\"devices to run due to nested pmapped or other parallel \"\n f\"functions, but only {len(pci.local_devices)} are available.\\n\"\n f\"(outer axis size: {pci.axis_size}, local devices available to \"\n f\"pmap: {local_devices_str})\")\n if shards.num_global_shards != len(devices):\n raise ValueError(\"compiling computation that creates %s shards, \"\n \"but %s devices were specified\" %\n (shards.num_global_shards, len(devices)))\n\n # 'devices' may be 1D or 2D at this point (e.g.\n # get_default_device_assignment() returns 2D assignment, caller may have\n # provided 1D list of devices).\n device_assignment = tree_map(lambda d: d.id, devices)\n # Convert to 2D in case it's 1D and we have > 1 partitions.\n device_assignment = np.array(device_assignment).reshape(\n (replicas.num_global_replicas, parts.num_partitions))\n # TODO(b/162356737): Enabling SPMD partitioning causes issues with some\n # non-partitioned workloads, so disable unless needed.\n use_spmd_partitioning = parts.num_partitions > 1\n compile_options = xb.get_compile_options(\n num_replicas=replicas.num_global_replicas,\n num_partitions=parts.num_partitions,\n device_assignment=device_assignment,\n use_spmd_partitioning=use_spmd_partitioning,\n )\n compile_options.parameter_is_tupled_arguments = tuple_args(shards)\n\n local_arg_parts_ = parts.local_arg_parts or [None] * len(pci.avals)\n input_sharding_specs = [\n _pmap_sharding_spec(replicas.num_local_replicas, pci.axis_size,\n parts.local_num_partitions, arg_parts, aval, in_axis)\n if aval is not core.abstract_unit else None\n for aval, arg_parts, in_axis in safe_zip(\n shards.sharded_avals, local_arg_parts_, pci.in_axes)]\n input_indices = [spec_to_indices(aval.shape, spec)\n if spec is not None else None\n for aval, spec in safe_zip(pci.avals, input_sharding_specs)]\n nouts = len(shards.out_sharded_avals)\n\n out_parts, local_out_parts = parts.out_parts, parts.local_out_parts\n if parts.out_parts is None:\n out_parts = (None,) * nouts\n if parts.local_out_parts is None:\n local_out_parts = (None,) * nouts\n\n local_out_avals = [\n get_local_aval(aval, parts, lparts)\n for aval, parts, lparts\n in safe_zip(shards.out_sharded_avals, out_parts, local_out_parts)]\n local_unmapped_avals = [\n core.unmapped_aval(pci.axis_size, pci.axis_name, out_axis, aval)\n if out_axis is not None else aval\n for aval, out_axis in safe_zip(local_out_avals, pci.out_axes)]\n\n out_specs = [\n 
_pmap_sharding_spec(replicas.num_local_replicas, pci.axis_size,\n parts.local_num_partitions, out_parts, aval, out_axis)\n if aval is not core.abstract_unit else None\n for out_parts, aval, out_axis in safe_zip(\n local_out_parts, local_out_avals, pci.out_axes)]\n handle_outs = avals_to_results_handler(\n replicas.num_local_replicas, parts.local_num_partitions, out_specs,\n local_unmapped_avals)\n\n if hasattr(pci.backend, \"compile_replicated\"):\n execute_fun = pci.backend.compile_replicated(\n xla_computation, compile_options, input_indices, input_sharding_specs,\n handle_outs)\n # TODO(frostig): need `compile_replicated` to give us the XLA executable\n return PmapExecutable(None, execute_fun, None, pci.avals)\n\n compiled = dispatch.compile_or_get_cached(\n pci.backend, xla_computation, compile_options)\n handle_args = InputsHandler(\n compiled.local_devices(), input_sharding_specs, input_indices)\n execute_fun = partial(\n execute_replicated, compiled, pci.backend, handle_args, handle_outs)\n fingerprint = getattr(compiled, \"fingerprint\", None)\n\n return PmapExecutable(compiled, execute_fun, fingerprint, pci.avals)\n\n def call(self, *args):\n # TODO(frostig): do we need to check sharding and sharded avals?\n arg_avals = map(xla.abstractify, args)\n dispatch.check_arg_avals_for_call(self.in_avals, arg_avals)\n return self.unsafe_call(*args)\n\n\nmulti_host_supported_collectives: Set[core.Primitive] = set()\n\n\ndef check_multihost_collective_allowlist(jaxpr):\n used_collectives = set(xla.jaxpr_collectives(jaxpr))\n if not used_collectives.issubset(multi_host_supported_collectives):\n bad_collectives = used_collectives - multi_host_supported_collectives\n msg = \"using collectives that aren't supported for multi-host: {}\"\n raise TypeError(msg.format(\", \".join(map(str, bad_collectives))))\n\n\nPartitionsOrReplicated = Optional[Tuple[int, ...]]\n\nclass PartitionInfo(NamedTuple):\n arg_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n out_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n num_partitions: int\n local_arg_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n local_out_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n local_num_partitions: Optional[int]\n\ndef _find_partitions(jaxpr):\n \"\"\"Returns (in_partitions, out_partitions, num_partitions, local_in_parts,\n local_out_parts, local_num_partitions).\n \"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive.name == \"sharded_call\":\n if len(jaxpr.eqns) > 1:\n raise NotImplementedError(\n \"pmap of sharded_jit + non-sharded operations not yet implemented.\")\n num_partitions = reconcile_num_partitions(eqn.params[\"call_jaxpr\"],\n eqn.params[\"nparts\"])\n return (eqn.params[\"in_parts\"],\n eqn.params[\"out_parts_thunk\"](),\n num_partitions,\n eqn.params[\"local_in_parts\"],\n eqn.params[\"local_out_parts_thunk\"](),\n eqn.params[\"local_nparts\"])\n return None, None, 1, None, None, None\n\ndef find_partitions(jaxpr) -> PartitionInfo:\n (arg_parts, out_parts, num_partitions, local_arg_parts, local_out_parts,\n local_num_partitions) = _find_partitions(jaxpr)\n\n if local_num_partitions is None:\n local_num_partitions = num_partitions\n if local_arg_parts is None:\n local_arg_parts = arg_parts\n if local_out_parts is None:\n local_out_parts = out_parts\n\n return PartitionInfo(arg_parts, out_parts, num_partitions,\n local_arg_parts, local_out_parts, local_num_partitions)\n\n\ndef reconcile_num_partitions(jaxpr, outer_num_parts: Optional[int]):\n \"\"\"Returns the total number of partitions to use.\n\n 
Validates that any inner partitioning matches outer_num_parts if provided, and\n returns the number of partitions to use based on outer_num_parts and any inner\n partitioning.\n \"\"\"\n inner_num_parts = _inner_partitions(jaxpr, outer_num_parts)\n if outer_num_parts is None and inner_num_parts is None:\n # No partitions specified anywhere, everything is replicated.\n return 1\n if outer_num_parts is None:\n return inner_num_parts\n return outer_num_parts\n\n\ndef _inner_partitions(jaxpr, expected_num_parts: Optional[int]):\n \"\"\"Returns the total number of partitions from PartitionSpecs inside `jaxpr`.\n\n Also validates that this number matches `expected_num_parts` if provided.\n \"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive.name in [\"sharding_constraint\", \"infeed\"]:\n parts = eqn.params[\"partitions\"]\n nparts = get_num_partitions(parts)\n if expected_num_parts is None:\n expected_num_parts = nparts\n elif nparts is not None and nparts != expected_num_parts:\n # TODO(skye): raise this error as we trace the jaxpr\n raise ValueError(\n f\"with_sharding_constraint with partitions={parts} \"\n f\"(total partitions: {nparts}) doesn't match expected number of \"\n f\"partitions: {expected_num_parts}. If these partitions look \"\n f\"right, check outer sharded_jit and/or other \"\n f\"with_sharding_constraint calls.\")\n else:\n for subjaxpr in core.jaxprs_in_params(eqn.params):\n expected_num_parts = _inner_partitions(subjaxpr, expected_num_parts)\n return expected_num_parts\n\n\ndef get_num_partitions(*partitions):\n partition_specs = tree_flatten(partitions)[0]\n if len(partition_specs) == 0:\n # Everything is specified as replicated (all Nones).\n return None\n num_partitions_set = {np.prod(spec) for spec in partition_specs}\n if len(num_partitions_set) > 1:\n raise ValueError(\n f\"All partition specs must use the same number of total partitions, \"\n f\"got {partitions}, with distinct number of partitions \"\n f\"{num_partitions_set} (the total number of partitions is the product \"\n f\"of a partition spec)\")\n assert len(num_partitions_set) == 1\n return num_partitions_set.pop()\n\n\ndef get_global_aval(local_aval, global_parts: PartitionsOrReplicated,\n local_parts: PartitionsOrReplicated):\n if local_aval is core.abstract_unit:\n return local_aval\n if global_parts is None:\n return local_aval\n assert local_parts is not None\n global_shape = [dim * _safe_div(ngparts, nlparts)\n for dim, ngparts, nlparts\n in safe_zip(local_aval.shape, global_parts, local_parts)]\n return local_aval.update(shape=global_shape)\n\n\ndef get_local_aval(global_aval, global_parts: PartitionsOrReplicated,\n local_parts: PartitionsOrReplicated):\n if global_aval is core.abstract_unit:\n return global_aval\n if global_parts is None:\n return global_aval\n assert local_parts is not None\n local_shape = [_safe_div(dim, _safe_div(ngparts, nlparts))\n for dim, ngparts, nlparts\n in safe_zip(global_aval.shape, global_parts, local_parts)]\n return global_aval.update(shape=local_shape)\n\n\ndef _safe_div(x, y):\n result, ragged = divmod(x, y)\n assert not ragged, f\"{x} % {y} != 0\"\n return result\n\n\nclass InputsHandler:\n __slots__ = (\"handler\", \"local_devices\", \"sharding_specs\", \"input_indices\")\n\n def __init__(self, local_devices, sharding_specs, input_indices):\n self.handler = partial(shard_args, local_devices, input_indices)\n self.local_devices = local_devices\n self.sharding_specs = sharding_specs\n self.input_indices = input_indices\n\n def __call__(self, input_buffers):\n return 
self.handler(input_buffers)\n\n\nclass ResultsHandler:\n __slots__ = (\"handlers\", \"out_specs\", \"out_indices\", \"unmapped_local_out_avals\")\n\n def __init__(self, handlers, out_specs, out_indices, unmapped_local_out_avals):\n self.out_specs = out_specs\n self.out_indices = out_indices\n self.handlers = handlers\n self.unmapped_local_out_avals = unmapped_local_out_avals\n\n def __call__(self, out_bufs):\n return [h(bufs) for h, bufs in safe_zip(self.handlers, out_bufs)]\n\n\ndef avals_to_results_handler(\n nrep,\n npart,\n out_specs,\n unmapped_local_out_avals,\n global_out_avals: Optional[Sequence[ShapedArray]] = None,\n out_axis_resources: Optional[Sequence[AxisResource]] = None,\n global_mesh=None):\n out_indices = [spec_to_indices(aval.shape, spec)\n if aval is not core.abstract_unit else None\n for aval, spec in safe_zip(unmapped_local_out_avals, out_specs)] # pytype: disable=attribute-error\n if global_out_avals and out_axis_resources and global_mesh:\n handlers = [\n aval_to_result_handler(spec, idcs, aval, global_aval, out_axis, global_mesh)\n for spec, idcs, aval, global_aval, out_axis in safe_zip(\n out_specs, out_indices, unmapped_local_out_avals,\n global_out_avals, out_axis_resources)\n ]\n else:\n handlers = [\n aval_to_result_handler(spec, idcs, aval)\n for spec, idcs, aval, in safe_zip(out_specs, out_indices,\n unmapped_local_out_avals)\n ]\n\n return ResultsHandler(handlers, out_specs, out_indices, unmapped_local_out_avals)\n\ndef replicate(val, axis_size, nrep, devices=None, backend=None, in_axis=0):\n \"\"\"Replicates ``val`` across multiple devices.\n\n Args:\n val: the value to be replicated.\n axis_size: the length of the output, i.e. the logical number of replicas to\n create. Usually equal to `nrep`, but in the case of nested pmaps, `nrep` may\n be a multiple of `axis_size`.\n nrep: the number of replicas to create. If ``devices`` is set, must be equal\n to ``len(devices)``.\n devices: the devices to replicate across. 
If None, ``nrep`` will be used to\n generate a default device assignment.\n backend: string specifying which backend to use.\n in_axis: axis along which the value is to be replciated.\n\n Returns:\n A ShardedDeviceArray of length `axis_size` where each shard is equal to\n ``val``.\n \"\"\"\n device_count = (len(devices) if devices else xb.local_device_count(backend))\n if nrep > device_count:\n msg = (\"Cannot replicate across %d replicas because only %d local devices \"\n \"are available.\" % (nrep, device_count))\n if devices:\n msg += (\" (local devices = %s)\"\n % \", \".join(map(str, devices)) if devices else str(None))\n raise ValueError(msg)\n\n if devices is None:\n assert nrep is not None\n # TODO(skye): use different device assignment on multihost\n devices = xb.get_backend(backend).get_default_device_assignment(nrep)\n assert nrep == len(devices)\n\n aval = xla.abstractify(val) # type: ShapedArray\n if in_axis is not None:\n replicated_aval = aval.update(shape=(axis_size,) + aval.shape)\n else:\n replicated_aval = aval\n # TODO(skye): figure out how partitioning should work here\n sharding_spec = _pmap_sharding_spec(nrep, axis_size, 1, None, aval, in_axis)\n device_buffers = device_put(val, devices, replicate=True)\n return make_sharded_device_array(replicated_aval, sharding_spec,\n device_buffers)\n\n\ndef _pmap_sharding_spec(nrep, axis_size, npart, parts, sharded_aval,\n map_axis: Optional[int]) -> ShardingSpec:\n \"\"\"Sharding spec for arguments or results of a pmap.\n Args:\n nrep: number of local XLA replicas (product of local axis sizes)\n axis_size: local axis size for outer pmap\n npart: total number of XLA partitions (required by sharded_jit calls)\n parts: the partitioning of the value or None\n sharded_aval: the aval of the value inside the outer pmap, an instance of\n a ShapedArray.\n map_axis: the axis along which the value is mapped in the outer pmap\n Returns:\n A ShardingSpec.\n \"\"\"\n assert isinstance(sharded_aval, ShapedArray), sharded_aval\n replication_factor, ragged = divmod(nrep, axis_size)\n assert not ragged\n # get the sharding spec from inner sharded_jits as if we weren't in a pmap\n pspec = partitioned_sharding_spec(npart, parts, sharded_aval)\n maybe_replicate = () if replication_factor == 1 else (Replicated(replication_factor),)\n if map_axis is not None:\n sharded_in_axis = sum(not isinstance(s, NoSharding) for s in pspec.sharding[:map_axis])\n def shift_sharded_axis(a: MeshDimAssignment):\n if isinstance(a, ShardedAxis) and a.axis >= sharded_in_axis:\n return ShardedAxis(a.axis + 1)\n return a\n # replication_factor represents the product of inner pmaps, so it goes\n # after the outer pmapped axis at index 0\n return ShardingSpec(\n sharding=tuple_insert(pspec.sharding, map_axis, Unstacked(axis_size)),\n mesh_mapping=it.chain([ShardedAxis(sharded_in_axis)],\n maybe_replicate,\n map(shift_sharded_axis, pspec.mesh_mapping)))\n else:\n return ShardingSpec(\n sharding=pspec.sharding,\n mesh_mapping=(Replicated(axis_size),) + maybe_replicate + pspec.mesh_mapping)\n\ndef partitioned_sharding_spec(num_partitions: int,\n partitions: Optional[Sequence[int]],\n aval) -> ShardingSpec:\n if partitions is None:\n maybe_replicate = () if num_partitions == 1 else (Replicated(num_partitions),)\n return ShardingSpec(\n sharding=[_UNSHARDED_INSTANCE] * len(aval.shape),\n mesh_mapping=maybe_replicate)\n else:\n assert len(partitions) == len(aval.shape)\n return ShardingSpec(\n # Chunked expects a list of integers\n sharding=map(Chunked, [[x] for x in 
partitions]),\n mesh_mapping=map(ShardedAxis, range(len(partitions))))\n\n\ndef execute_replicated(compiled, backend, in_handler, out_handler, *args):\n input_bufs = in_handler(args)\n out_bufs = compiled.execute_sharded_on_local_devices(input_bufs)\n if dispatch.needs_check_special():\n for bufs in out_bufs:\n dispatch.check_special(\"parallel computation\", bufs)\n return out_handler(out_bufs)\n\n\nxla_pmap_p = core.MapPrimitive('xla_pmap')\nxla_pmap = xla_pmap_p.bind\nxla_pmap_p.def_impl(xla_pmap_impl)\n\n# Set param update handlers to update `donated_invars` just like xla_call_p\npe.call_param_updaters[xla_pmap_p] = pe.call_param_updaters[xla.xla_call_p]\nad.call_param_updaters[xla_pmap_p] = ad.call_param_updaters[xla.xla_call_p]\nad.call_transpose_param_updaters[xla_pmap_p] = \\\n ad.call_transpose_param_updaters[xla.xla_call_p]\n\ndef _pmap_translation_rule(c, axis_env,\n in_nodes, name_stack, axis_name, axis_size,\n global_axis_size, devices, name,\n call_jaxpr, *, backend=None, in_axes, out_axes,\n donated_invars, global_arg_shapes):\n del donated_invars # Unused.\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n if axis_env.names and devices is not None:\n raise ValueError(\"Nested pmap with explicit devices argument.\")\n if global_axis_size is None:\n global_axis_size = axis_size\n new_env = xla.extend_axis_env(axis_env, axis_name, global_axis_size)\n # Shard the in_nodes that are mapped\n in_avals = [v.aval for v in call_jaxpr.invars]\n in_nodes_sharded = (\n _xla_shard(c, aval, new_env, in_node, in_axis) if in_axis is not None else in_node\n for aval, in_node, in_axis in safe_zip(in_avals, in_nodes, in_axes))\n\n with maybe_extend_axis_env(axis_name, global_axis_size, None): # type: ignore\n ctx = xla.TranslationContext(\n c, backend, new_env,\n extend_name_stack(name_stack, wrap_name(name, 'pmap')))\n sharded_outs = xla.jaxpr_subcomp(ctx, call_jaxpr, (), *in_nodes_sharded)\n out_avals = [v.aval for v in call_jaxpr.outvars]\n outs = [_xla_unshard(c, aval, new_env, out_axis, shard, backend=backend)\n for aval, out_axis, shard in safe_zip(out_avals, out_axes, sharded_outs)]\n return xops.Tuple(c, outs)\n\nxla.call_translations[xla_pmap_p] = _pmap_translation_rule\nad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)\n\ndef _xla_shard(c, aval, axis_env, x, in_axis):\n if aval is core.abstract_unit:\n return x\n elif aval is core.abstract_token:\n return x\n elif isinstance(aval, ShapedArray):\n dims = list(c.get_shape(x).dimensions())\n zero = xops.Constant(c, np.zeros((), dtype=np.uint32))\n idxs = [zero] * (len(dims) - 1)\n idxs.insert(in_axis, _unravel_index(c, axis_env))\n dims_unsqueezed = dims.copy()\n dims_unsqueezed[in_axis] = 1\n dims_squeezed = dims.copy()\n dims_squeezed.pop(in_axis)\n return xops.Reshape(xops.DynamicSlice(x, idxs, dims_unsqueezed), dims_squeezed)\n else:\n raise TypeError((aval, c.get_shape(x)))\n\n# TODO(b/110096942): more efficient gather\ndef _xla_unshard(c, aval, axis_env, out_axis, x, backend):\n if aval is core.abstract_unit:\n return x\n elif aval is core.abstract_token:\n return x\n elif isinstance(aval, ShapedArray):\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n convert_bool = (np.issubdtype(aval.dtype, np.bool_)\n and xb.get_backend(backend).platform in ('cpu', 'gpu'))\n if convert_bool:\n x = xops.ConvertElementType(\n x, xla.dtype_to_primitive_type(np.dtype(np.float32)))\n\n xla_shape = 
c.get_shape(x)\n dims = list(xla_shape.dimensions())\n padded = xops.Broadcast(\n xops.Constant(c, np.array(0, xla_shape.numpy_dtype())),\n [axis_env.sizes[-1]] + dims)\n zero = xops.Constant(c, np.zeros((), dtype=np.uint32))\n idxs = [_unravel_index(c, axis_env)] + [zero] * len(dims)\n padded = xops.DynamicUpdateSlice(padded, xops.Reshape(x, [1] + dims), idxs)\n replica_groups_protos = xc.make_replica_groups(\n xla.axis_groups(axis_env, axis_env.names[-1]))\n out = xops.CrossReplicaSum(padded, replica_groups_protos)\n if out_axis != 0:\n # TODO(apaszke,mattjj): Change the indices to DynamicUpdateSlice instead\n perm = list(range(1, len(dims)))\n perm.insert(out_axis, 0)\n out = xops.Transpose(out, perm)\n\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n if convert_bool:\n nonzero = xops.Ne(out, xops.Constant(c, np.array(0, dtype=np.float32)))\n out = xops.ConvertElementType(\n nonzero, xla.dtype_to_primitive_type(np.dtype(np.bool_)))\n return out\n else:\n raise TypeError((aval, c.get_shape(x)))\n\ndef _unravel_index(c, axis_env):\n div = xops.Constant(c, np.array(axis_env.nreps // prod(axis_env.sizes),\n np.uint32))\n mod = xops.Constant(c, np.array(axis_env.sizes[-1], np.uint32))\n return xops.Rem(xops.Div(xops.ReplicaId(c), div), mod)\n\n# ------------------- xmap -------------------\n\nclass Mesh:\n\n def __init__(self, devices: np.ndarray, axis_names: Sequence[MeshAxisName]):\n assert devices.ndim == len(axis_names)\n # TODO: Make sure that devices are unique? At least with the quick and\n # dirty check that the array size is not larger than the number of\n # available devices?\n self.devices = devices.copy()\n self.devices.flags.writeable = False\n self.axis_names = tuple(axis_names)\n\n def __eq__(self, other):\n if not isinstance(other, Mesh):\n return False\n return (self.axis_names == other.axis_names and\n np.array_equal(self.devices, other.devices))\n\n def __hash__(self):\n if not hasattr(self, '_hash'):\n self._hash = hash((self.axis_names, tuple(self.devices.flat)))\n return self._hash\n\n def __setattr__(self, name, value):\n if hasattr(self, name):\n raise RuntimeError(\"Cannot reassign attributes of immutable mesh objects\")\n super().__setattr__(name, value)\n\n @property\n def shape(self):\n return OrderedDict((name, size) for name, size in safe_zip(self.axis_names, self.devices.shape))\n\n @property\n def size(self):\n return np.prod(list(self.shape.values()))\n\n @property\n def empty(self):\n return self.devices.ndim == 0\n\n @property\n def is_multi_process(self):\n return self.shape != self.local_mesh.shape\n\n @maybe_cached_property\n def local_mesh(self):\n if self.empty:\n return self\n process_index = xb.process_index()\n is_local_device = np.vectorize(\n lambda d: d.process_index == process_index, otypes=[bool])(self.devices)\n subcube_indices = []\n # We take the smallest slice of each dimension that doesn't skip any local device.\n for axis in range(self.devices.ndim):\n other_axes = tuple_delete(tuple(range(self.devices.ndim)), axis)\n # NOTE: This re-reduces over many axes multiple times, so we could definitely\n # optimize it, but I hope it won't be a bottleneck anytime soon.\n local_slices = is_local_device.any(other_axes, keepdims=False)\n nonzero_indices = np.flatnonzero(local_slices)\n start, end = int(np.min(nonzero_indices)), int(np.max(nonzero_indices))\n subcube_indices.append(slice(start, end + 1))\n subcube_indices = tuple(subcube_indices)\n # We only end up with all conditions being true if the local devices 
formed a\n # subcube of the full array. This is because we were biased towards taking a\n # \"hull\" spanned by the devices, and in case the local devices don't form a\n # subcube that hull will contain non-local devices.\n if not is_local_device[subcube_indices].all():\n raise ValueError(\"Devices connected to a single host must form a contiguous \"\n \"subcube of the global device mesh\")\n return Mesh(self.devices[subcube_indices], self.axis_names)\n\n @property\n def device_ids(self):\n assert not self.empty\n return np.vectorize(lambda d: d.id, otypes=[int])(self.devices)\n\n def __repr__(self):\n if self.empty:\n return \"Mesh([], ())\"\n return f\"Mesh({self.device_ids!r}, {self.axis_names!r})\"\n\n @maybe_cached_property\n def local_devices(self):\n process_index = xb.process_index()\n return [d for d in self.devices.flat if d.process_index == process_index]\n\n def local_to_global(self, axes: ArrayMapping, aval):\n return untile_aval_nd(self.shape, axes,\n tile_aval_nd(self.local_mesh.shape, axes, aval))\n\n def global_to_local(self, axes: ArrayMapping, aval):\n return untile_aval_nd(self.local_mesh.shape, axes,\n tile_aval_nd(self.shape, axes, aval))\n\n\ndef tile_aval_nd(axis_sizes, in_axes: ArrayMapping, aval, tiling_sizes=None):\n if tiling_sizes is None:\n tiling_sizes = axis_sizes\n if aval is core.abstract_unit:\n return aval\n assert isinstance(aval, ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, axis in in_axes.items():\n assert shape[axis] % tiling_sizes[name] == 0\n assert name not in named_shape\n named_shape[name] = axis_sizes[name]\n shape[axis] //= tiling_sizes[name]\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\ndef untile_aval_nd(axis_sizes, out_axes: ArrayMapping, aval):\n if aval is core.abstract_unit:\n return aval\n assert isinstance(aval, ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, axis in out_axes.items():\n shape[axis] *= axis_sizes[name]\n named_shape.pop(name, None) # The name might be missing --- it's a broadcast.\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\n\nclass SPMDBatchTrace(batching.BatchTrace):\n def get_axis_primitive_batcher(self, primitive, frame):\n if primitive in spmd_primitive_batchers:\n return partial(spmd_primitive_batchers[primitive],\n frame.size, frame.name, frame.main_trace.trace_type)\n return super().get_axis_primitive_batcher(primitive, frame)\n\n\nspmd_primitive_batchers: Dict[core.Primitive, Callable] = {}\n\n\ndef vtile_by_mesh(fun: lu.WrappedFun,\n mesh: Mesh,\n in_axes: Sequence[ArrayMapping],\n out_axes: Sequence[ArrayMapping]):\n # We vectorize in reversed order, because vmap is often biased towards\n # moving the batch axis to the front, and this way of stacking transforms\n # will order the batch axes according to the mesh axis order.\n # Not strictly necessary, but seems nicer than reversing it?\n for name, size in reversed(mesh.shape.items()):\n fun = batching.vtile(fun,\n tuple(a.get(name, None) for a in in_axes),\n tuple(a.get(name, None) for a in out_axes),\n tile_size=size,\n axis_name=name,\n main_type=SPMDBatchTrace)\n return fun\n\ndef lower_mesh_computation(\n fun: lu.WrappedFun,\n transformed_name: str,\n mesh: Mesh,\n in_axes: Sequence[ArrayMapping],\n out_axes: Union[Sequence[ArrayMapping], Callable[[], Sequence[ArrayMapping]]],\n donated_invars: Sequence[bool],\n spmd_lowering: bool,\n local_in_untiled_avals: Sequence[core.ShapedArray],\n tile_by_mesh_axes: bool):\n assert 
not mesh.empty\n backend = xb.get_device_backend(mesh.devices.flat[0])\n\n local_mesh = mesh.local_mesh\n global_axis_sizes = mesh.shape\n local_axis_sizes = local_mesh.shape\n\n log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG\n logging.log(log_priority,\n \"Compiling %s (%d) for %s mesh with args %s. Argument mapping: \"\n \"%s.\",\n getattr(fun, '__name__', '<unnamed function>'), id(fun),\n tuple(global_axis_sizes.items()), local_in_untiled_avals,\n in_axes)\n\n # 1. Trace to jaxpr and preprocess/verify it\n # Note that we tile by the local axis sizes, but use global axis sizes for named_shape\n in_tiled_avals = [tile_aval_nd(global_axis_sizes, aval_in_axes, aval,\n tiling_sizes=local_axis_sizes)\n for aval, aval_in_axes in safe_zip(local_in_untiled_avals, in_axes)]\n if spmd_lowering:\n # TODO: Consider handling xmap's 'vectorize' in here. We can vmap once instead of vtile twice!\n if tile_by_mesh_axes:\n assert not callable(out_axes)\n fun = vtile_by_mesh(fun, mesh, in_axes, out_axes)\n global_in_untiled_avals = [untile_aval_nd(global_axis_sizes, aval_in_axes, aval)\n for aval, aval_in_axes in safe_zip(in_tiled_avals, in_axes)]\n in_jaxpr_avals = global_in_untiled_avals\n else:\n assert tile_by_mesh_axes\n in_jaxpr_avals = in_tiled_avals\n with core.extend_axis_env_nd(mesh.shape.items()):\n jaxpr, out_jaxpr_avals, consts = pe.trace_to_jaxpr_final(fun, in_jaxpr_avals)\n if callable(out_axes):\n out_axes = out_axes()\n assert len(out_axes) == len(out_jaxpr_avals)\n if spmd_lowering:\n global_out_untiled_avals = out_jaxpr_avals\n out_tiled_avals = [tile_aval_nd(global_axis_sizes, aval_out_axes, aval)\n for aval, aval_out_axes in safe_zip(global_out_untiled_avals, out_axes)]\n else:\n out_tiled_avals = out_jaxpr_avals\n local_out_untiled_avals = [untile_aval_nd(local_axis_sizes, aval_out_axes, aval)\n for aval, aval_out_axes in safe_zip(out_tiled_avals, out_axes)]\n _sanitize_mesh_jaxpr(jaxpr)\n if local_mesh.shape != mesh.shape:\n check_multihost_collective_allowlist(jaxpr)\n jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)\n\n # 3. 
Build up the HLO\n c = xc.XlaBuilder(f\"xmap_{fun.__name__}\")\n xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)\n tuple_args = len(in_jaxpr_avals) > 100 # pass long arg lists as tuple for TPU\n in_partitions: Optional[List]\n if spmd_lowering:\n replicated_args = [False] * len(in_jaxpr_avals)\n global_sharding_spec = mesh_sharding_specs(global_axis_sizes, mesh.axis_names)\n in_partitions = [global_sharding_spec(aval, aval_in_axes).sharding_proto()\n if aval is not core.abstract_unit else None\n for aval, aval_in_axes in safe_zip(global_in_untiled_avals, in_axes)]\n out_partitions = [global_sharding_spec(aval, aval_out_axes).sharding_proto()\n for aval, aval_out_axes in safe_zip(global_out_untiled_avals, out_axes)]\n partitions_proto = True\n axis_env = xla.AxisEnv(nreps=1, names=(), sizes=()) # All named axes have been vmapped\n else:\n replicated_args = [not axis for axis in in_axes]\n in_partitions = None\n partitions_proto = False\n axis_env = xla.AxisEnv(nreps=mesh.size,\n names=tuple(global_axis_sizes.keys()),\n sizes=tuple(global_axis_sizes.values()))\n xla_args, donated_invars = xla._xla_callable_args(\n c, in_jaxpr_avals, tuple_args,\n replicated=replicated_args,\n partitions=in_partitions,\n partitions_proto=partitions_proto,\n donated_invars=donated_invars)\n with core.extend_axis_env_nd(mesh.shape.items()):\n ctx = xla.TranslationContext(\n c, backend.platform, axis_env,\n extend_name_stack(wrap_name(transformed_name, 'xmap')))\n out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)\n if spmd_lowering:\n out_partitions_t = xb.tuple_sharding_proto(out_partitions)\n out_tuple = xb.with_sharding_proto(c, out_partitions_t, xops.Tuple, c, out_nodes)\n else:\n out_tuple = xops.Tuple(c, out_nodes)\n\n if backend.platform in (\"gpu\", \"tpu\"):\n xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple), donated_invars,\n tuple_args)\n # TODO: Warn about unused donations?\n\n built = c.Build(out_tuple)\n return MeshComputation(\n built, donated_invars, mesh, local_in_untiled_avals,\n local_out_untiled_avals, (out_jaxpr_avals if spmd_lowering else None),\n in_axes, out_axes, spmd_lowering, tuple_args)\n\n\nclass MeshComputation:\n def __init__(self, hlo, donated_invars, *compile_args):\n self._executable = None\n self._hlo = hlo\n self._donated_invars = donated_invars\n self.compile_args = compile_args\n\n def hlo(self):\n # this is a method for api consistency with xla.XlaComputation\n return self._hlo\n\n def compile(self,\n _allow_propagation_to_outputs : bool = False,\n _allow_compile_replicated : bool = True) -> 'MeshExecutable':\n if self._executable is None:\n self._executable = MeshExecutable.from_hlo(\n self._hlo, *self.compile_args,\n _allow_propagation_to_outputs=_allow_propagation_to_outputs,\n _allow_compile_replicated=_allow_compile_replicated) # type: ignore\n return self._executable\n\n\nclass MeshExecutable:\n __slots__ = ['xla_executable', 'unsafe_call', '_local_in_untiled_avals']\n\n def __init__(self, xla_executable, unsafe_call, local_in_untiled_avals):\n self.xla_executable = xla_executable\n self.unsafe_call = unsafe_call\n self._local_in_untiled_avals = local_in_untiled_avals\n\n @staticmethod\n def from_hlo(computation: xc.XlaComputation,\n mesh: Mesh,\n local_in_untiled_avals: Sequence[ShapedArray],\n local_out_untiled_avals: Sequence[ShapedArray],\n global_out_avals: Optional[Sequence[ShapedArray]],\n in_axes: Sequence[ArrayMapping],\n out_axes: Sequence[ArrayMapping],\n spmd_lowering: bool, tuple_args: bool,\n 
_allow_propagation_to_outputs: bool,\n _allow_compile_replicated: bool):\n assert not mesh.empty\n backend = xb.get_device_backend(mesh.devices.flat[0])\n\n local_mesh = mesh.local_mesh\n local_axis_sizes = local_mesh.shape\n if spmd_lowering:\n num_replicas, num_partitions = 1, mesh.size\n num_local_replicas, num_local_partitions = 1, local_mesh.size\n else:\n num_replicas, num_partitions = mesh.size, 1\n num_local_replicas, num_local_partitions = local_mesh.size, 1\n device_assignment = mesh.device_ids.reshape((num_replicas, num_partitions))\n compile_options = xb.get_compile_options(\n num_replicas=num_replicas,\n num_partitions=num_partitions,\n device_assignment=device_assignment,\n use_spmd_partitioning=spmd_lowering,\n )\n compile_options.parameter_is_tupled_arguments = tuple_args\n compile_options.executable_build_options.allow_spmd_sharding_propagation_to_output = \\\n _allow_propagation_to_outputs\n\n local_sharding_spec = mesh_sharding_specs(local_axis_sizes, mesh.axis_names)\n local_input_specs = [local_sharding_spec(aval, aval_in_axes)\n if aval is not core.abstract_unit else None\n for aval, aval_in_axes in safe_zip(local_in_untiled_avals, in_axes)]\n input_indices = [spec_to_indices(aval.shape, spec)\n if spec is not None else None\n for aval, spec in safe_zip(local_in_untiled_avals, local_input_specs)]\n\n local_output_specs = [local_sharding_spec(aval, aval_out_axes)\n for aval, aval_out_axes in safe_zip(local_out_untiled_avals, out_axes)]\n out_axis_resources = [array_mapping_to_axis_resources(o) for o in out_axes]\n handle_outs = avals_to_results_handler(num_local_replicas, num_local_partitions,\n local_output_specs, local_out_untiled_avals,\n global_out_avals, out_axis_resources, mesh)\n\n if _allow_compile_replicated and hasattr(backend, \"compile_replicated\"):\n unsafe_call = backend.compile_replicated(\n computation, compile_options,\n input_indices, local_input_specs,\n handle_outs)\n xla_executable = None\n else:\n compiled = dispatch.compile_or_get_cached(backend, computation, compile_options)\n handle_args = InputsHandler(compiled.local_devices(), local_input_specs,\n input_indices)\n unsafe_call = partial(execute_replicated, compiled, backend, handle_args, handle_outs)\n xla_executable = compiled\n\n return MeshExecutable(xla_executable, unsafe_call, local_in_untiled_avals)\n\n def call(self, *args):\n arg_avals = map(xla.abstractify, args)\n ref_avals = self._local_in_untiled_avals\n dispatch.check_arg_avals_for_call(ref_avals, arg_avals)\n return self.unsafe_call(*args)\n\n\n_forbidden_primitives = {\n 'xla_pmap': 'pmap',\n 'sharded_call': 'sharded_jit',\n}\ndef _sanitize_mesh_jaxpr(jaxpr):\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n for eqn in jaxpr.eqns:\n if eqn.primitive.name in _forbidden_primitives:\n raise RuntimeError(f\"Nesting {_forbidden_primitives[eqn.primitive.name]} \"\n f\"inside xmaps not supported!\")\n core.traverse_jaxpr_params(_sanitize_mesh_jaxpr, eqn.params)\n\n\ncustom_resource_typing_rules: Dict[core.Primitive, Callable] = {}\n\ndef resource_typecheck(jaxpr, resource_env, axis_resources, what_jaxpr_thunk):\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n def _check_aval(aval, what_thunk):\n if not hasattr(aval, 'named_shape'):\n return\n resource_to_axis = {}\n for axis in aval.named_shape:\n for resource in axis_resources[axis]:\n if resource in resource_to_axis:\n other_axis = resource_to_axis[resource]\n axis, other_axis = sorted([str(axis), str(other_axis)])\n raise JAXTypeError(\n f\"Axes 
`{axis}` and `{other_axis}` are both mapped to the \"\n f\"resource `{resource}`, but they coincide in the named_shape \"\n f\"of {what_thunk()}\")\n resource_to_axis[resource] = axis\n\n what_thunk = lambda: (f\"an input to {what_jaxpr_thunk()}\")\n for v in jaxpr.constvars:\n _check_aval(v.aval, what_thunk)\n for v in jaxpr.invars:\n _check_aval(v.aval, what_thunk)\n what_thunk = lambda: (f\"a value returned from a primitive {eqn.primitive} created \"\n f\"at {source_info_util.summarize(eqn.source_info)}\")\n rec_what_jaxpr_thunk = lambda: (f\"a primitive {eqn.primitive} created at\"\n f\"{source_info_util.summarize(eqn.source_info)}\")\n for eqn in jaxpr.eqns:\n typing_rule = custom_resource_typing_rules.get(eqn.primitive, None)\n if typing_rule:\n typing_rule([v.aval for v in eqn.invars], eqn.params, eqn.source_info,\n resource_env, axis_resources)\n else:\n core.traverse_jaxpr_params(partial(resource_typecheck,\n resource_env=resource_env,\n axis_resources=axis_resources,\n what_jaxpr_thunk=rec_what_jaxpr_thunk),\n eqn.params)\n for v in eqn.outvars:\n _check_aval(v.aval, what_thunk)\n\n\ndef mesh_sharding_specs(axis_sizes, axis_names):\n mesh_axis_pos = {name: i for i, name in enumerate(axis_names)}\n # NOTE: This takes in the non-sharded avals!\n def mk_sharding_spec(aval, aval_axes):\n mesh_mapping = [Replicated(axis_size) for axis_size in axis_sizes.values()]\n if aval is core.abstract_token:\n assert not aval_axes\n return ShardingSpec([], mesh_mapping)\n sharding = [_UNSHARDED_INSTANCE] * len(aval.shape)\n next_sharded_axis = 0\n aval_shape = list(aval.shape)\n # NOTE: sorted is stable, which is important when multiple resources\n # map to the same axis.\n for name, axis in sorted(aval_axes.items(), key=lambda x: x[1]):\n assert aval_shape[axis] % axis_sizes[name] == 0, (axis_sizes[name], aval.shape[axis])\n aval_shape[axis] //= axis_sizes[name]\n if isinstance(sharding[axis], NoSharding):\n sharding[axis] = Chunked([])\n sharding[axis] = Chunked(sharding[axis].chunks + [axis_sizes[name]])\n assert isinstance(mesh_mapping[mesh_axis_pos[name]], Replicated), \\\n \"Value mapped to the same mesh axis twice\"\n mesh_mapping[mesh_axis_pos[name]] = ShardedAxis(next_sharded_axis)\n next_sharded_axis += 1\n return ShardingSpec(sharding, mesh_mapping)\n return mk_sharding_spec\n\n\n@contextmanager\ndef maybe_extend_axis_env(*args, **kwargs):\n with core.extend_axis_env(*args, **kwargs):\n yield\n\nclass DynamicAxisEnvFrame(object):\n __slots__ = [\"name\", \"pmap_trace\", \"hard_size\"]\n def __init__(self, name, pmap_trace, hard_size):\n self.name = name\n self.pmap_trace = pmap_trace\n self.hard_size = hard_size\n\nclass DynamicAxisEnv(list):\n def __contains__(self, axis_name):\n return axis_name in (frame.name for frame in self)\n\n def __getitem__(self, axis_name):\n if axis_name not in self:\n raise NameError(\"unbound axis name: {}\".format(axis_name))\n for frame in reversed(self):\n if frame.name == axis_name:\n return frame\n\n raise AssertionError\n\n @property\n def sizes(self):\n return tuple(frame.hard_size for frame in self)\n\n @property\n def nreps(self):\n return prod(frame.hard_size for frame in self)\n\nclass _ThreadLocalState(threading.local):\n def __init__(self):\n self.dynamic_axis_env = DynamicAxisEnv()\n\n_thread_local_state = _ThreadLocalState()\n\ndef device_put(x, devices: Sequence[xb.xla_client.Device], replicate: bool=False) -> List[xb.xla_client.Buffer]:\n \"\"\"Call device_put on a sequence of devices and return a flat sequence of buffers.\"\"\"\n if 
replicate:\n return list(it.chain.from_iterable(dispatch.device_put(x, device) for device in devices))\n else:\n return list(it.chain.from_iterable(dispatch.device_put(val, device) for val, device in safe_zip(x, devices)))\n" ]
[ [ "numpy.vectorize", "numpy.empty", "numpy.zeros", "numpy.dtype", "numpy.issubdtype", "numpy.asarray", "numpy.prod", "numpy.array_equal", "numpy.max", "numpy.min", "numpy.broadcast_to", "numpy.array", "numpy.flatnonzero" ] ]
enricovara/mne-python
[ "f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc" ]
[ "mne/time_frequency/_stft.py" ]
[ "from math import ceil\nimport numpy as np\n\nfrom ..fixes import rfft, irfft, rfftfreq\nfrom ..utils import logger, verbose\n\n\n@verbose\ndef stft(x, wsize, tstep=None, verbose=None):\n \"\"\"STFT Short-Term Fourier Transform using a sine window.\n\n The transformation is designed to be a tight frame that can be\n perfectly inverted. It only returns the positive frequencies.\n\n Parameters\n ----------\n x : array, shape (n_signals, n_times)\n Containing multi-channels signal.\n wsize : int\n Length of the STFT window in samples (must be a multiple of 4).\n tstep : int\n Step between successive windows in samples (must be a multiple of 2,\n a divider of wsize and smaller than wsize/2) (default: wsize/2).\n %(verbose)s\n\n Returns\n -------\n X : array, shape (n_signals, wsize // 2 + 1, n_step)\n STFT coefficients for positive frequencies with\n ``n_step = ceil(T / tstep)``.\n\n See Also\n --------\n istft\n stftfreq\n \"\"\"\n if not np.isrealobj(x):\n raise ValueError(\"x is not a real valued array\")\n\n if x.ndim == 1:\n x = x[None, :]\n\n n_signals, T = x.shape\n wsize = int(wsize)\n\n # Errors and warnings\n if wsize % 4:\n raise ValueError('The window length must be a multiple of 4.')\n\n if tstep is None:\n tstep = wsize / 2\n\n tstep = int(tstep)\n\n if (wsize % tstep) or (tstep % 2):\n raise ValueError('The step size must be a multiple of 2 and a '\n 'divider of the window length.')\n\n if tstep > wsize / 2:\n raise ValueError('The step size must be smaller than half the '\n 'window length.')\n\n n_step = int(ceil(T / float(tstep)))\n n_freq = wsize // 2 + 1\n logger.info(\"Number of frequencies: %d\" % n_freq)\n logger.info(\"Number of time steps: %d\" % n_step)\n\n X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex128)\n\n if n_signals == 0:\n return X\n\n # Defining sine window\n win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)\n win2 = win ** 2\n\n swin = np.zeros((n_step - 1) * tstep + wsize)\n for t in range(n_step):\n swin[t * tstep:t * tstep + wsize] += win2\n swin = np.sqrt(wsize * swin)\n\n # Zero-padding and Pre-processing for edges\n xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep),\n dtype=x.dtype)\n xp[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T] = x\n x = xp\n\n for t in range(n_step):\n # Framing\n wwin = win / swin[t * tstep: t * tstep + wsize]\n frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :]\n # FFT\n X[:, :, t] = rfft(frame)\n\n return X\n\n\ndef istft(X, tstep=None, Tx=None):\n \"\"\"ISTFT Inverse Short-Term Fourier Transform using a sine window.\n\n Parameters\n ----------\n X : array, shape (..., wsize / 2 + 1, n_step)\n The STFT coefficients for positive frequencies.\n tstep : int\n Step between successive windows in samples (must be a multiple of 2,\n a divider of wsize and smaller than wsize/2) (default: wsize/2).\n Tx : int\n Length of returned signal. 
If None Tx = n_step * tstep.\n\n Returns\n -------\n x : array, shape (Tx,)\n Array containing the inverse STFT signal.\n\n See Also\n --------\n stft\n \"\"\"\n # Errors and warnings\n X = np.asarray(X)\n if X.ndim < 2:\n raise ValueError(f'X must have ndim >= 2, got {X.ndim}')\n n_win, n_step = X.shape[-2:]\n signal_shape = X.shape[:-2]\n if n_win % 2 == 0:\n raise ValueError('The number of rows of the STFT matrix must be odd.')\n\n wsize = 2 * (n_win - 1)\n if tstep is None:\n tstep = wsize / 2\n\n if wsize % tstep:\n raise ValueError('The step size must be a divider of two times the '\n 'number of rows of the STFT matrix minus two.')\n\n if wsize % 2:\n raise ValueError('The step size must be a multiple of 2.')\n\n if tstep > wsize / 2:\n raise ValueError('The step size must be smaller than the number of '\n 'rows of the STFT matrix minus one.')\n\n if Tx is None:\n Tx = n_step * tstep\n\n T = n_step * tstep\n\n x = np.zeros(signal_shape + (T + wsize - tstep,), dtype=np.float64)\n\n if np.prod(signal_shape) == 0:\n return x[..., :Tx]\n\n # Defining sine window\n win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)\n # win = win / norm(win);\n\n # Pre-processing for edges\n swin = np.zeros(T + wsize - tstep, dtype=np.float64)\n for t in range(n_step):\n swin[t * tstep:t * tstep + wsize] += win ** 2\n swin = np.sqrt(swin / wsize)\n\n for t in range(n_step):\n # IFFT\n frame = irfft(X[..., t], wsize)\n # Overlap-add\n frame *= win / swin[t * tstep:t * tstep + wsize]\n x[..., t * tstep: t * tstep + wsize] += frame\n\n # Truncation\n x = x[..., (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1]\n x = x[..., :Tx].copy()\n return x\n\n\ndef stftfreq(wsize, sfreq=None): # noqa: D401\n \"\"\"Compute frequencies of stft transformation.\n\n Parameters\n ----------\n wsize : int\n Size of stft window.\n sfreq : float\n Sampling frequency. If None the frequencies are given between 0 and pi\n otherwise it's given in Hz.\n\n Returns\n -------\n freqs : array\n The positive frequencies returned by stft.\n\n See Also\n --------\n stft\n istft\n \"\"\"\n freqs = rfftfreq(wsize)\n if sfreq is not None:\n freqs *= float(sfreq)\n return freqs\n\n\ndef stft_norm2(X):\n \"\"\"Compute L2 norm of STFT transform.\n\n It takes into account that stft only return positive frequencies.\n As we use tight frame this quantity is conserved by the stft.\n\n Parameters\n ----------\n X : 3D complex array\n The STFT transforms\n\n Returns\n -------\n norms2 : array\n The squared L2 norm of every row of X.\n \"\"\"\n X2 = (X * X.conj()).real\n # compute all L2 coefs and remove first and last frequency once.\n norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1) -\n np.sum(X2[:, -1, :], axis=1))\n return norms2\n\n\ndef stft_norm1(X):\n \"\"\"Compute L1 norm of STFT transform.\n\n It takes into account that stft only return positive frequencies.\n\n Parameters\n ----------\n X : 3D complex array\n The STFT transforms\n\n Returns\n -------\n norms : array\n The L1 norm of every row of X.\n \"\"\"\n X_abs = np.abs(X)\n # compute all L1 coefs and remove first and last frequency once.\n norms = (2. * X_abs.sum(axis=(1, 2)) -\n np.sum(X_abs[:, 0, :], axis=1) - np.sum(X_abs[:, -1, :], axis=1))\n return norms\n" ]
[ [ "numpy.sum", "numpy.isrealobj", "numpy.zeros", "numpy.abs", "numpy.asarray", "numpy.arange", "numpy.prod", "numpy.sqrt" ] ]
scottyhq/xarray-sentinel
[ "3899a86e5bf5d56454e7467d9231bc97ebab8fe1" ]
[ "xarray_sentinel/sentinel1.py" ]
[ "\"\"\"Map Sentinel-1 data products to xarray.\n\nReferences:\n - Sentinel-1 document library: https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-1-sar/document-library\n - Sentinel-1 Product Specification v3.9 07 May 2021 S1-RS-MDA-52-7441-3-9 documenting IPF 3.40\n https://sentinel.esa.int/documents/247904/1877131/S1-RS-MDA-52-7441-3-9-2_Sentinel-1ProductSpecification.pdf\n - Sentinel-1 Product Specification v3.7 27 February 2020 S1-RS-MDA-52-7441 documenting IPF 3.30\n https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Specification\n\"\"\"\n\nimport contextlib\nimport os\nimport typing as T\nimport warnings\n\nimport fsspec\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\nfrom . import conventions, esa_safe\n\nSPEED_OF_LIGHT = 299_792_458 # m / s\nONE_SECOND = np.timedelta64(1, \"s\")\n\n\ndef get_fs_path(\n urlpath_or_path: esa_safe.PathType,\n fs: T.Optional[fsspec.AbstractFileSystem] = None,\n storage_options: T.Optional[T.Dict[str, T.Any]] = None,\n) -> T.Tuple[fsspec.AbstractFileSystem, str]:\n if fs is not None and storage_options is not None:\n raise TypeError(\"only one of 'fs' and 'storage_options' can be not None\")\n if fs is None:\n fs, _, paths = fsspec.get_fs_token_paths(\n urlpath_or_path, storage_options=storage_options\n )\n if len(paths) == 0:\n raise ValueError(f\"file or object not found {urlpath_or_path!r}\")\n elif len(paths) > 1:\n raise ValueError(f\"multiple files or objects found {urlpath_or_path!r}\")\n path = paths[0]\n else:\n path = str(urlpath_or_path)\n return fs, path\n\n\ndef normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:\n if group is None:\n group = \"\"\n if group.startswith(\"/\"):\n group = group[1:]\n burst_index = None\n parent_group, _, last_name = group.rpartition(\"/\")\n if parent_group.count(\"/\") == 1 and last_name.isnumeric():\n burst_index = int(last_name)\n group = parent_group\n return group, burst_index\n\n\ndef open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:\n calibration_vectors = esa_safe.parse_tag_as_list(\n calibration, \".//calibrationVector\", \"calibration\"\n )\n\n azimuth_time_list = []\n pixel_list = []\n line_list = []\n sigmaNought_list = []\n betaNought_list = []\n gamma_list = []\n dn_list = []\n for vector in calibration_vectors:\n azimuth_time_list.append(vector[\"azimuthTime\"])\n line_list.append(vector[\"line\"])\n pixel = np.fromstring(vector[\"pixel\"][\"$\"], dtype=int, sep=\" \") # type: ignore\n pixel_list.append(pixel)\n sigmaNought = np.fromstring(vector[\"sigmaNought\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n sigmaNought_list.append(sigmaNought)\n betaNought = np.fromstring(vector[\"betaNought\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n betaNought_list.append(betaNought)\n gamma = np.fromstring(vector[\"gamma\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n gamma_list.append(gamma)\n dn = np.fromstring(vector[\"dn\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n dn_list.append(dn)\n\n pixel = np.array(pixel_list)\n if not np.allclose(pixel, pixel[0]):\n raise ValueError(\n \"Unable to organise calibration vectors in a regular line-pixel grid\"\n )\n data_vars = {\n \"azimuth_time\": (\"line\", [np.datetime64(dt) for dt in azimuth_time_list]),\n \"sigmaNought\": ((\"line\", \"pixel\"), sigmaNought_list),\n \"betaNought\": ((\"line\", \"pixel\"), betaNought_list),\n \"gamma\": ((\"line\", \"pixel\"), gamma_list),\n \"dn\": ((\"line\", \"pixel\"), dn_list),\n }\n 
coords = {\"line\": line_list, \"pixel\": pixel_list[0]}\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef open_noise_range_dataset(noise: esa_safe.PathType) -> xr.Dataset:\n noise_vectors = esa_safe.parse_tag_as_list(noise, \".//noiseRangeVector\", \"noise\")\n\n azimuth_time_list = []\n pixel_list = []\n line_list = []\n noiseRangeLut_list = []\n for vector in noise_vectors:\n azimuth_time_list.append(vector[\"azimuthTime\"])\n line_list.append(vector[\"line\"])\n pixel = np.fromstring(vector[\"pixel\"][\"$\"], dtype=int, sep=\" \") # type: ignore\n pixel_list.append(pixel)\n noiseRangeLut = np.fromstring(vector[\"noiseRangeLut\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n noiseRangeLut_list.append(noiseRangeLut)\n\n pixel = np.array(pixel_list)\n if not np.allclose(pixel, pixel[0]):\n raise ValueError(\n \"Unable to organise noise vectors in a regular line-pixel grid\"\n )\n data_vars = {\n \"azimuth_time\": (\"line\", [np.datetime64(dt) for dt in azimuth_time_list]),\n \"noiseRangeLut\": ((\"line\", \"pixel\"), noiseRangeLut_list),\n }\n coords = {\"line\": line_list, \"pixel\": pixel_list[0]}\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef open_noise_azimuth_dataset(noise: esa_safe.PathType) -> xr.Dataset:\n noise_vectors = esa_safe.parse_tag_as_list(noise, \".//noiseAzimuthVector\", \"noise\")\n\n first_range_sample = []\n line_list = []\n noiseAzimuthLut_list = []\n for vector in noise_vectors:\n first_range_sample.append(vector[\"firstRangeSample\"])\n line = np.fromstring(vector[\"line\"][\"$\"], dtype=int, sep=\" \") # type: ignore\n line_list.append(line)\n noiseAzimuthLut = np.fromstring(vector[\"noiseAzimuthLut\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n noiseAzimuthLut_list.append(noiseAzimuthLut)\n\n # BROKEN: GRDs have line and noiseAzimuthLut of different size, we take the first one\n data_vars = {}\n coords = {}\n if first_range_sample:\n data_vars[\"noiseAzimuthLut\"] = (\"line\", noiseAzimuthLut_list[0])\n coords[\"line\"] = line_list[0]\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef open_coordinate_conversion_dataset(\n annotation_path: esa_safe.PathType,\n) -> xr.Dataset:\n coordinate_conversion = esa_safe.parse_tag_as_list(\n annotation_path, \".//coordinateConversionList/coordinateConversion\"\n )\n\n gr0 = []\n sr0 = []\n azimuth_time = []\n slant_range_time = []\n srgrCoefficients: T.List[T.List[float]] = []\n grsrCoefficients: T.List[T.List[float]] = []\n for values in coordinate_conversion:\n sr0.append(values[\"sr0\"])\n gr0.append(values[\"gr0\"])\n azimuth_time.append(values[\"azimuthTime\"])\n slant_range_time.append(values[\"slantRangeTime\"])\n srgrCoefficients.append(\n [float(v) for v in values[\"srgrCoefficients\"][\"$\"].split()]\n )\n grsrCoefficients.append(\n [float(v) for v in values[\"grsrCoefficients\"][\"$\"].split()]\n )\n\n coords: T.Dict[str, T.Any] = {}\n data_vars: T.Dict[str, T.Any] = {}\n if srgrCoefficients:\n coords[\"azimuth_time\"] = [np.datetime64(dt) for dt in azimuth_time]\n coords[\"degree\"] = list(range(len(srgrCoefficients[0])))\n\n data_vars[\"gr0\"] = (\"azimuth_time\", gr0)\n data_vars[\"sr0\"] = (\"azimuth_time\", sr0)\n data_vars[\"slant_range_time\"] = (\"azimuth_time\", slant_range_time)\n data_vars[\"srgrCoefficients\"] = ((\"azimuth_time\", \"degree\"), srgrCoefficients)\n data_vars[\"grsrCoefficients\"] = ((\"azimuth_time\", \"degree\"), grsrCoefficients)\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef 
open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n geolocation_grid_points = esa_safe.parse_tag_as_list(\n annotation, \".//geolocationGridPoint\"\n )\n\n azimuth_time = []\n slant_range_time = []\n line_set = set()\n pixel_set = set()\n for ggp in geolocation_grid_points:\n if ggp[\"line\"] not in line_set:\n azimuth_time.append(np.datetime64(ggp[\"azimuthTime\"]))\n line_set.add(ggp[\"line\"])\n if ggp[\"pixel\"] not in pixel_set:\n slant_range_time.append(ggp[\"slantRangeTime\"])\n pixel_set.add(ggp[\"pixel\"])\n shape = (len(azimuth_time), len(slant_range_time))\n dims = (\"azimuth_time\", \"slant_range_time\")\n data_vars = {\n \"latitude\": (dims, np.full(shape, np.nan)),\n \"longitude\": (dims, np.full(shape, np.nan)),\n \"height\": (dims, np.full(shape, np.nan)),\n \"incidenceAngle\": (dims, np.full(shape, np.nan)),\n \"elevationAngle\": (dims, np.full(shape, np.nan)),\n }\n line = sorted(line_set)\n pixel = sorted(pixel_set)\n for ggp in geolocation_grid_points:\n for var in data_vars:\n j = line.index(ggp[\"line\"])\n i = pixel.index(ggp[\"pixel\"])\n data_vars[var][1][j, i] = ggp[var]\n\n ds = xr.Dataset(\n data_vars=data_vars,\n coords={\n \"azimuth_time\": [np.datetime64(dt) for dt in azimuth_time],\n \"slant_range_time\": slant_range_time,\n \"line\": (\"azimuth_time\", line),\n \"pixel\": (\"slant_range_time\", pixel),\n },\n )\n return ds\n\n\ndef open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n attitudes = esa_safe.parse_tag_as_list(annotation, \".//attitude\")\n\n variables = [\"q0\", \"q1\", \"q2\", \"q3\", \"wx\", \"wy\", \"wz\", \"pitch\", \"roll\", \"yaw\"]\n azimuth_time: T.List[T.Any] = []\n data_vars: T.Dict[str, T.Any] = {var: (\"azimuth_time\", []) for var in variables}\n for attitude in attitudes:\n azimuth_time.append(attitude[\"time\"])\n for var in variables:\n data_vars[var][1].append(attitude[var])\n\n ds = xr.Dataset(\n data_vars=data_vars,\n coords={\"azimuth_time\": [np.datetime64(dt) for dt in azimuth_time]},\n )\n\n return ds\n\n\ndef open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n orbits = esa_safe.parse_tag_as_list(annotation, \".//orbit\")\n\n reference_system = orbits[0][\"frame\"]\n variables = [\"position\", \"velocity\"]\n data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}\n azimuth_time: T.List[T.Any] = []\n for orbit in orbits:\n azimuth_time.append(orbit[\"time\"])\n data[\"position\"][0].append(orbit[\"position\"][\"x\"])\n data[\"position\"][1].append(orbit[\"position\"][\"y\"])\n data[\"position\"][2].append(orbit[\"position\"][\"z\"])\n data[\"velocity\"][0].append(orbit[\"velocity\"][\"x\"])\n data[\"velocity\"][1].append(orbit[\"velocity\"][\"y\"])\n data[\"velocity\"][2].append(orbit[\"velocity\"][\"z\"])\n if orbit[\"frame\"] != reference_system:\n warnings.warn(\n \"reference_system is not consistent in all the state vectors. 
\"\n )\n reference_system = None\n\n position = xr.Variable(data=data[\"position\"], dims=(\"axis\", \"azimuth_time\")) # type: ignore\n velocity = xr.Variable(data=data[\"velocity\"], dims=(\"axis\", \"azimuth_time\")) # type: ignore\n\n attrs = {}\n if reference_system is not None:\n attrs.update({\"reference_system\": reference_system})\n\n ds = xr.Dataset(\n data_vars={\"position\": position, \"velocity\": velocity},\n attrs=attrs,\n coords={\n \"azimuth_time\": [np.datetime64(dt) for dt in azimuth_time],\n \"axis\": [0, 1, 2],\n },\n )\n\n return ds\n\n\ndef open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n dc_estimates = esa_safe.parse_tag_as_list(annotation, \".//dcEstimate\")\n\n azimuth_time = []\n t0 = []\n data_dc_poly = []\n for dc_estimate in dc_estimates:\n azimuth_time.append(dc_estimate[\"azimuthTime\"])\n t0.append(dc_estimate[\"t0\"])\n data_dc_poly.append(\n [float(c) for c in dc_estimate[\"dataDcPolynomial\"][\"$\"].split()]\n )\n\n ds = xr.Dataset(\n data_vars={\n \"t0\": (\"azimuth_time\", t0),\n \"data_dc_polynomial\": ((\"azimuth_time\", \"degree\"), data_dc_poly),\n },\n coords={\n \"azimuth_time\": [np.datetime64(at) for at in azimuth_time],\n \"degree\": list(range(len(data_dc_poly[0]))),\n },\n )\n return ds\n\n\ndef open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n azimuth_fm_rates = esa_safe.parse_tag_as_list(annotation, \".//azimuthFmRate\")\n\n azimuth_time = []\n t0 = []\n azimuth_fm_rate_poly = []\n for azimuth_fm_rate in azimuth_fm_rates:\n azimuth_time.append(azimuth_fm_rate[\"azimuthTime\"])\n t0.append(azimuth_fm_rate[\"t0\"])\n azimuth_fm_rate_poly.append(\n [float(c) for c in azimuth_fm_rate[\"azimuthFmRatePolynomial\"][\"$\"].split()]\n )\n\n ds = xr.Dataset(\n data_vars={\n \"t0\": (\"azimuth_time\", t0),\n \"azimuth_fm_rate_polynomial\": (\n (\"azimuth_time\", \"degree\"),\n azimuth_fm_rate_poly,\n ),\n },\n coords={\n \"azimuth_time\": [np.datetime64(at) for at in azimuth_time],\n \"degree\": list(range(len(azimuth_fm_rate_poly[0]))),\n },\n )\n return ds\n\n\ndef find_available_groups(\n product_files: T.Dict[str, T.Tuple[str, str, str, str, str]],\n product_path: str,\n check_files_exist: bool = False,\n fs: fsspec.AbstractFileSystem = fsspec.filesystem(\"file\"),\n) -> T.Dict[str, T.List[str]]:\n groups: T.Dict[str, T.List[str]] = {}\n for path, (type, _, swath, polarization, _) in product_files.items():\n swath_pol_group = f\"{swath}/{polarization}\".upper()\n abspath = os.path.join(product_path, os.path.normpath(path))\n if check_files_exist:\n if not fs.exists(abspath):\n continue\n if type == \"s1Level1ProductSchema\":\n groups[swath.upper()] = [\"\"]\n groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])\n for metadata_group in [\n \"orbit\",\n \"attitude\",\n \"azimuth_fm_rate\",\n \"dc_estimate\",\n \"gcp\",\n \"coordinate_conversion\",\n ]:\n groups[f\"{swath_pol_group}/{metadata_group}\"] = [abspath]\n elif type == \"s1Level1CalibrationSchema\":\n groups[f\"{swath_pol_group}/calibration\"] = [abspath]\n elif type == \"s1Level1NoiseSchema\":\n groups[f\"{swath_pol_group}/noise_range\"] = [abspath]\n groups[f\"{swath_pol_group}/noise_azimuth\"] = [abspath]\n elif type == \"s1Level1MeasurementSchema\":\n groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])\n\n return groups\n\n\ndef open_pol_dataset(\n measurement: esa_safe.PathOrFileType,\n annotation: esa_safe.PathOrFileType,\n fs: T.Optional[fsspec.AbstractFileSystem] = None,\n) -> 
xr.Dataset:\n\n product_information = esa_safe.parse_tag(annotation, \".//productInformation\")\n image_information = esa_safe.parse_tag(annotation, \".//imageInformation\")\n swath_timing = esa_safe.parse_tag(annotation, \".//swathTiming\")\n incidence_angle_mid_swath = image_information[\"incidenceAngleMidSwath\"]\n\n number_of_samples = image_information[\"numberOfSamples\"]\n first_slant_range_time = image_information[\"slantRangeTime\"]\n slant_range_time_interval = 1 / product_information[\"rangeSamplingRate\"]\n\n number_of_lines = image_information[\"numberOfLines\"]\n first_azimuth_time = image_information[\"productFirstLineUtcTime\"]\n azimuth_time_interval = image_information[\"azimuthTimeInterval\"]\n number_of_bursts = swath_timing[\"burstList\"][\"@count\"]\n range_pixel_spaxing = image_information[\"rangePixelSpacing\"]\n anx_datetime = image_information[\"ascendingNodeTime\"]\n\n attrs = {\n \"sar:center_frequency\": product_information[\"radarFrequency\"] / 10 ** 9,\n \"sar:pixel_spacing_azimuth\": image_information[\"azimuthPixelSpacing\"],\n \"sar:pixel_spacing_range\": range_pixel_spaxing,\n \"azimuth_time_interval\": azimuth_time_interval,\n \"slant_range_time_interval\": slant_range_time_interval,\n \"incidence_angle_mid_swath\": incidence_angle_mid_swath,\n \"sat:anx_datetime\": anx_datetime + \"Z\",\n }\n encoding = {}\n swap_dims = {}\n chunks: T.Union[None, T.Dict[str, int]] = None\n\n azimuth_time = pd.date_range(\n start=first_azimuth_time,\n periods=number_of_lines,\n freq=pd.Timedelta(azimuth_time_interval, \"s\"),\n ).values\n if number_of_bursts == 0:\n swap_dims = {\"line\": \"azimuth_time\", \"pixel\": \"slant_range_time\"}\n else:\n lines_per_burst = swath_timing[\"linesPerBurst\"]\n attrs.update(\n {\n \"azimuth_steering_rate\": product_information[\"azimuthSteeringRate\"],\n \"number_of_bursts\": number_of_bursts,\n \"lines_per_burst\": lines_per_burst,\n }\n )\n for burst_index, burst in enumerate(swath_timing[\"burstList\"][\"burst\"]):\n first_azimuth_time_burst = burst[\"azimuthTime\"]\n azimuth_time_burst = pd.date_range(\n start=first_azimuth_time_burst,\n periods=lines_per_burst,\n freq=pd.Timedelta(azimuth_time_interval, \"s\"),\n )\n azimuth_time[\n lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)\n ] = azimuth_time_burst\n\n # chunk at burst boundaries if dask is present\n try:\n import dask # noqa\n\n encoding[\"preferred_chunks\"] = {\"line\": lines_per_burst}\n chunks = {}\n except ModuleNotFoundError:\n pass\n\n coords = {\n \"pixel\": np.arange(0, number_of_samples, dtype=int),\n \"line\": np.arange(0, number_of_lines, dtype=int),\n \"azimuth_time\": (\"line\", azimuth_time),\n }\n\n if product_information[\"projection\"] == \"Slant Range\":\n slant_range_time = np.linspace(\n first_slant_range_time,\n first_slant_range_time\n + slant_range_time_interval * (number_of_samples - 1),\n number_of_samples,\n )\n coords[\"slant_range_time\"] = (\"pixel\", slant_range_time)\n elif product_information[\"projection\"] == \"Ground Range\":\n ground_range = np.linspace(\n 0,\n range_pixel_spaxing * (number_of_samples - 1),\n number_of_samples,\n )\n coords[\"ground_range\"] = (\"pixel\", ground_range)\n swap_dims = {\"line\": \"azimuth_time\", \"pixel\": \"ground_range\"}\n else:\n raise ValueError(f\"unknown projection {product_information['projection']}\")\n\n # temporary ugly work-around to get fsspec support with rasterio >= 1.3a3\n # the try block uses fsspec if rasterio >= 1.3a3 is installed\n # the except block falls back 
to standard file based rasterio\n # the with is needed to avoid polluting stderr when the try block fails\n with contextlib.redirect_stderr(open(\"/dev/null\", \"w\")):\n try:\n arr = xr.open_dataarray(fs.open(measurement), engine=\"rasterio\", chunks=chunks) # type: ignore\n except AttributeError:\n arr = xr.open_dataarray(measurement, engine=\"rasterio\") # type: ignore\n\n arr = arr.squeeze(\"band\").drop_vars([\"band\", \"spatial_ref\"])\n arr = arr.rename({\"y\": \"line\", \"x\": \"pixel\"})\n arr = arr.assign_coords(coords)\n arr = arr.swap_dims(swap_dims)\n\n arr.attrs.update(attrs)\n arr.encoding.update(encoding)\n\n return xr.Dataset(attrs=attrs, data_vars={\"measurement\": arr})\n\n\ndef find_bursts_index(\n pol_dataset: xr.Dataset,\n azimuth_anx_time: float,\n use_center: bool = False,\n) -> int:\n lines_per_burst = pol_dataset.attrs[\"lines_per_burst\"]\n anx_datetime = np.datetime64(pol_dataset.attrs[\"sat:anx_datetime\"].replace(\"Z\", \"\"))\n azimuth_anx_time = pd.Timedelta(azimuth_anx_time, unit=\"s\")\n if use_center:\n azimuth_anx_time_center = (\n pol_dataset.azimuth_time[lines_per_burst // 2 :: lines_per_burst]\n - anx_datetime\n )\n distance = abs(azimuth_anx_time_center - azimuth_anx_time)\n else:\n azimuth_anx_time_first_line = (\n pol_dataset.azimuth_time[::lines_per_burst] - anx_datetime\n )\n distance = abs(azimuth_anx_time_first_line - azimuth_anx_time)\n return distance.argmin().item() # type: ignore\n\n\ndef crop_burst_dataset(\n pol_dataset: xr.Dataset,\n burst_index: T.Optional[int] = None,\n azimuth_anx_time: T.Optional[float] = None,\n use_center: bool = False,\n) -> xr.Dataset:\n if (burst_index is not None) and (azimuth_anx_time is not None):\n raise TypeError(\n \"only one keyword between 'burst_index' and 'azimuth_anx_time' must be defined\"\n )\n\n if burst_index is None:\n if azimuth_anx_time is not None:\n burst_index = find_bursts_index(\n pol_dataset, azimuth_anx_time, use_center=use_center\n )\n else:\n raise TypeError(\n \"one keyword between 'burst_index' and 'azimuth_anx_time' must be defined\"\n )\n\n if burst_index < 0 or burst_index >= pol_dataset.attrs[\"number_of_bursts\"]:\n raise IndexError(f\"burst_index={burst_index} out of bounds\")\n\n lines_per_burst = pol_dataset.attrs[\"lines_per_burst\"]\n ds = pol_dataset.sel(\n line=slice(\n lines_per_burst * burst_index, lines_per_burst * (burst_index + 1) - 1\n )\n )\n\n anx_datetime = np.datetime64(pol_dataset.attrs[\"sat:anx_datetime\"].replace(\"Z\", \"\"))\n burst_azimuth_anx_times = ds.azimuth_time - anx_datetime\n ds.attrs[\"azimuth_anx_time\"] = burst_azimuth_anx_times.values[0] / ONE_SECOND\n ds = ds.swap_dims({\"line\": \"azimuth_time\", \"pixel\": \"slant_range_time\"})\n ds.attrs[\"burst_index\"] = burst_index\n\n return ds\n\n\ndef mosaic_slc_iw(slc_iw_image: xr.Dataset, crop: int = 90) -> xr.Dataset:\n bursts = []\n for i in range(slc_iw_image.attrs[\"number_of_bursts\"]):\n burst = crop_burst_dataset(slc_iw_image, burst_index=i)\n bursts.append(burst.isel(azimuth_time=slice(crop, -crop)))\n return xr.concat(bursts, dim=\"azimuth_time\")\n\n\ndef calibrate_amplitude(\n digital_number: xr.DataArray, calibration_lut: xr.DataArray\n) -> xr.DataArray:\n calibration = calibration_lut.interp(\n line=digital_number.line,\n pixel=digital_number.pixel,\n ).astype(np.float32)\n amplitude = digital_number / calibration\n amplitude.attrs.update(digital_number.attrs)\n try:\n lut_name = calibration_lut.attrs[\"long_name\"].partition(\"calibration LUT\")[0]\n amplitude.attrs[\"long_name\"] = 
f\"amplitude for {lut_name}\"\n amplitude.attrs[\"units\"] = calibration.attrs[\"units\"]\n except KeyError:\n pass\n return amplitude\n\n\ndef calibrate_intensity(\n digital_number: xr.DataArray,\n calibration_lut: xr.DataArray,\n as_db: bool = False,\n min_db: T.Optional[float] = -40.0,\n) -> xr.DataArray:\n amplitude = calibrate_amplitude(digital_number, calibration_lut)\n intensity = abs(amplitude) ** 2\n if as_db:\n intensity = 10.0 * np.log10(intensity)\n if min_db is not None:\n intensity = np.maximum(intensity, min_db)\n intensity.attrs.update(amplitude.attrs)\n intensity.attrs[\"units\"] = \"dB\"\n else:\n intensity.attrs.update(amplitude.attrs)\n intensity.attrs[\"units\"] = \"m2 m-2\"\n try:\n lut_name = amplitude.attrs[\"long_name\"].partition(\"amplitude for \")[2]\n intensity.attrs[\"long_name\"] = lut_name\n except KeyError:\n pass\n return intensity\n\n\ndef slant_range_time_to_ground_range(\n azimuth_time: xr.DataArray,\n slant_range_time: xr.DataArray,\n coordinate_conversion: xr.DataArray,\n) -> xr.DataArray:\n slant_range = SPEED_OF_LIGHT / 2.0 * slant_range_time\n cc = coordinate_conversion.interp(azimuth_time=azimuth_time)\n x = slant_range - cc.sr0\n ground_range = (cc.srgrCoefficients * x ** cc.degree).sum(\"degree\")\n return ground_range # type: ignore\n\n\ndef assign_slant_range_time_coord(\n measurement: xr.Dataset, coordinate_conversion: xr.Dataset\n) -> xr.Dataset:\n x = measurement.ground_range - coordinate_conversion.gr0\n slant_range = (\n coordinate_conversion.grsrCoefficients * x ** coordinate_conversion.degree\n ).sum(dim=\"degree\")\n slant_range_coord = slant_range.interp(\n azimuth_time=measurement.azimuth_time, ground_range=measurement.ground_range\n ).data\n slant_range_time = 2 / SPEED_OF_LIGHT * slant_range_coord\n measurement = measurement.assign_coords(\n slant_range_time=((\"azimuth_time\", \"ground_range\"), slant_range_time)\n ) # type: ignore\n return measurement\n\n\ndef build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:\n lat = int(round(lat * 10))\n lon = int(round(lon * 10))\n\n n_or_s = \"N\" if lat >= 0 else \"S\"\n e_or_w = \"E\" if lon >= 0 else \"W\"\n burst_id = f\"R{relative_orbit:03}\" f\"-{n_or_s}{lat:03}\" f\"-{e_or_w}{lon:04}\"\n return burst_id\n\n\ndef compute_burst_centres(\n gcp: xr.Dataset,\n) -> T.Tuple[T.List[float], T.List[float]]:\n gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)\n gc_az_win = gcp_rolling.construct(azimuth_time=\"az_win\")\n centre = gc_az_win.mean([\"az_win\", \"slant_range_time\"])\n centre = centre.isel(azimuth_time=slice(1, None))\n return centre.latitude.values.tolist(), centre.longitude.values.tolist()\n\n\nMETADATA_OPENERS = {\n \"orbit\": open_orbit_dataset,\n \"attitude\": open_attitude_dataset,\n \"azimuth_fm_rate\": open_azimuth_fm_rate_dataset,\n \"dc_estimate\": open_dc_estimate_dataset,\n \"gcp\": open_gcp_dataset,\n \"coordinate_conversion\": open_coordinate_conversion_dataset,\n \"calibration\": open_calibration_dataset,\n \"noise_range\": open_noise_range_dataset,\n \"noise_azimuth\": open_noise_azimuth_dataset,\n}\n\n\ndef do_override_product_files(\n template: str, product_files: T.Dict[str, T.Tuple[str, str, str, str, str]]\n) -> T.Dict[str, T.Tuple[str, str, str, str, str]]:\n overridden_product_files = {}\n for path, description in product_files.items():\n type, prefix, swath, polarization, date = description\n ext = os.path.splitext(path)[1]\n dirname = os.path.dirname(path)\n overridden_path = template.format(**locals())\n 
overridden_product_files[overridden_path] = description\n return overridden_product_files\n\n\ndef open_sentinel1_dataset(\n product_urlpath: esa_safe.PathType,\n *,\n drop_variables: T.Optional[T.Tuple[str]] = None,\n group: T.Optional[str] = None,\n fs: T.Optional[fsspec.AbstractFileSystem] = None,\n storage_options: T.Optional[T.Dict[str, T.Any]] = None,\n check_files_exist: bool = False,\n override_product_files: T.Optional[str] = None,\n) -> xr.Dataset:\n if drop_variables is not None:\n warnings.warn(\"'drop_variables' is currently ignored\")\n\n fs, manifest_path = get_fs_path(product_urlpath, fs, storage_options)\n if fs.isdir(manifest_path):\n manifest_path = os.path.join(manifest_path, \"manifest.safe\")\n product_path = os.path.dirname(manifest_path)\n\n with fs.open(manifest_path) as file:\n product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)\n\n if override_product_files:\n product_files = do_override_product_files(override_product_files, product_files)\n\n groups = find_available_groups(\n product_files, product_path, check_files_exist=check_files_exist, fs=fs\n )\n\n group, burst_index = normalise_group(group)\n absgroup = f\"/{group}\"\n if group != \"\" and group not in groups:\n raise ValueError(\n f\"Invalid group {group!r}, please select one of the following groups:\"\n f\"\\n{list(groups.keys())}\"\n )\n\n metadata = \"\"\n\n ds = xr.Dataset()\n if group == \"\":\n subgroups = list(groups)\n else:\n subgroups = [\n g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group\n ]\n\n if group.count(\"/\") == 1:\n with fs.open(groups[group][1]) as annotation:\n ds = open_pol_dataset(groups[group][0], annotation, fs=fs)\n elif group.count(\"/\") == 2:\n _, _, metadata = group.split(\"/\", 2)\n with fs.open(groups[group][0]) as file:\n ds = METADATA_OPENERS[metadata](file)\n\n for data_var in ds.data_vars:\n ds.data_vars[data_var].attrs.update(product_attrs)\n\n product_attrs[\"group\"] = absgroup\n if len(subgroups):\n product_attrs[\"subgroups\"] = subgroups\n ds.attrs.update(product_attrs) # type: ignore\n\n if group.count(\"/\") == 1 and burst_index is not None:\n ds = crop_burst_dataset(ds, burst_index=burst_index)\n\n conventions.update_attributes(ds, group=metadata)\n\n return ds\n" ]
[ [ "numpy.allclose", "numpy.timedelta64", "numpy.maximum", "pandas.Timedelta", "numpy.arange", "numpy.log10", "numpy.datetime64", "numpy.array", "numpy.linspace", "numpy.full", "numpy.fromstring" ] ]
Christensen-Lab-Dartmouth/MethylCapsNet
[ "17b6b19809c5e1984de804eb34cc7494210f91a6" ]
[ "methylcapsnet/.ipynb_checkpoints/methylcaps_model_-checkpoint.py" ]
[ "import pandas as pd\nfrom pymethylprocess.MethylationDataTypes import MethylationArray\nfrom sklearn.metrics import mean_absolute_error, r2_score\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom pybedtools import BedTool\nimport numpy as np\nfrom functools import reduce\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport os\nimport pysnooper\nimport argparse\nimport pickle\nfrom sklearn.metrics import classification_report\nimport click\nimport methylcapsnet\nfrom methylcapsnet.build_capsules import *\nfrom methylcapsnet.methylcaps_data_models import *\nimport sqlite3\nimport os\nimport glob\nimport dask\nfrom dask.diagnostics import ProgressBar\nfrom pathos.multiprocessing import Pool\nimport multiprocessing\nimport dask.bag as db\nfrom distributed import Client, LocalCluster, get_task_stream\nRANDOM_SEED=42\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\[email protected]('train.log')\ndef model_capsnet_(train_methyl_array='train_val_test_sets/train_methyl_array.pkl',\n\t\t\t\t\tval_methyl_array='train_val_test_sets/val_methyl_array.pkl',\n\t\t\t\t\tinterest_col='disease',\n\t\t\t\t\tn_epochs=10,\n\t\t\t\t\tn_bins=0,\n\t\t\t\t\tbin_len=1000000,\n\t\t\t\t\tmin_capsule_len=300,\n\t\t\t\t\tprimary_caps_out_len=45,\n\t\t\t\t\tcaps_out_len=45,\n\t\t\t\t\thidden_topology='30,80,50',\n\t\t\t\t\tgamma=1e-2,\n\t\t\t\t\tdecoder_topology='100,300',\n\t\t\t\t\tlearning_rate=1e-2,\n\t\t\t\t\trouting_iterations=3,\n\t\t\t\t\toverlap=0.,\n\t\t\t\t\tcustom_loss='none',\n\t\t\t\t\tgamma2=1e-2,\n\t\t\t\t\tjob=0,\n\t\t\t\t\tcapsule_choice=['genomic_binned'],\n\t\t\t\t\tcustom_capsule_file='',\n\t\t\t\t\ttest_methyl_array='',\n\t\t\t\t\tpredict=False,\n\t\t\t\t\tbatch_size=16,\n\t\t\t\t\tlimited_capsule_names_file='',\n\t\t\t\t\tgsea_superset='',\n\t\t\t\t\ttissue='',\n\t\t\t\t\tnumber_sets=25,\n\t\t\t\t\tuse_set=False,\n\t\t\t\t\tgene_context=False,\n\t\t\t\t\tselect_subtypes=[],\n\t\t\t\t\tfit_spw=False,\n\t\t\t\t\tl1_l2='',\n\t\t\t\t\tcustom_capsule_file2='',\n\t\t\t\t\tmin_capsules=5):\n\n\tcapsule_choice=list(capsule_choice)\n\t#custom_capsule_file=list(custom_capsule_file)\n\thlt_list=filter(None,hidden_topology.split(','))\n\tif hlt_list:\n\t\thidden_topology=list(map(int,hlt_list))\n\telse:\n\t\thidden_topology=[]\n\thlt_list=filter(None,decoder_topology.split(','))\n\tif hlt_list:\n\t\tdecoder_topology=list(map(int,hlt_list))\n\telse:\n\t\tdecoder_topology=[]\n\n\thidden_caps_layers=[]\n\tinclude_last=False\n\n\tma=MethylationArray.from_pickle(train_methyl_array)\n\tma_v=MethylationArray.from_pickle(val_methyl_array)\n\tif test_methyl_array and predict:\n\t\tma_t=MethylationArray.from_pickle(test_methyl_array)\n\n\ttry:\n\t\tma.remove_na_samples(interest_col)\n\t\tma_v.remove_na_samples(interest_col)\n\t\tif test_methyl_array and predict:\n\t\t\tma_t.remove_na_samples(interest_col)\n\texcept:\n\t\tpass\n\n\tif select_subtypes:\n\t\tprint(ma.pheno[interest_col].unique())\n\t\tma.pheno=ma.pheno.loc[ma.pheno[interest_col].isin(select_subtypes)]\n\t\tma.beta=ma.beta.loc[ma.pheno.index]\n\t\tma_v.pheno=ma_v.pheno.loc[ma_v.pheno[interest_col].isin(select_subtypes)]\n\t\tma_v.beta=ma_v.beta.loc[ma_v.pheno.index]\n\t\tprint(ma.pheno[interest_col].unique())\n\n\t\tif test_methyl_array and 
predict:\n\t\t\tma_t.pheno=ma_t.pheno.loc[ma_t.pheno[interest_col].isin(select_subtypes)]\n\t\t\tma_t.beta=ma_t.beta.loc[ma_t.pheno.index]\n\n\tif custom_capsule_file2 and os.path.exists(custom_capsule_file2):\n\t\tcapsules_dict=torch.load(custom_capsule_file2)\n\t\tfinal_modules, modulecpgs, module_names=capsules_dict['final_modules'], capsules_dict['modulecpgs'], capsules_dict['module_names']\n\t\tif min_capsule_len>1:\n\t\t\tinclude_capsules=[len(x)>min_capsule_len for x in final_modules]\n\t\t\tfinal_modules=[final_modules[i] for i in range(len(final_modules)) if include_capsules[i]]\n\t\t\tmodule_names=[module_names[i] for i in range(len(module_names)) if include_capsules[i]]\n\t\t\tmodulecpgs=(reduce(np.union1d,final_modules)).tolist()\n\n\telse:\n\t\tfinal_modules, modulecpgs, module_names=build_capsules(capsule_choice,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toverlap,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbin_len,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tma,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tinclude_last,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmin_capsule_len,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcustom_capsule_file,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgsea_superset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttissue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgene_context,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tuse_set,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnumber_sets,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlimited_capsule_names_file)\n\t\tif custom_capsule_file2:\n\t\t\ttorch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names),custom_capsule_file2)\n\n\tassert len(final_modules) >= min_capsules , \"Below the number of allowed capsules.\"\n\n\tif fit_spw:\n\t\tmodulecpgs=list(reduce(lambda x,y:np.hstack((x,y)),final_modules))\n\n\tif not include_last: # ERROR HAPPENS HERE!\n\t\tma.beta=ma.beta.loc[:,modulecpgs]\n\t\tma_v.beta=ma_v.beta.loc[:,modulecpgs]\n\t\tif test_methyl_array and predict:\n\t\t\tma_t.beta=ma_t.beta.loc[:,modulecpgs]\n\t# https://github.com/higgsfield/Capsule-Network-Tutorial/blob/master/Capsule%20Network.ipynb\n\toriginal_interest_col=interest_col\n\tif n_bins:\n\t\tnew_interest_col=interest_col+'_binned'\n\t\tma.pheno.loc[:,new_interest_col],bins=pd.cut(ma.pheno[interest_col],bins=n_bins,retbins=True)\n\t\tma_v.pheno.loc[:,new_interest_col],_=pd.cut(ma_v.pheno[interest_col],bins=bins,retbins=True)\n\t\tif test_methyl_array and predict:\n\t\t\tma_t.pheno.loc[:,new_interest_col],_=pd.cut(ma_t.pheno[interest_col],bins=bins,retbins=True)\n\t\tinterest_col=new_interest_col\n\n\tdatasets=dict()\n\n\tdatasets['train']=MethylationDataset(ma,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)\n\tprint(datasets['train'].X.isnull().sum().sum())\n\tdatasets['val']=MethylationDataset(ma_v,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)\n\tif test_methyl_array and predict:\n\t\tdatasets['test']=MethylationDataset(ma_t,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)\n\n\tdataloaders=dict()\n\n\tdataloaders['train']=DataLoader(datasets['train'],batch_size=batch_size,shuffle=True,num_workers=8, pin_memory=True, drop_last=True)\n\tdataloaders['val']=DataLoader(datasets['val'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)\n\tn_primary=len(final_modules)\n\tif test_methyl_array and 
predict:\n\t\tdataloaders['test']=DataLoader(datasets['test'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)\n\n\tn_inputs=list(map(len,final_modules))\n\n\tn_out_caps=len(datasets['train'].y_unique)\n\n\tif not fit_spw:\n\t\tprint(\"Not fitting MethylSPWNet\")\n\t\tprimary_caps = PrimaryCaps(modules=final_modules,hidden_topology=hidden_topology,n_output=primary_caps_out_len)\n\t\thidden_caps = []\n\t\toutput_caps = CapsLayer(n_out_caps,n_primary,primary_caps_out_len,caps_out_len,routing_iterations=routing_iterations)\n\t\tdecoder = Decoder(n_out_caps*caps_out_len,len(list(ma.beta)),decoder_topology)\n\t\tmodel = CapsNet(primary_caps, hidden_caps, output_caps, decoder, gamma=gamma)\n\n\t\tif test_methyl_array and predict:\n\t\t\tmodel.load_state_dict(torch.load('capsnet_model.pkl'))\n\n\n\telse:\n\t\tprint(\"Fitting MethylSPWNet\")\n\t\tmodule_lens=[len(x) for x in final_modules]\n\t\tmodel=MethylSPWNet(module_lens, hidden_topology, dropout_p=0.2, n_output=n_out_caps)\n\t\tif test_methyl_array and predict:\n\t\t\tmodel.load_state_dict(torch.load('spwnet_model.pkl'))\n\n\tif torch.cuda.is_available():\n\t\tmodel=model.cuda()\n\n\n\t# extract all c_ij for all layers across all batches, or just last batch\n\n\tif l1_l2 and fit_spw:\n\t\tl1,l2=list(map(float,l1_l2.split(',')))\n\telif fit_spw:\n\t\tl1,l2=0.,0.\n\n\ttrainer=Trainer(model=model,\n\t\t\t\t\tvalidation_dataloader=dataloaders['val'],\n\t\t\t\t\tn_epochs=n_epochs,\n\t\t\t\t\tlr=learning_rate,\n\t\t\t\t\tn_primary=n_primary,\n\t\t\t\t\tcustom_loss=custom_loss,\n\t\t\t\t\tgamma2=gamma2,\n\t\t\t\t\tspw_mode=fit_spw,\n\t\t\t\t\tl1=l1 if fit_spw else 0.,\n\t\t\t\t\tl2=l2 if fit_spw else 0.)\n\n\tif not predict:\n\t\ttry:\n\t\t\t#assert 1==2\n\t\t\ttrainer.fit(dataloader=dataloaders['train'])\n\t\t\tval_loss=min(trainer.val_losses)\n\t\t\ttorch.save(trainer.model.state_dict(),'capsnet_model.pkl' if not fit_spw else 'spwnet_model.pkl')\n\t\t\tif fit_spw:\n\t\t\t\ttorch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names), 'spwnet_capsules.pkl')\n\t\t\t\ttorch.save(dict(module_names=module_names,module_lens=module_lens,dropout_p=0.2,hidden_topology=hidden_topology,n_output=n_out_caps),'spwnet_config.pkl')\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tval_loss=-2\n\n\t\twith sqlite3.connect('jobs.db', check_same_thread=False) as conn:\n\t\t\tpd.DataFrame([job,val_loss],index=['job','val_loss'],columns=[0]).T.to_sql('val_loss',conn,if_exists='append')\n\telse:\n\t\tif test_methyl_array:\n\t\t\ttrainer.weights=1.\n\t\t\tY=trainer.predict(dataloaders['test'])\n\t\t\tpickle.dump(Y,open('predictions.pkl','wb'))\n\t\t\tval_loss=-1\n\t#print(val_loss)\n\t# print([min(trainer.val_losses),n_epochs,\n\t# \t\tn_bins,\n\t# \t\tbin_len,\n\t# \t\tmin_capsule_len,\n\t# \t\tprimary_caps_out_len,\n\t# \t\tcaps_out_len,\n\t# \t\thidden_topology,\n\t# \t\tgamma,\n\t# \t\tdecoder_topology,\n\t# \t\tlearning_rate,\n\t# \t\trouting_iterations])\n\n\n\n\treturn val_loss\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "torch.manual_seed", "pandas.DataFrame", "numpy.random.seed", "numpy.hstack", "torch.cuda.is_available", "pandas.cut" ] ]
Jeremy-Tian/Data-Lake
[ "62d2aad31e924ffc536cca98001da7671a7a9fde" ]
[ "Sprint3 Creating Redshift Cluster.py" ]
[ "\nimport pandas as pd\nimport boto3\nimport json\n\n\n\nimport configparser\nconfig = configparser.ConfigParser()\nconfig.read_file(open('dwh.cfg'))\n\nKEY = config.get('AWS','KEY')\nSECRET = config.get('AWS','SECRET')\n\nDWH_CLUSTER_TYPE = config.get(\"DWH\",\"DWH_CLUSTER_TYPE\")\nDWH_NUM_NODES = config.get(\"DWH\",\"DWH_NUM_NODES\")\nDWH_NODE_TYPE = config.get(\"DWH\",\"DWH_NODE_TYPE\")\n\nDWH_CLUSTER_IDENTIFIER = config.get(\"DWH\",\"DWH_CLUSTER_IDENTIFIER\")\nDWH_DB = config.get(\"DWH\",\"DWH_DB\")\nDWH_DB_USER = config.get(\"DWH\",\"DWH_DB_USER\")\nDWH_DB_PASSWORD = config.get(\"DWH\",\"DWH_DB_PASSWORD\")\nDWH_PORT = config.get(\"DWH\",\"DWH_PORT\")\n\nDWH_IAM_ROLE_NAME = config.get(\"DWH\", \"DWH_IAM_ROLE_NAME\")\n\n(DWH_DB_USER, DWH_DB_PASSWORD, DWH_DB)\n\npd.DataFrame({\"Param\":\n [\"DWH_CLUSTER_TYPE\", \"DWH_NUM_NODES\", \"DWH_NODE_TYPE\", \"DWH_CLUSTER_IDENTIFIER\", \"DWH_DB\", \"DWH_DB_USER\", \"DWH_DB_PASSWORD\", \"DWH_PORT\", \"DWH_IAM_ROLE_NAME\"],\n \"Value\":\n [DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE, DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER, DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME]\n })\n\n\n# # Create clients for IAM, EC2, S3 and Redshift\n\n# In[69]:\n\n\nimport boto3\n\nec2 = boto3.resource('ec2',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\ns3 = boto3.resource('s3',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\niam = boto3.client('iam',aws_access_key_id=KEY,\n aws_secret_access_key=SECRET,\n region_name='us-west-2'\n )\n\nredshift = boto3.client('redshift',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n\n\nsampleDbBucket = s3.Bucket(\"awssampledbuswest2\")\nfor obj in sampleDbBucket.objects.filter(Prefix=\"ssbgz\"):\n print(obj)\n\n\n\nfrom botocore.exceptions import ClientError\n\n#1.1 Create the role, \ntry:\n print(\"1.1 Creating a new IAM Role\") \n dwhRole = iam.create_role(\n Path='/',\n RoleName=DWH_IAM_ROLE_NAME,\n Description = \"Allows Redshift clusters to call AWS services on your behalf.\",\n AssumeRolePolicyDocument=json.dumps(\n {'Statement': [{'Action': 'sts:AssumeRole',\n 'Effect': 'Allow',\n 'Principal': {'Service': 'redshift.amazonaws.com'}}],\n 'Version': '2012-10-17'})\n ) \nexcept Exception as e:\n print(e)\n \n \nprint(\"1.2 Attaching Policy\")\n\niam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )['ResponseMetadata']['HTTPStatusCode']\n\nprint(\"1.3 Get the IAM role ARN\")\nroleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']\n\nprint(roleArn)\n\n\n# # STEP 2: Redshift Cluster\n# \n# - Create a RedShift Cluster\n\n\n# In[83]:\n\n\ntry:\n response = redshift.create_cluster( \n #HW\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n #Identifiers & Credentials\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n \n #Roles (for s3 access)\n IamRoles=[roleArn] \n )\nexcept Exception as e:\n print(e)\n\n\n# ## 2.1 *Describe* the cluster to see its status\n\n\n\n\ndef prettyRedshiftProps(props):\n pd.set_option('display.max_colwidth', -1)\n keysToShow = [\"ClusterIdentifier\", \"NodeType\", \"ClusterStatus\", \"MasterUsername\", \"DBName\", \"Endpoint\", \"NumberOfNodes\", 'VpcId']\n x = [(k, v) for k,v in props.items() if k in keysToShow]\n return pd.DataFrame(data=x, columns=[\"Key\", 
\"Value\"])\n\nmyClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]\nprettyRedshiftProps(myClusterProps)\n\n\n# 2.2 Take note of the cluster <font color='red'> endpoint and role ARN </font> </h2>\n\n\nDWH_ENDPOINT = myClusterProps['Endpoint']['Address']\nDWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']\nprint(\"DWH_ENDPOINT :: \", endpoint)\nprint(\"DWH_ROLE_ARN :: \", roleArn)\n\n\n# ## STEP 3: Open an incoming TCP port to access the cluster ednpoint\n\n# In[84]:\n\n\ntry:\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\nexcept Exception as e:\n print(e)\n\n\n# # STEP 4: Make sure you can connect to the cluster\n\n\n\nget_ipython().run_line_magic('load_ext', 'sql')\n\n\n\n\nconn_string=\"postgresql://{}:{}@{}:{}/{}\".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB)\nprint(conn_string)\nget_ipython().run_line_magic('sql', '$conn_string')\n\n\n" ]
[ [ "pandas.DataFrame", "pandas.set_option" ] ]
parthpatwa/autokeras
[ "2b23d870e91afdd2bc12663ff6e00e9df9ef855c" ]
[ "autokeras/task.py" ]
[ "import pandas as pd\n\nfrom autokeras import auto_model\nfrom autokeras.hypermodel import head\nfrom autokeras.hypermodel import node\n\n\nclass SupervisedImagePipeline(auto_model.AutoModel):\n\n def __init__(self, outputs, **kwargs):\n super().__init__(inputs=node.ImageInput(),\n outputs=outputs,\n **kwargs)\n\n\nclass ImageClassifier(SupervisedImagePipeline):\n \"\"\"AutoKeras image classification class.\n\n # Arguments\n num_classes: Int. Defaults to None. If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'binary_crossentropy' or\n 'categorical_crossentropy' based on the number of classes.\n metrics: A list of Keras metrics. Defaults to use 'accuracy'.\n name: String. The name of the AutoModel. Defaults to 'image_classifier'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n num_classes=None,\n multi_label=False,\n loss=None,\n metrics=None,\n name='image_classifier',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.ClassificationHead(num_classes=num_classes,\n multi_label=multi_label,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass ImageRegressor(SupervisedImagePipeline):\n \"\"\"AutoKeras image regression class.\n\n # Arguments\n output_dim: Int. The number of output dimensions. Defaults to None.\n If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n name: String. The name of the AutoModel. Defaults to 'image_regressor'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n output_dim=None,\n loss=None,\n metrics=None,\n name='image_regressor',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.RegressionHead(output_dim=output_dim,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass SupervisedTextPipeline(auto_model.AutoModel):\n\n def __init__(self, outputs, **kwargs):\n super().__init__(inputs=node.TextInput(),\n outputs=outputs,\n **kwargs)\n\n\nclass TextClassifier(SupervisedTextPipeline):\n \"\"\"AutoKeras text classification class.\n\n # Arguments\n num_classes: Int. Defaults to None. If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. 
Defaults to use 'binary_crossentropy' or\n 'categorical_crossentropy' based on the number of classes.\n metrics: A list of Keras metrics. Defaults to use 'accuracy'.\n name: String. The name of the AutoModel. Defaults to 'text_classifier'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n num_classes=None,\n multi_label=False,\n loss=None,\n metrics=None,\n name='text_classifier',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.ClassificationHead(num_classes=num_classes,\n multi_label=multi_label,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass TextRegressor(SupervisedTextPipeline):\n \"\"\"AutoKeras text regression class.\n\n # Arguments\n output_dim: Int. The number of output dimensions. Defaults to None.\n If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n name: String. The name of the AutoModel. Defaults to 'text_regressor'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n output_dim=None,\n loss=None,\n metrics=None,\n name='text_regressor',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.RegressionHead(output_dim=output_dim,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass SupervisedStructuredDataPipeline(auto_model.AutoModel):\n\n def __init__(self, outputs, column_names, column_types, **kwargs):\n inputs = node.StructuredDataInput()\n inputs.column_types = column_types\n inputs.column_names = column_names\n if column_types:\n for column_type in column_types.values():\n if column_type not in ['categorical', 'numerical']:\n raise ValueError(\n 'Column_types should be either \"categorical\" '\n 'or \"numerical\", but got {name}'.format(name=column_type))\n if column_names and column_types:\n for column_name in column_types:\n if column_name not in column_names:\n raise ValueError('Column_names and column_types are '\n 'mismatched. 
Cannot find column name '\n '{name} in the data.'.format(name=column_name))\n super().__init__(inputs=inputs,\n outputs=outputs,\n **kwargs)\n self._target_col_name = None\n\n def _read_from_csv(self, x, y):\n df = pd.read_csv(x)\n target = df.pop(y).to_numpy()\n return df, target\n\n def fit(self,\n x=None,\n y=None,\n epochs=None,\n callbacks=None,\n validation_split=0,\n validation_data=None,\n **kwargs):\n \"\"\"Search for the best model and hyperparameters for the task.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the training data.\n y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a string corresponding\n to the label column.\n epochs: Int. The number of epochs to train each model during the search.\n If unspecified, we would use epochs equal to 1000 and early stopping\n with patience equal to 30.\n callbacks: List of Keras callbacks to apply during training and\n validation.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset.\n The best model found would be fit on the entire dataset including the\n validation data.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n The best model found would be fit on the training dataset without the\n validation data.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n # x is file path of training data\n if isinstance(x, str):\n self._target_column_name = y\n x, y = self._read_from_csv(x, y)\n if validation_data:\n x_val, y_val = validation_data\n if isinstance(x_val, str):\n validation_data = self._read_from_csv(x_val, y_val)\n\n super().fit(x=x,\n y=y,\n epochs=epochs,\n callbacks=callbacks,\n validation_split=validation_split,\n validation_data=validation_data,\n **kwargs)\n\n def predict(self, x, batch_size=32, **kwargs):\n \"\"\"Predict the output for a given testing data.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Testing data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the testing data.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.predict.\n\n # Returns\n A list of numpy.ndarray objects or a single numpy.ndarray.\n The predicted results.\n \"\"\"\n if isinstance(x, str):\n x = pd.read_csv(x)\n if self._target_col_name in x:\n x.pop(self._target_col_name)\n\n return super().predict(x=x,\n batch_size=batch_size,\n **kwargs)\n\n def evaluate(self, x, y=None, batch_size=32, **kwargs):\n \"\"\"Evaluate the best model for the given data.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Testing data x. 
If the data is from a csv file, it should be a\n string specifying the path of the csv file of the testing data.\n y: String, numpy.ndarray, or tensorflow.Dataset. Testing data y.\n If the data is from a csv file, it should be a string corresponding\n to the label column.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.evaluate.\n\n # Returns\n Scalar test loss (if the model has a single output and no metrics) or\n list of scalars (if the model has multiple outputs and/or metrics).\n The attribute model.metrics_names will give you the display labels for\n the scalar outputs.\n \"\"\"\n if isinstance(x, str):\n x, y = self._read_from_csv(x, y)\n return super().evaluate(x=x,\n y=y,\n batch_size=batch_size,\n **kwargs)\n\n\nclass StructuredDataClassifier(SupervisedStructuredDataPipeline):\n \"\"\"AutoKeras structured data classification class.\n\n # Arguments\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. If not None, the column_names need to be specified.\n If None, it will be inferred from the data.\n num_classes: Int. Defaults to None. If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'binary_crossentropy' or\n 'categorical_crossentropy' based on the number of classes.\n metrics: A list of Keras metrics. Defaults to use 'accuracy'.\n name: String. The name of the AutoModel. Defaults to\n 'structured_data_classifier'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize. Defaults to 'val_accuracy'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n column_names=None,\n column_types=None,\n num_classes=None,\n multi_label=False,\n loss=None,\n metrics=None,\n name='structured_data_classifier',\n max_trials=100,\n directory=None,\n objective='val_accuracy',\n seed=None):\n super().__init__(\n outputs=head.ClassificationHead(num_classes=num_classes,\n multi_label=multi_label,\n loss=loss,\n metrics=metrics),\n column_names=column_names,\n column_types=column_types,\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n def fit(self,\n x=None,\n y=None,\n epochs=None,\n callbacks=None,\n validation_split=0,\n validation_data=None,\n **kwargs):\n \"\"\"Search for the best model and hyperparameters for the task.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the training data.\n y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a string corresponding\n to the label column.\n epochs: Int. 
The number of epochs to train each model during the search.\n If unspecified, we would use epochs equal to 1000 and early stopping\n with patience equal to 30.\n callbacks: List of Keras callbacks to apply during training and\n validation.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n super().fit(x=x,\n y=y,\n epochs=epochs,\n callbacks=callbacks,\n validation_split=validation_split,\n validation_data=validation_data,\n **kwargs)\n\n\nclass StructuredDataRegressor(SupervisedStructuredDataPipeline):\n \"\"\"AutoKeras structured data regression class.\n\n # Arguments\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. If not None, the column_names need to be specified.\n If None, it will be inferred from the data.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n column_names=None,\n column_types=None,\n output_dim=None,\n loss=None,\n metrics=None,\n name='structured_data_regressor',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.RegressionHead(output_dim=output_dim,\n loss=loss,\n metrics=metrics),\n column_names=column_names,\n column_types=column_types,\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n" ]
[ [ "pandas.read_csv" ] ]
bhbai/tensorflow
[ "d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3", "d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3", "d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3" ]
[ "tensorflow/python/debug/wrappers/framework.py", "tensorflow/contrib/distributions/python/ops/inverse_gamma.py", "tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Framework of debug wrapper sessions.\n\nA debug wrapper session is a wrapper around a TensorFlow Python Session.\nThe wrapper preserves the Session interface, most importantly the run() method,\nwhile providing abilities to:\na) Intercept a run() call to a wrapped session and insert debug tensor watches\n according to externally-specified debug URLs.\n\nb) Release control to an external (i.e., non-Session) object before and after\n the run() call, so that the external object can perform actions such as\n launching a UI to let users inspect the intermediate tensors and partition\n graphs from the run() call.\n\nc) (To be implemented) Intercept a run() call and give control to DebugStepper\n to let it perform stepping / continuing-to actions on the graph.\n\nb) (To be implemented in a future CL) Enter an instruction loop to let an\n external object (e.g., remote client) launch run() and cont() calls\n remotely.\n\n*** The lifetime of a debug wrapper session: ***\n\n1) The wrapper session is created by calling the constructor with a\n wrapped (normal) session as the argument:\n wrapper = FooDebugWrapperSession(sess)\n wherein FooDebugWrapperSession is a concrete subclass implementing the\n abstract BaseDebugWrapperSession class below.\n\n2) Near the end of the constructor call, the on_session_init() callback is\n invoked, with a OnSessionInitRequest object as the argument. The object\n carries the wrapped (normal) session object.\n\n3) The callback handles the request and returns a OnSessionInitResponse\n object with an action field, directing the wrapper session what to do next.\n\nIf the action field in the OnSessionInitResponse is PROCEED, the constuctor\nreturns. Control is released back to the caller of the constructor, which can\ninvoke run() method of wrapper session with the same syntax as a non-wrapped\nsession, e.g.,:\n wrapper.run(fetches, feed_dict=feeds, options=run_options)\n\nBelow, A1 - A2 is the lifetime of a wrapper run() call if the action is\nPROCEED:\n\nA1) Right at the start of each run() call, the on_run_start() callback is\n invoked, with an OnRunStartRequest object carrying information such as\n the fetches, the feed dict, the run options and run metadata used in\n this run call, along with a count of how many run calls has occurred\n on this wrapper session. The callback then returns an OnRunStartResponse\n object, of which the action field directs what the wrapper session\n actually will do of the run() call.\n\n If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,\n with the debug URLs supplied in the debug_urls field of the response.\n These can be file:// or grpc:// URLs, for example.\n\n If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.\n\n If the action is INVOKE_STEPPER, no run() call will be issued to the\n wrapped session. 
But instead, a DebugStepper (i.e., \"continuation\n debugger\") will be used to perform stepping / continue-to actions on\n the graph.\n\nTODO(cais): The event loop for the DebugStepper will request additional\n callbacks including on_cont_start() and on_cont_end(). Add those.\n\nA2) Right before the run() returns, the on_run_end() callback is invoked,\n with an OnRunEndRequest object as the argument, which carries information\n including the actual action performed in the warpper run() call and the\n run_metadata from the run() call.\n\nHowever, if the action field in OnSessionInitResponse is\nREMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop\nthat gives the control to a remote caller.\n\nIn the remote instruction loop, the following steps will happen:\n\nB1) Callback on_instr_start() is invoked. The callback will return an\n OnInstrStartResponse object with an action field which can order one of\n the following actions:\n i) a run() call with fetches, feeds and debug_urls specified.\n ii) a DebugStepper cont() call with target specified.\n iii) value overrides in the cached tensors from the DebugStepper.\n iv) exit the instruction loop.\n\nB2) The wrapper session carries out the action specified above.\n\nB3) If still in the instruction loop, the wrapper session invokes the\n on_instr_end() callback. After the on_instr_end() callback returns, jump\n back to B1.\n\nTODO(cais): Implemented the instruction loop in B1 - B3.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug import debug_utils\nfrom tensorflow.python.debug import stepper\nfrom tensorflow.python.framework import errors\n\n\n# Helper function.\ndef _check_type(obj, expected_type):\n \"\"\"Check if an object is of the expected type.\n\n Args:\n obj: The object being checked.\n expected_type: (type) The expected type of obj.\n\n Raises:\n TypeError: If obj is not an instance of expected_type.\n \"\"\"\n if not isinstance(obj, expected_type):\n raise TypeError(\"Expected type %s; got type %s\" %\n (expected_type, type(obj)))\n\n\nclass OnSessionInitRequest(object):\n \"\"\"Request to an on-session-init callback.\n\n This callback is invoked during the __init__ call to a debug-wrapper session.\n \"\"\"\n\n def __init__(self, sess):\n \"\"\"Constructor.\n\n Args:\n sess: A tensorflow Session object.\n \"\"\"\n\n _check_type(sess, session.BaseSession)\n self.session = sess\n\n\nclass OnSessionInitAction(object):\n \"\"\"Enum-like values for possible action to take on session init.\"\"\"\n\n # Proceed, without special actions, in the wrapper session initialization.\n # What action the wrapper session performs next is determined by the caller\n # of the wrapper session. 
E.g., it can call run().\n PROCEED = \"proceed\"\n\n # Instead of letting the caller of the wrapper session determine what actions\n # the wrapper session will perform next, enter a loop to receive instructions\n # from a remote client.\n # For example, TensorBoard visual debugger can use this action so that it can\n # launch session.run() calls remotely.\n REMOTE_INSTR_LOOP = \"remote_instr_loop\"\n\n\nclass OnSessionInitResponse(object):\n \"\"\"Response from an on-session-init callback.\"\"\"\n\n def __init__(self, action):\n \"\"\"Constructor.\n\n Args:\n action: (`OnSessionInitAction`) Debugger action to take on session init.\n \"\"\"\n _check_type(action, str)\n self.action = action\n\n\nclass OnRunStartRequest(object):\n \"\"\"Request to an on-run-start callback.\n\n This callback is invoked during a run() call of the debug-wrapper\n session, immediately after the run() call counter is incremented.\n \"\"\"\n\n def __init__(self, fetches, feed_dict, run_options, run_metadata,\n run_call_count):\n \"\"\"Constructor of `OnRunStartRequest`.\n\n Args:\n fetches: Fetch targets of the run() call.\n feed_dict: The feed dictionary to the run() call.\n run_options: RunOptions input to the run() call.\n run_metadata: RunMetadata input to the run() call.\n The above four arguments are identical to the input arguments to the\n run() method of a non-wrapped TensorFlow session.\n run_call_count: 1-based count of how many run calls (including this one)\n has been invoked.\n \"\"\"\n self.fetches = fetches\n self.feed_dict = feed_dict\n self.run_options = run_options\n self.run_metadata = run_metadata\n self.run_call_count = run_call_count\n\n\nclass OnRunStartAction(object):\n \"\"\"Enum-like values for possible action to take on start of a run() call.\"\"\"\n\n # Run once with debug tensor-watching.\n DEBUG_RUN = \"debug_run\"\n\n # Run without debug tensor-watching.\n NON_DEBUG_RUN = \"non_debug_run\"\n\n # Instead of running the fetches as a whole, as would normally happen, invoke\n # the (to-be-implemented) debug stepper.\n # TODO(cais): Remove \"to-be-implemented\".\n INVOKE_STEPPER = \"invoke_stepper\"\n\n\nclass OnRunStartResponse(object):\n \"\"\"Request from an on-run-start callback.\n\n The caller of the callback can use this response object to specify what\n action the debug-wrapper session actually takes on the run() call.\n \"\"\"\n\n def __init__(self, action, debug_urls):\n \"\"\"Constructor of `OnRunStartResponse`.\n\n Args:\n action: (`OnRunStartAction`) the action actually taken by the wrapped\n session for the run() call.\n debug_urls: (list of str) debug_urls used in watching the tensors during\n the run() call.\n \"\"\"\n\n _check_type(action, str)\n self.action = action\n\n _check_type(debug_urls, list)\n self.debug_urls = debug_urls\n\n\nclass OnRunEndRequest(object):\n \"\"\"Request to an on-run-end callback.\n\n The callback is invoked immediately before the wrapped run() call ends.\n \"\"\"\n\n def __init__(self,\n performed_action,\n run_metadata=None,\n client_graph_def=None,\n tf_error=None):\n \"\"\"Constructor for `OnRunEndRequest`.\n\n Args:\n performed_action: (`OnRunStartAction`) Actually-performed action by the\n debug-wrapper session.\n run_metadata: run_metadata output from the run() call (if any).\n client_graph_def: (GraphDef) GraphDef from the client side, i.e., from\n the python front end of TensorFlow. 
Can be obtained with\n session.graph.as_graph_def().\n tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred\n during the run (if any).\n \"\"\"\n\n _check_type(performed_action, str)\n self.performed_action = performed_action\n\n if run_metadata is not None:\n _check_type(run_metadata, config_pb2.RunMetadata)\n self.run_metadata = run_metadata\n self.client_graph_def = client_graph_def\n self.tf_error = tf_error\n\n\nclass OnRunEndResponse(object):\n \"\"\"Response from an on-run-end callback.\"\"\"\n\n def __init__(self):\n\n # Currently only a placeholder.\n pass\n\n\nclass BaseDebugWrapperSession(session.SessionInterface):\n \"\"\"Base class of debug-wrapper session classes.\n\n Concrete classes that inherit from this class need to implement the abstract\n methods such as on_session_init, on_run_start and on_run_end.\n \"\"\"\n\n # TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper is\n # is available.\n\n def __init__(self, sess):\n \"\"\"Constructor of `BaseDebugWrapperSession`.\n\n Args:\n sess: An (unwrapped) TensorFlow session instance.\n\n Raises:\n ValueError: On invalid `OnSessionInitAction` value.\n \"\"\"\n\n _check_type(sess, session.BaseSession)\n\n # The session being wrapped.\n self._sess = sess\n\n # Keeps track of number of run calls that have been performed on this\n # debug-wrapper session.\n self._run_call_count = 0\n\n # Invoke on-session-init callback.\n response = self.on_session_init(OnSessionInitRequest(self._sess))\n _check_type(response, OnSessionInitResponse)\n\n if response.action == OnSessionInitAction.PROCEED:\n pass\n elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:\n # TODO(cais): Implement REMOTE_INSTR_LOOP\n raise NotImplementedError(\n \"OnSessionInitAction REMOTE_INSTR_LOOP has not been \"\n \"implemented.\")\n else:\n raise ValueError(\n \"Invalid OnSessionInitAction value: %s\" % response.action)\n\n @property\n def graph(self):\n return self._sess.graph\n\n @property\n def sess_str(self):\n return self._sess.sess_str\n\n @property\n def session(self):\n return self._sess\n\n def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n \"\"\"Wrapper around Session.run() that inserts tensor watch options.\n\n Args:\n fetches: Same as the `fetches` arg to regular `Session.run()`.\n feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.\n options: Same as the `options` arg to regular `Session.run()`.\n run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.\n\n Returns:\n Simply forwards the output of the wrapped `Session.run()` call.\n\n Raises:\n ValueError: On invalid `OnRunStartAction` value.\n \"\"\"\n\n self._run_call_count += 1\n\n # Invoke on-run-start callback and obtain response.\n run_start_resp = self.on_run_start(\n OnRunStartRequest(fetches, feed_dict, options, run_metadata,\n self._run_call_count))\n _check_type(run_start_resp, OnRunStartResponse)\n\n if run_start_resp.action == OnRunStartAction.DEBUG_RUN:\n # Decorate RunOption to fill in debugger tensor watch specifications.\n decorated_run_options = options or config_pb2.RunOptions()\n run_metadata = run_metadata or config_pb2.RunMetadata()\n\n self._decorate_run_options(decorated_run_options,\n run_start_resp.debug_urls)\n\n # Invoke the run() method of the wrapped Session. 
Catch any TensorFlow\n # runtime errors.\n tf_error = None\n try:\n retvals = self._sess.run(fetches,\n feed_dict=feed_dict,\n options=decorated_run_options,\n run_metadata=run_metadata)\n except errors.OpError as op_error:\n tf_error = op_error\n retvals = op_error\n\n run_end_req = OnRunEndRequest(\n run_start_resp.action,\n run_metadata=run_metadata,\n client_graph_def=self._sess.graph.as_graph_def(),\n tf_error=tf_error)\n\n elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or\n run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):\n if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:\n retvals = self.invoke_node_stepper(\n stepper.NodeStepper(self._sess, fetches, feed_dict),\n restore_variable_values_on_exit=True)\n\n # Invoke run() method of the wrapped session.\n retvals = self._sess.run(\n fetches,\n feed_dict=feed_dict,\n options=options,\n run_metadata=run_metadata)\n\n # Prepare arg for the on-run-end callback.\n run_end_req = OnRunEndRequest(run_start_resp.action)\n else:\n raise ValueError(\n \"Invalid OnRunStartAction value: %s\" % run_start_resp.action)\n\n # Invoke on-run-end callback and obtain response.\n run_end_resp = self.on_run_end(run_end_req)\n _check_type(run_end_resp, OnRunEndResponse)\n # Currently run_end_resp is only a placeholder. No action is taken on it.\n\n return retvals\n\n def partial_run_setup(self, fetches, feeds=None):\n \"\"\"Sets up the feeds and fetches for partial runs in the session.\"\"\"\n raise NotImplementedError(\n \"partial_run_setup is not implemented for debug-wrapper sessions.\")\n\n def partial_run(self, handle, fetches, feed_dict=None):\n raise NotImplementedError(\n \"partial_run is not implemented for debug-wrapper sessions.\")\n\n def _decorate_run_options(self, run_options, debug_urls):\n \"\"\"Modify a RunOptions object for debug tensor watching.\n\n Specifies request for outputting partition graphs. 
Adds\n debug_tensor_watch_opts with proper debug URLs.\n\n Args:\n run_options: (RunOptions) the modified RunOptions object.\n debug_urls: (list of str) debug URLs to be entered in run_options.\n debug_tensor_watch_opts.\n \"\"\"\n\n run_options.output_partition_graphs = True\n debug_utils.watch_graph(\n run_options, self._sess.graph, debug_urls=debug_urls)\n\n @abc.abstractmethod\n def on_session_init(self, request):\n \"\"\"Callback invoked during construction of the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens right before the constructor ends.\n\n Args:\n request: (`OnSessionInitRequest`) callback request carrying information\n such as the session being wrapped.\n\n Returns:\n An instance of `OnSessionInitResponse`.\n \"\"\"\n\n @abc.abstractmethod\n def on_run_start(self, request):\n \"\"\"Callback invoked on run() calls to the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens after the wrapper's run() call is entered,\n after an increment of run call counter.\n\n Args:\n request: (`OnRunStartRequest`) callback request object carrying\n information about the run call such as the fetches, feed dict, run\n options, run metadata, and how many `run()` calls to this wrapper\n session have occurred.\n\n Returns:\n An instance of `OnRunStartResponse`, carrying information to\n 1) direct the wrapper session to perform a specified action (e.g., run\n with or without debug tensor watching, invoking the stepper.)\n 2) debug URLs used to watch the tensors.\n \"\"\"\n\n @abc.abstractmethod\n def on_run_end(self, request):\n \"\"\"Callback invoked on run() calls to the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens right before the wrapper exits its run() call.\n\n Args:\n request: (`OnRunEndRequest`) callback request object carrying information\n such as the actual action performed by the session wrapper for the\n run() call.\n\n Returns:\n An instance of `OnRunStartResponse`.\n \"\"\"\n\n def __enter__(self):\n return self._sess.__enter__()\n\n def __exit__(self, exec_type, exec_value, exec_tb):\n self._sess.__exit__(exec_type, exec_value, exec_tb)\n\n def close(self):\n self._sess.close()\n\n # TODO(cais): Add _node_name_regex_whitelist and\n # _node_op_type_regex_whitelist.\n\n @abc.abstractmethod\n def invoke_node_stepper(self,\n node_stepper,\n restore_variable_values_on_exit=True):\n \"\"\"Callback invoked when the client intends to step through graph nodes.\n\n Args:\n node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used\n in this stepping session.\n restore_variable_values_on_exit: (bool) Whether any variables whose values\n have been altered during this node-stepper invocation should be restored\n to their old values when this invocation ends.\n\n Returns:\n The same return values as the `Session.run()` call on the same fetches as\n the NodeStepper.\n \"\"\"\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The InverseGamma distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops import distribution\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import random_ops\n\n\nclass InverseGamma(distribution.Distribution):\n \"\"\"The `InverseGamma` distribution with parameter alpha and beta.\n\n The parameters are the shape and inverse scale parameters alpha, beta.\n\n The PDF of this distribution is:\n\n ```pdf(x) = (beta^alpha)/Gamma(alpha)(x^(-alpha-1))e^(-beta/x), x > 0```\n\n and the CDF of this distribution is:\n\n ```cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0```\n\n where GammaInc is the upper incomplete Gamma function.\n\n Examples:\n\n ```python\n dist = InverseGamma(alpha=3.0, beta=2.0)\n dist2 = InverseGamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])\n ```\n\n \"\"\"\n\n def __init__(self,\n alpha,\n beta,\n validate_args=False,\n allow_nan_stats=True,\n name=\"InverseGamma\"):\n \"\"\"Construct InverseGamma distributions with parameters `alpha` and `beta`.\n\n The parameters `alpha` and `beta` must be shaped in a way that supports\n broadcasting (e.g. `alpha + beta` is a valid operation).\n\n Args:\n alpha: Floating point tensor, the shape params of the\n distribution(s).\n alpha must contain only positive values.\n beta: Floating point tensor, the scale params of the distribution(s).\n beta must contain only positive values.\n validate_args: `Boolean`, default `False`. Whether to assert that\n `a > 0`, `b > 0`, and that `x > 0` in the methods `prob(x)` and\n `log_prob(x)`. If `validate_args` is `False` and the inputs are\n invalid, correct behavior is not guaranteed.\n allow_nan_stats: `Boolean`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. 
If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: The name to prepend to all ops created by this distribution.\n\n Raises:\n TypeError: if `alpha` and `beta` are different dtypes.\n \"\"\"\n parameters = locals()\n parameters.pop(\"self\")\n with ops.name_scope(name, values=[alpha, beta]) as ns:\n with ops.control_dependencies([\n check_ops.assert_positive(alpha),\n check_ops.assert_positive(beta),\n ] if validate_args else []):\n self._alpha = array_ops.identity(alpha, name=\"alpha\")\n self._beta = array_ops.identity(beta, name=\"beta\")\n super(InverseGamma, self).__init__(\n dtype=self._alpha.dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n is_continuous=True,\n is_reparameterized=False,\n parameters=parameters,\n graph_parents=[self._alpha, self._beta],\n name=ns)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return dict(\n zip((\"alpha\", \"beta\"), ([ops.convert_to_tensor(\n sample_shape, dtype=dtypes.int32)] * 2)))\n\n @property\n def alpha(self):\n \"\"\"Shape parameter.\"\"\"\n return self._alpha\n\n @property\n def beta(self):\n \"\"\"Scale parameter.\"\"\"\n return self._beta\n\n def _batch_shape(self):\n return array_ops.broadcast_dynamic_shape(\n array_ops.shape(self.alpha), array_ops.shape(self.beta))\n\n def _get_batch_shape(self):\n return array_ops.broadcast_static_shape(\n self.alpha.get_shape(), self.beta.get_shape())\n\n def _event_shape(self):\n return constant_op.constant([], dtype=dtypes.int32)\n\n def _get_event_shape(self):\n return tensor_shape.scalar()\n\n def _sample_n(self, n, seed=None):\n \"\"\"See the documentation for tf.random_gamma for more details.\"\"\"\n return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,\n dtype=self.dtype, seed=seed)\n\n def _log_prob(self, x):\n x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if\n self.validate_args else [], x)\n return (self.alpha * math_ops.log(self.beta) -\n math_ops.lgamma(self.alpha) -\n (self.alpha + 1.) * math_ops.log(x) - self.beta / x)\n\n def _prob(self, x):\n return math_ops.exp(self._log_prob(x))\n\n def _log_cdf(self, x):\n return math_ops.log(self._cdf(x))\n\n def _cdf(self, x):\n x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if\n self.validate_args else [], x)\n # Note that igammac returns the upper regularized incomplete gamma\n # function Q(a, x), which is what we want for the CDF.\n return math_ops.igammac(self.alpha, self.beta / x)\n\n @distribution_util.AppendDocstring(\n \"\"\"This is defined to be\n\n ```\n entropy = alpha - log(beta) + log(Gamma(alpha))\n + (1-alpha)digamma(alpha)\n ```\n\n where digamma(alpha) is the digamma function.\"\"\")\n def _entropy(self):\n return (self.alpha +\n math_ops.log(self.beta) +\n math_ops.lgamma(self.alpha) -\n (1. + self.alpha) * math_ops.digamma(self.alpha))\n\n @distribution_util.AppendDocstring(\n \"\"\"The mean of an inverse gamma distribution is `beta / (alpha - 1)`,\n when `alpha > 1`, and `NaN` otherwise. 
If `self.allow_nan_stats` is\n `False`, an exception will be raised rather than returning `NaN`\"\"\")\n def _mean(self):\n mean = self.beta / (self.alpha - 1.)\n if self.allow_nan_stats:\n nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())\n return array_ops.where(\n self.alpha > 1., mean,\n array_ops.fill(self.batch_shape(), nan, name=\"nan\"))\n else:\n return control_flow_ops.with_dependencies([\n check_ops.assert_less(\n array_ops.ones((), self.dtype), self.alpha,\n message=\"mean not defined for components of self.alpha <= 1\"),\n ], mean)\n\n @distribution_util.AppendDocstring(\n \"\"\"Variance for inverse gamma is defined only for `alpha > 2`. If\n `self.allow_nan_stats` is `False`, an exception will be raised rather\n than returning `NaN`.\"\"\")\n def _variance(self):\n var = (math_ops.square(self.beta) /\n (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))\n if self.allow_nan_stats:\n nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())\n return array_ops.where(\n self.alpha > 2., var,\n array_ops.fill(self.batch_shape(), nan, name=\"nan\"))\n else:\n return control_flow_ops.with_dependencies([\n check_ops.assert_less(\n constant_op.constant(2., dtype=self.dtype), self.alpha,\n message=\"variance not defined for components of alpha <= 2\"),\n ], var)\n\n def _mode(self):\n \"\"\"The mode of an inverse gamma distribution is `beta / (alpha + 1)`.\"\"\"\n return self.beta / (self.alpha + 1.)\n\n\nclass InverseGammaWithSoftplusAlphaBeta(InverseGamma):\n \"\"\"Inverse Gamma with softplus applied to `alpha` and `beta`.\"\"\"\n\n def __init__(self,\n alpha,\n beta,\n validate_args=False,\n allow_nan_stats=True,\n name=\"InverseGammaWithSoftplusAlphaBeta\"):\n parameters = locals()\n parameters.pop(\"self\")\n with ops.name_scope(name, values=[alpha, beta]) as ns:\n super(InverseGammaWithSoftplusAlphaBeta, self).__init__(\n alpha=nn.softplus(alpha, name=\"softplus_alpha\"),\n beta=nn.softplus(beta, name=\"softplus_gamma\"),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=ns)\n self._parameters = parameters\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for RNN cells.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport sys\n\n# TODO: #6568 Remove this hack that makes dlopen() not crash.\nif hasattr(sys, \"getdlopenflags\") and hasattr(sys, \"setdlopenflags\"):\n import ctypes\n sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)\n\nimport numpy as np\n\n# TODO(ebrevdo): Remove once _linear is fully deprecated.\n# pylint: disable=protected-access\n\nfrom tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl\nfrom tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear as linear\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import test\n\n# pylint: enable=protected-access\n\n\nclass RNNCellTest(test.TestCase):\n\n def testLinear(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(1.0)):\n x = array_ops.zeros([1, 2])\n l = linear([x], 2, False)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([l], {x.name: np.array([[1., 2.]])})\n self.assertAllClose(res[0], [[3.0, 3.0]])\n\n # Checks prevent you from accidentally creating a shared function.\n with self.assertRaises(ValueError):\n l1 = linear([x], 2, False)\n\n # But you can create a new one in a new scope and share the variables.\n with variable_scope.variable_scope(\"l1\") as new_scope:\n l1 = linear([x], 2, False)\n with variable_scope.variable_scope(new_scope, reuse=True):\n linear([l1], 2, False)\n self.assertEqual(len(variables_lib.trainable_variables()), 2)\n\n def testBasicRNNCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 2])\n g, _ = core_rnn_cell_impl.BasicRNNCell(2)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g], {x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1]])})\n self.assertEqual(res[0].shape, (1, 2))\n\n def testGRUCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 2])\n g, _ = core_rnn_cell_impl.GRUCell(2)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g], {x.name: np.array([[1., 1.]]),\n 
m.name: np.array([[0.1, 0.1]])})\n # Smoke test\n self.assertAllClose(res[0], [[0.175991, 0.175991]])\n with variable_scope.variable_scope(\n \"other\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros(\n [1, 3]) # Test GRUCell with input_size != num_units.\n m = array_ops.zeros([1, 2])\n g, _ = core_rnn_cell_impl.GRUCell(2)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g],\n {x.name: np.array([[1., 1., 1.]]),\n m.name: np.array([[0.1, 0.1]])})\n # Smoke test\n self.assertAllClose(res[0], [[0.156736, 0.156736]])\n\n def testBasicLSTMCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 8])\n g, out_m = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.BasicLSTMCell(\n 2, state_is_tuple=False)] * 2,\n state_is_tuple=False)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, out_m],\n {x.name: np.array([[1., 1.]]),\n m.name: 0.1 * np.ones([1, 8])})\n self.assertEqual(len(res), 2)\n variables = variables_lib.global_variables()\n self.assertEqual(4, len(variables))\n self.assertEquals(variables[0].op.name,\n \"root/multi_rnn_cell/cell_0/basic_lstm_cell/weights\")\n self.assertEquals(variables[1].op.name,\n \"root/multi_rnn_cell/cell_0/basic_lstm_cell/biases\")\n self.assertEquals(variables[2].op.name,\n \"root/multi_rnn_cell/cell_1/basic_lstm_cell/weights\")\n self.assertEquals(variables[3].op.name,\n \"root/multi_rnn_cell/cell_1/basic_lstm_cell/biases\")\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.24024698, 0.24024698]])\n expected_mem = np.array([[\n 0.68967271, 0.68967271, 0.44848421, 0.44848421, 0.39897051,\n 0.39897051, 0.24024698, 0.24024698\n ]])\n self.assertAllClose(res[1], expected_mem)\n with variable_scope.variable_scope(\n \"other\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros(\n [1, 3]) # Test BasicLSTMCell with input_size != num_units.\n m = array_ops.zeros([1, 4])\n g, out_m = core_rnn_cell_impl.BasicLSTMCell(\n 2, state_is_tuple=False)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, out_m],\n {x.name: np.array([[1., 1., 1.]]),\n m.name: 0.1 * np.ones([1, 4])})\n self.assertEqual(len(res), 2)\n\n def testBasicLSTMCellStateTupleType(self):\n with self.test_session():\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m0 = (array_ops.zeros([1, 2]),) * 2\n m1 = (array_ops.zeros([1, 2]),) * 2\n cell = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.BasicLSTMCell(2)] * 2, state_is_tuple=True)\n self.assertTrue(isinstance(cell.state_size, tuple))\n self.assertTrue(\n isinstance(cell.state_size[0], core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(\n isinstance(cell.state_size[1], core_rnn_cell_impl.LSTMStateTuple))\n\n # Pass in regular tuples\n _, (out_m0, out_m1) = cell(x, (m0, m1))\n self.assertTrue(isinstance(out_m0, core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(isinstance(out_m1, core_rnn_cell_impl.LSTMStateTuple))\n\n # Pass in LSTMStateTuples\n variable_scope.get_variable_scope().reuse_variables()\n zero_state = cell.zero_state(1, dtypes.float32)\n self.assertTrue(isinstance(zero_state, tuple))\n self.assertTrue(\n isinstance(zero_state[0], core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(\n 
isinstance(zero_state[1], core_rnn_cell_impl.LSTMStateTuple))\n _, (out_m0, out_m1) = cell(x, zero_state)\n self.assertTrue(isinstance(out_m0, core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(isinstance(out_m1, core_rnn_cell_impl.LSTMStateTuple))\n\n def testBasicLSTMCellWithStateTuple(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m0 = array_ops.zeros([1, 4])\n m1 = array_ops.zeros([1, 4])\n cell = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.BasicLSTMCell(\n 2, state_is_tuple=False)] * 2,\n state_is_tuple=True)\n g, (out_m0, out_m1) = cell(x, (m0, m1))\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([g, out_m0, out_m1], {\n x.name: np.array([[1., 1.]]),\n m0.name: 0.1 * np.ones([1, 4]),\n m1.name: 0.1 * np.ones([1, 4])\n })\n self.assertEqual(len(res), 3)\n # The numbers in results were not calculated, this is just a smoke test.\n # Note, however, these values should match the original\n # version having state_is_tuple=False.\n self.assertAllClose(res[0], [[0.24024698, 0.24024698]])\n expected_mem0 = np.array(\n [[0.68967271, 0.68967271, 0.44848421, 0.44848421]])\n expected_mem1 = np.array(\n [[0.39897051, 0.39897051, 0.24024698, 0.24024698]])\n self.assertAllClose(res[1], expected_mem0)\n self.assertAllClose(res[2], expected_mem1)\n\n def testLSTMCell(self):\n with self.test_session() as sess:\n num_units = 8\n num_proj = 6\n state_size = num_units + num_proj\n batch_size = 3\n input_size = 2\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([batch_size, input_size])\n m = array_ops.zeros([batch_size, state_size])\n cell = core_rnn_cell_impl.LSTMCell(\n num_units=num_units,\n num_proj=num_proj,\n forget_bias=1.0,\n state_is_tuple=False)\n output, state = cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([output, state], {\n x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),\n m.name: 0.1 * np.ones((batch_size, state_size))\n })\n self.assertEqual(len(res), 2)\n # The numbers in results were not calculated, this is mostly just a\n # smoke test.\n self.assertEqual(res[0].shape, (batch_size, num_proj))\n self.assertEqual(res[1].shape, (batch_size, state_size))\n # Different inputs so different outputs and states\n for i in range(1, batch_size):\n self.assertTrue(\n float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)\n self.assertTrue(\n float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)\n\n def testLSTMCellVariables(self):\n with self.test_session():\n num_units = 8\n num_proj = 6\n state_size = num_units + num_proj\n batch_size = 3\n input_size = 2\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([batch_size, input_size])\n m = array_ops.zeros([batch_size, state_size])\n cell = core_rnn_cell_impl.LSTMCell(\n num_units=num_units,\n num_proj=num_proj,\n forget_bias=1.0,\n state_is_tuple=False)\n cell(x, m) # Execute to create variables\n variables = variables_lib.global_variables()\n self.assertEquals(variables[0].op.name, \"root/lstm_cell/weights\")\n self.assertEquals(variables[1].op.name, \"root/lstm_cell/biases\")\n self.assertEquals(variables[2].op.name,\n \"root/lstm_cell/projection/weights\")\n\n def testOutputProjectionWrapper(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", 
initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 3])\n m = array_ops.zeros([1, 3])\n cell = core_rnn_cell_impl.OutputProjectionWrapper(\n core_rnn_cell_impl.GRUCell(3), 2)\n g, new_m = cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([g, new_m], {\n x.name: np.array([[1., 1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1]])\n })\n self.assertEqual(res[1].shape, (1, 3))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.231907, 0.231907]])\n\n def testInputProjectionWrapper(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 3])\n cell = core_rnn_cell_impl.InputProjectionWrapper(\n core_rnn_cell_impl.GRUCell(3), num_proj=3)\n g, new_m = cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, new_m],\n {x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1]])})\n self.assertEqual(res[1].shape, (1, 3))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])\n\n def testDropoutWrapper(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 3])\n m = array_ops.zeros([1, 3])\n keep = array_ops.zeros([]) + 1\n g, new_m = core_rnn_cell_impl.DropoutWrapper(\n core_rnn_cell_impl.GRUCell(3), keep, keep)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([g, new_m], {\n x.name: np.array([[1., 1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1]])\n })\n self.assertEqual(res[1].shape, (1, 3))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])\n\n def testEmbeddingWrapper(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 1], dtype=dtypes.int32)\n m = array_ops.zeros([1, 2])\n embedding_cell = core_rnn_cell_impl.EmbeddingWrapper(\n core_rnn_cell_impl.GRUCell(2),\n embedding_classes=3,\n embedding_size=2)\n self.assertEqual(embedding_cell.output_size, 2)\n g, new_m = embedding_cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, new_m],\n {x.name: np.array([[1]]),\n m.name: np.array([[0.1, 0.1]])})\n self.assertEqual(res[1].shape, (1, 2))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.17139, 0.17139]])\n\n def testEmbeddingWrapperWithDynamicRnn(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\"root\"):\n inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)\n input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)\n embedding_cell = core_rnn_cell_impl.EmbeddingWrapper(\n core_rnn_cell_impl.BasicLSTMCell(\n 1, state_is_tuple=True),\n embedding_classes=1,\n embedding_size=2)\n outputs, _ = rnn.dynamic_rnn(\n cell=embedding_cell,\n inputs=inputs,\n sequence_length=input_lengths,\n dtype=dtypes.float32)\n sess.run([variables_lib.global_variables_initializer()])\n # This will fail if output's dtype is inferred from input's.\n sess.run(outputs)\n\n def testMultiRNNCell(self):\n with self.test_session() as sess:\n with 
variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 4])\n _, ml = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.GRUCell(2)] * 2, state_is_tuple=False)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(ml, {\n x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1, 0.1]])\n })\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])\n\n def testMultiRNNCellWithStateTuple(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m_bad = array_ops.zeros([1, 4])\n m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))\n\n # Test incorrectness of state\n with self.assertRaisesRegexp(ValueError, \"Expected state .* a tuple\"):\n core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.GRUCell(2)] * 2,\n state_is_tuple=True)(x, m_bad)\n\n _, ml = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.GRUCell(2)] * 2, state_is_tuple=True)(x, m_good)\n\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(ml, {\n x.name: np.array([[1., 1.]]),\n m_good[0].name: np.array([[0.1, 0.1]]),\n m_good[1].name: np.array([[0.1, 0.1]])\n })\n\n # The numbers in results were not calculated, this is just a\n # smoke test. However, these numbers should match those of\n # the test testMultiRNNCell.\n self.assertAllClose(res[0], [[0.175991, 0.175991]])\n self.assertAllClose(res[1], [[0.13248, 0.13248]])\n\n\nclass SlimRNNCellTest(test.TestCase):\n\n def testBasicRNNCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 2])\n my_cell = functools.partial(basic_rnn_cell, num_units=2)\n # pylint: disable=protected-access\n g, _ = core_rnn_cell_impl._SlimRNNCell(my_cell)(x, m)\n # pylint: enable=protected-access\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g], {x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1]])})\n self.assertEqual(res[0].shape, (1, 2))\n\n def testBasicRNNCellMatch(self):\n batch_size = 32\n input_size = 100\n num_units = 10\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n inputs = random_ops.random_uniform((batch_size, input_size))\n _, initial_state = basic_rnn_cell(inputs, None, num_units)\n my_cell = functools.partial(basic_rnn_cell, num_units=num_units)\n # pylint: disable=protected-access\n slim_cell = core_rnn_cell_impl._SlimRNNCell(my_cell)\n # pylint: enable=protected-access\n slim_outputs, slim_state = slim_cell(inputs, initial_state)\n rnn_cell = core_rnn_cell_impl.BasicRNNCell(num_units)\n variable_scope.get_variable_scope().reuse_variables()\n outputs, state = rnn_cell(inputs, initial_state)\n self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())\n self.assertEqual(slim_state.get_shape(), state.get_shape())\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([slim_outputs, slim_state, outputs, state])\n self.assertAllClose(res[0], res[2])\n self.assertAllClose(res[1], res[3])\n\n\ndef basic_rnn_cell(inputs, state, num_units, scope=None):\n if state is None:\n if inputs is not None:\n batch_size = 
inputs.get_shape()[0]\n dtype = inputs.dtype\n else:\n batch_size = 0\n dtype = dtypes.float32\n init_output = array_ops.zeros(\n array_ops.stack([batch_size, num_units]), dtype=dtype)\n init_state = array_ops.zeros(\n array_ops.stack([batch_size, num_units]), dtype=dtype)\n init_output.set_shape([batch_size, num_units])\n init_state.set_shape([batch_size, num_units])\n return init_output, init_state\n else:\n with variable_scope.variable_scope(scope, \"basic_rnn_cell\",\n [inputs, state]):\n output = math_ops.tanh(linear([inputs, state], num_units, True))\n return output, output\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.debug.debug_utils.watch_graph", "tensorflow.python.debug.stepper.NodeStepper", "tensorflow.core.protobuf.config_pb2.RunMetadata" ], [ "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.nn.softplus", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.contrib.distributions.python.ops.distribution_util.AppendDocstring", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.math_ops.lgamma", "tensorflow.python.ops.math_ops.igammac", "tensorflow.python.ops.check_ops.assert_positive", "tensorflow.python.ops.random_ops.random_gamma", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.digamma", "tensorflow.python.framework.constant_op.constant" ], [ "numpy.ones", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.GRUCell", "tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.LSTMCell", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.rnn.dynamic_rnn", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.array_ops.zeros", "tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.BasicRNNCell", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl._SlimRNNCell", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.linalg.norm", "tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl._linear", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.BasicLSTMCell", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.variables.global_variables", "numpy.array" ] ]
student-work-agu-gis2021/lesson7-matplotlib-AbeRyusei
[ "2adc657c1c1c02014a5a113b25f28756df377619" ]
[ "Exercise_7_problem_1.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# ## Problem 1: Simple scatter plot using random \n# \n# We can generate random numbers using using a method `random.rand()` from the [NumPy package](https://numpy.org/). This example generates 10 random values:\n# \n# ```\n# import numpy as np\n# random_numbers = np.random.rand(10)\n# \n# ```\n# \n# ### Part 1\n# \n# Create an new data frame called `data` and add 1000 random numbers (`float`) into a new column `x` and another 1000 random numbers (`float`) into a new column `y`.\n\nimport numpy as np\nimport pandas as pd\n\n# YOUR CODE HERE 1 to set data\nx = np.random.rand(1000)\ny = np.random.rand(1000)\ndata = pd.DataFrame()\ndata[\"x\"] = x\ndata[\"y\"] = y\n# Check your random values\nprint(data.head())\n\n# Check that you have the correct number of rows\nassert len(data) == 1000, \"There should be 1000 rows of data.\"\n\n\n# ### Part 2\n# \n\n# YOUR CODE HERE 2 to set colors\ncolors = np.random.rand(1000)\n# This test print should print out 10 first numbers in the variable colors\nprint(colors[0:10])\n\n# Check that the length matches\nassert len(colors) == 1000, \"There should be 1000 random numbers for colors\"\n\n\n# ### Part 3 \n# \n# #### Part 3.1\n# \n# Create a scatter plot of points with random colors\n# \n# #### Part 3.2\n# \n# #### Part 3.3\n# \n\n# Plot a scatter plot\n# YOUR CODE HERE 3\nimport matplotlib.pyplot as plt\nplt.scatter(x, y, s = 50, c = colors, cmap = 'rainbow', edgecolor = 'black')\n# Add labels and title\n# YOUR CODE HERE 4\nplt.title(\"My random candy points\")\nplt.xlabel(\"X-label\")\nplt.ylabel(\"Y-label\")\nplt.show()\n# Save the plot as a png file:\noutputfp = \"my_first_plot.png\"\n\n# YOUR CODE HERE 5\n\n# This test print statement should print the output filename of your figure\nprint(\"Saved my first plot as:\", outputfp)\n\n#Check that the file exists (also go and open the file to check that everything is ok!)\nimport os\n\nassert os.path.exists(outputfp), \"Can't find the output image.\"\n\n\n# Remember to commit your changes (including the image file) to your GitHub repo!\n# \n# ### Done!\n# \n# Now you can move to [problem 2](Exercise-7-problem-2.ipynb).\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.random.rand", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
MingAtUWA/SimpleMPM2
[ "7a1d7c257c621123d85a0630e93d42ae25c70fb4" ]
[ "PyUtilities/OneDConsolidation.py" ]
[ "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass OneDConsolidation:\n \"\"\"\n z = 0, free flow boundary condition\n z = H, impermeable boundary condition\n Parameters:\n 1. Cv, coefficient of consolidation;\n 2. Es, one dimensional compressive modulus\n 3. u0, initial pore pressure;\n 4. H, depth of soil;\n 5. error_ratio, used to control the calculation precision.\n \"\"\"\n def __init__(self, Cv, Es, u0, H, error_ratio = 1.0e-3):\n self.Cv = Cv\n self.Es = Es\n self.u0 = u0\n self.H = H\n # Final settlement\n self.dH_final = -H * u0 / Es\n self.error_ratio = error_ratio\n \n def calPorePressure(self, t, z):\n Tv = self.Cv * t / (self.H * self.H)\n p = 0.0\n z = z / self.H\n i = 0\n while True:\n M = (2*i+1) * math.pi / 2.0\n inc = 2.0/M * math.sin(M*z) * math.exp(-M*M*Tv)\n p += inc\n i += 1\n if abs(inc) < self.error_ratio:\n break\n if (p > 1.0): p = 1.0\n p *= self.u0\n return p\n \n def calSettlement(self, t):\n Tv = self.Cv * t / (self.H * self.H)\n dH = 0.0\n i = 0\n while True:\n M = (2*i+1) * math.pi / 2.0\n inc = 2.0/(M*M) * math.exp(-M*M*Tv)\n dH += inc\n i += 1\n if abs(inc) < self.error_ratio:\n break\n dH = self.dH_final * (1.0 - dH)\n return dH\n \nif __name__ == \"__main__\":\n Es = 40.0e6\n kv = 1.0e-5\n miu = 1.0 # dynamic viscosity\n Cv = kv * Es / miu\n u0 = 40.0e3\n H = 10.0\n con_res = OneDConsolidation(Cv, Es, u0, H)\n \n fig = plt.figure()\n plot1 = fig.subplots(1, 1)\n plot1.set_title('Settlement - Time relation')\n plot1.set_xlabel('Time')\n plot1.set_ylabel('Settlement')\n \n data_num = 100\n t_list = np.zeros(data_num)\n p_list = np.zeros(data_num)\n u_list = np.zeros(data_num)\n for i in range(data_num):\n t_list[i] = 0.01 * float(i)\n p_list[i] = con_res.calPorePressure(t_list[i], 10.0)\n u_list[i] = con_res.calSettlement(t_list[i])\n \n plot1.set_xlim([t_list[0], t_list[data_num-1]])\n \n plot1.plot(t_list, p_list, 'k--')\n #plot1.plot(t_list, u_list, 'k--')\n \n plt.show()\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.zeros" ] ]
YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation
[ "23b9191f150d0edb981cf22a47a618feb55578b9" ]
[ "AR/exp/common/ntu_tools.py" ]
[ "import os\n\nimport numpy as np\nimport json\nimport time\n\nfrom keras.callbacks import Callback\n\nfrom deephar.data import BatchLoader\nfrom deephar.utils import *\n\n\ndef eval_singleclip_gt_bbox_generator(model, datagen, verbose=1):\n\n num_blocks = len(model.outputs)\n num_samples = len(datagen)\n\n start = time.time()\n for i in range(num_samples):\n if verbose > 1:\n printcn('', 'pred %05d/%05d' % (i+1, num_samples))\n\n [x], [y] = datagen[i]\n if 'y_true' not in locals():\n y_true = np.zeros((num_samples,) + y.shape[1:])\n y_pred = np.zeros((num_samples, num_blocks) + y.shape[1:])\n\n y_true[i, :] = y\n pred = model.predict(x)\n for b in range(num_blocks):\n y_pred[i, b, :] = pred[b]\n\n dt = time.time() - start\n\n if verbose:\n printc(WARNING, 'NTU, single-clip, GT bbox, action acc.%:')\n\n scores = []\n for b in range(num_blocks):\n correct = np.equal(np.argmax(y_true, axis=-1),\n np.argmax(y_pred[:, b, :], axis=-1), dtype=np.float)\n scores.append(sum(correct) / len(correct))\n if verbose:\n printc(WARNING, ' %.1f ' % (100*scores[-1]))\n\n if verbose:\n printcn('', '\\n%d samples in %.1f sec: %.1f clips per sec' \\\n % (num_samples, dt, num_samples / dt))\n\n return scores\n\n\ndef eval_multiclip_dataset(model, ntu, subsampling, bboxes_file=None,\n logdir=None, verbose=1):\n \"\"\"If bboxes_file if not given, use ground truth bounding boxes.\"\"\"\n\n num_samples = ntu.get_length(TEST_MODE)\n num_blocks = len(model.outputs)\n\n \"\"\"Save and reset some original configs from the dataset.\"\"\"\n org_hflip = ntu.dataconf.fixed_hflip\n org_use_gt_bbox = ntu.use_gt_bbox\n\n cnt_corr = 0\n cnt_total = 0\n\n action_shape = (num_samples,) + ntu.get_shape('ntuaction')\n a_true = np.zeros(action_shape)\n a_pred = np.ones((num_blocks,) + action_shape)\n missing_clips = {}\n\n if bboxes_file is not None:\n with open(bboxes_file, 'r') as fid:\n bboxes_data = json.load(fid)\n ntu.use_gt_bbox = False\n bboxes_info = 'Using bounding boxes from file \"{}\"'.format(bboxes_file)\n else:\n bboxes_data = None\n ntu.use_gt_bbox = True\n bboxes_info = 'Using ground truth bounding boxes.'\n\n for i in range(num_samples):\n if verbose:\n printc(OKBLUE, '%04d/%04d\\t' % (i, num_samples))\n\n frame_list = ntu.get_clip_index(i, TEST_MODE, subsamples=[subsampling])\n\n \"\"\"Variable to hold all preditions for this sequence.\n 2x frame_list due to hflip.\n \"\"\"\n allpred = np.ones((num_blocks, 2*len(frame_list)) + action_shape[1:])\n\n for f in range(len(frame_list)):\n for hflip in range(2):\n preds_clip = []\n try:\n ntu.dataconf.fixed_hflip = hflip # Force horizontal flip\n\n bbox = None\n if bboxes_data is not None:\n key = '%04d.%d.%03d.%d' % (i, subsampling, f, hflip)\n try:\n bbox = np.array(bboxes_data[key])\n except:\n warning('Missing bounding box key ' + str(key))\n\n \"\"\"Load clip and predict action.\"\"\"\n data = ntu.get_data(i, TEST_MODE, frame_list=frame_list[f],\n bbox=bbox)\n a_true[i, :] = data['ntuaction']\n\n pred = model.predict(np.expand_dims(data['frame'], axis=0))\n for b in range(num_blocks):\n allpred[b, 2*f+hflip, :] = pred[b][0]\n a_pred[b, i, :] *= pred[b][0]\n\n if np.argmax(a_true[i]) != np.argmax(a_pred[-1, i]):\n missing_clips['%04d.%03d.%d' % (i, f, hflip)] = [\n int(np.argmax(a_true[i])),\n int(np.argmax(a_pred[-1, i]))]\n\n except Exception as e:\n warning('eval_multiclip, exception on sample ' \\\n + str(i) + ' frame ' + str(f) + ': ' + str(e))\n\n if verbose:\n cor = int(np.argmax(a_true[i]) == np.argmax(a_pred[-1, i]))\n\n cnt_total += 1\n cnt_corr += cor\n 
printnl('%d : %.1f' % (cor, 100 * cnt_corr / cnt_total))\n\n if logdir is not None:\n np.save('%s/a_pred.npy' % logdir, a_pred)\n np.save('%s/a_true.npy' % logdir, a_true)\n with open(os.path.join(logdir, 'missing-clips.json'), 'w') as fid:\n json.dump(missing_clips, fid)\n\n a_true = np.expand_dims(a_true, axis=0)\n a_true = np.tile(a_true, (num_blocks, 1, 1))\n correct = np.argmax(a_true, axis=-1) == np.argmax(a_pred, axis=-1)\n scores = 100*np.sum(correct, axis=-1) / num_samples\n if verbose:\n printcn(WARNING, 'NTU, multi-clip. ' + bboxes_info + '\\n')\n printcn(WARNING, np.array2string(np.array(scores), precision=2))\n printcn(WARNING, 'NTU best: %.2f' % max(scores))\n\n ntu.dataconf.fixed_hflip = org_hflip\n ntu.use_gt_bbox = org_use_gt_bbox\n\n return scores\n\n\nclass NtuEvalCallback(Callback):\n\n def __init__(self, data, eval_model=None, logdir=None):\n\n assert type(data) == BatchLoader, \\\n 'data must be a BatchLoader instance, ' \\\n + 'got {} instead'.format(data)\n\n self.data = data\n self.eval_model = eval_model\n self.scores = {}\n self.logdir = logdir\n\n def on_epoch_end(self, epoch, logs={}):\n if self.eval_model is not None:\n model = self.eval_model\n else:\n model = self.model\n\n scores = eval_singleclip_gt_bbox_generator(model, self.data)\n\n epoch += 1\n if self.logdir is not None:\n if not hasattr(self, 'logarray'):\n self.logarray = {}\n self.logarray[epoch] = scores\n with open(os.path.join(self.logdir, 'ntu_val.json'), 'w') as f:\n json.dump(self.logarray, f)\n\n cur_best = max(scores)\n self.scores[epoch] = cur_best\n\n printcn(OKBLUE, 'Best score is %.1f at epoch %d' % \\\n (100*self.best_score, self.best_epoch))\n\n @property\n def best_epoch(self):\n if len(self.scores) > 0:\n # Get the key of the maximum value from a dict\n return max(self.scores, key=self.scores.get)\n else:\n return np.inf\n\n @property\n def best_score(self):\n if len(self.scores) > 0:\n # Get the maximum value from a dict\n return self.scores[self.best_epoch]\n else:\n return 0\n\n# Aliases.\neval_singleclip_generator = eval_singleclip_gt_bbox_generator\n" ]
[ [ "numpy.ones", "numpy.save", "numpy.tile", "numpy.sum", "numpy.zeros", "numpy.argmax", "numpy.expand_dims", "numpy.array" ] ]
deepguider/RoadGPS
[ "7db4669a54da98a854886b89b6922fb8c7a60f33" ]
[ "src/ocr_recog/ocr_recognizer.py" ]
[ "import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" #CUDA_VISIBLE_DEVICES=0 (always use the first GPU only)\n\nimport time\nimport string\nimport argparse\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\n\nfrom utils import AttnLabelConverter\nfrom model import Model\n\nfrom demo import detect_ocr\nfrom craft.craft import CRAFT\nfrom collections import OrderedDict\n\n#####################################\n# 21.06.04 Astrid\n# https://github.com/googleapis/oauth2client/issues/642#issuecomment-279643203\n'''\nSolving this error \nFile \"./../src/ocr_recog/ocr_recognizer.py\", line 41, in __init__\n self.opt_craft, self.opt_recog = self.setup_parser()\n File \"./../src/ocr_recog/ocr_recognizer.py\", line 120, in setup_parser\n parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')\n File \"/usr/lib/python3.6/argparse.py\", line 1635, in __init__\n prog = _os.path.basename(_sys.argv[0])\nAttributeError: module 'sys' has no attribute 'argv'\n'''\nimport sys\n\nif not hasattr(sys, 'argv'):\n sys.argv = ['']\n#####################################\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\")\n\ndef copyStateDict(state_dict):\n if list(state_dict.keys())[0].startswith(\"module\"):\n start_idx = 1\n else:\n start_idx = 0\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = \".\".join(k.split(\".\")[start_idx:])\n new_state_dict[name] = v\n return new_state_dict\n\nclass OCRRecognizer:\n def __init__(self):\n self.net = None #detect\n self.model = None #recog\n self.converter = None\n #self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n self.res_imagefileName = None\n\n self.opt_craft, self.opt_recog = self.setup_parser()\n\n self.args_craft= vars(self.opt_craft)\n self.args = vars(self.opt_recog)\n\n self.detect_time = 0.0\n self.recog_time = 0.0\n self.total_time =0.0\n # print(\"~~~~~~~~ Hyperparameters used: ~~~~~~~\")\n # for x, y in self.args.items():\n # print(\"{} : {}\".format(x, y))\n self.__dict__.update(self.args_craft)\n self.__dict__.update(self.args)\n\n\n def initialize(self):\n\n start = time.time()\n\n\n\n # self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_addKorean_synth/best_accuracy.pth'\n # self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train/craft_mlt_25k.pth'\n # self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy.pth'\n # self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy_craft.pth'\n #\n # official\n\n self.saved_model = './data_ocr/best_accuracy.pth'\n self.craft_trained_model = './data_ocr/best_craft.pth'\n self.logfilepath = './data_ocr/log_ocr_result.txt'\n \n if torch.cuda.is_available():\n self.device = torch.device('cuda')\n self.cuda = True\n cudnn.benchmark = False\n else:\n self.device = torch.device('cpu')\n self.cuda = False\n cudnn.benchmark = True\n\n\n \"\"\" vocab / character number configuration \"\"\"\n # if self.sensitive:\n # self.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n cudnn.deterministic = True\n\n #self.num_gpu = torch.cuda.device_count()\n\n \"\"\" model configuration \"\"\"\n # detetion\n self.net = CRAFT(self).to(self.device) # initialize\n print('Loading detection weights from checkpoint ' + self.craft_trained_model)\n self.net.load_state_dict(copyStateDict(torch.load(self.craft_trained_model, map_location=self.device)))\n #self.net = 
torch.nn.DataParallel(self.net).to(self.device)\n self.net.to(self.device)\n\n self.converter = AttnLabelConverter(self.character)\n self.num_class = len(self.converter.character)\n\n if self.rgb:\n self.input_channel = 3\n self.model = Model(self, self.num_class).to(self.device)\n # load model\n #self.model = torch.nn.DataParallel(self.model).to(self.device)\n print('Loading recognition weights from checkpoint %s' % self.saved_model)\n #ckpt = torch.load(self.saved_model, map_location=self.device)\n self.model.load_state_dict(torch.load(self.saved_model, map_location=self.device))\n self.model.to(self.device)\n \n print('Initialization Done! It tooks {:.2f} sec.\\n'.format(time.time() - start))\n return True\n\n def setup_parser(self):\n \"\"\"\n Sets up an argument parser\n \"\"\"\n\n parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')\n\n parser_craft.add_argument('--craft_trained_model', default='weights/craft_mlt_25k.pth', type=str,\n help='pretrained model')\n parser_craft.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')\n parser_craft.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')\n parser_craft.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')\n parser_craft.add_argument('--cuda', default=False, type=str2bool, help='Use cuda for inference')\n parser_craft.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')\n parser_craft.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')\n parser_craft.add_argument('--poly', default=False, action='store_true', help='enable polygon type')\n parser_craft.add_argument('--show_time', default=False, action='store_true', help='show processing time')\n parser_craft.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')\n parser_craft.add_argument('--result_folder', default='./results/', type=str, help='result folder path')\n parser_craft.add_argument('--refine', default=False, action='store_true', help='enable link refiner')\n parser_craft.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str,\n help='pretrained refiner model')\n\n args_craft = parser_craft.parse_args()\n\n parser_recog = argparse.ArgumentParser(description='ocr recognition')\n parser_recog.add_argument('--image_path', help='path to image_folder or image_file which contains text images')\n parser_recog.add_argument('--workers', type=int, help='number of data loading workers', default=4)\n parser_recog.add_argument('--batch_size', type=int, default=1, help='input batch size')\n parser_recog.add_argument('--saved_model', help=\"path to saved_model to evaluation\")\n parser_recog.add_argument('--logfilepath', help=\"path to log to demo\")\n\n \"\"\" Data processing \"\"\"\n parser_recog.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\n parser_recog.add_argument('--imgH', type=int, default=32, help='the height of the input image')\n parser_recog.add_argument('--imgW', type=int, default=100, help='the width of the input image')\n parser_recog.add_argument('--rgb', action='store_true', help='use rgb input')\n # parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\n\n parser_recog.add_argument('--character', type=str,\n 
default='0123456789abcdefghijklmnopqrstuvwxyz가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘',\n help='character label')\n\n parser_recog.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\n parser_recog.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n \"\"\" Model Architecture \"\"\"\n parser_recog.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\n parser_recog.add_argument('--input_channel', type=int, default=1,\n help='the number of input channel of Feature extractor')\n parser_recog.add_argument('--output_channel', type=int, default=512,\n help='the number of output channel of Feature extractor')\n parser_recog.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n\n args_recog= parser_recog.parse_args()\n\n return args_craft , args_recog\n\n\n\n def apply(self, image, timestamp, save_img=False):\n #coordinate : list\n save_log = False\n pred, timestamp = detect_ocr(self, image, timestamp, save_img, save_log)\n return pred, timestamp" ]
[ [ "torch.cuda.is_available", "torch.device", "torch.load" ] ]
fada-catec/advice_AI4EU_experiment
[ "624a1051e0502b60abe6122450ea53f80e9e4f8a" ]
[ "advice-road-crop/roadnet/train_valid_split.py" ]
[ "import os\nimport shutil\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\nfrom PIL import Image\n\nrandom.seed(2020)\nIMG_CROP = True\n\n# save gt_image_2 into gt_image, so that road is assigned to 255 and non-road is 0\ntrain_gt_path = \"../../data_road/training/gt_image_2/\"\nsave_gt_path = \"../../data_road/training/gt_image/\"\ngt_list = [f for f in os.listdir(train_gt_path) if f.endswith('.png')]\n\ntry:\n shutil.rmtree(save_gt_path)\nexcept OSError:\n pass\nos.mkdir(save_gt_path)\n\npbar = tqdm(total=289)\nfor gt in gt_list:\n if \"road\" in gt:\n img = np.array(Image.open(train_gt_path+gt))\n height = img.shape[0]\n width = img.shape[1]\n gtId = np.zeros((height, width), dtype=np.uint8)\n for i in range(height):\n for j in range(width):\n # print(img[i, j, :])\n if sum(img[i, j, :] == [255, 0, 255]) == 3:\n gtId[i, j] = 7\n else:\n gtId[i, j] = 0\n gt_name = gt.split('_road_')\n Image.fromarray(gtId).save(save_gt_path+gt_name[0]+'_'+gt_name[1])\n pbar.update(1)\n\n\n# split the training and validation data by 9:1\ndef traval_split(data_path, sub='um', seed=1):\n random.seed(seed)\n data_list = [f for f in os.listdir(data_path) if sub+'_' in f]\n\n train_len = round(len(data_list)*0.9)\n random.shuffle(data_list)\n train_list = data_list[:train_len]\n valid_list = data_list[train_len:]\n # print(len(train_list))\n # print(len(valid_list))\n return train_list, valid_list\n\n\n# load path\nimg_src_path = '../../data_road/training/image_2/'\ngt_src_path = '../../data_road/training/gt_image/'\n# save path\nbase_dir = '../../data_road_3/'\ntry:\n shutil.rmtree(base_dir)\nexcept OSError:\n pass\nos.mkdir(base_dir)\ntry:\n shutil.rmtree(base_dir+'training')\nexcept OSError:\n pass\nos.mkdir(base_dir+'training')\ntry:\n shutil.rmtree(base_dir+'validation')\nexcept OSError:\n pass\nos.mkdir(base_dir+'validation')\n\nimg_tra_path = base_dir+'training/image/'\ngt_tra_path = base_dir+'training/gt_image/'\nimg_val_path = base_dir+'validation/image/'\ngt_val_path = base_dir+'validation/gt_image/'\n\ntry:\n shutil.rmtree(img_tra_path)\nexcept OSError:\n pass\nos.mkdir(img_tra_path)\ntry:\n shutil.rmtree(gt_tra_path)\nexcept OSError:\n pass\nos.mkdir(gt_tra_path)\ntry:\n shutil.rmtree(img_val_path)\nexcept OSError:\n pass\nos.mkdir(img_val_path)\ntry:\n shutil.rmtree(gt_val_path)\nexcept OSError:\n pass\nos.mkdir(gt_val_path)\n\nname_list = ['um', 'umm', 'uu']\n\n\ndef image_crop(img):\n return img.crop((0, int(img.size[1]*0.45), img.size[0], img.size[1]))\n\n\nfor name in name_list:\n train_list, valid_list = traval_split(img_src_path, sub=name)\n for valid_img in valid_list:\n if IMG_CROP:\n img = Image.open(img_src_path+valid_img)\n img_crop = image_crop(img)\n img_crop.save(img_val_path+valid_img)\n\n gt = Image.open(gt_src_path+valid_img)\n gt_crop = image_crop(gt)\n gt_crop.save(gt_val_path+valid_img)\n else:\n shutil.copy(img_src_path+valid_img, img_val_path+valid_img)\n shutil.copy(gt_src_path+valid_img, gt_val_path+valid_img)\n for train_img in train_list:\n if IMG_CROP:\n img = Image.open(img_src_path+train_img)\n img_crop = image_crop(img)\n img_crop.save(img_tra_path+train_img)\n\n gt = Image.open(gt_src_path+train_img)\n gt_crop = image_crop(gt)\n gt_crop.save(gt_tra_path+train_img)\n else:\n shutil.copy(img_src_path+train_img, img_tra_path+train_img)\n shutil.copy(gt_src_path+train_img, gt_tra_path+train_img)\n\n" ]
[ [ "numpy.zeros" ] ]
adler-j/jax
[ "3d7f884ccfe15da1b218903b37b255769223b4cf" ]
[ "tests/dtypes_test.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport operator\nimport unittest\n\nimport six\n\nif six.PY3:\n import enum\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as onp\n\nimport jax\nfrom jax import dtypes\nfrom jax import numpy as np\nfrom jax import test_util as jtu\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\nbool_dtypes = [onp.dtype('bool')]\n\nsigned_dtypes = [onp.dtype('int8'), onp.dtype('int16'), onp.dtype('int32'),\n onp.dtype('int64')]\n\nunsigned_dtypes = [onp.dtype('uint8'), onp.dtype('uint16'), onp.dtype('uint32'),\n onp.dtype('uint64')]\n\nonp_float_dtypes = [onp.dtype('float16'), onp.dtype('float32'),\n onp.dtype('float64')]\n\nfloat_dtypes = [onp.dtype(dtypes.bfloat16)] + onp_float_dtypes\n\ncomplex_dtypes = [onp.dtype('complex64'), onp.dtype('complex128')]\n\n\nall_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes +\n complex_dtypes)\n\n\nclass DtypesTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_type={}\".format(type.__name__), \"type\": type,\n \"dtype\": dtype}\n for type, dtype in [(bool, np.bool_), (int, np.int_), (float, np.float_),\n (complex, np.complex_)])\n def testDefaultTypes(self, type, dtype):\n for f in [np.array, jax.jit(np.array), jax.jit(lambda x: x)]:\n y = f(type(0))\n self.assertTrue(isinstance(y, np.ndarray), msg=(f, y))\n self.assertEqual(y.dtype, dtypes.canonicalize_dtype(dtype), msg=(f, y))\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_swap={}_jit={}\".format(swap, jit),\n \"swap\": swap, \"jit\": jit} \n for swap in [False, True] for jit in [False, True])\n @jtu.skip_on_devices(\"tpu\") # F16 not supported on TPU\n def testBinaryPromotion(self, swap, jit):\n testcases = [\n (np.array(1.), 0., np.float_),\n (np.array(1.), np.array(0.), np.float_),\n (np.array(1.), np.array(0., dtype=np.float16), np.float_),\n (np.array(1.), np.array(0., dtype=np.float32), np.float_),\n (np.array(1.), np.array(0., dtype=np.float64), np.float64),\n (np.array(1., dtype=np.float16), 0., np.float16),\n (np.array(1., dtype=np.float32), 0., np.float32),\n (np.array(1., dtype=np.float64), 0., np.float64),\n (np.array(1., dtype=np.float16), np.array(0., dtype=np.float16), np.float16),\n (np.array(1., dtype=np.float16), np.array(0., dtype=np.float32), np.float32),\n (np.array(1., dtype=np.float16), np.array(0., dtype=np.float64), np.float64),\n (np.array(1., dtype=np.float32), np.array(0., dtype=np.float32), np.float32),\n (np.array(1., dtype=np.float32), np.array(0., dtype=np.float64), np.float64),\n (np.array(1., dtype=np.float64), np.array(0., dtype=np.float64), np.float64),\n (np.array([1.]), 0., np.float_),\n (np.array([1.]), np.array(0.), np.float_),\n (np.array([1.]), np.array(0., dtype=np.float16), np.float_),\n (np.array([1.]), 
np.array(0., dtype=np.float32), np.float_),\n (np.array([1.]), np.array(0., dtype=np.float64), np.float64),\n (np.array([1.], dtype=np.float32), np.array(0., dtype=np.float16), np.float32),\n (np.array([1.], dtype=np.float16), np.array(0., dtype=np.float32), np.float32),\n (np.array([1.], dtype=np.float16), 0., np.float16),\n ]\n op = jax.jit(operator.add) if jit else operator.add\n for x, y, dtype in testcases:\n x, y = (y, x) if swap else (x, y)\n z = x + y\n self.assertTrue(isinstance(z, np.ndarray), msg=(x, y, z))\n self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))\n\n def testPromoteDtypes(self):\n for t1 in all_dtypes:\n self.assertEqual(t1, dtypes.promote_types(t1, t1))\n\n self.assertEqual(t1, dtypes.promote_types(t1, onp.bool_))\n self.assertEqual(onp.dtype(onp.complex128),\n dtypes.promote_types(t1, onp.complex128))\n\n for t2 in all_dtypes:\n # Symmetry\n self.assertEqual(dtypes.promote_types(t1, t2),\n dtypes.promote_types(t2, t1))\n\n self.assertEqual(onp.dtype(onp.float32),\n dtypes.promote_types(onp.float16, dtypes.bfloat16))\n\n # Promotions of non-inexact types against inexact types always prefer\n # the inexact types.\n for t in float_dtypes + complex_dtypes:\n for i in bool_dtypes + signed_dtypes + unsigned_dtypes:\n self.assertEqual(t, dtypes.promote_types(t, i))\n\n # Promotions between exact types, or between inexact types, match NumPy.\n for groups in [bool_dtypes + signed_dtypes + unsigned_dtypes,\n onp_float_dtypes + complex_dtypes]:\n for t1, t2 in itertools.combinations(groups, 2):\n self.assertEqual(onp.promote_types(t1, t2),\n dtypes.promote_types(t1, t2))\n\n\n @unittest.skipIf(six.PY2, \"Test requires Python 3\")\n def testEnumPromotion(self):\n class AnEnum(enum.IntEnum):\n A = 42\n B = 101\n onp.testing.assert_equal(onp.array(42), onp.array(AnEnum.A))\n onp.testing.assert_equal(np.array(42), np.array(AnEnum.A))\n onp.testing.assert_equal(onp.int32(101), onp.int32(AnEnum.B))\n onp.testing.assert_equal(np.int32(101), np.int32(AnEnum.B))\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "numpy.array", "numpy.dtype", "numpy.promote_types", "numpy.int32" ] ]
ZHG2017/scipy
[ "859c1061b3d5aa30c4466824049d69edde5499a2" ]
[ "scipy/linalg/lapack.py" ]
[ "\"\"\"\nLow-level LAPACK functions (:mod:`scipy.linalg.lapack`)\n=======================================================\n\nThis module contains low-level functions from the LAPACK library.\n\nThe `*gegv` family of routines have been removed from LAPACK 3.6.0\nand have been deprecated in SciPy 0.17.0. They will be removed in\na future release.\n\n.. versionadded:: 0.12.0\n\n.. note::\n\n The common ``overwrite_<>`` option in many routines, allows the\n input arrays to be overwritten to avoid extra memory allocation.\n However this requires the array to satisfy two conditions\n which are memory order and the data type to match exactly the\n order and the type expected by the routine.\n\n As an example, if you pass a double precision float array to any\n ``S....`` routine which expects single precision arguments, f2py\n will create an intermediate array to match the argument types and\n overwriting will be performed on that intermediate array.\n\n Similarly, if a C-contiguous array is passed, f2py will pass a\n FORTRAN-contiguous array internally. Please make sure that these\n details are satisfied. More information can be found in the f2py\n documentation.\n\n.. warning::\n\n These functions do little to no error checking.\n It is possible to cause crashes by mis-using them,\n so prefer using the higher-level routines in `scipy.linalg`.\n\nFinding functions\n-----------------\n\n.. autosummary::\n :toctree: generated/\n\n get_lapack_funcs\n\nAll functions\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n sgbsv\n dgbsv\n cgbsv\n zgbsv\n\n sgbtrf\n dgbtrf\n cgbtrf\n zgbtrf\n\n sgbtrs\n dgbtrs\n cgbtrs\n zgbtrs\n\n sgebal\n dgebal\n cgebal\n zgebal\n\n sgecon\n dgecon\n cgecon\n zgecon\n\n sgeequ\n dgeequ\n cgeequ\n zgeequ\n\n sgeequb\n dgeequb\n cgeequb\n zgeequb\n\n sgees\n dgees\n cgees\n zgees\n\n sgeev\n dgeev\n cgeev\n zgeev\n\n sgeev_lwork\n dgeev_lwork\n cgeev_lwork\n zgeev_lwork\n\n sgegv\n dgegv\n cgegv\n zgegv\n\n sgehrd\n dgehrd\n cgehrd\n zgehrd\n\n sgehrd_lwork\n dgehrd_lwork\n cgehrd_lwork\n zgehrd_lwork\n\n sgejsv\n dgejsv\n\n sgels\n dgels\n cgels\n zgels\n\n sgels_lwork\n dgels_lwork\n cgels_lwork\n zgels_lwork\n\n sgelsd\n dgelsd\n cgelsd\n zgelsd\n\n sgelsd_lwork\n dgelsd_lwork\n cgelsd_lwork\n zgelsd_lwork\n\n sgelss\n dgelss\n cgelss\n zgelss\n\n sgelss_lwork\n dgelss_lwork\n cgelss_lwork\n zgelss_lwork\n\n sgelsy\n dgelsy\n cgelsy\n zgelsy\n\n sgelsy_lwork\n dgelsy_lwork\n cgelsy_lwork\n zgelsy_lwork\n\n sgeqp3\n dgeqp3\n cgeqp3\n zgeqp3\n\n sgeqrf\n dgeqrf\n cgeqrf\n zgeqrf\n\n sgeqrf_lwork\n dgeqrf_lwork\n cgeqrf_lwork\n zgeqrf_lwork\n\n sgeqrfp\n dgeqrfp\n cgeqrfp\n zgeqrfp\n\n sgeqrfp_lwork\n dgeqrfp_lwork\n cgeqrfp_lwork\n zgeqrfp_lwork\n\n sgerqf\n dgerqf\n cgerqf\n zgerqf\n\n sgesdd\n dgesdd\n cgesdd\n zgesdd\n\n sgesdd_lwork\n dgesdd_lwork\n cgesdd_lwork\n zgesdd_lwork\n\n sgesv\n dgesv\n cgesv\n zgesv\n\n sgesvd\n dgesvd\n cgesvd\n zgesvd\n\n sgesvd_lwork\n dgesvd_lwork\n cgesvd_lwork\n zgesvd_lwork\n\n sgesvx\n dgesvx\n cgesvx\n zgesvx\n\n sgetrf\n dgetrf\n cgetrf\n zgetrf\n\n sgetc2\n dgetc2\n cgetc2\n zgetc2\n\n sgetri\n dgetri\n cgetri\n zgetri\n\n sgetri_lwork\n dgetri_lwork\n cgetri_lwork\n zgetri_lwork\n\n sgetrs\n dgetrs\n cgetrs\n zgetrs\n\n sgesc2\n dgesc2\n cgesc2\n zgesc2\n\n sgges\n dgges\n cgges\n zgges\n\n sggev\n dggev\n cggev\n zggev\n\n sgglse\n dgglse\n cgglse\n zgglse\n\n sgglse_lwork\n dgglse_lwork\n cgglse_lwork\n zgglse_lwork\n\n sgtsv\n dgtsv\n cgtsv\n zgtsv\n\n sgtsvx\n dgtsvx\n cgtsvx\n zgtsvx\n\n chbevd\n zhbevd\n\n 
chbevx\n zhbevx\n\n checon\n zhecon\n\n cheequb\n zheequb\n\n cheev\n zheev\n\n cheev_lwork\n zheev_lwork\n\n cheevd\n zheevd\n\n cheevd_lwork\n zheevd_lwork\n\n cheevr\n zheevr\n\n cheevr_lwork\n zheevr_lwork\n\n cheevx\n zheevx\n\n cheevx_lwork\n zheevx_lwork\n\n chegst\n zhegst\n\n chegv\n zhegv\n\n chegv_lwork\n zhegv_lwork\n\n chegvd\n zhegvd\n\n chegvx\n zhegvx\n\n chegvx_lwork\n zhegvx_lwork\n\n chesv\n zhesv\n\n chesv_lwork\n zhesv_lwork\n\n chesvx\n zhesvx\n\n chesvx_lwork\n zhesvx_lwork\n\n chetrd\n zhetrd\n\n chetrd_lwork\n zhetrd_lwork\n\n chetrf\n zhetrf\n\n chetrf_lwork\n zhetrf_lwork\n\n chfrk\n zhfrk\n\n slamch\n dlamch\n\n slange\n dlange\n clange\n zlange\n\n slarf\n dlarf\n clarf\n zlarf\n\n slarfg\n dlarfg\n clarfg\n zlarfg\n\n slartg\n dlartg\n clartg\n zlartg\n\n slasd4\n dlasd4\n\n slaswp\n dlaswp\n claswp\n zlaswp\n\n slauum\n dlauum\n clauum\n zlauum\n\n sorcsd\n dorcsd\n sorcsd_lwork\n dorcsd_lwork\n\n sorghr\n dorghr\n sorghr_lwork\n dorghr_lwork\n\n sorgqr\n dorgqr\n\n sorgrq\n dorgrq\n\n sormqr\n dormqr\n\n sormrz\n dormrz\n\n sormrz_lwork\n dormrz_lwork\n\n spbsv\n dpbsv\n cpbsv\n zpbsv\n\n spbtrf\n dpbtrf\n cpbtrf\n zpbtrf\n\n spbtrs\n dpbtrs\n cpbtrs\n zpbtrs\n\n spftrf\n dpftrf\n cpftrf\n zpftrf\n\n spftri\n dpftri\n cpftri\n zpftri\n\n spftrs\n dpftrs\n cpftrs\n zpftrs\n\n spocon\n dpocon\n cpocon\n zpocon\n\n spstrf\n dpstrf\n cpstrf\n zpstrf\n\n spstf2\n dpstf2\n cpstf2\n zpstf2\n\n sposv\n dposv\n cposv\n zposv\n\n sposvx\n dposvx\n cposvx\n zposvx\n\n spotrf\n dpotrf\n cpotrf\n zpotrf\n\n spotri\n dpotri\n cpotri\n zpotri\n\n spotrs\n dpotrs\n cpotrs\n zpotrs\n\n sppcon\n dppcon\n cppcon\n zppcon\n\n sppsv\n dppsv\n cppsv\n zppsv\n\n spptrf\n dpptrf\n cpptrf\n zpptrf\n\n spptri\n dpptri\n cpptri\n zpptri\n\n spptrs\n dpptrs\n cpptrs\n zpptrs\n\n sptsv\n dptsv\n cptsv\n zptsv\n\n sptsvx\n dptsvx\n cptsvx\n zptsvx\n\n spttrf\n dpttrf\n cpttrf\n zpttrf\n\n spttrs\n dpttrs\n cpttrs\n zpttrs\n\n spteqr\n dpteqr\n cpteqr\n zpteqr\n\n crot\n zrot\n\n ssbev\n dsbev\n\n ssbevd\n dsbevd\n\n ssbevx\n dsbevx\n\n ssfrk\n dsfrk\n\n sstebz\n dstebz\n\n sstein\n dstein\n\n sstemr\n dstemr\n\n sstemr_lwork\n dstemr_lwork\n\n ssterf\n dsterf\n\n sstev\n dstev\n\n ssycon\n dsycon\n csycon\n zsycon\n\n ssyconv\n dsyconv\n csyconv\n zsyconv\n\n ssyequb\n dsyequb\n csyequb\n zsyequb\n\n ssyev\n dsyev\n\n ssyev_lwork\n dsyev_lwork\n\n ssyevd\n dsyevd\n\n ssyevd_lwork\n dsyevd_lwork\n\n ssyevr\n dsyevr\n\n ssyevr_lwork\n dsyevr_lwork\n\n ssyevx\n dsyevx\n\n ssyevx_lwork\n dsyevx_lwork\n\n ssygst\n dsygst\n\n ssygv\n dsygv\n\n ssygv_lwork\n dsygv_lwork\n\n ssygvd\n dsygvd\n\n ssygvx\n dsygvx\n\n ssygvx_lwork\n dsygvx_lwork\n\n ssysv\n dsysv\n csysv\n zsysv\n\n ssysv_lwork\n dsysv_lwork\n csysv_lwork\n zsysv_lwork\n\n ssysvx\n dsysvx\n csysvx\n zsysvx\n\n ssysvx_lwork\n dsysvx_lwork\n csysvx_lwork\n zsysvx_lwork\n\n ssytf2\n dsytf2\n csytf2\n zsytf2\n\n ssytrd\n dsytrd\n\n ssytrd_lwork\n dsytrd_lwork\n\n ssytrf\n dsytrf\n csytrf\n zsytrf\n\n ssytrf_lwork\n dsytrf_lwork\n csytrf_lwork\n zsytrf_lwork\n\n stbtrs\n dtbtrs\n ctbtrs\n ztbtrs\n\n stfsm\n dtfsm\n ctfsm\n ztfsm\n\n stfttp\n dtfttp\n ctfttp\n ztfttp\n\n stfttr\n dtfttr\n ctfttr\n ztfttr\n\n stgexc\n dtgexc\n ctgexc\n ztgexc\n\n stgsen\n dtgsen\n ctgsen\n ztgsen\n\n stpttf\n dtpttf\n ctpttf\n ztpttf\n\n stpttr\n dtpttr\n ctpttr\n ztpttr\n\n strsyl\n dtrsyl\n ctrsyl\n ztrsyl\n\n strtri\n dtrtri\n ctrtri\n ztrtri\n\n strtrs\n dtrtrs\n ctrtrs\n ztrtrs\n\n strttf\n dtrttf\n ctrttf\n ztrttf\n\n strttp\n dtrttp\n ctrttp\n 
ztrttp\n\n stzrzf\n dtzrzf\n ctzrzf\n ztzrzf\n\n stzrzf_lwork\n dtzrzf_lwork\n ctzrzf_lwork\n ztzrzf_lwork\n\n cunghr\n zunghr\n\n cunghr_lwork\n zunghr_lwork\n\n cungqr\n zungqr\n\n cungrq\n zungrq\n\n cunmqr\n zunmqr\n\n sgeqrt\n dgeqrt\n cgeqrt\n zgeqrt\n\n sgemqrt\n dgemqrt\n cgemqrt\n zgemqrt\n\n sgttrf\n dgttrf\n cgttrf\n zgttrf\n\n sgttrs\n dgttrs\n cgttrs\n zgttrs\n\n stpqrt\n dtpqrt\n ctpqrt\n ztpqrt\n\n stpmqrt\n dtpmqrt\n ctpmqrt\n ztpmqrt\n\n cuncsd\n zuncsd\n\n cuncsd_lwork\n zuncsd_lwork\n\n cunmrz\n zunmrz\n\n cunmrz_lwork\n zunmrz_lwork\n\n ilaver\n\n\"\"\"\n#\n# Author: Pearu Peterson, March 2002\n#\n\nimport numpy as _np\nfrom .blas import _get_funcs, _memoize_get_funcs\nfrom scipy.linalg import _flapack\nfrom re import compile as regex_compile\ntry:\n from scipy.linalg import _clapack\nexcept ImportError:\n _clapack = None\n\ntry:\n from scipy.linalg import _flapack_64\n HAS_ILP64 = True\nexcept ImportError:\n HAS_ILP64 = False\n _flapack_64 = None\n\n# Backward compatibility\nfrom scipy._lib._util import DeprecatedImport as _DeprecatedImport\nclapack = _DeprecatedImport(\"scipy.linalg.blas.clapack\", \"scipy.linalg.lapack\")\nflapack = _DeprecatedImport(\"scipy.linalg.blas.flapack\", \"scipy.linalg.lapack\")\n\n# Expose all functions (only flapack --- clapack is an implementation detail)\nempty_module = None\nfrom scipy.linalg._flapack import *\ndel empty_module\n\n__all__ = ['get_lapack_funcs']\n\n_dep_message = \"\"\"The `*gegv` family of routines has been deprecated in\nLAPACK 3.6.0 in favor of the `*ggev` family of routines.\nThe corresponding wrappers will be removed from SciPy in\na future release.\"\"\"\n\ncgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message)\ndgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message)\nsgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message)\nzgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message)\n\n# Modify _flapack in this scope so the deprecation warnings apply to\n# functions returned by get_lapack_funcs.\n_flapack.cgegv = cgegv\n_flapack.dgegv = dgegv\n_flapack.sgegv = sgegv\n_flapack.zgegv = zgegv\n\n# some convenience alias for complex functions\n_lapack_alias = {\n 'corghr': 'cunghr', 'zorghr': 'zunghr',\n 'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',\n 'corgqr': 'cungqr', 'zorgqr': 'zungqr',\n 'cormqr': 'cunmqr', 'zormqr': 'zunmqr',\n 'corgrq': 'cungrq', 'zorgrq': 'zungrq',\n}\n\n\n# Place guards against docstring rendering issues with special characters\np1 = regex_compile(r'with bounds (?P<b>.*?)( and (?P<s>.*?) 
storage){0,1}\\n')\np2 = regex_compile(r'Default: (?P<d>.*?)\\n')\n\n\ndef backtickrepl(m):\n if m.group('s'):\n return ('with bounds ``{}`` with ``{}`` storage\\n'\n ''.format(m.group('b'), m.group('s')))\n else:\n return 'with bounds ``{}``\\n'.format(m.group('b'))\n\n\nfor routine in [ssyevr, dsyevr, cheevr, zheevr,\n ssyevx, dsyevx, cheevx, zheevx,\n ssygvd, dsygvd, chegvd, zhegvd]:\n if routine.__doc__:\n routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)\n routine.__doc__ = p2.sub('Default ``\\\\1``\\n', routine.__doc__)\n else:\n continue\n\ndel regex_compile, p1, p2, backtickrepl\n\n\n@_memoize_get_funcs\ndef get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):\n \"\"\"Return available LAPACK function objects from names.\n\n Arrays are used to determine the optimal prefix of LAPACK routines.\n\n Parameters\n ----------\n names : str or sequence of str\n Name(s) of LAPACK functions without type prefix.\n\n arrays : sequence of ndarrays, optional\n Arrays can be given to determine optimal prefix of LAPACK\n routines. If not given, double-precision routines will be\n used, otherwise the most generic type in arrays will be used.\n\n dtype : str or dtype, optional\n Data-type specifier. Not used if `arrays` is non-empty.\n\n ilp64 : {True, False, 'preferred'}, optional\n Whether to return ILP64 routine variant.\n Choosing 'preferred' returns ILP64 routine if available, and\n otherwise the 32-bit routine. Default: False\n\n Returns\n -------\n funcs : list\n List containing the found function(s).\n\n Notes\n -----\n This routine automatically chooses between Fortran/C\n interfaces. Fortran code is used whenever possible for arrays with\n column major order. In all other cases, C code is preferred.\n\n In LAPACK, the naming convention is that all functions start with a\n type prefix, which depends on the type of the principal\n matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy\n types {float32, float64, complex64, complex128} respectively, and\n are stored in attribute ``typecode`` of the returned functions.\n\n Examples\n --------\n Suppose we would like to use '?lange' routine which computes the selected\n norm of an array. We pass our array in order to get the correct 'lange'\n flavor.\n\n >>> import scipy.linalg as LA\n >>> a = np.random.rand(3,2)\n >>> x_lange = LA.get_lapack_funcs('lange', (a,))\n >>> x_lange.typecode\n 'd'\n >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,))\n >>> x_lange.typecode\n 'z'\n\n Several LAPACK routines work best when its internal WORK array has\n the optimal size (big enough for fast computation and small enough to\n avoid waste of memory). This size is determined also by a dedicated query\n to the function which is often wrapped as a standalone function and\n commonly denoted as ``###_lwork``. Below is an example for ``?sysv``\n\n >>> import scipy.linalg as LA\n >>> a = np.random.rand(1000,1000)\n >>> b = np.random.rand(1000,1)*1j\n >>> # We pick up zsysv and zsysv_lwork due to b array\n ... 
xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b))\n >>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix\n >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real))\n\n \"\"\"\n if isinstance(ilp64, str):\n if ilp64 == 'preferred':\n ilp64 = HAS_ILP64\n else:\n raise ValueError(\"Invalid value for 'ilp64'\")\n\n if not ilp64:\n return _get_funcs(names, arrays, dtype,\n \"LAPACK\", _flapack, _clapack,\n \"flapack\", \"clapack\", _lapack_alias,\n ilp64=False)\n else:\n if not HAS_ILP64:\n raise RuntimeError(\"LAPACK ILP64 routine requested, but Scipy \"\n \"compiled only with 32-bit BLAS\")\n return _get_funcs(names, arrays, dtype,\n \"LAPACK\", _flapack_64, None,\n \"flapack_64\", None, _lapack_alias,\n ilp64=True)\n\n\n_int32_max = _np.iinfo(_np.int32).max\n_int64_max = _np.iinfo(_np.int64).max\n\n\ndef _compute_lwork(routine, *args, **kwargs):\n \"\"\"\n Round floating-point lwork returned by lapack to integer.\n\n Several LAPACK routines compute optimal values for LWORK, which\n they return in a floating-point variable. However, for large\n values of LWORK, single-precision floating point is not sufficient\n to hold the exact value --- some LAPACK versions (<= 3.5.0 at\n least) truncate the returned integer to single precision and in\n some cases this can be smaller than the required value.\n\n Examples\n --------\n >>> from scipy.linalg import lapack\n >>> n = 5000\n >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork'))\n >>> lwork = lapack._compute_lwork(s_lw, n)\n >>> lwork\n 32000\n\n \"\"\"\n dtype = getattr(routine, 'dtype', None)\n int_dtype = getattr(routine, 'int_dtype', None)\n ret = routine(*args, **kwargs)\n if ret[-1] != 0:\n raise ValueError(\"Internal work array size computation failed: \"\n \"%d\" % (ret[-1],))\n\n if len(ret) == 2:\n return _check_work_float(ret[0].real, dtype, int_dtype)\n else:\n return tuple(_check_work_float(x.real, dtype, int_dtype)\n for x in ret[:-1])\n\n\ndef _check_work_float(value, dtype, int_dtype):\n \"\"\"\n Convert LAPACK-returned work array size float to integer,\n carefully for single-precision types.\n \"\"\"\n\n if dtype == _np.float32 or dtype == _np.complex64:\n # Single-precision routine -- take next fp value to work\n # around possible truncation in LAPACK code\n value = _np.nextafter(value, _np.inf, dtype=_np.float32)\n\n value = int(value)\n if int_dtype.itemsize == 4:\n if value < 0 or value > _int32_max:\n raise ValueError(\"Too large work array required -- computation \"\n \"cannot be performed with standard 32-bit\"\n \" LAPACK.\")\n elif int_dtype.itemsize == 8:\n if value < 0 or value > _int64_max:\n raise ValueError(\"Too large work array required -- computation\"\n \" cannot be performed with standard 64-bit\"\n \" LAPACK.\")\n return value\n" ]
[ [ "numpy.nextafter", "numpy.iinfo", "numpy.deprecate", "scipy._lib._util.DeprecatedImport" ] ]
Expert68/hotel_recommendation
[ "a6c1035c7e3ff2d824039855a2349b50f9143d37" ]
[ "GBDT_modeling.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nimport h5py\nimport os\nfrom data_clean import pre_process,get_agg\n\n#------------------------------定义评估标准---------------------------\ndef map5eval(preds,dtrain):\n actual = dtrain.get_label()\n predicted = preds.argsort(axis=1)[:-np.arange(5)]\n metric = 0\n for i in range(5):\n metric += np.sum(actual==predicted[:i])/(i+1)\n metric /= actual.shape[0]\n\n return 'map5',-metric\n\n#------------------------------对模型进行训练-----------------------------------\nclf = xgb.XGBClassifier(objective='multi:softmax',max_depth=5,n_estimators=300,learning_rate=0.01,nthread=4,subsample=0.7,colsample_bytree=0.7,min_child_weight=3,silent=False)\ndestinations = pd.read_csv('input/destinations.csv')\nresult = pd.read_csv('input/sample_result.csv')\nagg1 = pd.read_csv('output/srch_dest_hc_hm_agg.csv')\n\nif os.path.exists('rows_complete.txt'):\n with open('rows_complete.txt','r') as f:\n skipsize = int(f.readline())\nelse:\n skipsize = 0\n\nskip = 0 if skipsize==0 else range(1,skipsize)\ntchunksize = 1000000\nprint('%d rows will be skipped and next %d rows will be used for training' % (skipsize, tchunksize))\ntrain = pd.read_csv('input/train.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], skiprows=skip, nrows=tchunksize)\ntrain = train[train.is_booking==1]\ntrain = pd.merge(train, destinations, how='left', on='srch_destination_id')\ntrain = pd.merge(train, agg1, how='left', on=['srch_destination_id','hotel_country','hotel_market'])\npre_process(train)\ny = train.hotel_cluster\ntrain.drop(['cnt', 'hotel_cluster', 'is_booking'], axis=1, inplace=True)\nX_train, X_test, y_train, y_test = train_test_split(train, y, stratify=y, test_size=0.2)\nclf.fit(X_train, y_train, early_stopping_rounds=50, eval_metric=map5eval, eval_set=[(X_train, y_train),(X_test, y_test)])\n\n#-----------------------------对测试数据进行预测-----------------------------------\ncount = 0\nchunksize = 10000\npreds = np.empty((result.shape[0],clf.n_classes_))\nreader = pd.read_csv('input/test.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], chunksize=chunksize)\nfor chunk in reader:\n chunk = pd.merge(chunk, destinations, how='left', on='srch_destination_id')\n chunk = pd.merge(chunk, agg1, how='left', on=['srch_destination_id', 'hotel_country', 'hotel_market'])\n chunk.drop(['id'], axis=1, inplace=True)\n pre_process(chunk)\n\n pred = clf.predict_proba(chunk)\n preds[count:(count + chunk.shape[0]), :] = pred\n count = count + chunksize\n print('%d rows completed' % count)\n\ndel clf\ndel agg1\nif os.path.exists('output/probs/allpreds_xgb.h5'):\n with h5py.File('output/probs/allpreds_xgb.h5', 'r+') as hf:\n print('reading in and combining probabilities')\n predshf = hf['preds']\n preds += predshf.value\n print('writing latest probabilities to file')\n predshf[...] = preds\nelse:\n with h5py.File('../output/probs/allpreds_xgb.h5', 'w') as hf:\n print('writing latest probabilities to file')\n hf.create_dataset('preds', data=preds)\n\nprint('generating submission')\ncol_ind = np.argsort(-preds, axis=1)[:,:5]\nhc = [' '.join(row.astype(str)) for row in col_ind]\n\nsub = pd.DataFrame(data=hc, index=result.id)\nsub.reset_index(inplace=True)\nsub.columns = result.columns\nsub.to_csv('output/pred_sub.csv', index=False)\n\n\nskipsize += tchunksize\nwith open('rows_complete.txt', 'w') as f:\n f.write(str(skipsize))" ]
[ [ "numpy.sum", "numpy.empty", "pandas.read_csv", "pandas.DataFrame", "numpy.argsort", "numpy.arange", "pandas.merge", "sklearn.model_selection.train_test_split" ] ]
vincentcheny/models
[ "afb1a59fc1bc792ac72d1a3e22e2469020529788", "afb1a59fc1bc792ac72d1a3e22e2469020529788", "afb1a59fc1bc792ac72d1a3e22e2469020529788", "afb1a59fc1bc792ac72d1a3e22e2469020529788", "afb1a59fc1bc792ac72d1a3e22e2469020529788" ]
[ "research/attention_ocr/python/demo_inference.py", "research/syntaxnet/dragnn/python/transformer_units.py", "research/autoencoder/autoencoder_models/VariationalAutoencoder.py", "research/audioset/vggish/vggish_smoke_test.py", "research/object_detection/dataset_tools/oid_tfrecord_creation.py" ]
[ "\"\"\"A script to run inference on a set of image files.\r\n\r\nNOTE #1: The Attention OCR model was trained only using FSNS train dataset and\r\nit will work only for images which look more or less similar to french street\r\nnames. In order to apply it to images from a different distribution you need\r\nto retrain (or at least fine-tune) it using images from that distribution.\r\n\r\nNOTE #2: This script exists for demo purposes only. It is highly recommended\r\nto use tools and mechanisms provided by the TensorFlow Serving system to run\r\ninference on TensorFlow models in production:\r\nhttps://www.tensorflow.org/serving/serving_basic\r\n\r\nUsage:\r\npython demo_inference.py --batch_size=32 \\\r\n --checkpoint=model.ckpt-399731\\\r\n --image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png\r\n\"\"\"\r\nimport numpy as np\r\nimport PIL.Image\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.python.platform import flags\r\nfrom tensorflow.python.training import monitored_session\r\n\r\nimport common_flags\r\nimport datasets\r\nimport data_provider\r\n\r\nFLAGS = flags.FLAGS\r\ncommon_flags.define()\r\n\r\n# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png\r\nflags.DEFINE_string('image_path_pattern', '',\r\n 'A file pattern with a placeholder for the image index.')\r\n\r\n\r\ndef get_dataset_image_size(dataset_name):\r\n # Ideally this info should be exposed through the dataset interface itself.\r\n # But currently it is not available by other means.\r\n ds_module = getattr(datasets, dataset_name)\r\n height, width, _ = ds_module.DEFAULT_CONFIG['image_shape']\r\n return width, height\r\n\r\n\r\ndef load_images(file_pattern, batch_size, dataset_name):\r\n width, height = get_dataset_image_size(dataset_name)\r\n images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),\r\n dtype='uint8')\r\n for i in range(batch_size):\r\n path = file_pattern % i\r\n print(\"Reading %s\" % path)\r\n pil_image = PIL.Image.open(tf.gfile.GFile(path))\r\n images_actual_data[i, ...] 
= np.asarray(pil_image)\r\n return images_actual_data\r\n\r\n\r\ndef create_model(batch_size, dataset_name):\r\n width, height = get_dataset_image_size(dataset_name)\r\n dataset = common_flags.create_dataset(split_name=FLAGS.split_name)\r\n model = common_flags.create_model(\r\n num_char_classes=dataset.num_char_classes,\r\n seq_length=dataset.max_sequence_length,\r\n num_views=dataset.num_of_views,\r\n null_code=dataset.null_code,\r\n charset=dataset.charset)\r\n raw_images = tf.placeholder(tf.uint8, shape=[batch_size, height, width, 3])\r\n images = tf.map_fn(data_provider.preprocess_image, raw_images,\r\n dtype=tf.float32)\r\n endpoints = model.create_base(images, labels_one_hot=None)\r\n return raw_images, endpoints\r\n\r\n\r\ndef run(checkpoint, batch_size, dataset_name, image_path_pattern):\r\n images_placeholder, endpoints = create_model(batch_size,\r\n dataset_name)\r\n images_data = load_images(image_path_pattern, batch_size,\r\n dataset_name)\r\n session_creator = monitored_session.ChiefSessionCreator(\r\n checkpoint_filename_with_path=checkpoint)\r\n with monitored_session.MonitoredSession(\r\n session_creator=session_creator) as sess:\r\n predictions = sess.run(endpoints.predicted_text,\r\n feed_dict={images_placeholder: images_data})\r\n return predictions.tolist()\r\n\r\n\r\ndef main(_):\r\n print(\"Predicted strings:\")\r\n predictions = run(FLAGS.checkpoint, FLAGS.batch_size, FLAGS.dataset_name,\r\n FLAGS.image_path_pattern)\r\n for line in predictions:\r\n print(line)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.app.run()\r\n", "# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Network units implementing the Transformer network (Vaswani et al. 2017).\r\n\r\nHeavily adapted from the tensor2tensor implementation of the Transformer,\r\ndescribed in detail here: https://arxiv.org/abs/1706.03762.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom dragnn.python import network_units\r\n\r\n\r\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\r\n \"\"\"Adds a bunch of sinusoids of different frequencies to a Tensor.\r\n\r\n Each channel of the input Tensor is incremented by a sinusoid of a different\r\n frequency and phase.\r\n\r\n This allows attention to learn to use absolute and relative positions.\r\n Timing signals should be added to some precursors of both the query and the\r\n memory inputs to attention.\r\n\r\n The use of relative position is possible because sin(x+y) and cos(x+y) can be\r\n expressed in terms of y, sin(x) and cos(x).\r\n\r\n In particular, we use a geometric sequence of timescales starting with\r\n min_timescale and ending with max_timescale. The number of different\r\n timescales is equal to channels / 2. 
For each timescale, we\r\n generate the two sinusoidal signals sin(timestep/timescale) and\r\n cos(timestep/timescale). All of these sinusoids are concatenated in\r\n the channels dimension.\r\n\r\n Args:\r\n x: a Tensor with shape [batch, length, channels]\r\n min_timescale: a float\r\n max_timescale: a float\r\n\r\n Returns:\r\n a Tensor the same shape as x.\r\n \"\"\"\r\n length = tf.shape(x)[1]\r\n channels = tf.shape(x)[2]\r\n pos = tf.to_float(tf.range(length))\r\n num_timescales = channels // 2\r\n log_timescale_increment = (\r\n np.log(float(max_timescale) / float(min_timescale)) /\r\n (tf.to_float(num_timescales) - 1))\r\n inv_timescales = min_timescale * tf.exp(\r\n tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\r\n scaled_time = tf.expand_dims(pos, 1) * tf.expand_dims(inv_timescales, 0)\r\n signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\r\n signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])\r\n signal = tf.reshape(signal, [1, length, channels])\r\n return x + signal\r\n\r\n\r\ndef split_last_dimension(x, n):\r\n \"\"\"Partitions x so that the last dimension becomes two dimensions.\r\n\r\n The first of these two dimensions is n.\r\n\r\n Args:\r\n x: a Tensor with shape [..., m]\r\n n: an integer.\r\n\r\n Returns:\r\n a Tensor with shape [..., n, m/n]\r\n \"\"\"\r\n old_shape = x.get_shape().dims\r\n last = old_shape[-1]\r\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\r\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))\r\n ret.set_shape(new_shape)\r\n return ret\r\n\r\n\r\ndef combine_last_two_dimensions(x):\r\n \"\"\"Reshape x so that the last two dimensions become one.\r\n\r\n Args:\r\n x: a Tensor with shape [..., a, b]\r\n\r\n Returns:\r\n a Tensor with shape [..., ab]\r\n \"\"\"\r\n old_shape = x.get_shape().dims\r\n a, b = old_shape[-2:]\r\n new_shape = old_shape[:-2] + [a * b if a and b else None]\r\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], 0))\r\n ret.set_shape(new_shape)\r\n return ret\r\n\r\n\r\ndef split_heads(x, num_heads):\r\n \"\"\"Splits channels (dimension 3) into multiple heads (becomes dimension 1).\r\n\r\n Args:\r\n x: a Tensor with shape [batch, length, channels]\r\n num_heads: an integer\r\n\r\n Returns:\r\n a Tensor with shape [batch, num_heads, length, channels / num_heads]\r\n \"\"\"\r\n return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3])\r\n\r\n\r\ndef combine_heads(x):\r\n \"\"\"Performs the inverse of split_heads.\r\n\r\n Args:\r\n x: a Tensor with shape [batch, num_heads, length, channels / num_heads]\r\n\r\n Returns:\r\n a Tensor with shape [batch, length, channels]\r\n \"\"\"\r\n return combine_last_two_dimensions(tf.transpose(x, [0, 2, 1, 3]))\r\n\r\n\r\ndef compute_padding_mask(lengths):\r\n \"\"\"Computes an additive mask for padding.\r\n\r\n Given the non-padded sequence lengths for the batch, computes a mask that will\r\n send padding attention to 0 when added to logits before applying a softmax.\r\n\r\n Args:\r\n lengths: a Tensor containing the sequence length of each batch element\r\n\r\n Returns:\r\n A Tensor of shape [batch_size, 1, 1, max_len] with zeros in non-padding\r\n entries and -1e9 in padding entries.\r\n \"\"\"\r\n lengths = tf.reshape(lengths, [-1])\r\n mask = tf.sequence_mask(lengths)\r\n\r\n # This will be used as an additive mask, so we want the inverse of the mask\r\n # produced by tf.sequence_mask.\r\n inv_mask = tf.to_float(tf.logical_not(mask))\r\n\r\n mem_padding = inv_mask * -1e9\r\n return 
tf.expand_dims(tf.expand_dims(mem_padding, 1), 1)\r\n\r\n\r\ndef dot_product_attention(queries, keys, values, dropout_keep_rate, bias=None):\r\n \"\"\"Computes dot-product attention.\r\n\r\n Args:\r\n queries: a Tensor with shape [batch, heads, seq_len, depth_keys]\r\n keys: a Tensor with shape [batch, heads, seq_len, depth_keys]\r\n values: a Tensor with shape [batch, heads, seq_len, depth_values]\r\n dropout_keep_rate: dropout proportion of units to keep\r\n bias: A bias to add before applying the softmax, or None. This can be used\r\n for masking padding in the batch.\r\n\r\n Returns:\r\n A Tensor with shape [batch, heads, seq_len, depth_values].\r\n \"\"\"\r\n # [batch, num_heads, seq_len, seq_len]\r\n logits = tf.matmul(queries, keys, transpose_b=True)\r\n if bias is not None:\r\n logits += bias\r\n\r\n attn_weights = tf.nn.softmax(logits)\r\n\r\n # Dropping out the attention links for each of the heads\r\n attn_weights = network_units.maybe_apply_dropout(attn_weights,\r\n dropout_keep_rate,\r\n False)\r\n return tf.matmul(attn_weights, values)\r\n\r\n\r\ndef residual(old_input, new_input, dropout_keep_rate, layer_norm):\r\n \"\"\"Residual layer combining old_input and new_input.\r\n\r\n Computes old_input + dropout(new_input) if layer_norm is None; otherwise:\r\n layer_norm(old_input + dropout(new_input)).\r\n\r\n Args:\r\n old_input: old float32 Tensor input to residual layer\r\n new_input: new float32 Tensor input to residual layer\r\n dropout_keep_rate: dropout proportion of units to keep\r\n layer_norm: network_units.LayerNorm to apply to residual output, or None\r\n\r\n Returns:\r\n float32 Tensor output of residual layer.\r\n \"\"\"\r\n res_sum = old_input + network_units.maybe_apply_dropout(new_input,\r\n dropout_keep_rate,\r\n False)\r\n return layer_norm.normalize(res_sum) if layer_norm else res_sum\r\n\r\n\r\ndef mlp(component, input_tensor, dropout_keep_rate, depth):\r\n \"\"\"Feed the input through an MLP.\r\n\r\n Each layer except the last is followed by a ReLU activation and dropout.\r\n\r\n Args:\r\n component: the DRAGNN Component containing parameters for the MLP\r\n input_tensor: the float32 Tensor input to the MLP.\r\n dropout_keep_rate: dropout proportion of units to keep\r\n depth: depth of the MLP.\r\n\r\n Returns:\r\n the float32 output Tensor\r\n \"\"\"\r\n for i in range(depth):\r\n ff_weights = component.get_variable('ff_weights_%d' % i)\r\n input_tensor = tf.nn.conv2d(input_tensor,\r\n ff_weights,\r\n [1, 1, 1, 1],\r\n padding='SAME')\r\n # Apply ReLU and dropout to all but the last layer\r\n if i < depth - 1:\r\n input_tensor = tf.nn.relu(input_tensor)\r\n input_tensor = network_units.maybe_apply_dropout(input_tensor,\r\n dropout_keep_rate,\r\n False)\r\n return input_tensor\r\n\r\n\r\nclass TransformerEncoderNetwork(network_units.NetworkUnitInterface):\r\n \"\"\"Implementation of the Transformer network encoder.\"\"\"\r\n\r\n def __init__(self, component):\r\n \"\"\"Initializes parameters for this Transformer unit.\r\n\r\n Args:\r\n component: parent ComponentBuilderBase object.\r\n\r\n Parameters used to construct the network:\r\n num_layers: number of transformer layers (attention + MLP)\r\n hidden_size: size of hidden layers in MLPs\r\n filter_size: filter width for each attention head\r\n num_heads: number of attention heads\r\n residual_dropout: dropout keep rate for residual layers\r\n attention_dropout: dropout keep rate for attention weights\r\n mlp_dropout: dropout keep rate for mlp layers\r\n initialization: initialization scheme to use for 
model parameters\r\n bias_init: initial value for bias parameters\r\n scale_attention: whether to scale attention parameters by filter_size^-0.5\r\n layer_norm_residuals: whether to perform layer normalization on residual\r\n layers\r\n timing_signal: whether to add a position-wise timing signal to the input\r\n kernel: kernel width in middle MLP layers\r\n mlp_layers: number of MLP layers. Must be >= 2.\r\n\r\n Raises:\r\n ValueError: if mlp_layers < 2.\r\n\r\n The input depth of the first layer is inferred from the total concatenated\r\n size of the input features, minus 1 to account for the sequence lengths.\r\n\r\n Hyperparameters used:\r\n dropout_rate: The probability that an input is not dropped. This is the\r\n default when the |dropout_keep_prob| parameter is unset.\r\n \"\"\"\r\n\r\n super(TransformerEncoderNetwork, self).__init__(component)\r\n default_dropout_rate = component.master.hyperparams.dropout_rate\r\n self._attrs = network_units.get_attrs_with_defaults(\r\n component.spec.network_unit.parameters, defaults={\r\n 'num_layers': 4,\r\n 'hidden_size': 256,\r\n 'filter_size': 64,\r\n 'num_heads': 8,\r\n 'residual_drop': default_dropout_rate,\r\n 'attention_drop': default_dropout_rate,\r\n 'mlp_drop': default_dropout_rate,\r\n 'initialization': 'xavier',\r\n 'bias_init': 0.001,\r\n 'scale_attention': True,\r\n 'layer_norm_residuals': True,\r\n 'timing_signal': True,\r\n 'kernel': 1,\r\n 'mlp_layers': 2})\r\n\r\n self._num_layers = self._attrs['num_layers']\r\n self._hidden_size = self._attrs['hidden_size']\r\n self._filter_size = self._attrs['filter_size']\r\n self._num_heads = self._attrs['num_heads']\r\n self._residual_dropout = self._attrs['residual_drop']\r\n self._attention_dropout = self._attrs['attention_drop']\r\n self._mlp_dropout = self._attrs['mlp_drop']\r\n self._initialization = self._attrs['initialization']\r\n self._bias_init = self._attrs['bias_init']\r\n self._scale_attn = self._attrs['scale_attention']\r\n self._layer_norm_res = self._attrs['layer_norm_residuals']\r\n self._timing_signal = self._attrs['timing_signal']\r\n self._kernel = self._attrs['kernel']\r\n self._mlp_depth = self._attrs['mlp_layers']\r\n\r\n if self._mlp_depth < 2:\r\n raise ValueError('TransformerEncoderNetwork needs mlp_layers >= 2')\r\n\r\n self._combined_filters = self._num_heads * self._filter_size\r\n\r\n self._weights = []\r\n self._biases = []\r\n self._layer_norms = {}\r\n\r\n # Hacky: one dimension comes from the lengths input; subtract it.\r\n self._concatenated_input_dim -= 1\r\n\r\n # Initial projection of inputs, this is mainly to project input down to the\r\n # right size for residual layers\r\n proj_shape = [1, 1, self._concatenated_input_dim, self._combined_filters]\r\n self._weights.append(\r\n network_units.add_var_initialized('init_proj', proj_shape,\r\n self._initialization))\r\n self._biases.append(tf.get_variable('init_bias',\r\n self._combined_filters,\r\n initializer=tf.constant_initializer(\r\n self._bias_init),\r\n dtype=tf.float32))\r\n\r\n for i in range(self._num_layers):\r\n with tf.variable_scope('transform_%d' % i):\r\n # Attention weights: 3 * self.combined_filters = (q, k, v)\r\n # We assume that q, k and v all have the same dimension\r\n attn_shape = [1, 1, self._combined_filters, 3 * self._combined_filters]\r\n self._weights.append(\r\n network_units.add_var_initialized('attn_weights',\r\n attn_shape,\r\n self._initialization))\r\n\r\n # Attention final projection weights\r\n proj_shape = [1, 1, self._combined_filters, self._combined_filters]\r\n 
self._weights.append(\r\n network_units.add_var_initialized('proj_weights',\r\n proj_shape,\r\n self._initialization))\r\n\r\n # MLP weights\r\n with tf.variable_scope('mlp'):\r\n ff_shape = [1, 1, self._combined_filters, self._hidden_size]\r\n self._weights.append(\r\n network_units.add_var_initialized('ff_weights_0',\r\n ff_shape,\r\n self._initialization))\r\n ff_shape = [1, self._kernel, self._hidden_size, self._hidden_size]\r\n for j in range(1, self._mlp_depth - 1):\r\n self._weights.append(\r\n network_units.add_var_initialized('ff_weights_%d' % j,\r\n ff_shape,\r\n self._initialization))\r\n ff_shape = [1, 1, self._hidden_size, self._combined_filters]\r\n self._weights.append(\r\n network_units.add_var_initialized('ff_weights_%d' %\r\n (self._mlp_depth - 1),\r\n ff_shape,\r\n self._initialization))\r\n\r\n # Layer normalization for residual layers\r\n if self._layer_norm_res:\r\n attn_layer_norm = network_units.LayerNorm(component,\r\n 'attn_layer_norm_%d' % i,\r\n self._combined_filters,\r\n tf.float32)\r\n self._layer_norms['attn_layer_norm_%d' % i] = attn_layer_norm\r\n\r\n ff_layer_norm = network_units.LayerNorm(component,\r\n 'ff_layer_norm_%d' % i,\r\n self._combined_filters,\r\n tf.float32)\r\n self._layer_norms['ff_layer_norm_%d' % i] = ff_layer_norm\r\n\r\n # Layer norm parameters are not added to self._weights,\r\n # which means that they are not l2 regularized\r\n self._params.extend(attn_layer_norm.params + ff_layer_norm.params)\r\n\r\n self._params.extend(self._weights)\r\n self._params.extend(self._biases)\r\n self._regularized_weights.extend(self._weights)\r\n self._layers.append(\r\n network_units.Layer(component, name='transformer_output',\r\n dim=self._combined_filters))\r\n\r\n def create(self,\r\n fixed_embeddings,\r\n linked_embeddings,\r\n context_tensor_arrays,\r\n attention_tensor,\r\n during_training,\r\n stride=None):\r\n \"\"\"Requires |stride|; otherwise see base class.\"\"\"\r\n del context_tensor_arrays, attention_tensor\r\n if stride is None:\r\n raise RuntimeError(\"TransformerEncoderNetwork needs 'stride' and must be \"\r\n \"called in the bulk feature extractor component.\")\r\n\r\n lengths = network_units.lookup_named_tensor('lengths', linked_embeddings)\r\n lengths_s = tf.to_int32(tf.squeeze(lengths.tensor, [1]))\r\n num_steps = tf.reduce_max(lengths_s)\r\n\r\n in_tensor = network_units.lookup_named_tensor('features', linked_embeddings)\r\n input_tensor = tf.reshape(in_tensor.tensor, [stride, num_steps, -1])\r\n\r\n if self._timing_signal:\r\n input_tensor = add_timing_signal_1d(input_tensor)\r\n\r\n # Adds a dimension for conv2d\r\n input_tensor = tf.expand_dims(input_tensor, 1)\r\n\r\n # For masking padding in attention\r\n mask = compute_padding_mask(lengths_s)\r\n\r\n conv = tf.nn.conv2d(input_tensor,\r\n self._component.get_variable('init_proj'),\r\n [1, 1, 1, 1], padding='SAME')\r\n conv = tf.nn.bias_add(conv, self._component.get_variable('init_bias'))\r\n\r\n for i in range(self._num_layers):\r\n with tf.variable_scope('transform_%d' % i, reuse=True):\r\n attn_weights = self._component.get_variable('attn_weights')\r\n attn_combined = tf.nn.conv2d(conv,\r\n attn_weights,\r\n [1, 1, 1, 1],\r\n padding='SAME')\r\n attn_combined = tf.squeeze(attn_combined, 1)\r\n\r\n # Splits combined projection into queries, keys, and values\r\n queries, keys, values = tf.split(attn_combined,\r\n [self._combined_filters]*3,\r\n axis=2)\r\n\r\n # Splits each of queries, keys, values into attention heads\r\n queries = split_heads(queries, self._num_heads)\r\n 
keys = split_heads(keys, self._num_heads)\r\n values = split_heads(values, self._num_heads)\r\n if self._scale_attn:\r\n queries *= self._filter_size**-0.5\r\n\r\n # Performs dot product attention and concatenates the resulting heads\r\n attended = dot_product_attention(queries, keys, values,\r\n self._attention_dropout, mask)\r\n attended = combine_heads(attended)\r\n\r\n # Projects combined heads\r\n attended = tf.expand_dims(attended, 1)\r\n proj = tf.nn.conv2d(attended,\r\n self._component.get_variable('proj_weights'),\r\n [1, 1, 1, 1],\r\n padding='SAME')\r\n\r\n # Residual connection between input and attended input\r\n attn_layer_norm_params = None\r\n if self._layer_norm_res:\r\n attn_layer_norm_params = self._layer_norms['attn_layer_norm_%d' % i]\r\n proj_res = residual(conv, proj, self._residual_dropout,\r\n attn_layer_norm_params)\r\n\r\n # Feed forward\r\n with tf.variable_scope('mlp'):\r\n ff = mlp(self._component, proj_res, self._mlp_dropout,\r\n self._mlp_depth)\r\n\r\n # Residual connection between attended input and feed forward layers\r\n ff_layer_norm_params = None\r\n if self._layer_norm_res:\r\n ff_layer_norm_params = self._layer_norms['ff_layer_norm_%d' % i]\r\n conv = residual(proj_res, ff, self._residual_dropout,\r\n ff_layer_norm_params)\r\n\r\n return [tf.reshape(conv, [-1, self._combined_filters],\r\n name='reshape_activations')]\r\n\r\n\r\nclass PairwiseBilinearLabelNetwork(network_units.NetworkUnitInterface):\r\n r\"\"\"Network unit that computes pairwise bilinear label scores.\r\n\r\n Given source and target representations for each token, this network unit\r\n computes bilinear scores for each label for each of the N^2 combinations of\r\n source and target tokens, rather than for only N already-computed\r\n source/target pairs (as is performed by the biaffine_units). The output is\r\n suitable as input to e.g. 
the heads_labels transition system.\r\n Specifically, a weights tensor W called `bilinear' is used to compute bilinear\r\n scores B for input tensors S and T:\r\n\r\n B_{bnml} = \\sum_{i,j} S_{bni} W_{ilj} T{bmj}\r\n\r\n for batches b, steps n and m and labels l.\r\n\r\n Parameters:\r\n num_labels: The number of dependency labels, L.\r\n\r\n Features:\r\n sources: [B * N, S] matrix of batched activations for source tokens.\r\n targets: [B * N, T] matrix of batched activations for target tokens.\r\n\r\n Layers:\r\n bilinear_scores: [B * N, N * L] matrix where vector b*N*N*L+t contains\r\n per-label scores for all N possible arcs from token t in\r\n batch b.\r\n \"\"\"\r\n\r\n def __init__(self, component):\r\n super(PairwiseBilinearLabelNetwork, self).__init__(component)\r\n parameters = component.spec.network_unit.parameters\r\n\r\n self._num_labels = int(parameters['num_labels'])\r\n\r\n self._source_dim = self._linked_feature_dims['sources']\r\n self._target_dim = self._linked_feature_dims['targets']\r\n\r\n self._weights = []\r\n self._weights.append(\r\n network_units.add_var_initialized('bilinear',\r\n [self._source_dim,\r\n self._num_labels,\r\n self._target_dim],\r\n 'xavier'))\r\n\r\n self._params.extend(self._weights)\r\n self._regularized_weights.extend(self._weights)\r\n self._layers.append(network_units.Layer(component,\r\n name='bilinear_scores',\r\n dim=self._num_labels))\r\n\r\n def create(self,\r\n fixed_embeddings,\r\n linked_embeddings,\r\n context_tensor_arrays,\r\n attention_tensor,\r\n during_training,\r\n stride=None):\r\n \"\"\"Requires |stride|; otherwise see base class.\"\"\"\r\n del context_tensor_arrays, attention_tensor\r\n if stride is None:\r\n raise RuntimeError(\"PairwiseBilinearLabelNetwork needs 'stride' and must \"\r\n \"be called in a bulk component.\")\r\n\r\n sources = network_units.lookup_named_tensor('sources', linked_embeddings)\r\n sources_tensor = tf.reshape(sources.tensor, [stride, -1, self._source_dim])\r\n\r\n targets = network_units.lookup_named_tensor('targets', linked_embeddings)\r\n targets_tensor = tf.reshape(targets.tensor, [stride, -1, self._target_dim])\r\n\r\n # Dimensions: source_dim x num_labels x target_dim\r\n bilinear_params = self._component.get_variable('bilinear')\r\n\r\n # Ensures that num_steps is the same for both inputs\r\n num_steps = tf.shape(sources_tensor)[1]\r\n with tf.control_dependencies([tf.assert_equal(num_steps,\r\n tf.shape(targets_tensor)[1],\r\n name='num_steps_mismatch')]):\r\n # Dimensions:\r\n # (batch_size*num_steps x source_dim) *\r\n # (source_dim x num_labels*target_dim)\r\n # = (batch_size*num_steps x num_labels*target_dim)\r\n lin = tf.matmul(tf.reshape(sources_tensor, [-1, self._source_dim]),\r\n tf.reshape(bilinear_params, [self._source_dim, -1]))\r\n\r\n # (batch_size x num_steps*num_labels x target_dim) *\r\n # (batch_size x num_steps x target_dim)^T\r\n # = (batch_size x num_steps*num_labels x num_steps)\r\n bilin = tf.matmul(\r\n tf.reshape(lin, [-1, num_steps*self._num_labels, self._target_dim]),\r\n targets_tensor, transpose_b=True)\r\n\r\n # (batch_size x num_steps*num_labels x num_steps) ->\r\n # (batch_size x num_steps x num_steps*num_labels)\r\n scores = tf.transpose(bilin, [0, 2, 1])\r\n\r\n return [tf.reshape(scores, [-1, num_steps*self._num_labels],\r\n name='reshape_activations')]\r\n", "import tensorflow as tf\r\n\r\nclass VariationalAutoencoder(object):\r\n\r\n def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):\r\n self.n_input = n_input\r\n self.n_hidden = 
n_hidden\r\n\r\n network_weights = self._initialize_weights()\r\n self.weights = network_weights\r\n\r\n # model\r\n self.x = tf.placeholder(tf.float32, [None, self.n_input])\r\n self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])\r\n self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])\r\n\r\n # sample from gaussian distribution\r\n eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)\r\n self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))\r\n\r\n self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])\r\n\r\n # cost\r\n reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))\r\n latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq\r\n - tf.square(self.z_mean)\r\n - tf.exp(self.z_log_sigma_sq), 1)\r\n self.cost = tf.reduce_mean(reconstr_loss + latent_loss)\r\n self.optimizer = optimizer.minimize(self.cost)\r\n\r\n init = tf.global_variables_initializer()\r\n self.sess = tf.Session()\r\n self.sess.run(init)\r\n\r\n def _initialize_weights(self):\r\n all_weights = dict()\r\n all_weights['w1'] = tf.get_variable(\"w1\", shape=[self.n_input, self.n_hidden],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n all_weights['log_sigma_w1'] = tf.get_variable(\"log_sigma_w1\", shape=[self.n_input, self.n_hidden],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))\r\n all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))\r\n all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))\r\n all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))\r\n return all_weights\r\n\r\n def partial_fit(self, X):\r\n cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})\r\n return cost\r\n\r\n def calc_total_cost(self, X):\r\n return self.sess.run(self.cost, feed_dict = {self.x: X})\r\n\r\n def transform(self, X):\r\n return self.sess.run(self.z_mean, feed_dict={self.x: X})\r\n\r\n def generate(self, hidden = None):\r\n if hidden is None:\r\n hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))\r\n return self.sess.run(self.reconstruction, feed_dict={self.z: hidden})\r\n\r\n def reconstruct(self, X):\r\n return self.sess.run(self.reconstruction, feed_dict={self.x: X})\r\n\r\n def getWeights(self):\r\n return self.sess.run(self.weights['w1'])\r\n\r\n def getBiases(self):\r\n return self.sess.run(self.weights['b1'])\r\n\r\n", "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"A smoke test for VGGish.\r\n\r\nThis is a simple smoke test of a local install of VGGish and its associated\r\ndownloaded files. 
We create a synthetic sound, extract log mel spectrogram\r\nfeatures, run them through VGGish, post-process the embedding ouputs, and\r\ncheck some simple statistics of the results, allowing for variations that\r\nmight occur due to platform/version differences in the libraries we use.\r\n\r\nUsage:\r\n- Download the VGGish checkpoint and PCA parameters into the same directory as\r\n the VGGish source code. If you keep them elsewhere, update the checkpoint_path\r\n and pca_params_path variables below.\r\n- Run:\r\n $ python vggish_smoke_test.py\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport vggish_input\r\nimport vggish_params\r\nimport vggish_postprocess\r\nimport vggish_slim\r\n\r\nprint('\\nTesting your install of VGGish\\n')\r\n\r\n# Paths to downloaded VGGish files.\r\ncheckpoint_path = 'vggish_model.ckpt'\r\npca_params_path = 'vggish_pca_params.npz'\r\n\r\n# Relative tolerance of errors in mean and standard deviation of embeddings.\r\nrel_error = 0.1 # Up to 10%\r\n\r\n# Generate a 1 kHz sine wave at 44.1 kHz (we use a high sampling rate\r\n# to test resampling to 16 kHz during feature extraction).\r\nnum_secs = 3\r\nfreq = 1000\r\nsr = 44100\r\nt = np.linspace(0, num_secs, int(num_secs * sr))\r\nx = np.sin(2 * np.pi * freq * t)\r\n\r\n# Produce a batch of log mel spectrogram examples.\r\ninput_batch = vggish_input.waveform_to_examples(x, sr)\r\nprint('Log Mel Spectrogram example: ', input_batch[0])\r\nnp.testing.assert_equal(\r\n input_batch.shape,\r\n [num_secs, vggish_params.NUM_FRAMES, vggish_params.NUM_BANDS])\r\n\r\n# Define VGGish, load the checkpoint, and run the batch through the model to\r\n# produce embeddings.\r\nwith tf.Graph().as_default(), tf.Session() as sess:\r\n vggish_slim.define_vggish_slim()\r\n vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)\r\n\r\n features_tensor = sess.graph.get_tensor_by_name(\r\n vggish_params.INPUT_TENSOR_NAME)\r\n embedding_tensor = sess.graph.get_tensor_by_name(\r\n vggish_params.OUTPUT_TENSOR_NAME)\r\n [embedding_batch] = sess.run([embedding_tensor],\r\n feed_dict={features_tensor: input_batch})\r\n print('VGGish embedding: ', embedding_batch[0])\r\n expected_embedding_mean = 0.131\r\n expected_embedding_std = 0.238\r\n np.testing.assert_allclose(\r\n [np.mean(embedding_batch), np.std(embedding_batch)],\r\n [expected_embedding_mean, expected_embedding_std],\r\n rtol=rel_error)\r\n\r\n# Postprocess the results to produce whitened quantized embeddings.\r\npproc = vggish_postprocess.Postprocessor(pca_params_path)\r\npostprocessed_batch = pproc.postprocess(embedding_batch)\r\nprint('Postprocessed VGGish embedding: ', postprocessed_batch[0])\r\nexpected_postprocessed_mean = 123.0\r\nexpected_postprocessed_std = 75.0\r\nnp.testing.assert_allclose(\r\n [np.mean(postprocessed_batch), np.std(postprocessed_batch)],\r\n [expected_postprocessed_mean, expected_postprocessed_std],\r\n rtol=rel_error)\r\n\r\nprint('\\nLooks Good To Me!\\n')\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Utilities for creating TFRecords of TF examples for the Open Images dataset.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\n\r\nfrom object_detection.core import standard_fields\r\nfrom object_detection.utils import dataset_util\r\n\r\n\r\ndef tf_example_from_annotations_data_frame(annotations_data_frame, label_map,\r\n encoded_image):\r\n \"\"\"Populates a TF Example message with image annotations from a data frame.\r\n\r\n Args:\r\n annotations_data_frame: Data frame containing the annotations for a single\r\n image.\r\n label_map: String to integer label map.\r\n encoded_image: The encoded image string\r\n\r\n Returns:\r\n The populated TF Example, if the label of at least one object is present in\r\n label_map. Otherwise, returns None.\r\n \"\"\"\r\n\r\n filtered_data_frame = annotations_data_frame[\r\n annotations_data_frame.LabelName.isin(label_map)]\r\n filtered_data_frame_boxes = filtered_data_frame[\r\n ~filtered_data_frame.YMin.isnull()]\r\n filtered_data_frame_labels = filtered_data_frame[\r\n filtered_data_frame.YMin.isnull()]\r\n image_id = annotations_data_frame.ImageID.iloc[0]\r\n\r\n feature_map = {\r\n standard_fields.TfExampleFields.object_bbox_ymin:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.YMin.as_matrix()),\r\n standard_fields.TfExampleFields.object_bbox_xmin:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.XMin.as_matrix()),\r\n standard_fields.TfExampleFields.object_bbox_ymax:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.YMax.as_matrix()),\r\n standard_fields.TfExampleFields.object_bbox_xmax:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.XMax.as_matrix()),\r\n standard_fields.TfExampleFields.object_class_text:\r\n dataset_util.bytes_list_feature(\r\n filtered_data_frame_boxes.LabelName.as_matrix()),\r\n standard_fields.TfExampleFields.object_class_label:\r\n dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.LabelName.map(lambda x: label_map[x])\r\n .as_matrix()),\r\n standard_fields.TfExampleFields.filename:\r\n dataset_util.bytes_feature('{}.jpg'.format(image_id)),\r\n standard_fields.TfExampleFields.source_id:\r\n dataset_util.bytes_feature(image_id),\r\n standard_fields.TfExampleFields.image_encoded:\r\n dataset_util.bytes_feature(encoded_image),\r\n }\r\n\r\n if 'IsGroupOf' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_group_of] = dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.IsGroupOf.as_matrix().astype(int))\r\n if 'IsOccluded' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_occluded] = dataset_util.int64_list_feature(\r\n 
filtered_data_frame_boxes.IsOccluded.as_matrix().astype(\r\n int))\r\n if 'IsTruncated' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_truncated] = dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.IsTruncated.as_matrix().astype(\r\n int))\r\n if 'IsDepiction' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_depiction] = dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.IsDepiction.as_matrix().astype(\r\n int))\r\n\r\n if 'ConfidenceImageLabel' in filtered_data_frame_labels.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n image_class_label] = dataset_util.int64_list_feature(\r\n filtered_data_frame_labels.LabelName.map(\r\n lambda x: label_map[x]).as_matrix())\r\n feature_map[standard_fields.TfExampleFields.\r\n image_class_text] = dataset_util.bytes_list_feature(\r\n filtered_data_frame_labels.LabelName.as_matrix()),\r\n return tf.train.Example(features=tf.train.Features(feature=feature_map))\r\n" ]
[ [ "tensorflow.placeholder", "tensorflow.python.training.monitored_session.MonitoredSession", "tensorflow.app.run", "tensorflow.map_fn", "tensorflow.gfile.GFile", "numpy.asarray", "tensorflow.python.platform.flags.DEFINE_string", "numpy.ndarray", "tensorflow.python.training.monitored_session.ChiefSessionCreator" ], [ "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.squeeze", "tensorflow.sequence_mask", "tensorflow.nn.softmax", "tensorflow.split", "tensorflow.mod", "tensorflow.transpose", "tensorflow.sin", "tensorflow.constant_initializer", "tensorflow.shape", "tensorflow.to_float", "tensorflow.expand_dims", "tensorflow.logical_not", "tensorflow.range", "tensorflow.nn.conv2d", "tensorflow.cos", "tensorflow.nn.relu" ], [ "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.shape", "tensorflow.global_variables_initializer", "tensorflow.subtract", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.matmul", "tensorflow.exp", "tensorflow.square", "tensorflow.Session", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.random_normal" ], [ "numpy.testing.assert_equal", "numpy.std", "tensorflow.Graph", "tensorflow.Session", "numpy.sin", "numpy.mean" ], [ "tensorflow.train.Features" ] ]
bmcmenamin/word2vec_advice
[ "69dbde89b26b80d10f778147f2e3abe1628d6e05" ]
[ "scrape_scripts/textScrape.py" ]
[ "#!/Users/mcmenamin/.virtualenvs/py3env/bin/python\n\nfrom lxml import html\nimport requests\n\nfrom datetime import date\nimport numpy as np\nimport pandas as pd\n\nimport re as re\n\nfrom itertools import chain\nimport pickle\n\nfrom tqdm import tqdm\n\ndef getURLforYear(year, archiveURL='http://www.uexpress.com/dearabby/archives'):\n archive = requests.get('{0}/{1}'.format(archiveURL, year))\n tree = html.fromstring(archive.text)\n urlList = [a.attrib['href'] for a in tree.find_class('media-link-main')]\n return urlList\n\ndef scrape_page(extURL, baseURL='http://www.uexpress.com/'): \n page = requests.get('{0}{1}'.format(baseURL, extURL))\n tree = html.fromstring(page.text)\n questions = tree.find_class('item-section')\n allQ = []\n for q in questions:\n qText = [i.text_content() for i in q.iterfind('p')]\n allQ += qText\n allQ = ' '.join(allQ)\n return allQ\n\ndef parseAbby(block):\n block = block.strip().split('DEAR ')\n\n abbyBlock = [p.startswith('ABBY:') for p in block]\n dearReaderBlock = [p.startswith('READERS:') for p in block]\n replyBlock = [not (p[0] or p[1]) for p in zip(abbyBlock, dearReaderBlock)]\n \n QA_pairs = []\n if True in abbyBlock and True in replyBlock:\n firstBlock = abbyBlock.index(True)\n \n block = block[firstBlock:]\n abbyBlock = abbyBlock[firstBlock:]\n dearReaderBlock = dearReaderBlock[firstBlock:]\n replyBlock = replyBlock[firstBlock:]\n \n for i in range(len(block)-1):\n if abbyBlock[i] and replyBlock[i+1]:\n QA_pairs.append([block[i], block[i+1]])\n return QA_pairs\n\n\n#\n# Get an iterator of URLs from archives for a specific date range\n#\n\narchivedURLs = list(chain.from_iterable([getURLforYear(y) for y in range(1991,2017+1)]))\n\n\n#\n# Pull in the text from each archived URL\n#\n\nall_text_dict = {}\nfor url in tqdm(archivedURLs):\n raw_text = scrape_page(url)\n all_text_dict[url] = {'path': url,\n 'date': date(*[int(i) for i in url.split('/')[2:5]]),\n 'raw_text': raw_text,\n 'parse_text': parseAbby(raw_text)\n } \ndf_text = pd.DataFrame.from_dict(all_text_dict, orient='index')\n\ndf_text.to_pickle('abbyText.pickle')\n\ndf_text.to_json('abbyText.json',\n lines=True,\n orient='records',\n force_ascii=True\n)\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
globotree/tensorflow
[ "b944fb947898de8cb4279a5a8a066955ba685412" ]
[ "tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_variables_v1.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# RUN: %p/multi_variables_v1 | FileCheck %s\n\n# pylint: disable=missing-docstring,line-too-long\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1\n\n# Verify that the tf.versions attribute exists. It is difficult to enforce\n# contents, since the version numbers change over time. The conversion logic\n# itself is verified in the common graphdef converter, so here just assert\n# it is being invoked.\n# CHECK: module\n# CHECK-SAME: tf.versions\n# CHECK-SAME: bad_consumers\n# CHECK-SAME: min_consumer\n# CHECK-SAME: producer\n\n# CHECK: \"tf_saved_model.global_tensor\"() {is_mutable, sym_name = \"y\", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>} : () -> ()\n# CHECK: \"tf_saved_model.global_tensor\"() {is_mutable, sym_name = \"z\", type = tensor<3x3xf32>, value = {{.*}} : tensor<3x3xf32>} : () -> ()\n# CHECK: func @basic([[ARG0:%.*]]: tensor<3x1xf32>,\n# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @y}\n# CHECK-SAME: [[ARG2:%.*]]: tensor<!tf.resource<tensor<3x3xf32>>> {tf_saved_model.bound_input = @z}) -> tensor<3x3xf32>\n# CHECK-NEXT: [[R0:%.*]] = \"tf.ReadVariableOp\"([[ARG1]]) {{{.*}}} : (tensor<!tf.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>\n# CHECK-NEXT: [[R1:%.*]] = \"tf.MatMul\"([[ARG0]], [[R0]]) {{{.*}}} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>\n# CHECK-NEXT: [[R2:%.*]] = \"tf.ReadVariableOp\"([[ARG2]]) {{{.*}}} : (tensor<!tf.resource<tensor<3x3xf32>>>) -> tensor<3x3xf32>\n# CHECK-NEXT: [[R3:%.*]] = \"tf.MatMul\"([[R1]], [[R2]]) {{{.*}}} : (tensor<3x3xf32>, tensor<3x3xf32>) -> tensor<3x3xf32>\n# CHECK-NEXT: return [[R3]] : tensor<3x3xf32>\n\n\ndef Test():\n\n # Default TF1.x uses reference variables that are not supported by SavedModel\n # v1 Importer. 
To use SavedModel V1 Importer, resource variables should be\n # enabled.\n tf.compat.v1.enable_resource_variables()\n\n tf.compat.v1.disable_eager_execution()\n\n x = tf.constant([[1.0], [1.0], [1.0]])\n y = tf.compat.v1.get_variable(\n name='y',\n shape=(1, 3),\n initializer=tf.random_normal_initializer(),\n trainable=True)\n z = tf.compat.v1.get_variable(\n name='z',\n shape=(3, 3),\n initializer=tf.random_normal_initializer(),\n trainable=True)\n r = tf.matmul(x, y)\n s = tf.matmul(r, z)\n\n tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)\n tensor_info_s = tf.compat.v1.saved_model.utils.build_tensor_info(s)\n\n return {\n 'basic':\n (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': tensor_info_x},\n outputs={'s': tensor_info_s},\n method_name=tf.saved_model.PREDICT_METHOD_NAME))\n }\n\n\nif __name__ == '__main__':\n common_v1.do_test(Test())\n" ]
[ [ "tensorflow.compat.v1.compat.v1.saved_model.utils.build_tensor_info", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.random_normal_initializer", "tensorflow.compat.v1.compat.v1.enable_resource_variables", "tensorflow.compat.v1.compat.v1.saved_model.signature_def_utils.build_signature_def", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.compat.v1.disable_eager_execution" ] ]
shpotes/s4x
[ "83151b8a7cfc78ebfc1d87ccc5109b6a0444a5e5" ]
[ "t5x/losses_test.py" ]
[ "# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for t5x.losses.\"\"\"\n\nfrom absl.testing import absltest\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom t5x import losses\n\n\nclass LossTest(absltest.TestCase):\n\n def test_xent(self):\n\n def lossfn(logits, targets, weights):\n loss, z_loss, weight_sum = losses.compute_weighted_cross_entropy(\n logits,\n targets,\n weights,\n label_smoothing=0.1,\n z_loss=0.1,\n loss_normalizing_factor=0.1)\n return loss, (z_loss, weight_sum)\n\n batch_size = 2\n length = 4\n vocab_size = 8\n logits = np.random.normal(size=(batch_size, length,\n vocab_size)).astype(np.float32)\n targets = np.random.randint(0, vocab_size, size=(batch_size, length))\n weights = np.ones_like(targets)\n out = jax.jit(jax.value_and_grad(lossfn, has_aux=True))(logits, targets,\n weights)\n (loss, (z_loss, weight_sum)), dlogits = out\n # Just a smoke test for now\n # TODO(t5x): Expand test\n print(jax.device_get(((loss, (z_loss, weight_sum)), dlogits)))\n\n\nclass SpecialLossNormalizingFactorTest(absltest.TestCase):\n\n def test_num_real_target_tokens(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)\n }\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .NUM_REAL_TARGET_TOKENS,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 6.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],\n dtype=np.float32),\n rtol=1e-3)\n\n def test_num_total_target_tokens(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)\n }\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .NUM_TOTAL_TARGET_TOKENS,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 10.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],\n dtype=np.float32),\n rtol=1e-3)\n\n def test_average_per_sequence(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)\n }\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .AVERAGE_PER_SEQUENCE,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n jnp.asarray([[0.25, 0.25, 0.25, 0.25, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],\n jnp.float32),\n rtol=1e-3)\n\n def test_average_per_sequence_with_weights(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32),\n 'decoder_loss_weights':\n jnp.asarray([[0.5, 1.0, 0.25, 2.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],\n jnp.float32)\n 
}\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .AVERAGE_PER_SEQUENCE,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n jnp.asarray(\n [[0.1333, 0.2666, 0.0666, 0.5333, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],\n jnp.float32),\n rtol=1e-3)\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.random.normal", "numpy.ones_like", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.randint" ] ]
rafalmularczyk/public_lectures
[ "fcd10c217f56021ebdec0046dfe0def7f31e9b0c" ]
[ "Data Analytics/Utilities/DA_tools.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nlight=\"#FFFCDC\"\nlight_highlight=\"#FEF590\"\nmid=\"#FDED2A\"\nmid_highlight=\"#f0dc05\"\ndark=\"#EECA02\"\ndark_highlight=\"#BB9700\"\ngreen=\"#00FF00\"\nlight_grey=\"#DDDDDD\"\n\ndef is_sorted(a):\n '''Check if numpy 1d-array is sorted\n '''\n return np.all(a[:-1] <= a[1:])\n\ndef ribbon_plot(x, fx, ax=None,zorder=0):\n '''Plot a ribbon plot for regression and similar.\n Plot consists of quantiles (by 10%) of a variate (fx) as a function of covariate (x).\n x has shape (n, )\n fx has shape (N,n)\n '''\n if ax is None:\n ax = plt.gca()\n if not is_sorted(x):\n print('Sorting')\n arr2D = np.concatenate([np.expand_dims(x,axis=0),fx],axis=0)\n sortedArr = arr2D [ :, arr2D[0].argsort()]\n x = sortedArr[0,:]\n fx = sortedArr[1:,:]\n\n probs = [10, 20, 30, 40, 50, 60, 70, 80, 90]\n perc_interv=np.percentile(fx, probs, axis=0)\n ax.fill_between(x,perc_interv[0,:],perc_interv[8,:],color=light,zorder=zorder)\n ax.fill_between(x,perc_interv[1,:],perc_interv[7,:],color=light_highlight,zorder=zorder)\n ax.fill_between(x,perc_interv[2,:],perc_interv[6,:],color=mid,zorder=zorder)\n ax.fill_between(x,perc_interv[3,:],perc_interv[5,:],color=mid_highlight,zorder=zorder)\n ax.plot(x,perc_interv[4,:],color=dark,zorder=zorder)\n return(ax)\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.all", "numpy.percentile", "numpy.expand_dims" ] ]
mkulariya1/tefla
[ "8de25c1b67dcf025535f5e8c40539de59acd7fb8" ]
[ "tefla/core/losses.py" ]
[ "# -------------------------------------------------------------------#\n# Written by Mrinal Haloi\n# Contact: [email protected]\n# Copyright 2016, Mrinal Haloi\n# -------------------------------------------------------------------#\nimport numpy as np\nimport tensorflow as tf\nimport numbers\nfrom functools import partial\nfrom ..utils import util\nfrom .layers import flatten, fully_connected as fc, relu\nfrom .layers import gradient_reverse\nfrom ..utils import losses_utils\nlog_loss = tf.losses.log_loss\n\n\ndef log_loss_custom(predictions, labels, eps=1e-7, name='log'):\n \"\"\"Define a log loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels.\n eps: a constant to set upper or lower limit for labels, smoothening factor\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the log loss.\n \"\"\"\n with tf.name_scope(name):\n predictions = tf.to_float(predictions)\n labels = tf.to_float(labels)\n predictions = tf.clip_by_value(predictions, eps, 1 - eps)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n loss = -tf.reduce_mean(labels * tf.log(predictions))\n return loss\n\n\ndef log_loss_tf(predictions, labels, eps=1e-7, weights=1.0, name='log_loss'):\n \"\"\"Define a log loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels.\n eps: a constant to set upper or lower limit for labels, smoothening factor\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the log loss.\n \"\"\"\n with tf.name_scope(name):\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n predictions = tf.to_float(predictions)\n labels = tf.to_float(labels)\n losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply(\n (1 - labels), tf.log(1 - predictions + eps))\n return tf.losses.compute_weighted_loss(losses, weights)\n\n\ndef kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5, batch_size=32, name='kappa'):\n \"\"\"Define a kappa loss, Its a continuous differentiable approximation of\n discrete kappa loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n y_pow: int, to whcih the labels should be raised; useful if model diverge. e.g. 
y_pow=2\n num_ratings: numbers of rater to used, typically num_classes of the model\n batch_size: batch_size of the training or validation ops\n eps: a float, prevents divide by zero\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the kappa loss.\n \"\"\"\n with tf.name_scope(name):\n labels = tf.to_float(labels)\n repeat_op = tf.to_float(\n tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))\n repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))\n weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)\n\n pred_ = predictions**y_pow\n try:\n pred_norm = pred_ / \\\n (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))\n except Exception:\n pred_norm = pred_ / \\\n (eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))\n\n hist_rater_a = tf.reduce_sum(pred_norm, 0)\n hist_rater_b = tf.reduce_sum(labels, 0)\n\n conf_mat = tf.matmul(tf.transpose(pred_norm), labels)\n\n nom = tf.reduce_sum(weights * conf_mat)\n denom = tf.reduce_sum(weights * tf.matmul(\n tf.reshape(hist_rater_a, [num_ratings, 1]), tf.reshape(hist_rater_b, [1, num_ratings])) /\n tf.to_float(batch_size))\n\n try:\n return -(1 - nom / denom)\n except Exception:\n return -(1 - nom / (denom + eps))\n\n\ndef kappa_log_loss(predictions,\n labels,\n label_smoothing=0.0,\n y_pow=1,\n batch_size=32,\n log_scale=0.5,\n num_classes=5,\n log_offset=0.50,\n name='kappa_log'):\n \"\"\"Define a joint kappa and log loss, Kappa is a continuous differentiable\n approximation of discrete kappa loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n label_smoothing: a float, used to smooth the labels for better generalization\n if greater than 0 then smooth the labels.\n y_pow: int, to whcih the labels should be raised; useful if model diverge. e.g. y_pow=2\n num_ratings: numbers of rater to used, typically num_classes of the model\n batch_size: batch_size of the training or validation ops\n log_scale: a float, used to multiply the clipped log loss, e.g: 0.5\n log_offset:a float minimum log loss offset to substract from original log loss; e.g. 
0.50\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the kappa log loss.\n \"\"\"\n with tf.name_scope(name):\n num_classes = labels.get_shape()[-1].value\n labels = tf.cast(labels, predictions.dtype)\n if label_smoothing > 0:\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n labels = labels * smooth_positives + smooth_negatives\n log_loss_res = log_loss(predictions, labels)\n kappa_loss_res = kappa_loss(\n predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)\n return kappa_loss_res + log_scale * (log_loss_res - log_offset)\n\n\ndef kappa_log_loss_clipped(predictions,\n labels,\n label_smoothing=0.0,\n y_pow=1,\n batch_size=32,\n log_scale=0.5,\n log_cutoff=0.80,\n num_classes=5,\n name='kappa_log_clipped'):\n \"\"\"Define a joint kappa and log loss; log loss is clipped by a defined min\n value; Kappa is a continuous differentiable approximation of discrete kappa\n loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n label_smoothing: a float, used to smooth the labels for better generalization\n if greater than 0 then smooth the labels.\n y_pow: int, to whcih the labels should be raised; useful if model diverge. e.g. y_pow=2\n num_ratings: numbers of rater to used, typically num_classes of the model\n batch_size: batch_size of the training or validation ops\n log_scale: a float, used to multiply the clipped log loss, e.g: 0.5\n log_cutoff:a float, minimum log loss value; e.g. 0.50\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the clipped kappa log loss.\n \"\"\"\n with tf.name_scope(name):\n num_classes = labels.get_shape()[-1].value\n labels = tf.cast(labels, predictions.dtype)\n if label_smoothing > 0:\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n labels = labels * smooth_positives + smooth_negatives\n log_loss_res = log_loss_tf(predictions, labels)\n kappa_loss_res = kappa_loss(\n predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)\n return kappa_loss_res + log_scale * tf.clip_by_value(log_loss_res, log_cutoff, 10**3)\n\n\ndef cross_entropy_loss(logits, labels, label_smoothing=0.0, weight=1.0, name='cross_entropy_loss'):\n \"\"\"Define a cross entropy loss with label smoothing.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n label_smoothing: a float, used to smooth the labels for better generalization\n if greater than 0 then smooth the labels.\n weight: scale the loss by this factor.\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the cross entropy loss.\n \"\"\"\n logits.get_shape().assert_is_compatible_with(labels.get_shape())\n with tf.name_scope(name):\n num_classes = labels.get_shape()[-1].value\n labels = tf.cast(labels, logits.dtype)\n if label_smoothing > 0:\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n labels = labels * smooth_positives + smooth_negatives\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name='xentropy')\n weight = tf.convert_to_tensor(weight, dtype=logits.dtype.base_dtype, name='loss_weight')\n loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), 
name='value')\n return loss\n\n\ndef l1_l2_regularizer(var, weight_l1=1.0, weight_l2=1.0, name='l1_l2_regularizer'):\n \"\"\"Define a L2Loss, useful for regularize, i.e. weight decay.\n\n Args:\n var: tensor to regularize.\n weight_l1: an optional weight to modulate the l1 loss.\n weight_l2: an optional weight to modulate the l2 loss.\n name: Optional scope/name for op_scope.\n\n Returns:\n the l1+L2 loss op.\n \"\"\"\n with tf.name_scope(name):\n weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1')\n weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2')\n reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(var)), name='value_l1')\n reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(var), name='value_l2')\n return tf.add(reg_l1, reg_l2, name='value')\n\n\ndef l1_regularizer(scale, name='l1_regularizer'):\n \"\"\"Returns a function that can be used to apply L1 regularization to weights.\n L1 regularization encourages sparsity.\n\n Args:\n scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n name: An optional name/scope name.\n\n Returns:\n A function with signature `l1(weights)` that apply L1 regularization.\n\n Raises:\n ValueError: If scale is negative or if scale is not a float.\n \"\"\"\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % scale)\n if isinstance(scale, numbers.Real):\n if scale < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)\n if scale == 0.:\n return lambda _: None\n\n def l1(weights, name='l1_regularizer'):\n \"\"\"Applies L1 regularization to weights.\"\"\"\n with tf.name_scope(name):\n my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')\n return tf.multiply(my_scale, tf.reduce_sum(tf.abs(weights)), name=name)\n\n return l1\n\n\ndef l2_regularizer(scale, name='l2_regularizer'):\n \"\"\"Returns a function that can be used to apply L2 regularization to weights.\n Small values of L2 can help prevent overfitting the training data.\n\n Args:\n scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n name: An optional name/scope name.\n\n Returns:\n A function with signature `l2(weights)` that applies L2 regularization.\n\n Raises:\n ValueError: If scale is negative or if scale is not a float.\n \"\"\"\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % (scale,))\n if isinstance(scale, numbers.Real):\n if scale < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g.' 
% scale)\n if scale == 0.:\n return lambda _: None\n\n def l2(weights, name='l2_regularizer'):\n \"\"\"Applies l2 regularization to weights.\"\"\"\n with tf.name_scope(name):\n my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')\n return tf.multiply(my_scale, nn.l2_loss(weights), name=name)\n\n return l2\n\n\ndef discretized_mix_logistic_loss(inputs,\n predictions,\n sum_all=True,\n name='disretized_mix_logistic_loss'):\n \"\"\"log-likelihood for mixture of discretized logistics, assumes the data has\n been rescaled to.\n\n [-1,1] interval\n\n Args:\n predictions: 4D tensor or array, [batch_size, width, height, out_channels]\n predictions of the network .\n inputs: 4D tensor or array, [batch_size, width, height, num_classes]\n ground truth labels or target labels.\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the discretized mix logistic loss.\n \"\"\"\n with tf.name_scope(name):\n inputs_shape = list(map(int, inputs.get_shape()))\n predictions_shape = list(map(int, predictions.get_shape()))\n nr_mix = int(predictions_shape[-1] / 10)\n logit_probs = predictions[:, :, :, :nr_mix]\n predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])\n means = predictions[:, :, :, :, :nr_mix]\n log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)\n coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])\n inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])\n m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],\n [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])\n m3 = tf.reshape(\n means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +\n coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],\n [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])\n means = tf.concat([\n tf.reshape(means[:, :, :, 0, :],\n [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3\n ],\n axis=3)\n centered_inputs = inputs - means\n inv_stdv = tf.exp(-log_scales)\n plus_in = inv_stdv * (centered_inputs + 1. / 255.)\n cdf_plus = tf.nn.sigmoid(plus_in)\n min_in = inv_stdv * (centered_inputs - 1. / 255.)\n cdf_min = tf.nn.sigmoid(min_in)\n log_cdf_plus = plus_in - tf.nn.softplus(plus_in)\n log_one_minus_cdf_min = -tf.nn.softplus(min_in)\n cdf_delta = cdf_plus - cdf_min\n mid_in = inv_stdv * centered_inputs\n log_pdf_mid = mid_in - log_scales - 2. 
* tf.nn.softplus(mid_in)\n log_probs = tf.select(\n inputs < -0.999, log_cdf_plus,\n tf.select(\n inputs > 0.999, log_one_minus_cdf_min,\n tf.select(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)),\n log_pdf_mid - np.log(127.5))))\n\n log_probs = tf.reduce_sum(log_probs, 3) + \\\n log_prob_from_logits(logit_probs)\n if sum_all:\n return -tf.reduce_sum(log_sum_exp(log_probs))\n else:\n return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])\n\n\ndef mse_loss(pred, labels):\n try:\n batch_size = tf.cast(pred.shape[0], tf.float32)\n except Exception as e:\n print('Pred is a tf tensor %s' % str(e.message))\n batch_size = tf.cast(tf.shape(pred)[0], tf.float32)\n loss_val = tf.sqrt(2 * tf.nn.l2_loss(pred - labels)) / batch_size\n return loss_val\n\n\ndef pullaway_loss(embeddings, name='pullaway_loss'):\n \"\"\"Pull Away loss calculation.\n\n Args:\n embeddings: The embeddings to be orthogonalized for varied faces.\n Shape [batch_size, embeddings_dim]\n\n Return: pull away term loss\n \"\"\"\n with tf.name_scope(name):\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)\n batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)\n pt_loss = (tf.reduce_sum(similarity) - batch_size) / \\\n (batch_size * (batch_size - 1))\n return pt_loss\n\n\ndef log_sum_exp(x):\n \"\"\"numerically stable log_sum_exp implementation that prevents overflow.\"\"\"\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))\n\n\ndef log_prob_from_logits(x):\n \"\"\"numerically stable log_softmax implementation that prevents overflow.\"\"\"\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))\n\n\ndef segment_loss(logits, labels, num_classes, head=None):\n \"\"\"Calculate the loss from the logits and the labels.\n\n Args:\n logits: tensor, float - [batch_size * width * height, num_classes].\n Use vgg_fcn.up as logits.\n labels: Labels tensor, int32 - [batch_size * width * height, num_classes].\n The ground truth of your data.\n head: numpy array - [num_classes]\n Weighting the loss of each class\n Optional: Prioritize some classes\n\n Returns:\n loss: Loss tensor of type float.\n \"\"\"\n with tf.name_scope('segment_loss'):\n # logits = tf.reshape(logits, (-1, num_classes))\n epsilon = tf.constant(value=1e-7)\n labels = tf.to_float(labels)\n # labels = tf.to_float(tf.reshape(labels, (-1, num_classes)))\n\n softmax = tf.nn.softmax(logits) + epsilon\n\n if head is not None:\n cross_entropy = -tf.reduce_sum(tf.mul(labels * tf.log(softmax), head), axis=[1])\n else:\n cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])\n\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n return cross_entropy_mean\n\n\ndef triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):\n \"\"\"Calculate the triplet loss according to the FaceNet paper.\n\n Args:\n anchor: 2-D `tensor` [batch_size, embedding_size], the embeddings for the anchor images.\n positive: 2-D `tensor` [batch_size, embedding_size], the embeddings for the positive images.\n negative: 2-D `tensor` [batch_size, embedding_size], the embeddings for the negative images.\n alpha: positive to negative triplet distance margin\n\n Returns:\n the triplet loss.\n \"\"\"\n 
with tf.name_scope(name):\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)\n basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)\n loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)\n return loss\n\n\ndef decov_loss(xs, name='decov_loss'):\n \"\"\"Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf 'Reducing\n Overfitting In Deep Networks by Decorrelating Representation'.\n\n Args:\n xs: 4-D `tensor` [batch_size, height, width, channels], input\n\n Returns:\n a `float` decov loss\n \"\"\"\n with tf.name_scope(name):\n x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])\n m = tf.reduce_mean(x, 0, True)\n z = tf.expand_dims(x - m, 2)\n corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)\n corr_frob_sqr = tf.reduce_sum(tf.square(corr))\n corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))\n loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)\n return loss\n\n\ndef center_loss(features, label, alpha, num_classes, name='center_loss'):\n \"\"\"Center loss based on the paper \"A Discriminative Feature Learning Approach\n for Deep Face Recognition\" (http://ydwen.github.io/papers/WenECCV16.pdf)\n\n Args:\n features: 2-D `tensor` [batch_size, feature_length], input features\n label: 1-D `tensor` [batch_size], input label\n alpha: center loss parameter\n num_classes: a `int` numof classes for training\n\n Returns:\n a `float`, center loss\n \"\"\"\n with tf.variable_scope(name):\n num_features = features.get_shape()[1]\n centers = tf.get_variable(\n 'centers', [num_classes, num_features],\n dtype=tf.float32,\n initializer=tf.constant_initializer(0),\n trainable=False)\n label = tf.reshape(label, [-1])\n centers_batch = tf.gather(centers, label)\n diff = (1 - alpha) * (centers_batch - features)\n centers = tf.scatter_sub(centers, label, diff)\n loss = tf.nn.l2_loss(features - centers_batch)\n return loss, centers\n\n\ndef correlation_loss(source_samples, target_samples, weight, name='corr_loss'):\n \"\"\"Adds a similarity loss term, the correlation between two representations.\n\n Args:\n source_samples: a tensor of shape [num_samples, num_features]\n target_samples: a tensor of shape [num_samples, num_features]\n weight: a scalar weight for the loss.\n scope: optional name scope for summary tags.\n\n Returns:\n a scalar tensor representing the correlation loss value.\n \"\"\"\n with tf.name_scope(name):\n source_samples -= tf.reduce_mean(source_samples, 0)\n target_samples -= tf.reduce_mean(target_samples, 0)\n source_samples = tf.nn.l2_normalize(source_samples, 1)\n target_samples = tf.nn.l2_normalize(target_samples, 1)\n source_cov = tf.matmul(tf.transpose(source_samples), source_samples)\n target_cov = tf.matmul(tf.transpose(target_samples), target_samples)\n corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight\n\n assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])\n with tf.control_dependencies([assert_op]):\n tag = 'Correlation Loss'\n barrier = tf.no_op(tag)\n\n return corr_loss\n\n\ndef maximum_mean_discrepancy(x,\n y,\n kernel=util.gaussian_kernel_matrix,\n name='maximum_mean_discrepancy'):\n r\"\"\"Computes the Maximum Mean Discrepancy (MMD) of two samples: x and y.\n\n Maximum Mean Discrepancy (MMD) is a distance-measure between the samples of\n the distributions of x and y. 
Here we use the kernel two sample estimate\n using the empirical mean of the two distributions.\n\n MMD^2(P, Q) = || \\E{\\phi(x)} - \\E{\\phi(y)} ||^2\n = \\E{ K(x, x) } + \\E{ K(y, y) } - 2 \\E{ K(x, y) },\n\n where K = <\\phi(x), \\phi(y)>,\n is the desired kernel function, in this case a radial basis kernel.\n\n Args:\n x: a tensor of shape [num_samples, num_features]\n y: a tensor of shape [num_samples, num_features]\n kernel: a function which computes the kernel in MMD. Defaults to the\n GaussianKernelMatrix.\n\n Returns:\n a scalar denoting the squared maximum mean discrepancy loss.\n \"\"\"\n with tf.name_scope(name):\n # \\E{ K(x, x) } + \\E{ K(y, y) } - 2 \\E{ K(x, y) }\n cost = tf.reduce_mean(kernel(x, x))\n cost += tf.reduce_mean(kernel(y, y))\n cost -= 2 * tf.reduce_mean(kernel(x, y))\n\n # We do not allow the loss to become negative.\n cost = tf.where(cost > 0, cost, 0, name='value')\n return cost\n\n\ndef mmd_loss(source_samples, target_samples, weight, name='mmd_loss'):\n \"\"\"Adds a similarity loss term, the MMD between two representations.\n\n This Maximum Mean Discrepancy (MMD) loss is calculated with a number of\n different Gaussian kernels.\n\n Args:\n source_samples: a tensor of shape [num_samples, num_features].\n target_samples: a tensor of shape [num_samples, num_features].\n weight: the weight of the MMD loss.\n scope: optional name scope for summary tags.\n\n Returns:\n a scalar tensor representing the MMD loss value.\n \"\"\"\n with tf.name_scope(name):\n sigmas = [\n 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6\n ]\n gaussian_kernel = partial(util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))\n\n loss_value = maximum_mean_discrepancy(source_samples, target_samples, kernel=gaussian_kernel)\n loss_value = tf.maximum(1e-4, loss_value) * weight\n assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])\n with tf.control_dependencies([assert_op]):\n tag = 'MMD_Loss'\n barrier = tf.no_op(tag)\n return loss_value\n\n\ndef dann_loss(source_samples, target_samples, weight, name='dann_loss'):\n \"\"\"Adds the domain adversarial (DANN) loss.\n\n Args:\n source_samples: a tensor of shape [num_samples, num_features].\n target_samples: a tensor of shape [num_samples, num_features].\n weight: the weight of the loss.\n scope: optional name scope for summary tags.\n\n Returns:\n a scalar tensor representing the correlation loss value.\n \"\"\"\n with tf.variable_scope(name):\n batch_size = tf.shape(source_samples)[0]\n samples = tf.concat(values=[source_samples, target_samples], axis=0)\n samples = flatten(samples)\n\n domain_selection_mask = tf.concat(\n values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)\n\n grl = gradient_reverse(samples)\n grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))\n\n grl = fc(grl, 100, True, None, activation=relu, name='fc1')\n logits = fc(grl, 1, True, None, activation=None, name='fc2')\n\n domain_predictions = tf.sigmoid(logits)\n\n domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight)\n\n domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions))\n\n assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])\n with tf.control_dependencies([assert_op]):\n tag_loss = 'losses/domain_loss'\n barrier = tf.no_op(tag_loss)\n\n return domain_loss\n\n\ndef difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):\n \"\"\"Adds the difference loss between the private and 
shared representations.\n\n Args:\n private_samples: a tensor of shape [num_samples, num_features].\n shared_samples: a tensor of shape [num_samples, num_features].\n weight: the weight of the incoherence loss.\n name: the name of the tf summary.\n \"\"\"\n with tf.name_scope(name):\n private_samples -= tf.reduce_mean(private_samples, 0)\n shared_samples -= tf.reduce_mean(shared_samples, 0)\n\n private_samples = tf.nn.l2_normalize(private_samples, 1)\n shared_samples = tf.nn.l2_normalize(shared_samples, 1)\n\n correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)\n\n cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight\n cost = tf.where(cost > 0, cost, 0, name='value')\n\n assert_op = tf.Assert(tf.is_finite(cost), [cost])\n with tf.control_dependencies([assert_op]):\n barrier = tf.no_op(name)\n return cost\n\n\ndef log_quaternion_loss_batch(predictions, labels, name='log_quaternion_batch_loss'):\n \"\"\"A helper function to compute the error between quaternions.\n\n Args:\n predictions: A Tensor of size [batch_size, 4].\n labels: A Tensor of size [batch_size, 4].\n params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.\n\n Returns:\n A Tensor of size [batch_size], denoting the error between the quaternions.\n \"\"\"\n assertions = []\n assertions.append(\n tf.Assert(\n tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)),\n ['The l2 norm of each prediction quaternion vector should be 1.']))\n assertions.append(\n tf.Assert(\n tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)),\n ['The l2 norm of each label quaternion vector should be 1.']))\n with tf.name_scope(name):\n with tf.control_dependencies(assertions):\n product = tf.multiply(predictions, labels)\n internal_dot_products = tf.reduce_sum(product, [1])\n logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))\n return logcost\n\n\ndef log_quaternion_loss(predictions, labels, batch_size, name='log_quaternion_loss'):\n \"\"\"A helper function to compute the mean error between batches of\n quaternions.\n\n The caller is expected to add the loss to the graph.\n\n Args:\n predictions: A Tensor of size [batch_size, 4].\n labels: A Tensor of size [batch_size, 4].\n params: A dictionary of parameters. 
Expecting 'use_logging', 'batch_size'.\n\n Returns:\n A Tensor of size 1, denoting the mean error between batches of quaternions.\n \"\"\"\n with tf.name_scope(name):\n logcost = log_quaternion_loss_batch(predictions, labels)\n logcost = tf.reduce_sum(logcost, [0])\n logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss')\n return logcost\n\n\ndef random_perturbation_loss(embedded, length, loss_fn, perturb_norm_length=0.1):\n \"\"\"Adds noise to embeddings and recomputes classification loss.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n length: a `int`, length of the mask\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validation\n\n Returns:\n perturbation loss\n \"\"\"\n noise = tf.random_normal(shape=tf.shape(embedded))\n perturb = _scale_l2(_mask_by_length(noise, length), perturb_norm_length)\n return loss_fn(embedded + perturb)\n\n\ndef adversarial_loss(embedded, loss, loss_fn, perturb_norm_length=0.1):\n \"\"\"Adds gradient to embedding and recomputes classification loss.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n loss: `float`, loss\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validation\n\n Returns:\n adversarial loss\n \"\"\"\n grad, = tf.gradients(\n loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n grad = tf.stop_gradient(grad)\n perturb = _scale_l2(grad, perturb_norm_length)\n return loss_fn(embedded + perturb)\n\n\ndef virtual_adversarial_loss(logits,\n embedded,\n labels,\n length,\n logits_from_embedding_fn,\n vocab_size,\n num_classes,\n num_power_iteration=1,\n small_constant_for_finite_diff=1e-3,\n perturb_norm_length=0.1):\n \"\"\"Virtual adversarial loss. 
Computes virtual adversarial perturbation by\n finite difference method and power iteration, adds it to the embedding, and\n computes the KL divergence between the new logits and the original logits.\n\n Args:\n logits: 2-D float `Tensor`, [num_timesteps*batch_size, m], where m=1 if\n num_classes=2, otherwise m=num_classes.\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim].\n labels: 1-D `Tensor`, input labels\n length: a `int`, input length\n logits_from_embedding_fn: callable that takes embeddings and returns\n classifier logits.\n num_classes: num_classes for training\n vocab_size: a `int`, vocabulary size of the problem\n num_power_iteration: a `int`, the number of power iteration\n small_constant_for_finite_diff: a `float`, Small constant for finite difference method\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validation\n\n Returns:\n a `float` `scalar`, KL divergence.\n \"\"\"\n logits = tf.stop_gradient(logits)\n weights = _end_of_seq_mask(labels, vocab_size)\n\n d = _mask_by_length(tf.random_normal(shape=tf.shape(embedded)), length)\n\n for _ in range(num_power_iteration):\n d = _scale_l2(d, small_constant_for_finite_diff)\n d_logits = logits_from_embedding_fn(embedded + d)\n kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)\n d, = tf.gradients(kl, d, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n d = tf.stop_gradient(d)\n\n perturb = _scale_l2(_mask_by_length(d, length), perturb_norm_length)\n vadv_logits = logits_from_embedding_fn(embedded + perturb)\n return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)\n\n\ndef random_perturbation_loss_brnn(embedded, length, loss_fn, perturb_norm_length=0.1):\n \"\"\"Adds noise to embeddings and recomputes classification loss for\n bidirectional rnn models.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n length: a `int`, length of the mask\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation to\n be optimized with validation\n\n Returns:\n perturbation loss\n \"\"\"\n noise = [tf.random_normal(shape=tf.shape(emb)) for emb in embedded]\n masked = [_mask_by_length(n, length) for n in noise]\n scaled = [_scale_l2(m, perturb_norm_length) for m in masked]\n return loss_fn([e + s for (e, s) in zip(embedded, scaled)])\n\n\ndef adversarial_loss_brnn(embedded, loss, loss_fn, perturb_norm_length=0.1):\n \"\"\"Adds gradient to embeddings and recomputes classification loss for\n bidirectional rnn models.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n loss: `float`, loss\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validation\n\n Returns:\n adversarial loss\n \"\"\"\n grads = tf.gradients(\n loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n adv_exs = [\n emb + _scale_l2(tf.stop_gradient(g), perturb_norm_length) for emb, g in zip(embedded, grads)\n ]\n return loss_fn(adv_exs)\n\n\ndef virtual_adversarial_loss_brnn(logits,\n embedded,\n labels,\n length,\n logits_from_embedding_fn,\n vocab_size,\n num_classes,\n num_power_iteration=1,\n small_constant_for_finite_diff=1e-3,\n perturb_norm_length=0.1):\n \"\"\"Virtual adversarial loss for bidirectional models. Computes virtual\n adversarial perturbation by finite difference method and power iteration,\n adds it 
to the embedding, and computes the KL divergence between the new\n logits and the original logits.\n\n Args:\n logits: 2-D float `Tensor`, [num_timesteps*batch_size, m], where m=1 if\n num_classes=2, otherwise m=num_classes.\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim].\n labels: 1-D `Tensor`, input labels\n length: a `int`, input length\n logits_from_embedding_fn: callable that takes embeddings and returns\n classifier logits.\n num_classes: num_classes for training\n vocab_size: a `int`, vocabular size of the problem\n num_power_iteration: a `int`, the number of power iteration\n small_constant_for_finite_diff: a `float`, Small constant for finite difference method\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validatio\n\n Returns:\n a `float` `scalar`, KL divergence.\n \"\"\"\n logits = tf.stop_gradient(logits)\n weights = _end_of_seq_mask(labels, vocab_size)\n\n perturbs = [_mask_by_length(tf.random_normal(shape=tf.shape(emb)), length) for emb in embedded]\n for _ in range(num_power_iteration):\n perturbs = [_scale_l2(d, small_constant_for_finite_diff) for d in perturbs]\n d_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])\n kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)\n perturbs = tf.gradients(\n kl, perturbs, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n perturbs = [tf.stop_gradient(d) for d in perturbs]\n\n perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length) for d in perturbs]\n vadv_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])\n return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)\n\n\ndef _mask_by_length(t, length):\n maxlen = t.get_shape().as_list()[1]\n mask = tf.sequence_mask(length, maxlen=maxlen)\n mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)\n return t * mask\n\n\ndef _scale_l2(x, norm_length):\n alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12\n l2_norm = alpha * tf.sqrt(tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)\n x_unit = x / l2_norm\n return norm_length * x_unit\n\n\ndef _end_of_seq_mask(tokens, vocab_size):\n \"\"\"Generate a mask for the EOS token (1.0 on EOS, 0.0 otherwise).\n\n Args:\n tokens: 1-D integer `Tensor` [num_timesteps*batch_size]. 
Each element is an\n id from the vocab.\n vocab_size: a `int`, vocabulary size of the problem\n\n Returns:\n Float 1-D `Tensor` same shape as tokens, whose values are 1.0 on the end of\n sequence and 0.0 on the others.\n \"\"\"\n eos_id = vocab_size - 1\n return tf.cast(tf.equal(tokens, eos_id), tf.float32)\n\n\ndef _kl_divergence_with_logits(q_logits, p_logits, weights, num_classes):\n \"\"\"Returns weighted KL divergence between distributions q and p.\n\n Args:\n q_logits: logits for 1st argument of KL divergence shape\n [num_timesteps * batch_size, num_classes] if num_classes > 2, and\n [num_timesteps * batch_size] if num_classes == 2.\n p_logits: logits for 2nd argument of KL divergence with same shape q_logits.\n weights: 1-D `float` tensor with shape [num_timesteps * batch_size].\n Elements should be 1.0 only on end of sequences\n num_classes: a `int`, number of training classes\n\n Returns:\n a `float` `scalar`, KL divergence.\n \"\"\"\n if num_classes == 2:\n q = tf.nn.sigmoid(q_logits)\n p = tf.nn.sigmoid(p_logits)\n kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +\n tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))\n\n else:\n q = tf.nn.softmax(q_logits)\n p = tf.nn.softmax(p_logits)\n kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)\n\n num_labels = tf.reduce_sum(weights)\n num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)\n\n kl.get_shape().assert_has_rank(2)\n weights.get_shape().assert_has_rank(1)\n loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl')\n return loss\n\n\ndef cross_entropy_sequence_loss(logits, targets, sequence_length):\n \"\"\"Calculates the per-example cross-entropy loss for a sequence of logits and\n masks out all losses past the sequence length.\n\n Args:\n logits: Logits of shape `[T, B, vocab_size]`\n targets: Target classes of shape `[T, B]`\n sequence_length: An int32 tensor of shape `[B]` corresponding\n to the length of each input\n\n Returns:\n A tensor of shape [T, B] that contains the loss per example, per time step.\n \"\"\"\n with tf.name_scope(\"cross_entropy_sequence_loss\"):\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)\n loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))\n losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])\n\n return losses\n\n\ndef dice_loss(predictions, targets, weights=1., name='dice_loss'):\n with tf.name_scope(name):\n # predictions = tf.to_float(predictions)\n targets = tf.to_float(targets)\n intersection = 2 * tf.reduce_sum(predictions * targets) + weights\n union = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets)\n loss = -(intersection / (union))\n return loss\n\n\ndef precision_recall_auc_loss(labels,\n logits,\n precision_range=(0.0, 1.0),\n num_anchors=20,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes precision-recall AUC loss.\n The loss is based on a sum of losses for recall at a range of\n precision values (anchor points). This sum is a Riemann sum that\n approximates the area under the precision-recall curve.\n The per-example `weights` argument changes not only the coefficients of\n individual training examples, but how the examples are counted toward the\n constraint. 
If `label_priors` is given, it MUST take `weights` into account.\n That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n precision_range: A length-two tuple, the range of precision values over\n which to compute AUC. The entries must be nonnegative, increasing, and\n less than or equal to 1.0.\n num_anchors: The number of grid points used to approximate the Riemann sum.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n lambdas_initializer: An initializer for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [1, num_labels, num_anchors] consisting of the\n Lagrange multipliers.\n biases: A Tensor of shape [1, num_labels, num_anchors] consisting of the\n learned bias term for each.\n label_priors: A Tensor of shape [1, num_labels, 1] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. 
This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n with tf.variable_scope(scope, 'precision_recall_auc', [labels, logits, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create Tensor of anchor points and distance between anchors.\n precision_values, delta = _range_to_anchors_and_delta(precision_range, num_anchors, logits.dtype)\n # Create lambdas with shape [1, num_labels, num_anchors].\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[1, num_labels, num_anchors],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Create biases with shape [1, num_labels, num_anchors].\n biases = tf.contrib.framework.model_variable(\n name='biases',\n shape=[1, num_labels, num_anchors],\n dtype=logits.dtype,\n initializer=tf.zeros_initializer(),\n collections=variables_collections,\n trainable=trainable)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n label_priors = tf.reshape(label_priors, [1, num_labels, 1])\n\n # Expand logits, labels, and weights to shape [batch_size, num_labels, 1].\n logits = tf.expand_dims(logits, 2)\n labels = tf.expand_dims(labels, 2)\n weights = tf.expand_dims(weights, 2)\n\n # Calculate weighted loss and other outputs. The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n loss = weights * losses_utils.weighted_surrogate_loss(\n labels,\n logits + biases,\n surrogate_type=surrogate_type,\n positive_weights=1.0 + lambdas * (1.0 - precision_values),\n negative_weights=lambdas * precision_values)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2\n per_anchor_loss = loss - lambda_term\n per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)\n # Normalize the AUC such that a perfect score function will have AUC 1.0.\n # Because precision_range is discretized into num_anchors + 1 intervals\n # but only num_anchors terms are included in the Riemann sum, the\n # effective length of the integration interval is `delta` less than the\n # length of precision_range.\n scaled_loss = tf.div(\n per_label_loss, precision_range[1] - precision_range[0] - delta, name='AUC_Normalize')\n scaled_loss = tf.reshape(scaled_loss, original_shape)\n\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'biases':\n biases,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return scaled_loss, other_outputs\n\n\ndef roc_auc_loss(labels, logits, weights=1.0, surrogate_type='xent', scope=None):\n \"\"\"Computes ROC AUC loss.\n The area under the ROC curve is the probability p that a randomly chosen\n positive example will be scored higher than a randomly chosen negative\n example. 
This loss approximates 1-p by using a surrogate (either hinge loss or\n cross entropy) for the indicator function. Specifically, the loss is:\n sum_i sum_j w_i*w_j*loss(logit_i - logit_j)\n where i ranges over the positive datapoints, j ranges over the negative\n datapoints, logit_k denotes the logit (or score) of the k-th datapoint, and\n loss is either the hinge or log loss given a positive label.\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape and dtype as `labels`.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for the indicator function.\n scope: Optional scope for `name_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise loss.\n other_outputs: An empty dictionary, for consistency.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]):\n # Convert inputs to tensors and standardize dtypes.\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n\n # Create tensors of pairwise differences for logits and labels, and\n # pairwise products of weights. These have shape\n # [batch_size, batch_size, num_labels].\n logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)\n labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1)\n weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1)\n\n signed_logits_difference = labels_difference * logits_difference\n raw_loss = losses_utils.weighted_surrogate_loss(\n labels=tf.ones_like(signed_logits_difference),\n logits=signed_logits_difference,\n surrogate_type=surrogate_type)\n weighted_loss = weights_product * raw_loss\n\n # Zero out entries of the loss where labels_difference zero (so loss is only\n # computed on pairs with different labels).\n loss = tf.reduce_mean(tf.abs(labels_difference) * weighted_loss, 0) * 0.5\n loss = tf.reshape(loss, original_shape)\n return loss, {}\n\n\ndef recall_at_precision_loss(labels,\n logits,\n target_precision,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes recall at precision loss.\n The loss is based on a surrogate of the form\n wt * w(+) * loss(+) + wt * w(-) * loss(-) - c * pi,\n where:\n - w(+) = 1 + lambdas * (1 - target_precision)\n - loss(+) is the cross-entropy loss on the positive examples\n - w(-) = lambdas * target_precision\n - loss(-) is the cross-entropy loss on the negative examples\n - wt is a scalar or tensor of per-example weights\n - c = lambdas * (1 - target_precision)\n - pi is the label_priors.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_precision: The precision at which to compute the loss. 
Can be a\n floating point value between 0 and 1 for a single precision value, or a\n `Tensor` of shape [num_labels], holding each label's target precision\n value.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n lambdas_initializer: An initializer for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `logits` and `labels` do not have the same shape.\n \"\"\"\n with tf.variable_scope(scope, 'recall_at_precision', [logits, labels, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n target_precision = losses_utils.convert_and_cast(target_precision, 'target_precision',\n logits.dtype)\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create lambdas.\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[num_labels],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n\n # Calculate weighted loss and other outputs. 
The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n weighted_loss = weights * losses_utils.weighted_surrogate_loss(\n labels,\n logits,\n surrogate_type=surrogate_type,\n positive_weights=1.0 + lambdas * (1.0 - target_precision),\n negative_weights=lambdas * target_precision)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * (1.0 - target_precision) * label_priors * maybe_log2\n loss = tf.reshape(weighted_loss - lambda_term, original_shape)\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return loss, other_outputs\n\n\ndef precision_at_recall_loss(labels,\n logits,\n target_recall,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes precision at recall loss.\n The loss is based on a surrogate of the form\n wt * loss(-) + lambdas * (pi * (b - 1) + wt * loss(+))\n where:\n - loss(-) is the cross-entropy loss on the negative examples\n - loss(+) is the cross-entropy loss on the positive examples\n - wt is a scalar or tensor of per-example weights\n - b is the target recall\n - pi is the label_priors.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_recall: The recall at which to compute the loss. Can be a floating\n point value between 0 and 1 for a single target recall value, or a\n `Tensor` of shape [num_labels] holding each label's target recall value.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n lambdas_initializer: An initializer for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. 
To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. This is the same upper bound which is used\n in the loss expression to be optimized.\n \"\"\"\n with tf.variable_scope(scope, 'precision_at_recall', [logits, labels, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n target_recall = losses_utils.convert_and_cast(target_recall, 'target_recall', logits.dtype)\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create lambdas.\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[num_labels],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n\n # Calculate weighted loss and other outputs. The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n weighted_loss = weights * losses_utils.weighted_surrogate_loss(\n labels, logits, surrogate_type, positive_weights=lambdas, negative_weights=1.0)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * label_priors * (target_recall - 1.0) * maybe_log2\n loss = tf.reshape(weighted_loss + lambda_term, original_shape)\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return loss, other_outputs\n\n\ndef false_positive_rate_at_true_positive_rate_loss(labels,\n logits,\n target_rate,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes false positive rate at true positive rate loss.\n Note that `true positive rate` is a synonym for Recall, and that minimizing\n the false positive rate and maximizing precision are equivalent for a fixed\n Recall. 
Therefore, this function is identical to precision_at_recall_loss.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_rate: The true positive rate at which to compute the loss. Can be a\n floating point value between 0 and 1 for a single true positive rate, or\n a `Tensor` of shape [num_labels] holding each label's true positive rate.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions. 'xent' will use the cross-entropy\n loss surrogate, and 'hinge' will use the hinge loss.\n lambdas_initializer: An initializer op for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. 
This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n return precision_at_recall_loss(\n labels=labels,\n logits=logits,\n target_recall=target_rate,\n weights=weights,\n dual_rate_factor=dual_rate_factor,\n label_priors=label_priors,\n surrogate_type=surrogate_type,\n lambdas_initializer=lambdas_initializer,\n reuse=reuse,\n variables_collections=variables_collections,\n trainable=trainable,\n scope=scope)\n\n\ndef true_positive_rate_at_false_positive_rate_loss(labels,\n logits,\n target_rate,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes true positive rate at false positive rate loss.\n The loss is based on a surrogate of the form\n wt * loss(+) + lambdas * (wt * loss(-) - r * (1 - pi))\n where:\n - loss(-) is the loss on the negative examples\n - loss(+) is the loss on the positive examples\n - wt is a scalar or tensor of per-example weights\n - r is the target rate\n - pi is the label_priors.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_rate: The false positive rate at which to compute the loss. Can be a\n floating point value between 0 and 1 for a single false positive rate, or\n a `Tensor` of shape [num_labels] holding each label's false positive rate.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions. 'xent' will use the cross-entropy\n loss surrogate, and 'hinge' will use the hinge loss.\n lambdas_initializer: An initializer op for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. 
For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n with tf.variable_scope(scope, 'tpr_at_fpr', [labels, logits, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n target_rate = losses_utils.convert_and_cast(target_rate, 'target_rate', logits.dtype)\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create lambdas.\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[num_labels],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n\n # Loss op and other outputs. The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n weighted_loss = weights * losses_utils.weighted_surrogate_loss(\n labels,\n logits,\n surrogate_type=surrogate_type,\n positive_weights=1.0,\n negative_weights=lambdas)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * target_rate * (1.0 - label_priors) * maybe_log2\n loss = tf.reshape(weighted_loss - lambda_term, original_shape)\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return loss, other_outputs\n\n\ndef _prepare_labels_logits_weights(labels, logits, weights):\n \"\"\"Validates labels, logits, and weights.\n Converts inputs to tensors, checks shape compatibility, and casts dtype if\n necessary.\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n weights: Either `None` or a `Tensor` with shape broadcastable to `logits`.\n Returns:\n labels: Same as `labels` arg after possible conversion to tensor, cast, and\n reshape.\n logits: Same as `logits` arg after possible conversion to tensor and\n reshape.\n weights: Same as `weights` arg after possible conversion, cast, and reshape.\n original_shape: Shape of `labels` and `logits` before reshape.\n Raises:\n ValueError: If `labels` and `logits` do not have the same shape.\n \"\"\"\n # Convert `labels` and `logits` to Tensors and standardize dtypes.\n logits = tf.convert_to_tensor(logits, name='logits')\n labels = losses_utils.convert_and_cast(labels, 'labels', 
logits.dtype.base_dtype)\n weights = losses_utils.convert_and_cast(weights, 'weights', logits.dtype.base_dtype)\n\n try:\n labels.get_shape().merge_with(logits.get_shape())\n except ValueError:\n raise ValueError('logits and labels must have the same shape (%s vs %s)' % (logits.get_shape(),\n labels.get_shape()))\n\n original_shape = labels.get_shape().as_list()\n if labels.get_shape().ndims > 0:\n original_shape[0] = -1\n if labels.get_shape().ndims <= 1:\n labels = tf.reshape(labels, [-1, 1])\n logits = tf.reshape(logits, [-1, 1])\n\n if weights.get_shape().ndims == 1:\n # Weights has shape [batch_size]. Reshape to [batch_size, 1].\n weights = tf.reshape(weights, [-1, 1])\n if weights.get_shape().ndims == 0:\n # Weights is a scalar. Change shape of weights to match logits.\n weights *= tf.ones_like(logits)\n\n return labels, logits, weights, original_shape\n\n\ndef _range_to_anchors_and_delta(precision_range, num_anchors, dtype):\n \"\"\"Calculates anchor points from precision range.\n Args:\n precision_range: As required in precision_recall_auc_loss.\n num_anchors: int, number of equally spaced anchor points.\n dtype: Data type of returned tensors.\n Returns:\n precision_values: A `Tensor` of data type dtype with equally spaced values\n in the interval precision_range.\n delta: The spacing between the values in precision_values.\n Raises:\n ValueError: If precision_range is invalid.\n \"\"\"\n # Validate precision_range.\n if not 0 <= precision_range[0] <= precision_range[-1] <= 1:\n raise ValueError(\n 'precision values must obey 0 <= %f <= %f <= 1' % (precision_range[0], precision_range[-1]))\n if not 0 < len(precision_range) < 3:\n raise ValueError('length of precision_range (%d) must be 1 or 2' % len(precision_range))\n\n # Sets precision_values uniformly between min_precision and max_precision.\n values = np.linspace(start=precision_range[0], stop=precision_range[1], num=num_anchors + 2)[1:-1]\n precision_values = losses_utils.convert_and_cast(values, 'precision_values', dtype)\n delta = losses_utils.convert_and_cast(values[0] - precision_range[0], 'delta', dtype)\n # Makes precision_values [1, 1, num_anchors].\n precision_values = losses_utils.expand_outer(precision_values, 3)\n return precision_values, delta\n\n\ndef _create_dual_variable(name, shape, dtype, initializer, collections, trainable, dual_rate_factor):\n \"\"\"Creates a new dual variable.\n Dual variables are required to be nonnegative. If trainable, their gradient\n is reversed so that they are maximized (rather than minimized) by the\n optimizer.\n Args:\n name: A string, the name for the new variable.\n shape: Shape of the new variable.\n dtype: Data type for the new variable.\n initializer: Initializer for the new variable.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n dual_rate_factor: A floating point value or `Tensor`. 
The learning rate for\n the dual variable is scaled by this factor.\n Returns:\n dual_value: An op that computes the absolute value of the dual variable\n and reverses its gradient.\n dual_variable: The underlying variable itself.\n \"\"\"\n # We disable partitioning while constructing dual variables because they will\n # be updated with assign, which is not available for partitioned variables.\n partitioner = tf.get_variable_scope().partitioner\n try:\n tf.get_variable_scope().set_partitioner(None)\n dual_variable = tf.contrib.framework.model_variable(\n name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n collections=collections,\n trainable=trainable)\n finally:\n tf.get_variable_scope().set_partitioner(partitioner)\n # Using the absolute value enforces nonnegativity.\n dual_value = tf.abs(dual_variable)\n\n if trainable:\n # To reverse the gradient on the dual variable, multiply the gradient by\n # -dual_rate_factor\n dual_value = (tf.stop_gradient(\n (1.0 + dual_rate_factor) * dual_value) - dual_rate_factor * dual_value)\n return dual_value, dual_variable\n\n\ndef maybe_create_label_priors(label_priors, labels, weights, variables_collections):\n \"\"\"Creates moving average ops to track label priors, if necessary.\n Args:\n label_priors: As required in e.g. precision_recall_auc_loss.\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n weights: As required in e.g. precision_recall_auc_loss.\n variables_collections: Optional list of collections for the variables, if\n any must be created.\n Returns:\n label_priors: A Tensor of shape [num_labels] consisting of the\n weighted label priors, after updating with moving average ops if created.\n \"\"\"\n if label_priors is not None:\n label_priors = losses_utils.convert_and_cast(\n label_priors, name='label_priors', dtype=labels.dtype.base_dtype)\n return tf.squeeze(label_priors)\n\n label_priors = losses_utils.build_label_priors(\n labels, weights, variables_collections=variables_collections)\n return label_priors\n\n\ndef true_positives_lower_bound(labels, logits, weights, surrogate_type):\n \"\"\"Calculate a lower bound on the number of true positives.\n This lower bound on the number of true positives given `logits` and `labels`\n is the same one used in the global objectives loss functions.\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` of shape [batch_size, num_labels] or\n [batch_size, num_labels, num_anchors]. 
If the third dimension is present,\n the lower bound is computed on each slice [:, :, k] independently.\n weights: Per-example loss coefficients, with shape broadcast-compatible with\n that of `labels`.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n Returns:\n A `Tensor` of shape [num_labels] or [num_labels, num_anchors].\n \"\"\"\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n if logits.get_shape().ndims == 3 and labels.get_shape().ndims < 3:\n labels = tf.expand_dims(labels, 2)\n loss_on_positives = losses_utils.weighted_surrogate_loss(\n labels, logits, surrogate_type, negative_weights=0.0) / maybe_log2\n return tf.reduce_sum(weights * (labels - loss_on_positives), 0)\n\n\ndef false_positives_upper_bound(labels, logits, weights, surrogate_type):\n \"\"\"Calculate an upper bound on the number of false positives.\n This upper bound on the number of false positives given `logits` and `labels`\n is the same one used in the global objectives loss functions.\n Args:\n labels: A `Tensor` of shape [batch_size, num_labels]\n logits: A `Tensor` of shape [batch_size, num_labels] or\n [batch_size, num_labels, num_anchors]. If the third dimension is present,\n the lower bound is computed on each slice [:, :, k] independently.\n weights: Per-example loss coefficients, with shape broadcast-compatible with\n that of `labels`.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n Returns:\n A `Tensor` of shape [num_labels] or [num_labels, num_anchors].\n \"\"\"\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n loss_on_negatives = losses_utils.weighted_surrogate_loss(\n labels, logits, surrogate_type, positive_weights=0.0) / maybe_log2\n return tf.reduce_sum(weights * loss_on_negatives, 0)\n" ]
[ [ "tensorflow.nn.tanh", "tensorflow.reduce_max", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.reshape", "tensorflow.no_op", "tensorflow.sigmoid", "tensorflow.round", "tensorflow.ones", "tensorflow.variable_scope", "tensorflow.scatter_sub", "tensorflow.matmul", "tensorflow.nn.l2_loss", "tensorflow.squeeze", "tensorflow.abs", "tensorflow.sequence_mask", "tensorflow.name_scope", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.get_variable_scope", "numpy.log", "tensorflow.nn.softmax", "tensorflow.reduce_sum", "tensorflow.multiply", "tensorflow.is_finite", "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.transpose", "numpy.linspace", "tensorflow.nn.softplus", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.constant_initializer", "tensorflow.shape", "tensorflow.subtract", "tensorflow.ones_like", "tensorflow.zeros_initializer", "tensorflow.to_float", "tensorflow.expand_dims", "tensorflow.pow", "tensorflow.contrib.framework.model_variable", "tensorflow.cast", "tensorflow.diag_part", "tensorflow.gradients", "tensorflow.losses.compute_weighted_loss", "tensorflow.nn.l2_normalize", "tensorflow.control_dependencies", "tensorflow.nn.sigmoid", "tensorflow.zeros", "tensorflow.equal", "tensorflow.div", "tensorflow.losses.log_loss", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.add", "tensorflow.stop_gradient", "tensorflow.to_int32", "tensorflow.exp", "tensorflow.where", "tensorflow.square", "tensorflow.log", "tensorflow.gather", "tensorflow.maximum" ] ]
rusty-fast-solvers/rusty-green-kernel
[ "9317f88e873550270c482473005250a9d2df2950" ]
[ "rusty_green_kernel/test/test_rusty_green_kernel.py" ]
[ "\"\"\"Unit tests for direct assembly and evaluation of kernels.\"\"\"\nimport numpy as np\nimport pytest\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_laplace_assemble(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import assemble_laplace_kernel\n\n nsources = 10\n ntargets = 20\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n\n actual = assemble_laplace_kernel(sources, targets, dtype=dtype, parallel=parallel)\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((ntargets, nsources), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n expected[index, :] = 1.0 / (\n 4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n )\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_laplace_evaluate_only_values(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype)\n\n actual = evaluate_laplace_kernel(\n sources, targets, charges, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((nsources, ntargets), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n expected[:, index] = 1.0 / (\n 4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n )\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n expected = np.expand_dims(charges @ expected, -1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_laplace_evaluate_values_and_deriv(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, 
nsources), dtype=dtype)\n\n actual = evaluate_laplace_kernel(\n sources, targets, charges, dtype=dtype, return_gradients=True, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets, 4), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n diff = sources - target.reshape(3, 1)\n dist = np.linalg.norm(diff, axis=0)\n expected[:, index, 0] = 1.0 / (4 * np.pi * dist)\n expected[:, index, 1:] = diff.T / (4 * np.pi * dist.reshape(nsources, 1) ** 3)\n expected[dist == 0, index, :] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n expected = np.tensordot(charges, expected, 1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.complex128, 1e-14), (np.complex64, 5e-6)])\ndef test_helmholtz_assemble(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import assemble_helmholtz_kernel\n\n wavenumber = 2.5\n\n nsources = 10\n ntargets = 20\n\n if dtype == np.complex128:\n real_type = np.float64\n elif dtype == np.complex64:\n real_type = np.float32\n else:\n raise ValueError(f\"Unsupported type: {dtype}.\")\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=real_type)\n sources = rng.random((3, nsources), dtype=real_type)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n\n actual = assemble_helmholtz_kernel(\n sources, targets, wavenumber, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((ntargets, nsources), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[index, :] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)\n expected[index, dist == 0] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"dtype,rtol\", [(np.complex128, 1e-14), (np.complex64, 5e-6)])\ndef test_helmholtz_evaluate_only_values(dtype, rtol):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 2.5 + 1.3j\n\n if dtype == np.complex128:\n real_type = np.float64\n elif dtype == np.complex64:\n real_type = np.float32\n else:\n raise ValueError(f\"Unsupported type: {dtype}.\")\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=real_type)\n sources = rng.random((3, nsources), dtype=real_type)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=real_type\n )\n\n actual = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, dtype=dtype, parallel=False\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()\n 
np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[:, index] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)\n expected[dist == 0, index] = 0\n\n # Reset the warnings\n np.seterr(**old_param)\n\n expected = np.expand_dims(np.tensordot(charges, expected, 1), -1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.complex128, 1e-14), (np.complex64, 5e-6)])\ndef test_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 2.5 + 1.3j\n\n if dtype == np.complex128:\n real_type = np.float64\n elif dtype == np.complex64:\n real_type = np.float32\n else:\n raise ValueError(f\"Unsupported type: {dtype}.\")\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=real_type)\n sources = rng.random((3, nsources), dtype=real_type)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=real_type\n )\n\n actual = evaluate_helmholtz_kernel(\n sources,\n targets,\n charges,\n wavenumber,\n dtype=dtype,\n return_gradients=True,\n parallel=parallel,\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets, 4), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n diff = target.reshape(3, 1) - sources\n dist = np.linalg.norm(diff, axis=0)\n expected[:, index, 0] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)\n expected[:, index, 1:] = (\n diff.T\n * expected[:, index, 0].reshape(nsources, 1)\n / dist.reshape(nsources, 1) ** 2\n * (1j * wavenumber * dist.reshape(nsources, 1) - 1)\n )\n expected[dist == 0, index, :] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n expected = np.tensordot(charges, expected, 1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_modified_helmholtz_assemble(dtype, rtol, parallel):\n \"\"\"Test the modified Helmholtz kernel.\"\"\"\n from rusty_green_kernel import assemble_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n\n omega = 2.5\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n\n actual = assemble_modified_helmholtz_kernel(\n sources, targets, omega, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((ntargets, nsources), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = 
np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[index, :] = np.exp(-omega * dist) / (4 * np.pi * dist)\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_modified_helmholtz_evaluate_only_values(dtype, rtol, parallel):\n \"\"\"Test the modified Helmholtz kernel.\"\"\"\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n omega = 2.5\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype)\n\n actual = evaluate_modified_helmholtz_kernel(\n sources, targets, charges, omega, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((nsources, ntargets), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[:, index] = np.exp(-omega * dist) / (4 * np.pi * dist)\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n expected = np.expand_dims(charges @ expected, -1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_modified_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):\n \"\"\"Test the modified Helmholtz kernel.\"\"\"\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n omega = 2.5\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype)\n\n actual = evaluate_modified_helmholtz_kernel(\n sources,\n targets,\n charges,\n omega,\n dtype=dtype,\n return_gradients=True,\n parallel=parallel,\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets, 4), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n diff = target.reshape(3, 1) - sources\n dist = np.linalg.norm(diff, axis=0)\n expected[:, index, 0] = np.exp(-omega * dist) / (4 * np.pi * dist)\n expected[:, index, 1:] = (\n diff.T\n / (4 * np.pi * dist.reshape(nsources, 1) ** 3)\n * np.exp(-omega * dist.reshape(nsources, 1))\n * (-omega * dist.reshape(nsources, 1) - 1)\n )\n expected[dist == 0, index, :] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n expected = np.tensordot(charges, expected, 1)\n\n 
np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\ndef test_laplace_derivative_is_correct():\n \"\"\"Test that the Gradient of the Laplace kernel is correct.\"\"\"\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n\n eps = 1e-10\n\n dtype = np.float64\n\n targets = np.array(\n [\n [1.1, 1.5, 2.3],\n [1.1 + eps, 1.5, 2.3],\n [1.1 - eps, 1.5, 2.3],\n [1.1, 1.5 + eps, 2.3],\n [1.1, 1.5 - eps, 2.3],\n [1.1, 1.5, 2.3 + eps],\n [1.1, 1.5, 2.3 - eps],\n ]\n ).T\n\n rng = np.random.default_rng(seed=0)\n\n sources = rng.random((3, nsources), dtype=dtype)\n charges = rng.random((1, nsources), dtype=dtype)\n\n # Evalute derivative approximately.\n\n values = evaluate_laplace_kernel(sources, targets, charges)\n\n x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)\n y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)\n z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)\n\n expected = np.array([x_deriv, y_deriv, z_deriv])\n\n actual = evaluate_laplace_kernel(sources, targets, charges, return_gradients=True)[\n 0, 0, 1:\n ]\n\n np.testing.assert_allclose(actual, expected, rtol=1e-5)\n\n\ndef test_helmholtz_derivative_is_correct():\n \"\"\"Test that the Gradient of the Helmholtz kernel is correct.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n\n nsources = 10\n\n wavenumber = 2.5 + 1.3j\n\n eps = 1e-10\n\n dtype = np.float64\n\n targets = np.array(\n [\n [1.1, 1.5, 2.3],\n [1.1 + eps, 1.5, 2.3],\n [1.1 - eps, 1.5, 2.3],\n [1.1, 1.5 + eps, 2.3],\n [1.1, 1.5 - eps, 2.3],\n [1.1, 1.5, 2.3 + eps],\n [1.1, 1.5, 2.3 - eps],\n ]\n ).T\n\n rng = np.random.default_rng(seed=0)\n\n sources = rng.random((3, nsources), dtype=dtype)\n charges = rng.random((1, nsources), dtype=dtype)\n\n # Evalute derivative approximately.\n\n values = evaluate_helmholtz_kernel(sources, targets, charges, wavenumber)\n\n x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)\n y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)\n z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)\n\n expected = np.array([x_deriv, y_deriv, z_deriv])\n\n actual = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, return_gradients=True\n )[0, 0, 1:]\n\n np.testing.assert_allclose(actual, expected, rtol=1e-5)\n\n\ndef test_modified_helmholtz_derivative_is_correct():\n \"\"\"Test that the Gradient of the Helmholtz kernel is correct.\"\"\"\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n\n omega = 1.3\n\n eps = 1e-10\n\n dtype = np.float64\n\n targets = np.array(\n [\n [1.1, 1.5, 2.3],\n [1.1 + eps, 1.5, 2.3],\n [1.1 - eps, 1.5, 2.3],\n [1.1, 1.5 + eps, 2.3],\n [1.1, 1.5 - eps, 2.3],\n [1.1, 1.5, 2.3 + eps],\n [1.1, 1.5, 2.3 - eps],\n ]\n ).T\n\n rng = np.random.default_rng(seed=0)\n\n sources = rng.random((3, nsources), dtype=dtype)\n charges = rng.random((1, nsources), dtype=dtype)\n\n # Evalute derivative approximately.\n\n values = evaluate_modified_helmholtz_kernel(sources, targets, charges, omega)\n\n x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)\n y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)\n z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)\n\n expected = np.array([x_deriv, y_deriv, z_deriv])\n\n actual = evaluate_modified_helmholtz_kernel(\n sources, targets, charges, omega, return_gradients=True\n )[0, 0, 1:]\n\n np.testing.assert_allclose(actual, expected, rtol=1e-5)\n\n\ndef test_helmholtz_at_zero_agrees_with_laplace():\n \"\"\"Test if Helmholtz with wavenumber 0 agrees 
with Laplace.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 0\n\n dtype = np.float64\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=dtype\n )\n\n values_helmholtz = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, return_gradients=True\n )\n values_laplace = evaluate_laplace_kernel(\n sources, targets, np.real(charges), return_gradients=True\n ) + 1j * evaluate_laplace_kernel(\n sources, targets, np.imag(charges), return_gradients=True\n )\n\n np.testing.assert_allclose(values_helmholtz, values_laplace, rtol=1E-14)\n\ndef test_helmholtz_imaginary_wavenumber_agrees_with_modified_helmholtz():\n \"\"\"Test if Helmholtz with wavenumber 0 agrees with Laplace.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 1.3j\n\n dtype = np.float64\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=dtype\n )\n\n values_helmholtz = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, return_gradients=True\n )\n values_modified_helmholtz = evaluate_modified_helmholtz_kernel(\n sources, targets, np.real(charges), np.imag(wavenumber), return_gradients=True\n ) + 1j * evaluate_modified_helmholtz_kernel(\n sources, targets, np.imag(charges), np.imag(wavenumber), return_gradients=True\n )\n\n np.testing.assert_allclose(values_helmholtz, values_modified_helmholtz, rtol=1E-14)" ]
[ [ "numpy.geterr", "numpy.empty", "numpy.random.default_rng", "numpy.exp", "numpy.seterr", "numpy.tensordot", "numpy.expand_dims", "numpy.testing.assert_allclose", "numpy.array", "numpy.linalg.norm", "numpy.real", "numpy.imag" ] ]
Ialkhouri/Adv_attacks_big_picture_classification
[ "53edffc3b5bb313e476dcdbaf97ec776884cad50" ]
[ "Alg2_ADMM_MNIST_model_1.py" ]
[ "# Importing Libraries\n\nfrom foolbox.criteria import TargetClass\nfrom foolbox.criteria import Misclassification\n\nfrom numpy import linalg as LA\nimport matplotlib.pyplot as plt\n\nfrom foolbox.attacks import CarliniWagnerL2Attack\nfrom foolbox.attacks import SaliencyMapAttack\nfrom foolbox.attacks import GradientSignAttack\n\nfrom foolbox.v1.attacks import FGSM\nfrom foolbox.v1.attacks import MomentumIterativeAttack\n#from foolbox.v1.attacks import GradientSignAttack\n\nfrom skimage.measure import compare_ssim\n\n\nfrom keras import layers, models\n\nimport numpy as np\n\nfrom keras.utils import np_utils\n\nfrom keras import backend as K\nfrom keras.applications import vgg16\n\nimport tensorflow as tf\n\n\nimport pickle\n\nimport foolbox\n\nimport json\n\nimport timeit\nstart = timeit.default_timer()\n\nimport cvxpy as cp\nfrom numpy import linalg as LA\nfrom ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl,get_S_T_S_T_comp_from_lbl,Imperceptibility,ADMM_,Attack_performance,cvxPy_pert_gen\n\n########################################################################\n############################################### Fashion MNIST dataset import\n############################################################################\n\n#tf.keras.backend.set_learning_phase(False)\n# Keras Parameters\nbatch_size = 28\nnb_classes = 10\nnb_epoch = 2\nimg_rows, img_col = 28, 28\nimg_channels = 1\n# download mnist data and split into train and test sets\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()\n# reshape data to fit model\nX_train = train_images.reshape(train_images.shape[0], 28, 28, 1)\nX_test = test_images.reshape(test_images.shape[0], 28, 28, 1)\nX_train, X_test = X_train/255, X_test/255\n# normalization:\ntrain_images = train_images / 255\ntest_images = test_images / 255\nprint(\"\")\n\ny_train = np_utils.to_categorical(train_labels,10)\ny_test = np_utils.to_categorical(test_labels,10)\n\nX_train_1d = X_train.reshape(60000,784,1)\nX_test_1d = X_test.reshape(10000,784,1)\n\n################################################################################\n############## Loading the model and preprocessing #####################\n######################################################################################\n\n########### load the propoer model here\n\n\nmodel1 = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')\nmodel1.summary()\n####################################################################\n\n\n\n\n\n####################################################################################\n############RE-LABEL TRAIN_LABELS AND TEST_LABELS (Using a dictonary) #########################\n######################################################################################\ndic5 = {2:0, 4:0, 6:0, 5:2, 7:2, 9:2, 8:4}\ntrain_labels_5 = [dic5[x] if x in dic5.keys() else x for x in train_labels]\ntest_labels_5 = [dic5[x] if x in dic5.keys() else x for x in test_labels]\n\n'''\nyour mapping is different than mine. 
Here is the mapping from the paper you gave me.\n0 ==> {0,2,4,6} top\n1 ==> {1} bottom\n2 ==> {5,7,9} shoes\n3 ==> {3} dress\n4 ==> {8}\n'''\n######################################################################################\n# #####################################################################\n################### loading Grads and testing the vectorization\n#####################################################################\n\nGrad_MNIST_model1 = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/Grad_MNIST_model1_1d_before_SM.p\",\"rb\"))\n\ndisc_values = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p\",\"rb\"))\n\n\n################################################################################\n##################################### BUILDING THE ALG - 1 PROBLEM WITH CVXPY ######\n################################################################################\n\n######## to save eta, ceate a vectorized empty np array of size 10000,28*28,1\nnumber_of_observations = 10000\n\n### tensors to save and to calc CApert, CApert_sup, ELA, RLA, and sigmas\neta_vec = np.zeros(shape=(number_of_observations,28*28,1))\nimperceptibility_rho_2_save = np.nan*np.ones(shape=(number_of_observations,1))\nimperceptibility_rho_i_save = np.nan*np.ones(shape=(number_of_observations,1))\nimperceptibility_sssim_save = np.nan*np.ones(shape=(number_of_observations,1))\npred_pert_lbls = np.zeros(shape=(number_of_observations))\npred_pert_sup_lbls = np.zeros(shape=(number_of_observations))\npred_lbls = np.zeros(shape=(number_of_observations))\n\ncnt = 0\n\nQ = 3\nepsilon_D = 0.18\n\n######################### loading perturbations from MIFGSM\nMIFGSM_perturbed_images = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_perturbed_images.p\",\"rb\"))\n\nMIFGSM_perturbations = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_perturbations.p\",\"rb\"))\n\nMIFGSM_pred_label_w_pert = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_pred_label_w_pert.p\",\"rb\"))\n\nMIFGSM_pred_label_w_pert_super_label = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_pred_super_label_w_pert.p\",\"rb\"))\n\n\n\n\n\n\nfor id in range(number_of_observations):\n\n ######## LET THE INPUT IMAGE be:\n id = id\n input_image = X_test_1d[id]\n\n input_image_reshaped = input_image.reshape(784)\n\n ######## get tru_lbl\n tru_lbl = test_labels[id]\n\n ######## get tru_sup_lbl\n tru_sup_lbl = sup_lbl_from_lbl(tru_lbl)\n\n ######## get pred_lbl\n pred_lbl = np.argmax(model1(input_image.reshape(1, 784, 1)))\n pred_lbls[id] = pred_lbl\n\n ######## get_pred_sup_lbl\n pred_sup_lbl = sup_lbl_from_lbl(pred_lbl)\n\n ######## get S_T and S_T_comp: this is based on the tru lbl not the predicted lbl\n [S_T,S_T_comp] = get_S_T_S_T_comp_from_lbl(tru_lbl)\n\n ######## get vectozied gradients and disc values of of the disgnated lbl\n\n Grad_MNIST_model1_vec_disgnated = Grad_MNIST_model1[id,:,:]\n\n #print('Grad_MNIST_model1_vec_disgnated = ' , Grad_MNIST_model1_vec_disgnated.shape)\n\n disc_values_disgnated = disc_values[id,:]\n\n ####### get S_T_comp_star as the reduced/sorted set with cardinality = Q\n # get the indicies of the highest Q values from the f(input image), where f is the discriminant vector before the softmax\n # vector before softmax is:\n disc_values = pickle.load(\n 
open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p\", \"rb\"))\n disc_values_disgnated = disc_values[id, :]\n\n # remove S_T values and place them with -100.0\n temp = disc_values[id, :]\n disc_values_disgnated_excluding_S_T = temp\n disc_values_disgnated_excluding_S_T[S_T] = -100.0\n S_T_comp_star = (-disc_values_disgnated_excluding_S_T).argsort()[0:Q]\n\n # # keep this to restart above variables in the case of using j_star from the NOC methid\n disc_values = pickle.load(\n open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p\", \"rb\"))\n disc_values_disgnated = disc_values[id, :]\n\n\n ###### SAVE eta[id] of each j \\in S_T_comp\n # initial\n eta_vec_j = np.zeros(shape=(10,28*28,1))\n # distance initial\n D_j = 1000000*np.ones(shape=(10, 1))\n\n\n\n ####################################### Alg .II\n\n ## try MIFGSM; if good, then exit the program and we found eta^*\n\n if MIFGSM_pred_label_w_pert_super_label[id] != tru_sup_lbl:\n eta_cvx = MIFGSM_perturbations[id,:,:,:].reshape(784,1)\n eta_vec[id, :, :] = eta_cvx.reshape(n, 1)\n eta_source = 'MIFGSM'\n cnt = cnt + 1\n rho_2 = Imperceptibility(input_image, eta_cvx)[0]\n rho_inf = Imperceptibility(input_image, eta_cvx)[1]\n D_ssim = Imperceptibility(input_image, eta_cvx)[2]\n imperceptibility_rho_2_save[id] = rho_2\n imperceptibility_rho_i_save[id] = rho_inf\n imperceptibility_sssim_save[id] = D_ssim\n image_pert = eta_cvx + input_image\n #pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))\n pred_pert_lbls[id] = MIFGSM_pred_label_w_pert[id]\n pred_pert_sup_lbls[id] = MIFGSM_pred_label_w_pert_super_label[id]\n print('id = ', id, \"eta_source = \" , 'MIFGSM' , ' ; winning_label = ', 'Nadaaaaaa', 'pred_sup_lbl = ', pred_sup_lbl, 'predecited_perturbed_super_lbl = ',\n MIFGSM_pred_label_w_pert_super_label[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image,eta_cvx)[0:2], ' ; count = ', cnt)\n\n\n\n ## ELSE\n else:\n flag = 0\n eta_source = 'not MIFGSM'\n for jj in S_T_comp_star:\n j_star = jj\n # find eta_jj\n\n ########\n epsilon = 10\n\n ####### get matrix G \\in N \\times |S_T| and b \\in |S_T|, where G_columns = [grad_j_star - grad_l], for all l \\in S_T\n n = 28*28\n card_S_T = len(S_T) # cardinality of the set S_T\n\n mat_G = np.zeros(shape=(n,card_S_T)) # init mat_G\n\n vec_b_wout = np.zeros(shape=(card_S_T,1) )\n\n temp_jstar = Grad_MNIST_model1_vec_disgnated[j_star , : ,:]\n temp_jstar = temp_jstar.reshape(n,)\n b_jstar = disc_values_disgnated[j_star]\n #b_jstar = b_jstar.reshape(1,)\n\n for i in range(card_S_T):\n temp1 = Grad_MNIST_model1_vec_disgnated[S_T[i] , : ,:]\n temp1 = temp1.reshape(n,)\n\n b_l = disc_values_disgnated[S_T[i]]\n # b_l = b_l.reshape(1,)\n\n mat_G[:,i] = temp_jstar - temp1\n vec_b_wout[ i] = b_l - b_jstar\n\n vec_b = vec_b_wout + epsilon\n\n ###############################################################################################\n ##### ADMM\n #### algorithm parameters\n r_penalty_factor = 0.0075\n number_of_iterations_tau = 10\n\n # eADMM stopping criteria\n epsilon_A = 0.15\n\n admm_type = \"ADMM\"\n\n eta_cvx = ADMM_(input_image,model1,pred_sup_lbl,r_penalty_factor,number_of_iterations_tau,epsilon_A,mat_G, vec_b,admm_type)\n ################################################################################################\n\n\n\n\n ################# calculate the distance\n image_pert_temp = input_image + eta_cvx\n #D_j[jj] = LA.norm(eta_cvx, 2)\n D_j[jj] = 
Imperceptibility(input_image,eta_cvx)[0]\n\n if sup_lbl_from_lbl(np.argmax(model1(image_pert_temp.reshape(1, 784, 1)))) != pred_sup_lbl and D_j[jj] <= epsilon_D:\n\n #print('break for is used')\n flag = 1\n eta_cvx = eta_cvx\n eta_vec[id, :, :] = eta_cvx.reshape(n, 1)\n cnt = cnt + 1\n rho_2 = Imperceptibility(input_image, eta_cvx)[0]\n rho_inf = Imperceptibility(input_image, eta_cvx)[1]\n D_ssim = Imperceptibility(input_image, eta_cvx)[2]\n imperceptibility_rho_2_save[id] = rho_2\n imperceptibility_rho_i_save[id] = rho_inf\n imperceptibility_sssim_save[id] = D_ssim\n image_pert = eta_cvx + input_image\n pred_pert_lbls[id] = np.argmax(model1(image_pert.reshape(1, 784, 1)))\n pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))\n print('id = ', id, \"eta_source = \", 'not MIFGSM and break is used', ' ; winning_label = ', jj, 'pred_sup_lbl = ',\n pred_sup_lbl, 'predecited_perturbed_super_lbl = ',\n pred_pert_sup_lbls[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image, eta_cvx)[0:2],\n ' ; count = ', cnt)\n break\n\n\n else:\n # save the mother fucking eta_cvx to choose from in the future\n # save eta for each j \\in S_T_comp\n eta_vec_j[jj,:,:] = eta_cvx.reshape(n,1)\n\n\n if flag != 1:\n winning_label = np.argmin(D_j)\n eta_cvx = eta_vec_j[winning_label, :, :]\n eta_cvx = eta_cvx\n rho_2 = Imperceptibility(input_image, eta_cvx)[0]\n rho_inf = Imperceptibility(input_image, eta_cvx)[1]\n D_ssim = Imperceptibility(input_image, eta_cvx)[2]\n\n\n # cnt is increased iff T(k(x+eta)) != T(k(x))\n if sup_lbl_from_lbl(np.argmax(model1((input_image+eta_cvx).reshape(1, 784, 1)))) != pred_sup_lbl:\n cnt = cnt + 1\n imperceptibility_rho_2_save[id] = rho_2\n imperceptibility_rho_i_save[id] = rho_inf\n imperceptibility_sssim_save[id] = D_ssim\n\n\n image_pert = eta_cvx + input_image\n pred_pert_lbls[id] = np.argmax(model1(image_pert.reshape(1, 784, 1)))\n pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))\n print('id = ', id, \"eta_source = \", 'not MIFGSM and no break', ' ; winning_label = ', winning_label,\n 'pred_sup_lbl = ',\n pred_sup_lbl, 'predecited_perturbed_super_lbl = ',\n pred_pert_sup_lbls[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image, eta_cvx)[0:2],\n ' ; count = ', cnt)\n\n\n\n\n\nattack_success = cnt / number_of_observations\n\nprint('ATTACK SUCCESS = ' , attack_success*100 , '%')\n\n\nCA_pert, CA_pert_sup, RLA, ELA,RLA_sup, ELA_sup , sigma_2, sigma_inf, sigma_s = \\\n Attack_performance(test_labels[0:number_of_observations] ,\n pred_lbls,\n pred_pert_lbls ,\n imperceptibility_rho_2_save,\n imperceptibility_rho_i_save,\n imperceptibility_sssim_save)\n\n# attack performace\nprint('Number of observations = ', number_of_observations ,\n '\\n CA_pert = ' , CA_pert,\n \"\\n CA_pert_sup = \" , CA_pert_sup ,\n \"\\n RLA = \" , RLA ,\n \"\\n ELA = \" , ELA,\n '\\n RLA_sup = ' , RLA_sup,\n '\\n ELA_sup = ' , ELA_sup,\n \"\\n sigma_2 = \" , sigma_2 ,\n \"\\n sigma_inf = \" , sigma_inf ,\n '\\n ssim = ' , sigma_s)\n\n\n\n\n\n# # #####################################################################\n# # ################### Plotting images\n# # #####################################################################\n# print(\"\")\n#\n# plt.figure()\n# plt.subplot(1,3,1)\n# plt.title('Original')\n# plt.imshow(input_image.reshape(28,28))\n# plt.axis('off')\n#\n#\n# plt.subplot(1,3,2)\n# plt.title('pertubations')\n# plt.imshow(eta_cvx.reshape(28,28))\n# plt.axis('off')\n#\n#\n# plt.subplot(1,3,3)\n# 
plt.title('perturbed image')\n# plt.imshow(image_pert.reshape(28,28))\n# plt.axis('off')\n#\n#\n# plt.show()\n# # ########################################################################\n\n\nstop = timeit.default_timer()\n\nprint('Time: ', stop - start)\n\n#pickle.dump(eta_vec, open(\"eta_vec_alg2_samples.p\", \"wb\"))\n\n\nprint('break here')\n\n\n\n\n" ]
[ [ "numpy.ones", "numpy.zeros", "tensorflow.keras.models.load_model", "numpy.argmin", "tensorflow.keras.datasets.fashion_mnist.load_data" ] ]
ICRC-BME/epycom
[ "5bfa3fb9020f04536b7a08382533c8abf56ca85f" ]
[ "epycom/univariate/approximate_entropy.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) St. Anne's University Hospital in Brno. International Clinical\n# Research Center, Biomedical Engineering. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n# Third pary imports\nimport numpy as np\nfrom numba import njit\n\n# Local imports\nfrom ..utils.method import Method\n\n\n@njit('f8(f8[:], f8[:])', cache=True)\ndef _maxdist(x_i, x_j):\n dist = 0\n\n leni = len(x_i)\n lenj = len(x_j)\n\n if leni < lenj:\n n = len(x_i)\n else:\n n = len(x_j)\n\n for ua in range(n):\n if abs(x_i[ua] - x_j[ua]) > dist:\n dist = abs(x_i[ua] - x_j[ua])\n\n return dist\n\n\n@njit('f8(i8, i8, f8, f8[:])', cache=True)\ndef _phi_jitted(m, N, r, sig):\n z = N - m + 1\n\n xlen = N - m + 1\n x = np.full((xlen, m), np.inf, dtype='float64')\n\n # Sampling the signal\n for i in range(xlen):\n x[i] = sig[i: i + m]\n\n C = np.full(len(sig), np.inf, dtype='float64')\n iterator = cnt = 0\n for x_i in x:\n for x_j in x:\n if _maxdist(x_i, x_j) <= r:\n cnt += 1\n C[iterator] = cnt / (N - m + 1.0)\n cnt = 0\n iterator += 1\n\n C = C[:iterator]\n\n phi = 0\n for c in C:\n phi = phi+np.log(c)\n\n return phi/z\n\n\n@njit('f8(f8[:], f8, i8)', cache=True)\ndef compute_approximate_entropy(sig, r, m):\n \"\"\"\n Function computes approximate entropy of given signal\n\n Parameters\n ----------\n sig: np.ndarray\n 1D signal\n r: np.float64\n filtering treshold, recommended values: (0.1-0.25)*np.nanstd(sig)\n m: int\n window length of compared run of data, recommended (2-8)\n\n Returns\n -------\n entro: numpy.float64\n approximate entropy\n\n Example\n -------\n signal_entropy = approximate_entropy(data, 0.1*np.nanstd(data))\n \"\"\"\n\n N = sig.shape[0]\n return abs(_phi_jitted(m + 1, N, r, sig) - _phi_jitted(m, N, r, sig))\n\n\nclass ApproximateEntropy(Method):\n\n algorithm = 'APPROXIMATE_ENTROPY'\n algorithm_type = 'univariate'\n version = '1.0.0'\n dtype = [('apen', 'float32')]\n\n def __init__(self, **kwargs):\n \"\"\"\n Approximate entropy\n\n Parameters\n ----------\n sig: np.ndarray\n 1D signal\n m: int\n window length of compared run of data, recommended (2-8)\n r: float64\n filtering treshold, recommended values: (0.1-0.25)*std\n \"\"\"\n\n super().__init__(compute_approximate_entropy, **kwargs)\n self._event_flag = False\n" ]
[ [ "numpy.log", "numpy.full" ] ]
JPFrancoia/aws-data-wrangler
[ "5b08087d79b42683b03be91ba5ebc12ad4bd2d3d" ]
[ "awswrangler/s3.py" ]
[ "\"\"\"Amazon S3 Module.\"\"\"\n\nimport concurrent.futures\nimport csv\nimport logging\nimport time\nimport uuid\nfrom itertools import repeat\nfrom typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union\n\nimport boto3 # type: ignore\nimport botocore.exceptions # type: ignore\nimport pandas as pd # type: ignore\nimport pandas.io.parsers # type: ignore\nimport pyarrow as pa # type: ignore\nimport pyarrow.lib # type: ignore\nimport pyarrow.parquet # type: ignore\nimport s3fs # type: ignore\nfrom boto3.s3.transfer import TransferConfig # type: ignore\nfrom pandas.io.common import infer_compression # type: ignore\n\nfrom awswrangler import _data_types, _utils, catalog, exceptions\n\n_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: \"\", \"gzip\": \".gz\", \"snappy\": \".snappy\"}\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\ndef get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:\n \"\"\"Get bucket region name.\n\n Parameters\n ----------\n bucket : str\n Bucket name.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n str\n Region code (e.g. 'us-east-1').\n\n Examples\n --------\n Using the default boto3 session\n\n >>> import awswrangler as wr\n >>> region = wr.s3.get_bucket_region('bucket-name')\n\n Using a custom boto3 session\n\n >>> import boto3\n >>> import awswrangler as wr\n >>> region = wr.s3.get_bucket_region('bucket-name', boto3_session=boto3.Session())\n\n \"\"\"\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n _logger.debug(f\"bucket: {bucket}\")\n region: str = client_s3.get_bucket_location(Bucket=bucket)[\"LocationConstraint\"]\n region = \"us-east-1\" if region is None else region\n _logger.debug(f\"region: {region}\")\n return region\n\n\ndef does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:\n \"\"\"Check if object exists on S3.\n\n Parameters\n ----------\n path: str\n S3 path (e.g. s3://bucket/key).\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n bool\n True if exists, False otherwise.\n\n Examples\n --------\n Using the default boto3 session\n\n >>> import awswrangler as wr\n >>> wr.s3.does_object_exist('s3://bucket/key_real')\n True\n >>> wr.s3.does_object_exist('s3://bucket/key_unreal')\n False\n\n Using a custom boto3 session\n\n >>> import boto3\n >>> import awswrangler as wr\n >>> wr.s3.does_object_exist('s3://bucket/key_real', boto3_session=boto3.Session())\n True\n >>> wr.s3.does_object_exist('s3://bucket/key_unreal', boto3_session=boto3.Session())\n False\n\n \"\"\"\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n bucket: str\n key: str\n bucket, key = path.replace(\"s3://\", \"\").split(\"/\", 1)\n try:\n client_s3.head_object(Bucket=bucket, Key=key)\n return True\n except botocore.exceptions.ClientError as ex:\n if ex.response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 404:\n return False\n raise ex # pragma: no cover\n\n\ndef list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:\n \"\"\"List Amazon S3 objects from a prefix.\n\n Parameters\n ----------\n path : str\n S3 path (e.g. s3://bucket/prefix).\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n List[str]\n List of objects paths.\n\n Examples\n --------\n Using the default boto3 session\n\n >>> import awswrangler as wr\n >>> wr.s3.list_objects('s3://bucket/prefix')\n ['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']\n\n Using a custom boto3 session\n\n >>> import boto3\n >>> import awswrangler as wr\n >>> wr.s3.list_objects('s3://bucket/prefix', boto3_session=boto3.Session())\n ['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']\n\n \"\"\"\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n paginator = client_s3.get_paginator(\"list_objects_v2\")\n bucket: str\n prefix: str\n bucket, prefix = _utils.parse_path(path=path)\n response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig={\"PageSize\": 1000})\n paths: List[str] = []\n for page in response_iterator:\n contents: Optional[List] = page.get(\"Contents\")\n if contents is not None:\n for content in contents:\n if (content is not None) and (\"Key\" in content):\n key: str = content[\"Key\"]\n paths.append(f\"s3://{bucket}/{key}\")\n return paths\n\n\ndef _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:\n if isinstance(path, str): # prefix\n paths: List[str] = list_objects(path=path, boto3_session=boto3_session)\n elif isinstance(path, list):\n paths = path\n else:\n raise exceptions.InvalidArgumentType(f\"{type(path)} is not a valid path type. Please, use str or List[str].\")\n return paths\n\n\ndef delete_objects(\n path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None\n) -> None:\n \"\"\"Delete Amazon S3 objects from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.delete_objects(['s3://bucket/key0', 's3://bucket/key1']) # Delete both objects\n >>> wr.s3.delete_objects('s3://bucket/prefix') # Delete all objects under the received prefix\n\n \"\"\"\n paths: List[str] = _path2list(path=path, boto3_session=boto3_session)\n if len(paths) < 1:\n return\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n buckets: Dict[str, List[str]] = _split_paths_by_bucket(paths=paths)\n for bucket, keys in buckets.items():\n chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)\n if use_threads is False:\n for chunk in chunks:\n _delete_objects(bucket=bucket, keys=chunk, client_s3=client_s3)\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n executor.map(_delete_objects, repeat(bucket), chunks, repeat(client_s3))\n\n\ndef _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:\n buckets: Dict[str, List[str]] = {}\n bucket: str\n key: str\n for path in paths:\n bucket, key = _utils.parse_path(path=path)\n if bucket not in buckets:\n buckets[bucket] = []\n buckets[bucket].append(key)\n return buckets\n\n\ndef _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:\n _logger.debug(f\"len(keys): {len(keys)}\")\n batch: List[Dict[str, str]] = [{\"Key\": key} for key in keys]\n client_s3.delete_objects(Bucket=bucket, Delete={\"Objects\": batch})\n\n\ndef describe_objects(\n path: Union[str, List[str]],\n wait_time: Optional[Union[int, float]] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> Dict[str, Dict[str, Any]]:\n \"\"\"Describe Amazon S3 objects from a received S3 prefix or list of S3 objects paths.\n\n Fetch attributes like ContentLength, DeleteMarker, LastModified, ContentType, etc\n The full list of attributes can be explored under the boto3 head_object documentation:\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n wait_time : Union[int,float], optional\n How much time (seconds) should Wrangler try to reach this objects.\n Very useful to overcome eventual consistence issues.\n `None` means only a single try will be done.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Dict[str, Dict[str, Any]]\n Return a dictionary of objects returned from head_objects where the key is the object path.\n The response object can be explored here:\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> descs0 = wr.s3.describe_objects(['s3://bucket/key0', 's3://bucket/key1']) # Describe both objects\n >>> descs1 = wr.s3.describe_objects('s3://bucket/prefix') # Describe all objects under the prefix\n >>> descs2 = wr.s3.describe_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistence issues\n\n \"\"\"\n paths: List[str] = _path2list(path=path, boto3_session=boto3_session)\n if len(paths) < 1:\n return {}\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n resp_list: List[Tuple[str, Dict[str, Any]]]\n if use_threads is False:\n resp_list = [_describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in paths]\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n resp_list = list(executor.map(_describe_object, paths, repeat(wait_time), repeat(client_s3)))\n desc_list: Dict[str, Dict[str, Any]] = dict(resp_list)\n return desc_list\n\n\ndef _describe_object(\n path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client\n) -> Tuple[str, Dict[str, Any]]:\n wait_time = int(wait_time) if isinstance(wait_time, float) else wait_time\n tries: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1\n bucket: str\n key: str\n bucket, key = _utils.parse_path(path=path)\n desc: Dict[str, Any] = {}\n for i in range(tries, 0, -1):\n try:\n desc = client_s3.head_object(Bucket=bucket, Key=key)\n break\n except botocore.exceptions.ClientError as e: # pragma: no cover\n if e.response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 404: # Not Found\n _logger.debug(f\"Object not found. {i} seconds remaining to wait.\")\n if i == 1: # Last try, there is no more need to sleep\n break\n time.sleep(1)\n else:\n raise e\n return path, desc\n\n\ndef size_objects(\n path: Union[str, List[str]],\n wait_time: Optional[Union[int, float]] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> Dict[str, Optional[int]]:\n \"\"\"Get the size (ContentLength) in bytes of Amazon S3 objects from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n wait_time : Union[int,float], optional\n How much time (seconds) should Wrangler try to reach this objects.\n Very useful to overcome eventual consistence issues.\n `None` means only a single try will be done.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Dict[str, Optional[int]]\n Dictionary where the key is the object path and the value is the object size.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> sizes0 = wr.s3.size_objects(['s3://bucket/key0', 's3://bucket/key1']) # Get the sizes of both objects\n >>> sizes1 = wr.s3.size_objects('s3://bucket/prefix') # Get the sizes of all objects under the received prefix\n >>> sizes2 = wr.s3.size_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistence issues\n\n \"\"\"\n desc_list: Dict[str, Dict[str, Any]] = describe_objects(\n path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session\n )\n size_list: Dict[str, Optional[int]] = {k: d.get(\"ContentLength\", None) for k, d in desc_list.items()}\n return size_list\n\n\ndef to_csv( # pylint: disable=too-many-arguments\n df: pd.DataFrame,\n path: str,\n sep: str = \",\",\n index: bool = True,\n columns: Optional[List[str]] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n dataset: bool = False,\n partition_cols: Optional[List[str]] = None,\n mode: Optional[str] = None,\n database: Optional[str] = None,\n table: Optional[str] = None,\n dtype: Optional[Dict[str, str]] = None,\n description: Optional[str] = None,\n parameters: Optional[Dict[str, str]] = None,\n columns_comments: Optional[Dict[str, str]] = None,\n **pandas_kwargs,\n) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:\n \"\"\"Write CSV file or dataset on Amazon S3.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).\n\n Note\n ----\n The table name and all column names will be automatically sanitize using\n `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n path : str\n Amazon S3 path (e.g. s3://bucket/filename.csv).\n sep : str\n String of length 1. Field delimiter for the output file.\n index : bool\n Write row names (index).\n columns : List[str], optional\n Columns to write.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 Session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n dataset: bool\n If True store a parquet dataset instead of a single file.\n If True, enable all follow arguments:\n partition_cols, mode, database, table, description, parameters, columns_comments, .\n partition_cols: List[str], optional\n List of column names that will be used to create partitions. Only takes effect if dataset=True.\n mode: str, optional\n ``append`` (Default), ``overwrite``, ``overwrite_partitions``. 
Only takes effect if dataset=True.\n database : str, optional\n Glue/Athena catalog: Database name.\n table : str, optional\n Glue/Athena catalog: Table name.\n dtype: Dict[str, str], optional\n Dictionary of columns names and Athena/Glue types to be casted.\n Useful when you have columns with undetermined or mixed data types.\n Only takes effect if dataset=True.\n (e.g. {'col name': 'bigint', 'col2 name': 'int'})\n description: str, optional\n Glue/Athena catalog: Table description\n parameters: Dict[str, str], optional\n Glue/Athena catalog: Key/value pairs to tag the table.\n columns_comments: Dict[str, str], optional\n Glue/Athena catalog:\n Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).\n pandas_kwargs:\n keyword arguments forwarded to pandas.DataFrame.to_csv()\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n Writing single file\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.csv',\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.csv'],\n 'partitions_values': {}\n }\n\n Writing single file encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.csv',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.csv'],\n 'partitions_values': {}\n }\n\n Writing partitioned dataset\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2']\n ... )\n {\n 'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset to S3 with metadata on Athena/Glue Catalog.\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2'],\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... )\n {\n 'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset casting empty column data type\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B'],\n ... 'col3': [None, None, None]\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... dtype={'col3': 'date'}\n ... 
)\n {\n 'paths': ['s3://.../x.csv'],\n 'partitions_values: {}\n }\n\n \"\"\"\n if (database is None) ^ (table is None):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog.\"\n )\n if df.empty is True:\n raise exceptions.EmptyDataFrame()\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n partition_cols = partition_cols if partition_cols else []\n dtype = dtype if dtype else {}\n columns_comments = columns_comments if columns_comments else {}\n partitions_values: Dict[str, List[str]] = {}\n fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)\n if dataset is False:\n if partition_cols:\n raise exceptions.InvalidArgumentCombination(\"Please, pass dataset=True to be able to use partition_cols.\")\n if mode is not None:\n raise exceptions.InvalidArgumentCombination(\"Please pass dataset=True to be able to use mode.\")\n if any(arg is not None for arg in (database, table, description, parameters)):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass dataset=True to be able to use any one of these \"\n \"arguments: database, table, description, parameters, \"\n \"columns_comments.\"\n )\n pandas_kwargs[\"sep\"] = sep\n pandas_kwargs[\"index\"] = index\n pandas_kwargs[\"columns\"] = columns\n _to_text(file_format=\"csv\", df=df, path=path, fs=fs, **pandas_kwargs)\n paths = [path]\n else:\n mode = \"append\" if mode is None else mode\n exist: bool = False\n if columns:\n df = df[columns]\n if (database is not None) and (table is not None): # Normalize table to respect Athena's standards\n df = catalog.sanitize_dataframe_columns_names(df=df)\n partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]\n dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}\n columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}\n exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)\n if (exist is True) and (mode in (\"append\", \"overwrite_partitions\")):\n for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():\n dtype[k] = v\n df = catalog.drop_duplicated_columns(df=df)\n paths, partitions_values = _to_csv_dataset(\n df=df,\n path=path,\n index=index,\n sep=sep,\n fs=fs,\n use_threads=use_threads,\n partition_cols=partition_cols,\n dtype=dtype,\n mode=mode,\n boto3_session=session,\n )\n if (database is not None) and (table is not None):\n columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(\n df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True\n )\n if (exist is False) or (mode == \"overwrite\"):\n catalog.create_csv_table(\n database=database,\n table=table,\n path=path,\n columns_types=columns_types,\n partitions_types=partitions_types,\n description=description,\n parameters=parameters,\n columns_comments=columns_comments,\n boto3_session=session,\n mode=\"overwrite\",\n sep=sep,\n )\n if partitions_values:\n _logger.debug(f\"partitions_values:\\n{partitions_values}\")\n catalog.add_csv_partitions(\n database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep\n )\n return {\"paths\": paths, \"partitions_values\": partitions_values}\n\n\ndef _to_csv_dataset(\n df: pd.DataFrame,\n path: str,\n index: bool,\n sep: str,\n fs: s3fs.S3FileSystem,\n use_threads: bool,\n mode: str,\n 
dtype: Dict[str, str],\n partition_cols: Optional[List[str]] = None,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[List[str], Dict[str, List[str]]]:\n paths: List[str] = []\n partitions_values: Dict[str, List[str]] = {}\n path = path if path[-1] == \"/\" else f\"{path}/\"\n if mode not in [\"append\", \"overwrite\", \"overwrite_partitions\"]:\n raise exceptions.InvalidArgumentValue(\n f\"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions.\"\n )\n if (mode == \"overwrite\") or ((mode == \"overwrite_partitions\") and (not partition_cols)):\n delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)\n df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)\n _logger.debug(f\"dtypes: {df.dtypes}\")\n if not partition_cols:\n file_path: str = f\"{path}{uuid.uuid4().hex}.csv\"\n _to_text(\n file_format=\"csv\",\n df=df,\n path=file_path,\n fs=fs,\n quoting=csv.QUOTE_NONE,\n escapechar=\"\\\\\",\n header=False,\n date_format=\"%Y-%m-%d %H:%M:%S.%f\",\n index=index,\n sep=sep,\n )\n paths.append(file_path)\n else:\n for keys, subgroup in df.groupby(by=partition_cols, observed=True):\n subgroup = subgroup.drop(partition_cols, axis=\"columns\")\n keys = (keys,) if not isinstance(keys, tuple) else keys\n subdir = \"/\".join([f\"{name}={val}\" for name, val in zip(partition_cols, keys)])\n prefix: str = f\"{path}{subdir}/\"\n if mode == \"overwrite_partitions\":\n delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)\n file_path = f\"{prefix}{uuid.uuid4().hex}.csv\"\n _to_text(\n file_format=\"csv\",\n df=subgroup,\n path=file_path,\n fs=fs,\n quoting=csv.QUOTE_NONE,\n escapechar=\"\\\\\",\n header=False,\n date_format=\"%Y-%m-%d %H:%M:%S.%f\",\n index=index,\n sep=sep,\n )\n paths.append(file_path)\n partitions_values[prefix] = [str(k) for k in keys]\n return paths, partitions_values\n\n\ndef to_json(\n df: pd.DataFrame,\n path: str,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n **pandas_kwargs,\n) -> None:\n \"\"\"Write JSON file on Amazon S3.\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n path : str\n Amazon S3 path (e.g. s3://bucket/filename.csv).\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 Session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n pandas_kwargs:\n keyword arguments forwarded to pandas.DataFrame.to_csv()\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n Writing JSON file\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_json(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/filename.json',\n ... )\n\n Writing CSV file encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_json(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/filename.json',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... 
)\n\n \"\"\"\n return _to_text(\n file_format=\"json\",\n df=df,\n path=path,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n **pandas_kwargs,\n )\n\n\ndef _to_text(\n file_format: str,\n df: pd.DataFrame,\n path: str,\n fs: Optional[s3fs.S3FileSystem] = None,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n **pandas_kwargs,\n) -> None:\n if df.empty is True: # pragma: no cover\n raise exceptions.EmptyDataFrame()\n if fs is None:\n fs = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n with fs.open(path, \"w\") as f:\n if file_format == \"csv\":\n df.to_csv(f, **pandas_kwargs)\n elif file_format == \"json\":\n df.to_json(f, **pandas_kwargs)\n\n\ndef to_parquet( # pylint: disable=too-many-arguments\n df: pd.DataFrame,\n path: str,\n index: bool = False,\n compression: Optional[str] = \"snappy\",\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n dataset: bool = False,\n partition_cols: Optional[List[str]] = None,\n mode: Optional[str] = None,\n database: Optional[str] = None,\n table: Optional[str] = None,\n dtype: Optional[Dict[str, str]] = None,\n description: Optional[str] = None,\n parameters: Optional[Dict[str, str]] = None,\n columns_comments: Optional[Dict[str, str]] = None,\n) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:\n \"\"\"Write Parquet file or dataset on Amazon S3.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).\n\n Note\n ----\n The table name and all column names will be automatically sanitize using\n `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n path : str\n S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).\n index : bool\n True to store the DataFrame index in file, otherwise False to ignore it.\n compression: str, optional\n Compression style (``None``, ``snappy``, ``gzip``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n dataset: bool\n If True store a parquet dataset instead of a single file.\n If True, enable all follow arguments:\n partition_cols, mode, database, table, description, parameters, columns_comments, .\n partition_cols: List[str], optional\n List of column names that will be used to create partitions. Only takes effect if dataset=True.\n mode: str, optional\n ``append`` (Default), ``overwrite``, ``overwrite_partitions``. 
Only takes effect if dataset=True.\n database : str, optional\n Glue/Athena catalog: Database name.\n table : str, optional\n Glue/Athena catalog: Table name.\n dtype: Dict[str, str], optional\n Dictionary of columns names and Athena/Glue types to be casted.\n Useful when you have columns with undetermined or mixed data types.\n Only takes effect if dataset=True.\n (e.g. {'col name': 'bigint', 'col2 name': 'int'})\n description: str, optional\n Glue/Athena catalog: Table description\n parameters: Dict[str, str], optional\n Glue/Athena catalog: Key/value pairs to tag the table.\n columns_comments: Dict[str, str], optional\n Glue/Athena catalog:\n Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).\n\n Returns\n -------\n Dict[str, Union[List[str], Dict[str, List[str]]]]\n Dictionary with:\n 'paths': List of all stored files paths on S3.\n 'partitions_values': Dictionary of partitions added with keys as S3 path locations\n and values as a list of partitions values as str.\n\n Examples\n --------\n Writing single file\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.parquet',\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.parquet'],\n 'partitions_values': {}\n }\n\n Writing single file encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.parquet',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.parquet'],\n 'partitions_values': {}\n }\n\n Writing partitioned dataset\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2']\n ... )\n {\n 'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset to S3 with metadata on Athena/Glue Catalog.\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2'],\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... )\n {\n 'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset casting empty column data type\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B'],\n ... 'col3': [None, None, None]\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... dtype={'col3': 'date'}\n ... 
)\n {\n 'paths': ['s3://.../x.parquet'],\n 'partitions_values: {}\n }\n\n \"\"\"\n if (database is None) ^ (table is None):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog.\"\n )\n if df.empty is True:\n raise exceptions.EmptyDataFrame()\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n partition_cols = partition_cols if partition_cols else []\n dtype = dtype if dtype else {}\n columns_comments = columns_comments if columns_comments else {}\n partitions_values: Dict[str, List[str]] = {}\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)\n compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)\n if compression_ext is None:\n raise exceptions.InvalidCompression(f\"{compression} is invalid, please use None, snappy or gzip.\")\n if dataset is False:\n if partition_cols:\n raise exceptions.InvalidArgumentCombination(\"Please, pass dataset=True to be able to use partition_cols.\")\n if mode is not None:\n raise exceptions.InvalidArgumentCombination(\"Please pass dataset=True to be able to use mode.\")\n if any(arg is not None for arg in (database, table, description, parameters)):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass dataset=True to be able to use any one of these \"\n \"arguments: database, table, description, parameters, \"\n \"columns_comments.\"\n )\n paths = [\n _to_parquet_file(\n df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}\n )\n ]\n else:\n mode = \"append\" if mode is None else mode\n exist: bool = False\n if (database is not None) and (table is not None): # Normalize table to respect Athena's standards\n df = catalog.sanitize_dataframe_columns_names(df=df)\n partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]\n dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}\n columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}\n exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)\n if (exist is True) and (mode in (\"append\", \"overwrite_partitions\")):\n for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():\n dtype[k] = v\n df = catalog.drop_duplicated_columns(df=df)\n paths, partitions_values = _to_parquet_dataset(\n df=df,\n path=path,\n index=index,\n compression=compression,\n compression_ext=compression_ext,\n cpus=cpus,\n fs=fs,\n use_threads=use_threads,\n partition_cols=partition_cols,\n dtype=dtype,\n mode=mode,\n boto3_session=session,\n )\n if (database is not None) and (table is not None):\n columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(\n df=df, index=index, partition_cols=partition_cols, dtype=dtype\n )\n if (exist is False) or (mode == \"overwrite\"):\n catalog.create_parquet_table(\n database=database,\n table=table,\n path=path,\n columns_types=columns_types,\n partitions_types=partitions_types,\n compression=compression,\n description=description,\n parameters=parameters,\n columns_comments=columns_comments,\n boto3_session=session,\n mode=\"overwrite\",\n )\n if partitions_values:\n _logger.debug(f\"partitions_values:\\n{partitions_values}\")\n catalog.add_parquet_partitions(\n database=database,\n table=table,\n 
partitions_values=partitions_values,\n compression=compression,\n boto3_session=session,\n )\n return {\"paths\": paths, \"partitions_values\": partitions_values}\n\n\ndef _to_parquet_dataset(\n df: pd.DataFrame,\n path: str,\n index: bool,\n compression: Optional[str],\n compression_ext: str,\n cpus: int,\n fs: s3fs.S3FileSystem,\n use_threads: bool,\n mode: str,\n dtype: Dict[str, str],\n partition_cols: Optional[List[str]] = None,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[List[str], Dict[str, List[str]]]:\n paths: List[str] = []\n partitions_values: Dict[str, List[str]] = {}\n path = path if path[-1] == \"/\" else f\"{path}/\"\n if mode not in [\"append\", \"overwrite\", \"overwrite_partitions\"]:\n raise exceptions.InvalidArgumentValue(\n f\"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions.\"\n )\n if (mode == \"overwrite\") or ((mode == \"overwrite_partitions\") and (not partition_cols)):\n delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)\n df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)\n schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(\n df=df, index=index, ignore_cols=partition_cols, dtype=dtype\n )\n _logger.debug(f\"schema: {schema}\")\n if not partition_cols:\n file_path: str = f\"{path}{uuid.uuid4().hex}{compression_ext}.parquet\"\n _to_parquet_file(\n df=df, schema=schema, path=file_path, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype\n )\n paths.append(file_path)\n else:\n for keys, subgroup in df.groupby(by=partition_cols, observed=True):\n subgroup = subgroup.drop(partition_cols, axis=\"columns\")\n keys = (keys,) if not isinstance(keys, tuple) else keys\n subdir = \"/\".join([f\"{name}={val}\" for name, val in zip(partition_cols, keys)])\n prefix: str = f\"{path}{subdir}/\"\n if mode == \"overwrite_partitions\":\n delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)\n file_path = f\"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet\"\n _to_parquet_file(\n df=subgroup,\n schema=schema,\n path=file_path,\n index=index,\n compression=compression,\n cpus=cpus,\n fs=fs,\n dtype=dtype,\n )\n paths.append(file_path)\n partitions_values[prefix] = [str(k) for k in keys]\n return paths, partitions_values\n\n\ndef _to_parquet_file(\n df: pd.DataFrame,\n path: str,\n schema: pa.Schema,\n index: bool,\n compression: Optional[str],\n cpus: int,\n fs: s3fs.S3FileSystem,\n dtype: Dict[str, str],\n) -> str:\n table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)\n for col_name, col_type in dtype.items():\n if col_name in table.column_names:\n col_index = table.column_names.index(col_name)\n pyarrow_dtype = _data_types.athena2pyarrow(col_type)\n field = pa.field(name=col_name, type=pyarrow_dtype)\n table = table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))\n _logger.debug(f\"Casting column {col_name} ({col_index}) to {col_type} ({pyarrow_dtype})\")\n pyarrow.parquet.write_table(\n table=table,\n where=path,\n write_statistics=True,\n use_dictionary=True,\n filesystem=fs,\n coerce_timestamps=\"ms\",\n compression=compression,\n flavor=\"spark\",\n )\n return path\n\n\ndef read_csv(\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, 
Iterator[pd.DataFrame]]:\n \"\"\"Read CSV file(s) from from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n chunksize: int, optional\n If specified, return an generator where chunksize is the number of rows to include in each chunk.\n pandas_kwargs:\n keyword arguments forwarded to pandas.read_csv().\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunksize != None`.\n\n Examples\n --------\n Reading all CSV files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_csv(path='s3://bucket/prefix/')\n\n Reading all CSV files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_csv(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all CSV files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'])\n\n Reading in chunks of 100 lines\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunksize=100)\n >>> for df in dfs:\n >>> print(df) # 100 lines Pandas DataFrame\n\n \"\"\"\n return _read_text(\n parser_func=pd.read_csv,\n path=path,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n chunksize=chunksize,\n **pandas_kwargs,\n )\n\n\ndef read_fwf(\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read fixed-width formatted file(s) from from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n chunksize: int, optional\n If specified, return an generator where chunksize is the number of rows to include in each chunk.\n pandas_kwargs:\n keyword arguments forwarded to pandas.read_fwf().\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_fwf.html\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunksize != None`.\n\n Examples\n --------\n Reading all fixed-width formatted (FWF) files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_fwf(path='s3://bucket/prefix/')\n\n Reading all fixed-width formatted (FWF) files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_fwf(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all fixed-width formatted (FWF) files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'])\n\n Reading in chunks of 100 lines\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'], chunksize=100)\n >>> for df in dfs:\n >>> print(df) # 100 lines Pandas DataFrame\n\n \"\"\"\n return _read_text(\n parser_func=pd.read_fwf,\n path=path,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n chunksize=chunksize,\n **pandas_kwargs,\n )\n\n\ndef read_json(\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read JSON file(s) from from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n chunksize: int, optional\n If specified, return an generator where chunksize is the number of rows to include in each chunk.\n pandas_kwargs:\n keyword arguments forwarded to pandas.read_json().\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunksize != None`.\n\n Examples\n --------\n Reading all JSON files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_json(path='s3://bucket/prefix/')\n\n Reading all JSON files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_json(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all JSON files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'])\n\n Reading in chunks of 100 lines\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'], chunksize=100)\n >>> for df in dfs:\n >>> print(df) # 100 lines Pandas DataFrame\n\n \"\"\"\n return _read_text(\n parser_func=pd.read_json,\n path=path,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n chunksize=chunksize,\n **pandas_kwargs,\n )\n\n\ndef _read_text(\n parser_func: Callable,\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n if \"iterator\" in pandas_kwargs:\n raise exceptions.InvalidArgument(\"Please, use chunksize instead of iterator.\")\n paths: List[str] = _path2list(path=path, boto3_session=boto3_session)\n if chunksize is not None:\n dfs: Iterator[pd.DataFrame] = _read_text_chunksize(\n parser_func=parser_func,\n paths=paths,\n boto3_session=boto3_session,\n chunksize=chunksize,\n pandas_args=pandas_kwargs,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n return dfs\n if use_threads is False:\n df: pd.DataFrame = pd.concat(\n objs=[\n _read_text_full(\n parser_func=parser_func,\n path=p,\n boto3_session=boto3_session,\n pandas_args=pandas_kwargs,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n for p in paths\n ],\n ignore_index=True,\n sort=False,\n )\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n df = pd.concat(\n objs=executor.map(\n _read_text_full,\n repeat(parser_func),\n paths,\n repeat(boto3_session),\n repeat(pandas_kwargs),\n repeat(s3_additional_kwargs),\n ),\n ignore_index=True,\n sort=False,\n )\n return df\n\n\ndef _read_text_chunksize(\n parser_func: Callable,\n paths: List[str],\n boto3_session: boto3.Session,\n chunksize: int,\n pandas_args: Dict[str, Any],\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> Iterator[pd.DataFrame]:\n fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n for path in paths:\n 
_logger.debug(f\"path: {path}\")\n if pandas_args.get(\"compression\", \"infer\") == \"infer\":\n pandas_args[\"compression\"] = infer_compression(path, compression=\"infer\")\n with fs.open(path, \"rb\") as f:\n reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **pandas_args)\n for df in reader:\n yield df\n\n\ndef _read_text_full(\n parser_func: Callable,\n path: str,\n boto3_session: boto3.Session,\n pandas_args: Dict[str, Any],\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> pd.DataFrame:\n fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n if pandas_args.get(\"compression\", \"infer\") == \"infer\":\n pandas_args[\"compression\"] = infer_compression(path, compression=\"infer\")\n with fs.open(path, \"rb\") as f:\n return parser_func(f, **pandas_args)\n\n\ndef _read_parquet_init(\n path: Union[str, List[str]],\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n categories: List[str] = None,\n validate_schema: bool = True,\n dataset: bool = False,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> pyarrow.parquet.ParquetDataset:\n \"\"\"Encapsulate all initialization before the use of the pyarrow.parquet.ParquetDataset.\"\"\"\n if dataset is False:\n path_or_paths: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)\n elif isinstance(path, str):\n path_or_paths = path[:-1] if path.endswith(\"/\") else path\n else:\n path_or_paths = path\n _logger.debug(f\"path_or_paths: {path_or_paths}\")\n fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n data: pyarrow.parquet.ParquetDataset = pyarrow.parquet.ParquetDataset(\n path_or_paths=path_or_paths,\n filesystem=fs,\n metadata_nthreads=cpus,\n filters=filters,\n read_dictionary=categories,\n validate_schema=validate_schema,\n )\n return data\n\n\ndef read_parquet(\n path: Union[str, List[str]],\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n columns: Optional[List[str]] = None,\n validate_schema: bool = True,\n chunked: bool = False,\n dataset: bool = False,\n categories: List[str] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read Apache Parquet file(s) from from a received S3 prefix or list of S3 objects paths.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning and catalog integration (AWS Glue Catalog).\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n columns : List[str], optional\n Names of columns to read from the file(s).\n validate_schema:\n Check that individual file schemas are all the same / compatible. Schemas within a\n folder prefix should all be the same. 
Disable if you have schemas that are different\n and want to disable this check.\n chunked : bool\n If True will break the data in smaller DataFrames (Non deterministic number of lines).\n Otherwise return a single DataFrame with the whole data.\n dataset: bool\n If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.\n categories: List[str], optional\n List of columns names that should be returned as pandas.Categorical.\n Recommended for memory restricted environments.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunked=True`.\n\n Examples\n --------\n Reading all Parquet files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet(path='s3://bucket/prefix/')\n\n Reading all Parquet files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all Parquet files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'])\n\n Reading in chunks\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_parquet(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunked=True)\n >>> for df in dfs:\n >>> print(df) # Smaller Pandas DataFrame\n\n \"\"\"\n data: pyarrow.parquet.ParquetDataset = _read_parquet_init(\n path=path,\n filters=filters,\n dataset=dataset,\n categories=categories,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n validate_schema=validate_schema,\n )\n if chunked is False:\n return _read_parquet(\n data=data, columns=columns, categories=categories, use_threads=use_threads, validate_schema=validate_schema\n )\n return _read_parquet_chunked(data=data, columns=columns, categories=categories, use_threads=use_threads)\n\n\ndef _read_parquet(\n data: pyarrow.parquet.ParquetDataset,\n columns: Optional[List[str]] = None,\n categories: List[str] = None,\n use_threads: bool = True,\n validate_schema: bool = True,\n) -> pd.DataFrame:\n tables: List[pa.Table] = []\n for piece in data.pieces:\n table: pa.Table = piece.read(\n columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False\n )\n tables.append(table)\n promote: bool = not validate_schema\n table = pa.lib.concat_tables(tables, promote=promote)\n return table.to_pandas(\n use_threads=use_threads,\n split_blocks=True,\n self_destruct=True,\n integer_object_nulls=False,\n date_as_object=True,\n ignore_metadata=True,\n categories=categories,\n types_mapper=_data_types.pyarrow2pandas_extension,\n )\n\n\ndef _read_parquet_chunked(\n data: pyarrow.parquet.ParquetDataset,\n columns: Optional[List[str]] = None,\n categories: List[str] = None,\n use_threads: bool = True,\n) -> Iterator[pd.DataFrame]:\n for piece in data.pieces:\n table: pa.Table = 
piece.read(\n columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False\n )\n yield table.to_pandas(\n use_threads=use_threads,\n split_blocks=True,\n self_destruct=True,\n integer_object_nulls=False,\n date_as_object=True,\n ignore_metadata=True,\n categories=categories,\n types_mapper=_data_types.pyarrow2pandas_extension,\n )\n\n\ndef read_parquet_metadata(\n path: Union[str, List[str]],\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n dataset: bool = False,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:\n \"\"\"Read Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning and catalog integration (AWS Glue Catalog).\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n dataset: bool\n If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Tuple[Dict[str, str], Optional[Dict[str, str]]]\n columns_types: Dictionary with keys as column names and vales as\n data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /\n partitions_types: Dictionary with keys as partition names\n and values as data types (e.g. {'col2': 'date'}).\n\n Examples\n --------\n Reading all Parquet files (with partitions) metadata under a prefix\n\n >>> import awswrangler as wr\n >>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path='s3://bucket/prefix/', dataset=True)\n\n Reading all Parquet files metadata from a list\n\n >>> import awswrangler as wr\n >>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path=[\n ... 's3://bucket/filename0.parquet',\n ... 's3://bucket/filename1.parquet'\n ... 
])\n\n \"\"\"\n data: pyarrow.parquet.ParquetDataset = _read_parquet_init(\n path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session\n )\n return _data_types.athena_types_from_pyarrow_schema(\n schema=data.schema.to_arrow_schema(), partitions=data.partitions\n )\n\n\ndef store_parquet_metadata(\n path: str,\n database: str,\n table: str,\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n dataset: bool = False,\n use_threads: bool = True,\n description: Optional[str] = None,\n parameters: Optional[Dict[str, str]] = None,\n columns_comments: Optional[Dict[str, str]] = None,\n compression: Optional[str] = None,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:\n \"\"\"Infer and store parquet metadata on AWS Glue Catalog.\n\n Infer Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths\n And then stores it on AWS Glue Catalog including all inferred partitions\n (No need of 'MCSK REPAIR TABLE')\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning and catalog integration (AWS Glue Catalog).\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n database : str\n Glue/Athena catalog: Database name.\n table : str\n Glue/Athena catalog: Table name.\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n dataset: bool\n If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n description: str, optional\n Glue/Athena catalog: Table description\n parameters: Dict[str, str], optional\n Glue/Athena catalog: Key/value pairs to tag the table.\n columns_comments: Dict[str, str], optional\n Glue/Athena catalog:\n Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).\n compression: str, optional\n Compression style (``None``, ``snappy``, ``gzip``, etc).\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]\n The metadata used to create the Glue Table.\n columns_types: Dictionary with keys as column names and vales as\n data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /\n partitions_types: Dictionary with keys as partition names\n and values as data types (e.g. {'col2': 'date'}). /\n partitions_values: Dictionary with keys as S3 path locations and values as a\n list of partitions values as str (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).\n\n Examples\n --------\n Reading all Parquet files metadata under a prefix\n\n >>> import awswrangler as wr\n >>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(\n ... path='s3://bucket/prefix/',\n ... database='...',\n ... table='...',\n ... dataset=True\n ... 
)\n\n \"\"\"\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n data: pyarrow.parquet.ParquetDataset = _read_parquet_init(\n path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session\n )\n partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions\n columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(\n schema=data.schema.to_arrow_schema(), partitions=partitions\n )\n catalog.create_parquet_table(\n database=database,\n table=table,\n path=path,\n columns_types=columns_types,\n partitions_types=partitions_types,\n description=description,\n parameters=parameters,\n columns_comments=columns_comments,\n boto3_session=session,\n )\n partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(\n path=path, partitions=partitions\n )\n catalog.add_parquet_partitions(\n database=database,\n table=table,\n partitions_values=partitions_values,\n compression=compression,\n boto3_session=session,\n )\n return columns_types, partitions_types, partitions_values\n\n\ndef wait_objects_exist(\n paths: List[str],\n delay: Optional[Union[int, float]] = None,\n max_attempts: Optional[int] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> None:\n \"\"\"Wait Amazon S3 objects exist.\n\n Polls S3.Client.head_object() every 5 seconds (default) until a successful\n state is reached. An error is returned after 20 (default) failed checks.\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n paths : List[str]\n List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n delay : Union[int,float], optional\n The amount of time in seconds to wait between attempts. Default: 5\n max_attempts : int, optional\n The maximum number of attempts to be made. Default: 20\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.wait_objects_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects\n\n \"\"\"\n return _wait_objects(\n waiter_name=\"object_exists\",\n paths=paths,\n delay=delay,\n max_attempts=max_attempts,\n use_threads=use_threads,\n boto3_session=boto3_session,\n )\n\n\ndef wait_objects_not_exist(\n paths: List[str],\n delay: Optional[Union[int, float]] = None,\n max_attempts: Optional[int] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> None:\n \"\"\"Wait Amazon S3 objects not exist.\n\n Polls S3.Client.head_object() every 5 seconds (default) until a successful\n state is reached. An error is returned after 20 (default) failed checks.\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectNotExists\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n paths : List[str]\n List of S3 objects paths (e.g. 
[s3://bucket/key0, s3://bucket/key1]).\n delay : Union[int,float], optional\n The amount of time in seconds to wait between attempts. Default: 5\n max_attempts : int, optional\n The maximum number of attempts to be made. Default: 20\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.wait_objects_not_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects not exist\n\n \"\"\"\n return _wait_objects(\n waiter_name=\"object_not_exists\",\n paths=paths,\n delay=delay,\n max_attempts=max_attempts,\n use_threads=use_threads,\n boto3_session=boto3_session,\n )\n\n\ndef _wait_objects(\n waiter_name: str,\n paths: List[str],\n delay: Optional[Union[int, float]] = None,\n max_attempts: Optional[int] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> None:\n delay = 5 if delay is None else delay\n max_attempts = 20 if max_attempts is None else max_attempts\n _delay: int = int(delay) if isinstance(delay, float) else delay\n\n if len(paths) < 1:\n return None\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n waiter = client_s3.get_waiter(waiter_name)\n _paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]\n if use_threads is False:\n for bucket, key in _paths:\n waiter.wait(Bucket=bucket, Key=key, WaiterConfig={\"Delay\": _delay, \"MaxAttempts\": max_attempts})\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n futures: List[concurrent.futures.Future] = []\n for bucket, key in _paths:\n future: concurrent.futures.Future = executor.submit(\n fn=waiter.wait, Bucket=bucket, Key=key, WaiterConfig={\"Delay\": _delay, \"MaxAttempts\": max_attempts}\n )\n futures.append(future)\n for future in futures:\n future.result()\n return None\n\n\ndef read_parquet_table(\n table: str,\n database: str,\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n columns: Optional[List[str]] = None,\n categories: List[str] = None,\n chunked: bool = False,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read Apache Parquet table registered on AWS Glue Catalog.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n table : str\n AWS Glue Catalog table name.\n database : str\n AWS Glue Catalog database name.\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n columns : List[str], optional\n Names of columns to read from the file(s).\n categories: List[str], optional\n List of columns names that should be returned as pandas.Categorical.\n Recommended for memory restricted environments.\n chunked : bool\n If True will break the data in smaller DataFrames (Non deterministic number of lines).\n Otherwise return a single DataFrame with the whole data.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled 
os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunked=True`.\n\n Examples\n --------\n Reading Parquet Table\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet_table(database='...', table='...')\n\n Reading Parquet Table encrypted\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet_table(\n ... database='...',\n ... table='...'\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading Parquet Table in chunks\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_parquet_table(database='...', table='...', chunked=True)\n >>> for df in dfs:\n >>> print(df) # Smaller Pandas DataFrame\n\n \"\"\"\n path: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)\n return read_parquet(\n path=path,\n filters=filters,\n columns=columns,\n categories=categories,\n chunked=chunked,\n dataset=True,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n\n\ndef merge_datasets(\n source_path: str,\n target_path: str,\n mode: str = \"append\",\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> List[str]:\n \"\"\"Merge a source dataset into a target dataset.\n\n Note\n ----\n If you are merging tables (S3 datasets + Glue Catalog metadata),\n remember that you will also need to update your partitions metadata in some cases.\n (e.g. wr.athena.repair_table(table='...', database='...'))\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n source_path : str,\n S3 Path for the source directory.\n target_path : str,\n S3 Path for the target directory.\n mode: str, optional\n ``append`` (Default), ``overwrite``, ``overwrite_partitions``.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n List[str]\n List of new objects paths.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.merge_datasets(\n ... source_path=\"s3://bucket0/dir0/\",\n ... target_path=\"s3://bucket1/dir1/\",\n ... mode=\"append\"\n ... 
)\n [\"s3://bucket1/dir1/key0\", \"s3://bucket1/dir1/key1\"]\n\n \"\"\"\n source_path = source_path[:-1] if source_path[-1] == \"/\" else source_path\n target_path = target_path[:-1] if target_path[-1] == \"/\" else target_path\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n\n paths: List[str] = list_objects(path=f\"{source_path}/\", boto3_session=session)\n _logger.debug(f\"len(paths): {len(paths)}\")\n if len(paths) < 1:\n return []\n\n if mode == \"overwrite\":\n _logger.debug(f\"Deleting to overwrite: {target_path}/\")\n delete_objects(path=f\"{target_path}/\", use_threads=use_threads, boto3_session=session)\n elif mode == \"overwrite_partitions\":\n paths_wo_prefix: List[str] = [x.replace(f\"{source_path}/\", \"\") for x in paths]\n paths_wo_filename: List[str] = [f\"{x.rpartition('/')[0]}/\" for x in paths_wo_prefix]\n partitions_paths: List[str] = list(set(paths_wo_filename))\n target_partitions_paths = [f\"{target_path}/{x}\" for x in partitions_paths]\n for path in target_partitions_paths:\n _logger.debug(f\"Deleting to overwrite_partitions: {path}\")\n delete_objects(path=path, use_threads=use_threads, boto3_session=session)\n elif mode != \"append\":\n raise exceptions.InvalidArgumentValue(f\"{mode} is a invalid mode option.\")\n\n new_objects: List[str] = copy_objects(\n paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session\n )\n _logger.debug(f\"len(new_objects): {len(new_objects)}\")\n return new_objects\n\n\ndef copy_objects(\n paths: List[str],\n source_path: str,\n target_path: str,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> List[str]:\n \"\"\"Copy a list of S3 objects to another S3 directory.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n paths : List[str]\n List of S3 objects paths (e.g. [s3://bucket/dir0/key0, s3://bucket/dir0/key1]).\n source_path : str,\n S3 Path for the source directory.\n target_path : str,\n S3 Path for the target directory.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n List[str]\n List of new objects paths.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.copy_objects(\n ... paths=[\"s3://bucket0/dir0/key0\", \"s3://bucket0/dir0/key1\"])\n ... source_path=\"s3://bucket0/dir0/\",\n ... target_path=\"s3://bucket1/dir1/\",\n ... 
)\n [\"s3://bucket1/dir1/key0\", \"s3://bucket1/dir1/key1\"]\n\n \"\"\"\n _logger.debug(f\"len(paths): {len(paths)}\")\n if len(paths) < 1:\n return []\n source_path = source_path[:-1] if source_path[-1] == \"/\" else source_path\n target_path = target_path[:-1] if target_path[-1] == \"/\" else target_path\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n batch: List[Tuple[str, str]] = []\n new_objects: List[str] = []\n for path in paths:\n path_wo_prefix: str = path.replace(f\"{source_path}/\", \"\")\n path_final: str = f\"{target_path}/{path_wo_prefix}\"\n new_objects.append(path_final)\n batch.append((path, path_final))\n _logger.debug(f\"len(new_objects): {len(new_objects)}\")\n _copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)\n return new_objects\n\n\ndef _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:\n _logger.debug(f\"len(batch): {len(batch)}\")\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n resource_s3: boto3.resource = _utils.resource(service_name=\"s3\", session=boto3_session)\n for source, target in batch:\n source_bucket, source_key = _utils.parse_path(path=source)\n copy_source: Dict[str, str] = {\"Bucket\": source_bucket, \"Key\": source_key}\n target_bucket, target_key = _utils.parse_path(path=target)\n resource_s3.meta.client.copy(\n CopySource=copy_source,\n Bucket=target_bucket,\n Key=target_key,\n SourceClient=client_s3,\n Config=TransferConfig(num_download_attempts=15, use_threads=use_threads),\n )\n" ]
[ [ "pandas.io.common.infer_compression" ] ]
Llambi/Web_Semantica
[ "16f98a7d78ba08366a67caf2bd44f3f45af6ee21" ]
[ "IndexerQuery/model/QueryAnalizer.py" ]
[ "import numpy as np\n\nfrom model.indexer_v1 import Indexer\n\n\nclass QueryAnalizer:\n def __init__(self, query, document_list, enable_stemming=True, filter_stopwords=True):\n self.__query = Indexer([query], enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)\n self.__indexer = Indexer(document_list, enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)\n self.result = None\n\n def cosine_similarity(self):\n if self.result is not None:\n return self.result\n\n result = {}\n for query_term, value in self.__query.words_index.items():\n indexer_term = self.__indexer.words_index[query_term]\n\n tf_idf_query_term = self.__query.words_index[query_term][\"idf\"] * \\\n self.__query.words_index[query_term][\"documents\"][0][\"tf\"]\n\n tf_documents = list(map(lambda doc: doc[\"tf\"], indexer_term[\"documents\"]))\n\n dot_product = np.dot(tf_idf_query_term, tf_documents)\n\n result[query_term] = list(zip(\n list(\n map(\n lambda doc: doc[\"document\"].text,\n indexer_term[\"documents\"]))\n ,\n list(\n map(\n lambda elem: elem / (np.linalg.norm(tf_idf_query_term) + np.linalg.norm(tf_documents)),\n dot_product\n ))\n ))\n self.result = result\n for key, elm in self.result.items():\n self.result[key] = sorted(elm, key=lambda tup: tup[1], reverse=True)\n return self.result\n" ]
[ [ "numpy.dot", "numpy.linalg.norm" ] ]
dolboBobo/python3_ios
[ "e149f1bc2e50046c8810f83dae7739a8dea939ee", "e149f1bc2e50046c8810f83dae7739a8dea939ee" ]
[ "extraPackages/matplotlib-3.0.3/examples/images_contours_and_fields/contourf_log.py", "extraPackages/matplotlib-3.0.3/examples/subplots_axes_and_figures/zoom_inset_axes.py" ]
[ "\"\"\"\n============================\nContourf and log color scale\n============================\n\nDemonstrate use of a log color scale in contourf\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import ma\nfrom matplotlib import ticker, cm\n\nN = 100\nx = np.linspace(-3.0, 3.0, N)\ny = np.linspace(-2.0, 2.0, N)\n\nX, Y = np.meshgrid(x, y)\n\n# A low hump with a spike coming out.\n# Needs to have z/colour axis on a log scale so we see both hump and spike.\n# linear scale only shows the spike.\nZ1 = np.exp(-(X)**2 - (Y)**2)\nZ2 = np.exp(-(X * 10)**2 - (Y * 10)**2)\nz = Z1 + 50 * Z2\n\n# Put in some negative values (lower left corner) to cause trouble with logs:\nz[:5, :5] = -1\n\n# The following is not strictly essential, but it will eliminate\n# a warning. Comment it out to see the warning.\nz = ma.masked_where(z <= 0, z)\n\n\n# Automatic selection of levels works; setting the\n# log locator tells contourf to use a log scale:\nfig, ax = plt.subplots()\ncs = ax.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)\n\n# Alternatively, you can manually set the levels\n# and the norm:\n# lev_exp = np.arange(np.floor(np.log10(z.min())-1),\n# np.ceil(np.log10(z.max())+1))\n# levs = np.power(10, lev_exp)\n# cs = ax.contourf(X, Y, z, levs, norm=colors.LogNorm())\n\ncbar = fig.colorbar(cs)\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions, methods and classes is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.contourf\nmatplotlib.pyplot.contourf\nmatplotlib.figure.Figure.colorbar\nmatplotlib.pyplot.colorbar\nmatplotlib.axes.Axes.legend\nmatplotlib.pyplot.legend\nmatplotlib.ticker.LogLocator\n", "\"\"\"\n======================\nZoom region inset axes\n======================\n\nExample of an inset axes and a rectangle showing where the zoom is located.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef get_demo_image():\n from matplotlib.cbook import get_sample_data\n import numpy as np\n f = get_sample_data(\"axes_grid/bivariate_normal.npy\", asfileobj=False)\n z = np.load(f)\n # z is a numpy array of 15x15\n return z, (-3, 4, -4, 3)\n\nfig, ax = plt.subplots(figsize=[5, 4])\n\n# make data\nZ, extent = get_demo_image()\nZ2 = np.zeros([150, 150], dtype=\"d\")\nny, nx = Z.shape\nZ2[30:30 + ny, 30:30 + nx] = Z\n\nax.imshow(Z2, extent=extent, interpolation=\"nearest\",\n origin=\"lower\")\n\n# inset axes....\naxins = ax.inset_axes([0.5, 0.5, 0.47, 0.47])\naxins.imshow(Z2, extent=extent, interpolation=\"nearest\",\n origin=\"lower\")\n# sub region of the original image\nx1, x2, y1, y2 = -1.5, -0.9, -2.5, -1.9\naxins.set_xlim(x1, x2)\naxins.set_ylim(y1, y2)\naxins.set_xticklabels('')\naxins.set_yticklabels('')\n\nax.indicate_inset_zoom(axins)\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions and methods is shown in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.inset_axes\nmatplotlib.axes.Axes.indicate_inset_zoom\nmatplotlib.axes.Axes.imshow\n" ]
[ [ "matplotlib.ticker.LogLocator", "numpy.ma.masked_where", "matplotlib.pyplot.subplots", "numpy.exp", "matplotlib.pyplot.show", "numpy.meshgrid", "numpy.linspace" ], [ "numpy.load", "numpy.zeros", "matplotlib.cbook.get_sample_data", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
ska-sa/katsdpdisp
[ "3fd2f5878c0bd3ae56815568446593b876881e3f" ]
[ "katsdpdisp/test/test_data.py" ]
[ "\"\"\"Tests for :py:mod:`katsdpdisp.data`.\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom katsdpdisp.data import SparseArray\n\ndef test_sparsearray(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6,islot_new_bls=6):\n \"\"\"Simulates the assignment and retrieval of data as it happens in the signal displays when \n it receives different sets of baseline data at different timestamps, with some time continuity.\n (fullslots,fullbls,fullchan) is the dimensions of the full/complete dataset\n (nslots,maxbaselines,fullchan) is the true size of the sparse array, representing a size of (nslots,fullbls,fullchan)\n where maxbaselines<fullbls\n islot_new_bls is the number of time stamps that passes before there is a new baseline product selected/chosen in the test sequence\"\"\"\n mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)\n\n rs = np.random.RandomState(seed=0)\n fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])\n histbaselines=[]\n for it in range(fullslots):\n if it%islot_new_bls==0:#add a new baseline, remove old, every so often\n while True:\n newbaseline=rs.random_integers(0,fullbls-1,[1])\n if len(histbaselines)==0 or (newbaseline not in histbaselines[-1]):\n break\n if (len(histbaselines)==0):\n newbaselines=np.r_[newbaseline]\n elif (len(histbaselines[-1])<islot_new_bls):\n newbaselines=np.r_[histbaselines[-1],newbaseline]\n else:\n newbaselines=np.r_[histbaselines[-1][1:],newbaseline]\n histbaselines.append(newbaselines)\n mx[it%nslots,histbaselines[-1],:]=fulldata[it,histbaselines[-1],:]\n for cit in range(islot_new_bls):\n if (cit>=len(histbaselines)):\n break\n hasthesebaselines=list(set(histbaselines[-1-cit]) & set(histbaselines[-1]))\n missingbaselines=list(set(histbaselines[-1-cit]) - set(histbaselines[-1]))\n retrieved=mx[(it-cit)%nslots,hasthesebaselines,:]\n assert_array_equal(retrieved, fulldata[it-cit,hasthesebaselines,:], 'SparseArray getitem test failed')\n missingretrieved=mx[(it-cit)%nslots,missingbaselines,:]\n assert_array_equal(missingretrieved,np.zeros(missingretrieved.shape,dtype=np.int32), 'SparseArray missing baseline test failed')\n\ndef test_sparsearray_indexing(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6):\n mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)\n\n rs = np.random.RandomState(seed=0)\n fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])\n\n mx[0,0,0]=fulldata[0,0,0]\n assert_array_equal(mx[0,0,0], fulldata[0,0,0], 'SparseArray [scalar,scalar,scalar] index test failed')\n\n mx[1,1,:]=fulldata[1,1,:]\n assert_array_equal(mx[1,1,:], fulldata[1,1,:], 'SparseArray [scalar,scalar,slice] index test 2 failed') #baseline change so previous assignment purged (in future may retain until running out of memory and necessary to purge)\n\n mx[2,1,:]=fulldata[2,1,:]\n assert_array_equal(mx[1:3,1,:], fulldata[1:3,1,:], 'SparseArray retain old value test failed') #assign to same baseline so previous slot value remain\n\n mx[3,:maxbaselines,0]=fulldata[3,:maxbaselines,0]\n assert_array_equal(mx[3,:maxbaselines,0], fulldata[3,:maxbaselines,0], 'SparseArray [scalar,slice,scalar] index test failed')\n\n mx[:,1,3]=fulldata[:nslots,1,3]\n assert_array_equal(mx[:,1,3], fulldata[:nslots,1,3], 'SparseArray [slice,scalar,scalar] index test failed')\n \n mx[:,1,:]=fulldata[:nslots,1,:]\n assert_array_equal(mx[:,1,:], fulldata[:nslots,1,:], 'SparseArray [slice,scalar,slice] index test failed')\n\n 
mx[:,1:maxbaselines,:]=fulldata[2:nslots+2,1:maxbaselines,:]\n assert_array_equal(mx[:,1:maxbaselines,:], fulldata[2:nslots+2,1:maxbaselines,:], 'SparseArray [slice,slice,slice] index test failed')\n\n" ]
[ [ "numpy.random.RandomState", "numpy.testing.assert_array_equal", "numpy.zeros" ] ]
rmrafailov/metaworld
[ "b2cd055e5f2413ec6d66ef29e45d05af989dca3b" ]
[ "metaworld/policies/sawyer_coffee_pull_v2_policy.py" ]
[ "import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, assert_fully_parsed, move\n\n\nclass SawyerCoffeePullV2Policy(Policy):\n\n @staticmethod\n @assert_fully_parsed\n def _parse_obs(obs):\n return {\n 'hand_pos': obs[:3],\n 'mug_pos': obs[3:6],\n 'unused_info': obs[6:],\n }\n\n def get_action(self, obs):\n o_d = self._parse_obs(obs)\n\n action = Action({\n 'delta_pos': np.arange(3),\n 'grab_effort': 3\n })\n\n action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.)\n action['grab_effort'] = self._grab_effort(o_d)\n\n return action.array\n\n @staticmethod\n def _desired_pos(o_d):\n pos_curr = o_d['hand_pos']\n pos_mug = o_d['mug_pos'] + np.array([-.005, .0, .05])\n\n if np.linalg.norm(pos_curr[:2] - pos_mug[:2]) > 0.06:\n return pos_mug + np.array([.0, .0, .15])\n elif abs(pos_curr[2] - pos_mug[2]) > 0.02:\n return pos_mug\n elif pos_curr[1] > .65:\n return np.array([.5, .6, .1])\n else:\n return np.array([pos_curr[0] - .1, .6, .1])\n\n @staticmethod\n def _grab_effort(o_d):\n pos_curr = o_d['hand_pos']\n pos_mug = o_d['mug_pos'] + np.array([.01, .0, .05])\n\n if np.linalg.norm(pos_curr[:2] - pos_mug[:2]) > 0.06 or \\\n abs(pos_curr[2] - pos_mug[2]) > 0.1:\n return -1.\n else:\n return .7\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.linalg.norm" ] ]
AndreeaMutu/Python-Baseball
[ "6ca5e5006fd01ffa5b55c4859ebad7251a1f35a6" ]
[ "stats/data.py" ]
[ "import os\nimport glob\nimport pandas as pd\n\ngame_files = glob.glob(os.path.join(os.getcwd(),'games','*.EVE'))\ngame_files.sort()\n\ngame_frames = []\nfor game_file in game_files:\n game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])\n game_frames.append(game_frame)\n\ngames = pd.concat(game_frames)\ngames.loc[games['multi5']=='??',['multi5']]=''\nidentifiers = games['multi2'].str.extract(r'(.LS(\\d{4})\\d{5})')\nidentifiers = identifiers.fillna(method='ffill')\nidentifiers.columns=['game_id', 'year']\ngames = pd.concat([games, identifiers], sort=False, axis=1)\ngames = games.fillna(' ')\n\ngames.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])\nprint(games.head())\n" ]
[ [ "pandas.read_csv", "pandas.concat", "pandas.Categorical" ] ]
KleistvonLiu/denoise-imu-gyro
[ "76e75e194a3804c473be077663b4a668fc0b7c28" ]
[ "main_EUROC.py" ]
[ "import os\nimport torch\nimport src.learning as lr\nimport src.networks as sn\nimport src.losses as sl\nimport src.dataset as ds\nimport numpy as np\n\nbase_dir = os.path.dirname(os.path.realpath(__file__))\ndata_dir = '/path/to/EUROC/dataset'\n# test a given network\n# address = os.path.join(base_dir, 'results/EUROC/2020_02_18_16_52_55/')\n# or test the last trained network\naddress = \"last\"\n################################################################################\n# Network parameters\n################################################################################\nnet_class = sn.GyroNet\nnet_params = {\n 'in_dim': 6,\n 'out_dim': 3,\n 'c0': 16,\n 'dropout': 0.1,\n 'ks': [7, 7, 7, 7],\n 'ds': [4, 4, 4],\n 'momentum': 0.1,\n 'gyro_std': [1*np.pi/180, 2*np.pi/180, 5*np.pi/180],\n}\n################################################################################\n# Dataset parameters\n################################################################################\ndataset_class = ds.EUROCDataset\ndataset_params = {\n # where are raw data ?\n 'data_dir': data_dir,\n # where record preloaded data ?\n 'predata_dir': os.path.join(base_dir, 'data/EUROC'),\n # set train, val and test sequence\n 'train_seqs': [\n 'MH_01_easy',\n 'MH_03_medium',\n 'MH_05_difficult',\n 'V1_02_medium',\n 'V2_01_easy',\n 'V2_03_difficult'\n ],\n 'val_seqs': [\n 'MH_01_easy',\n 'MH_03_medium',\n 'MH_05_difficult',\n 'V1_02_medium',\n 'V2_01_easy',\n 'V2_03_difficult',\n ],\n 'test_seqs': [\n 'MH_02_easy',\n 'MH_04_difficult',\n 'V2_02_medium',\n 'V1_03_difficult',\n 'V1_01_easy',\n ],\n # size of trajectory during training\n 'N': 32 * 500, # should be integer * 'max_train_freq'\n 'min_train_freq': 16,\n 'max_train_freq': 32,\n}\n################################################################################\n# Training parameters\n################################################################################\ntrain_params = {\n 'optimizer_class': torch.optim.Adam,\n 'optimizer': {\n 'lr': 0.01,\n 'weight_decay': 1e-1,\n 'amsgrad': False,\n },\n 'loss_class': sl.GyroLoss,\n 'loss': {\n 'min_N': int(np.log2(dataset_params['min_train_freq'])),\n 'max_N': int(np.log2(dataset_params['max_train_freq'])),\n 'w': 1e6,\n 'target': 'rotation matrix',\n 'huber': 0.005,\n 'dt': 0.005,\n },\n 'scheduler_class': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,\n 'scheduler': {\n 'T_0': 600,\n 'T_mult': 2,\n 'eta_min': 1e-3,\n },\n 'dataloader': {\n 'batch_size': 10,\n 'pin_memory': False,\n 'num_workers': 0,\n 'shuffle': False,\n },\n # frequency of validation step\n 'freq_val': 600,\n # total number of epochs\n 'n_epochs': 1800,\n # where record results ?\n 'res_dir': os.path.join(base_dir, \"results/EUROC\"),\n # where record Tensorboard log ?\n 'tb_dir': os.path.join(base_dir, \"results/runs/EUROC\"),\n}\n################################################################################\n# Train on training data set\n################################################################################\n# learning_process = lr.GyroLearningBasedProcessing(train_params['res_dir'],\n# train_params['tb_dir'], net_class, net_params, None,\n# train_params['loss']['dt'])\n# learning_process.train(dataset_class, dataset_params, train_params)\n################################################################################\n# Test on full data set\n################################################################################\nlearning_process = lr.GyroLearningBasedProcessing(train_params['res_dir'],\n 
train_params['tb_dir'], net_class, net_params, address=address,\n dt=train_params['loss']['dt'])\nlearning_process.test(dataset_class, dataset_params, ['test'])" ]
[ [ "numpy.log2" ] ]
nzare/ignite
[ "b53c6aeef87754b3cd3638c91172b386dc73af12" ]
[ "tests/ignite/contrib/handlers/test_polyaxon_logger.py" ]
[ "import os\nfrom unittest.mock import MagicMock, call\n\nimport pytest\nimport torch\n\nfrom ignite.contrib.handlers.polyaxon_logger import *\nfrom ignite.engine import Engine, Events, State\n\nos.environ[\"POLYAXON_NO_OP\"] = \"1\"\n\n\ndef test_output_handler_with_wrong_logger_type():\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: x)\n\n mock_logger = MagicMock()\n mock_engine = MagicMock()\n with pytest.raises(RuntimeError, match=\"Handler 'OutputHandler' works only with PolyaxonLogger\"):\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n\ndef test_output_handler_output_transform():\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: x)\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.output = 12345\n mock_engine.state.iteration = 123\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n mock_logger.log_metrics.assert_called_once_with(step=123, **{\"tag/output\": 12345})\n\n wrapper = OutputHandler(\"another_tag\", output_transform=lambda x: {\"loss\": x})\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n mock_logger.log_metrics.assert_called_once_with(step=123, **{\"another_tag/loss\": 12345})\n\n\ndef test_output_handler_metric_names():\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\", \"b\", \"c\"])\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 12.23, \"b\": 23.45, \"c\": torch.tensor(10.0)})\n mock_engine.state.iteration = 5\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_called_once_with(step=5, **{\"tag/a\": 12.23, \"tag/b\": 23.45, \"tag/c\": 10.0})\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\",])\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": torch.Tensor([0.0, 1.0, 2.0, 3.0])})\n mock_engine.state.iteration = 5\n\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_has_calls(\n [call(step=5, **{\"tag/a/0\": 0.0, \"tag/a/1\": 1.0, \"tag/a/2\": 2.0, \"tag/a/3\": 3.0}),], any_order=True\n )\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\", \"c\"])\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 55.56, \"c\": \"Some text\"})\n mock_engine.state.iteration = 7\n\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n with pytest.warns(UserWarning):\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_has_calls([call(step=7, **{\"tag/a\": 55.56})], any_order=True)\n\n # all metrics\n wrapper = OutputHandler(\"tag\", metric_names=\"all\")\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 12.23, \"b\": 23.45, \"c\": torch.tensor(10.0)})\n mock_engine.state.iteration = 5\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n 
mock_logger.log_metrics.assert_called_once_with(step=5, **{\"tag/a\": 12.23, \"tag/b\": 23.45, \"tag/c\": 10.0})\n\n\ndef test_output_handler_both():\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\", \"b\"], output_transform=lambda x: {\"loss\": x})\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 12.23, \"b\": 23.45})\n mock_engine.state.epoch = 5\n mock_engine.state.output = 12345\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_called_once_with(step=5, **{\"tag/a\": 12.23, \"tag/b\": 23.45, \"tag/loss\": 12345})\n\n\ndef test_output_handler_with_wrong_global_step_transform_output():\n def global_step_transform(*args, **kwargs):\n return \"a\"\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: {\"loss\": x}, global_step_transform=global_step_transform)\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.epoch = 5\n mock_engine.state.output = 12345\n\n with pytest.raises(TypeError, match=\"global_step must be int\"):\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n\n\ndef test_output_handler_with_global_step_transform():\n def global_step_transform(*args, **kwargs):\n return 10\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: {\"loss\": x}, global_step_transform=global_step_transform)\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.epoch = 5\n mock_engine.state.output = 12345\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n mock_logger.log_metrics.assert_called_once_with(step=10, **{\"tag/loss\": 12345})\n\n\ndef test_output_handler_with_global_step_from_engine():\n\n mock_another_engine = MagicMock()\n mock_another_engine.state = State()\n mock_another_engine.state.epoch = 10\n mock_another_engine.state.output = 12.345\n\n wrapper = OutputHandler(\n \"tag\",\n output_transform=lambda x: {\"loss\": x},\n global_step_transform=global_step_from_engine(mock_another_engine),\n )\n\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.epoch = 1\n mock_engine.state.output = 0.123\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_has_calls(\n [call(step=mock_another_engine.state.epoch, **{\"tag/loss\": mock_engine.state.output})]\n )\n\n mock_another_engine.state.epoch = 11\n mock_engine.state.output = 1.123\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n assert mock_logger.log_metrics.call_count == 2\n mock_logger.log_metrics.assert_has_calls(\n [call(step=mock_another_engine.state.epoch, **{\"tag/loss\": mock_engine.state.output})]\n )\n\n\ndef test_optimizer_params_handler_wrong_setup():\n\n with pytest.raises(TypeError):\n OptimizerParamsHandler(optimizer=None)\n\n optimizer = MagicMock(spec=torch.optim.Optimizer)\n handler = OptimizerParamsHandler(optimizer=optimizer)\n\n mock_logger = MagicMock()\n mock_engine = MagicMock()\n with pytest.raises(RuntimeError, match=\"Handler OptimizerParamsHandler works only with PolyaxonLogger\"):\n handler(mock_engine, mock_logger, 
Events.ITERATION_STARTED)\n\n\ndef test_optimizer_params():\n\n optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)\n wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name=\"lr\")\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.iteration = 123\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n mock_logger.log_metrics.assert_called_once_with(**{\"lr/group_0\": 0.01, \"step\": 123})\n\n wrapper = OptimizerParamsHandler(optimizer, param_name=\"lr\", tag=\"generator\")\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n mock_logger.log_metrics.assert_called_once_with(**{\"generator/lr/group_0\": 0.01, \"step\": 123})\n\n\ndef test_integration():\n\n n_epochs = 5\n data = list(range(50))\n\n losses = torch.rand(n_epochs * len(data))\n losses_iter = iter(losses)\n\n def update_fn(engine, batch):\n return next(losses_iter)\n\n trainer = Engine(update_fn)\n\n plx_logger = PolyaxonLogger()\n\n def dummy_handler(engine, logger, event_name):\n global_step = engine.state.get_event_attrib_value(event_name)\n logger.log_metrics(step=global_step, **{\"{}\".format(\"test_value\"): global_step})\n\n plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)\n\n trainer.run(data, max_epochs=n_epochs)\n\n\ndef test_integration_as_context_manager():\n\n n_epochs = 5\n data = list(range(50))\n\n losses = torch.rand(n_epochs * len(data))\n losses_iter = iter(losses)\n\n def update_fn(engine, batch):\n return next(losses_iter)\n\n with PolyaxonLogger() as plx_logger:\n\n trainer = Engine(update_fn)\n\n def dummy_handler(engine, logger, event_name):\n global_step = engine.state.get_event_attrib_value(event_name)\n logger.log_metrics(step=global_step, **{\"{}\".format(\"test_value\"): global_step})\n\n plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)\n\n trainer.run(data, max_epochs=n_epochs)\n\n\[email protected]\ndef no_site_packages():\n import sys\n\n polyaxon_client_modules = {}\n for k in sys.modules:\n if \"polyaxon\" in k:\n polyaxon_client_modules[k] = sys.modules[k]\n for k in polyaxon_client_modules:\n del sys.modules[k]\n\n prev_path = list(sys.path)\n sys.path = [p for p in sys.path if \"site-packages\" not in p]\n yield \"no_site_packages\"\n sys.path = prev_path\n for k in polyaxon_client_modules:\n sys.modules[k] = polyaxon_client_modules[k]\n\n\ndef test_no_polyaxon_client(no_site_packages):\n\n with pytest.raises(RuntimeError, match=r\"This contrib module requires polyaxon-client to be installed\"):\n PolyaxonLogger()\n" ]
[ [ "torch.tensor", "torch.Tensor" ] ]
Monnoroch/tensorflow
[ "1d76583411038767f673a0c96174c80eaf9ff42f" ]
[ "tensorflow/python/ops/math_ops.py" ]
[ "\"\"\"## Arithmetic Operators\n\nTensorFlow provides several operations that you can use to add basic arithmetic\noperators to your graph.\n\n@@add\n@@sub\n@@mul\n@@div\n@@mod\n\n## Basic Math Functions\n\nTensorFlow provides several operations that you can use to add basic\nmathematical functions to your graph.\n\n@@add_n\n@@abs\n@@neg\n@@sign\n@@inv\n@@square\n@@round\n@@sqrt\n@@rsqrt\n@@pow\n@@exp\n@@log\n@@ceil\n@@floor\n@@maximum\n@@minimum\n@@cos\n@@sin\n\n## Matrix Math Functions\n\nTensorFlow provides several operations that you can use to add basic\nmathematical functions for matrices to your graph.\n\n@@diag\n@@transpose\n\n@@matmul\n@@batch_matmul\n\n@@matrix_determinant\n@@batch_matrix_determinant\n\n@@matrix_inverse\n@@batch_matrix_inverse\n\n@@cholesky\n@@batch_cholesky\n\n## Complex Number Functions\n\nTensorFlow provides several operations that you can use to add complex number\nfunctions to your graph.\n\n@@complex\n@@complex_abs\n@@conj\n@@imag\n@@real\n\n## Reduction\n\nTensorFlow provides several operations that you can use to perform\ncommon math computations that reduce various dimensions of a tensor.\n\n@@reduce_sum\n@@reduce_prod\n@@reduce_min\n@@reduce_max\n@@reduce_mean\n@@reduce_all\n@@reduce_any\n\n@@accumulate_n\n\n## Segmentation\n\nTensorFlow provides several operations that you can use to perform common\nmath computations on tensor segments.\nHere a segmentation is a partitioning of a tensor along\nthe first dimension, i.e. it defines a mapping from the first dimension onto\n`segment_ids`. The `segment_ids` tensor should be the size of\nthe first dimension, `d0`, with consecutive IDs in the range `0` to `k`,\nwhere `k<d0`.\nIn particular, a segmentation of a matrix tensor is a mapping of rows to\nsegments.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.segment_sum(c, tf.constant([0, 0, 1]))\n ==> [[0 0 0 0]\n [5 6 7 8]]\n```\n\n@@segment_sum\n@@segment_prod\n@@segment_min\n@@segment_max\n@@segment_mean\n\n@@unsorted_segment_sum\n\n@@sparse_segment_sum\n@@sparse_segment_mean\n\n\n## Sequence Comparison and Indexing\n\nTensorFlow provides several operations that you can use to add sequence\ncomparison and index extraction to your graph. 
You can use these operations to\ndetermine sequence differences and determine the indexes of specific values in\na tensor.\n\n@@argmin\n@@argmax\n\n@@listdiff\n@@where\n@@unique\n\n@@edit_distance\n\n@@invert_permutation\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.python.platform\n\nimport numpy as np\nimport six.moves\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import types\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import common_shapes\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import gen_state_ops\n# pylint: disable=wildcard-import,undefined-variable\nfrom tensorflow.python.ops.gen_math_ops import *\n\n\n# Aliases for some automatically-generated names.\nargmax = gen_math_ops.arg_max\nargmin = gen_math_ops.arg_min\nlinspace = gen_math_ops.lin_space\n\n\n# pylint: disable=anomalous-backslash-in-string,protected-access\ndef abs(x, name=None):\n \"\"\"Computes the absolute value of a tensor.\n\n Given a tensor of real numbers `x`, this operation returns a tensor\n containing the absolute value of each element in `x`. For example, if x is\n an input element and y is an output element, this operation computes\n \\\\\\\\(y = |x|\\\\\\\\).\n\n See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex\n number.\n\n Args:\n x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same size and type as `x` with absolute values.\n \"\"\"\n with ops.op_scope([x], name, \"Abs\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype == types.complex64:\n return gen_math_ops.complex_abs(x, name=name)\n return gen_math_ops._abs(x, name=name)\n\n\n\ndef pow(x, y, name=None):\n \"\"\"Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\\\\\(x^y\\\\\\\\) for\n corresponding elements in `x` and `y`. 
For example:\n\n ```\n # tensor 'x' is [[2, 2]], [3, 3]]\n # tensor 'y' is [[8, 16], [2, 3]]\n tf.pow(x, y) ==> [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.\n y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n \"\"\"\n with ops.op_scope([x], name, \"Pow\") as name:\n return gen_math_ops._pow(x, y, name=name)\n\n\ndef complex(real, imag, name=None):\n \"\"\"Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation computes complex numbers elementwise of the form \\\\\\\\(a + bj\\\\\\\\),\n where *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must be the same shape.\n\n For example:\n\n ```\n # tensor 'real' is [2.25, 3.25]\n # tensor `imag` is [4.75, 5.75]\n tf.complex(real, imag) ==> [[2.25 + 4.74j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor` of type `float`.\n imag: A `Tensor` of type `float`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64`.\n \"\"\"\n with ops.op_scope([real, imag], name, \"Complex\") as name:\n return gen_math_ops._complex(real, imag, name=name)\n\n\ndef round(x, name=None):\n \"\"\"Rounds the values of a tensor to the nearest integer, element-wise.\n\n For example:\n\n ```python\n # 'a' is [0.9, 2.5, 2.3, -4.4]\n tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]\n ```\n\n Args:\n x: A `Tensor` of type `float` or `double`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as `x`.\n \"\"\"\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_integer:\n return x\n else:\n return floor(x + 0.5, name=name)\n\n\ndef cast(x, dtype, name=None):\n \"\"\"Casts a tensor to a new type.\n\n The operation casts `x` (in case of `Tensor`) or `x.values`\n (in case of `SparseTensor`) to `dtype`.\n\n For example:\n\n ```python\n # tensor `a` is [1.8, 2.2], dtype=tf.float\n tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32\n ```\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n dtype: The destination type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `dtype`.\n \"\"\"\n with ops.op_scope([x], name, \"Cast\") as name:\n if isinstance(x, ops.SparseTensor):\n values_cast = cast(x.values, dtype, name=name)\n return ops.SparseTensor(x.indices, values_cast, x.shape)\n else:\n # TODO(touts): Handle what Josh said.\n #\n # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that\n # allows some conversions that cast() can't do, e.g. 
casting numbers to\n # strings.\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.base_dtype == dtype:\n return x\n return gen_math_ops.cast(x, dtype, name=name)\n\n\ndef to_float(x, name=\"ToFloat\"):\n \"\"\"Casts a tensor to type `float32`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float32`.\n \"\"\"\n return cast(x, types.float32, name=name)\n\n\ndef to_double(x, name=\"ToDouble\"):\n \"\"\"Casts a tensor to type `float64`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float64`.\n \"\"\"\n return cast(x, types.float64, name=name)\n\n\ndef to_int32(x, name=\"ToInt32\"):\n \"\"\"Casts a tensor to type `int32`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int32`.\n \"\"\"\n return cast(x, types.int32, name=name)\n\n\ndef to_int64(x, name=\"ToInt64\"):\n \"\"\"Casts a tensor to type `int64`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int64`.\n \"\"\"\n return cast(x, types.int64, name=name)\n\n\ndef to_bfloat16(x, name=\"ToBFloat16\"):\n \"\"\"Casts a tensor to type `bfloat16`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `bfloat16`.\n \"\"\"\n return cast(x, types.bfloat16, name=name)\n\n\nops.Tensor._override_operator(\"__neg__\", neg)\nops.Tensor._override_operator(\"__abs__\", abs)\n# __invert__ corresponds to the ~ operator. Here we follow the numpy convention\n# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean\n# tensors and will throw a TypeError if used on nonboolean arrays\nops.Tensor._override_operator(\"__invert__\", logical_not)\n\n\ndef _OverrideBinaryOperatorHelper(func, op_name):\n \"\"\"Register operators with different tensor and scalar versions.\n\n Args:\n func: the operator\n op_name: name of the operator being overridden\n \"\"\"\n\n def binary_op_wrapper(x, y):\n with ops.op_scope([x, y], None, op_name) as name:\n assert isinstance(x, ops.Tensor)\n y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name=\"y\")\n return func(x, y, name=name)\n\n ops.Tensor._override_operator(\"__%s__\" % op_name, binary_op_wrapper)\n del binary_op_wrapper\n\n def r_binary_op_wrapper(y, x):\n with ops.op_scope([x, y], None, op_name) as name:\n assert isinstance(y, ops.Tensor)\n x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name=\"x\")\n return func(x, y, name=name)\n\n ops.Tensor._override_operator(\"__r%s__\" % op_name, r_binary_op_wrapper)\n del r_binary_op_wrapper\n\n\n# Conversion table for __truediv__. 
None entries mean no conversion required.\n_TRUEDIV_TABLE = {\n types.uint8: types.float32,\n types.int8: types.float32,\n types.int16: types.float32,\n types.int32: types.float64,\n types.int64: types.float64,\n types.float32: None,\n types.float64: None,\n types.complex64: None,\n}\n\n\ndef truediv(x, y, name=None):\n \"\"\"Divides x / y elementwise, always producing floating point results.\n\n The same as `tf.div` for floating point arguments, but casts integer arguments\n to floating point before dividing so that the result is always floating point.\n This op is generated by normal `x / y` division in Python 3 and in Python 2.7\n with `from __future__ import division`. If you want integer division that\n rounds down, use `x // y` or `tf.floordiv`.\n\n `x` and `y` must have the same numeric type. If the inputs are floating\n point, the output will have the same type. If the inputs are integral, the\n inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n and `int64` (matching the behavior of Numpy).\n\n Args:\n x: `Tensor` numerator of numeric type.\n y: `Tensor` denominator of numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` evaluated in floating point.\n\n Raises:\n TypeError: If `x` and `y` have different dtypes.\n \"\"\"\n with ops.op_scope([x, y], name, \"truediv\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n x = cast(x, dtype)\n y = cast(y, dtype)\n return div(x, y, name=name)\n\n\ndef floordiv(x, y, name=None):\n \"\"\"Divides `x / y` elementwise, rounding down for floating point.\n\n The same as `tf.div(x,y)`, but uses `tf.floor(tf.div(x,y))` for floating\n point arguments so that the result is always an integer (though possibly an\n integer represented as floating point). 
This op is generated by `x // y`\n floor division in Python 3 and in Python 2.7 with\n `from __future__ import division`.\n\n Note that for efficiency, __floordiv__ uses C semantics for negative numbers\n (unlike Python and Numpy).\n\n `x` and `y` must have the same type, and the result will have the same type\n as well.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` numerator of real numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` rounded down (except possibly for integers in C).\n\n Raises:\n TypeError: If the inputs are complex.\n \"\"\"\n with ops.op_scope([x, y], name, \"floordiv\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n dtype = x.dtype\n if dtype.is_floating:\n return floor(div(x, y), name=name)\n else:\n if not dtype.is_integer:\n raise TypeError(\"Expected floating point or integer, got %r\" % dtype)\n return div(x, y, name=name)\n\n\n_OverrideBinaryOperatorHelper(add, \"add\")\n_OverrideBinaryOperatorHelper(sub, \"sub\")\n_OverrideBinaryOperatorHelper(mul, \"mul\")\n_OverrideBinaryOperatorHelper(div, \"div\")\n_OverrideBinaryOperatorHelper(truediv, \"truediv\")\n_OverrideBinaryOperatorHelper(floordiv, \"floordiv\")\n_OverrideBinaryOperatorHelper(mod, \"mod\")\n\n\ndef logical_xor(x, y, name=\"LogicalXor\"):\n \"\"\"x ^ y = (x | y) & ~(x & y).\"\"\"\n # TODO(alemi) Make this a cwise op if people end up relying on it.\n return logical_and(logical_or(x, y), logical_not(logical_and(x, y)),\n name=name)\n\n_OverrideBinaryOperatorHelper(logical_and, \"and\")\n_OverrideBinaryOperatorHelper(logical_or, \"or\")\n_OverrideBinaryOperatorHelper(logical_xor, \"xor\")\n\nops.Tensor._override_operator(\"__lt__\", less)\nops.Tensor._override_operator(\"__le__\", less_equal)\nops.Tensor._override_operator(\"__gt__\", greater)\nops.Tensor._override_operator(\"__ge__\", greater_equal)\n\n\ndef range(start, limit, delta=1, name=\"range\"):\n \"\"\"Creates a sequence of integers.\n\n This operation creates a sequence of integers that begins at `start` and\n extends by increments of `delta` up to but not including `limit`.\n\n For example:\n\n ```\n # 'start' is 3\n # 'limit' is 18\n # 'delta' is 3\n tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]\n ```\n\n Args:\n start: A 0-D (scalar) of type `int32`. First entry in sequence.\n limit: A 0-D (scalar) of type `int32`. Upper limit of sequence,\n exclusive.\n delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. 
Default is 1.\n Number that increments `start`.\n name: A name for the operation (optional).\n\n Returns:\n An 1-D `int32` `Tensor`.\n \"\"\"\n return gen_math_ops._range(start, limit, delta, name=name)\n\n\[email protected](\"Range\")\ndef _RangeShape(op):\n start_value = tensor_util.ConstantValue(op.inputs[0])\n limit_value = tensor_util.ConstantValue(op.inputs[1])\n delta_value = tensor_util.ConstantValue(op.inputs[2])\n if start_value is None or limit_value is None or delta_value is None:\n return [tensor_shape.vector(None)]\n else:\n return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //\n delta_value)]\n\n\n# Reduction operations\ndef _ReductionDims(x, reduction_indices):\n \"\"\"Returns range(0, rank(x)) if reduction_indices is None.\"\"\"\n if reduction_indices is not None:\n return reduction_indices\n else:\n return range(0, array_ops.rank(x))\n\n\ndef reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the sum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[1, 1, 1]]\n # [1, 1, 1]]\n tf.reduce_sum(x) ==> 6\n tf.reduce_sum(x, 0) ==> [2, 2, 2]\n tf.reduce_sum(x, 1) ==> [3, 3]\n tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]\n tf.reduce_sum(x, [0, 1]) ==> 6\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[1., 1. ]]\n # [2., 2.]]\n tf.reduce_mean(x) ==> 1.5\n tf.reduce_mean(x, 0) ==> [1.5, 1.5]\n tf.reduce_mean(x, 1) ==> [1., 2.]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. 
If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the product of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_min(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the minimum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_max(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the maximum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. 
If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_all(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the \"logical and\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[True, True]]\n # [False, False]]\n tf.reduce_all(x) ==> False\n tf.reduce_all(x, 0) ==> [False, False]\n tf.reduce_all(x, 1) ==> [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_any(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the \"logical or\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[True, True]]\n # [False, False]]\n tf.reduce_any(x) ==> True\n tf.reduce_any(x, 0) ==> [True, True]\n tf.reduce_any(x, 1) ==> [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef matmul(a, b,\n transpose_a=False, transpose_b=False,\n a_is_sparse=False, b_is_sparse=False,\n name=None):\n \"\"\"Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n\n The inputs must be two-dimensional matrices, with matching inner dimensions,\n possibly after transposition.\n\n Both matrices must be of the same type. The supported types are:\n `float`, `double`, `int32`, `complex64`.\n\n Either matrix can be transposed on the fly by setting the corresponding flag\n to `True`. This is `False` by default.\n\n If one or both of the matrices contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n\n For example:\n\n ```python\n # 2-D tensor `a`\n a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]\n [4. 5. 
6.]]\n # 2-D tensor `b`\n b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]\n [9. 10.]\n [11. 12.]]\n c = tf.matmul(a, b) => [[58 64]\n [139 154]]\n ```\n\n Args:\n a: `Tensor` of type `float`, `double`, `int32` or `complex64`.\n b: `Tensor` with same type as `a`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix.\n name: Name for the operation (optional).\n\n Returns:\n A `Tensor` of the same type as `a`.\n \"\"\"\n with ops.op_scope([a, b], name, \"MatMul\") as name:\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n if a.dtype == types.float32 and (a_is_sparse or b_is_sparse):\n return sparse_matmul(a, b,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=a_is_sparse,\n b_is_sparse=b_is_sparse,\n name=name)\n else:\n return gen_math_ops._mat_mul(a, b,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n name=name)\n\nsparse_matmul = gen_math_ops._sparse_mat_mul\nbatch_matmul = gen_math_ops._batch_mat_mul\n\nops.RegisterShape(\"MatMul\")(common_shapes.matmul_shape)\nops.RegisterShape(\"SparseMatMul\")(common_shapes.matmul_shape)\n\n\ndef _as_indexed_slices(x):\n \"\"\"Convert 'x' to IndexedSlices.\n\n Convert a dense Tensor to a block-sparse IndexedSlices.\n\n Args:\n x: Either a Tensor object, or an IndexedSlices object.\n\n Returns:\n An IndexedSlices object.\n\n Raises:\n TypeError: If 'x' is not a Tensor or an IndexedSlices object.\n \"\"\"\n # TODO(touts): op_scope\n if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):\n raise TypeError(\"Not a Tensor or IndexedSlices: %s\" % type(x))\n if isinstance(x, ops.IndexedSlices):\n return x\n x_shape = array_ops.shape(x)\n return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)\n\n\ndef _as_indexed_slices_list(inputs):\n \"\"\"Convert all elements of 'inputs' to IndexedSlices.\n\n Additionally, homogenize the types of all the indices to\n either int32 or int64.\n\n Args:\n inputs: List containing either Tensor or IndexedSlices objects.\n\n Returns:\n A list of IndexedSlices objects.\n\n Raises:\n TypeError: If 'inputs' is not a list or a tuple.\n \"\"\"\n if not isinstance(inputs, (list, tuple)):\n raise TypeError(\"Expected a list or tuple, not a %s\" % type(inputs))\n outputs = [_as_indexed_slices(i) for i in inputs]\n with_int32_index = [o.indices for o in outputs\n if o.indices.dtype == types.int32]\n if not with_int32_index or len(with_int32_index) == len(outputs):\n return outputs\n casted_outputs = []\n for o in outputs:\n if o.indices.dtype == types.int32:\n casted_outputs.append(\n ops.IndexedSlices(o.values, cast(o.indices, types.int64),\n o.dense_shape))\n else:\n casted_outputs.append(o)\n return casted_outputs\n\n\ndef accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):\n \"\"\"Returns the element-wise sum of a list of tensors.\n\n Optionally, pass `shape` and `tensor_dtype` for shape and type checking,\n otherwise, these are inferred.\n\n For example:\n\n ```python\n # tensor 'a' is [[1, 2], [3, 4]\n # tensor `b` is [[5, 0], [0, 6]]\n tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]\n\n # Explicitly pass shape and type\n tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)\n ==> [[7, 4], [6, 14]]\n ```\n\n Args:\n inputs: A list of `Tensor` objects, each with same shape and type.\n shape: Shape of elements of `inputs`.\n 
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if tensor_dtype is None:
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    if not all(x.dtype == inputs[0].dtype for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    tensor_dtype = inputs[0].dtype
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)


@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
  """Shape function for BatchMatMul op."""
  a_shape = op.inputs[0].get_shape()
  adj_a = op.get_attr("adj_x")
  b_shape = op.inputs[1].get_shape()
  adj_b = op.get_attr("adj_y")
  if not a_shape.is_fully_defined() or not b_shape.is_fully_defined():
    return [tensor_shape.unknown_shape()]
  batch_dims = a_shape[:-2].merge_with(b_shape[:-2])
  output_rows = a_shape[-1] if adj_a else a_shape[-2]
  output_cols = b_shape[-2] if adj_b else b_shape[-1]
  inner_a = a_shape[-2] if adj_a else a_shape[-1]
  inner_b = b_shape[-1] if adj_b else b_shape[-2]
  inner_a.assert_is_compatible_with(inner_b)
  return [batch_dims.concatenate([output_rows, output_cols])]


def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
      otherwise the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Sigmoid") as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(x, name=name)


def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
      the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Tanh") as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._tanh(x, name=name)


ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)


@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs."""
  shape_x = op.inputs[0].get_shape()
  shape_y = op.inputs[1].get_shape()
  if shape_x.ndims is None or shape_y.ndims is None:
    return [tensor_shape.unknown_shape()]

  # To compute the broadcasted dimensions, we zip together shape_x and shape_y,
  # and pad with 1 to make them the same length.
  broadcasted_dims = reversed(list(six.moves.zip_longest(
      reversed(shape_x.dims),
      reversed(shape_y.dims),
      fillvalue=tensor_shape.Dimension(1))))
  # Next we combine the dimensions according to the numpy broadcasting rules.
  # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
  return_dims = []
  for (dim_x, dim_y) in broadcasted_dims:
    if dim_x.value is None or dim_y.value is None:
      # One or both dimensions is unknown. If either dimension is greater than
      # 1, we assume that the program is correct, and the other dimension will
      # be broadcast to match it.
      # TODO(mrry): If we eliminate the shape checks in C++, we must still
      # assert that the unknown dim is either 1 or the same as the known dim.
      if dim_x.value is not None and dim_x.value > 1:
        return_dims.append(dim_x)
      elif dim_y.value is not None and dim_y.value > 1:
        return_dims.append(dim_y)
      else:
        return_dims.append(None)
    elif dim_x.value == 1:
      # We will broadcast dim_x to dim_y.
      return_dims.append(dim_y)
    elif dim_y.value == 1:
      # We will broadcast dim_y to dim_x.
      return_dims.append(dim_x)
    elif dim_x.value == dim_y.value:
      # The dimensions are compatible, so output is the same size in that
      # dimension.
      return_dims.append(dim_x.merge_with(dim_y))
    else:
      raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                       % (shape_x, shape_y))
  return [tensor_shape.TensorShape(return_dims)]


@ops.RegisterShape("AddN")
def _AddNShape(op):
  merged_shape = tensor_shape.unknown_shape()
  for input_ in op.inputs:
    merged_shape = merged_shape.merge_with(input_.get_shape())
  return [merged_shape]


@ops.RegisterShape("Select")
def _SelectShape(op):
  # All three inputs must have the same shape.
  return [op.inputs[0].get_shape()
          .merge_with(op.inputs[1].get_shape())
          .merge_with(op.inputs[2].get_shape())]


@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
  """Common shape function for arg-reduction ops."""
  dimension_shape = op.inputs[1].get_shape()
  dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims is None:
    return [tensor_shape.unknown_shape()]
  elif input_shape.ndims <= 1:
    return [tensor_shape.scalar()]

  dimension = tensor_util.ConstantValue(op.inputs[1])
  if dimension is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
  elif 0 <= dimension and dimension < input_shape.ndims:
    returned_shape = []
    for i, dim in enumerate(input_shape.dims):
      if i != dimension:
        returned_shape.append(dim)
    return [tensor_shape.TensorShape(returned_shape)]
  else:
    raise ValueError(
        "dimension (%d) must be in the range [0, %d), where %d is the number "
        "of dimensions in the input"
        % (dimension, input_shape.ndims, input_shape.ndims))


@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
  """Common shape function for reduction ops."""
  input_shape = op.inputs[0].get_shape()
  reduction_indices = tensor_util.ConstantValue(op.inputs[1])
  keep_dims = op.get_attr("keep_dims")
  if reduction_indices is None or input_shape.ndims is None:
    if keep_dims:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    else:
      return [tensor_shape.unknown_shape()]

  # Turn reduction_indices from scalar to vector if necessary
  reduction_indices = np.ravel(reduction_indices)

  for reduction_index in reduction_indices:
    if reduction_index < 0 or reduction_index >= input_shape.ndims:
      raise ValueError("Invalid reduction dimension %d for input with %d "
                       "dimensions" % (reduction_index, input_shape.ndims))

  returned_dims = []
  if keep_dims:
    for i, dim in enumerate(input_shape.dims):
      if i in reduction_indices:
        returned_dims.append(1)
      else:
        returned_dims.append(dim)
  else:
    for i, dim in enumerate(input_shape.dims):
      if i not in reduction_indices:
        returned_dims.append(dim)
  return [tensor_shape.TensorShape(returned_dims)]


@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
  """Common shape function for segment reduction ops."""
  data_shape = op.inputs[0].get_shape()
  segment_ids_shape = op.inputs[1].get_shape()
  segment_ids_shape.assert_has_rank(1)
  return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]


@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
  """Common shape function for sparse segment reduction ops."""
  data_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape()
  indices_shape.assert_has_rank(1)
  segment_ids_shape = op.inputs[2].get_shape()
  segment_ids_shape.assert_has_rank(1)
  indices_shape.assert_is_compatible_with(segment_ids_shape)
  return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]


@ops.RegisterShape("SparseSegmentMeanGrad")
def _SparseSegmentMeanGradShape(op):
  """Shape function for the SparseSegmentMeanGrad op."""
  input_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape().with_rank(1)
  unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
  unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
      tensor_shape.scalar())
  output_dim0 = tensor_util.ConstantValue(op.inputs[3])
  if output_dim0 is not None:
    dim0 = output_dim0[0]
  else:
    dim0 = None
  return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]


@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
  """Shape function for UnsortedSegmentSum."""
  data_shape = op.inputs[0].get_shape()
  segment_ids_shape = op.inputs[1].get_shape()
  mid = segment_ids_shape.ndims
  if mid is None:
    return [tensor_shape.unknown_shape()]
  else:
    num_segments = tensor_util.ConstantValue(op.inputs[2])
    return [tensor_shape.TensorShape([num_segments]).concatenate(
        data_shape[mid:])]


@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
  num = tensor_util.ConstantValue(op.inputs[2])
  return [tensor_shape.vector(num)]
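The `_BroadcastShape` function above applies NumPy-style broadcasting rules to static shapes. The following is an editor-added, minimal sketch of that same rule over plain Python tuples, not part of the original file: the helper name `broadcast_static_shape` and the use of `None` for an unknown dimension are ad hoc choices, not TensorFlow APIs.

# Illustrative sketch only: NumPy-style broadcasting of two static shapes,
# mirroring the rule implemented by _BroadcastShape above.
def broadcast_static_shape(shape_x, shape_y):
  """Return the broadcast shape of two static shapes (None = unknown dim)."""
  # Right-align the shapes and pad the shorter one with 1s; this mirrors the
  # zip_longest(..., fillvalue=Dimension(1)) step in the function above.
  ndims = max(len(shape_x), len(shape_y))
  padded_x = (1,) * (ndims - len(shape_x)) + tuple(shape_x)
  padded_y = (1,) * (ndims - len(shape_y)) + tuple(shape_y)
  result = []
  for dim_x, dim_y in zip(padded_x, padded_y):
    if dim_x is None or dim_y is None:
      # Unknown dimension: if the other side is known to be > 1, assume it
      # wins; otherwise the result stays unknown.
      if dim_x is not None and dim_x > 1:
        result.append(dim_x)
      elif dim_y is not None and dim_y > 1:
        result.append(dim_y)
      else:
        result.append(None)
    elif dim_x == 1:
      result.append(dim_y)
    elif dim_y == 1:
      result.append(dim_x)
    elif dim_x == dim_y:
      result.append(dim_x)
    else:
      raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                       % (shape_x, shape_y))
  return tuple(result)


assert broadcast_static_shape((8, 1, 6, 1), (7, 1, 5)) == (8, 7, 6, 5)
assert broadcast_static_shape((None, 1), (1, 4)) == (None, 4)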
[ [ "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.gen_math_ops._range", "tensorflow.python.ops.gen_math_ops.complex_abs", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.gen_state_ops._destroy_temporary_variable", "tensorflow.python.framework.tensor_util.ConstantValue", "tensorflow.python.ops.gen_math_ops._sigmoid", "tensorflow.python.ops.gen_state_ops._temporary_variable", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.framework.ops.Tensor._override_operator", "tensorflow.python.ops.gen_math_ops._pow", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.ops.gen_math_ops._complex", "tensorflow.python.framework.ops.SparseTensor", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.framework.tensor_shape.Dimension", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.gen_math_ops._mat_mul", "tensorflow.python.framework.ops.op_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.gen_math_ops.cast", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.gen_math_ops._tanh", "numpy.ravel", "tensorflow.python.framework.tensor_shape.vector", "tensorflow.python.ops.gen_math_ops._abs" ] ]