repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
govindap/lyft_motion_prediction | [
"15412444fec69ce4a0082d8de730cb882833eab0"
] | [
"lyft_CNN.py"
] | [
"import numpy as np\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchvision.models.resnet import resnet50, resnet34\nfrom torch import Tensor\nfrom typing import Dict\n\nfrom l5kit.configs import load_config_data\nfrom l5kit.data import LocalDataManager, ChunkedDataset\nfrom l5kit.dataset import AgentDataset, EgoDataset\nfrom l5kit.rasterization import build_rasterizer\nfrom l5kit.evaluation import write_pred_csv, compute_metrics_csv, read_gt_csv, create_chopped_dataset\nfrom l5kit.evaluation.chop_dataset import MIN_FUTURE_STEPS\nfrom l5kit.evaluation.metrics import neg_multi_log_likelihood, time_displace\nfrom l5kit.geometry import transform_points\nfrom l5kit.visualization import PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR, draw_trajectory\nfrom pathlib import Path\nimport pandas as pd\nimport os\nimport random\nimport time\nimport gc, psutil\n\ncfg = {\n 'format_version': 4,\n 'model_params': {\n 'model_architecture': \"resnet34\",\n 'history_num_frames': 10,\n 'history_step_size': 1,\n 'history_delta_time': 0.1,\n 'future_num_frames': 50,\n 'future_step_size': 1,\n 'future_delta_time': 0.1,\n 'model_name': \"model_resnet34\",\n 'lr': 1e-3,\n 'train': True,\n 'predict': True\n },\n\n 'raster_params': {\n 'raster_size': [224, 224],\n 'pixel_size': [0.5, 0.5],\n 'ego_center': [0.25, 0.5],\n 'map_type': 'py_semantic',\n 'satellite_map_key': 'aerial_map/aerial_map.png',\n 'semantic_map_key': 'semantic_map/semantic_map.pb',\n 'dataset_meta_key': 'meta.json',\n 'filter_agents_threshold': 0.5\n },\n\n 'train_data_loader': {\n 'key': 'scenes/train.zarr',\n 'batch_size': 16,\n 'shuffle': True,\n 'num_workers': 0\n },\n 'test_data_loader': {\n 'key': 'scenes/test.zarr',\n 'batch_size': 16,\n 'shuffle': False,\n 'num_workers': 0,\n },\n 'train_params': {\n 'steps': 120,\n 'update_steps': 50,\n 'checkpoint_steps': 100,\n 'precision': True\n }\n}\n\nclass LyftCNNModel(nn.Module):\n\n def __init__(self, cfg: Dict, num_modes=3):\n super().__init__()\n\n architecture = cfg[\"model_params\"][\"model_architecture\"]\n backbone = eval(architecture)(pretrained=True, progress=True)\n self.backbone = backbone\n\n num_history_channels = (cfg[\"model_params\"][\"history_num_frames\"] + 1) * 2\n num_in_channels = 3 + num_history_channels\n\n self.backbone.conv1 = nn.Conv2d(\n num_in_channels,\n self.backbone.conv1.out_channels,\n kernel_size=self.backbone.conv1.kernel_size,\n stride=self.backbone.conv1.stride,\n padding=self.backbone.conv1.padding,\n bias=False,\n )\n\n if architecture == \"resnet50\":\n backbone_out_features = 2048\n else:\n backbone_out_features = 512\n\n # X, Y coords for the future positions (output shape: batch_sizex50x2)\n self.future_len = cfg[\"model_params\"][\"future_num_frames\"]\n num_targets = 2 * self.future_len\n\n # You can add more layers here.\n self.head = nn.Sequential(\n # nn.Dropout(0.2),\n nn.Linear(in_features=backbone_out_features, out_features=4096),\n )\n\n self.num_preds = num_targets * num_modes\n self.num_modes = num_modes\n\n self.logit = nn.Linear(4096, out_features=self.num_preds + num_modes)\n\n def forward(self, x):\n x = self.backbone.conv1(x)\n x = self.backbone.bn1(x)\n x = self.backbone.relu(x)\n x = self.backbone.maxpool(x)\n\n x = self.backbone.layer1(x)\n x = self.backbone.layer2(x)\n x = self.backbone.layer3(x)\n x = self.backbone.layer4(x)\n\n x = self.backbone.avgpool(x)\n x = torch.flatten(x, 1)\n\n x = self.head(x)\n x = self.logit(x)\n\n # pred (batch_size)x(modes)x(time)x(2D coords)\n # confidences 
(batch_size)x(modes)\n bs, _ = x.shape\n pred, confidences = torch.split(x, self.num_preds, dim=1)\n pred = pred.view(bs, self.num_modes, self.future_len, 2)\n assert confidences.shape == (bs, self.num_modes)\n confidences = torch.softmax(confidences, dim=1)\n return pred, confidences\n\n"
] | [
[
"torch.softmax",
"torch.nn.Conv2d",
"torch.flatten",
"torch.nn.Linear",
"torch.split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
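Each record above pairs a source file (the `code` cell) with the fully qualified library calls detected in it (`apis`) and the library releases those calls could be compatible with (`possible_versions`). A minimal sketch of how such an `apis` list could be reproduced by walking the file's AST — illustrative only, not the dataset's actual extraction pipeline, and it only catches calls rooted at a plain module name:

```python
# Illustrative only (not the dataset's extraction tooling): collect dotted calls
# such as torch.softmax or torch.flatten from a source string, roughly matching
# the `apis` column of the record above.
import ast

def extract_api_calls(source: str, roots=("torch", "numpy", "tensorflow")) -> set:
    calls = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            parts, cur = [], node.func
            while isinstance(cur, ast.Attribute):   # unwind x.y.z(...) into parts
                parts.append(cur.attr)
                cur = cur.value
            if isinstance(cur, ast.Name) and cur.id in roots:
                parts.append(cur.id)
                calls.add(".".join(reversed(parts)))
    return calls

# extract_api_calls(code) on the file above yields e.g. "torch.flatten" and
# "torch.split"; calls made through aliased imports (nn.Conv2d) would also need
# import tracking to resolve to "torch.nn.Conv2d" as listed in `apis`.
```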
zhouhan921001/DeepLearning-homework | [
"20562dc49ca5898b531a678c0e54c8d985fcc72f"
] | [
"DLCoursera_part1_week4_1.py"
] | [
"import numpy as np\nfrom dnn_utils import sigmoid,sigmoid_backward,relu,relu_backward\n\ndef initialize_two_layer(n_x,n_h,n_y):\n\n\tW1 = np.random.randn(n_h,n_x) * 0.01\n\tb1 = np.zeros(n_h,1)\n\tW2 = np.random.randn(n_y,n_h) * 0.01\n\tb2 = np.zeros(n_y,1)\n\n\tparam = {\"W1\":W1,\"b1\":b1,\"W2\":W2,\"b2\":b2}\n\n\treturn param\n\ndef initialize_l_layer(layer_dims):\n\t\n\tparam = {}\n\tL = len(layer_dims)\n\n\tfor l in range(1, L):\n\t\tparam['W' + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1]) * 0.01\n\t\tparam['b' + str(l)] = np.zeros(layer_dims[l],1)\n\n\treturn param\n\ndef linear_forward(W,A,b):\n\t\"\"\"\n\tImplement the linear part of neural unit\n\t\"\"\"\n\n\tZ = np.dot(W,A) + b\n\n\treturn Z\n\ndef linear_activation_forward(A_pre,W,b,activation):\n\t\"\"\"\n\tImplement neural unit with the activation of Relu or sigmoid\n\t\"\"\"\n\n\tif activation == \"Relu\":\n\n\t\tZ = linear_forward(W,A_pre,b)\n\t\tA,activation_cache = relu(Z)\n\n\telif activation == \"sigmoid\":\n\n\t\tZ = linear_forward(W,A_pre,b)\n\t\tA,activation_cache = sigmoid(Z)\n\n\t\tbackward_used_cache = (A_pre,W,b)\n\t\tcache = (backward_used_cache,activation_cache)\n\treturn A,cache\n\ndef L_model_forward(X,param):\n\t\"\"\"\n\tImplement forward propagation for L layers model\n\t\"\"\"\n\n\tcaches = []\n\tL = len(param) // 2\n\tA = X\n\n\tfor l in range(1,L):\n\n\t\tA,cache = linear_activation_forward(A,param['W'+str(l)],param['b'+str(l)],Relu)\n\t\tcaches.append(cache)\n\n\tAl,cache = linear_activation_forward(A,param['W'+str(l)],param['b'+str(l)],Relu)\n\tcaches.append(cache)\n\n\treturn Al,caches\n\ndef linear_backward(dz,cache):\n\t\"\"\"\n\tImplement the backward propagation of linear part\n\t\"\"\"\n\n\tm = dz.shape[1]\n\tdw = np.dot(dz,cache[0]) / m\n\tdb = np.sum(dz) / m\n\tdA_pre = np.dot(cache[1],dz)\n\n\treturn dw,db,dA_pre\n\ndef linear_activation_backward(dA,cache,activation):\n\t\"\"\"\n\tImplement the backward propagation of neural unit\n\t\"\"\"\n\n\tif activation == \"Relu\":\n\t\tdz = relu_backward(dA,cache[1])\n\n\telif activation == \"sigmoid\":\n\t\tdz = sigmoid_backward(dA,cache[1])\n\n\tdw,db,dA_pre = linear_backward(dz,cache[0])\n\n\treturn dw,db,dA_pre\n\ndef L_model_backward(AL,Y,caches):\n\t\"\"\"\n\tImplement the backward propagation for L layer model\n\t\"\"\"\n\tgrads = {}\n\tL = len(caches)\n\n\tdAl = - (np.divide(Y,AL) - np.divide(1-Y,1-AL))\n\tgrads['dw'+str(L)],grads['db'+str(L)],grads['dA'+str(L)] = linear_activation_backward(dAL,caches[-1],\"sigmoid\")\n\n\tfor l in reversed(range(L-1)):\n\t\tcache = caches[l]\n\t\tgrads['dw'+str(l+1)],grads['db'+str(l+1)],grads['dA'+str(l+1)] = linear_activation_backward(grads['dA'+str(l+2)],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcache,\"Relu\")\n\treturn grads\n\ndef update_param(param,grads,learning_rate):\n\t\"\"\"\n\tUpdate the parameters\n\t\"\"\"\n\n\tL = len(param) // 2\n\tfor l in range(L):\n\t\tparam['W'+str(l+1)] = param['W'+str(l+1)] - learning_rate * grads['W'+str(l+1)]\n\t\tparam['b'+str(l+1)] = param['b'+str(l+1)] - learning_rate * grads['b'+str(l+1)]\n\n\treturn param\n"
] | [
[
"numpy.dot",
"numpy.random.randn",
"numpy.zeros",
"numpy.sum",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
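A detail worth noting in this record: the initializers call `np.zeros(n_h, 1)`, but NumPy's `zeros` takes the shape as a single tuple (the second positional argument is `dtype`), so these calls raise a `TypeError`; the forward pass also passes the bare name `Relu` where the string `"Relu"` is expected. A minimal corrected sketch of the two-layer initializer, for comparison only:

```python
# Corrected two-layer initializer (comparison sketch, not part of the original file):
# np.zeros expects the shape as one tuple, e.g. np.zeros((n_h, 1)).
import numpy as np

def initialize_two_layer(n_x, n_h, n_y):
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))
    return {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
```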
rmodi6/word-representations | [
"4f9a13cee9ff60ce3c667c833330b59de774ed39"
] | [
"word2vec_basic.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport os, sys\nimport random\nimport zipfile\n\nimport numpy as np\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nimport loss_func as tf_func\n\nimport pickle\nfrom collections import namedtuple\n\n\n\n\nWord2Vec = namedtuple('Word2Vec', ['train_inputs', 'train_labels', 'loss', 'optimizer', 'global_step',\n 'embeddings', 'normalized_embeddings', 'valid_embeddings','similarity', \n 'saver','summary', 'summary_writer'])\n\ndef maybe_create_path(path):\n if not os.path.exists(path):\n os.mkdir(path)\n print (\"Created a path: %s\"%(path))\n\n\ndef maybe_download(filename, expected_bytes):\n #Download a file if not present, and make sure it's the right size.\n if not os.path.exists(filename):\n print('Downloading %s'%(url+filename))\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename\n\n\n\n# Read the data into a list of strings.\ndef read_data(filename):\n #Extract the first file enclosed in a zip file as a list of words\n with zipfile.ZipFile(filename) as f:\n data = tf.compat.as_str(f.read(f.namelist()[0])).split()\n return data\n\n\n\ndef build_dataset(words):\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reverse_dictionary\n\n\ndef generate_batch(data, batch_size, num_skips, skip_window):\n \"\"\"\n Write the code generate a training batch\n\n @data_index: the index of a word. You can access a word using data[data_index]\n @batch_size: the number of instances in one batch\n @num_skips: the number of samples you want to draw in a window \n (In the below example, it was 2)\n @skip_windows: decides how many words to consider left and right from a context word. \n (So, skip_windows*2+1 = window_size)\n \n batch will contain word ids for context words. Dimension is [batch_size].\n labels will contain word ids for predicting(target) words. 
Dimension is [batch_size, 1].\n\n\n \"\"\"\n\n global data_index\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n\n \"\"\"\n =================================================================================\n\n You will generate small subset of training data, which is called batch.\n For skip-gram model, you will slide a window\n and sample training instances from the data insdie the window.\n\n Here is a small example.\n Suppose that we have a text: \"The quick brown fox jumps over the lazy dog.\"\n And batch_size = 8, window_size = 3\n\n \"[The quick brown] fox jumps over the lazy dog\"\n\n Context word would be 'quick' and predicting words are 'The' and 'brown'.\n This will generate training examples:\n context(x), predicted_word(y)\n (quick , The)\n (quick , brown)\n\n And then move the sliding window.\n \"The [quick brown fox] jumps over the lazy dog\"\n In the same way, we have to two more examples:\n (brown, quick)\n (brown, fox)\n\n move thd window again,\n \"The quick [brown fox jumps] over the lazy dog\"\n and we have\n (fox, brown)\n (fox, jumps)\n\n Finally we get two instance from the moved window,\n \"The quick brown [fox jumps over] the lazy dog\"\n (jumps, fox)\n (jumps, over)\n\n Since now we have 8 training instances, which is the batch size,\n stop generating batch and return batch data.\n\n\n ===============================================================================\n \"\"\"\n # Initialize batch_count to 0\n batch_count = 0\n while batch_count < batch_size: # Continue while we haven't generated required number of batches\n # Re-initialize data_index so that there are skip_window words on either side of data_index\n if (data_index - skip_window) < 0 or (data_index + skip_window) >= len(data):\n data_index = skip_window\n left_context_word = data_index - 1 # Index for outer words on left side of data_index\n right_context_word = data_index + 1 # Index for outer words on right side of data_index\n for x in range(skip_window): # Loop skip_window times\n batch[batch_count] = data[data_index] # Add data_index word to batch as center word\n labels[batch_count, 0] = data[left_context_word] # Add left index word to labels as target word\n batch[batch_count+1] = data[data_index] # Add data_index word to batch as center word\n labels[batch_count+1, 0] = data[right_context_word] # Add right index word to labels as target word\n batch_count += 2 # Increment batch_count by 2 as we added 2 words: one from left and one from right\n left_context_word -= 1 # Move left index towards left\n right_context_word += 1 # Move right index towards right\n data_index += 1 # Increment data_index making next word as center word\n return batch, labels # Return the generated batches and labels\n\n\ndef build_model(sess, graph, loss_model):\n \"\"\"\n Builds a tensor graph model\n \"\"\"\n model = None\n with graph.as_default():\n # Ops and variables pinned to the CPU because of missing GPU implementation\n with tf.device('/cpu:0'):\n # Input data.\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n\n global_step = tf.Variable(0, trainable=False)\n\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n embed = 
tf.nn.embedding_lookup(embeddings, train_inputs)\n\n sm_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n\n # Get context embeddings from lables\n true_w = tf.nn.embedding_lookup(sm_weights, train_labels)\n true_w = tf.reshape(true_w, [-1, embedding_size])\n\n\n # Construct the variables for the NCE loss \n nce_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n if loss_model == 'cross_entropy':\n loss = tf.reduce_mean(tf_func.cross_entropy_loss(embed, true_w))\n else:\n #sample negative examples with unigram probability\n sample = np.random.choice(vocabulary_size, num_sampled, p=unigram_prob, replace=False)\n\n loss = tf.reduce_mean(tf_func.nce_loss(embed, nce_weights, nce_biases, train_labels, sample, unigram_prob))\n\n # tf.summary.scalar('loss', loss)\n\n # Construct the SGD optimizer using a learning rate of 1.0.\n optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss, global_step=global_step)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n \n saver = tf.train.Saver(tf.global_variables())\n\n # Save summary\n # summary = tf.summary.merge_all()\n # summary_writer = tf.summary.FileWriter(summary_path + '/summary', sess.graph)\n summary = None\n summary_writer = None\n\n tf.global_variables_initializer().run()\n print(\"Initialized\")\n\n model = Word2Vec(train_inputs, train_labels, loss, optimizer, global_step, embeddings, \n normalized_embeddings, valid_embeddings, similarity, saver, summary, summary_writer)\n\n return model\n\n\ndef load_pretrained_model(sess, model, pretrained_model_path):\n if not os.path.exists(filename):\n print(\"Missing pre-trained model: [%s]\"%(pretrained_model_path)) \n return\n\n ckpt = tf.train.get_checkpoint_state(pretrained_model_path)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(sess, ckpt.model_checkpoint_path)\n\n\ndef train(sess, model, data, dictionary, batch_size, num_skips, skip_window, \n max_num_steps, checkpoint_step, loss_model):\n \n average_loss_step = max(checkpoint_step/10, 100)\n\n average_loss = 0\n for step in xrange(max_num_steps):\n batch_inputs, batch_labels = generate_batch(data, batch_size, num_skips, skip_window)\n feed_dict = {model.train_inputs.name: batch_inputs, model.train_labels.name: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n # _, loss_val, summary = sess.run([model.optimizer, model.loss, model.summary], feed_dict=feed_dict)\n _, loss_val = sess.run([model.optimizer, model.loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % average_loss_step == 0:\n if step > 0:\n average_loss /= average_loss_step\n # The average loss is an estimate of the loss over the last 2000 batches.\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n # model.summary_writer.add_summary(summary, model.global_step.eval())\n # 
model.summary_writer.flush()\n\n # Note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % checkpoint_step == 0:\n sim = model.similarity.eval()\n for i in xrange(valid_size):\n valid_word = reverse_dictionary[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k + 1]\n log_str = \"Nearest to %s:\" % valid_word\n for k in xrange(top_k):\n close_word = reverse_dictionary[nearest[k]]\n log_str = \"%s %s,\" % (log_str, close_word)\n print(log_str)\n # chkpt_path = os.path.join(checkpoint_model_path, 'w2v_%s.cpkt'%(loss_model))\n # model.saver.save(sess, chkpt_path, global_step=model.global_step.eval())\n\n\n # model.summary_writer.close()\n\n # Saving the final embedding to a file \n final_embeddings = model.normalized_embeddings.eval()\n\n return final_embeddings\n\n\n\n\nif __name__ == '__main__':\n\n loss_model = 'cross_entropy'\n if len(sys.argv) > 1:\n if sys.argv[1] == 'nce':\n loss_model = 'nce'\n\n\n ####################################################################################\n # Step 1: Download the data.\n url = 'http://mattmahoney.net/dc/'\n filename = maybe_download('text8.zip', 31344016)\n\n\n words = read_data(filename)\n print('Data size', len(words))\n\n\n ####################################################################################\n # Step 2: Build the dictionary and replace rare words with UNK token.\n vocabulary_size = 100000 \n\n data, count, dictionary, reverse_dictionary = build_dataset(words)\n del words # Hint to reduce memory.\n print('Most common words (+UNK)', count[:5])\n print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])\n\n #Calculate the probability of unigrams\n unigram_cnt = [c for w, c in count]\n total = sum(unigram_cnt)\n unigram_prob = [c*1.0/total for c in unigram_cnt]\n\n data_index = 0\n\n\n ####################################################################################\n # Step 3: Test the function that generates a training batch for the skip-gram model.\n # TODO You must implement this method \"generate_batch\"\n # Uncomment below to check batch output\n\n # batch, labels = generate_batch(data, batch_size=8, num_skips=2, skip_window=1)\n # for i in range(8):\n # print(batch[i], reverse_dictionary[batch[i]],\n # '->', labels[i, 0], reverse_dictionary[labels[i, 0]])\n\n\n ####################################################################################\n # Hyper Parameters to config\n batch_size = 128\n embedding_size = 128 # Dimension of the embedding vector.\n skip_window = 4 # How many words to consider left and right.\n num_skips = 8 # How many times to reuse an input to generate a label.\n\n\n # We pick a random validation set to sample nearest neighbors. 
Here we limit the\n # validation samples to the words that have a low numeric ID, which by\n # construction are also the most frequent.\n valid_size = 16 # Random set of words to evaluate similarity on.\n valid_window = 100 # Only pick dev samples in the head of the distribution.\n valid_examples = np.random.choice(valid_window, valid_size, replace=False)\n num_sampled = 64 # Number of negative examples to sample.\n\n # summary_path = './summary_%s'%(loss_model)\n pretrained_model_path = './pretrained/'\n\n checkpoint_model_path = './checkpoints_%s/'%(loss_model)\n model_path = './models'\n\n \n # maximum training step\n max_num_steps = 200001\n checkpoint_step = 50000\n \n\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n\n ####################################################################################\n # Step 4: Build and train a skip-gram model.\n model = build_model(sess, graph, loss_model)\n\n # You must start with the pretrained model. \n # If you want to resume from your checkpoints, change this path name\n\n load_pretrained_model(sess, model, pretrained_model_path)\n\n\n ####################################################################################\n # Step 6: Begin training.\n maybe_create_path(checkpoint_model_path)\n embeddings = train(sess, model, data, dictionary, batch_size, num_skips, skip_window, \n max_num_steps, checkpoint_step, loss_model)\n\n\n ####################################################################################\n # Step 7: Save the trained model.\n trained_steps = model.global_step.eval()\n\n maybe_create_path(model_path)\n model_filepath = os.path.join(model_path, 'word2vec_%s.model'%(loss_model))\n print(\"Saving word2vec model as [%s]\"%(model_filepath))\n pickle.dump([dictionary, trained_steps, embeddings], open(model_filepath, 'w'))\n\n"
] | [
[
"tensorflow.device",
"tensorflow.zeros",
"tensorflow.global_variables",
"numpy.ndarray",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.matmul",
"numpy.random.choice",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.embedding_lookup",
"tensorflow.train.checkpoint_exists",
"tensorflow.train.get_checkpoint_state",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
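This is the only record in the excerpt whose `possible_versions` cell is populated: the file relies on TensorFlow 1.x-only symbols (`tf.placeholder`, `tf.Session`, `tf.random_uniform`, `tf.train.GradientDescentOptimizer`), which is consistent with the "1.10" tag. A minimal sketch — an assumed helper, not part of the dataset tooling — of checking which of a record's `apis` resolve against the installed library:

```python
# Assumed helper (illustrative): check whether a dotted API name from the `apis`
# column resolves to an attribute of the installed library.
import importlib

def api_resolves(dotted_name: str) -> bool:
    module_name, _, attr_path = dotted_name.partition(".")
    try:
        obj = importlib.import_module(module_name)
        for part in attr_path.split("."):
            obj = getattr(obj, part)
        return True
    except (ImportError, AttributeError):
        return False

# "tensorflow.placeholder" resolves under TF 1.x but not under TF 2.x,
# consistent with the "1.10" entry in possible_versions above.
print(api_resolves("tensorflow.placeholder"))
```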
daniel20162016/my-first | [
"f9554dd476302b26e8a296393025f150922f349c",
"f9554dd476302b26e8a296393025f150922f349c",
"f9554dd476302b26e8a296393025f150922f349c"
] | [
"read_xml_all/calcul_matrix_compare_ce_good_192matrix.py",
"read_xml_all/calcul_matrix_compare_ou_good_192matrix.py",
"read_xml_all/calcul_matrix_compare_il_good_192matrix.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 31 15:45:22 2016\n\n@author: wang\n\"\"\"\n#from matplotlib import pylab as plt\n#from numpy import fft, fromstring, int16, linspace\n#import wave\n\nfrom read_wav_xml_good_1 import*\nfrom matrix_24_2 import*\nfrom max_matrix_norm import*\n\nimport numpy as np\n# open a wave file\nfilename = 'francois_filon_pure_3.wav'\nfilename_1 ='francois_filon_pure_3.xml'\nword ='ce'\n\nwave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)\n#print 'word_start_point=',word_start_point\n#print 'word_length_point=',word_length_point\n#print 'word_end_point=',word_end_point\n\nXJ_1 =wave_signal_float\n\nt_step=1920;\nt_entre_step=1440;\n\nt_du_1_1 = int(word_start_point[0]);\nt_du_1_2 = int(word_end_point[0]);\n\nt_du_2_1 = int(word_start_point[1]);\nt_du_2_2 = int(word_end_point[1]);\n\nt_du_3_1 = int(word_start_point[2]);\nt_du_3_2 = int(word_end_point[2]);\n\nt_du_4_1 = int(word_start_point[3]);\nt_du_4_2 = int(word_end_point[3]);\n\nt_du_5_1 = int(word_start_point[4]);\nt_du_5_2 = int(word_end_point[4]);\nfs=framerate\n#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];\n#length_XJ_du_1 = int(word_length_point[0]+1);\n#x1,y1,z1=matrix_24_2(XJ_du_1,fs)\n#x1=max_matrix_norm(x1)\n\n\n#==============================================================================\n# this part is to calcul the first matrix \n#==============================================================================\nXJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];\nx1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\nx1_1=max_matrix_norm(x1_1)\nmatrix_all_step_new_1 = np.zeros([192])\n\nfor i in range(0,24):\n matrix_all_step_new_1[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\nfor i in range(1,8):\n XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_1[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the second matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_2_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_2 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_2[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_2[24*i+j]=x1_all[j]\n \n#==============================================================================\n# this part is to calcul the 3 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_3_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_3 = np.zeros([192])\n 
for i in range(0,24):\n matrix_all_step_new_3[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24): \n matrix_all_step_new_3[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the 4 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_4_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_4 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_4[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_4[24*i+j]=x1_all[j]\n#print 'matrix_all_step_4=',matrix_all_step_4\n\n#==============================================================================\n# this part is to calcul the 5 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_5_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_5 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_5[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_5[24*i+j]=x1_all[j] \n#print 'matrix_all_step_5=',matrix_all_step_5\n\nnp.savez('ce_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 31 15:45:22 2016\n\n@author: wang\n\"\"\"\n#from matplotlib import pylab as plt\n#from numpy import fft, fromstring, int16, linspace\n#import wave\n\nfrom read_wav_xml_good_1 import*\nfrom matrix_24_2 import*\nfrom max_matrix_norm import*\n\nimport numpy as np\n# open a wave file\nfilename = 'francois_filon_pure_3.wav'\nfilename_1 ='francois_filon_pure_3.xml'\nword ='ou'\n\nwave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)\n#print 'word_start_point=',word_start_point\n#print 'word_length_point=',word_length_point\n#print 'word_end_point=',word_end_point\n\nXJ_1 =wave_signal_float\n\nt_step=1920;\nt_entre_step=1440;\n\nt_du_1_1 = int(word_start_point[0]);\nt_du_1_2 = int(word_end_point[0]);\n\nt_du_2_1 = int(word_start_point[1]);\nt_du_2_2 = int(word_end_point[1]);\n\nt_du_3_1 = int(word_start_point[2]);\nt_du_3_2 = int(word_end_point[2]);\n\nt_du_4_1 = int(word_start_point[3]);\nt_du_4_2 = int(word_end_point[3]);\n\nt_du_5_1 = int(word_start_point[4]);\nt_du_5_2 = int(word_end_point[4]);\nfs=framerate\n#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];\n#length_XJ_du_1 = int(word_length_point[0]+1);\n#x1,y1,z1=matrix_24_2(XJ_du_1,fs)\n#x1=max_matrix_norm(x1)\n\n\n#==============================================================================\n# this part is to calcul the first matrix \n#==============================================================================\nXJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];\nx1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\nx1_1=max_matrix_norm(x1_1)\nmatrix_all_step_new_1 = np.zeros([192])\n\nfor i in range(0,24):\n matrix_all_step_new_1[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\nfor i in range(1,8):\n XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_1[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the second matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_2_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_2 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_2[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_2[24*i+j]=x1_all[j]\n \n#==============================================================================\n# this part is to calcul the 3 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_3_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_3 = np.zeros([192])\n 
for i in range(0,24):\n matrix_all_step_new_3[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24): \n matrix_all_step_new_3[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the 4 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_4_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_4 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_4[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_4[24*i+j]=x1_all[j]\n#print 'matrix_all_step_4=',matrix_all_step_4\n\n#==============================================================================\n# this part is to calcul the 5 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_5_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_5 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_5[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_5[24*i+j]=x1_all[j] \n#print 'matrix_all_step_5=',matrix_all_step_5\n\nnp.savez('ou_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 31 15:45:22 2016\n\n@author: wang\n\"\"\"\n#from matplotlib import pylab as plt\n#from numpy import fft, fromstring, int16, linspace\n#import wave\n\nfrom read_wav_xml_good_1 import*\nfrom matrix_24_2 import*\nfrom max_matrix_norm import*\n\nimport numpy as np\n# open a wave file\nfilename = 'francois_filon_pure_3.wav'\nfilename_1 ='francois_filon_pure_3.xml'\nword ='il'\n\nwave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)\n#print 'word_start_point=',word_start_point\n#print 'word_length_point=',word_length_point\n#print 'word_end_point=',word_end_point\n\nXJ_1 =wave_signal_float\n\nt_step=1920;\nt_entre_step=1440;\n\nt_du_1_1 = int(word_start_point[0]);\nt_du_1_2 = int(word_end_point[0]);\n\nt_du_2_1 = int(word_start_point[1]);\nt_du_2_2 = int(word_end_point[1]);\n\nt_du_3_1 = int(word_start_point[2]);\nt_du_3_2 = int(word_end_point[2]);\n\nt_du_4_1 = int(word_start_point[3]);\nt_du_4_2 = int(word_end_point[3]);\n\nt_du_5_1 = int(word_start_point[4]);\nt_du_5_2 = int(word_end_point[4]);\nfs=framerate\n#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];\n#length_XJ_du_1 = int(word_length_point[0]+1);\n#x1,y1,z1=matrix_24_2(XJ_du_1,fs)\n#x1=max_matrix_norm(x1)\n\n\n#==============================================================================\n# this part is to calcul the first matrix \n#==============================================================================\nXJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];\nx1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\nx1_1=max_matrix_norm(x1_1)\nmatrix_all_step_new_1 = np.zeros([192])\n\nfor i in range(0,24):\n matrix_all_step_new_1[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\nfor i in range(1,8):\n XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_1[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the second matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_2_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_2 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_2[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_2[24*i+j]=x1_all[j]\n \n#==============================================================================\n# this part is to calcul the 3 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_3_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_3 = np.zeros([192])\n 
for i in range(0,24):\n matrix_all_step_new_3[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24): \n matrix_all_step_new_3[24*i+j]=x1_all[j]\n\n#==============================================================================\n# this part is to calcul the 4 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_4_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_4 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_4[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_4[24*i+j]=x1_all[j]\n#print 'matrix_all_step_4=',matrix_all_step_4\n\n#==============================================================================\n# this part is to calcul the 5 matrix\n#==============================================================================\nfor k in range (1,2):\n t_start=t_du_5_1\n XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];\n x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)\n x1_1=max_matrix_norm(x1_1)\n matrix_all_step_new_5 = np.zeros([192])\n for i in range(0,24):\n matrix_all_step_new_5[i]=x1_1[i]\n#==============================================================================\n# the other colonne is the all fft\n#==============================================================================\n for i in range(1,8):\n# print i\n XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];\n x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)\n x1_all=max_matrix_norm(x1_all)\n for j in range(0,24):\n matrix_all_step_new_5[24*i+j]=x1_all[j] \n#print 'matrix_all_step_5=',matrix_all_step_5\n\nnp.savez('il_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)\n\n"
] | [
[
"numpy.savez",
"numpy.zeros"
],
[
"numpy.savez",
"numpy.zeros"
],
[
"numpy.savez",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
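The three scripts in this record pass their five 192-element feature vectors to `np.savez` as positional arguments, so NumPy stores them under the default keys `arr_0` through `arr_4`. A short sketch of reading one of the generated archives back (file name taken from the first script; illustrative only):

```python
# Load the archive produced by np.savez(...) above; positional arrays are keyed arr_0..arr_4.
import numpy as np

with np.load("ce_compare_192_matrix.npz") as data:
    matrices = [data[f"arr_{i}"] for i in range(5)]   # one 192-value vector per word occurrence

for i, m in enumerate(matrices, start=1):
    print(f"matrix {i}: shape={m.shape}")             # expected: (192,)
```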
daniel-thom/PyDSS | [
"8c7ae2d3a17d596b42a92e33f7d29329e26fbc30"
] | [
"PyDSS/pyPostprocessor/PostprocessScripts/DERMSOptimizer_helper_modules/opt_funcs.py"
] | [
"import numpy as np\nfrom scipy.sparse import lil_matrix\nimport scipy.sparse.linalg as sp\nimport scipy.sparse as sparse\nimport math\nimport csv\nimport matplotlib.pyplot as plt\n\ndef linear_powerflow_model(Y00,Y01,Y10,Y11_inv,I_coeff,V1,slack_no):\n # voltage linearlization\n V1_conj = np.conj(V1[slack_no:])\n V1_conj_inv = 1 / V1_conj\n coeff_V = Y11_inv * V1_conj_inv\n coeff_V_P = coeff_V\n coeff_V_Q = -1j*coeff_V\n coeff_Vm = -np.dot(Y11_inv,np.dot(Y10,V1[:slack_no]))\n\n # voltage magnitude linearization\n m = coeff_Vm\n m_inv = 1 / coeff_Vm\n coeff_Vmag_k = abs(m)\n A = (np.multiply(coeff_V.transpose(),m_inv)).transpose()\n coeff_Vmag_P = (np.multiply(A.real.transpose(),coeff_Vmag_k)).transpose()\n coeff_Vmag_Q = (np.multiply((-1j*A).real.transpose(),coeff_Vmag_k)).transpose()\n\n # current linearization\n if len(I_coeff):\n coeff_I_P = np.dot(I_coeff[:,slack_no:],coeff_V_P)\n coeff_I_Q = np.dot(I_coeff[:,slack_no:],coeff_V_Q)\n coeff_I_const = np.dot(I_coeff[:,slack_no:],coeff_Vm) + np.dot(I_coeff[:,:slack_no],V1[:slack_no])\n else:\n coeff_I_P = []\n coeff_I_Q = []\n coeff_I_const = []\n\n #=========================================Yiyun's Notes===========================================#\n # Output relations: Vmag = coeff_Vmag_P * Pnode + coeff_Vmag_Q * Qnode + coeff_Vm\n # I = coeff_I_P * Pnode + coeff_I_Q * Qnode + coeff_I_const (complex value)\n # ================================================================================================#\n\n return coeff_V_P, coeff_V_Q, coeff_Vm, coeff_Vmag_P, coeff_Vmag_Q, coeff_Vmag_k, coeff_I_P, coeff_I_Q, coeff_I_const\n\ndef validate_linear_model(coeff_Vp,coeff_Vq,coeff_Vm,PQ_node,slack_number):\n V_cal = coeff_Vm + np.dot(coeff_Vp,np.array([np.real(ii)*1000 for ii in PQ_node[slack_number:]])) + np.dot(coeff_Vq,np.array([np.imag(ii)*1000 for ii in PQ_node[slack_number:]]))\n v_cal_1 = coeff_Vm + np.dot(coeff_Vp,np.conj(PQ_node[slack_number:]*1000))\n #coeff_Vp*Pnode + coeff_Vq*Qnode + coeff_Vm\n\n # =========================================Yiyun's Notes===========================================#\n # 1000 should be the S base\n # =================================================================================================#\n\n return [V_cal,v_cal_1]\n\ndef check_VI_correct(V1,PQ_node,slack_number,coeff_V,coeff_Vm,coeff_Vmag_P,coeff_Vmag_Q,coeff_Vmag_k,Y10,Y11,coeff_I_P, coeff_I_Q, coeff_I_const,I_coeff):\n V1_linear = np.dot(coeff_V,np.conj(PQ_node[slack_number:]*1000)) + coeff_Vm\n V1_linear = list(V1_linear)\n Vdiff = list(map(lambda x: abs(x[0]-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_linear)))\n print(sum(Vdiff))\n with open('voltage_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Vdiff)\n f.close()\n\n V1_mag_linear = np.dot(coeff_Vmag_P,(PQ_node[slack_number:]*1000).real) + np.dot(coeff_Vmag_Q,(PQ_node[slack_number:]*1000).imag) + coeff_Vmag_k\n V1_mag_linear = list(V1_mag_linear)\n Vdiff = list(map(lambda x: abs(abs(x[0])-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_mag_linear)))\n print(sum(Vdiff))\n with open('voltageMag_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Vdiff)\n f.close()\n\n # get Ibus \n Ibus = list(map(lambda x: (x[0]*1000/x[1]).conjugate(),zip(list(PQ_node)[slack_number:],V1[slack_number:])))\n Ibus_cal_0 = np.dot(Y10,V1[0:slack_number])\n Ibus_cal_1 = np.dot(Y11,V1[slack_number:])\n Ibus_cal = list(map(lambda x: x[0]+x[1],zip(Ibus_cal_0,Ibus_cal_1)))\n Idiff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibus,Ibus_cal)))\n print(sum(Idiff))\n with 
open('currentBus_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Idiff)\n f.close()\n\n # get Ibranch\n Ibranch = np.dot(I_coeff,V1)\n Ibranch_cal = np.dot(I_coeff[:,slack_number:],V1_linear)+np.dot(I_coeff[:,0:slack_number],V1[:slack_number])\n Ibranch_diff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibranch,Ibranch_cal)))\n print(sum(Ibranch_diff))\n with open('current_diff.csv','w') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(Ibranch_diff)\n f.close()\n\ndef costFun(x,dual_upper,dual_lower,v1_pu,Ppv_max,coeff_p,coeff_q,NPV,control_bus_index,Vupper,Vlower,dual_current,ThermalLimit,I1_mag):\n # cost_function = coeff_p*(Pmax-P)^2+coeff_q*Q^2+dual_upper*(v1-1.05)+dual_lower*(0.95-v1)\n f1 = 0\n for ii in range(NPV):\n f1 = f1 + coeff_p*(Ppv_max[ii]-x[ii])*(Ppv_max[ii]-x[ii])+coeff_q*x[ii+NPV]*x[ii+NPV]\n #f = f1 + np.dot(dual_upper,(np.array(v1_pu)[control_bus_index]-Vupper)) + np.dot(dual_lower,(Vlower-np.array(v1_pu)[control_bus_index]))\n v_evaluate = [v1_pu[ii] for ii in control_bus_index]\n f2 = f1 + np.dot(dual_upper,np.array([max(ii-Vupper,0) for ii in v_evaluate])) + np.dot(dual_lower,np.array([max(Vlower-ii,0) for ii in v_evaluate]))\n f3 = np.dot(dual_current,np.array([max(ii,0) for ii in list(map(lambda x: x[0]*x[0]-x[1]*x[1],zip(I1_mag,ThermalLimit)))]))\n f = f2+f3\n\n # =========================================Yiyun's Notes===========================================#\n # f1 is the quadratic PV curtailment plus quadratic reactive power injection\n # f2 is the Lagrangian term for voltage violations and line current violations\n # ===> Note the \"control_bus_index\" might be the index for measurement sensitivity analysis\n # =================================================================================================#\n\n return [f1,f]\n\ndef PV_costFun_gradient(x, coeff_p, coeff_q, Pmax):\n grad = np.zeros(len(x))\n for ii in range(int(len(x)/2)):\n grad[ii] = -2*coeff_p*(Pmax[ii]*1000-x[ii]*1000)\n grad[ii+int(len(x)/2)] = 2*coeff_q*x[ii+int(len(x)/2)]*1000\n #grad[ii + int(len(x) / 2)] = 0\n\n # =========================================Yiyun's Notes===========================================#\n # x is the decision vector [P,Q]\n # =================================================================================================#\n\n return grad\n\ndef voltage_constraint_gradient(AllNodeNames,node_withPV, dual_upper, dual_lower, coeff_Vmag_p, coeff_Vmag_q):\n node_noslackbus = AllNodeNames\n node_noslackbus[0:3] = []\n\n # =========================================Yiyun's Notes===========================================#\n # remove the slack bus\n # =================================================================================================#\n\n grad_upper = np.matrix([0] * len(node_noslackbus)*2).transpose()\n grad_lower = np.matrix([0] * len(node_noslackbus)*2).transpose()\n count = 0\n for node in node_noslackbus:\n if node in node_withPV:\n grad_upper[count] = dual_upper.transpose()*coeff_Vmag_p[:,count]\n grad_upper[count+len(node_noslackbus)] = dual_upper.transpose() * coeff_Vmag_q[:,count]\n grad_lower[count] = -dual_lower.transpose() * coeff_Vmag_p[:, count]\n grad_lower[count + len(node_noslackbus)] = -dual_lower.transpose() * coeff_Vmag_q[:, count]\n count = count + 1\n return [grad_upper,grad_lower]\n\ndef current_constraint_gradient(AllNodeNames,node_withPV, dual_upper,coeff_Imag_p, coeff_Imag_q):\n node_noslackbus = AllNodeNames\n node_noslackbus[0:3] = []\n grad_upper = np.matrix([0] * len(node_noslackbus)*2).transpose()\n count 
= 0\n for node in node_noslackbus:\n if node in node_withPV:\n grad_upper[count] = dual_upper.transpose()*coeff_Imag_p[:,count]\n grad_upper[count+len(node_noslackbus)] = dual_upper.transpose() * coeff_Imag_q[:,count]\n count = count + 1\n return grad_upper\n\n # =========================================Yiyun's Notes===========================================#\n # PV_costFun_gradient, voltage_constraint_gradient, current_constraint_gradient and project_PV..\n # ... are set up for updating the PV decision variables in eq(10)\n # =================================================================================================#\n\ndef voltage_constraint(V1_mag):\n g = V1_mag-1.05\n g.append(0.95-V1_mag)\n return g\n\ndef current_constraint(I1_mag,Imax):\n g = []\n g.append(I1_mag-Imax)\n\n # =========================================Yiyun's Notes===========================================#\n # assume single directional power flow\n # voltage_constraint, current_constraint, and project_dualvariable are set up for updating the dual...\n # ... variables in eq (11)\n # =================================================================================================#\n\n return g\n\ndef project_dualvariable(mu):\n for ii in range(len(mu)):\n mu[ii] = max(mu[ii],0)\n\n # =========================================Yiyun's Notes===========================================#\n # If the corresponding constraints in primal problem is in canonical form, then dual variable is >=0\n # =================================================================================================#\n\n return mu\n\ndef project_PV(x,Pmax,Sinv):\n Qavailable = 0\n Pavailable = 0\n num = len(Sinv)\n for ii in range(num):\n if x[ii] > Pmax[ii]:\n x[ii] = Pmax[ii]\n elif x[ii] < 0:\n x[ii] = 0\n\n if Sinv[ii] > x[ii]:\n Qmax = math.sqrt(Sinv[ii]*Sinv[ii]-x[ii]*x[ii])\n else:\n Qmax = 0\n if x[ii+num] > Qmax:\n x[ii+num] = Qmax\n # elif x[ii + num] < 0:\n # x[ii + num] = 0\n elif x[ii+num] < -Qmax:\n x[ii+num] = -Qmax\n\n Pavailable = Pavailable + Pmax[ii]\n Qavailable = Qavailable + Qmax\n return [x,Pavailable,Qavailable]\n\ndef dual_update(mu,coeff_mu,constraint):\n mu_new = mu + coeff_mu*constraint\n mu_new = project_dualvariable(mu_new)\n\n # =========================================Yiyun's Notes===========================================#\n # normal way for update Lagrangian variable is by the sub-gradient of cost function\n # Here is the equation (11) in the draft paper\n # =================================================================================================#\n\n return mu_new\n\ndef matrix_cal_for_subPower(V0, Y00, Y01, Y11, V1_noload):\n diag_V0 = np.matrix([[complex(0, 0)] * 3] * 3)\n diag_V0[0, 0] = V0[0]\n diag_V0[1, 1] = V0[1]\n diag_V0[2, 2] = V0[2]\n K = diag_V0 * Y01.conj() * np.linalg.inv(Y11.conj())\n g = diag_V0 * Y00.conj() * np.matrix(V0).transpose().conj() + diag_V0 * Y01.conj() * V1_noload.conj()\n return[K,g]\n\ndef subPower_PQ(V1, PQ_node, K, g):\n diag_V1 = np.matrix([[complex(0, 0)] * len(V1)] * len(V1))\n for ii in range(len(V1)):\n diag_V1[ii, ii] = V1[ii]\n M = K * np.linalg.inv(diag_V1)\n MR = M.real\n MI = M.imag\n P0 = g.real + (MR.dot(PQ_node.real)*1000 - MI.dot(PQ_node.imag)*1000)\n Q0 = g.imag + (MR.dot(PQ_node.imag)*1000 + MI.dot(PQ_node.real)*1000)\n\n P0 = P0/1000\n Q0 = Q0/1000 # convert to kW/kVar\n\n # =========================================Yiyun's Notes===========================================#\n # Power injection at substation/feeder head\n # 
=================================================================================================#\n\n return [P0, Q0, M]\n\ndef sub_costFun_gradient(x, sub_ref, coeff_sub, sub_measure, M, node_withPV):\n grad_a = np.matrix([0] * len(x)).transpose()\n grad_b = np.matrix([0] * len(x)).transpose()\n grad_c = np.matrix([0] * len(x)).transpose()\n\n MR = M.real\n MI = M.imag\n count = 0\n for node in node_withPV:\n grad_a[count] = -MR[0, int(node)]\n grad_b[count] = -MR[1, int(node)]\n grad_c[count] = -MR[2, int(node)]\n\n grad_a[count + len(node_withPV)] = MI[0, int(node)]\n grad_b[count + len(node_withPV)] = MI[1, int(node)]\n grad_c[count + len(node_withPV)] = MI[2, int(node)]\n\n count = count + 1\n\n res = coeff_sub * ((sub_measure[0] - sub_ref[0]) *1000* grad_a + (sub_measure[1] - sub_ref[1])*1000 * grad_b\n + (sub_measure[2] - sub_ref[2])*1000 * grad_c)\n res = res/1000\n\n return res\n\ndef projection(x,xmax,xmin):\n for ii in range(len(x)):\n if x.item(ii) > xmax[ii]:\n x[ii] = xmax[ii]\n if x.item(ii) < xmin[ii]:\n x[ii] = xmin[ii]\n return x\n\nclass DERMS:\n def __init__(self, pvData,controlbus,controlelem,controlelem_limit,sub_node_names,sub_elem_names):\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # PV_name: names of all PVs in the zone\n # PV_size: sizes of all PVs in the zone\n # PV_location: busnames of all PVs in the zone\n # controlbus: names of all controlled nodes\n # sub_node_names: names of all nodes in the zone\n # sub_node_names \"include\" controlbus\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n self.PV_name = pvData[\"pvName\"]\n self.PV_location = pvData[\"pvLocation\"]\n self.PV_size = pvData[\"pvSize\"]\n self.inverter_size = pvData[\"inverterSize\"]\n self.control_bus = controlbus\n\n sub_node_names = [ii.upper() for ii in sub_node_names]\n self.controlbus_index = [sub_node_names.index(ii.upper()) for ii in controlbus] # control bus index in the sub system (number)\n # here\n PVbus_index = []\n for bus in self.PV_location:\n temp = bus.split('.')\n if len(temp) == 1:\n temp = temp + ['1', '2', '3']\n for ii in range(len(temp) - 1):\n PVbus_index.append(sub_node_names.index((temp[0] + '.' 
+ temp[ii + 1]).upper()))\n\n # =========================================Yiyun's Notes===========================================#\n # adding .1 .2 .3 following the number to recognize the three phases.\n # =================================================================================================#\n self.PVbus_index = PVbus_index\n self.control_elem = controlelem\n self.controlelem_limit = controlelem_limit\n self.controlelem_index = [sub_elem_names.index(ii) for ii in controlelem] # control branches index in the sub system (number)\n\n def monitor(self, dss, dssObjects, PVSystem_1phase):\n PVpowers = []\n for pv in PVSystem_1phase[\"Name\"].tolist():\n nPhases = dssObjects[\"Generators\"][pv].GetValue(\"phases\")\n power = dssObjects[\"Generators\"][pv].GetValue(\"Powers\")\n PVpowers.append([sum(power[::2])/nPhases, sum(power[1::2])/nPhases])\n PVpowers = np.asarray(PVpowers)\n\n Vmes = []\n for bus in self.control_bus:\n busName = bus.split('.')[0].lower()\n Vmag = dssObjects[\"Buses\"][busName].GetValue(\"puVmagAngle\")[::2]\n allbusnode = dss.Bus.Nodes()\n phase = bus.split('.')[1]\n index = allbusnode.index(int(phase))\n Vnode = Vmag[index]\n Vmes.append(Vnode)\n\n Imes = []\n for elem in self.control_elem:\n className = elem.split('.')[0] + \"s\"\n I = dssObjects[className][elem].GetValue(\"CurrentsMagAng\")[::2][:3] #TODO: Why is there a hardcoded [:3] ?\n Imes.append(I)\n\n return [self.PV_location,PVpowers,Vmes,Imes]\n\n\n\n def control(self, linear_PF_coeff, Options,stepsize,mu0,Vlimit,PVpower,Imes,Vmes,PV_Pmax_forecast):\n coeff_p = Options[\"coeff_p\"]\n coeff_q = Options[\"coeff_q\"]\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # linear_PF_coeff is the linear power flow model coefficients for the zone, and linear power flow model\n # coefficients are the result vector from function \"linear_powerflow_model\"\n # coeff_p, coeff_q are constant coefficients in PV cost function\n # stepsize is a vector of stepsize constants\n # mu0 is the dual variable from last time step: mu_Vmag_upper0, mu_Vmag_lower0, mu_I0\n # Vlimit is the allowed voltage limit: Vupper and Vlower\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n PVname = self.PV_name\n NPV = len(PVname)\n x0 = np.zeros(2 * NPV)\n for ii in range(NPV):\n x0[ii] = -PVpower[ii][0] # in kW\n x0[ii + NPV] = -PVpower[ii][1] # in kVar\n\n #coeff_V_P = linear_PF_coeff[0]\n #coeff_V_Q = linear_PF_coeff[1]\n #coeff_Vm = linear_PF_coeff[2]\n coeff_Vmag_P = linear_PF_coeff[3]\n coeff_Vmag_Q = linear_PF_coeff[4]\n #coeff_Vmag_k = linear_PF_coeff[5]\n coeff_I_P = linear_PF_coeff[6]\n coeff_I_Q = linear_PF_coeff[7]\n #coeff_I_const = linear_PF_coeff[8]\n stepsize_xp = stepsize[0]\n stepsize_xq = stepsize[1]\n stepsize_mu = stepsize[2]\n Vupper = Vlimit[0]\n Vlower = Vlimit[1]\n\n controlbus_index = self.controlbus_index\n PVbus_index = self.PVbus_index\n controlelem_index = self.controlelem_index\n PV_inverter_size = self.inverter_size\n Imes_limit = self.controlelem_limit\n\n mu_Vmag_upper0 = mu0[0]\n mu_Vmag_lower0 = mu0[1]\n mu_I0 = mu0[2]\n\n #print([max(mu_Vmag_upper0),max(mu_Vmag_lower0)])\n # compute gradient\n\n PVcost_fun_gradient = PV_costFun_gradient(x0, coeff_p, coeff_q, PV_Pmax_forecast)\n\n Vmag_upper_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0),\n np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index], [ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0)),axis=0)\n Vmag_lower_gradient = 
np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0),\n np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0)),axis=0)\n\n Vmag_gradient = Vmag_upper_gradient - Vmag_lower_gradient\n if len(mu_I0)>0 :\n temp_real = mu_I0 * np.array(Imes.real)\n temp_imag = mu_I0 * np.array(Imes.imag)\n\n I_gradient_real = np.concatenate((np.dot(\n coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),\n temp_real), np.dot(\n coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),\n temp_real)), axis=0)\n I_gradient_imag = np.concatenate((np.dot(\n coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),\n temp_imag), np.dot(\n coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),\n temp_imag)), axis=0)\n I_gradient = 2 * I_gradient_real + 2 * I_gradient_imag\n else:\n I_gradient = 0\n\n gradient = PVcost_fun_gradient + Vmag_gradient + I_gradient / 1000\n\n # compute x1, mu1\n x1 = np.concatenate([x0[:NPV] - stepsize_xp * gradient[:NPV], x0[NPV:] - stepsize_xq * gradient[NPV:]])\n #print('solved: '+str(sum(x1[0:NPV]))+','+str(sum(x1[NPV:]))) # in kW/kVar\n [x1, Pmax_allPV, Qmax_allPV] = project_PV(x1, PV_Pmax_forecast, PV_inverter_size)\n #print('Available P = '+str(Pmax_allPV)+' , Available Q = '+str(Qmax_allPV))\n #print('projected: ' + str(sum(x1[0:NPV])) + ',' + str(sum(x1[NPV:]))) # in kW/kVar\n x1 = np.array([round(ii, 5) for ii in x1])\n\n mu_Vmag_lower1 = mu_Vmag_lower0 + stepsize_mu * (Vlower - np.array(Vmes))\n mu_Vmag_upper1 = mu_Vmag_upper0 + stepsize_mu * (np.array(Vmes) - Vupper)\n mu_Vmag_lower1 = project_dualvariable(mu_Vmag_lower1)\n mu_Vmag_upper1 = project_dualvariable(mu_Vmag_upper1)\n if mu_I0:\n mu_I1 = mu_I0 + stepsize_mu / 300 * np.array(list(map(lambda x: x[0] * x[0] - x[1] * x[1], zip(Imes, Imes_limit))))\n mu_I1 = project_dualvariable(mu_I1)\n else:\n mu_I1 = mu_I0\n mu1 = [mu_Vmag_upper1,mu_Vmag_lower1,mu_I1]\n # =========================================Yiyun's Notes===========================================#\n # Each time of calling DERMS.control, it is a one step update of PV real and reactive power outputs\n # =================================================================================================#\n\n return [x1,mu1]\n"
] | [
[
"numpy.matrix",
"numpy.dot",
"numpy.ix_",
"numpy.imag",
"numpy.conj",
"numpy.linalg.inv",
"numpy.asarray",
"numpy.concatenate",
"numpy.real",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
norberto-schmidt/openmc | [
"ff4844303154a68027b9c746300f5704f73e0875",
"ff4844303154a68027b9c746300f5704f73e0875",
"ff4844303154a68027b9c746300f5704f73e0875",
"ff4844303154a68027b9c746300f5704f73e0875"
] | [
"tests/unit_tests/test_data_photon.py",
"tests/unit_tests/test_polynomials.py",
"openmc/mgxs/mgxs.py",
"openmc/cmfd.py"
] | [
"#!/usr/bin/env python\n\nfrom collections.abc import Mapping, Callable\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport openmc.data\n\n\[email protected](scope='module')\ndef elements_endf():\n \"\"\"Dictionary of element ENDF data indexed by atomic symbol.\"\"\"\n endf_data = os.environ['OPENMC_ENDF_DATA']\n elements = {'H': 1, 'O': 8, 'Al': 13, 'Cu': 29, 'Ag': 47, 'U': 92, 'Pu': 94}\n data = {}\n for symbol, Z in elements.items():\n p_file = 'photoat-{:03}_{}_000.endf'.format(Z, symbol)\n p_path = os.path.join(endf_data, 'photoat', p_file)\n a_file = 'atom-{:03}_{}_000.endf'.format(Z, symbol)\n a_path = os.path.join(endf_data, 'atomic_relax', a_file)\n data[symbol] = openmc.data.IncidentPhoton.from_endf(p_path, a_path)\n return data\n\n\[email protected]()\ndef element(request, elements_endf):\n \"\"\"Element ENDF data\"\"\"\n return elements_endf[request.param]\n\n\[email protected](\n 'element, atomic_number', [\n ('Al', 13),\n ('Cu', 29),\n ('Pu', 94)\n ],\n indirect=['element']\n)\ndef test_attributes(element, atomic_number):\n assert element.atomic_number == atomic_number\n\n\[email protected](\n 'element, subshell, binding_energy, num_electrons', [\n ('H', 'K', 13.61, 1.0),\n ('O', 'L3', 14.15, 2.67),\n ('U', 'P2', 34.09, 2.0)\n ],\n indirect=['element']\n)\ndef test_atomic_relaxation(element, subshell, binding_energy, num_electrons):\n atom_relax = element.atomic_relaxation\n assert isinstance(atom_relax, openmc.data.photon.AtomicRelaxation)\n assert subshell in atom_relax.subshells\n assert atom_relax.binding_energy[subshell] == binding_energy\n assert atom_relax.num_electrons[subshell] == num_electrons\n\n\[email protected]('element', ['Al', 'Cu', 'Pu'], indirect=True)\ndef test_transitions(element):\n transitions = element.atomic_relaxation.transitions\n assert transitions\n assert isinstance(transitions, Mapping)\n for matrix in transitions.values():\n assert isinstance(matrix, pd.core.frame.DataFrame)\n assert len(matrix.columns) == 4\n assert sum(matrix['probability']) == pytest.approx(1.0)\n\n\[email protected](\n 'element, I, i_shell, ionization_energy, num_electrons', [\n ('H', 19.2, 0, 13.6, 1),\n ('O', 95.0, 2, 13.62, 4),\n ('U', 890.0, 25, 6.033, -3)\n ],\n indirect=['element']\n)\ndef test_bremsstrahlung(element, I, i_shell, ionization_energy, num_electrons):\n brems = element.bremsstrahlung\n assert isinstance(brems, Mapping)\n assert brems['I'] == I\n assert brems['num_electrons'][i_shell] == num_electrons\n assert brems['ionization_energy'][i_shell] == ionization_energy\n assert np.all(np.diff(brems['electron_energy']) > 0.0)\n assert np.all(np.diff(brems['photon_energy']) > 0.0)\n assert brems['photon_energy'][0] == 0.0\n assert brems['photon_energy'][-1] == 1.0\n assert brems['dcs'].shape == (200, 30)\n\n\[email protected](\n 'element, n_shell', [\n ('H', 1),\n ('O', 3),\n ('Al', 5)\n ],\n indirect=['element']\n)\ndef test_compton_profiles(element, n_shell):\n profile = element.compton_profiles\n assert profile\n assert isinstance(profile, Mapping)\n assert all(isinstance(x, Callable) for x in profile['J'])\n assert all(len(x) == n_shell for x in profile.values())\n\n\[email protected](\n 'element, reaction', [\n ('Cu', 541),\n ('Ag', 502),\n ('Pu', 504)\n ],\n indirect=['element']\n)\ndef test_reactions(element, reaction):\n reactions = element.reactions\n assert all(isinstance(x, openmc.data.PhotonReaction) for x in reactions.values())\n assert reaction in reactions\n with pytest.raises(KeyError):\n 
reactions[18]\n\n\[email protected]('element', ['Pu'], indirect=True)\ndef test_export_to_hdf5(tmpdir, element):\n filename = str(tmpdir.join('tmp.h5'))\n element.export_to_hdf5(filename)\n assert os.path.exists(filename)\n # Read in data from hdf5\n element2 = openmc.data.IncidentPhoton.from_hdf5(filename)\n # Check for some cross section and datasets of element and element2\n energy = np.logspace(np.log10(1.0), np.log10(1.0e10), num=100)\n for mt in (502, 504, 515, 517, 522, 541, 570):\n xs = element[mt].xs(energy)\n xs2 = element2[mt].xs(energy)\n assert np.allclose(xs, xs2)\n assert element[502].scattering_factor == element2[502].scattering_factor\n assert element.atomic_relaxation.transitions['O3'].equals(\n element2.atomic_relaxation.transitions['O3'])\n assert (element.compton_profiles['binding_energy'] ==\n element2.compton_profiles['binding_energy']).all()\n assert (element.bremsstrahlung['electron_energy'] ==\n element2.bremsstrahlung['electron_energy']).all()\n # Export to hdf5 again\n element2.export_to_hdf5(filename, 'w')\n\ndef test_photodat_only(run_in_tmpdir):\n endf_dir = Path(os.environ['OPENMC_ENDF_DATA'])\n photoatomic_file = endf_dir / 'photoat' / 'photoat-001_H_000.endf'\n data = openmc.data.IncidentPhoton.from_endf(photoatomic_file)\n data.export_to_hdf5('tmp.h5', 'w')",
"import numpy as np\n\nimport openmc\n\n\ndef test_zernike_radial():\n coeff = np.asarray([1.3, -3.0, 9e-1, -6e-1, 0.11])\n zn_rad = openmc.ZernikeRadial(coeff)\n assert zn_rad.order == 8\n assert zn_rad.radius == 1\n\n coeff = np.asarray([1.3, -3.0, 9e-1, -6e-1, 0.11, 0.222])\n zn_rad = openmc.ZernikeRadial(coeff, 0.392)\n assert zn_rad.order == 10\n assert zn_rad.radius == 0.392\n norm_vec = (2 * np.arange(6) + 1) / (np.pi * 0.392 ** 2)\n norm_coeff = norm_vec * coeff\n\n rho = 0.5\n # Reference solution from running the Fortran implementation\n raw_zn = np.array([\n 1.00000000e+00, -5.00000000e-01, -1.25000000e-01,\n 4.37500000e-01, -2.89062500e-01, -8.98437500e-02])\n\n ref_vals = np.sum(norm_coeff * raw_zn)\n\n test_vals = zn_rad(rho * zn_rad.radius)\n\n assert ref_vals == test_vals\n\n rho = [0.2, 0.5]\n # Reference solution from running the Fortran implementation\n raw_zn1 = np.array([\n 1.00000000e+00, -9.20000000e-01, 7.69600000e-01,\n -5.66720000e-01, 3.35219200e-01, -1.01747000e-01])\n raw_zn2 = np.array([\n 1.00000000e+00, -5.00000000e-01, -1.25000000e-01,\n 4.37500000e-01, -2.89062500e-01, -8.98437500e-02])\n\n ref_vals = [np.sum(norm_coeff * raw_zn1), np.sum(norm_coeff * raw_zn2)]\n\n test_vals = zn_rad([i * zn_rad.radius for i in rho])\n\n assert np.allclose(ref_vals, test_vals)\n\n\ndef test_zernike():\n import openmc.lib as lib\n \n coeff = np.asarray([1.1e-1, -3.2e2, 5.3, 7.4, -9.5, 0.005])\n zn_azimuthal = openmc.Zernike(coeff)\n assert zn_azimuthal.order == 2\n assert zn_azimuthal.radius == 1\n\n coeff = np.asarray([1.5, -3.6, 9.7e-1, -6.8e-1, 0.11, 0.33e2, 0.002, 13.75, \n 3.1, -7.3, 7.8e-1, -1.1e-1, 2.56, 5.25e3, 0.123])\n zn_azimuthal = openmc.Zernike(coeff, 0.392)\n assert zn_azimuthal.order == 4\n assert zn_azimuthal.radius == 0.392\n norm_vec = np.array([1, 4, 4, 6, 3, 6, 8, 8, 8, 8, \n 10, 10, 5, 10, 10]) / (np.pi * 0.392 ** 2)\n norm_coeff = norm_vec * coeff \n \n rho = 0.5\n \n theta = np.radians(45) \n # Reference solution from running the C API for calc_zn\n raw_zn = lib.calc_zn(zn_azimuthal.order, rho, theta)\n \n ref_vals = np.sum(norm_coeff * raw_zn)\n\n test_vals = zn_azimuthal(rho * zn_azimuthal.radius, theta)\n\n assert ref_vals == test_vals\n\n rho = [0.2, 0.5]\n \n theta = np.radians(30) \n #Reference solution from running the C API for calc_zn\n raw_zn1 = lib.calc_zn(zn_azimuthal.order, rho[0], theta)\n \n raw_zn2 = lib.calc_zn(zn_azimuthal.order, rho[1], theta)\n \n ref_vals = [np.sum(norm_coeff * raw_zn1), np.sum(norm_coeff * raw_zn2)]\n\n test_vals = zn_azimuthal([i * zn_azimuthal.radius for i in rho], theta)\n \n assert np.allclose(ref_vals, test_vals) \n \n rho = 0.2\n \n theta = np.radians([30, 60]) \n #Reference solution from running the C API for calc_zn\n raw_zn1 = lib.calc_zn(zn_azimuthal.order, rho, theta[0])\n \n raw_zn2 = lib.calc_zn(zn_azimuthal.order, rho, theta[1])\n\n ref_vals = [np.sum(norm_coeff * raw_zn1), np.sum(norm_coeff * raw_zn2)]\n\n test_vals = zn_azimuthal(rho * zn_azimuthal.radius, [j for j in theta])\n \n assert np.allclose(ref_vals, test_vals) \n \n rho = [0.2, 0.5]\n \n theta = np.radians([30, 60]) \n #Reference solution from running the C API for calc_zn\n raw_zn1 = lib.calc_zn(zn_azimuthal.order, rho[0], theta[0])\n \n raw_zn2 = lib.calc_zn(zn_azimuthal.order, rho[1], theta[0])\n \n raw_zn3 = lib.calc_zn(zn_azimuthal.order, rho[0], theta[1])\n \n raw_zn4 = lib.calc_zn(zn_azimuthal.order, rho[1], theta[1])\n\n ref_vals = [np.sum(norm_coeff * raw_zn1), np.sum(norm_coeff * raw_zn2),\n np.sum(norm_coeff * raw_zn3), 
np.sum(norm_coeff * raw_zn4)]\n\n test_vals = zn_azimuthal([i * zn_azimuthal.radius for i in rho], [j for j in theta])\n \n test_vals = np.ravel(test_vals) \n \n assert np.allclose(ref_vals, test_vals) \n \n\n",
"from collections import OrderedDict\nimport copy\nfrom numbers import Integral\nimport os\nimport warnings\n\nimport h5py\nimport numpy as np\n\nimport openmc\nfrom openmc.data import REACTION_MT, REACTION_NAME, FISSION_MTS\nimport openmc.checkvalue as cv\nfrom ..tallies import ESTIMATOR_TYPES\nfrom . import EnergyGroups\n\n\n# Supported cross section types\nMGXS_TYPES = (\n 'total',\n 'transport',\n 'nu-transport',\n 'absorption',\n 'capture',\n 'fission',\n 'nu-fission',\n 'kappa-fission',\n 'scatter',\n 'nu-scatter',\n 'scatter matrix',\n 'nu-scatter matrix',\n 'multiplicity matrix',\n 'nu-fission matrix',\n 'scatter probability matrix',\n 'consistent scatter matrix',\n 'consistent nu-scatter matrix',\n 'chi',\n 'chi-prompt',\n 'inverse-velocity',\n 'prompt-nu-fission',\n 'prompt-nu-fission matrix',\n 'current',\n 'diffusion-coefficient',\n 'nu-diffusion-coefficient'\n)\n\n# Some scores from REACTION_MT are not supported, or are simply overkill to\n# support and test (like inelastic levels), remoev those from consideration\n_BAD_SCORES = [\"(n,misc)\", \"(n,absorption)\", \"(n,total)\", \"fission\"]\n_BAD_SCORES += [REACTION_NAME[mt] for mt in FISSION_MTS]\nARBITRARY_VECTOR_TYPES = tuple(k for k in REACTION_MT.keys()\n if k not in _BAD_SCORES)\nARBITRARY_MATRIX_TYPES = []\nfor rxn in ARBITRARY_VECTOR_TYPES:\n # Preclude the fission channels from being treated as a matrix\n if rxn not in [REACTION_NAME[mt] for mt in FISSION_MTS]:\n split_rxn = rxn.strip(\"()\").split(\",\")\n if len(split_rxn) > 1 and \"n\" in split_rxn[1]:\n # Then there is a neutron product, so it can also be a matrix\n ARBITRARY_MATRIX_TYPES.append(rxn + \" matrix\")\nARBITRARY_MATRIX_TYPES = tuple(ARBITRARY_MATRIX_TYPES)\n\n# Supported domain types\nDOMAIN_TYPES = (\n 'cell',\n 'distribcell',\n 'universe',\n 'material',\n 'mesh'\n)\n\n# Filter types corresponding to each domain\n_DOMAIN_TO_FILTER = {\n 'cell': openmc.CellFilter,\n 'distribcell': openmc.DistribcellFilter,\n 'universe': openmc.UniverseFilter,\n 'material': openmc.MaterialFilter,\n 'mesh': openmc.MeshFilter\n}\n\n# Supported domain classes\n_DOMAINS = (\n openmc.Cell,\n openmc.Universe,\n openmc.Material,\n openmc.RegularMesh\n)\n\n# Supported ScatterMatrixXS angular distribution types. Note that 'histogram' is\n# defined here and used in mgxs_library.py, but it is not used for the current\n# module\nSCATTER_TABULAR = 'tabular'\nSCATTER_LEGENDRE = 'legendre'\nSCATTER_HISTOGRAM = 'histogram'\nMU_TREATMENTS = (\n SCATTER_LEGENDRE,\n SCATTER_HISTOGRAM\n)\n\n# Maximum Legendre order supported by OpenMC\n_MAX_LEGENDRE = 10\n\n\ndef _df_column_convert_to_bin(df, current_name, new_name, values_to_bin,\n reverse_order=False):\n \"\"\"Convert a Pandas DataFrame column from the bin edges to an index for\n each bin. 
This method operates on the DataFrame, df, in-place.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas DataFrame containing the cross section data.\n current_name : str\n Name of the column to replace with bins\n new_name : str\n New name for column after the data is replaced with bins\n values_to_bin : Iterable of Real\n Values of the bin edges to be used for identifying the bins\n reverse_order : bool\n Whether the bin indices should be reversed\n\n \"\"\"\n\n # Get the current values\n df_bins = np.asarray(df[current_name])\n new_vals = np.zeros_like(df_bins, dtype=int)\n # Replace the values with the index of the closest entry in values_to_bin\n # The closest is used because it is expected that the values in df could\n # have lost precision along the way\n for i, df_val in enumerate(df_bins):\n idx = np.searchsorted(values_to_bin, df_val)\n # Check to make sure if the value is just above the search result\n if idx > 0 and np.isclose(values_to_bin[idx - 1], df_val):\n idx -= 1\n # If it is just below the search result then we are done\n new_vals[i] = idx\n # Switch to a one-based indexing\n new_vals += 1\n\n # Reverse the ordering if requested (this is for energy group ordering)\n if reverse_order:\n new_vals = (len(values_to_bin) - 1) - new_vals + 1\n\n # Assign the values\n df[current_name] = new_vals[:]\n\n # And rename the column\n df.rename(columns={current_name: new_name}, inplace=True)\n\n\nclass MGXS:\n \"\"\"An abstract multi-group cross section for some energy group structure\n within some spatial domain.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations.\n\n .. note:: Users should instantiate the subclasses of this abstract class.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file) and the number of mesh cells for\n 'mesh' domain types.\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n # Store whether or not the number density should be removed for microscopic\n # values of this data\n _divide_by_density = True\n\n def __init__(self, domain=None, domain_type=None,\n energy_groups=None, by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1):\n self._name = ''\n self._rxn_type = None\n self._by_nuclide = None\n self._nuclides = None\n self._estimator = 'tracklength'\n self._domain = None\n self._domain_type = None\n self._energy_groups = None\n self._num_polar = 1\n self._num_azimuthal = 1\n self._tally_trigger = None\n self._tallies = None\n self._rxn_rate_tally = None\n self._xs_tally = None\n self._sparse = False\n self._loaded_sp = False\n self._derived = False\n self._hdf5_key = None\n self._valid_estimators = ESTIMATOR_TYPES\n\n self.name = name\n self.by_nuclide = by_nuclide\n\n if domain_type is not None:\n self.domain_type = domain_type\n if domain is not None:\n self.domain = domain\n if energy_groups is not None:\n self.energy_groups = energy_groups\n self.num_polar = num_polar\n self.num_azimuthal = num_azimuthal\n\n def __deepcopy__(self, memo):\n existing = memo.get(id(self))\n\n # If this object has been copied before, return the first copy made\n if existing is not None:\n return existing\n\n # If this is the first time we have tried to copy this object, copy it\n clone = type(self).__new__(type(self))\n clone._name = self.name\n clone._rxn_type = self.rxn_type\n clone._by_nuclide = self.by_nuclide\n clone._nuclides = copy.deepcopy(self._nuclides, memo)\n clone._domain = self.domain\n clone._domain_type = self.domain_type\n clone._energy_groups = copy.deepcopy(self.energy_groups, memo)\n clone._num_polar = self._num_polar\n clone._num_azimuthal = self._num_azimuthal\n clone._tally_trigger = copy.deepcopy(self.tally_trigger, memo)\n clone._rxn_rate_tally = copy.deepcopy(self._rxn_rate_tally, memo)\n clone._xs_tally = copy.deepcopy(self._xs_tally, memo)\n clone._sparse = self.sparse\n clone._loaded_sp = self._loaded_sp\n clone._derived = self.derived\n clone._hdf5_key = self._hdf5_key\n\n clone._tallies = OrderedDict()\n for tally_type, tally in self.tallies.items():\n clone.tallies[tally_type] = copy.deepcopy(tally, memo)\n\n memo[id(self)] = clone\n\n return clone\n\n def _add_angle_filters(self, filters):\n \"\"\"Add the azimuthal and polar bins to the MGXS filters if needed.\n Filters will be provided as a ragged 2D list of openmc.Filter objects.\n\n Parameters\n ----------\n filters : Iterable of Iterable of openmc.Filter\n Ragged 2D list of openmc.Filter objects for the energy and spatial\n domains. 
The angle filters will be added to the list.\n\n Returns\n -------\n Iterable of Iterable of openmc.Filter\n Ragged 2D list of openmc.Filter objects for the energy and spatial\n domains with the angle filters added to the list.\n\n \"\"\"\n\n if self.num_polar > 1 or self.num_azimuthal > 1:\n # Then the user has requested angular data, so create the bins\n pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,\n endpoint=True)\n azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,\n endpoint=True)\n\n for filt in filters:\n filt.insert(0, openmc.PolarFilter(pol_bins))\n filt.insert(1, openmc.AzimuthalFilter(azi_bins))\n\n return filters\n\n def _squeeze_xs(self, xs):\n \"\"\"Remove dimensions which are not needed from a cross section array\n due to user options. This is used by the openmc.Mgxs.get_xs(...) method\n\n Parameters\n ----------\n xs : np.ndarray\n Cross sections array with dimensions to be squeezed\n\n Returns\n -------\n np.ndarray\n Squeezed array of cross sections\n\n \"\"\"\n\n # numpy.squeeze will return a ValueError if the axis has a size\n # greater than 1, to avoid this we will try each axis one at a\n # time to preclude the ValueError.\n initial_shape = len(xs.shape)\n for axis in range(initial_shape - 1, -1, -1):\n if axis not in self._dont_squeeze and xs.shape[axis] == 1:\n xs = np.squeeze(xs, axis=axis)\n return xs\n\n def _df_convert_columns_to_bins(self, df):\n \"\"\"This method converts all relevant and present DataFrame columns from\n their bin boundaries to the index for each bin. This method operates on\n the DataFrame, df, in place. The method returns a list of the columns\n in which it has operated on.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas DataFrame containing the cross section data.\n\n Returns\n -------\n columns : Iterable of str\n Names of the re-named and re-valued columns\n\n \"\"\"\n # Override polar and azimuthal bounds with indices\n if self.num_polar > 1 or self.num_azimuthal > 1:\n # First for polar\n bins = np.linspace(0., np.pi, self.num_polar + 1, True)\n _df_column_convert_to_bin(df, 'polar low', 'polar bin', bins)\n del df['polar high']\n\n # Second for azimuthal\n bins = np.linspace(-np.pi, np.pi, self.num_azimuthal + 1, True)\n _df_column_convert_to_bin(df, 'azimuthal low', 'azimuthal bin',\n bins)\n del df['azimuthal high']\n columns = ['polar bin', 'azimuthal bin']\n else:\n columns = []\n\n # Override energy groups bounds with indices\n if 'energy low [eV]' in df:\n _df_column_convert_to_bin(df, 'energy low [eV]', 'group in',\n self.energy_groups.group_edges,\n reverse_order=True)\n del df['energy high [eV]']\n columns += ['group in']\n if 'energyout low [eV]' in df:\n _df_column_convert_to_bin(df, 'energyout low [eV]', 'group out',\n self.energy_groups.group_edges,\n reverse_order=True)\n del df['energyout high [eV]']\n columns += ['group out']\n\n if 'mu low' in df and hasattr(self, 'histogram_bins'):\n # Only the ScatterMatrix class has the histogram_bins attribute\n bins = np.linspace(-1., 1., self.histogram_bins + 1, True)\n _df_column_convert_to_bin(df, 'mu low', 'mu bin', bins)\n del df['mu high']\n columns += ['mu bin']\n\n return columns\n\n @property\n def _dont_squeeze(self):\n \"\"\"Create a tuple of axes which should not be removed during the get_xs\n process\n \"\"\"\n if self.num_polar > 1 or self.num_azimuthal > 1:\n return (0, 1, 3)\n else:\n return (1, )\n\n @property\n def name(self):\n return self._name\n\n @property\n def rxn_type(self):\n return self._rxn_type\n\n @property\n 
def by_nuclide(self):\n return self._by_nuclide\n\n @property\n def domain(self):\n return self._domain\n\n @property\n def domain_type(self):\n return self._domain_type\n\n @property\n def energy_groups(self):\n return self._energy_groups\n\n @property\n def num_polar(self):\n return self._num_polar\n\n @property\n def num_azimuthal(self):\n return self._num_azimuthal\n\n @property\n def tally_trigger(self):\n return self._tally_trigger\n\n @property\n def num_groups(self):\n return self.energy_groups.num_groups\n\n @property\n def scores(self):\n return ['flux', self.rxn_type]\n\n @property\n def filters(self):\n group_edges = self.energy_groups.group_edges\n energy_filter = openmc.EnergyFilter(group_edges)\n filters = []\n for i in range(len(self.scores)):\n filters.append([energy_filter])\n\n return self._add_angle_filters(filters)\n\n @property\n def tally_keys(self):\n return self.scores\n\n @property\n def estimator(self):\n return self._estimator\n\n @property\n def tallies(self):\n\n # Instantiate tallies if they do not exist\n if self._tallies is None:\n\n # Initialize a collection of Tallies\n self._tallies = OrderedDict()\n\n # Create a domain Filter object\n filter_type = _DOMAIN_TO_FILTER[self.domain_type]\n if self.domain_type == 'mesh':\n domain_filter = filter_type(self.domain)\n else:\n domain_filter = filter_type(self.domain.id)\n\n if isinstance(self.estimator, str):\n estimators = [self.estimator] * len(self.scores)\n else:\n estimators = self.estimator\n\n # Create each Tally needed to compute the multi group cross section\n tally_metadata = \\\n zip(self.scores, self.tally_keys, self.filters, estimators)\n for score, key, filters, estimator in tally_metadata:\n self._tallies[key] = openmc.Tally(name=self.name)\n self._tallies[key].scores = [score]\n self._tallies[key].estimator = estimator\n if score != 'current':\n self._tallies[key].filters = [domain_filter]\n\n # If a tally trigger was specified, add it to each tally\n if self.tally_trigger:\n trigger_clone = copy.deepcopy(self.tally_trigger)\n trigger_clone.scores = [score]\n self._tallies[key].triggers.append(trigger_clone)\n\n # Add non-domain specific Filters (e.g., 'energy') to the Tally\n for add_filter in filters:\n self._tallies[key].filters.append(add_filter)\n\n # If this is a by-nuclide cross-section, add nuclides to Tally\n if self.by_nuclide and score != 'flux':\n self._tallies[key].nuclides += self.get_nuclides()\n else:\n self._tallies[key].nuclides.append('total')\n\n return self._tallies\n\n @property\n def rxn_rate_tally(self):\n if self._rxn_rate_tally is None:\n self._rxn_rate_tally = self.tallies[self.rxn_type]\n self._rxn_rate_tally.sparse = self.sparse\n\n return self._rxn_rate_tally\n\n @property\n def xs_tally(self):\n if self._xs_tally is None:\n if self.tallies is None:\n msg = 'Unable to get xs_tally since tallies have ' \\\n 'not been loaded from a statepoint'\n raise ValueError(msg)\n\n self._xs_tally = self.rxn_rate_tally / self.tallies['flux']\n self._compute_xs()\n\n return self._xs_tally\n\n @property\n def sparse(self):\n return self._sparse\n\n @property\n def num_subdomains(self):\n if self.domain_type.startswith('sum('):\n domain_type = self.domain_type[4:-1]\n else:\n domain_type = self.domain_type\n if self._rxn_type == 'current':\n filter_type = openmc.MeshSurfaceFilter\n else:\n filter_type = _DOMAIN_TO_FILTER[domain_type]\n domain_filter = self.xs_tally.find_filter(filter_type)\n return domain_filter.num_bins\n\n @property\n def num_nuclides(self):\n if self.by_nuclide:\n 
return len(self.get_nuclides())\n else:\n return 1\n\n @property\n def nuclides(self):\n if self.by_nuclide:\n return self.get_nuclides()\n else:\n return ['sum']\n\n @property\n def loaded_sp(self):\n return self._loaded_sp\n\n @property\n def derived(self):\n return self._derived\n\n @property\n def hdf5_key(self):\n if self._hdf5_key is not None:\n return self._hdf5_key\n else:\n return self._rxn_type\n\n @name.setter\n def name(self, name):\n cv.check_type('name', name, str)\n self._name = name\n\n @by_nuclide.setter\n def by_nuclide(self, by_nuclide):\n cv.check_type('by_nuclide', by_nuclide, bool)\n self._by_nuclide = by_nuclide\n\n @nuclides.setter\n def nuclides(self, nuclides):\n cv.check_iterable_type('nuclides', nuclides, str)\n self._nuclides = nuclides\n\n @estimator.setter\n def estimator(self, estimator):\n cv.check_value('estimator', estimator, self._valid_estimators)\n self._estimator = estimator\n\n @domain.setter\n def domain(self, domain):\n cv.check_type('domain', domain, _DOMAINS)\n self._domain = domain\n\n # Assign a domain type\n if self.domain_type is None:\n if isinstance(domain, openmc.Material):\n self._domain_type = 'material'\n elif isinstance(domain, openmc.Cell):\n self._domain_type = 'cell'\n elif isinstance(domain, openmc.Universe):\n self._domain_type = 'universe'\n elif isinstance(domain, openmc.RegularMesh):\n self._domain_type = 'mesh'\n\n @domain_type.setter\n def domain_type(self, domain_type):\n cv.check_value('domain type', domain_type, DOMAIN_TYPES)\n self._domain_type = domain_type\n\n @energy_groups.setter\n def energy_groups(self, energy_groups):\n cv.check_type('energy groups', energy_groups, openmc.mgxs.EnergyGroups)\n self._energy_groups = energy_groups\n\n @num_polar.setter\n def num_polar(self, num_polar):\n cv.check_type('num_polar', num_polar, Integral)\n cv.check_greater_than('num_polar', num_polar, 0)\n self._num_polar = num_polar\n\n @num_azimuthal.setter\n def num_azimuthal(self, num_azimuthal):\n cv.check_type('num_azimuthal', num_azimuthal, Integral)\n cv.check_greater_than('num_azimuthal', num_azimuthal, 0)\n self._num_azimuthal = num_azimuthal\n\n @tally_trigger.setter\n def tally_trigger(self, tally_trigger):\n cv.check_type('tally trigger', tally_trigger, openmc.Trigger)\n self._tally_trigger = tally_trigger\n\n @sparse.setter\n def sparse(self, sparse):\n \"\"\"Convert tally data from NumPy arrays to SciPy list of lists (LIL)\n sparse matrices, and vice versa.\n\n This property may be used to reduce the amount of data in memory during\n tally data processing. The tally data will be stored as SciPy LIL\n matrices internally within the Tally object. 
All tally data access\n properties and methods will return data as a dense NumPy array.\n\n \"\"\"\n\n cv.check_type('sparse', sparse, bool)\n\n # Sparsify or densify the derived MGXS tallies and the base tallies\n if self._xs_tally:\n self.xs_tally.sparse = sparse\n if self._rxn_rate_tally:\n self.rxn_rate_tally.sparse = sparse\n\n for tally_name in self.tallies:\n self.tallies[tally_name].sparse = sparse\n\n self._sparse = sparse\n\n @staticmethod\n def get_mgxs(mgxs_type, domain=None, domain_type=None,\n energy_groups=None, by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1):\n \"\"\"Return a MGXS subclass object for some energy group structure within\n some spatial domain for some reaction type.\n\n This is a factory method which can be used to quickly create MGXS\n subclass objects for various reaction types.\n\n Parameters\n ----------\n mgxs_type : str or Integral\n The type of multi-group cross section object to return; valid\n values are members of MGXS_TYPES, or the reaction types that are\n the keys of REACTION_MT. Note that if a reaction type from\n REACTION_MT is used, it can be appended with ' matrix' to obtain\n a multigroup matrix (from incoming to outgoing energy groups) for\n reactions with a neutron in an outgoing channel.\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain.\n Defaults to False\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file. 
Defaults to the empty string.\n num_polar : Integral, optional\n Number of equi-width polar angles for angle discretization;\n defaults to no discretization\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angles for angle discretization;\n defaults to no discretization\n\n Returns\n -------\n openmc.mgxs.MGXS\n A subclass of the abstract MGXS class for the multi-group cross\n section type requested by the user\n\n \"\"\"\n\n cv.check_value(\n \"mgxs_type\", mgxs_type,\n MGXS_TYPES + ARBITRARY_VECTOR_TYPES + ARBITRARY_MATRIX_TYPES)\n\n if mgxs_type == 'total':\n mgxs = TotalXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'transport':\n mgxs = TransportXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'nu-transport':\n mgxs = TransportXS(domain, domain_type, energy_groups, nu=True)\n elif mgxs_type == 'absorption':\n mgxs = AbsorptionXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'capture':\n mgxs = CaptureXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'fission':\n mgxs = FissionXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'nu-fission':\n mgxs = FissionXS(domain, domain_type, energy_groups, nu=True)\n elif mgxs_type == 'kappa-fission':\n mgxs = KappaFissionXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'scatter':\n mgxs = ScatterXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'nu-scatter':\n mgxs = ScatterXS(domain, domain_type, energy_groups, nu=True)\n elif mgxs_type == 'scatter matrix':\n mgxs = ScatterMatrixXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'nu-scatter matrix':\n mgxs = ScatterMatrixXS(domain, domain_type, energy_groups, nu=True)\n elif mgxs_type == 'multiplicity matrix':\n mgxs = MultiplicityMatrixXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'scatter probability matrix':\n mgxs = ScatterProbabilityMatrix(domain, domain_type, energy_groups)\n elif mgxs_type == 'consistent scatter matrix':\n mgxs = ScatterMatrixXS(domain, domain_type, energy_groups)\n mgxs.formulation = 'consistent'\n elif mgxs_type == 'consistent nu-scatter matrix':\n mgxs = ScatterMatrixXS(domain, domain_type, energy_groups, nu=True)\n mgxs.formulation = 'consistent'\n elif mgxs_type == 'nu-fission matrix':\n mgxs = NuFissionMatrixXS(domain, domain_type, energy_groups)\n elif mgxs_type == 'chi':\n mgxs = Chi(domain, domain_type, energy_groups)\n elif mgxs_type == 'chi-prompt':\n mgxs = Chi(domain, domain_type, energy_groups, prompt=True)\n elif mgxs_type == 'inverse-velocity':\n mgxs = InverseVelocity(domain, domain_type, energy_groups)\n elif mgxs_type == 'prompt-nu-fission':\n mgxs = FissionXS(domain, domain_type, energy_groups, prompt=True)\n elif mgxs_type == 'prompt-nu-fission matrix':\n mgxs = NuFissionMatrixXS(domain, domain_type, energy_groups,\n prompt=True)\n elif mgxs_type == 'current':\n mgxs = Current(domain, domain_type, energy_groups)\n elif mgxs_type == 'diffusion-coefficient':\n mgxs = DiffusionCoefficient(domain, domain_type, energy_groups)\n elif mgxs_type == 'nu-diffusion-coefficient':\n mgxs = DiffusionCoefficient(domain, domain_type, energy_groups, nu=True)\n elif mgxs_type in ARBITRARY_VECTOR_TYPES:\n # Then it is a reaction not covered by the above that is\n # supported by the ArbitraryXS Class\n mgxs = ArbitraryXS(mgxs_type, domain, domain_type, energy_groups)\n elif mgxs_type in ARBITRARY_MATRIX_TYPES:\n mgxs = ArbitraryMatrixXS(mgxs_type, domain, domain_type,\n energy_groups)\n\n mgxs.by_nuclide = by_nuclide\n mgxs.name = name\n mgxs.num_polar = 
num_polar\n mgxs.num_azimuthal = num_azimuthal\n return mgxs\n\n def get_nuclides(self):\n \"\"\"Get all nuclides in the cross section's spatial domain.\n\n Returns\n -------\n list of str\n A list of the string names for each nuclide in the spatial domain\n (e.g., ['U235', 'U238', 'O16'])\n\n Raises\n ------\n ValueError\n When this method is called before the spatial domain has been set.\n\n \"\"\"\n\n if self.domain is None:\n raise ValueError('Unable to get all nuclides without a domain')\n\n # If the user defined nuclides, return them\n if self._nuclides:\n return self._nuclides\n\n # Otherwise, return all nuclides in the spatial domain\n else:\n return self.domain.get_nuclides()\n\n def get_nuclide_density(self, nuclide):\n \"\"\"Get the atomic number density in units of atoms/b-cm for a nuclide\n in the cross section's spatial domain.\n\n Parameters\n ----------\n nuclide : str\n A nuclide name string (e.g., 'U235')\n\n Returns\n -------\n float\n The atomic number density (atom/b-cm) for the nuclide of interest\n\n \"\"\"\n\n cv.check_type('nuclide', nuclide, str)\n\n # Get list of all nuclides in the spatial domain\n nuclides = self.domain.get_nuclide_densities()\n\n return nuclides[nuclide][1] if nuclide in nuclides else 0.0\n\n def get_nuclide_densities(self, nuclides='all'):\n \"\"\"Get an array of atomic number densities in units of atom/b-cm for all\n nuclides in the cross section's spatial domain.\n\n Parameters\n ----------\n nuclides : Iterable of str or 'all' or 'sum'\n A list of nuclide name strings (e.g., ['U235', 'U238']). The\n special string 'all' will return the atom densities for all nuclides\n in the spatial domain. The special string 'sum' will return the atom\n density summed across all nuclides in the spatial domain. Defaults\n to 'all'.\n\n Returns\n -------\n numpy.ndarray of float\n An array of the atomic number densities (atom/b-cm) for each of the\n nuclides in the spatial domain\n\n Raises\n ------\n ValueError\n When this method is called before the spatial domain has been set.\n\n \"\"\"\n\n if self.domain is None:\n raise ValueError('Unable to get nuclide densities without a domain')\n\n # Sum the atomic number densities for all nuclides\n if nuclides == 'sum':\n nuclides = self.get_nuclides()\n densities = np.zeros(1, dtype=np.float)\n for nuclide in nuclides:\n densities[0] += self.get_nuclide_density(nuclide)\n\n # Tabulate the atomic number densities for all nuclides\n elif nuclides == 'all':\n nuclides = self.get_nuclides()\n densities = np.zeros(self.num_nuclides, dtype=np.float)\n for i, nuclide in enumerate(nuclides):\n densities[i] += self.get_nuclide_density(nuclide)\n\n # Tabulate the atomic number densities for each specified nuclide\n else:\n densities = np.zeros(len(nuclides), dtype=np.float)\n for i, nuclide in enumerate(nuclides):\n densities[i] = self.get_nuclide_density(nuclide)\n\n return densities\n\n def _compute_xs(self):\n \"\"\"Performs generic cleanup after a subclass' uses tally arithmetic to\n compute a multi-group cross section as a derived tally.\n\n This method replaces CrossNuclides generated by tally arithmetic with\n the original Nuclide objects in the xs_tally instance attribute. The\n simple Nuclides allow for cleaner output through Pandas DataFrames as\n well as simpler data access through the get_xs(...) class method.\n\n In addition, this routine resets NaNs in the multi group cross section\n array to 0.0. 
This may be needed occur if no events were scored in\n certain tally bins, which will lead to a divide-by-zero situation.\n\n \"\"\"\n\n # If computing xs for each nuclide, replace CrossNuclides with originals\n if self.by_nuclide:\n self.xs_tally._nuclides = []\n nuclides = self.get_nuclides()\n for nuclide in nuclides:\n self.xs_tally.nuclides.append(openmc.Nuclide(nuclide))\n\n # Remove NaNs which may have resulted from divide-by-zero operations\n self.xs_tally._mean = np.nan_to_num(self.xs_tally.mean)\n self.xs_tally._std_dev = np.nan_to_num(self.xs_tally.std_dev)\n self.xs_tally.sparse = self.sparse\n\n def load_from_statepoint(self, statepoint):\n \"\"\"Extracts tallies in an OpenMC StatePoint with the data needed to\n compute multi-group cross sections.\n\n This method is needed to compute cross section data from tallies\n in an OpenMC StatePoint object.\n\n .. note:: The statepoint must be linked with an OpenMC Summary object.\n\n Parameters\n ----------\n statepoint : openmc.StatePoint\n An OpenMC StatePoint object with tally data\n\n Raises\n ------\n ValueError\n When this method is called with a statepoint that has not been\n linked with a summary object.\n\n \"\"\"\n\n cv.check_type('statepoint', statepoint, openmc.StatePoint)\n\n if statepoint.summary is None:\n msg = 'Unable to load data from a statepoint which has not been ' \\\n 'linked with a summary file'\n raise ValueError(msg)\n\n # Override the domain object that loaded from an OpenMC summary file\n # NOTE: This is necessary for micro cross-sections which require\n # the isotopic number densities as computed by OpenMC\n su = statepoint.summary\n if self.domain_type in ('cell', 'distribcell'):\n self.domain = su._fast_cells[self.domain.id]\n elif self.domain_type == 'universe':\n self.domain = su._fast_universes[self.domain.id]\n elif self.domain_type == 'material':\n self.domain = su._fast_materials[self.domain.id]\n elif self.domain_type == 'mesh':\n self.domain = statepoint.meshes[self.domain.id]\n else:\n msg = 'Unable to load data from a statepoint for domain type {0} ' \\\n 'which is not yet supported'.format(self.domain_type)\n raise ValueError(msg)\n\n # Use tally \"slicing\" to ensure that tallies correspond to our domain\n # NOTE: This is important if tally merging was used\n if self.domain_type == 'mesh':\n filters = [_DOMAIN_TO_FILTER[self.domain_type]]\n filter_bins = [tuple(self.domain.indices)]\n elif self.domain_type != 'distribcell':\n filters = [_DOMAIN_TO_FILTER[self.domain_type]]\n filter_bins = [(self.domain.id,)]\n # Distribcell filters only accept single cell - neglect it when slicing\n else:\n filters = []\n filter_bins = []\n\n # Clear any tallies previously loaded from a statepoint\n if self.loaded_sp:\n self._tallies = None\n self._xs_tally = None\n self._rxn_rate_tally = None\n self._loaded_sp = False\n\n # Find, slice and store Tallies from StatePoint\n # The tally slicing is needed if tally merging was used\n for tally_type, tally in self.tallies.items():\n sp_tally = statepoint.get_tally(\n tally.scores, tally.filters, tally.nuclides,\n estimator=tally.estimator, exact_filters=True)\n sp_tally = sp_tally.get_slice(\n tally.scores, filters, filter_bins, tally.nuclides)\n sp_tally.sparse = self.sparse\n self.tallies[tally_type] = sp_tally\n\n self._loaded_sp = True\n\n def get_xs(self, groups='all', subdomains='all', nuclides='all',\n xs_type='macro', order_groups='increasing',\n value='mean', squeeze=True, **kwargs):\n r\"\"\"Returns an array of multi-group cross sections.\n\n This method 
constructs a 3D NumPy array for the requested\n multi-group cross section data for one or more subdomains\n (1st dimension), energy groups (2nd dimension), and nuclides\n (3rd dimension).\n\n Parameters\n ----------\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n subdomains : Iterable of Integral or 'all'\n Subdomain IDs of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n A list of nuclide name strings (e.g., ['U235', 'U238']). The\n special string 'all' will return the cross sections for all nuclides\n in the spatial domain. The special string 'sum' will return the\n cross section summed over all nuclides. Defaults to 'all'.\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n order_groups: {'increasing', 'decreasing'}\n Return the cross section indexed according to increasing or\n decreasing energy groups (decreasing or increasing energies).\n Defaults to 'increasing'.\n value : {'mean', 'std_dev', 'rel_err'}\n A string for the type of value to return. Defaults to 'mean'.\n squeeze : bool\n A boolean representing whether to eliminate the extra dimensions\n of the multi-dimensional array to be returned. Defaults to True.\n\n Returns\n -------\n numpy.ndarray\n A NumPy array of the multi-group cross section indexed in the order\n each group, subdomain and nuclide is listed in the parameters.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n\n cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # FIXME: Unable to get microscopic xs for mesh domain because the mesh\n # cells do not know the nuclide densities in each mesh cell.\n if self.domain_type == 'mesh' and xs_type == 'micro':\n msg = 'Unable to get micro xs for mesh domain since the mesh ' \\\n 'cells do not know the nuclide densities in each mesh cell.'\n raise ValueError(msg)\n\n filters = []\n filter_bins = []\n\n # Construct a collection of the domain filter bins\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral,\n max_depth=3)\n\n filters.append(_DOMAIN_TO_FILTER[self.domain_type])\n subdomain_bins = []\n for subdomain in subdomains:\n subdomain_bins.append(subdomain)\n filter_bins.append(tuple(subdomain_bins))\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(groups, str):\n cv.check_iterable_type('groups', groups, Integral)\n filters.append(openmc.EnergyFilter)\n energy_bins = []\n for group in groups:\n energy_bins.append(\n (self.energy_groups.get_group_bounds(group),))\n filter_bins.append(tuple(energy_bins))\n\n # Construct a collection of the nuclides to retrieve from the xs tally\n if self.by_nuclide:\n if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:\n query_nuclides = self.get_nuclides()\n else:\n query_nuclides = nuclides\n else:\n query_nuclides = ['total']\n\n # If user requested the sum for all nuclides, use tally summation\n if nuclides == 'sum' or nuclides == ['sum']:\n xs_tally = self.xs_tally.summation(nuclides=query_nuclides)\n xs = xs_tally.get_values(filters=filters,\n filter_bins=filter_bins, value=value)\n else:\n xs = self.xs_tally.get_values(filters=filters,\n filter_bins=filter_bins,\n nuclides=query_nuclides, value=value)\n\n # Divide by atom number densities for microscopic cross sections\n 
if xs_type == 'micro' and self._divide_by_density:\n if self.by_nuclide:\n densities = self.get_nuclide_densities(nuclides)\n else:\n densities = self.get_nuclide_densities('sum')\n if value == 'mean' or value == 'std_dev':\n xs /= densities[np.newaxis, :, np.newaxis]\n\n # Eliminate the trivial score dimension\n xs = np.squeeze(xs, axis=len(xs.shape) - 1)\n xs = np.nan_to_num(xs)\n\n if groups == 'all':\n num_groups = self.num_groups\n else:\n num_groups = len(groups)\n\n # Reshape tally data array with separate axes for domain and energy\n # Accomodate the polar and azimuthal bins if needed\n num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *\n self.num_azimuthal))\n if self.num_polar > 1 or self.num_azimuthal > 1:\n new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,\n num_groups)\n else:\n new_shape = (num_subdomains, num_groups)\n new_shape += xs.shape[1:]\n xs = np.reshape(xs, new_shape)\n\n # Reverse data if user requested increasing energy groups since\n # tally data is stored in order of increasing energies\n if order_groups == 'increasing':\n xs = xs[..., ::-1, :]\n\n if squeeze:\n # We want to squeeze out everything but the polar, azimuthal,\n # and energy group data.\n xs = self._squeeze_xs(xs)\n\n return xs\n\n def get_flux(self, groups='all', subdomains='all',\n order_groups='increasing', value='mean',\n squeeze=True, **kwargs):\n r\"\"\"Returns an array of the fluxes used to weight the MGXS.\n\n This method constructs a 2D NumPy array for the requested\n weighting flux for one or more subdomains (1st dimension), and\n energy groups (2nd dimension).\n\n Parameters\n ----------\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n subdomains : Iterable of Integral or 'all'\n Subdomain IDs of interest. Defaults to 'all'.\n order_groups: {'increasing', 'decreasing'}\n Return the cross section indexed according to increasing or\n decreasing energy groups (decreasing or increasing energies).\n Defaults to 'increasing'.\n value : {'mean', 'std_dev', 'rel_err'}\n A string for the type of value to return. Defaults to 'mean'.\n squeeze : bool\n A boolean representing whether to eliminate the extra dimensions\n of the multi-dimensional array to be returned. 
Defaults to True.\n\n Returns\n -------\n numpy.ndarray\n A NumPy array of the flux indexed in the order\n each group and subdomain is listed in the parameters.\n\n Raises\n ------\n ValueError\n When this method is called before the data is available from tally\n data, or, when this is used on an MGXS type without a flux score.\n\n \"\"\"\n\n cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])\n\n filters = []\n filter_bins = []\n\n # Construct a collection of the domain filter bins\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral,\n max_depth=3)\n\n filters.append(_DOMAIN_TO_FILTER[self.domain_type])\n subdomain_bins = []\n for subdomain in subdomains:\n subdomain_bins.append(subdomain)\n filter_bins.append(tuple(subdomain_bins))\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(groups, str):\n cv.check_iterable_type('groups', groups, Integral)\n filters.append(openmc.EnergyFilter)\n energy_bins = []\n for group in groups:\n energy_bins.append(\n (self.energy_groups.get_group_bounds(group),))\n filter_bins.append(tuple(energy_bins))\n\n # Determine which flux to obtain\n # Step through in order of usefulness\n for key in ['flux', 'flux (tracklength)', 'flux (analog)']:\n if key in self.tally_keys:\n tally = self.tallies[key]\n break\n else:\n msg = \"MGXS of Type {} do not have an explicit weighting flux!\"\n raise ValueError(msg.format(self.__name__))\n\n flux = tally.get_values(filters=filters, filter_bins=filter_bins,\n nuclides=['total'], value=value)\n\n # Eliminate the trivial score dimension\n flux = np.squeeze(flux, axis=len(flux.shape) - 1)\n # Eliminate the trivial nuclide dimension\n flux = np.squeeze(flux, axis=len(flux.shape) - 1)\n flux = np.nan_to_num(flux)\n\n if groups == 'all':\n num_groups = self.num_groups\n else:\n num_groups = len(groups)\n\n # Reshape tally data array with separate axes for domain and energy\n # Accomodate the polar and azimuthal bins if needed\n num_subdomains = int(flux.shape[0] / (num_groups * self.num_polar *\n self.num_azimuthal))\n if self.num_polar > 1 or self.num_azimuthal > 1:\n new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,\n num_groups)\n else:\n new_shape = (num_subdomains, num_groups)\n new_shape += flux.shape[1:]\n flux = np.reshape(flux, new_shape)\n\n # Reverse data if user requested increasing energy groups since\n # tally data is stored in order of increasing energies\n if order_groups == 'increasing':\n flux = flux[..., ::-1]\n\n if squeeze:\n # We want to squeeze out everything but the polar, azimuthal,\n # and energy group data.\n flux = self._squeeze_xs(flux)\n\n return flux\n\n def get_condensed_xs(self, coarse_groups):\n \"\"\"Construct an energy-condensed version of this cross section.\n\n Parameters\n ----------\n coarse_groups : openmc.mgxs.EnergyGroups\n The coarse energy group structure of interest\n\n Returns\n -------\n MGXS\n A new MGXS condensed to the group structure of interest\n\n \"\"\"\n\n cv.check_type('coarse_groups', coarse_groups, EnergyGroups)\n cv.check_less_than('coarse groups', coarse_groups.num_groups,\n self.num_groups, equality=True)\n cv.check_value('upper coarse energy', coarse_groups.group_edges[-1],\n [self.energy_groups.group_edges[-1]])\n cv.check_value('lower coarse energy', coarse_groups.group_edges[0],\n [self.energy_groups.group_edges[0]])\n\n # Clone this MGXS to initialize the condensed version\n condensed_xs = copy.deepcopy(self)\n condensed_xs._rxn_rate_tally = 
None\n condensed_xs._xs_tally = None\n condensed_xs._sparse = False\n condensed_xs._energy_groups = coarse_groups\n\n # Build energy indices to sum across\n energy_indices = []\n for group in range(coarse_groups.num_groups, 0, -1):\n low, high = coarse_groups.get_group_bounds(group)\n low_index = np.where(self.energy_groups.group_edges == low)[0][0]\n energy_indices.append(low_index)\n\n fine_edges = self.energy_groups.group_edges\n\n # Condense each of the tallies to the coarse group structure\n for tally in condensed_xs.tallies.values():\n\n # Make condensed tally derived and null out sum, sum_sq\n tally._derived = True\n tally._sum = None\n tally._sum_sq = None\n\n # Get tally data arrays reshaped with one dimension per filter\n mean = tally.get_reshaped_data(value='mean')\n std_dev = tally.get_reshaped_data(value='std_dev')\n\n # Sum across all applicable fine energy group filters\n for i, tally_filter in enumerate(tally.filters):\n if not isinstance(tally_filter, (openmc.EnergyFilter,\n openmc.EnergyoutFilter)):\n continue\n elif len(tally_filter.bins) != len(fine_edges) - 1:\n continue\n elif not np.allclose(tally_filter.bins[:, 0], fine_edges[:-1]):\n continue\n else:\n cedge = coarse_groups.group_edges\n tally_filter.values = cedge\n tally_filter.bins = np.vstack((cedge[:-1], cedge[1:])).T\n mean = np.add.reduceat(mean, energy_indices, axis=i)\n std_dev = np.add.reduceat(std_dev**2, energy_indices,\n axis=i)\n std_dev = np.sqrt(std_dev)\n\n # Reshape condensed data arrays with one dimension for all filters\n mean = np.reshape(mean, tally.shape)\n std_dev = np.reshape(std_dev, tally.shape)\n\n # Override tally's data with the new condensed data\n tally._mean = mean\n tally._std_dev = std_dev\n\n # Compute the energy condensed multi-group cross section\n condensed_xs.sparse = self.sparse\n return condensed_xs\n\n def get_subdomain_avg_xs(self, subdomains='all'):\n \"\"\"Construct a subdomain-averaged version of this cross section.\n\n This method is useful for averaging cross sections across distribcell\n instances. The method performs spatial homogenization to compute the\n scalar flux-weighted average cross section across the subdomains.\n\n Parameters\n ----------\n subdomains : Iterable of Integral or 'all'\n The subdomain IDs to average across. 
Defaults to 'all'.\n\n Returns\n -------\n openmc.mgxs.MGXS\n A new MGXS averaged across the subdomains of interest\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n\n # Construct a collection of the subdomain filter bins to average across\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral)\n subdomains = [(subdomain,) for subdomain in subdomains]\n subdomains = [tuple(subdomains)]\n elif self.domain_type == 'distribcell':\n subdomains = [i for i in range(self.num_subdomains)]\n subdomains = [tuple(subdomains)]\n else:\n subdomains = None\n\n # Clone this MGXS to initialize the subdomain-averaged version\n avg_xs = copy.deepcopy(self)\n avg_xs._rxn_rate_tally = None\n avg_xs._xs_tally = None\n\n # Average each of the tallies across subdomains\n for tally_type, tally in avg_xs.tallies.items():\n filt_type = _DOMAIN_TO_FILTER[self.domain_type]\n tally_avg = tally.summation(filter_type=filt_type,\n filter_bins=subdomains)\n avg_xs.tallies[tally_type] = tally_avg\n\n avg_xs._domain_type = 'sum({0})'.format(self.domain_type)\n avg_xs.sparse = self.sparse\n return avg_xs\n\n def _get_homogenized_mgxs(self, other_mgxs, denom_score='flux'):\n \"\"\"Construct a homogenized MGXS with other MGXS objects.\n\n This method constructs a new MGXS object that is the flux-weighted\n combination of two MGXS objects. It is equivalent to what one would\n obtain if the tally spatial domain were designed to encompass the\n individual domains for both MGXS objects. This is accomplished by\n summing the rxn rate (numerator) tally and the denominator tally\n (often a tally of the flux over the spatial domain) that are used to\n compute a multi-group cross-section.\n\n Parameters\n ----------\n other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS\n The MGXS to homogenize with this one.\n denom_score : str\n The denominator score in the denominator of computing the MGXS.\n\n Returns\n -------\n openmc.mgxs.MGXS\n A new homogenized MGXS\n\n Raises\n ------\n ValueError\n If the other_mgxs is of a different type.\n\n \"\"\"\n\n # Check type of denom score\n cv.check_type('denom_score', denom_score, str)\n\n # Construct a collection of the subdomain filter bins to homogenize\n # across\n if isinstance(other_mgxs, openmc.mgxs.MGXS):\n other_mgxs = [other_mgxs]\n\n cv.check_iterable_type('other_mgxs', other_mgxs, openmc.mgxs.MGXS)\n for mgxs in other_mgxs:\n if mgxs.rxn_type != self.rxn_type:\n msg = 'Not able to homogenize two MGXS with different rxn types'\n raise ValueError(msg)\n\n # Clone this MGXS to initialize the homogenized version\n homogenized_mgxs = copy.deepcopy(self)\n homogenized_mgxs._derived = True\n name = 'hom({}, '.format(self.domain.name)\n\n # Get the domain filter\n filter_type = _DOMAIN_TO_FILTER[self.domain_type]\n self_filter = self.rxn_rate_tally.find_filter(filter_type)\n\n # Get the rxn rate and denom tallies\n rxn_rate_tally = self.rxn_rate_tally\n denom_tally = self.tallies[denom_score]\n\n for mgxs in other_mgxs:\n\n # Swap the domain filter bins for the other mgxs rxn rate tally\n other_rxn_rate_tally = copy.deepcopy(mgxs.rxn_rate_tally)\n other_filter = other_rxn_rate_tally.find_filter(filter_type)\n other_filter._bins = self_filter._bins\n\n # Swap the domain filter bins for the denom tally\n other_denom_tally = copy.deepcopy(mgxs.tallies[denom_score])\n other_filter = other_denom_tally.find_filter(filter_type)\n other_filter._bins = 
self_filter._bins\n\n # Add the rxn rate and denom tallies\n rxn_rate_tally += other_rxn_rate_tally\n denom_tally += other_denom_tally\n\n # Update the name for the homogenzied MGXS\n name += '{}, '.format(mgxs.domain.name)\n\n # Set the properties of the homogenized MGXS\n homogenized_mgxs._rxn_rate_tally = rxn_rate_tally\n homogenized_mgxs.tallies[denom_score] = denom_tally\n homogenized_mgxs._domain.name = name[:-2] + ')'\n\n return homogenized_mgxs\n\n def get_homogenized_mgxs(self, other_mgxs):\n \"\"\"Construct a homogenized mgxs with other MGXS objects.\n\n Parameters\n ----------\n other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS\n The MGXS to homogenize with this one.\n\n Returns\n -------\n openmc.mgxs.MGXS\n A new homogenized MGXS\n\n Raises\n ------\n ValueError\n If the other_mgxs is of a different type.\n\n \"\"\"\n\n return self._get_homogenized_mgxs(other_mgxs, 'flux')\n\n def get_slice(self, nuclides=[], groups=[]):\n \"\"\"Build a sliced MGXS for the specified nuclides and energy groups.\n\n This method constructs a new MGXS to encapsulate a subset of the data\n represented by this MGXS. The subset of data to include in the tally\n slice is determined by the nuclides and energy groups specified in\n the input parameters.\n\n Parameters\n ----------\n nuclides : list of str\n A list of nuclide name strings\n (e.g., ['U235', 'U238']; default is [])\n groups : list of int\n A list of energy group indices starting at 1 for the high energies\n (e.g., [1, 2, 3]; default is [])\n\n Returns\n -------\n openmc.mgxs.MGXS\n A new MGXS object which encapsulates the subset of data requested\n for the nuclide(s) and/or energy group(s) requested in the\n parameters.\n\n \"\"\"\n\n cv.check_iterable_type('nuclides', nuclides, str)\n cv.check_iterable_type('energy_groups', groups, Integral)\n\n # Build lists of filters and filter bins to slice\n filters = []\n filter_bins = []\n\n if len(groups) != 0:\n energy_bins = []\n for group in groups:\n group_bounds = self.energy_groups.get_group_bounds(group)\n energy_bins.append(group_bounds)\n filter_bins.append(tuple(energy_bins))\n filters.append(openmc.EnergyFilter)\n\n # Clone this MGXS to initialize the sliced version\n slice_xs = copy.deepcopy(self)\n slice_xs._rxn_rate_tally = None\n slice_xs._xs_tally = None\n\n # Slice each of the tallies across nuclides and energy groups\n for tally_type, tally in slice_xs.tallies.items():\n slice_nuclides = [nuc for nuc in nuclides if nuc in tally.nuclides]\n if len(groups) != 0 and tally.contains_filter(openmc.EnergyFilter):\n tally_slice = tally.get_slice(filters=filters,\n filter_bins=filter_bins,\n nuclides=slice_nuclides)\n else:\n tally_slice = tally.get_slice(nuclides=slice_nuclides)\n slice_xs.tallies[tally_type] = tally_slice\n\n # Assign sliced energy group structure to sliced MGXS\n if groups:\n new_group_edges = []\n for group in groups:\n group_edges = self.energy_groups.get_group_bounds(group)\n new_group_edges.extend(group_edges)\n new_group_edges = np.unique(new_group_edges)\n slice_xs.energy_groups.group_edges = sorted(new_group_edges)\n\n # Assign sliced nuclides to sliced MGXS\n if nuclides:\n slice_xs.nuclides = nuclides\n\n slice_xs.sparse = self.sparse\n return slice_xs\n\n def can_merge(self, other):\n \"\"\"Determine if another MGXS can be merged with this one\n\n If results have been loaded from a statepoint, then MGXS are only\n mergeable along one and only one of enegy groups or nuclides.\n\n Parameters\n ----------\n other : openmc.mgxs.MGXS\n MGXS to check 
for merging\n\n \"\"\"\n\n if not isinstance(other, type(self)):\n return False\n\n # Compare reaction type, energy groups, nuclides, domain type\n if self.rxn_type != other.rxn_type:\n return False\n elif not self.energy_groups.can_merge(other.energy_groups):\n return False\n elif self.by_nuclide != other.by_nuclide:\n return False\n elif self.domain_type != other.domain_type:\n return False\n elif 'distribcell' not in self.domain_type and self.domain != other.domain:\n return False\n elif not self.xs_tally.can_merge(other.xs_tally):\n return False\n elif not self.rxn_rate_tally.can_merge(other.rxn_rate_tally):\n return False\n\n # If all conditionals pass then MGXS are mergeable\n return True\n\n def merge(self, other):\n \"\"\"Merge another MGXS with this one\n\n MGXS are only mergeable if their energy groups and nuclides are either\n identical or mutually exclusive. If results have been loaded from a\n statepoint, then MGXS are only mergeable along one and only one of\n energy groups or nuclides.\n\n Parameters\n ----------\n other : openmc.mgxs.MGXS\n MGXS to merge with this one\n\n Returns\n -------\n merged_mgxs : openmc.mgxs.MGXS\n Merged MGXS\n\n \"\"\"\n\n if not self.can_merge(other):\n raise ValueError('Unable to merge MGXS')\n\n # Create deep copy of tally to return as merged tally\n merged_mgxs = copy.deepcopy(self)\n merged_mgxs._derived = True\n\n # Merge energy groups\n if self.energy_groups != other.energy_groups:\n merged_groups = self.energy_groups.merge(other.energy_groups)\n merged_mgxs.energy_groups = merged_groups\n\n # Merge nuclides\n if self.nuclides != other.nuclides:\n\n # The nuclides must be mutually exclusive\n for nuclide in self.nuclides:\n if nuclide in other.nuclides:\n msg = 'Unable to merge MGXS with shared nuclides'\n raise ValueError(msg)\n\n # Concatenate lists of nuclides for the merged MGXS\n merged_mgxs.nuclides = self.nuclides + other.nuclides\n\n # Null base tallies but merge reaction rate and cross section tallies\n merged_mgxs._tallies = OrderedDict()\n merged_mgxs._rxn_rate_tally = self.rxn_rate_tally.merge(other.rxn_rate_tally)\n merged_mgxs._xs_tally = self.xs_tally.merge(other.xs_tally)\n\n return merged_mgxs\n\n def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):\n \"\"\"Print a string representation for the multi-group cross section.\n\n Parameters\n ----------\n subdomains : Iterable of Integral or 'all'\n The subdomain IDs of the cross sections to include in the report.\n Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n The nuclides of the cross-sections to include in the report. This\n may be a list of nuclide name strings (e.g., ['U235', 'U238']).\n The special string 'all' will report the cross sections for all\n nuclides in the spatial domain. The special string 'sum' will report\n the cross sections summed over all nuclides. 
Defaults to 'all'.\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n\n \"\"\"\n\n # Construct a collection of the subdomains to report\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral)\n elif self.domain_type == 'distribcell':\n subdomains = np.arange(self.num_subdomains, dtype=np.int)\n elif self.domain_type == 'mesh':\n subdomains = list(self.domain.indices)\n else:\n subdomains = [self.domain.id]\n\n # Construct a collection of the nuclides to report\n if self.by_nuclide:\n if nuclides == 'all':\n nuclides = self.get_nuclides()\n elif nuclides == 'sum':\n nuclides = ['sum']\n else:\n cv.check_iterable_type('nuclides', nuclides, str)\n else:\n nuclides = ['sum']\n\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # Build header for string with type and domain info\n string = 'Multi-Group XS\\n'\n string += '{0: <16}=\\t{1}\\n'.format('\\tReaction Type', self.rxn_type)\n string += '{0: <16}=\\t{1}\\n'.format('\\tDomain Type', self.domain_type)\n string += '{0: <16}=\\t{1}\\n'.format('\\tDomain ID', self.domain.id)\n\n # Generate the header for an individual XS\n xs_header = '\\tCross Sections [{0}]:'.format(self.get_units(xs_type))\n\n # If cross section data has not been computed, only print string header\n if self.tallies is None:\n print(string)\n return\n\n # Set polar/azimuthal bins\n if self.num_polar > 1 or self.num_azimuthal > 1:\n pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,\n endpoint=True)\n azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,\n endpoint=True)\n\n # Loop over all subdomains\n for subdomain in subdomains:\n\n if self.domain_type == 'distribcell' or self.domain_type == 'mesh':\n string += '{0: <16}=\\t{1}\\n'.format('\\tSubdomain', subdomain)\n\n # Loop over all Nuclides\n for nuclide in nuclides:\n\n # Build header for nuclide type\n if nuclide != 'sum':\n string += '{0: <16}=\\t{1}\\n'.format('\\tNuclide', nuclide)\n\n # Build header for cross section type\n string += '{0: <16}\\n'.format(xs_header)\n template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]:\\t'\n\n average_xs = self.get_xs(nuclides=[nuclide],\n subdomains=[subdomain],\n xs_type=xs_type, value='mean')\n rel_err_xs = self.get_xs(nuclides=[nuclide],\n subdomains=[subdomain],\n xs_type=xs_type, value='rel_err')\n rel_err_xs = rel_err_xs * 100.\n\n if self.num_polar > 1 or self.num_azimuthal > 1:\n # Loop over polar, azimuthal, and energy group ranges\n for pol in range(len(pol_bins) - 1):\n pol_low, pol_high = pol_bins[pol: pol + 2]\n for azi in range(len(azi_bins) - 1):\n azi_low, azi_high = azi_bins[azi: azi + 2]\n string += '\\t\\tPolar Angle: [{0:5f} - {1:5f}]'.format(\n pol_low, pol_high) + \\\n '\\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(\n azi_low, azi_high) + '\\n'\n for group in range(1, self.num_groups + 1):\n bounds = \\\n self.energy_groups.get_group_bounds(group)\n string += '\\t' + template.format('', group,\n bounds[0],\n bounds[1])\n\n string += '{0:.2e} +/- {1:.2e}%'.format(\n average_xs[pol, azi, group - 1],\n rel_err_xs[pol, azi, group - 1])\n string += '\\n'\n string += '\\n'\n else:\n # Loop over energy groups\n for group in range(1, self.num_groups + 1):\n bounds = self.energy_groups.get_group_bounds(group)\n string += template.format('', group, bounds[0],\n bounds[1])\n string += '{0:.2e} +/- {1:.2e}%'.format(\n average_xs[group - 1], rel_err_xs[group - 1])\n string += '\\n'\n string += '\\n'\n string += '\\n'\n\n 
print(string)\n\n def build_hdf5_store(self, filename='mgxs.h5', directory='mgxs',\n subdomains='all', nuclides='all',\n xs_type='macro', row_column='inout', append=True,\n libver='earliest'):\n \"\"\"Export the multi-group cross section data to an HDF5 binary file.\n\n This method constructs an HDF5 file which stores the multi-group\n cross section data. The data is stored in a hierarchy of HDF5 groups\n from the domain type, domain id, subdomain id (for distribcell domains),\n nuclides and cross section type. Two datasets for the mean and standard\n deviation are stored for each subdomain entry in the HDF5 file.\n\n .. note:: This requires the h5py Python package.\n\n Parameters\n ----------\n filename : str\n Filename for the HDF5 file. Defaults to 'mgxs.h5'.\n directory : str\n Directory for the HDF5 file. Defaults to 'mgxs'.\n subdomains : Iterable of Integral or 'all'\n The subdomain IDs of the cross sections to include in the report.\n Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n The nuclides of the cross-sections to include in the report. This\n may be a list of nuclide name strings (e.g., ['U235', 'U238']).\n The special string 'all' will report the cross sections for all\n nuclides in the spatial domain. The special string 'sum' will report\n the cross sections summed over all nuclides. Defaults to 'all'.\n xs_type: {'macro', 'micro'}\n Store the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n row_column: {'inout', 'outin'}\n Store scattering matrices indexed first by incoming group and\n second by outgoing group ('inout'), or vice versa ('outin').\n Defaults to 'inout'.\n append : bool\n If true, appends to an existing HDF5 file with the same filename\n directory (if one exists). Defaults to True.\n libver : {'earliest', 'latest'}\n Compatibility mode for the HDF5 file. 
'latest' will produce files\n that are less backwards compatible but have performance benefits.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n # Make directory if it does not exist\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n filename = os.path.join(directory, filename)\n filename = filename.replace(' ', '-')\n\n if append and os.path.isfile(filename):\n xs_results = h5py.File(filename, 'a')\n else:\n xs_results = h5py.File(filename, 'w', libver=libver)\n\n # Construct a collection of the subdomains to report\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral)\n elif self.domain_type == 'distribcell':\n subdomains = np.arange(self.num_subdomains, dtype=np.int)\n elif self.domain_type == 'sum(distribcell)':\n domain_filter = self.xs_tally.find_filter('sum(distribcell)')\n subdomains = domain_filter.bins\n elif self.domain_type == 'mesh':\n subdomains = list(self.domain.indices)\n else:\n subdomains = [self.domain.id]\n\n # Construct a collection of the nuclides to report\n if self.by_nuclide:\n if nuclides == 'all':\n nuclides = self.get_nuclides()\n densities = np.zeros(len(nuclides), dtype=np.float)\n elif nuclides == 'sum':\n nuclides = ['sum']\n else:\n cv.check_iterable_type('nuclides', nuclides, str)\n else:\n nuclides = ['sum']\n\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # Create an HDF5 group within the file for the domain\n domain_type_group = xs_results.require_group(self.domain_type)\n domain_group = domain_type_group.require_group(str(self.domain.id))\n\n # Determine number of digits to pad subdomain group keys\n num_digits = len(str(self.num_subdomains))\n\n # Create a separate HDF5 group for each subdomain\n for subdomain in subdomains:\n\n # Create an HDF5 group for the subdomain\n if self.domain_type == 'distribcell':\n group_name = ''.zfill(num_digits)\n subdomain_group = domain_group.require_group(group_name)\n else:\n subdomain_group = domain_group\n\n # Create a separate HDF5 group for this cross section\n rxn_group = subdomain_group.require_group(self.hdf5_key)\n\n # Create a separate HDF5 group for each nuclide\n for j, nuclide in enumerate(nuclides):\n\n if nuclide != 'sum':\n density = densities[j]\n nuclide_group = rxn_group.require_group(nuclide)\n nuclide_group.require_dataset('density', dtype=np.float64,\n data=[density], shape=(1,))\n else:\n nuclide_group = rxn_group\n\n # Extract the cross section for this subdomain and nuclide\n average = self.get_xs(subdomains=[subdomain], nuclides=[nuclide],\n xs_type=xs_type, value='mean',\n row_column=row_column)\n std_dev = self.get_xs(subdomains=[subdomain], nuclides=[nuclide],\n xs_type=xs_type, value='std_dev',\n row_column=row_column)\n\n # Add MGXS results data to the HDF5 group\n nuclide_group.require_dataset('average', dtype=np.float64,\n shape=average.shape, data=average)\n nuclide_group.require_dataset('std. 
dev.', dtype=np.float64,\n shape=std_dev.shape, data=std_dev)\n\n # Close the results HDF5 file\n xs_results.close()\n\n def export_xs_data(self, filename='mgxs', directory='mgxs',\n format='csv', groups='all', xs_type='macro'):\n \"\"\"Export the multi-group cross section data to a file.\n\n This method leverages the functionality in the Pandas library to export\n the multi-group cross section data in a variety of output file formats\n for storage and/or post-processing.\n\n Parameters\n ----------\n filename : str\n Filename for the exported file. Defaults to 'mgxs'.\n directory : str\n Directory for the exported file. Defaults to 'mgxs'.\n format : {'csv', 'excel', 'pickle', 'latex'}\n The format for the exported data file. Defaults to 'csv'.\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n xs_type: {'macro', 'micro'}\n Store the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n\n \"\"\"\n\n cv.check_type('filename', filename, str)\n cv.check_type('directory', directory, str)\n cv.check_value('format', format, ['csv', 'excel', 'pickle', 'latex'])\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # Make directory if it does not exist\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n filename = os.path.join(directory, filename)\n filename = filename.replace(' ', '-')\n\n # Get a Pandas DataFrame for the data\n df = self.get_pandas_dataframe(groups=groups, xs_type=xs_type)\n\n # Export the data using Pandas IO API\n if format == 'csv':\n df.to_csv(filename + '.csv', index=False)\n elif format == 'excel':\n if self.domain_type == 'mesh':\n df.to_excel(filename + '.xls')\n else:\n df.to_excel(filename + '.xls', index=False)\n elif format == 'pickle':\n df.to_pickle(filename + '.pkl')\n elif format == 'latex':\n if self.domain_type == 'distribcell':\n msg = 'Unable to export distribcell multi-group cross section' \\\n 'data to a LaTeX table'\n raise NotImplementedError(msg)\n\n df.to_latex(filename + '.tex', bold_rows=True,\n longtable=True, index=False)\n\n # Surround LaTeX table with code needed to run pdflatex\n with open(filename + '.tex', 'r') as original:\n data = original.read()\n with open(filename + '.tex', 'w') as modified:\n modified.write(\n '\\\\documentclass[preview, 12pt, border=1mm]{standalone}\\n')\n modified.write('\\\\usepackage{caption}\\n')\n modified.write('\\\\usepackage{longtable}\\n')\n modified.write('\\\\usepackage{booktabs}\\n')\n modified.write('\\\\begin{document}\\n\\n')\n modified.write(data)\n modified.write('\\n\\\\end{document}')\n\n def get_pandas_dataframe(self, groups='all', nuclides='all',\n xs_type='macro', paths=True):\n \"\"\"Build a Pandas DataFrame for the MGXS data.\n\n This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but\n renames the columns with terminology appropriate for cross section data.\n\n Parameters\n ----------\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n The nuclides of the cross-sections to include in the dataframe. This\n may be a list of nuclide name strings (e.g., ['U235', 'U238']).\n The special string 'all' will include the cross sections for all\n nuclides in the spatial domain. The special string 'sum' will\n include the cross sections summed over all nuclides. 
Defaults\n to 'all'.\n xs_type: {'macro', 'micro'}\n Return macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n paths : bool, optional\n Construct columns for distribcell tally filters (default is True).\n The geometric information in the Summary object is embedded into\n a Multi-index column with a geometric \"path\" to each distribcell\n instance.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame for the cross section data.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n\n if not isinstance(groups, str):\n cv.check_iterable_type('groups', groups, Integral)\n if nuclides != 'all' and nuclides != 'sum':\n cv.check_iterable_type('nuclides', nuclides, str)\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # Get a Pandas DataFrame from the derived xs tally\n if self.by_nuclide and nuclides == 'sum':\n\n # Use tally summation to sum across all nuclides\n xs_tally = self.xs_tally.summation(nuclides=self.get_nuclides())\n df = xs_tally.get_pandas_dataframe(paths=paths)\n\n # Remove nuclide column since it is homogeneous and redundant\n if self.domain_type == 'mesh':\n df.drop('sum(nuclide)', axis=1, level=0, inplace=True)\n else:\n df.drop('sum(nuclide)', axis=1, inplace=True)\n\n # If the user requested a specific set of nuclides\n elif self.by_nuclide and nuclides != 'all':\n xs_tally = self.xs_tally.get_slice(nuclides=nuclides)\n df = xs_tally.get_pandas_dataframe(paths=paths)\n\n # If the user requested all nuclides, keep nuclide column in dataframe\n else:\n df = self.xs_tally.get_pandas_dataframe(paths=paths)\n\n # Remove the score column since it is homogeneous and redundant\n if self.domain_type == 'mesh':\n df = df.drop('score', axis=1, level=0)\n else:\n df = df.drop('score', axis=1)\n\n # Convert azimuthal, polar, energy in and energy out bin values in to\n # bin indices\n columns = self._df_convert_columns_to_bins(df)\n\n # Select out those groups the user requested\n if not isinstance(groups, str):\n if 'group in' in df:\n df = df[df['group in'].isin(groups)]\n if 'group out' in df:\n df = df[df['group out'].isin(groups)]\n\n # If user requested micro cross sections, divide out the atom densities\n if xs_type == 'micro' and self._divide_by_density:\n if self.by_nuclide:\n densities = self.get_nuclide_densities(nuclides)\n else:\n densities = self.get_nuclide_densities('sum')\n densities = np.repeat(densities, len(self.rxn_rate_tally.scores))\n tile_factor = int(df.shape[0] / len(densities))\n df['mean'] /= np.tile(densities, tile_factor)\n df['std. dev.'] /= np.tile(densities, tile_factor)\n\n # Replace NaNs by zeros (happens if nuclide density is zero)\n df['mean'].replace(np.nan, 0.0, inplace=True)\n df['std. 
dev.'].replace(np.nan, 0.0, inplace=True)\n\n # Sort the dataframe by domain type id (e.g., distribcell id) and\n # energy groups such that data is from fast to thermal\n if self.domain_type == 'mesh':\n mesh_str = 'mesh {0}'.format(self.domain.id)\n df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),\n (mesh_str, 'z')] + columns, inplace=True)\n else:\n df.sort_values(by=[self.domain_type] + columns, inplace=True)\n\n return df\n\n def get_units(self, xs_type='macro'):\n \"\"\"This method returns the units of a MGXS based on a desired xs_type.\n\n Parameters\n ----------\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section units.\n Defaults to 'macro'.\n\n Returns\n -------\n str\n A string representing the units of the MGXS.\n\n \"\"\"\n\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n return 'cm^-1' if xs_type == 'macro' else 'barns'\n\n\nclass MatrixMGXS(MGXS):\n \"\"\"An abstract multi-group cross section for some energy group structure\n within some spatial domain. This class is specifically intended for\n cross sections which depend on both the incoming and outgoing energy groups\n and are therefore represented by matrices. Examples of this include the\n scattering and nu-fission matrices.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations.\n\n .. note:: Users should instantiate the subclasses of this abstract class.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file) and the number of mesh cells for\n 'mesh' domain types.\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n @property\n def _dont_squeeze(self):\n \"\"\"Create a tuple of axes which should not be removed during the get_xs\n process\n \"\"\"\n if self.num_polar > 1 or self.num_azimuthal > 1:\n return (0, 1, 3, 4)\n else:\n return (1, 2)\n\n @property\n def filters(self):\n # Create the non-domain specific Filters for the Tallies\n group_edges = self.energy_groups.group_edges\n energy = openmc.EnergyFilter(group_edges)\n energyout = openmc.EnergyoutFilter(group_edges)\n filters = [[energy], [energy, energyout]]\n\n return self._add_angle_filters(filters)\n\n def get_xs(self, in_groups='all', out_groups='all', subdomains='all',\n nuclides='all', xs_type='macro', order_groups='increasing',\n row_column='inout', value='mean', squeeze=True, **kwargs):\n \"\"\"Returns an array of multi-group cross sections.\n\n This method constructs a 4D NumPy array for the requested\n multi-group cross section data for one or more subdomains\n (1st dimension), energy groups in (2nd dimension), energy groups out\n (3rd dimension), and nuclides (4th dimension).\n\n Parameters\n ----------\n in_groups : Iterable of Integral or 'all'\n Incoming energy groups of interest. Defaults to 'all'.\n out_groups : Iterable of Integral or 'all'\n Outgoing energy groups of interest. Defaults to 'all'.\n subdomains : Iterable of Integral or 'all'\n Subdomain IDs of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n A list of nuclide name strings (e.g., ['U235', 'U238']). The\n special string 'all' will return the cross sections for all\n nuclides in the spatial domain. The special string 'sum' will\n return the cross section summed over all nuclides. Defaults to\n 'all'.\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n order_groups: {'increasing', 'decreasing'}\n Return the cross section indexed according to increasing or\n decreasing energy groups (decreasing or increasing energies).\n Defaults to 'increasing'.\n row_column: {'inout', 'outin'}\n Return the cross section indexed first by incoming group and\n second by outgoing group ('inout'), or vice versa ('outin').\n Defaults to 'inout'.\n value : {'mean', 'std_dev', 'rel_err'}\n A string for the type of value to return. Defaults to 'mean'.\n squeeze : bool\n A boolean representing whether to eliminate the extra dimensions\n of the multi-dimensional array to be returned. 
Defaults to True.\n\n Returns\n -------\n numpy.ndarray\n A NumPy array of the multi-group cross section indexed in the order\n each group and subdomain is listed in the parameters.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n\n cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # FIXME: Unable to get microscopic xs for mesh domain because the mesh\n # cells do not know the nuclide densities in each mesh cell.\n if self.domain_type == 'mesh' and xs_type == 'micro':\n msg = 'Unable to get micro xs for mesh domain since the mesh ' \\\n 'cells do not know the nuclide densities in each mesh cell.'\n raise ValueError(msg)\n\n filters = []\n filter_bins = []\n\n # Construct a collection of the domain filter bins\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral,\n max_depth=3)\n filters.append(_DOMAIN_TO_FILTER[self.domain_type])\n subdomain_bins = []\n for subdomain in subdomains:\n subdomain_bins.append(subdomain)\n filter_bins.append(tuple(subdomain_bins))\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(in_groups, str):\n cv.check_iterable_type('groups', in_groups, Integral)\n filters.append(openmc.EnergyFilter)\n energy_bins = []\n for group in in_groups:\n energy_bins.append((self.energy_groups.get_group_bounds(group),))\n filter_bins.append(tuple(energy_bins))\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(out_groups, str):\n cv.check_iterable_type('groups', out_groups, Integral)\n for group in out_groups:\n filters.append(openmc.EnergyoutFilter)\n filter_bins.append((\n self.energy_groups.get_group_bounds(group),))\n\n # Construct a collection of the nuclides to retrieve from the xs tally\n if self.by_nuclide:\n if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:\n query_nuclides = self.get_nuclides()\n else:\n query_nuclides = nuclides\n else:\n query_nuclides = ['total']\n\n # Use tally summation if user requested the sum for all nuclides\n if nuclides == 'sum' or nuclides == ['sum']:\n xs_tally = self.xs_tally.summation(nuclides=query_nuclides)\n xs = xs_tally.get_values(filters=filters, filter_bins=filter_bins,\n value=value)\n else:\n xs = self.xs_tally.get_values(filters=filters,\n filter_bins=filter_bins,\n nuclides=query_nuclides, value=value)\n\n # Divide by atom number densities for microscopic cross sections\n if xs_type == 'micro' and self._divide_by_density:\n if self.by_nuclide:\n densities = self.get_nuclide_densities(nuclides)\n else:\n densities = self.get_nuclide_densities('sum')\n if value == 'mean' or value == 'std_dev':\n xs /= densities[np.newaxis, :, np.newaxis]\n\n # Eliminate the trivial score dimension\n xs = np.squeeze(xs, axis=len(xs.shape) - 1)\n xs = np.nan_to_num(xs)\n\n if in_groups == 'all':\n num_in_groups = self.num_groups\n else:\n num_in_groups = len(in_groups)\n\n if out_groups == 'all':\n num_out_groups = self.num_groups\n else:\n num_out_groups = len(out_groups)\n\n # Reshape tally data array with separate axes for domain and energy\n # Accomodate the polar and azimuthal bins if needed\n num_subdomains = int(xs.shape[0] / (num_in_groups * num_out_groups *\n self.num_polar *\n self.num_azimuthal))\n if self.num_polar > 1 or self.num_azimuthal > 1:\n new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,\n 
num_in_groups, num_out_groups)\n new_shape += xs.shape[1:]\n xs = np.reshape(xs, new_shape)\n\n # Transpose the matrix if requested by user\n if row_column == 'outin':\n xs = np.swapaxes(xs, 3, 4)\n else:\n new_shape = (num_subdomains, num_in_groups, num_out_groups)\n new_shape += xs.shape[1:]\n xs = np.reshape(xs, new_shape)\n\n # Transpose the matrix if requested by user\n if row_column == 'outin':\n xs = np.swapaxes(xs, 1, 2)\n\n # Reverse data if user requested increasing energy groups since\n # tally data is stored in order of increasing energies\n if order_groups == 'increasing':\n xs = xs[..., ::-1, ::-1, :]\n\n if squeeze:\n # We want to squeeze out everything but the polar, azimuthal,\n # and in/out energy group data.\n xs = self._squeeze_xs(xs)\n\n return xs\n\n def get_slice(self, nuclides=[], in_groups=[], out_groups=[]):\n \"\"\"Build a sliced MatrixMGXS object for the specified nuclides and\n energy groups.\n\n This method constructs a new MGXS to encapsulate a subset of the data\n represented by this MGXS. The subset of data to include in the tally\n slice is determined by the nuclides and energy groups specified in\n the input parameters.\n\n Parameters\n ----------\n nuclides : list of str\n A list of nuclide name strings\n (e.g., ['U235', 'U238']; default is [])\n in_groups : list of int\n A list of incoming energy group indices starting at 1 for the high\n energies (e.g., [1, 2, 3]; default is [])\n out_groups : list of int\n A list of outgoing energy group indices starting at 1 for the high\n energies (e.g., [1, 2, 3]; default is [])\n\n Returns\n -------\n openmc.mgxs.MatrixMGXS\n A new MatrixMGXS object which encapsulates the subset of data\n requested for the nuclide(s) and/or energy group(s) requested in\n the parameters.\n\n \"\"\"\n\n # Call super class method and null out derived tallies\n slice_xs = super().get_slice(nuclides, in_groups)\n slice_xs._rxn_rate_tally = None\n slice_xs._xs_tally = None\n\n # Slice outgoing energy groups if needed\n if len(out_groups) != 0:\n filter_bins = []\n for group in out_groups:\n group_bounds = self.energy_groups.get_group_bounds(group)\n filter_bins.append(group_bounds)\n filter_bins = [tuple(filter_bins)]\n\n # Slice each of the tallies across energyout groups\n for tally_type, tally in slice_xs.tallies.items():\n if tally.contains_filter(openmc.EnergyoutFilter):\n tally_slice = tally.get_slice(\n filters=[openmc.EnergyoutFilter],\n filter_bins=filter_bins)\n slice_xs.tallies[tally_type] = tally_slice\n\n slice_xs.sparse = self.sparse\n return slice_xs\n\n def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):\n \"\"\"Prints a string representation for the multi-group cross section.\n\n Parameters\n ----------\n subdomains : Iterable of Integral or 'all'\n The subdomain IDs of the cross sections to include in the report.\n Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n The nuclides of the cross-sections to include in the report. This\n may be a list of nuclide name strings (e.g., ['U235', 'U238']).\n The special string 'all' will report the cross sections for all\n nuclides in the spatial domain. The special string 'sum' will\n report the cross sections summed over all nuclides. 
Defaults to\n 'all'.\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n\n \"\"\"\n\n # Construct a collection of the subdomains to report\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral)\n elif self.domain_type == 'distribcell':\n subdomains = np.arange(self.num_subdomains, dtype=np.int)\n elif self.domain_type == 'mesh':\n subdomains = list(self.domain.indices)\n else:\n subdomains = [self.domain.id]\n\n # Construct a collection of the nuclides to report\n if self.by_nuclide:\n if nuclides == 'all':\n nuclides = self.get_nuclides()\n if nuclides == 'sum':\n nuclides = ['sum']\n else:\n cv.check_iterable_type('nuclides', nuclides, str)\n else:\n nuclides = ['sum']\n\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # Build header for string with type and domain info\n string = 'Multi-Group XS\\n'\n string += '{0: <16}=\\t{1}\\n'.format('\\tReaction Type', self.rxn_type)\n string += '{0: <16}=\\t{1}\\n'.format('\\tDomain Type', self.domain_type)\n string += '{0: <16}=\\t{1}\\n'.format('\\tDomain ID', self.domain.id)\n\n # Generate the header for an individual XS\n xs_header = '\\tCross Sections [{0}]:'.format(self.get_units(xs_type))\n\n # If cross section data has not been computed, only print string header\n if self.tallies is None:\n print(string)\n return\n\n string += '{0: <16}\\n'.format('\\tEnergy Groups:')\n template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]\\n'\n\n # Loop over energy groups ranges\n for group in range(1, self.num_groups + 1):\n bounds = self.energy_groups.get_group_bounds(group)\n string += template.format('', group, bounds[0], bounds[1])\n\n # Set polar and azimuthal bins if necessary\n if self.num_polar > 1 or self.num_azimuthal > 1:\n pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,\n endpoint=True)\n azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,\n endpoint=True)\n\n # Loop over all subdomains\n for subdomain in subdomains:\n\n if self.domain_type == 'distribcell' or self.domain_type == 'mesh':\n string += '{0: <16}=\\t{1}\\n'.format('\\tSubdomain', subdomain)\n\n # Loop over all Nuclides\n for nuclide in nuclides:\n\n # Build header for nuclide type\n if xs_type != 'sum':\n string += '{0: <16}=\\t{1}\\n'.format('\\tNuclide', nuclide)\n\n # Build header for cross section type\n string += '{0: <16}\\n'.format(xs_header)\n template = '{0: <12}Group {1} -> Group {2}:\\t\\t'\n\n average_xs = self.get_xs(nuclides=[nuclide],\n subdomains=[subdomain],\n xs_type=xs_type, value='mean')\n rel_err_xs = self.get_xs(nuclides=[nuclide],\n subdomains=[subdomain],\n xs_type=xs_type, value='rel_err')\n rel_err_xs = rel_err_xs * 100.\n\n if self.num_polar > 1 or self.num_azimuthal > 1:\n # Loop over polar, azi, and in/out energy group ranges\n for pol in range(len(pol_bins) - 1):\n pol_low, pol_high = pol_bins[pol: pol + 2]\n for azi in range(len(azi_bins) - 1):\n azi_low, azi_high = azi_bins[azi: azi + 2]\n string += '\\t\\tPolar Angle: [{0:5f} - {1:5f}]'.format(\n pol_low, pol_high) + \\\n '\\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(\n azi_low, azi_high) + '\\n'\n for in_group in range(1, self.num_groups + 1):\n for out_group in range(1, self.num_groups + 1):\n string += '\\t' + template.format('',\n in_group,\n out_group)\n string += '{0:.2e} +/- {1:.2e}%'.format(\n average_xs[pol, azi, in_group - 1,\n out_group - 1],\n rel_err_xs[pol, azi, in_group - 1,\n out_group - 1])\n string += '\\n'\n string += '\\n'\n 
string += '\\n'\n else:\n # Loop over incoming/outgoing energy groups ranges\n for in_group in range(1, self.num_groups + 1):\n for out_group in range(1, self.num_groups + 1):\n string += template.format('', in_group, out_group)\n string += '{0:.2e} +/- {1:.2e}%'.format(\n average_xs[in_group - 1, out_group - 1],\n rel_err_xs[in_group - 1, out_group - 1])\n string += '\\n'\n string += '\\n'\n string += '\\n'\n string += '\\n'\n\n print(string)\n\n\nclass TotalXS(MGXS):\n r\"\"\"A total multi-group cross section.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group total cross sections for multi-group neutronics calculations. At\n a minimum, one needs to set the :attr:`TotalXS.energy_groups` and\n :attr:`TotalXS.domain` properties. Tallies for the flux and appropriate\n reaction rates over the specified domain are generated automatically via the\n :attr:`TotalXS.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`TotalXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n total cross section is calculated as:\n\n .. math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\sigma_t (r, E) \\psi (r, E, \\Omega)}{\\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`TotalXS.tally_keys` property and values\n are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = 'total'\n\n\nclass TransportXS(MGXS):\n r\"\"\"A transport-corrected total multi-group cross section.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`TransportXS.energy_groups` and\n :attr:`TransportXS.domain` properties. Tallies for the flux and appropriate\n reaction rates over the specified domain are generated automatically via the\n :attr:`TransportXS.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`TransportXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n transport-corrected total cross section is calculated as:\n\n .. math::\n\n \\begin{aligned}\n \\langle \\sigma_t \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\sigma_t (r, E) \\psi\n (r, E, \\Omega) \\\\\n \\langle \\sigma_{s1} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\int_{4\\pi}\n d\\Omega' \\int_0^\\infty dE' \\int_{-1}^1 d\\mu \\; \\mu \\sigma_s\n (r, E' \\rightarrow E, \\Omega' \\cdot \\Omega)\n \\phi (r, E', \\Omega) \\\\\n \\langle \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega) \\\\\n \\sigma_{tr} &= \\frac{\\langle \\sigma_t \\phi \\rangle - \\langle \\sigma_{s1}\n \\phi \\rangle}{\\langle \\phi \\rangle}\n \\end{aligned}\n\n To incorporate the effect of scattering multiplication in the above\n relation, the `nu` parameter can be set to `True`.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n nu : bool\n If True, the cross section data will include neutron multiplication;\n defaults to False.\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n nu : bool\n If True, the cross section data will include neutron multiplication\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`TransportXS.tally_keys` property and\n values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None, nu=False,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n\n # Use tracklength estimators for the total MGXS term, and\n # analog estimators for the transport correction term\n self._estimator = ['tracklength', 'tracklength', 'analog', 'analog']\n self._valid_estimators = ['analog']\n self.nu = nu\n\n def __deepcopy__(self, memo):\n clone = super().__deepcopy__(memo)\n clone._nu = self.nu\n return clone\n\n @property\n def scores(self):\n if not self.nu:\n return ['flux', 'total', 'flux', 'scatter']\n else:\n return ['flux', 'total', 'flux', 'nu-scatter']\n\n @property\n def tally_keys(self):\n return ['flux (tracklength)', 'total', 'flux (analog)', 'scatter-1']\n\n @property\n def filters(self):\n group_edges = self.energy_groups.group_edges\n energy_filter = openmc.EnergyFilter(group_edges)\n energyout_filter = openmc.EnergyoutFilter(group_edges)\n p1_filter = openmc.LegendreFilter(1)\n filters = [[energy_filter], [energy_filter],\n [energy_filter], [energyout_filter, p1_filter]]\n\n return self._add_angle_filters(filters)\n\n @property\n def rxn_rate_tally(self):\n if self._rxn_rate_tally is None:\n # Switch EnergyoutFilter to EnergyFilter.\n p1_tally = self.tallies['scatter-1']\n old_filt = p1_tally.filters[-2]\n new_filt = openmc.EnergyFilter(old_filt.values)\n p1_tally.filters[-2] = new_filt\n\n # Slice Legendre expansion filter and change name of score\n p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],\n filter_bins=[('P1',)],\n squeeze=True)\n p1_tally._scores = ['scatter-1']\n\n self._rxn_rate_tally = self.tallies['total'] - p1_tally\n self._rxn_rate_tally.sparse = self.sparse\n\n return self._rxn_rate_tally\n\n @property\n def xs_tally(self):\n if self._xs_tally is None:\n if self.tallies is None:\n msg = 'Unable to get xs_tally since tallies have ' \\\n 'not been loaded from a statepoint'\n raise ValueError(msg)\n\n # Switch EnergyoutFilter to EnergyFilter.\n p1_tally = self.tallies['scatter-1']\n old_filt = p1_tally.filters[-2]\n new_filt = openmc.EnergyFilter(old_filt.values)\n p1_tally.filters[-2] = new_filt\n\n # Slice Legendre expansion filter and change name of score\n p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],\n filter_bins=[('P1',)],\n squeeze=True)\n p1_tally._scores = ['scatter-1']\n\n # Compute total cross section\n total_xs = self.tallies['total'] / self.tallies['flux (tracklength)']\n\n # Compute transport correction term\n trans_corr = p1_tally / self.tallies['flux (analog)']\n\n # Compute the transport-corrected total cross section\n self._xs_tally = total_xs - trans_corr\n self._compute_xs()\n\n return self._xs_tally\n\n @property\n def nu(self):\n return self._nu\n\n @nu.setter\n def nu(self, nu):\n cv.check_type('nu', nu, bool)\n self._nu = nu\n if not nu:\n self._rxn_type = 'transport'\n else:\n self._rxn_type = 'nu-transport'\n\n\nclass DiffusionCoefficient(TransportXS):\n r\"\"\"A diffusion coefficient multi-group cross 
section.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`DiffusionCoefficient.energy_groups` and\n :attr:`DiffusionCoefficient.domain` properties. Tallies for the flux and appropriate\n reaction rates over the specified domain are generated automatically via the\n :attr:`DiffusionCoefficient.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`DiffusionCoefficient.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n diffusion coefficient is calculated as:\n\n .. math::\n\n \\begin{aligned}\n \\langle \\sigma_t \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\sigma_t (r, E) \\psi\n (r, E, \\Omega) \\\\\n \\langle \\sigma_{s1} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\int_{4\\pi}\n d\\Omega' \\int_0^\\infty dE' \\int_{-1}^1 d\\mu \\; \\mu \\sigma_s\n (r, E' \\rightarrow E, \\Omega' \\cdot \\Omega)\n \\phi (r, E', \\Omega) \\\\\n \\langle \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega) \\\\\n \\sigma_{tr} &= \\frac{\\langle \\sigma_t \\phi \\rangle - \\langle \\sigma_{s1}\n \\phi \\rangle}{\\langle \\phi \\rangle} \\\\\n D = \\frac{1}{3 \\sigma_{tr}}\n \\end{aligned}\n\n To incorporate the effect of scattering multiplication in the above\n relation, the `nu` parameter can be set to `True`.\n\n .. versionadded:: 0.12.1\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n nu : bool\n If True, the cross section data will include neutron multiplication;\n defaults to False.\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n nu : bool\n If True, the cross section data will include neutron multiplication\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`TransportXS.tally_keys` property and\n values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None, nu=False,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super(DiffusionCoefficient, self).__init__(domain, domain_type, groups,\n nu, by_nuclide, name,\n num_polar, num_azimuthal)\n if not nu:\n self._rxn_type = 'diffusion-coefficient'\n else:\n self._rxn_type = 'nu-diffusion-coefficient'\n\n\n @property\n def rxn_rate_tally(self):\n if self._rxn_rate_tally is None:\n # Switch EnergyoutFilter to EnergyFilter.\n p1_tally = self.tallies['scatter-1']\n old_filt = p1_tally.filters[-2]\n new_filt = openmc.EnergyFilter(old_filt.values)\n p1_tally.filters[-2] = new_filt\n\n # Slice Legendre expansion filter and change name of score\n p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],\n filter_bins=[('P1',)],\n squeeze=True)\n p1_tally._scores = ['scatter-1']\n\n transport = self.tallies['total'] - p1_tally\n self._rxn_rate_tally = transport**(-1) / 3.0\n self._rxn_rate_tally.sparse = self.sparse\n\n return self._rxn_rate_tally\n\n @property\n def xs_tally(self):\n if self._xs_tally is None:\n if self.tallies is None:\n msg = 'Unable to get xs_tally since tallies have ' \\\n 'not been loaded from a statepoint'\n raise ValueError(msg)\n\n # Switch EnergyoutFilter to EnergyFilter\n p1_tally = self.tallies['scatter-1']\n old_filt = p1_tally.filters[-2]\n new_filt = openmc.EnergyFilter(old_filt.values)\n p1_tally.filters[-2] = new_filt\n\n # Slice Legendre expansion filter and change name of score\n p1_tally = p1_tally.get_slice(filters=[openmc.LegendreFilter],\n filter_bins=[('P1',)],\n squeeze=True)\n p1_tally._scores = ['scatter-1']\n\n # Compute total cross section\n total_xs = self.tallies['total'] / self.tallies['flux (tracklength)']\n\n # Compute transport correction term\n trans_corr = p1_tally / self.tallies['flux (analog)']\n\n # Compute the diffusion coefficient\n transport = total_xs - trans_corr\n diff_coef = transport**(-1) / 3.0\n self._xs_tally = diff_coef\n self._compute_xs()\n\n return self._xs_tally\n\nclass AbsorptionXS(MGXS):\n r\"\"\"An absorption multi-group cross section.\n\n Absorption is defined as all reactions that do not produce secondary\n neutrons (disappearance) plus fission reactions.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group absorption cross sections for multi-group neutronics\n calculations. At a minimum, one needs to set the\n :attr:`AbsorptionXS.energy_groups` and :attr:`AbsorptionXS.domain`\n properties. Tallies for the flux and appropriate reaction rates over the\n specified domain are generated automatically via the\n :attr:`AbsorptionXS.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. 
The derived multi-group cross section\n can then be obtained from the :attr:`AbsorptionXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n absorption cross section is calculated as:\n\n .. math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\sigma_a (r, E) \\psi (r, E, \\Omega)}{\\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`AbsorptionXS.tally_keys` property and\n values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. 
This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file) and the number of mesh cells for\n 'mesh' domain types.\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = 'absorption'\n\n\nclass CaptureXS(MGXS):\n r\"\"\"A capture multi-group cross section.\n\n The neutron capture reaction rate is defined as the difference between\n OpenMC's 'absorption' and 'fission' reaction rate score types. This includes\n not only radiative capture, but all forms of neutron disappearance aside\n from fission (i.e., MT > 100).\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group capture cross sections for multi-group neutronics\n calculations. At a minimum, one needs to set the\n :attr:`CaptureXS.energy_groups` and :attr:`CaptureXS.domain`\n properties. Tallies for the flux and appropriate reaction rates over the\n specified domain are generated automatically via the\n :attr:`CaptureXS.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`CaptureXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n capture cross section is calculated as:\n\n .. math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\left [ \\sigma_a (r, E) \\psi (r, E, \\Omega) - \\sigma_f (r, E) \\psi (r, E,\n \\Omega) \\right ]}{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`CaptureXS.tally_keys` property and\n values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = 'capture'\n\n @property\n def scores(self):\n return ['flux', 'absorption', 'fission']\n\n @property\n def rxn_rate_tally(self):\n if self._rxn_rate_tally is None:\n self._rxn_rate_tally = \\\n self.tallies['absorption'] - self.tallies['fission']\n self._rxn_rate_tally.sparse = self.sparse\n return self._rxn_rate_tally\n\n\nclass FissionXS(MGXS):\n r\"\"\"A fission multi-group cross section.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group fission cross sections for multi-group neutronics\n calculations. At a minimum, one needs to set the\n :attr:`FissionXS.energy_groups` and :attr:`FissionXS.domain`\n properties. Tallies for the flux and appropriate reaction rates over the\n specified domain are generated automatically via the\n :attr:`FissionXS.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`FissionXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n fission cross section is calculated as:\n\n .. math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\sigma_f (r, E) \\psi (r, E, \\Omega)}{\\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}.\n\n To incorporate the effect of neutron multiplication in the above\n relation, the `nu` parameter can be set to `True`.\n\n This class can also be used to gather a prompt-nu-fission cross section\n (which only includes the contributions from prompt neutrons). This is\n accomplished by setting the :attr:`FissionXS.prompt` attribute to `True`.\n Since the prompt-nu-fission cross section requires neutron multiplication,\n the `nu` parameter will automatically be set to `True` if `prompt` is also\n `True`.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n nu : bool\n If True, the cross section data will include neutron multiplication;\n defaults to False\n prompt : bool\n If true, computes cross sections which only includes prompt neutrons;\n defaults to False which includes prompt and delayed in total. 
Setting\n this to True will also set nu to True\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n nu : bool\n If True, the cross section data will include neutron multiplication\n prompt : bool\n If true, computes cross sections which only includes prompt neutrons\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`FissionXS.tally_keys` property and\n values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None, nu=False,\n prompt=False, by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._nu = False\n self._prompt = False\n self.nu = nu\n self.prompt = prompt\n\n def __deepcopy__(self, memo):\n clone = super().__deepcopy__(memo)\n clone._nu = self.nu\n clone._prompt = self.prompt\n return clone\n\n @property\n def nu(self):\n return self._nu\n\n @property\n def prompt(self):\n return self._prompt\n\n @nu.setter\n def nu(self, nu):\n cv.check_type('nu', nu, bool)\n self._nu = nu\n if not self.prompt:\n if not self.nu:\n self._rxn_type = 'fission'\n else:\n self._rxn_type = 'nu-fission'\n else:\n self._rxn_type = 'prompt-nu-fission'\n\n @prompt.setter\n def prompt(self, prompt):\n cv.check_type('prompt', prompt, bool)\n self._prompt = prompt\n if not self.prompt:\n if not self.nu:\n self._rxn_type = 'fission'\n else:\n self._rxn_type = 'nu-fission'\n else:\n self._rxn_type = 'prompt-nu-fission'\n\n\nclass KappaFissionXS(MGXS):\n r\"\"\"A recoverable fission energy production rate multi-group cross section.\n\n The recoverable energy per fission, :math:`\\kappa`, is defined as the\n fission product kinetic energy, prompt and delayed neutron kinetic energies,\n prompt and delayed :math:`\\gamma`-ray total energies, and the total energy\n released by the delayed :math:`\\beta` particles. The neutrino energy does\n not contribute to this response. The prompt and delayed :math:`\\gamma`-rays\n are assumed to deposit their energy locally.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`KappaFissionXS.energy_groups` and\n :attr:`KappaFissionXS.domain` properties. Tallies for the flux and appropriate\n reaction rates over the specified domain are generated automatically via the\n :attr:`KappaFissionXS.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`KappaFissionXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n recoverable fission energy production rate cross section is calculated as:\n\n .. 
math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\kappa\\sigma_f (r, E) \\psi (r, E, \\Omega)}{\\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`KappaFissionXS.tally_keys` property and\n values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. 
This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = 'kappa-fission'\n\n\nclass ScatterXS(MGXS):\n r\"\"\"A scattering multi-group cross section.\n\n The scattering cross section is defined as the difference between the total\n and absorption cross sections.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`ScatterXS.energy_groups` and\n :attr:`ScatterXS.domain` properties. Tallies for the flux and\n appropriate reaction rates over the specified domain are generated\n automatically via the :attr:`ScatterXS.tallies` property, which can\n then be appended to a :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`ScatterXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n scattering cross section is calculated as:\n\n .. math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\left [ \\sigma_t (r, E) \\psi (r, E, \\Omega) - \\sigma_a (r, E) \\psi (r, E,\n \\Omega) \\right ]}{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}.\n\n To incorporate the effect of scattering multiplication from (n,xn)\n reactions in the above relation, the `nu` parameter can be set to `True`.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n nu : bool\n If True, the cross section data will include neutron multiplication;\n defaults to False\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n nu : bool\n If True, the cross section data will include neutron multiplication\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`ScatterXS.tally_keys` property and\n values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1, nu=False):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self.nu = nu\n\n def __deepcopy__(self, memo):\n clone = super().__deepcopy__(memo)\n clone._nu = self.nu\n return clone\n\n @property\n def nu(self):\n return self._nu\n\n @nu.setter\n def nu(self, nu):\n cv.check_type('nu', nu, bool)\n self._nu = nu\n if not nu:\n self._rxn_type = 'scatter'\n else:\n self._rxn_type = 'nu-scatter'\n self._estimator = 'analog'\n self._valid_estimators = ['analog']\n\n\nclass ArbitraryXS(MGXS):\n r\"\"\"A multi-group cross section for an arbitrary reaction type.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group total cross sections for multi-group neutronics calculations.\n At a minimum, one needs to set the :attr:`ArbitraryXS.energy_groups` and\n :attr:`ArbitraryXS.domain` properties. Tallies for the flux and appropriate\n reaction rates over the specified domain are generated automatically via the\n :attr:`ArbitraryXS.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`ArbitraryXS.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n requested cross section is calculated as:\n\n .. math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\sigma_X (r, E) \\psi (r, E, \\Omega)}{\\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}\n\n where :math:`\\sigma_X` is the requested reaction type of interest.\n\n Parameters\n ----------\n rxn_type : str\n Reaction type (e.g., '(n,2n)', '(n,Xt)', etc.)\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., '(n,2n)', '(n,Xt)', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`TotalXS.tally_keys` property and values\n are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, rxn_type, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n cv.check_value(\"rxn_type\", rxn_type, ARBITRARY_VECTOR_TYPES)\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = rxn_type\n\n\nclass ArbitraryMatrixXS(MatrixMGXS):\n r\"\"\"A multi-group matrix cross section for an arbitrary reaction type.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`ArbitraryMatrixXS.energy_groups` and\n :attr:`ArbitraryMatrixXS.domain` properties. Tallies for the flux and\n appropriate reaction rates over the specified domain are generated\n automatically via the :attr:`ArbitraryMatrixXS.tallies` property, which can\n then be appended to a :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`ArbitraryMatrixXS.xs_tally` property.\n\n For a spatial domain :math:`V`, incoming energy group\n :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,\n the fission production is calculated as:\n\n .. math::\n\n \\begin{aligned}\n \\langle \\sigma_{X,g'\\rightarrow g} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega' \\int_{E_{g'}}^{E_{g'-1}} dE' \\int_{E_g}^{E_{g-1}} dE\n \\; \\chi(E) \\sigma_X (r, E') \\psi(r, E', \\Omega')\\\\\n \\langle \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega) \\\\\n \\sigma_{X,g'\\rightarrow g} &= \\frac{\\langle \\sigma_{X,g'\\rightarrow\n g} \\phi \\rangle}{\\langle \\phi \\rangle}\n \\end{aligned}\n\n where :math:`\\sigma_X` is the requested reaction type of interest.\n\n Parameters\n ----------\n rxn_type : str\n Reaction type (e.g., '(n,2n)', '(n,nta)', etc.). Valid names have\n neutrons as a product.\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`NuFissionMatrixXS.tally_keys`\n property and values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, rxn_type, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1):\n cv.check_value(\"rxn_type\", rxn_type, ARBITRARY_MATRIX_TYPES)\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = rxn_type.split(\" \")[0]\n self._estimator = 'analog'\n self._valid_estimators = ['analog']\n\n\nclass ScatterMatrixXS(MatrixMGXS):\n r\"\"\"A scattering matrix multi-group cross section with the cosine of the\n change-in-angle represented as one or more Legendre moments or a histogram.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`ScatterMatrixXS.energy_groups` and\n :attr:`ScatterMatrixXS.domain` properties. Tallies for the flux and\n appropriate reaction rates over the specified domain are generated\n automatically via the :attr:`ScatterMatrixXS.tallies` property, which can\n then be appended to a :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`ScatterMatrixXS.xs_tally` property.\n\n For a spatial domain :math:`V`, incoming energy group\n :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,\n the Legendre scattering moments are calculated as:\n\n .. math::\n\n \\begin{aligned}\n \\langle \\sigma_{s,\\ell,g'\\rightarrow g} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega' \\int_{E_{g'}}^{E_{g'-1}} dE' \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; P_\\ell (\\Omega \\cdot \\Omega') \\sigma_s (r, E'\n \\rightarrow E, \\Omega' \\cdot \\Omega) \\psi(r, E', \\Omega')\\\\\n \\langle \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega) \\\\\n \\sigma_{s,\\ell,g'\\rightarrow g} &= \\frac{\\langle\n \\sigma_{s,\\ell,g'\\rightarrow g} \\phi \\rangle}{\\langle \\phi \\rangle}\n \\end{aligned}\n\n If the order is zero and a :math:`P_0` transport-correction is applied\n (default), the scattering matrix elements are:\n\n .. math::\n\n \\sigma_{s,g'\\rightarrow g} = \\frac{\\langle \\sigma_{s,0,g'\\rightarrow g}\n \\phi \\rangle - \\delta_{gg'} \\sum_{g''} \\langle \\sigma_{s,1,g''\\rightarrow\n g} \\phi \\rangle}{\\langle \\phi \\rangle}\n\n To incorporate the effect of neutron multiplication from (n,xn) reactions\n in the above relation, the `nu` parameter can be set to `True`.\n\n An alternative form of the scattering matrix is computed when the\n `formulation` property is set to 'consistent' rather than the default\n of 'simple'. 
This formulation computes the scattering matrix multi-group\n cross section as the product of the scatter cross section and\n group-to-group scattering probabilities.\n\n Unlike the default 'simple' formulation, the 'consistent' formulation\n is computed from the groupwise scattering cross section which uses a\n tracklength estimator. This ensures that reaction rate balance is exactly\n preserved with a :class:`TotalXS` computed using a tracklength estimator.\n\n For a scattering probability matrix :math:`P_{s,\\ell,g'\\rightarrow g}` and\n scattering cross section :math:`\\sigma_s (r, E)` for incoming energy group\n :math:`[E_{g'},E_{g'-1}]` and outgoing energy group :math:`[E_g,E_{g-1}]`,\n the Legendre scattering moments are calculated as:\n\n .. math::\n\n \\sigma_{s,\\ell,g'\\rightarrow g} = \\sigma_s (r, E) \\times\n P_{s,\\ell,g'\\rightarrow g}\n\n To incorporate the effect of neutron multiplication from (n,xn) reactions\n in the 'consistent' scattering matrix, the `nu` parameter can be set to `True`\n such that the Legendre scattering moments are calculated as:\n\n .. math::\n\n \\sigma_{s,\\ell,g'\\rightarrow g} = \\upsilon_{g'\\rightarrow g} \\times\n \\sigma_s (r, E) \\times P_{s,\\ell,g'\\rightarrow g}\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : int, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : int, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n nu : bool\n If True, the cross section data will include neutron multiplication;\n defaults to False\n\n Attributes\n ----------\n formulation : 'simple' or 'consistent'\n The calculation approach to use ('simple' by default). The 'simple'\n formulation simply divides the group-to-group scattering rates by\n the groupwise flux, each computed from analog tally estimators. The\n 'consistent' formulation multiplies the groupwise scattering rates\n by the group-to-group scatter probability matrix, the former computed\n from tracklength tallies and the latter computed from analog tallies.\n The 'consistent' formulation is designed to better conserve reaction\n rate balance with the total and absorption cross sections computed\n using tracklength tally estimators.\n correction : 'P0' or None\n Apply the P0 correction to scattering matrices if set to 'P0'; this is\n used only if :attr:`ScatterMatrixXS.scatter_format` is 'legendre'\n scatter_format : {'legendre', or 'histogram'}\n Representation of the angular scattering distribution (default is\n 'legendre')\n legendre_order : int\n The highest Legendre moment in the scattering matrix; this is used if\n :attr:`ScatterMatrixXS.scatter_format` is 'legendre'. (default is 0)\n histogram_bins : int\n The number of equally-spaced bins for the histogram representation of\n the angular scattering distribution; this is used if\n :attr:`ScatterMatrixXS.scatter_format` is 'histogram'. 
(default is 16)\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n nu : bool\n If True, the cross section data will include neutron multiplication\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : int\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : int\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`ScatterMatrixXS.tally_keys` property\n and values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1, nu=False):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._formulation = 'simple'\n self._correction = 'P0'\n self._scatter_format = SCATTER_LEGENDRE\n self._legendre_order = 0\n self._histogram_bins = 16\n self._estimator = 'analog'\n self._valid_estimators = ['analog']\n self.nu = nu\n\n def __deepcopy__(self, memo):\n clone = super().__deepcopy__(memo)\n clone._formulation = self.formulation\n clone._correction = self.correction\n clone._scatter_format = self.scatter_format\n clone._legendre_order = self.legendre_order\n clone._histogram_bins = self.histogram_bins\n clone._nu = self.nu\n return clone\n\n @property\n def _dont_squeeze(self):\n \"\"\"Create a tuple of axes which should not be removed during the get_xs\n process\n \"\"\"\n if self.num_polar > 1 or self.num_azimuthal > 1:\n if self.scatter_format == SCATTER_HISTOGRAM:\n return (0, 1, 3, 4, 5)\n else:\n return (0, 1, 3, 4)\n else:\n if self.scatter_format == SCATTER_HISTOGRAM:\n return (1, 2, 3)\n else:\n return (1, 2)\n\n @property\n def formulation(self):\n return self._formulation\n\n @property\n def correction(self):\n return self._correction\n\n @property\n def scatter_format(self):\n return self._scatter_format\n\n @property\n def legendre_order(self):\n return self._legendre_order\n\n @property\n def histogram_bins(self):\n return self._histogram_bins\n\n @property\n def nu(self):\n return self._nu\n\n @property\n def scores(self):\n\n if self.formulation == 'simple':\n scores = ['flux', self.rxn_type]\n\n else:\n # Add scores for groupwise scattering cross section\n scores = ['flux', 'scatter']\n\n # Add scores for group-to-group scattering probability matrix\n # these scores also contain the angular information, whether it be\n # Legendre expansion or histogram bins\n scores.append('scatter')\n\n # Add scores for multiplicity matrix; scatter info for the\n # denominator will come from the previous score\n if self.nu:\n scores.append('nu-scatter')\n\n # Add scores for transport correction\n if self.correction == 'P0' and self.legendre_order == 0:\n scores.extend([self.rxn_type, 'flux'])\n\n return scores\n\n @property\n def tally_keys(self):\n if self.formulation == 'simple':\n return super().tally_keys\n else:\n # Add keys for groupwise scattering cross section\n tally_keys = ['flux (tracklength)', 'scatter']\n\n # Add keys for group-to-group scattering probability matrix\n tally_keys.append('scatter matrix')\n\n # Add keys for multiplicity matrix\n if self.nu:\n tally_keys.extend(['nu-scatter'])\n\n # Add keys for transport correction\n if self.correction == 'P0' and self.legendre_order == 0:\n tally_keys.extend(['correction', 'flux (analog)'])\n\n return tally_keys\n\n @property\n def estimator(self):\n if self.formulation == 'simple':\n return self._estimator\n else:\n # Add estimators for groupwise scattering cross section\n estimators = ['tracklength', 'tracklength']\n\n # Add estimators for group-to-group 
scattering probabilities\n estimators.append('analog')\n\n # Add estimators for multiplicity matrix\n if self.nu:\n estimators.extend(['analog'])\n\n # Add estimators for transport correction\n if self.correction == 'P0' and self.legendre_order == 0:\n estimators.extend(['analog', 'analog'])\n\n return estimators\n\n @property\n def filters(self):\n if self.formulation == 'simple':\n group_edges = self.energy_groups.group_edges\n energy = openmc.EnergyFilter(group_edges)\n energyout = openmc.EnergyoutFilter(group_edges)\n\n if self.scatter_format == SCATTER_LEGENDRE:\n if self.correction == 'P0' and self.legendre_order == 0:\n angle_filter = openmc.LegendreFilter(order=1)\n else:\n angle_filter = \\\n openmc.LegendreFilter(order=self.legendre_order)\n elif self.scatter_format == SCATTER_HISTOGRAM:\n bins = np.linspace(-1., 1., num=self.histogram_bins + 1,\n endpoint=True)\n angle_filter = openmc.MuFilter(bins)\n filters = [[energy], [energy, energyout, angle_filter]]\n\n else:\n group_edges = self.energy_groups.group_edges\n energy = openmc.EnergyFilter(group_edges)\n energyout = openmc.EnergyoutFilter(group_edges)\n\n # Groupwise scattering cross section\n filters = [[energy], [energy]]\n\n # Group-to-group scattering probability matrix\n if self.scatter_format == SCATTER_LEGENDRE:\n angle_filter = openmc.LegendreFilter(order=self.legendre_order)\n elif self.scatter_format == SCATTER_HISTOGRAM:\n bins = np.linspace(-1., 1., num=self.histogram_bins + 1,\n endpoint=True)\n angle_filter = openmc.MuFilter(bins)\n filters.append([energy, energyout, angle_filter])\n\n # Multiplicity matrix\n if self.nu:\n filters.extend([[energy, energyout]])\n\n # Add filters for transport correction\n if self.correction == 'P0' and self.legendre_order == 0:\n filters.extend([[energyout, openmc.LegendreFilter(1)],\n [energy]])\n\n return self._add_angle_filters(filters)\n\n @property\n def rxn_rate_tally(self):\n\n if self._rxn_rate_tally is None:\n\n if self.formulation == 'simple':\n if self.scatter_format == SCATTER_LEGENDRE:\n # If using P0 correction subtract P1 scatter from the diag.\n if self.correction == 'P0' and self.legendre_order == 0:\n scatter_p0 = self.tallies[self.rxn_type].get_slice(\n filters=[openmc.LegendreFilter],\n filter_bins=[('P0',)])\n scatter_p1 = self.tallies[self.rxn_type].get_slice(\n filters=[openmc.LegendreFilter],\n filter_bins=[('P1',)])\n\n # Set the Legendre order of these tallies to be 0\n # so they can be subtracted\n legendre = openmc.LegendreFilter(order=0)\n scatter_p0.filters[-1] = legendre\n scatter_p1.filters[-1] = legendre\n\n scatter_p1 = scatter_p1.summation(\n filter_type=openmc.EnergyFilter,\n remove_filter=True)\n\n energy_filter = \\\n scatter_p0.find_filter(openmc.EnergyFilter)\n\n # Transform scatter-p1 into an energyin/out matrix\n # to match scattering matrix shape for tally arithmetic\n energy_filter = copy.deepcopy(energy_filter)\n scatter_p1 = \\\n scatter_p1.diagonalize_filter(energy_filter, 1)\n\n self._rxn_rate_tally = scatter_p0 - scatter_p1\n\n # Otherwise, extract scattering moment reaction rate Tally\n else:\n self._rxn_rate_tally = self.tallies[self.rxn_type]\n elif self.scatter_format == SCATTER_HISTOGRAM:\n # Extract scattering rate distribution tally\n self._rxn_rate_tally = self.tallies[self.rxn_type]\n\n self._rxn_rate_tally.sparse = self.sparse\n\n else:\n msg = 'The reaction rate tally is poorly defined' \\\n ' for the consistent formulation'\n raise NotImplementedError(msg)\n\n return self._rxn_rate_tally\n\n @property\n def 
xs_tally(self):\n if self._xs_tally is None:\n if self.tallies is None:\n msg = 'Unable to get xs_tally since tallies have ' \\\n 'not been loaded from a statepoint'\n raise ValueError(msg)\n\n # Use super class method\n if self.formulation == 'simple':\n self._xs_tally = MGXS.xs_tally.fget(self)\n\n else:\n # Compute scattering probability matrixS\n tally_key = 'scatter matrix'\n\n # Compute normalization factor summed across outgoing energies\n if self.scatter_format == SCATTER_LEGENDRE:\n norm = self.tallies[tally_key].get_slice(\n scores=['scatter'],\n filters=[openmc.LegendreFilter],\n filter_bins=[('P0',)], squeeze=True)\n\n # Compute normalization factor summed across outgoing mu bins\n elif self.scatter_format == SCATTER_HISTOGRAM:\n norm = self.tallies[tally_key].get_slice(\n scores=['scatter'])\n norm = norm.summation(\n filter_type=openmc.MuFilter, remove_filter=True)\n norm = norm.summation(filter_type=openmc.EnergyoutFilter,\n remove_filter=True)\n\n # Compute groupwise scattering cross section\n self._xs_tally = self.tallies['scatter'] * \\\n self.tallies[tally_key] / norm / \\\n self.tallies['flux (tracklength)']\n\n # Override the nuclides for tally arithmetic\n self._xs_tally.nuclides = self.tallies['scatter'].nuclides\n\n # Multiply by the multiplicity matrix\n if self.nu:\n numer = self.tallies['nu-scatter']\n # Get the denominator\n if self.scatter_format == SCATTER_LEGENDRE:\n denom = self.tallies[tally_key].get_slice(\n scores=['scatter'],\n filters=[openmc.LegendreFilter],\n filter_bins=[('P0',)], squeeze=True)\n\n # Compute normalization factor summed across mu bins\n elif self.scatter_format == SCATTER_HISTOGRAM:\n denom = self.tallies[tally_key].get_slice(\n scores=['scatter'])\n\n # Sum across all mu bins\n denom = denom.summation(\n filter_type=openmc.MuFilter, remove_filter=True)\n\n self._xs_tally *= (numer / denom)\n\n # If using P0 correction subtract scatter-1 from the diagonal\n if self.correction == 'P0' and self.legendre_order == 0:\n scatter_p1 = self.tallies['correction'].get_slice(\n filters=[openmc.LegendreFilter], filter_bins=[('P1',)])\n flux = self.tallies['flux (analog)']\n\n # Set the Legendre order of the P1 tally to be P0\n # so it can be subtracted\n legendre = openmc.LegendreFilter(order=0)\n scatter_p1.filters[-1] = legendre\n\n # Transform scatter-p1 tally into an energyin/out matrix\n # to match scattering matrix shape for tally arithmetic\n energy_filter = flux.find_filter(openmc.EnergyFilter)\n energy_filter = copy.deepcopy(energy_filter)\n scatter_p1 = scatter_p1.diagonalize_filter(energy_filter, 1)\n\n # Compute the trasnport correction term\n correction = scatter_p1 / flux\n\n # Override the nuclides for tally arithmetic\n correction.nuclides = scatter_p1.nuclides\n\n # Set xs_tally to be itself with only P0 data\n self._xs_tally = self._xs_tally.get_slice(\n filters=[openmc.LegendreFilter], filter_bins=[('P0',)])\n # Tell xs_tally that it is P0\n legendre_xs_tally = \\\n self._xs_tally.find_filter(openmc.LegendreFilter)\n legendre_xs_tally.order = 0\n\n # And subtract the P1 correction from the P0 matrix\n self._xs_tally -= correction\n\n self._compute_xs()\n\n # Force the angle filter to be the last filter\n if self.scatter_format == SCATTER_HISTOGRAM:\n angle_filter = self._xs_tally.find_filter(openmc.MuFilter)\n else:\n angle_filter = \\\n self._xs_tally.find_filter(openmc.LegendreFilter)\n angle_filter_index = self._xs_tally.filters.index(angle_filter)\n # If the angle filter index is not last, then make it last\n if 
angle_filter_index != len(self._xs_tally.filters) - 1:\n energyout_filter = \\\n self._xs_tally.find_filter(openmc.EnergyoutFilter)\n self._xs_tally._swap_filters(energyout_filter,\n angle_filter)\n\n return self._xs_tally\n\n @nu.setter\n def nu(self, nu):\n cv.check_type('nu', nu, bool)\n self._nu = nu\n\n if self.formulation == 'simple':\n if not nu:\n self._rxn_type = 'scatter'\n self._hdf5_key = 'scatter matrix'\n else:\n self._rxn_type = 'nu-scatter'\n self._hdf5_key = 'nu-scatter matrix'\n else:\n if not nu:\n self._rxn_type = 'scatter'\n self._hdf5_key = 'consistent scatter matrix'\n else:\n self._rxn_type = 'nu-scatter'\n self._hdf5_key = 'consistent nu-scatter matrix'\n\n @formulation.setter\n def formulation(self, formulation):\n cv.check_value('formulation', formulation, ('simple', 'consistent'))\n self._formulation = formulation\n\n if self.formulation == 'simple':\n self._valid_estimators = ['analog']\n if not self.nu:\n self._hdf5_key = 'scatter matrix'\n else:\n self._hdf5_key = 'nu-scatter matrix'\n else:\n self._valid_estimators = ['tracklength']\n if not self.nu:\n self._hdf5_key = 'consistent scatter matrix'\n else:\n self._hdf5_key = 'consistent nu-scatter matrix'\n\n @correction.setter\n def correction(self, correction):\n cv.check_value('correction', correction, ('P0', None))\n\n if self.scatter_format == SCATTER_LEGENDRE:\n if correction == 'P0' and self.legendre_order > 0:\n msg = 'The P0 correction will be ignored since the ' \\\n 'scattering order {} is greater than '\\\n 'zero'.format(self.legendre_order)\n warnings.warn(msg)\n elif self.scatter_format == SCATTER_HISTOGRAM:\n msg = 'The P0 correction will be ignored since the ' \\\n 'scatter format is set to histogram'\n warnings.warn(msg)\n\n self._correction = correction\n\n @scatter_format.setter\n def scatter_format(self, scatter_format):\n cv.check_value('scatter_format', scatter_format, MU_TREATMENTS)\n self._scatter_format = scatter_format\n\n @legendre_order.setter\n def legendre_order(self, legendre_order):\n cv.check_type('legendre_order', legendre_order, Integral)\n cv.check_greater_than('legendre_order', legendre_order, 0,\n equality=True)\n cv.check_less_than('legendre_order', legendre_order, _MAX_LEGENDRE,\n equality=True)\n\n if self.scatter_format == SCATTER_LEGENDRE:\n if self.correction == 'P0' and legendre_order > 0:\n msg = 'The P0 correction will be ignored since the ' \\\n 'scattering order {} is greater than '\\\n 'zero'.format(legendre_order)\n warnings.warn(msg, RuntimeWarning)\n self.correction = None\n elif self.scatter_format == SCATTER_HISTOGRAM:\n msg = 'The legendre order will be ignored since the ' \\\n 'scatter format is set to histogram'\n warnings.warn(msg)\n\n self._legendre_order = legendre_order\n\n @histogram_bins.setter\n def histogram_bins(self, histogram_bins):\n cv.check_type('histogram_bins', histogram_bins, Integral)\n cv.check_greater_than('histogram_bins', histogram_bins, 0)\n\n self._histogram_bins = histogram_bins\n\n def load_from_statepoint(self, statepoint):\n \"\"\"Extracts tallies in an OpenMC StatePoint with the data needed to\n compute multi-group cross sections.\n\n This method is needed to compute cross section data from tallies\n in an OpenMC StatePoint object.\n\n .. 
note:: The statepoint must be linked with an OpenMC Summary object.\n\n Parameters\n ----------\n statepoint : openmc.StatePoint\n An OpenMC StatePoint object with tally data\n\n Raises\n ------\n ValueError\n When this method is called with a statepoint that has not been\n linked with a summary object.\n\n \"\"\"\n\n # Clear any tallies previously loaded from a statepoint\n if self.loaded_sp:\n self._tallies = None\n self._xs_tally = None\n self._rxn_rate_tally = None\n self._loaded_sp = False\n\n super().load_from_statepoint(statepoint)\n\n def get_slice(self, nuclides=[], in_groups=[], out_groups=[],\n legendre_order='same'):\n \"\"\"Build a sliced ScatterMatrix for the specified nuclides and\n energy groups.\n\n This method constructs a new MGXS to encapsulate a subset of the data\n represented by this MGXS. The subset of data to include in the tally\n slice is determined by the nuclides and energy groups specified in\n the input parameters.\n\n Parameters\n ----------\n nuclides : list of str\n A list of nuclide name strings\n (e.g., ['U235', 'U238']; default is [])\n in_groups : list of int\n A list of incoming energy group indices starting at 1 for the high\n energies (e.g., [1, 2, 3]; default is [])\n out_groups : list of int\n A list of outgoing energy group indices starting at 1 for the high\n energies (e.g., [1, 2, 3]; default is [])\n legendre_order : int or 'same'\n The highest Legendre moment in the sliced MGXS. If order is 'same'\n then the sliced MGXS will have the same Legendre moments as the\n original MGXS (default). If order is an integer less than the\n original MGXS' order, then only those Legendre moments up to that\n order will be included in the sliced MGXS.\n\n Returns\n -------\n openmc.mgxs.MatrixMGXS\n A new MatrixMGXS which encapsulates the subset of data requested\n for the nuclide(s) and/or energy group(s) requested in the\n parameters.\n\n \"\"\"\n\n # Call super class method and null out derived tallies\n slice_xs = super().get_slice(nuclides, in_groups)\n slice_xs._rxn_rate_tally = None\n slice_xs._xs_tally = None\n\n # Slice the Legendre order if needed\n if legendre_order != 'same' and self.scatter_format == SCATTER_LEGENDRE:\n cv.check_type('legendre_order', legendre_order, Integral)\n cv.check_less_than('legendre_order', legendre_order,\n self.legendre_order, equality=True)\n slice_xs.legendre_order = legendre_order\n\n # Slice the scattering tally\n filter_bins = [tuple(['P{}'.format(i)\n for i in range(self.legendre_order + 1)])]\n slice_xs.tallies[self.rxn_type] = \\\n slice_xs.tallies[self.rxn_type].get_slice(\n filters=[openmc.LegendreFilter], filter_bins=filter_bins)\n\n # Slice outgoing energy groups if needed\n if len(out_groups) != 0:\n filter_bins = []\n for group in out_groups:\n group_bounds = self.energy_groups.get_group_bounds(group)\n filter_bins.append(group_bounds)\n filter_bins = [tuple(filter_bins)]\n\n # Slice each of the tallies across energyout groups\n for tally_type, tally in slice_xs.tallies.items():\n if tally.contains_filter(openmc.EnergyoutFilter):\n tally_slice = tally.get_slice(\n filters=[openmc.EnergyoutFilter],\n filter_bins=filter_bins)\n slice_xs.tallies[tally_type] = tally_slice\n\n slice_xs.sparse = self.sparse\n return slice_xs\n\n def get_xs(self, in_groups='all', out_groups='all',\n subdomains='all', nuclides='all', moment='all',\n xs_type='macro', order_groups='increasing',\n row_column='inout', value='mean', squeeze=True):\n r\"\"\"Returns an array of multi-group cross sections.\n\n This method constructs a 5D 
NumPy array for the requested\n multi-group cross section data for one or more subdomains\n (1st dimension), energy groups in (2nd dimension), energy groups out\n (3rd dimension), nuclides (4th dimension), and moments/histograms\n (5th dimension).\n\n .. note:: The scattering moments are not multiplied by the\n :math:`(2\\ell+1)/2` prefactor in the expansion of the\n scattering source into Legendre moments in the neutron\n transport equation.\n\n Parameters\n ----------\n in_groups : Iterable of Integral or 'all'\n Incoming energy groups of interest. Defaults to 'all'.\n out_groups : Iterable of Integral or 'all'\n Outgoing energy groups of interest. Defaults to 'all'.\n subdomains : Iterable of Integral or 'all'\n Subdomain IDs of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n A list of nuclide name strings (e.g., ['U235', 'U238']). The\n special string 'all' will return the cross sections for all nuclides\n in the spatial domain. The special string 'sum' will return the\n cross section summed over all nuclides. Defaults to 'all'.\n moment : int or 'all'\n The scattering matrix moment to return. All moments will be\n returned if the moment is 'all' (default); otherwise, a specific\n moment will be returned.\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n order_groups: {'increasing', 'decreasing'}\n Return the cross section indexed according to increasing or\n decreasing energy groups (decreasing or increasing energies).\n Defaults to 'increasing'.\n row_column: {'inout', 'outin'}\n Return the cross section indexed first by incoming group and\n second by outgoing group ('inout'), or vice versa ('outin').\n Defaults to 'inout'.\n value : {'mean', 'std_dev', 'rel_err'}\n A string for the type of value to return. Defaults to 'mean'.\n squeeze : bool\n A boolean representing whether to eliminate the extra dimensions\n of the multi-dimensional array to be returned. 
Defaults to True.\n\n Returns\n -------\n numpy.ndarray\n A NumPy array of the multi-group cross section indexed in the order\n each group and subdomain is listed in the parameters.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n\n cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # FIXME: Unable to get microscopic xs for mesh domain because the mesh\n # cells do not know the nuclide densities in each mesh cell.\n if self.domain_type == 'mesh' and xs_type == 'micro':\n msg = 'Unable to get micro xs for mesh domain since the mesh ' \\\n 'cells do not know the nuclide densities in each mesh cell.'\n raise ValueError(msg)\n\n filters = []\n filter_bins = []\n\n # Construct a collection of the domain filter bins\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral, max_depth=3)\n filters.append(_DOMAIN_TO_FILTER[self.domain_type])\n subdomain_bins = []\n for subdomain in subdomains:\n subdomain_bins.append(subdomain)\n filter_bins.append(tuple(subdomain_bins))\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(in_groups, str):\n cv.check_iterable_type('groups', in_groups, Integral)\n filters.append(openmc.EnergyFilter)\n energy_bins = []\n for group in in_groups:\n energy_bins.append(\n (self.energy_groups.get_group_bounds(group),))\n filter_bins.append(tuple(energy_bins))\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(out_groups, str):\n cv.check_iterable_type('groups', out_groups, Integral)\n for group in out_groups:\n filters.append(openmc.EnergyoutFilter)\n filter_bins.append((self.energy_groups.get_group_bounds(group),))\n\n # Construct CrossScore for requested scattering moment\n if self.scatter_format == SCATTER_LEGENDRE:\n if moment != 'all':\n cv.check_type('moment', moment, Integral)\n cv.check_greater_than('moment', moment, 0, equality=True)\n cv.check_less_than(\n 'moment', moment, self.legendre_order, equality=True)\n filters.append(openmc.LegendreFilter)\n filter_bins.append(('P{}'.format(moment),))\n num_angle_bins = 1\n else:\n num_angle_bins = self.legendre_order + 1\n else:\n num_angle_bins = self.histogram_bins\n\n # Construct a collection of the nuclides to retrieve from the xs tally\n if self.by_nuclide:\n if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:\n query_nuclides = self.get_nuclides()\n else:\n query_nuclides = nuclides\n else:\n query_nuclides = ['total']\n\n # Use tally summation if user requested the sum for all nuclides\n scores = self.xs_tally.scores\n if nuclides == 'sum' or nuclides == ['sum']:\n xs_tally = self.xs_tally.summation(nuclides=query_nuclides)\n xs = xs_tally.get_values(scores=scores, filters=filters,\n filter_bins=filter_bins, value=value)\n else:\n xs = self.xs_tally.get_values(scores=scores, filters=filters,\n filter_bins=filter_bins,\n nuclides=query_nuclides, value=value)\n\n # Divide by atom number densities for microscopic cross sections\n if xs_type == 'micro' and self._divide_by_density:\n if self.by_nuclide:\n densities = self.get_nuclide_densities(nuclides)\n else:\n densities = self.get_nuclide_densities('sum')\n if value == 'mean' or value == 'std_dev':\n xs /= densities[np.newaxis, :, np.newaxis]\n\n # Convert and nans to zero\n xs = np.nan_to_num(xs)\n\n if in_groups == 'all':\n num_in_groups = self.num_groups\n 
else:\n num_in_groups = len(in_groups)\n\n if out_groups == 'all':\n num_out_groups = self.num_groups\n else:\n num_out_groups = len(out_groups)\n\n # Reshape tally data array with separate axes for domain and energy\n # Accomodate the polar and azimuthal bins if needed\n num_subdomains = int(xs.shape[0] / (num_angle_bins * num_in_groups *\n num_out_groups * self.num_polar *\n self.num_azimuthal))\n if self.num_polar > 1 or self.num_azimuthal > 1:\n new_shape = (self.num_polar, self.num_azimuthal,\n num_subdomains, num_in_groups, num_out_groups,\n num_angle_bins)\n new_shape += xs.shape[1:]\n xs = np.reshape(xs, new_shape)\n\n # Transpose the scattering matrix if requested by user\n if row_column == 'outin':\n xs = np.swapaxes(xs, 3, 4)\n\n # Reverse data if user requested increasing energy groups since\n # tally data is stored in order of increasing energies\n if order_groups == 'increasing':\n xs = xs[:, :, :, ::-1, ::-1, ...]\n else:\n new_shape = (num_subdomains, num_in_groups, num_out_groups,\n num_angle_bins)\n\n new_shape += xs.shape[1:]\n xs = np.reshape(xs, new_shape)\n\n # Transpose the scattering matrix if requested by user\n if row_column == 'outin':\n xs = np.swapaxes(xs, 1, 2)\n\n # Reverse data if user requested increasing energy groups since\n # tally data is stored in order of increasing energies\n if order_groups == 'increasing':\n xs = xs[:, ::-1, ::-1, ...]\n\n if squeeze:\n # We want to squeeze out everything but the angles, in_groups,\n # out_groups, and, if needed, num_angle_bins dimension. These must\n # not be squeezed so 1-group, 1-angle problems have the correct\n # shape.\n xs = self._squeeze_xs(xs)\n return xs\n\n def get_pandas_dataframe(self, groups='all', nuclides='all',\n xs_type='macro', paths=False):\n \"\"\"Build a Pandas DataFrame for the MGXS data.\n\n This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but\n renames the columns with terminology appropriate for cross section data.\n\n Parameters\n ----------\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n The nuclides of the cross-sections to include in the dataframe. This\n may be a list of nuclide name strings (e.g., ['U235', 'U238']).\n The special string 'all' will include the cross sections for all\n nuclides in the spatial domain. The special string 'sum' will\n include the cross sections summed over all nuclides. 
Defaults to\n 'all'.\n xs_type: {'macro', 'micro'}\n Return macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n paths : bool, optional\n Construct columns for distribcell tally filters (default is True).\n The geometric information in the Summary object is embedded into\n a Multi-index column with a geometric \"path\" to each distribcell\n instance.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame for the cross section data.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n\n # Build the dataframe using the parent class method\n df = super().get_pandas_dataframe(groups, nuclides, xs_type,\n paths=paths)\n\n # If the matrix is P0, remove the legendre column\n if self.scatter_format == SCATTER_LEGENDRE and self.legendre_order == 0:\n df = df.drop(axis=1, labels=['legendre'])\n\n return df\n\n def print_xs(self, subdomains='all', nuclides='all',\n xs_type='macro', moment=0):\n \"\"\"Prints a string representation for the multi-group cross section.\n\n Parameters\n ----------\n subdomains : Iterable of Integral or 'all'\n The subdomain IDs of the cross sections to include in the report.\n Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n The nuclides of the cross-sections to include in the report. This\n may be a list of nuclide name strings (e.g., ['U235', 'U238']).\n The special string 'all' will report the cross sections for all\n nuclides in the spatial domain. The special string 'sum' will\n report the cross sections summed over all nuclides. Defaults to\n 'all'.\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section in units of cm^-1 or barns.\n Defaults to 'macro'.\n moment : int\n The scattering moment to print (default is 0)\n\n \"\"\"\n\n # Construct a collection of the subdomains to report\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral)\n elif self.domain_type == 'distribcell':\n subdomains = np.arange(self.num_subdomains, dtype=np.int)\n elif self.domain_type == 'mesh':\n subdomains = list(self.domain.indices)\n else:\n subdomains = [self.domain.id]\n\n # Construct a collection of the nuclides to report\n if self.by_nuclide:\n if nuclides == 'all':\n nuclides = self.get_nuclides()\n if nuclides == 'sum':\n nuclides = ['sum']\n else:\n cv.check_iterable_type('nuclides', nuclides, str)\n else:\n nuclides = ['sum']\n\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n if self.correction != 'P0' and self.scatter_format == SCATTER_LEGENDRE:\n rxn_type = '{0} (P{1})'.format(self.rxn_type, moment)\n else:\n rxn_type = self.rxn_type\n\n # Build header for string with type and domain info\n string = 'Multi-Group XS\\n'\n string += '{0: <16}=\\t{1}\\n'.format('\\tReaction Type', rxn_type)\n string += '{0: <16}=\\t{1}\\n'.format('\\tDomain Type', self.domain_type)\n string += '{0: <16}=\\t{1}\\n'.format('\\tDomain ID', self.domain.id)\n\n # Generate the header for an individual XS\n xs_header = '\\tCross Sections [{0}]:'.format(self.get_units(xs_type))\n\n # If cross section data has not been computed, only print string header\n if self.tallies is None:\n print(string)\n return\n\n string += '{0: <16}\\n'.format('\\tEnergy Groups:')\n template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]\\n'\n\n # Loop over energy groups ranges\n for group in range(1, self.num_groups + 1):\n bounds = self.energy_groups.get_group_bounds(group)\n string += template.format('', group, 
bounds[0], bounds[1])\n\n # Set polar and azimuthal bins if necessary\n if self.num_polar > 1 or self.num_azimuthal > 1:\n pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,\n endpoint=True)\n azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,\n endpoint=True)\n\n # Loop over all subdomains\n for subdomain in subdomains:\n\n if self.domain_type == 'distribcell' or self.domain_type == 'mesh':\n string += '{0: <16}=\\t{1}\\n'.format('\\tSubdomain', subdomain)\n\n # Loop over all Nuclides\n for nuclide in nuclides:\n\n # Build header for nuclide type\n if xs_type != 'sum':\n string += '{0: <16}=\\t{1}\\n'.format('\\tNuclide', nuclide)\n\n # Build header for cross section type\n string += '{0: <16}\\n'.format(xs_header)\n\n average_xs = self.get_xs(nuclides=[nuclide],\n subdomains=[subdomain],\n xs_type=xs_type, value='mean',\n moment=moment)\n rel_err_xs = self.get_xs(nuclides=[nuclide],\n subdomains=[subdomain],\n xs_type=xs_type, value='rel_err',\n moment=moment)\n rel_err_xs = rel_err_xs * 100.\n\n # Create a function for printing group and histogram data\n def print_groups_and_histogram(avg_xs, err_xs, num_groups,\n num_histogram_bins):\n template = '{0: <12}Group {1} -> Group {2}:\\t\\t'\n to_print = \"\"\n # Loop over incoming/outgoing energy groups ranges\n for in_group in range(1, num_groups + 1):\n for out_group in range(1, num_groups + 1):\n to_print += template.format('', in_group,\n out_group)\n if num_histogram_bins > 0:\n for i in range(num_histogram_bins):\n to_print += \\\n '\\n{0: <16}Histogram Bin {1}:{2: <6}'.format(\n '', i + 1, '')\n to_print += '{0:.2e} +/- {1:.2e}%'.format(\n avg_xs[in_group - 1, out_group - 1, i],\n err_xs[in_group - 1, out_group - 1, i])\n to_print += '\\n'\n else:\n to_print += '{0:.2e} +/- {1:.2e}%'.format(\n avg_xs[in_group - 1, out_group - 1],\n err_xs[in_group - 1, out_group - 1])\n to_print += '\\n'\n to_print += '\\n'\n return to_print\n\n # Set the number of histogram bins\n if self.scatter_format == SCATTER_HISTOGRAM:\n num_mu_bins = self.histogram_bins\n else:\n num_mu_bins = 0\n\n if self.num_polar > 1 or self.num_azimuthal > 1:\n # Loop over polar, azi, and in/out energy group ranges\n for pol in range(len(pol_bins) - 1):\n pol_low, pol_high = pol_bins[pol: pol + 2]\n for azi in range(len(azi_bins) - 1):\n azi_low, azi_high = azi_bins[azi: azi + 2]\n string += \\\n '\\t\\tPolar Angle: [{0:5f} - {1:5f}]'.format(\n pol_low, pol_high) + \\\n '\\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(\n azi_low, azi_high) + '\\n'\n string += print_groups_and_histogram(\n average_xs[pol, azi, ...],\n rel_err_xs[pol, azi, ...], self.num_groups,\n num_mu_bins)\n string += '\\n'\n else:\n string += print_groups_and_histogram(\n average_xs, rel_err_xs, self.num_groups, num_mu_bins)\n string += '\\n'\n string += '\\n'\n string += '\\n'\n\n print(string)\n\n\nclass MultiplicityMatrixXS(MatrixMGXS):\n r\"\"\"The scattering multiplicity matrix.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`MultiplicityMatrixXS.energy_groups` and\n :attr:`MultiplicityMatrixXS.domain` properties. 
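For example, a minimal construction sketch (assuming a hypothetical\n    ``fuel_cell`` :class:`openmc.Cell` from your geometry and an illustrative\n    two-group structure) might look like::\n\n        # hypothetical two-group structure and cell; adapt to your model\n        groups = openmc.mgxs.EnergyGroups([0., 0.625, 20.0e6])\n        mult = openmc.mgxs.MultiplicityMatrixXS(\n            domain=fuel_cell, domain_type='cell', groups=groups)\n        tallies = openmc.Tallies()\n        tallies += list(mult.tallies.values())\n\n    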
Tallies for the flux and\n appropriate reaction rates over the specified domain are generated\n automatically via the :attr:`MultiplicityMatrixXS.tallies` property, which\n can then be appended to a :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`MultiplicityMatrixXS.xs_tally`\n property.\n\n For a spatial domain :math:`V`, incoming energy group\n :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,\n the multiplicity is calculated as:\n\n .. math::\n\n \\begin{aligned}\n \\langle \\upsilon \\sigma_{s,g'\\rightarrow g} \\phi \\rangle &= \\int_{r \\in\n D} dr \\int_{4\\pi} d\\Omega' \\int_{E_{g'}}^{E_{g'-1}} dE' \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\sum_i \\upsilon_i \\sigma_i (r, E' \\rightarrow\n E, \\Omega' \\cdot \\Omega) \\psi(r, E', \\Omega') \\\\\n \\langle \\sigma_{s,g'\\rightarrow g} \\phi \\rangle &= \\int_{r \\in\n D} dr \\int_{4\\pi} d\\Omega' \\int_{E_{g'}}^{E_{g'-1}} dE' \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\sum_i \\upsilon_i \\sigma_i (r, E' \\rightarrow\n E, \\Omega' \\cdot \\Omega) \\psi(r, E', \\Omega') \\\\\n \\upsilon_{g'\\rightarrow g} &= \\frac{\\langle \\upsilon\n \\sigma_{s,g'\\rightarrow g} \\rangle}{\\langle \\sigma_{s,g'\\rightarrow g}\n \\rangle}\n \\end{aligned}\n\n where :math:`\\upsilon_i` is the multiplicity for the :math:`i`-th reaction.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`MultiplicityMatrixXS.tally_keys`\n property and values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. 
This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n # Store whether or not the number density should be removed for microscopic\n # values of this data; since a multiplicity matrix should reflect the\n # multiplication relative to 1, this class will not divide by density\n # for microscopic data\n _divide_by_density = False\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = 'multiplicity matrix'\n self._estimator = 'analog'\n self._valid_estimators = ['analog']\n\n @property\n def scores(self):\n scores = ['nu-scatter', 'scatter']\n return scores\n\n @property\n def filters(self):\n # Create the non-domain specific Filters for the Tallies\n group_edges = self.energy_groups.group_edges\n energy = openmc.EnergyFilter(group_edges)\n energyout = openmc.EnergyoutFilter(group_edges)\n filters = [[energy, energyout], [energy, energyout]]\n\n return self._add_angle_filters(filters)\n\n @property\n def rxn_rate_tally(self):\n if self._rxn_rate_tally is None:\n self._rxn_rate_tally = self.tallies['nu-scatter']\n self._rxn_rate_tally.sparse = self.sparse\n return self._rxn_rate_tally\n\n @property\n def xs_tally(self):\n\n if self._xs_tally is None:\n scatter = self.tallies['scatter']\n\n # Compute the multiplicity\n self._xs_tally = self.rxn_rate_tally / scatter\n super()._compute_xs()\n\n return self._xs_tally\n\n\nclass ScatterProbabilityMatrix(MatrixMGXS):\n r\"\"\"The group-to-group scattering probability matrix.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`ScatterProbabilityMatrix.energy_groups`\n and :attr:`ScatterProbabilityMatrix.domain` properties. Tallies for the\n appropriate reaction rates over the specified domain are generated\n automatically via the :attr:`ScatterProbabilityMatrix.tallies` property,\n which can then be appended to a :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`ScatterProbabilityMatrix.xs_tally`\n property.\n\n For a spatial domain :math:`V`, incoming energy group\n :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,\n the group-to-group scattering probabilities are calculated as:\n\n .. 
math::\n\n \\begin{aligned}\n \\langle \\sigma_{s,g'\\rightarrow g} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega' \\int_{E_{g'}}^{E_{g'-1}} dE' \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\sigma_{s} (r, E' \\rightarrow E, \\Omega'\n \\cdot \\Omega) \\psi(r, E', \\Omega')\\\\\n \\langle \\sigma_{s,0,g'} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega' \\int_{E_{g'}}^{E_{g'-1}} dE' \\int_{4\\pi} d\\Omega\n \\int_{0}^{\\infty} dE \\; \\sigma_s (r, E'\n \\rightarrow E, \\Omega' \\cdot \\Omega) \\psi(r, E', \\Omega')\\\\\n P_{s,g'\\rightarrow g} &= \\frac{\\langle\n \\sigma_{s,g'\\rightarrow g} \\phi \\rangle}{\\langle\n \\sigma_{s,g'} \\phi \\rangle}\n \\end{aligned}\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`ScatterProbabilityMatrix.tally_keys`\n property and values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. 
This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n # Store whether or not the number density should be removed for microscopic\n # values of this data; since this probability matrix is always normalized\n # to 1.0, this density division is not necessary\n _divide_by_density = False\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide,\n name, num_polar, num_azimuthal)\n self._rxn_type = 'scatter'\n self._hdf5_key = 'scatter probability matrix'\n self._estimator = 'analog'\n self._valid_estimators = ['analog']\n\n @property\n def scores(self):\n return [self.rxn_type]\n\n @property\n def filters(self):\n # Create the non-domain specific Filters for the Tallies\n group_edges = self.energy_groups.group_edges\n energy = openmc.EnergyFilter(group_edges)\n energyout = openmc.EnergyoutFilter(group_edges)\n filters = [[energy, energyout]]\n return self._add_angle_filters(filters)\n\n @property\n def rxn_rate_tally(self):\n if self._rxn_rate_tally is None:\n self._rxn_rate_tally = self.tallies[self.rxn_type]\n self._rxn_rate_tally.sparse = self.sparse\n return self._rxn_rate_tally\n\n @property\n def xs_tally(self):\n\n if self._xs_tally is None:\n norm = self.rxn_rate_tally.get_slice(scores=[self.rxn_type])\n norm = norm.summation(\n filter_type=openmc.EnergyoutFilter, remove_filter=True)\n\n # Compute the group-to-group probabilities\n self._xs_tally = self.tallies[self.rxn_type] / norm\n super()._compute_xs()\n\n return self._xs_tally\n\n\nclass NuFissionMatrixXS(MatrixMGXS):\n r\"\"\"A fission production matrix multi-group cross section.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`NuFissionMatrixXS.energy_groups` and\n :attr:`NuFissionMatrixXS.domain` properties. 
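For example, a minimal sketch (assuming a hypothetical ``fuel_cell``\n    :class:`openmc.Cell` and an illustrative two-group structure) might look\n    like::\n\n        # hypothetical cell and group structure; adapt to your model\n        groups = openmc.mgxs.EnergyGroups([0., 0.625, 20.0e6])\n        nu_fiss_matrix = openmc.mgxs.NuFissionMatrixXS(\n            domain=fuel_cell, domain_type='cell', groups=groups,\n            prompt=False)  # prompt=True tallies prompt neutrons only\n        tallies = openmc.Tallies()\n        tallies += list(nu_fiss_matrix.tallies.values())\n\n    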
Tallies for the flux and\n appropriate reaction rates over the specified domain are generated\n automatically via the :attr:`NuFissionMatrixXS.tallies` property, which can\n then be appended to a :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`NuFissionMatrixXS.xs_tally` property.\n\n For a spatial domain :math:`V`, incoming energy group\n :math:`[E_{g'},E_{g'-1}]`, and outgoing energy group :math:`[E_g,E_{g-1}]`,\n the fission production is calculated as:\n\n .. math::\n\n \\begin{aligned}\n \\langle \\nu\\sigma_{f,g'\\rightarrow g} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega' \\int_{E_{g'}}^{E_{g'-1}} dE' \\int_{E_g}^{E_{g-1}} dE\n \\; \\chi(E) \\nu\\sigma_f (r, E') \\psi(r, E', \\Omega')\\\\\n \\langle \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi} d\\Omega\n \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega) \\\\\n \\nu\\sigma_{f,g'\\rightarrow g} &= \\frac{\\langle \\nu\\sigma_{f,g'\\rightarrow\n g} \\phi \\rangle}{\\langle \\phi \\rangle}\n \\end{aligned}\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n prompt : bool\n If true, computes cross sections which only includes prompt neutrons;\n defaults to False which includes prompt and delayed in total\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n prompt : bool\n If true, computes cross sections which only includes prompt neutrons\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally 
estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`NuFissionMatrixXS.tally_keys`\n property and values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1, prompt=False):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n if not prompt:\n self._rxn_type = 'nu-fission'\n self._hdf5_key = 'nu-fission matrix'\n else:\n self._rxn_type = 'prompt-nu-fission'\n self._hdf5_key = 'prompt-nu-fission matrix'\n self._estimator = 'analog'\n self._valid_estimators = ['analog']\n self.prompt = prompt\n\n @property\n def prompt(self):\n return self._prompt\n\n @prompt.setter\n def prompt(self, prompt):\n cv.check_type('prompt', prompt, bool)\n self._prompt = prompt\n\n def __deepcopy__(self, memo):\n clone = super().__deepcopy__(memo)\n clone._prompt = self.prompt\n return clone\n\n\nclass Chi(MGXS):\n r\"\"\"The fission spectrum.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group cross sections for multi-group neutronics calculations. At a\n minimum, one needs to set the :attr:`Chi.energy_groups` and\n :attr:`Chi.domain` properties. Tallies for the flux and appropriate reaction\n rates over the specified domain are generated automatically via the\n :attr:`Chi.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. 
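For example, a post-processing sketch (assuming a ``chi`` instance of this\n    class whose tallies were included in the run, and a hypothetical\n    ``statepoint.100.h5`` file) might look like::\n\n        # hypothetical statepoint filename; use the file your run produced\n        sp = openmc.StatePoint('statepoint.100.h5')\n        chi.load_from_statepoint(sp)\n        spectrum = chi.get_xs()  # per-group fission spectrum array\n\n    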
The derived multi-group cross section\n can then be obtained from the :attr:`Chi.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n fission spectrum is calculated as:\n\n .. math::\n\n \\begin{aligned}\n \\langle \\nu\\sigma_{f,g' \\rightarrow g} \\phi \\rangle &= \\int_{r \\in V} dr\n \\int_{4\\pi} d\\Omega' \\int_0^\\infty dE' \\int_{E_g}^{E_{g-1}} dE \\; \\chi(E)\n \\nu\\sigma_f (r, E') \\psi(r, E', \\Omega')\\\\\n \\langle \\nu\\sigma_f \\phi \\rangle &= \\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega' \\int_0^\\infty dE' \\int_0^\\infty dE \\; \\chi(E) \\nu\\sigma_f (r,\n E') \\psi(r, E', \\Omega') \\\\\n \\chi_g &= \\frac{\\langle \\nu\\sigma_{f,g' \\rightarrow g} \\phi \\rangle}\n {\\langle \\nu\\sigma_f \\phi \\rangle}\n \\end{aligned}\n\n This class can also be used to gather a prompt-chi (which only includes the\n outgoing energy spectrum of prompt neutrons). This is accomplished by\n setting the :attr:`Chi.prompt` attribute to `True`.\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n prompt : bool\n If true, computes cross sections which only includes prompt neutrons;\n defaults to False which includes prompt and delayed in total\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n prompt : bool\n If true, computes cross sections which only includes prompt neutrons\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : 'analog'\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. 
The keys\n are strings listed in the :attr:`Chi.tally_keys` property and values are\n instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file).\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n # Store whether or not the number density should be removed for microscopic\n # values of this data; since this chi data is normalized to 1.0, the\n # data should not be divided by the number density\n _divide_by_density = False\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n prompt=False, by_nuclide=False, name='', num_polar=1,\n num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n if not prompt:\n self._rxn_type = 'chi'\n else:\n self._rxn_type = 'chi-prompt'\n self._estimator = 'analog'\n self._valid_estimators = ['analog']\n self.prompt = prompt\n\n def __deepcopy__(self, memo):\n clone = super().__deepcopy__(memo)\n clone._prompt = self.prompt\n return clone\n\n @property\n def prompt(self):\n return self._prompt\n\n @property\n def _dont_squeeze(self):\n \"\"\"Create a tuple of axes which should not be removed during the get_xs\n process\n \"\"\"\n if self.num_polar > 1 or self.num_azimuthal > 1:\n return (0, 1, 3)\n else:\n return (1,)\n\n @property\n def scores(self):\n if not self.prompt:\n return ['nu-fission', 'nu-fission']\n else:\n return ['prompt-nu-fission', 'prompt-nu-fission']\n\n @property\n def filters(self):\n # Create the non-domain specific Filters for the Tallies\n group_edges = self.energy_groups.group_edges\n energyout = openmc.EnergyoutFilter(group_edges)\n energyin = openmc.EnergyFilter([group_edges[0], group_edges[-1]])\n filters = [[energyin], [energyout]]\n\n return self._add_angle_filters(filters)\n\n @property\n def tally_keys(self):\n return ['nu-fission-in', 'nu-fission-out']\n\n @property\n def rxn_rate_tally(self):\n if self._rxn_rate_tally is None:\n self._rxn_rate_tally = self.tallies['nu-fission-out']\n self._rxn_rate_tally.sparse = self.sparse\n return self._rxn_rate_tally\n\n @property\n def xs_tally(self):\n\n if self._xs_tally is None:\n nu_fission_in = 
self.tallies['nu-fission-in']\n\n # Remove coarse energy filter to keep it out of tally arithmetic\n energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)\n nu_fission_in.remove_filter(energy_filter)\n\n # Compute chi\n self._xs_tally = self.rxn_rate_tally / nu_fission_in\n\n # Add the coarse energy filter back to the nu-fission tally\n nu_fission_in.filters.append(energy_filter)\n\n return self._xs_tally\n\n @prompt.setter\n def prompt(self, prompt):\n cv.check_type('prompt', prompt, bool)\n self._prompt = prompt\n if not self.prompt:\n self._rxn_type = 'nu-fission'\n self._hdf5_key = 'chi'\n else:\n self._rxn_type = 'prompt-nu-fission'\n self._hdf5_key = 'chi-prompt'\n\n def get_homogenized_mgxs(self, other_mgxs):\n \"\"\"Construct a homogenized mgxs with other MGXS objects.\n\n Parameters\n ----------\n other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS\n The MGXS to homogenize with this one.\n\n Returns\n -------\n openmc.mgxs.MGXS\n A new homogenized MGXS\n\n Raises\n ------\n ValueError\n If the other_mgxs is of a different type.\n\n \"\"\"\n\n return self._get_homogenized_mgxs(other_mgxs, 'nu-fission-in')\n\n def get_slice(self, nuclides=[], groups=[]):\n \"\"\"Build a sliced Chi for the specified nuclides and energy groups.\n\n This method constructs a new MGXS to encapsulate a subset of the data\n represented by this MGXS. The subset of data to include in the tally\n slice is determined by the nuclides and energy groups specified in\n the input parameters.\n\n Parameters\n ----------\n nuclides : list of str\n A list of nuclide name strings\n (e.g., ['U235', 'U238']; default is [])\n groups : list of Integral\n A list of energy group indices starting at 1 for the high energies\n (e.g., [1, 2, 3]; default is [])\n\n Returns\n -------\n openmc.mgxs.MGXS\n A new MGXS which encapsulates the subset of data requested\n for the nuclide(s) and/or energy group(s) requested in the\n parameters.\n\n \"\"\"\n\n # Temporarily remove energy filter from nu-fission-in since its\n # group structure will work in super MGXS.get_slice(...) 
method\n nu_fission_in = self.tallies['nu-fission-in']\n energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)\n nu_fission_in.remove_filter(energy_filter)\n\n # Call super class method and null out derived tallies\n slice_xs = super().get_slice(nuclides, groups)\n slice_xs._rxn_rate_tally = None\n slice_xs._xs_tally = None\n\n # Slice energy groups if needed\n if len(groups) != 0:\n filter_bins = []\n for group in groups:\n group_bounds = self.energy_groups.get_group_bounds(group)\n filter_bins.append(group_bounds)\n filter_bins = [tuple(filter_bins)]\n\n # Slice nu-fission-out tally along energyout filter\n nu_fission_out = slice_xs.tallies['nu-fission-out']\n tally_slice = nu_fission_out.get_slice(\n filters=[openmc.EnergyoutFilter], filter_bins=filter_bins)\n slice_xs._tallies['nu-fission-out'] = tally_slice\n\n # Add energy filter back to nu-fission-in tallies\n self.tallies['nu-fission-in'].add_filter(energy_filter)\n slice_xs._tallies['nu-fission-in'].add_filter(energy_filter)\n\n slice_xs.sparse = self.sparse\n return slice_xs\n\n def merge(self, other):\n \"\"\"Merge another Chi with this one\n\n If results have been loaded from a statepoint, then Chi are only\n mergeable along one and only one of energy groups or nuclides.\n\n Parameters\n ----------\n other : openmc.mgxs.MGXS\n MGXS to merge with this one\n\n Returns\n -------\n merged_mgxs : openmc.mgxs.MGXS\n Merged MGXS\n \"\"\"\n\n if not self.can_merge(other):\n raise ValueError('Unable to merge a Chi MGXS')\n\n # Create deep copy of tally to return as merged tally\n merged_mgxs = copy.deepcopy(self)\n merged_mgxs._derived = True\n merged_mgxs._rxn_rate_tally = None\n merged_mgxs._xs_tally = None\n\n # Merge energy groups\n if self.energy_groups != other.energy_groups:\n merged_groups = self.energy_groups.merge(other.energy_groups)\n merged_mgxs.energy_groups = merged_groups\n\n # Merge nuclides\n if self.nuclides != other.nuclides:\n\n # The nuclides must be mutually exclusive\n for nuclide in self.nuclides:\n if nuclide in other.nuclides:\n msg = 'Unable to merge a Chi MGXS with shared nuclides'\n raise ValueError(msg)\n\n # Concatenate lists of nuclides for the merged MGXS\n merged_mgxs.nuclides = self.nuclides + other.nuclides\n\n # Merge tallies\n for tally_key in self.tallies:\n merged_tally = self.tallies[tally_key].merge(other.tallies[tally_key])\n merged_mgxs.tallies[tally_key] = merged_tally\n\n return merged_mgxs\n\n def get_xs(self, groups='all', subdomains='all', nuclides='all',\n xs_type='macro', order_groups='increasing',\n value='mean', squeeze=True, **kwargs):\n \"\"\"Returns an array of the fission spectrum.\n\n This method constructs a 3D NumPy array for the requested\n multi-group cross section data for one or more subdomains\n (1st dimension), energy groups (2nd dimension), and nuclides\n (3rd dimension).\n\n Parameters\n ----------\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n subdomains : Iterable of Integral or 'all'\n Subdomain IDs of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n A list of nuclide name strings (e.g., ['U235', 'U238']). The\n special string 'all' will return the cross sections for all nuclides\n in the spatial domain. The special string 'sum' will return the\n cross section summed over all nuclides. Defaults to 'all'.\n xs_type: {'macro', 'micro'}\n This parameter is not relevant for chi but is included here to\n mirror the parent MGXS.get_xs(...) 
class method\n order_groups: {'increasing', 'decreasing'}\n Return the cross section indexed according to increasing or\n decreasing energy groups (decreasing or increasing energies).\n Defaults to 'increasing'.\n value : {'mean', 'std_dev', 'rel_err'}\n A string for the type of value to return. Defaults to 'mean'.\n squeeze : bool\n A boolean representing whether to eliminate the extra dimensions\n of the multi-dimensional array to be returned. Defaults to True.\n\n Returns\n -------\n numpy.ndarray\n A NumPy array of the multi-group cross section indexed in the order\n each group, subdomain and nuclide is listed in the parameters.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n\n \"\"\"\n\n cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # FIXME: Unable to get microscopic xs for mesh domain because the mesh\n # cells do not know the nuclide densities in each mesh cell.\n if self.domain_type == 'mesh' and xs_type == 'micro':\n msg = 'Unable to get micro xs for mesh domain since the mesh ' \\\n 'cells do not know the nuclide densities in each mesh cell.'\n raise ValueError(msg)\n\n filters = []\n filter_bins = []\n\n # Construct a collection of the domain filter bins\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral,\n max_depth=3)\n filters.append(_DOMAIN_TO_FILTER[self.domain_type])\n subdomain_bins = []\n for subdomain in subdomains:\n subdomain_bins.append(subdomain)\n filter_bins.append(tuple(subdomain_bins))\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(groups, str):\n cv.check_iterable_type('groups', groups, Integral)\n filters.append(openmc.EnergyoutFilter)\n energy_bins = []\n for group in groups:\n energy_bins.append(\n (self.energy_groups.get_group_bounds(group),))\n filter_bins.append(tuple(energy_bins))\n\n # If chi was computed for each nuclide in the domain\n if self.by_nuclide:\n\n # Get the sum as the fission source weighted average chi for all\n # nuclides in the domain\n if nuclides == 'sum' or nuclides == ['sum']:\n\n # Retrieve the fission production tallies\n nu_fission_in = self.tallies['nu-fission-in']\n nu_fission_out = self.tallies['nu-fission-out']\n\n # Sum out all nuclides\n nuclides = self.get_nuclides()\n nu_fission_in = nu_fission_in.summation(nuclides=nuclides)\n nu_fission_out = nu_fission_out.summation(nuclides=nuclides)\n\n # Remove coarse energy filter to keep it out of tally arithmetic\n energy_filter = nu_fission_in.find_filter(openmc.EnergyFilter)\n nu_fission_in.remove_filter(energy_filter)\n\n # Compute chi and store it as the xs_tally attribute so we can\n # use the generic get_xs(...) 
method\n xs_tally = nu_fission_out / nu_fission_in\n\n # Add the coarse energy filter back to the nu-fission tally\n nu_fission_in.filters.append(energy_filter)\n\n xs = xs_tally.get_values(filters=filters,\n filter_bins=filter_bins, value=value)\n\n # Get chi for all nuclides in the domain\n elif nuclides == 'all':\n nuclides = self.get_nuclides()\n xs = self.xs_tally.get_values(filters=filters,\n filter_bins=filter_bins,\n nuclides=nuclides, value=value)\n\n # Get chi for user-specified nuclides in the domain\n else:\n cv.check_iterable_type('nuclides', nuclides, str)\n xs = self.xs_tally.get_values(filters=filters,\n filter_bins=filter_bins,\n nuclides=nuclides, value=value)\n\n # If chi was computed as an average of nuclides in the domain\n else:\n xs = self.xs_tally.get_values(filters=filters,\n filter_bins=filter_bins, value=value)\n\n # Eliminate the trivial score dimension\n xs = np.squeeze(xs, axis=len(xs.shape) - 1)\n xs = np.nan_to_num(xs)\n\n if groups == 'all':\n num_groups = self.num_groups\n else:\n num_groups = len(groups)\n\n # Reshape tally data array with separate axes for domain and energy\n # Accomodate the polar and azimuthal bins if needed\n num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *\n self.num_azimuthal))\n if self.num_polar > 1 or self.num_azimuthal > 1:\n new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,\n num_groups) + xs.shape[1:]\n else:\n new_shape = (num_subdomains, num_groups) + xs.shape[1:]\n xs = np.reshape(xs, new_shape)\n\n # Reverse data if user requested increasing energy groups since\n # tally data is stored in order of increasing energies\n if order_groups == 'increasing':\n xs = xs[..., ::-1, :]\n\n if squeeze:\n # We want to squeeze out everything but the polar, azimuthal,\n # and energy group data.\n xs = self._squeeze_xs(xs)\n\n return xs\n\n def get_units(self, xs_type='macro'):\n \"\"\"Returns the units of Chi.\n\n This method returns the units of Chi, which is \"%\" for both macro\n and micro xs types.\n\n Parameters\n ----------\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section units.\n Defaults to 'macro'.\n\n Returns\n -------\n str\n A string representing the units of Chi.\n\n \"\"\"\n\n cv.check_value('xs_type', xs_type, ['macro', 'micro'])\n\n # Chi has the same units (%) for both macro and micro\n return '%'\n\n\nclass InverseVelocity(MGXS):\n r\"\"\"An inverse velocity multi-group cross section.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute spatially-homogenized and energy-integrated\n multi-group neutron inverse velocities for multi-group neutronics\n calculations. The units of inverse velocity are seconds per centimeter. At a\n minimum, one needs to set the :attr:`InverseVelocity.energy_groups` and\n :attr:`InverseVelocity.domain` properties. Tallies for the flux and\n appropriate reaction rates over the specified domain are generated\n automatically via the :attr:`InverseVelocity.tallies` property, which can\n then be appended to a :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. 
The derived multi-group cross section\n can then be obtained from the :attr:`InverseVelocity.xs_tally` property.\n\n For a spatial domain :math:`V` and energy group :math:`[E_g,E_{g-1}]`, the\n neutron inverse velocities are calculated by tallying the flux-weighted\n inverse velocity and the flux. The inverse velocity is then the\n flux-weighted inverse velocity divided by the flux:\n\n .. math::\n\n \\frac{\\int_{r \\in V} dr \\int_{4\\pi} d\\Omega \\int_{E_g}^{E_{g-1}} dE \\;\n \\frac{\\psi (r, E, \\Omega)}{v (r, E)}}{\\int_{r \\in V} dr \\int_{4\\pi}\n d\\Omega \\int_{E_g}^{E_{g-1}} dE \\; \\psi (r, E, \\Omega)}\n\n Parameters\n ----------\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n name : str, optional\n Name of the multi-group cross section. Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n num_polar : Integral, optional\n Number of equi-width polar angle bins for angle discretization;\n defaults to one bin\n num_azimuthal : Integral, optional\n Number of equi-width azimuthal angle bins for angle discretization;\n defaults to one bin\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n If true, computes cross sections for each nuclide in domain\n domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n num_polar : Integral\n Number of equi-width polar angle bins for angle discretization\n num_azimuthal : Integral\n Number of equi-width azimuthal angle bins for angle discretization\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'tracklength', 'collision', 'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`InverseVelocity.tally_keys` property\n and values are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is unity for 'material', 'cell' and 'universe'\n domain types. 
This is equal to the number of cell instances\n for 'distribcell' domain types (it is equal to unity prior to loading\n tally data from a statepoint file) and the number of mesh cells for\n 'mesh' domain types.\n num_nuclides : int\n The number of nuclides for which the multi-group cross section is\n being tracked. This is unity if the by_nuclide attribute is False.\n nuclides : Iterable of str or 'sum'\n The optional user-specified nuclides for which to compute cross\n sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides\n are not specified by the user, all nuclides in the spatial domain\n are included. This attribute is 'sum' if by_nuclide is false.\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n\n \"\"\"\n\n # Store whether or not the number density should be removed for microscopic\n # values of this data; since the inverse velocity does not contain number\n # density scaling, we should not remove the number density from microscopic\n # values\n _divide_by_density = False\n\n def __init__(self, domain=None, domain_type=None, groups=None,\n by_nuclide=False, name='', num_polar=1, num_azimuthal=1):\n super().__init__(domain, domain_type, groups, by_nuclide, name,\n num_polar, num_azimuthal)\n self._rxn_type = 'inverse-velocity'\n\n def get_units(self, xs_type='macro'):\n \"\"\"Returns the units of InverseVelocity.\n\n This method returns the units of an InverseVelocity based on a desired\n xs_type.\n\n Parameters\n ----------\n xs_type: {'macro', 'micro'}\n Return the macro or micro cross section units.\n Defaults to 'macro'.\n\n Returns\n -------\n str\n A string representing the units of the InverseVelocity.\n\n \"\"\"\n\n if xs_type == 'macro':\n return 'second/cm'\n else:\n raise ValueError('Unable to return the units of InverseVelocity'\n ' for xs_type other than \"macro\"')\n\n\nclass MeshSurfaceMGXS(MGXS):\n \"\"\"An abstract multi-group cross section for some energy group structure\n on the surfaces of a mesh domain.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute surface- and energy-integrated multi-group cross\n sections for multi-group neutronics calculations.\n\n .. note:: Users should instantiate the subclasses of this abstract class.\n\n .. versionadded:: 0.12.1\n\n Parameters\n ----------\n domain : openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : {'mesh'}\n The domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n Unused in MeshSurfaceMGXS\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n Unused in MeshSurfaceMGXS\n domain : Mesh\n Domain for spatial homogenization\n domain_type : {'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is equal to the number of mesh surfaces times\n two to account for both the incoming and outgoing current from the\n mesh cell surfaces.\n num_nuclides : int\n Unused in MeshSurfaceMGXS\n nuclides : Iterable of str or 'sum'\n Unused in MeshSurfaceMGXS\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None, energy_groups=None,\n by_nuclide=False, name=''):\n super(MeshSurfaceMGXS, self).__init__(domain, domain_type, energy_groups,\n by_nuclide, name)\n self._estimator = ['analog']\n self._valid_estimators = ['analog']\n\n @property\n def scores(self):\n return [self.rxn_type]\n\n @property\n def domain(self):\n return self._domain\n\n @property\n def domain_type(self):\n return self._domain_type\n\n @domain.setter\n def domain(self, domain):\n cv.check_type('domain', domain, openmc.RegularMesh)\n self._domain = domain\n\n # Assign a domain type\n if self.domain_type is None:\n self._domain_type = 'mesh'\n\n @domain_type.setter\n def domain_type(self, domain_type):\n cv.check_value('domain type', domain_type, 'mesh')\n self._domain_type = domain_type\n\n @property\n def filters(self):\n group_edges = self.energy_groups.group_edges\n energy_filter = openmc.EnergyFilter(group_edges)\n mesh = _DOMAIN_TO_FILTER[self.domain_type](self.domain).mesh\n meshsurface_filter = openmc.MeshSurfaceFilter(mesh)\n filters = [[meshsurface_filter, energy_filter]]\n\n return self._add_angle_filters(filters)\n\n @property\n def xs_tally(self):\n if self._xs_tally is None:\n if self.tallies is None:\n msg = 'Unable to get xs_tally since tallies have ' \\\n 'not been loaded from a statepoint'\n 
raise ValueError(msg)\n\n self._xs_tally = self.rxn_rate_tally\n self._compute_xs()\n\n return self._xs_tally\n\n def load_from_statepoint(self, statepoint):\n \"\"\"Extracts tallies in an OpenMC StatePoint with the data needed to\n compute multi-group cross sections.\n\n This method is needed to compute cross section data from tallies\n in an OpenMC StatePoint object.\n\n .. note:: The statepoint must first be linked with a :class:`openmc.Summary`\n object.\n\n Parameters\n ----------\n statepoint : openmc.StatePoint\n An OpenMC StatePoint object with tally data\n Raises\n ------\n ValueError\n When this method is called with a statepoint that has not been\n linked with a summary object.\n \"\"\"\n\n cv.check_type('statepoint', statepoint, openmc.statepoint.StatePoint)\n\n if statepoint.summary is None:\n msg = 'Unable to load data from a statepoint which has not been ' \\\n 'linked with a summary file'\n raise ValueError(msg)\n\n filters= []\n filter_bins = []\n\n # Clear any tallies previously loaded from a statepoint\n if self.loaded_sp:\n self._tallies = None\n self._xs_tally = None\n self._rxn_rate_tally = None\n self._loaded_sp = False\n\n # Find, slice and store Tallies from StatePoint\n # The tally slicing is needed if tally merging was used\n for tally_type, tally in self.tallies.items():\n sp_tally = statepoint.get_tally(\n tally.scores, tally.filters, tally.nuclides,\n estimator=tally.estimator, exact_filters=True)\n sp_tally = sp_tally.get_slice(\n tally.scores, filters, filter_bins, tally.nuclides)\n sp_tally.sparse = self.sparse\n self.tallies[tally_type] = sp_tally\n\n self._loaded_sp = True\n\n def get_xs(self, groups='all', subdomains='all', nuclides='all',\n xs_type='macro', order_groups='increasing',\n value='mean', squeeze=True, **kwargs):\n r\"\"\"Returns an array of multi-group cross sections.\n\n This method constructs a 3D NumPy array for the requested\n multi-group cross section data for one or more subdomains\n (1st dimension), energy groups (2nd dimension), and nuclides\n (3rd dimension).\n\n Parameters\n ----------\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n subdomains : Iterable of Integral or 'all'\n Subdomain IDs of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n Unused in MeshSurfaceMGXS, its value will be ignored. The nuclides\n dimension of the resultant array will always have a length of 1.\n xs_type: {'macro'}\n The 'macro'/'micro' distinction does not apply to MeshSurfaceMGXS.\n The calculation of a 'micro' xs_type is omited in this class.\n order_groups: {'increasing', 'decreasing'}\n Return the cross section indexed according to increasing or\n decreasing energy groups (decreasing or increasing energies).\n Defaults to 'increasing'.\n value : {'mean', 'std_dev', 'rel_err'}\n A string for the type of value to return. Defaults to 'mean'.\n squeeze : bool\n A boolean representing whether to eliminate the extra dimensions\n of the multi-dimensional array to be returned. 
Defaults to True.\n Returns\n -------\n numpy.ndarray\n A NumPy array of the multi-group cross section indexed in the order\n each group, subdomain and nuclide is listed in the parameters.\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n \"\"\"\n\n cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])\n cv.check_value('xs_type', xs_type, ['macro'])\n\n filters = []\n filter_bins = []\n\n # Construct a collection of the domain filter bins\n if not isinstance(subdomains, str):\n cv.check_iterable_type('subdomains', subdomains, Integral,\n max_depth=3)\n\n filters.append(_DOMAIN_TO_FILTER[self.domain_type])\n subdomain_bins = []\n for subdomain in subdomains:\n subdomain_bins.append(subdomain)\n filter_bins.append(tuple(subdomain_bins))\n\n xs = self.xs_tally.get_values(filters=filters,\n filter_bins=filter_bins, value=value)\n\n # Construct list of energy group bounds tuples for all requested groups\n if not isinstance(groups, str):\n cv.check_iterable_type('groups', groups, Integral)\n filters.append(openmc.EnergyFilter)\n energy_bins = []\n for group in groups:\n energy_bins.append(\n (self.energy_groups.get_group_bounds(group),))\n filter_bins.append(tuple(energy_bins))\n\n # Eliminate the trivial score dimension\n xs = np.squeeze(xs, axis=len(xs.shape) - 1)\n xs = np.nan_to_num(xs)\n\n if groups == 'all':\n num_groups = self.num_groups\n else:\n num_groups = len(groups)\n\n # Reshape tally data array with separate axes for domain and energy\n # Accomodate the polar and azimuthal bins if needed\n num_surfaces = 4 * self.domain.n_dimension\n num_subdomains = int(xs.shape[0] / (num_groups * self.num_polar *\n self.num_azimuthal * num_surfaces))\n if self.num_polar > 1 or self.num_azimuthal > 1:\n new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,\n num_groups, num_surfaces)\n else:\n new_shape = (num_subdomains, num_groups, num_surfaces)\n new_shape += xs.shape[1:]\n new_xs = np.zeros(new_shape)\n for cell in range(num_subdomains):\n for g in range(num_groups):\n for s in range(num_surfaces):\n new_xs[cell,g,s] = \\\n xs[cell*num_surfaces*num_groups+s*num_groups+g]\n xs = new_xs\n\n # Reverse data if user requested increasing energy groups since\n # tally data is stored in order of increasing energies\n if order_groups == 'increasing':\n xs = xs[..., ::-1, :, :]\n\n if squeeze:\n # We want to squeeze out everything but the polar, azimuthal,\n # and energy group data.\n xs = self._squeeze_xs(xs)\n\n return xs\n\n def get_pandas_dataframe(self, groups='all', nuclides='all',\n xs_type='macro', paths=True):\n \"\"\"Build a Pandas DataFrame for the MGXS data.\n\n This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but\n renames the columns with terminology appropriate for cross section data.\n\n Parameters\n ----------\n groups : Iterable of Integral or 'all'\n Energy groups of interest. Defaults to 'all'.\n nuclides : Iterable of str or 'all' or 'sum'\n Unused in MeshSurfaceMGXS, its value will be ignored. 
The nuclides\n dimension of the resultant array will always have a length of 1.\n xs_type: {'macro'}\n 'micro' unused in MeshSurfaceMGXS.\n paths : bool, optional\n Construct columns for distribcell tally filters (default is True).\n The geometric information in the Summary object is embedded into\n a Multi-index column with a geometric \"path\" to each distribcell\n instance.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame for the cross section data.\n\n Raises\n ------\n ValueError\n When this method is called before the multi-group cross section is\n computed from tally data.\n \"\"\"\n\n if not isinstance(groups, str):\n cv.check_iterable_type('groups', groups, Integral)\n cv.check_value('xs_type', xs_type, ['macro'])\n\n df = self.xs_tally.get_pandas_dataframe(paths=paths)\n\n # Remove the score column since it is homogeneous and redundant\n df = df.drop('score', axis=1, level=0)\n\n # Convert azimuthal, polar, energy in and energy out bin values in to\n # bin indices\n columns = self._df_convert_columns_to_bins(df)\n\n # Select out those groups the user requested\n if not isinstance(groups, str):\n if 'group in' in df:\n df = df[df['group in'].isin(groups)]\n if 'group out' in df:\n df = df[df['group out'].isin(groups)]\n\n mesh_str = 'mesh {0}'.format(self.domain.id)\n col_key = (mesh_str, 'surf')\n surfaces = df.pop(col_key)\n df.insert(len(self.domain.dimension), col_key, surfaces)\n if len(self.domain.dimension) == 1:\n df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'surf')]\n + columns, inplace=True)\n elif len(self.domain.dimension) == 2:\n df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),\n (mesh_str, 'surf')] + columns, inplace=True)\n elif len(self.domain.dimension) == 3:\n df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),\n (mesh_str, 'z'), (mesh_str, 'surf')] + columns, inplace=True)\n\n return df\n\n\nclass Current(MeshSurfaceMGXS):\n r\"\"\"A current multi-group cross section.\n\n This class can be used for both OpenMC input generation and tally data\n post-processing to compute surface- and energy-integrated\n multi-group current cross sections for multi-group neutronics calculations. At\n a minimum, one needs to set the :attr:`Current.energy_groups` and\n :attr:`Current.domain` properties. Tallies for the appropriate\n reaction rates over the specified domain are generated automatically via the\n :attr:`Current.tallies` property, which can then be appended to a\n :class:`openmc.Tallies` instance.\n\n For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the\n necessary data to compute multi-group cross sections from a\n :class:`openmc.StatePoint` instance. The derived multi-group cross section\n can then be obtained from the :attr:`Current.xs_tally` property.\n For a spatial domain :math:`S` and energy group :math:`[E_g,E_{g-1}]`, the\n total cross section is calculated as:\n\n .. math::\n \\frac{\\int_{r \\in S} dS \\int_{E_g}^{E_{g-1}} dE \\;\n J(r, E)}{\\int_{r \\in S} dS \\int_{E_g}^{E_{g-1}} dE}.\n\n .. versionadded:: 0.12.1\n\n Parameters\n ----------\n domain : openmc.RegularMesh\n The domain for spatial homogenization\n domain_type : ('mesh'}\n The domain type for spatial homogenization\n groups : openmc.mgxs.EnergyGroups\n The energy group structure for energy condensation\n by_nuclide : bool\n Unused in MeshSurfaceMGXS\n name : str, optional\n Name of the multi-group cross section. 
Used as a label to identify\n tallies in OpenMC 'tallies.xml' file.\n\n Attributes\n ----------\n name : str, optional\n Name of the multi-group cross section\n rxn_type : str\n Reaction type (e.g., 'total', 'nu-fission', etc.)\n by_nuclide : bool\n Unused in MeshSurfaceMGXS\n domain : openmc.RegularMesh\n Domain for spatial homogenization\n domain_type : {'mesh'}\n Domain type for spatial homogenization\n energy_groups : openmc.mgxs.EnergyGroups\n Energy group structure for energy condensation\n tally_trigger : openmc.Trigger\n An (optional) tally precision trigger given to each tally used to\n compute the cross section\n scores : list of str\n The scores in each tally used to compute the multi-group cross section\n filters : list of openmc.Filter\n The filters in each tally used to compute the multi-group cross section\n tally_keys : list of str\n The keys into the tallies dictionary for each tally used to compute\n the multi-group cross section\n estimator : {'analog'}\n The tally estimator used to compute the multi-group cross section\n tallies : collections.OrderedDict\n OpenMC tallies needed to compute the multi-group cross section. The keys\n are strings listed in the :attr:`TotalXS.tally_keys` property and values\n are instances of :class:`openmc.Tally`.\n rxn_rate_tally : openmc.Tally\n Derived tally for the reaction rate tally used in the numerator to\n compute the multi-group cross section. This attribute is None\n unless the multi-group cross section has been computed.\n xs_tally : openmc.Tally\n Derived tally for the multi-group cross section. This attribute\n is None unless the multi-group cross section has been computed.\n num_subdomains : int\n The number of subdomains is equal to the number of mesh surfaces times\n two to account for both the incoming and outgoing current from the\n mesh cell surfaces.\n num_nuclides : int\n Unused in MeshSurfaceMGXS\n nuclides : Iterable of str or 'sum'\n Unused in MeshSurfaceMGXS\n sparse : bool\n Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format\n for compressed data storage\n loaded_sp : bool\n Whether or not a statepoint file has been loaded with tally data\n derived : bool\n Whether or not the MGXS is merged from one or more other MGXS\n hdf5_key : str\n The key used to index multi-group cross sections in an HDF5 data store\n \"\"\"\n\n def __init__(self, domain=None, domain_type=None,\n groups=None, by_nuclide=False, name=''):\n super(Current, self).__init__(domain, domain_type,\n groups, by_nuclide, name)\n self._rxn_type = 'current'\n",
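The MGXS classes above share a common workflow: instantiate the cross section with a domain, domain type, and group structure; append the automatically generated tallies to an openmc.Tallies collection; then load a statepoint and query the result with get_xs(). The sketch below illustrates that workflow for Chi and Current. It is a minimal, hedged example, not code from this repository: the fuel material, mesh extents, two-group edges, and statepoint filename are placeholder assumptions.

import openmc
import openmc.mgxs

# Illustrative two-group structure (group edges in eV) -- placeholder values.
groups = openmc.mgxs.EnergyGroups([0.0, 0.625, 20.0e6])

# Hypothetical material standing in for the spatial homogenization domain.
fuel = openmc.Material(name='fuel')

# Fission spectrum MGXS; prompt=False includes prompt and delayed neutrons.
chi = openmc.mgxs.Chi(domain=fuel, domain_type='material', groups=groups,
                      prompt=False)

# Surface current MGXS requires a mesh domain (placeholder extents).
mesh = openmc.RegularMesh()
mesh.dimension = [17, 17, 1]
mesh.lower_left = [-10.71, -10.71, -1.0]
mesh.upper_right = [10.71, 10.71, 1.0]
current = openmc.mgxs.Current(domain=mesh, domain_type='mesh', groups=groups)

# Collect the auto-generated tallies for the OpenMC run.
tallies = openmc.Tallies()
tallies += chi.tallies.values()
tallies += current.tallies.values()
tallies.export_to_xml()

# After the simulation, load tally data and evaluate the results.
# The statepoint should be linked with a summary (summary.h5 alongside it),
# as noted in the MeshSurfaceMGXS docstring; the filename is a placeholder.
with openmc.StatePoint('statepoint.100.h5') as sp:
    chi.load_from_statepoint(sp)
    current.load_from_statepoint(sp)
    chi_g = chi.get_xs()      # normalized fission spectrum per group
    j_g = current.get_xs()    # surface currents per mesh surface and group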
"\"\"\"This module can be used to specify parameters used for coarse mesh finite\ndifference (CMFD) acceleration in OpenMC. CMFD was first proposed by [Smith]_\nand is widely used in accelerating neutron transport problems.\n\nReferences\n----------\n\n.. [Smith] K. Smith, \"Nodal method storage reduction by non-linear\n iteration\", *Trans. Am. Nucl. Soc.*, **44**, 265 (1983).\n\n\"\"\"\n\nfrom collections.abc import Iterable, Mapping\nfrom contextlib import contextmanager\nfrom numbers import Real, Integral\nimport sys\nimport time\nimport warnings\n\nimport h5py\nimport numpy as np\nfrom scipy import sparse\n\nimport openmc.lib\nfrom .checkvalue import (check_type, check_length, check_value,\n check_greater_than, check_less_than)\nfrom .exceptions import OpenMCError\n\n# See if mpi4py module can be imported, define have_mpi global variable\ntry:\n from mpi4py import MPI\n have_mpi = True\nexcept ImportError:\n have_mpi = False\n\n# Maximum/minimum neutron energies\n_ENERGY_MAX_NEUTRON = np.inf\n_ENERGY_MIN_NEUTRON = 0.\n\n# Tolerance for detecting zero flux values\n_TINY_BIT = 1.e-8\n\n# For non-accelerated regions on coarse mesh overlay\n_CMFD_NOACCEL = -1\n\n# Constant to represent a zero flux \"albedo\"\n_ZERO_FLUX = 999.0\n\n# Map that returns index of current direction in numpy current matrix\n_CURRENTS = {\n 'out_left': 0, 'in_left': 1, 'out_right': 2, 'in_right': 3,\n 'out_back': 4, 'in_back': 5, 'out_front': 6, 'in_front': 7,\n 'out_bottom': 8, 'in_bottom': 9, 'out_top': 10, 'in_top': 11\n}\n\n\nclass CMFDMesh:\n \"\"\"A structured Cartesian mesh used for CMFD acceleration.\n\n Attributes\n ----------\n lower_left : Iterable of float\n The lower-left corner of a regular structured mesh. If only two\n coordinates are given, it is assumed that the mesh is an x-y mesh.\n upper_right : Iterable of float\n The upper-right corner of a regular structured mesh. If only two\n coordinates are given, it is assumed that the mesh is an x-y mesh.\n dimension : Iterable of int\n The number of mesh cells in each direction for a regular structured\n mesh.\n width : Iterable of float\n The width of mesh cells in each direction for a regular structured\n mesh\n energy : Iterable of float\n Energy bins in eV, listed in ascending order (e.g. [0.0, 0.625e-1,\n 20.0e6]) for CMFD tallies and acceleration. If no energy bins are\n listed, OpenMC automatically assumes a one energy group calculation\n over the entire energy range.\n albedo : Iterable of float\n Surface ratio of incoming to outgoing partial currents on global\n boundary conditions. They are listed in the following order: -x +x -y\n +y -z +z.\n map : Iterable of int\n An optional acceleration map can be specified to overlay on the coarse\n mesh spatial grid. If this option is used, a ``0`` is used for a\n non-accelerated region and a ``1`` is used for an accelerated region.\n For a simple 4x4 coarse mesh with a 2x2 fuel lattice surrounded by\n reflector, the map is:\n\n ::\n\n [0, 0, 0, 0,\n 0, 1, 1, 0,\n 0, 1, 1, 0,\n 0, 0, 0, 0]\n\n Therefore a 2x2 system of equations is solved rather than a 4x4. This\n is extremely important to use in reflectors as neutrons will not\n contribute to any tallies far away from fission source neutron regions.\n A ``1`` must be used to identify any fission source region.\n mesh_type : str\n Type of structured mesh to use. 
Acceptable values are:\n * \"regular\" - Use RegularMesh to define CMFD mesh\n * \"rectilinear\" - Use RectilinearMesh to define CMFD\n grid : Iterable of Iterable of float\n Grid used to define RectilinearMesh. First dimension must have length\n 3 where grid[0], grid[1], and grid[2] correspond to the x-, y-, and\n z-grids respectively\n\n \"\"\"\n\n def __init__(self):\n self._lower_left = None\n self._upper_right = None\n self._dimension = None\n self._width = None\n self._energy = None\n self._albedo = None\n self._map = None\n self._mesh_type = 'regular'\n self._grid = None\n\n def __repr__(self):\n outstr = type(self).__name__ + '\\n'\n if self._mesh_type == 'regular':\n outstr += (self._get_repr(self._lower_left, \"Lower left\") + \"\\n\" +\n self._get_repr(self._upper_right, \"Upper right\") + \"\\n\" +\n self._get_repr(self._dimension, \"Dimension\") + \"\\n\" +\n self._get_repr(self._width, \"Width\") + \"\\n\" +\n self._get_repr(self._albedo, \"Albedo\"))\n elif self._mesh_type == 'rectilinear':\n outstr += (self._get_repr(self._grid[0], \"X-grid\") + \"\\n\" +\n self._get_repr(self._grid[1], \"Y-grid\") + \"\\n\" +\n self._get_repr(self._grid[2], \"Z-grid\"))\n return outstr\n\n def _get_repr(self, list_var, label):\n outstr = \"\\t{:<11} = \".format(label)\n if list(list_var):\n outstr += \", \".join(str(i) for i in list_var)\n return outstr\n\n @property\n def lower_left(self):\n return self._lower_left\n\n @property\n def upper_right(self):\n return self._upper_right\n\n @property\n def dimension(self):\n return self._dimension\n\n @property\n def width(self):\n return self._width\n\n @property\n def energy(self):\n return self._energy\n\n @property\n def albedo(self):\n return self._albedo\n\n @property\n def map(self):\n return self._map\n\n @property\n def mesh_type(self):\n return self._mesh_type\n\n @property\n def grid(self):\n return self._grid\n\n @lower_left.setter\n def lower_left(self, lower_left):\n check_type('CMFD mesh lower_left', lower_left, Iterable, Real)\n check_length('CMFD mesh lower_left', lower_left, 2, 3)\n self._lower_left = lower_left\n self._display_mesh_warning('regular', 'CMFD mesh lower_left')\n\n @upper_right.setter\n def upper_right(self, upper_right):\n check_type('CMFD mesh upper_right', upper_right, Iterable, Real)\n check_length('CMFD mesh upper_right', upper_right, 2, 3)\n self._upper_right = upper_right\n self._display_mesh_warning('regular', 'CMFD mesh upper_right')\n\n @dimension.setter\n def dimension(self, dimension):\n check_type('CMFD mesh dimension', dimension, Iterable, Integral)\n check_length('CMFD mesh dimension', dimension, 2, 3)\n for d in dimension:\n check_greater_than('CMFD mesh dimension', d, 0)\n self._dimension = dimension\n\n @width.setter\n def width(self, width):\n check_type('CMFD mesh width', width, Iterable, Real)\n check_length('CMFD mesh width', width, 2, 3)\n for w in width:\n check_greater_than('CMFD mesh width', w, 0)\n self._width = width\n self._display_mesh_warning('regular', 'CMFD mesh width')\n\n @energy.setter\n def energy(self, energy):\n check_type('CMFD mesh energy', energy, Iterable, Real)\n for e in energy:\n check_greater_than('CMFD mesh energy', e, 0, True)\n self._energy = energy\n\n @albedo.setter\n def albedo(self, albedo):\n check_type('CMFD mesh albedo', albedo, Iterable, Real)\n check_length('CMFD mesh albedo', albedo, 6)\n for a in albedo:\n check_greater_than('CMFD mesh albedo', a, 0, True)\n check_less_than('CMFD mesh albedo', a, 1, True)\n self._albedo = albedo\n\n @map.setter\n def 
map(self, mesh_map):\n check_type('CMFD mesh map', mesh_map, Iterable, Integral)\n for m in mesh_map:\n check_value('CMFD mesh map', m, [0, 1])\n self._map = mesh_map\n\n @mesh_type.setter\n def mesh_type(self, mesh_type):\n check_value('CMFD mesh type', mesh_type, ['regular', 'rectilinear'])\n self._mesh_type = mesh_type\n\n @grid.setter\n def grid(self, grid):\n grid_length = 3\n dims = ['x', 'y', 'z']\n\n check_length('CMFD mesh grid', grid, grid_length)\n for i in range(grid_length):\n check_type('CMFD mesh {}-grid'.format(dims[i]), grid[i], Iterable,\n Real)\n check_greater_than('CMFD mesh {}-grid length'.format(dims[i]),\n len(grid[i]), 1)\n self._grid = np.array(grid)\n self._display_mesh_warning('rectilinear', 'CMFD mesh grid')\n\n def _display_mesh_warning(self, mesh_type, variable_label):\n if self._mesh_type != mesh_type:\n warn_msg = (f'Setting {variable_label} if mesh type is not set to '\n f'{mesh_type} will have no effect')\n warnings.warn(warn_msg, RuntimeWarning)\n\n\nclass CMFDRun:\n r\"\"\"Class for running CMFD acceleration through the C API.\n\n Attributes\n ----------\n tally_begin : int\n Batch number at which CMFD tallies should begin accummulating\n solver_begin: int\n Batch number at which CMFD solver should start executing\n ref_d : list of floats\n List of reference diffusion coefficients to fix CMFD parameters to\n display : dict\n Dictionary indicating which CMFD results to output. Note that CMFD\n k-effective will always be outputted. Acceptable keys are:\n\n * \"balance\" - Whether to output RMS [%] of the resdiual from the\n neutron balance equation on CMFD tallies (bool)\n * \"dominance\" - Whether to output the estimated dominance ratio from\n the CMFD iterations (bool)\n * \"entropy\" - Whether to output the *entropy* of the CMFD predicted\n fission source (bool)\n * \"source\" - Whether to ouput the RMS [%] between the OpenMC fission\n source and CMFD fission source (bool)\n\n downscatter : bool\n Indicate whether an effective downscatter cross section should be used\n when using 2-group CMFD.\n feedback : bool\n Indicate whether or not the CMFD diffusion result is used to adjust the weight\n of fission source neutrons on the next OpenMC batch. Defaults to False.\n cmfd_ktol : float\n Tolerance on the eigenvalue when performing CMFD power iteration\n mesh : openmc.cmfd.CMFDMesh\n Structured mesh to be used for acceleration\n norm : float\n Normalization factor applied to the CMFD fission source distribution\n power_monitor : bool\n View convergence of power iteration during CMFD acceleration\n run_adjoint : bool\n Perform adjoint calculation on the last batch\n w_shift : float\n Optional Wielandt shift parameter for accelerating power iterations. 
By\n default, it is very large so there is effectively no impact.\n stol : float\n Tolerance on the fission source when performing CMFD power iteration\n reset : list of int\n List of batch numbers at which CMFD tallies should be reset\n write_matrices : bool\n Write sparse matrices that are used during CMFD acceleration (loss,\n production) and resultant normalized flux vector phi to file\n spectral : float\n Optional spectral radius that can be used to accelerate the convergence\n of Gauss-Seidel iterations during CMFD power iteration.\n gauss_seidel_tolerance : Iterable of float\n Two parameters specifying the absolute inner tolerance and the relative\n inner tolerance for Gauss-Seidel iterations when performing CMFD.\n adjoint_type : {'physical', 'math'}\n Stores type of adjoint calculation that should be performed.\n ``run_adjoint`` must be true for an adjoint calculation to be\n perfomed. Options are:\n\n * \"physical\" - Create adjoint matrices from physical parameters of\n CMFD problem\n * \"math\" - Create adjoint matrices mathematically as the transpose of\n loss and production CMFD matrices\n\n window_type : {'expanding', 'rolling', 'none'}\n Specifies type of tally window scheme to use to accumulate CMFD\n tallies. Options are:\n\n * \"expanding\" - Have an expanding window that doubles in size\n to give more weight to more recent tallies as more generations are\n simulated\n * \"rolling\" - Have a fixed window size that aggregates tallies from\n the same number of previous generations tallied\n * \"none\" - Don't use a windowing scheme so that all tallies from last\n time they were reset are used for the CMFD algorithm.\n\n window_size : int\n Size of window to use for tally window scheme. Only relevant when\n window_type is set to \"rolling\"\n indices : numpy.ndarray\n Stores spatial and group dimensions as [nx, ny, nz, ng]\n cmfd_src : numpy.ndarray\n CMFD source distribution calculated from solving CMFD equations\n entropy : list of floats\n \"Shannon entropy\" from CMFD fission source, stored for each generation\n that CMFD is invoked\n balance : list of floats\n RMS of neutron balance equations, stored for each generation that CMFD\n is invoked\n src_cmp : list of floats\n RMS deviation of OpenMC and CMFD normalized source, stored for each\n generation that CMFD is invoked\n dom : list of floats\n Dominance ratio from solving CMFD matrix equations, stored for each\n generation that CMFD is invoked\n k_cmfd : list of floats\n List of CMFD k-effectives, stored for each generation that CMFD is\n invoked\n time_cmfd : float\n Time for entire CMFD calculation, in seconds\n time_cmfdbuild : float\n Time for building CMFD matrices, in seconds\n time_cmfdsolve : float\n Time for solving CMFD matrix equations, in seconds\n use_all_threads : bool\n Whether to use all threads allocated to OpenMC for CMFD solver\n intracomm : mpi4py.MPI.Intracomm or None\n MPI intercommunicator for running MPI commands\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor for CMFDRun class. 
Default values for instance variables\n set in this method.\n\n \"\"\"\n # Variables that users can modify\n self._tally_begin = 1\n self._solver_begin = 1\n self._ref_d = np.array([])\n self._display = {'balance': False, 'dominance': False,\n 'entropy': False, 'source': False}\n self._downscatter = False\n self._feedback = False\n self._cmfd_ktol = 1.e-8\n self._mesh = None\n self._norm = 1.\n self._power_monitor = False\n self._run_adjoint = False\n self._w_shift = 1.e6\n self._stol = 1.e-8\n self._reset = []\n self._write_matrices = False\n self._spectral = 0.0\n self._gauss_seidel_tolerance = [1.e-10, 1.e-5]\n self._adjoint_type = 'physical'\n self._window_type = 'none'\n self._window_size = 10\n self._intracomm = None\n self._use_all_threads = False\n\n # External variables used during runtime but users cannot control\n self._set_reference_params = False\n self._indices = np.zeros(4, dtype=np.int32)\n self._egrid = None\n self._albedo = None\n self._coremap = None\n self._mesh_id = None\n self._tally_ids = None\n self._energy_filters = None\n self._cmfd_on = False\n self._mat_dim = _CMFD_NOACCEL\n self._keff_bal = None\n self._keff = None\n self._adj_keff = None\n self._phi = None\n self._adj_phi = None\n self._openmc_src_rate = None\n self._flux_rate = None\n self._total_rate = None\n self._p1scatt_rate = None\n self._scatt_rate = None\n self._nfiss_rate = None\n self._current_rate = None\n self._flux = None\n self._totalxs = None\n self._p1scattxs = None\n self._scattxs = None\n self._nfissxs = None\n self._diffcof = None\n self._dtilde = None\n self._dhat = None\n self._hxyz = None\n self._current = None\n self._cmfd_src = None\n self._openmc_src = None\n self._entropy = []\n self._balance = []\n self._src_cmp = []\n self._dom = []\n self._k_cmfd = []\n self._resnb = None\n self._reset_every = None\n self._time_cmfd = None\n self._time_cmfdbuild = None\n self._time_cmfdsolve = None\n\n # All index-related variables, for numpy vectorization\n self._first_x_accel = None\n self._last_x_accel = None\n self._first_y_accel = None\n self._last_y_accel = None\n self._first_z_accel = None\n self._last_z_accel = None\n self._notfirst_x_accel = None\n self._notlast_x_accel = None\n self._notfirst_y_accel = None\n self._notlast_y_accel = None\n self._notfirst_z_accel = None\n self._notlast_z_accel = None\n self._is_adj_ref_left = None\n self._is_adj_ref_right = None\n self._is_adj_ref_back = None\n self._is_adj_ref_front = None\n self._is_adj_ref_bottom = None\n self._is_adj_ref_top = None\n self._accel_idxs = None\n self._accel_neig_left_idxs = None\n self._accel_neig_right_idxs = None\n self._accel_neig_back_idxs = None\n self._accel_neig_front_idxs = None\n self._accel_neig_bot_idxs = None\n self._accel_neig_top_idxs = None\n self._loss_row = None\n self._loss_col = None\n self._prod_row = None\n self._prod_col = None\n\n @property\n def tally_begin(self):\n return self._tally_begin\n\n @property\n def solver_begin(self):\n return self._solver_begin\n\n @property\n def ref_d(self):\n return self._ref_d\n\n @property\n def display(self):\n return self._display\n\n @property\n def downscatter(self):\n return self._downscatter\n\n @property\n def feedback(self):\n return self._feedback\n\n @property\n def cmfd_ktol(self):\n return self._cmfd_ktol\n\n @property\n def mesh(self):\n return self._mesh\n\n @property\n def norm(self):\n return self._norm\n\n @property\n def adjoint_type(self):\n return self._adjoint_type\n\n @property\n def window_type(self):\n return self._window_type\n\n 
@property\n def window_size(self):\n return self._window_size\n\n @property\n def power_monitor(self):\n return self._power_monitor\n\n @property\n def run_adjoint(self):\n return self._run_adjoint\n\n @property\n def w_shift(self):\n return self._w_shift\n\n @property\n def stol(self):\n return self._stol\n\n @property\n def spectral(self):\n return self._spectral\n\n @property\n def reset(self):\n return self._reset\n\n @property\n def write_matrices(self):\n return self._write_matrices\n\n @property\n def gauss_seidel_tolerance(self):\n return self._gauss_seidel_tolerance\n\n @property\n def indices(self):\n return self._indices\n\n @property\n def use_all_threads(self):\n return self._use_all_threads\n\n @property\n def cmfd_src(self):\n return self._cmfd_src\n\n @property\n def dom(self):\n return self._dom\n\n @property\n def src_cmp(self):\n return self._src_cmp\n\n @property\n def balance(self):\n return self._balance\n\n @property\n def entropy(self):\n return self._entropy\n\n @property\n def k_cmfd(self):\n return self._k_cmfd\n\n @tally_begin.setter\n def tally_begin(self, begin):\n check_type('CMFD tally begin batch', begin, Integral)\n check_greater_than('CMFD tally begin batch', begin, 0)\n self._tally_begin = begin\n\n @solver_begin.setter\n def solver_begin(self, begin):\n check_type('CMFD feedback begin batch', begin, Integral)\n check_greater_than('CMFD feedback begin batch', begin, 0)\n self._solver_begin = begin\n\n @ref_d.setter\n def ref_d(self, diff_params):\n check_type('Reference diffusion params', diff_params,\n Iterable, Real)\n self._ref_d = np.array(diff_params)\n\n @display.setter\n def display(self, display):\n check_type('display', display, Mapping)\n for key, value in display.items():\n check_value('display key', key,\n ('balance', 'entropy', 'dominance', 'source'))\n check_type(\"display['{}']\".format(key), value, bool)\n self._display[key] = value\n\n @downscatter.setter\n def downscatter(self, downscatter):\n check_type('CMFD downscatter', downscatter, bool)\n self._downscatter = downscatter\n\n @feedback.setter\n def feedback(self, feedback):\n check_type('CMFD feedback', feedback, bool)\n self._feedback = feedback\n\n @cmfd_ktol.setter\n def cmfd_ktol(self, cmfd_ktol):\n check_type('CMFD eigenvalue tolerance', cmfd_ktol, Real)\n self._cmfd_ktol = cmfd_ktol\n\n @mesh.setter\n def mesh(self, cmfd_mesh):\n check_type('CMFD mesh', cmfd_mesh, CMFDMesh)\n\n if cmfd_mesh.mesh_type == 'regular':\n # Check dimension defined\n if cmfd_mesh.dimension is None:\n raise ValueError('CMFD regular mesh requires spatial '\n 'dimensions to be specified')\n\n # Check lower left defined\n if cmfd_mesh.lower_left is None:\n raise ValueError('CMFD regular mesh requires lower left '\n 'coordinates to be specified')\n\n # Check that both upper right and width both not defined\n if cmfd_mesh.upper_right is not None and cmfd_mesh.width is not None:\n raise ValueError('Both upper right coordinates and width '\n 'cannot be specified for CMFD regular mesh')\n\n # Check that at least one of width or upper right is defined\n if cmfd_mesh.upper_right is None and cmfd_mesh.width is None:\n raise ValueError('CMFD regular mesh requires either upper right '\n 'coordinates or width to be specified')\n\n # Check width and lower length are same dimension and define\n # upper_right\n if cmfd_mesh.width is not None:\n check_length('CMFD mesh width', cmfd_mesh.width,\n len(cmfd_mesh.lower_left))\n cmfd_mesh.upper_right = np.array(cmfd_mesh.lower_left) + \\\n np.array(cmfd_mesh.width) * 
np.array(cmfd_mesh.dimension)\n\n # Check upper_right and lower length are same dimension and define\n # width\n elif cmfd_mesh.upper_right is not None:\n check_length('CMFD mesh upper right', cmfd_mesh.upper_right,\n len(cmfd_mesh.lower_left))\n # Check upper right coordinates are greater than lower left\n if np.any(np.array(cmfd_mesh.upper_right) <=\n np.array(cmfd_mesh.lower_left)):\n raise ValueError('CMFD regular mesh requires upper right '\n 'coordinates to be greater than lower '\n 'left coordinates')\n cmfd_mesh.width = np.true_divide((np.array(cmfd_mesh.upper_right) -\n np.array(cmfd_mesh.lower_left)),\n np.array(cmfd_mesh.dimension))\n elif cmfd_mesh.mesh_type == 'rectilinear':\n # Check dimension defined\n if cmfd_mesh.grid is None:\n raise ValueError('CMFD rectilinear mesh requires spatial '\n 'grid to be specified')\n cmfd_mesh.dimension = [len(cmfd_mesh.grid[i]) - 1 for i in range(3)]\n\n self._mesh = cmfd_mesh\n\n @norm.setter\n def norm(self, norm):\n check_type('CMFD norm', norm, Real)\n self._norm = norm\n\n @adjoint_type.setter\n def adjoint_type(self, adjoint_type):\n check_type('CMFD adjoint type', adjoint_type, str)\n check_value('CMFD adjoint type', adjoint_type,\n ['math', 'physical'])\n self._adjoint_type = adjoint_type\n\n @window_type.setter\n def window_type(self, window_type):\n check_type('CMFD window type', window_type, str)\n check_value('CMFD window type', window_type,\n ['none', 'rolling', 'expanding'])\n self._window_type = window_type\n\n @window_size.setter\n def window_size(self, window_size):\n check_type('CMFD window size', window_size, Integral)\n check_greater_than('CMFD window size', window_size, 0)\n if self._window_type != 'rolling':\n warn_msg = 'Window size will have no effect on CMFD simulation ' \\\n 'unless window type is set to \"rolling\".'\n warnings.warn(warn_msg, RuntimeWarning)\n self._window_size = window_size\n\n @power_monitor.setter\n def power_monitor(self, power_monitor):\n check_type('CMFD power monitor', power_monitor, bool)\n self._power_monitor = power_monitor\n\n @run_adjoint.setter\n def run_adjoint(self, run_adjoint):\n check_type('CMFD run adjoint', run_adjoint, bool)\n self._run_adjoint = run_adjoint\n\n @w_shift.setter\n def w_shift(self, w_shift):\n check_type('CMFD Wielandt shift', w_shift, Real)\n self._w_shift = w_shift\n\n @stol.setter\n def stol(self, stol):\n check_type('CMFD fission source tolerance', stol, Real)\n self._stol = stol\n\n @spectral.setter\n def spectral(self, spectral):\n check_type('CMFD spectral radius', spectral, Real)\n self._spectral = spectral\n\n @reset.setter\n def reset(self, reset):\n check_type('tally reset batches', reset, Iterable, Integral)\n self._reset = reset\n\n @write_matrices.setter\n def write_matrices(self, write_matrices):\n check_type('CMFD write matrices', write_matrices, bool)\n self._write_matrices = write_matrices\n\n @gauss_seidel_tolerance.setter\n def gauss_seidel_tolerance(self, gauss_seidel_tolerance):\n check_type('CMFD Gauss-Seidel tolerance', gauss_seidel_tolerance,\n Iterable, Real)\n check_length('Gauss-Seidel tolerance', gauss_seidel_tolerance, 2)\n self._gauss_seidel_tolerance = gauss_seidel_tolerance\n\n @use_all_threads.setter\n def use_all_threads(self, use_all_threads):\n check_type('CMFD use all threads', use_all_threads, bool)\n self._use_all_threads = use_all_threads\n\n def run(self, **kwargs):\n \"\"\"Run OpenMC with coarse mesh finite difference acceleration\n\n This method is called by the user to run CMFD once instance variables of\n CMFDRun 
class are set\n\n Parameters\n ----------\n **kwargs\n All keyword arguments are passed to\n :func:`openmc.lib.run_in_memory`.\n\n \"\"\"\n with self.run_in_memory(**kwargs):\n for _ in self.iter_batches():\n pass\n\n @contextmanager\n def run_in_memory(self, **kwargs):\n \"\"\" Context manager for running CMFD functions with OpenMC shared\n library functions.\n\n This function can be used with a 'with' statement to ensure the\n CMFDRun class is properly initialized/finalized. For example::\n\n from openmc import cmfd\n cmfd_run = cmfd.CMFDRun()\n with cmfd_run.run_in_memory():\n do_stuff_before_simulation_start()\n for _ in cmfd_run.iter_batches():\n do_stuff_between_batches()\n\n Parameters\n ----------\n **kwargs\n All keyword arguments passed to :func:`openmc.lib.run_in_memory`.\n\n \"\"\"\n # Store intracomm for part of CMFD routine where MPI reduce and\n # broadcast calls are made\n if 'intracomm' in kwargs and kwargs['intracomm'] is not None:\n self._intracomm = kwargs['intracomm']\n elif have_mpi:\n self._intracomm = MPI.COMM_WORLD\n\n # Run and pass arguments to C API run_in_memory function\n with openmc.lib.run_in_memory(**kwargs):\n self.init()\n yield\n self.finalize()\n\n def iter_batches(self):\n \"\"\" Iterator over batches.\n\n This function returns a generator-iterator that allows Python code to\n be run between batches when running an OpenMC simulation with CMFD.\n It should be used in conjunction with\n :func`openmc.cmfd.CMFDRun.run_in_memory` to ensure proper\n initialization/finalization of CMFDRun instance.\n\n \"\"\"\n status = 0\n while status == 0:\n status = self.next_batch()\n yield\n\n def init(self):\n \"\"\" Initialize CMFDRun instance by setting up CMFD parameters and\n calling :func:`openmc.lib.simulation_init`\n\n \"\"\"\n # Configure CMFD parameters\n self._configure_cmfd()\n\n # Create tally objects\n self._create_cmfd_tally()\n\n if openmc.lib.master():\n # Compute and store array indices used to build cross section\n # arrays\n self._precompute_array_indices()\n\n # Compute and store row and column indices used to build CMFD\n # matrices\n self._precompute_matrix_indices()\n\n # Initialize all variables used for linear solver in C++\n self._initialize_linsolver()\n\n # Initialize simulation\n openmc.lib.simulation_init()\n\n # Set cmfd_run variable to True through C API\n openmc.lib.settings.cmfd_run = True\n\n def next_batch(self):\n \"\"\" Run next batch for CMFDRun.\n\n Returns\n -------\n int\n Status after running a batch (0=normal, 1=reached maximum number of\n batches, 2=tally triggers reached)\n\n \"\"\"\n # Initialize CMFD batch\n self._cmfd_init_batch()\n\n # Run next batch\n status = openmc.lib.next_batch()\n\n # Perform CMFD calculations\n self._execute_cmfd()\n\n # Write CMFD data to statepoint\n if openmc.lib.is_statepoint_batch():\n self.statepoint_write()\n return status\n\n def finalize(self):\n \"\"\" Finalize simulation by calling\n :func:`openmc.lib.simulation_finalize` and print out CMFD timing\n information.\n\n \"\"\"\n # Finalize simuation\n openmc.lib.simulation_finalize()\n\n if openmc.lib.master():\n # Print out CMFD timing statistics\n self._write_cmfd_timing_stats()\n\n def statepoint_write(self, filename=None):\n \"\"\"Write all simulation parameters to statepoint\n\n Parameters\n ----------\n filename : str\n Filename of statepoint\n\n \"\"\"\n if filename is None:\n batch_str_len = len(str(openmc.lib.settings.get_batches()))\n batch_str = str(openmc.lib.current_batch()).zfill(batch_str_len)\n filename = 
f'statepoint.{batch_str}.h5'\n\n # Call C API statepoint_write to save source distribution with CMFD\n # feedback\n openmc.lib.statepoint_write(filename=filename)\n\n # Append CMFD data to statepoint file using h5py\n self._write_cmfd_statepoint(filename)\n\n def _write_cmfd_statepoint(self, filename):\n \"\"\"Append all CNFD simulation parameters to existing statepoint\n\n Parameters\n ----------\n filename : str\n Filename of statepoint\n\n \"\"\"\n if openmc.lib.master():\n with h5py.File(filename, 'a') as f:\n if 'cmfd' not in f:\n if openmc.lib.settings.verbosity >= 5:\n print(' Writing CMFD data to {}...'.format(filename))\n sys.stdout.flush()\n cmfd_group = f.create_group(\"cmfd\")\n cmfd_group.attrs['cmfd_on'] = self._cmfd_on\n cmfd_group.attrs['feedback'] = self._feedback\n cmfd_group.attrs['solver_begin'] = self._solver_begin\n cmfd_group.attrs['mesh_id'] = self._mesh_id\n cmfd_group.attrs['tally_begin'] = self._tally_begin\n cmfd_group.attrs['time_cmfd'] = self._time_cmfd\n cmfd_group.attrs['time_cmfdbuild'] = self._time_cmfdbuild\n cmfd_group.attrs['time_cmfdsolve'] = self._time_cmfdsolve\n cmfd_group.attrs['window_size'] = self._window_size\n cmfd_group.attrs['window_type'] = self._window_type\n cmfd_group.create_dataset('k_cmfd', data=self._k_cmfd)\n cmfd_group.create_dataset('dom', data=self._dom)\n cmfd_group.create_dataset('src_cmp', data=self._src_cmp)\n cmfd_group.create_dataset('balance', data=self._balance)\n cmfd_group.create_dataset('entropy', data=self._entropy)\n cmfd_group.create_dataset('reset', data=self._reset)\n cmfd_group.create_dataset('albedo', data=self._albedo)\n cmfd_group.create_dataset('coremap', data=self._coremap)\n cmfd_group.create_dataset('egrid', data=self._egrid)\n cmfd_group.create_dataset('indices', data=self._indices)\n cmfd_group.create_dataset('tally_ids',\n data=self._tally_ids)\n cmfd_group.create_dataset('current_rate',\n data=self._current_rate)\n cmfd_group.create_dataset('flux_rate',\n data=self._flux_rate)\n cmfd_group.create_dataset('nfiss_rate',\n data=self._nfiss_rate)\n cmfd_group.create_dataset('openmc_src_rate',\n data=self._openmc_src_rate)\n cmfd_group.create_dataset('p1scatt_rate',\n data=self._p1scatt_rate)\n cmfd_group.create_dataset('scatt_rate',\n data=self._scatt_rate)\n cmfd_group.create_dataset('total_rate',\n data=self._total_rate)\n elif openmc.settings.verbosity >= 5:\n print(' CMFD data not written to statepoint file as it '\n 'already exists in {}'.format(filename), flush=True)\n\n def _initialize_linsolver(self):\n # Determine number of rows in CMFD matrix\n ng = self._indices[3]\n n = self._mat_dim*ng\n\n # Create temp loss matrix to pass row/col indices to C++ linear solver\n loss_row = self._loss_row\n loss_col = self._loss_col\n temp_data = np.ones(len(loss_row))\n temp_loss = sparse.csr_matrix((temp_data, (loss_row, loss_col)),\n shape=(n, n))\n\n # Pass coremap as 1-d array of 32-bit integers\n coremap = np.swapaxes(self._coremap, 0, 2).flatten().astype(np.int32)\n\n args = temp_loss.indptr, len(temp_loss.indptr), \\\n temp_loss.indices, len(temp_loss.indices), n, \\\n self._spectral, coremap, self._use_all_threads\n return openmc.lib._dll.openmc_initialize_linsolver(*args)\n\n def _write_cmfd_output(self):\n \"\"\"Write CMFD output to buffer at the end of each batch\"\"\"\n # Display CMFD k-effective\n outstr = '{:>11s}CMFD k: {:0.5f}'.format('', self._k_cmfd[-1])\n # Display value of additional fields based on display dict\n outstr += '\\n'\n if self._display['dominance']:\n outstr += ('{:>11s}Dom Rat: 
{:0.5f}\\n'\n .format('', self._dom[-1]))\n if self._display['entropy']:\n outstr += ('{:>11s}CMFD Ent: {:0.5f}\\n'\n .format('', self._entropy[-1]))\n if self._display['source']:\n outstr += ('{:>11s}RMS Src: {:0.5f}\\n'\n .format('', self._src_cmp[-1]))\n if self._display['balance']:\n outstr += ('{:>11s}RMS Bal: {:0.5f}\\n'\n .format('', self._balance[-1]))\n\n print(outstr)\n sys.stdout.flush()\n\n def _write_cmfd_timing_stats(self):\n \"\"\"Write CMFD timing stats to buffer after finalizing simulation\"\"\"\n outstr = (\"=====================> \"\n \"CMFD TIMING STATISTICS <====================\\n\\n\"\n \" Time in CMFD = {:.5e} seconds\\n\"\n \" Building matrices = {:.5e} seconds\\n\"\n \" Solving matrices = {:.5e} seconds\\n\")\n print(outstr.format(self._time_cmfd, self._time_cmfdbuild,\n self._time_cmfdsolve))\n sys.stdout.flush()\n\n def _configure_cmfd(self):\n \"\"\"Initialize CMFD parameters and set CMFD input variables\"\"\"\n # Check if restarting simulation from statepoint file\n if not openmc.lib.settings.restart_run:\n # Define all variables necessary for running CMFD\n self._initialize_cmfd()\n\n else:\n # Reset CMFD parameters from statepoint file\n path_statepoint = openmc.lib.settings.path_statepoint\n self._reset_cmfd(path_statepoint)\n\n def _initialize_cmfd(self):\n \"\"\"Sets values of CMFD instance variables based on user input,\n separating between variables that only exist on all processes\n and those that only exist on the master process\n\n \"\"\"\n # Print message to user and flush output to stdout\n if openmc.lib.settings.verbosity >= 7 and openmc.lib.master():\n print(' Configuring CMFD parameters for simulation')\n sys.stdout.flush()\n\n # Check if CMFD mesh is defined\n if self._mesh is None:\n raise ValueError('No CMFD mesh has been specified for '\n 'simulation')\n\n # Set spatial dimensions of CMFD object\n for i, n in enumerate(self._mesh.dimension):\n self._indices[i] = n\n\n # Check if in continuous energy mode\n if not openmc.lib.settings.run_CE:\n raise OpenMCError('CMFD must be run in continuous energy mode')\n\n # Set number of energy groups\n if self._mesh.energy is not None:\n ng = len(self._mesh.energy)\n self._egrid = np.array(self._mesh.energy)\n self._indices[3] = ng - 1\n self._energy_filters = True\n else:\n self._egrid = np.array([_ENERGY_MIN_NEUTRON, _ENERGY_MAX_NEUTRON])\n self._indices[3] = 1\n self._energy_filters = False\n\n # Get acceleration map, otherwise set all regions to be accelerated\n if self._mesh.map is not None:\n check_length('CMFD coremap', self._mesh.map,\n np.product(self._indices[0:3]))\n if openmc.lib.master():\n self._coremap = np.array(self._mesh.map)\n else:\n if openmc.lib.master():\n self._coremap = np.ones((np.product(self._indices[0:3])),\n dtype=int)\n\n # Check CMFD tallies accummulated before feedback turned on\n if self._feedback and self._solver_begin < self._tally_begin:\n raise ValueError('Tally begin must be less than or equal to '\n 'CMFD begin')\n\n # Initialize parameters for CMFD tally windows\n self._set_tally_window()\n\n # Extract spatial and energy indices\n nx, ny, nz, ng = self._indices\n\n # Initialize CMFD source to all ones\n self._cmfd_src = np.ones((nx, ny, nz, ng))\n\n # Define all variables that will exist only on master process\n if openmc.lib.master():\n # Set global albedo\n if self._mesh.albedo is not None:\n self._albedo = np.array(self._mesh.albedo)\n else:\n self._albedo = np.array([1., 1., 1., 1., 1., 1.])\n\n # Set up CMFD coremap\n self._set_coremap()\n\n # Allocate 
parameters that need to be stored for tally window\n self._openmc_src_rate = np.zeros((nx, ny, nz, ng, 0))\n self._flux_rate = np.zeros((nx, ny, nz, ng, 0))\n self._total_rate = np.zeros((nx, ny, nz, ng, 0))\n self._p1scatt_rate = np.zeros((nx, ny, nz, ng, 0))\n self._scatt_rate = np.zeros((nx, ny, nz, ng, ng, 0))\n self._nfiss_rate = np.zeros((nx, ny, nz, ng, ng, 0))\n self._current_rate = np.zeros((nx, ny, nz, 12, ng, 0))\n\n # Initialize timers\n self._time_cmfd = 0.0\n self._time_cmfdbuild = 0.0\n self._time_cmfdsolve = 0.0\n\n def _reset_cmfd(self, filename):\n \"\"\"Reset all CMFD parameters from statepoint\n\n Parameters\n ----------\n filename : str\n Filename of statepoint to read from\n\n \"\"\"\n with h5py.File(filename, 'r') as f:\n if 'cmfd' not in f:\n raise OpenMCError('Could not find CMFD parameters in ',\n 'file {}'.format(filename))\n else:\n # Overwrite CMFD values from statepoint\n if (openmc.lib.master() and\n openmc.lib.settings.verbosity >= 5):\n print(' Loading CMFD data from {}...'.format(filename))\n sys.stdout.flush()\n cmfd_group = f['cmfd']\n\n # Define variables that exist on all processes\n self._cmfd_on = cmfd_group.attrs['cmfd_on']\n self._feedback = cmfd_group.attrs['feedback']\n self._solver_begin = cmfd_group.attrs['solver_begin']\n self._tally_begin = cmfd_group.attrs['tally_begin']\n self._k_cmfd = list(cmfd_group['k_cmfd'])\n self._dom = list(cmfd_group['dom'])\n self._src_cmp = list(cmfd_group['src_cmp'])\n self._balance = list(cmfd_group['balance'])\n self._entropy = list(cmfd_group['entropy'])\n self._reset = list(cmfd_group['reset'])\n self._egrid = cmfd_group['egrid'][()]\n self._indices = cmfd_group['indices'][()]\n default_egrid = np.array([_ENERGY_MIN_NEUTRON,\n _ENERGY_MAX_NEUTRON])\n self._energy_filters = not np.array_equal(self._egrid,\n default_egrid)\n self._window_size = cmfd_group.attrs['window_size']\n self._window_type = cmfd_group.attrs['window_type']\n self._reset_every = (self._window_type == 'expanding' or\n self._window_type == 'rolling')\n\n # Overwrite CMFD mesh properties\n cmfd_mesh_name = 'mesh ' + str(cmfd_group.attrs['mesh_id'])\n cmfd_mesh = f['tallies']['meshes'][cmfd_mesh_name]\n self._mesh.mesh_type = cmfd_mesh['type'][()].decode()\n if self._mesh.mesh_type == 'regular':\n self._mesh.dimension = cmfd_mesh['dimension'][()]\n self._mesh.lower_left = cmfd_mesh['lower_left'][()]\n self._mesh.upper_right = cmfd_mesh['upper_right'][()]\n self._mesh.width = cmfd_mesh['width'][()]\n elif self._mesh.mesh_type == 'rectilinear':\n x_grid = cmfd_mesh['x_grid'][()]\n y_grid = cmfd_mesh['y_grid'][()]\n z_grid = cmfd_mesh['z_grid'][()]\n self._mesh.grid = [x_grid, y_grid, z_grid]\n\n # Define variables that exist only on master process\n if openmc.lib.master():\n self._time_cmfd = cmfd_group.attrs['time_cmfd']\n self._time_cmfdbuild = cmfd_group.attrs['time_cmfdbuild']\n self._time_cmfdsolve = cmfd_group.attrs['time_cmfdsolve']\n self._albedo = cmfd_group['albedo'][()]\n self._coremap = cmfd_group['coremap'][()]\n self._current_rate = cmfd_group['current_rate'][()]\n self._flux_rate = cmfd_group['flux_rate'][()]\n self._nfiss_rate = cmfd_group['nfiss_rate'][()]\n self._openmc_src_rate = cmfd_group['openmc_src_rate'][()]\n self._p1scatt_rate = cmfd_group['p1scatt_rate'][()]\n self._scatt_rate = cmfd_group['scatt_rate'][()]\n self._total_rate = cmfd_group['total_rate'][()]\n self._mat_dim = np.max(self._coremap) + 1\n\n def _set_tally_window(self):\n \"\"\"Sets parameters to handle different tally window options\"\"\"\n # Set 
parameters for expanding window\n if self._window_type == 'expanding':\n self._reset_every = True\n self._window_size = 1\n # Set parameters for rolling window\n elif self.window_type == 'rolling':\n self._reset_every = True\n # Set parameters for default case, with no window\n else:\n self._window_size = 1\n self._reset_every = False\n\n def _cmfd_init_batch(self):\n \"\"\"Handles CMFD options at the beginning of each batch\"\"\"\n # Get current batch through C API\n # Add 1 as next_batch has not been called yet\n current_batch = openmc.lib.current_batch() + 1\n\n # Check to activate CMFD solver and possible feedback\n if self._solver_begin == current_batch:\n self._cmfd_on = True\n\n # Check to reset tallies\n if ((len(self._reset) > 0 and current_batch in self._reset)\n or self._reset_every):\n self._cmfd_tally_reset()\n\n def _execute_cmfd(self):\n \"\"\"Runs CMFD calculation on master node\"\"\"\n if openmc.lib.master():\n # Start CMFD timer\n time_start_cmfd = time.time()\n\n if openmc.lib.current_batch() >= self._tally_begin:\n # Calculate all cross sections based on tally window averages\n self._compute_xs()\n\n # Execute CMFD algorithm if CMFD on for current batch\n if self._cmfd_on:\n # Run CMFD on single processor on master\n if openmc.lib.master():\n # Create CMFD data based on OpenMC tallies\n self._set_up_cmfd()\n\n # Call solver\n self._cmfd_solver_execute()\n\n # Store k-effective\n self._k_cmfd.append(self._keff)\n\n # Check to perform adjoint on last batch\n batches = openmc.lib.settings.get_batches()\n if openmc.lib.current_batch() == batches and self._run_adjoint:\n self._cmfd_solver_execute(adjoint=True)\n\n # Calculate fission source\n self._calc_fission_source()\n\n # Calculate weight factors through C++ and manipulate CMFD\n # source into a 1-D vector that matches C++ array ordering\n src_flipped = np.flip(self._cmfd_src, axis=3)\n src_swapped = np.swapaxes(src_flipped, 0, 2)\n args = self._feedback, src_swapped.flatten()\n openmc.lib._dll.openmc_cmfd_reweight(*args)\n\n # Stop CMFD timer\n if openmc.lib.master():\n time_stop_cmfd = time.time()\n self._time_cmfd += time_stop_cmfd - time_start_cmfd\n if self._cmfd_on:\n # Write CMFD output if CMFD on for current batch\n self._write_cmfd_output()\n\n def _cmfd_tally_reset(self):\n \"\"\"Resets all CMFD tallies in memory\"\"\"\n # Print message\n if (openmc.lib.settings.verbosity >= 6 and openmc.lib.master() and\n not self._reset_every):\n print(' CMFD tallies reset')\n sys.stdout.flush()\n\n # Reset CMFD tallies\n tallies = openmc.lib.tallies\n for tally_id in self._tally_ids:\n tallies[tally_id].reset()\n\n def _set_up_cmfd(self):\n \"\"\"Configures CMFD object for a CMFD eigenvalue calculation\n\n \"\"\"\n # Compute effective downscatter cross section\n if self._downscatter:\n self._compute_effective_downscatter()\n\n # Check neutron balance\n self._neutron_balance()\n\n # Calculate dtilde\n self._compute_dtilde()\n\n # Calculate dhat\n self._compute_dhat()\n\n def _cmfd_solver_execute(self, adjoint=False):\n \"\"\"Sets up and runs power iteration solver for CMFD\n\n Parameters\n ----------\n adjoint : bool\n Whether or not to run an adjoint calculation\n\n \"\"\"\n # Check for physical adjoint\n physical_adjoint = adjoint and self._adjoint_type == 'physical'\n\n # Start timer for build\n time_start_buildcmfd = time.time()\n\n # Build the loss and production matrices\n if not adjoint:\n # Build matrices without adjoint calculation\n loss = self._build_loss_matrix(False)\n prod = self._build_prod_matrix(False)\n 
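            # Aside: a minimal sketch of the Wielandt-shifted power iteration
            # that the loss/production matrices built above feed into
            # (_execute_power_iter), shown on a toy dense 2x2 problem. The
            # names M, F, k_s, k_l, k_eff and all numerical values below are
            # assumptions chosen only for illustration, and the block is kept
            # entirely as comments so it does not change this method's
            # behaviour.
            #
            # import numpy as np
            #
            # M = np.array([[2.0, -0.5],            # toy loss operator
            #               [-0.4, 1.8]])
            # F = np.array([[1.1, 0.6],             # toy production operator
            #               [0.2, 0.9]])
            # k_s = 1.6                             # Wielandt shift (~ k_o + w_shift)
            # A = M - F / k_s                       # shifted loss operator
            # phi = np.ones(2)                      # initial flux guess
            # k_l = 1.0 / (1.0 / 1.0 - 1.0 / k_s)   # shifted eigenvalue guess
            # for _ in range(200):
            #     src = F @ phi                     # fission source from current flux
            #     phi = np.linalg.solve(A, src / k_l)
            #     k_l *= (F @ phi).sum() / src.sum()
            # k_eff = 1.0 / (1.0 / k_l + 1.0 / k_s) # recover the unshifted eigenvalue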
else:\n # Build adjoint matrices by running adjoint calculation\n if self._adjoint_type == 'physical':\n loss = self._build_loss_matrix(True)\n prod = self._build_prod_matrix(True)\n # Build adjoint matrices as transpose of non-adjoint matrices\n else:\n loss = self._build_loss_matrix(False).transpose()\n prod = self._build_prod_matrix(False).transpose()\n\n # Write out the matrices.\n if self._write_matrices:\n if not adjoint:\n self._write_matrix(loss, 'loss')\n self._write_matrix(prod, 'prod')\n else:\n self._write_matrix(loss, 'adj_loss')\n self._write_matrix(prod, 'adj_prod')\n\n # Stop timer for build\n time_stop_buildcmfd = time.time()\n self._time_cmfdbuild += time_stop_buildcmfd - time_start_buildcmfd\n\n # Begin power iteration\n time_start_solvecmfd = time.time()\n phi, keff, dom = self._execute_power_iter(loss, prod)\n time_stop_solvecmfd = time.time()\n self._time_cmfdsolve += time_stop_solvecmfd - time_start_solvecmfd\n\n # Save results, normalizing phi to sum to 1\n if adjoint:\n self._adj_keff = keff\n self._adj_phi = phi/np.sqrt(np.sum(phi*phi))\n else:\n self._keff = keff\n self._phi = phi/np.sqrt(np.sum(phi*phi))\n\n self._dom.append(dom)\n\n # Write out flux vector\n if self._write_matrices:\n if adjoint:\n self._write_vector(self._adj_phi, 'adj_fluxvec')\n else:\n self._write_vector(self._phi, 'fluxvec')\n\n def _write_vector(self, vector, base_filename):\n \"\"\"Write a 1-D numpy array to file and also save it in .npy format.\n This particular format allows users to load the variable directly in a\n Python session with np.load()\n\n Parameters\n ----------\n vector : numpy.ndarray\n Vector that will be saved\n base_filename : str\n Filename to save vector as, without any file extension at the end.\n Vector will be saved to file [base_filename].dat and in numpy\n format as [base_filename].npy\n\n \"\"\"\n # Write each element in vector to file\n with open(base_filename+'.dat', 'w') as fh:\n for val in vector:\n fh.write('{:0.8f}\\n'.format(val))\n\n # Save as numpy format\n np.save(base_filename, vector)\n\n def _write_matrix(self, matrix, base_filename):\n \"\"\"Write a numpy matrix to file and also save it in .npz format. This\n particular format allows users to load the variable directly in a\n Python session with scipy.sparse.load_npz()\n\n Parameters\n ----------\n matrix : scipy.sparse.spmatrix\n Sparse matrix that will be saved\n base_filename : str\n Filename to save matrix entries, without any file extension at the\n end. Matrix entries will be saved to file [base_filename].dat and\n in scipy format as [base_filename].npz\n\n \"\"\"\n # Write row, col, and data of each entry in sparse matrix. This ignores\n # all zero-entries, and indices are written with zero-based indexing\n with open(base_filename+'.dat', 'w') as fh:\n for row in range(matrix.shape[0]):\n # Get all cols for particular row in matrix\n cols = matrix.indices[matrix.indptr[row]:matrix.indptr[row+1]]\n # Get all data entries for particular row in matrix\n data = matrix.data[matrix.indptr[row]:matrix.indptr[row+1]]\n for i in range(len(cols)):\n fh.write('{:3d}, {:3d}, {:0.8f}\\n'.format(\n row, cols[i], data[i]))\n\n # Save matrix in scipy format\n sparse.save_npz(base_filename, matrix)\n\n def _calc_fission_source(self):\n \"\"\"Calculates CMFD fission source from CMFD flux. If a coremap is\n defined, there will be a discrepancy between the spatial indices in the\n variables ``phi`` and ``nfissxs``, so ``phi`` needs to be mapped to the\n spatial indices of the cross sections. 
This can be done in a vectorized\n numpy manner or with for loops\n\n \"\"\"\n # Extract number of groups and number of accelerated regions\n nx, ny, nz, ng = self._indices\n n = self._mat_dim\n\n # Compute cmfd_src in a vecotorized manner by phi to the spatial\n # indices of the actual problem so that cmfd_flux can be multiplied by\n # nfissxs\n\n # Calculate volume\n vol = np.product(self._hxyz, axis=3)\n\n # Reshape phi by number of groups\n phi = self._phi.reshape((n, ng))\n\n # Extract indices of coremap that are accelerated\n idx = self._accel_idxs\n\n # Initialize CMFD flux map that maps phi to actual spatial and\n # group indices of problem\n cmfd_flux = np.zeros((nx, ny, nz, ng))\n\n # Loop over all groups and set CMFD flux based on indices of\n # coremap and values of phi\n for g in range(ng):\n phi_g = phi[:,g]\n cmfd_flux[idx + (g,)] = phi_g[self._coremap[idx]]\n\n # Compute fission source\n cmfd_src = (np.sum(self._nfissxs[:,:,:,:,:] *\n cmfd_flux[:,:,:,:,np.newaxis], axis=3))\n\n # Normalize source such that it sums to 1.0\n self._cmfd_src = cmfd_src / np.sum(cmfd_src)\n\n # Compute entropy\n if openmc.lib.settings.entropy_on:\n # Compute source times log_2(source)\n source = self._cmfd_src[self._cmfd_src > 0] \\\n * np.log(self._cmfd_src[self._cmfd_src > 0])/np.log(2)\n\n # Sum source and store\n self._entropy.append(-1.0 * np.sum(source))\n\n # Normalize source so average is 1.0\n self._cmfd_src = self._cmfd_src/np.sum(self._cmfd_src) * self._norm\n\n # Calculate differences between normalized sources\n self._src_cmp.append(np.sqrt(1.0 / self._norm\n * np.sum((self._cmfd_src - self._openmc_src)**2)))\n\n def _build_loss_matrix(self, adjoint):\n # Extract spatial and energy indices and define matrix dimension\n ng = self._indices[3]\n n = self._mat_dim*ng\n\n # Define data entries used to build csr matrix\n data = np.array([])\n\n dtilde_left = self._dtilde[:,:,:,:,0]\n dtilde_right = self._dtilde[:,:,:,:,1]\n dtilde_back = self._dtilde[:,:,:,:,2]\n dtilde_front = self._dtilde[:,:,:,:,3]\n dtilde_bottom = self._dtilde[:,:,:,:,4]\n dtilde_top = self._dtilde[:,:,:,:,5]\n dhat_left = self._dhat[:,:,:,:,0]\n dhat_right = self._dhat[:,:,:,:,1]\n dhat_back = self._dhat[:,:,:,:,2]\n dhat_front = self._dhat[:,:,:,:,3]\n dhat_bottom = self._dhat[:,:,:,:,4]\n dhat_top = self._dhat[:,:,:,:,5]\n\n dx = self._hxyz[:,:,:,np.newaxis,0]\n dy = self._hxyz[:,:,:,np.newaxis,1]\n dz = self._hxyz[:,:,:,np.newaxis,2]\n\n # Define net leakage coefficient for each surface in each matrix\n # element\n jnet = (((dtilde_right + dhat_right)-(-1.0 * dtilde_left + dhat_left))\n / dx +\n ((dtilde_front + dhat_front)-(-1.0 * dtilde_back + dhat_back))\n / dy +\n ((dtilde_top + dhat_top)-(-1.0 * dtilde_bottom + dhat_bottom))\n / dz)\n\n for g in range(ng):\n # Define leakage terms that relate terms to their neighbors to the\n # left\n dtilde = self._dtilde[:,:,:,g,0][self._accel_neig_left_idxs]\n dhat = self._dhat[:,:,:,g,0][self._accel_neig_left_idxs]\n dx = self._hxyz[:,:,:,0][self._accel_neig_left_idxs]\n vals = (-1.0 * dtilde - dhat) / dx\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Define leakage terms that relate terms to their neighbors to the\n # right\n dtilde = self._dtilde[:,:,:,g,1][self._accel_neig_right_idxs]\n dhat = self._dhat[:,:,:,g,1][self._accel_neig_right_idxs]\n dx = self._hxyz[:,:,:,0][self._accel_neig_right_idxs]\n vals = (-1.0 * dtilde + dhat) / dx\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Define leakage terms that relate 
terms to their neighbors in the\n # back\n dtilde = self._dtilde[:,:,:,g,2][self._accel_neig_back_idxs]\n dhat = self._dhat[:,:,:,g,2][self._accel_neig_back_idxs]\n dy = self._hxyz[:,:,:,1][self._accel_neig_back_idxs]\n vals = (-1.0 * dtilde - dhat) / dy\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Define leakage terms that relate terms to their neighbors in the\n # front\n dtilde = self._dtilde[:,:,:,g,3][self._accel_neig_front_idxs]\n dhat = self._dhat[:,:,:,g,3][self._accel_neig_front_idxs]\n dy = self._hxyz[:,:,:,1][self._accel_neig_front_idxs]\n vals = (-1.0 * dtilde + dhat) / dy\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Define leakage terms that relate terms to their neighbors to the\n # bottom\n dtilde = self._dtilde[:,:,:,g,4][self._accel_neig_bot_idxs]\n dhat = self._dhat[:,:,:,g,4][self._accel_neig_bot_idxs]\n dz = self._hxyz[:,:,:,2][self._accel_neig_bot_idxs]\n vals = (-1.0 * dtilde - dhat) / dz\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Define leakage terms that relate terms to their neighbors to the\n # top\n dtilde = self._dtilde[:,:,:,g,5][self._accel_neig_top_idxs]\n dhat = self._dhat[:,:,:,g,5][self._accel_neig_top_idxs]\n dz = self._hxyz[:,:,:,2][self._accel_neig_top_idxs]\n vals = (-1.0 * dtilde + dhat) / dz\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Define terms that relate to loss of neutrons in a cell. These\n # correspond to all the diagonal entries of the loss matrix\n jnet_g = jnet[:,:,:,g][self._accel_idxs]\n total_xs = self._totalxs[:,:,:,g][self._accel_idxs]\n scatt_xs = self._scattxs[:,:,:,g,g][self._accel_idxs]\n vals = jnet_g + total_xs - scatt_xs\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Define terms that relate to in-scattering from group to group.\n # These terms relate a mesh index to all mesh indices with the same\n # spatial dimensions but belong to a different energy group\n for h in range(ng):\n if h != g:\n # Get scattering macro xs, transposed\n if adjoint:\n scatt_xs = self._scattxs[:,:,:,g,h][self._accel_idxs]\n # Get scattering macro xs\n else:\n scatt_xs = self._scattxs[:,:,:,h,g][self._accel_idxs]\n vals = -1.0 * scatt_xs\n # Store data to add to CSR matrix\n data = np.append(data, vals)\n\n # Create csr matrix\n loss_row = self._loss_row\n loss_col = self._loss_col\n loss = sparse.csr_matrix((data, (loss_row, loss_col)), shape=(n, n))\n return loss\n\n def _build_prod_matrix(self, adjoint):\n # Extract spatial and energy indices and define matrix dimension\n ng = self._indices[3]\n n = self._mat_dim*ng\n\n # Define rows, columns, and data used to build csr matrix\n data = np.array([])\n\n # Define terms that relate to fission production from group to group.\n for g in range(ng):\n for h in range(ng):\n # Get nu-fission macro xs, transposed\n if adjoint:\n vals = (self._nfissxs[:, :, :, g, h])[self._accel_idxs]\n # Get nu-fission macro xs\n else:\n vals = (self._nfissxs[:, :, :, h, g])[self._accel_idxs]\n # Store rows, cols, and data to add to CSR matrix\n data = np.append(data, vals)\n\n # Create csr matrix\n prod_row = self._prod_row\n prod_col = self._prod_col\n prod = sparse.csr_matrix((data, (prod_row, prod_col)), shape=(n, n))\n return prod\n\n def _execute_power_iter(self, loss, prod):\n \"\"\"Main power iteration routine for the CMFD calculation\n\n Parameters\n ----------\n loss : scipy.sparse.spmatrix\n Sparse matrix storing elements of CMFD loss matrix\n prod : scipy.sparse.spmatrix\n 
Sparse matrix storing elements of CMFD production matrix\n\n Returns\n -------\n phi_n : numpy.ndarray\n Flux vector of CMFD problem\n k_n : float\n Eigenvalue of CMFD problem\n dom : float\n Dominance ratio of CMFD problem\n\n \"\"\"\n # Get problem size\n n = loss.shape[0]\n\n # Set up tolerances for C++ solver\n atoli = self._gauss_seidel_tolerance[0]\n rtoli = self._gauss_seidel_tolerance[1]\n toli = rtoli * 100\n\n # Set up flux vectors, intital guess set to 1\n phi_n = np.ones((n,))\n phi_o = np.ones((n,))\n\n # Set up source vectors\n s_n = np.zeros((n,))\n s_o = np.zeros((n,))\n\n # Set initial guess\n k_n = openmc.lib.keff()[0]\n k_o = k_n\n dw = self._w_shift\n k_s = k_o + dw\n k_ln = 1.0/(1.0/k_n - 1.0/k_s)\n k_lo = k_ln\n\n # Set norms to 0\n norm_n = 0.0\n norm_o = 0.0\n\n # Maximum number of power iterations\n maxits = 10000\n\n # Perform Wielandt shift\n loss -= 1.0/k_s*prod\n\n # Begin power iteration\n for i in range(maxits):\n # Check if reach max number of iterations\n if i == maxits - 1:\n raise OpenMCError('Reached maximum iterations in CMFD power '\n 'iteration solver.')\n\n # Compute source vector\n s_o = prod.dot(phi_o)\n\n # Normalize source vector\n s_o /= k_lo\n\n # Compute new flux with C++ solver\n innerits = openmc.lib._dll.openmc_run_linsolver(loss.data, s_o,\n phi_n, toli)\n\n # Compute new source vector\n s_n = prod.dot(phi_n)\n\n # Compute new shifted eigenvalue\n k_ln = np.sum(s_n) / np.sum(s_o)\n\n # Compute new eigenvalue\n k_n = 1.0/(1.0/k_ln + 1.0/k_s)\n\n # Renormalize the old source\n s_o *= k_lo\n\n # Check convergence\n iconv, norm_n = self._check_convergence(s_n, s_o, k_n, k_o, i+1,\n innerits)\n\n # If converged, calculate dominance ratio and break from loop\n if iconv:\n dom = norm_n / norm_o\n return phi_n, k_n, dom\n\n # Record old values if not converged\n phi_o = phi_n\n k_o = k_n\n k_lo = k_ln\n norm_o = norm_n\n\n # Update tolerance for inner iterations\n toli = max(atoli, rtoli*norm_n)\n\n def _check_convergence(self, s_n, s_o, k_n, k_o, iteration, innerits):\n \"\"\"Checks the convergence of the CMFD problem\n\n Parameters\n ----------\n s_n : numpy.ndarray\n Source vector from current iteration\n s_o : numpy.ndarray\n Source vector from previous iteration\n k_n : float\n K-effective from current iteration\n k_o : float\n K-effective from previous iteration\n iteration : int\n Iteration number\n innerits : int\n Number of iterations required for convergence in inner GS loop\n\n Returns\n -------\n iconv : bool\n Whether the power iteration has reached convergence\n serr : float\n Error in source from previous iteration to current iteration, used\n for dominance ratio calculations\n\n \"\"\"\n # Calculate error in keff\n kerr = abs(k_o - k_n) / k_n\n\n # Calculate max error in source\n with np.errstate(divide='ignore', invalid='ignore'):\n serr = np.sqrt(np.sum(np.where(s_n > 0, ((s_n-s_o) / s_n)**2, 0))\n / len(s_n))\n\n # Check for convergence\n iconv = kerr < self._cmfd_ktol and serr < self._stol\n\n # Print out to user\n if self._power_monitor and openmc.lib.master():\n print('{:8s}{:20s}{:25s}{:s}{:s}'.format(\n ' {:d}:'.format(iteration),\n 'k-eff: {:0.8f}'.format(k_n),\n 'k-error: {:.5e}'.format(kerr),\n 'src-error: {:.5e}'.format(serr),\n ' {:d}'.format(innerits)\n ), flush=True)\n\n return iconv, serr\n\n def _set_coremap(self):\n \"\"\"Sets the core mapping information. 
All regions marked with zero\n are set to CMFD_NOACCEL, while all regions marked with 1 are set to a\n unique index that maps each fuel region to a row number when building\n CMFD matrices\n\n \"\"\"\n # Set number of accelerated regions in problem. This will be related to\n # the dimension of CMFD matrices\n self._mat_dim = np.sum(self._coremap)\n\n # Define coremap as cumulative sum over accelerated regions,\n # otherwise set value to _CMFD_NOACCEL\n self._coremap = np.where(self._coremap == 0, _CMFD_NOACCEL,\n np.cumsum(self._coremap) - 1)\n\n # Reshape coremap to three dimensional array\n # Indices of coremap in user input switched in x and z axes\n nx, ny, nz = self._indices[:3]\n self._coremap = self._coremap.reshape(nz, ny, nx)\n self._coremap = np.swapaxes(self._coremap, 0, 2)\n\n def _compute_xs(self):\n \"\"\"Takes CMFD tallies from OpenMC and computes macroscopic cross\n sections, flux, and diffusion coefficients for each mesh cell using\n a tally window scheme\n\n \"\"\"\n # Update window size for expanding window if necessary\n num_cmfd_batches = openmc.lib.current_batch() - self._tally_begin + 1\n if (self._window_type == 'expanding' and\n num_cmfd_batches == self._window_size * 2):\n self._window_size *= 2\n\n # Discard tallies from oldest batch if window limit reached\n tally_windows = self._flux_rate.shape[-1] + 1\n if tally_windows > self._window_size:\n self._flux_rate = self._flux_rate[...,1:]\n self._total_rate = self._total_rate[...,1:]\n self._p1scatt_rate = self._p1scatt_rate[...,1:]\n self._scatt_rate = self._scatt_rate[...,1:]\n self._nfiss_rate = self._nfiss_rate[...,1:]\n self._current_rate = self._current_rate[...,1:]\n self._openmc_src_rate = self._openmc_src_rate[...,1:]\n tally_windows -= 1\n\n # Extract spatial and energy indices\n nx, ny, nz, ng = self._indices\n\n # Get tallies in-memory\n tallies = openmc.lib.tallies\n\n # Set conditional numpy array as boolean vector based on coremap\n is_accel = self._coremap != _CMFD_NOACCEL\n\n # Get flux from CMFD tally 0\n tally_id = self._tally_ids[0]\n flux = tallies[tally_id].results[:,0,1]\n\n # Define target tally reshape dimensions. This defines how openmc\n # tallies are ordered by dimension\n target_tally_shape = [nz, ny, nx, ng, 1]\n\n # Reshape flux array to target shape. Swap x and z axes so that\n # flux shape is now [nx, ny, nz, ng, 1]\n reshape_flux = np.swapaxes(flux.reshape(target_tally_shape), 0, 2)\n\n # Flip energy axis as tally results are given in reverse order of\n # energy group\n reshape_flux = np.flip(reshape_flux, axis=3)\n\n # Bank flux to flux_rate\n self._flux_rate = np.append(self._flux_rate, reshape_flux, axis=4)\n\n # Compute flux as aggregate of banked flux_rate over tally window\n self._flux = np.where(is_accel[..., np.newaxis],\n np.sum(self._flux_rate, axis=4), 0.0)\n\n # Detect zero flux, abort if located and cmfd is on\n zero_flux = np.logical_and(self._flux < _TINY_BIT,\n is_accel[..., np.newaxis])\n if np.any(zero_flux) and self._cmfd_on:\n # Get index of first zero flux in flux array\n idx = np.argwhere(zero_flux)[0]\n\n # Throw error message (one-based indexing)\n # Index of group is flipped\n err_message = 'Detected zero flux without coremap overlay' + \\\n ' at mesh: (' + \\\n ', '.join(str(i+1) for i in idx[:-1]) + \\\n ') in group ' + str(ng-idx[-1])\n raise OpenMCError(err_message)\n\n # Get total reaction rate (rr) from CMFD tally 0\n totalrr = tallies[tally_id].results[:,1,1]\n\n # Reshape total reaction rate array to target shape. 
Swap x and z axes\n # so that shape is now [nx, ny, nz, ng, 1]\n reshape_totalrr = np.swapaxes(totalrr.reshape(target_tally_shape),\n 0, 2)\n\n # Total reaction rate is flipped in energy axis as tally results are\n # given in reverse order of energy group\n reshape_totalrr = np.flip(reshape_totalrr, axis=3)\n\n # Bank total reaction rate to total_rate\n self._total_rate = np.append(self._total_rate, reshape_totalrr,\n axis=4)\n\n # Compute total xs as aggregate of banked total_rate over tally window\n # divided by flux\n self._totalxs = np.divide(np.sum(self._total_rate, axis=4),\n self._flux, where=self._flux > 0,\n out=np.zeros_like(self._totalxs))\n\n # Get scattering rr from CMFD tally 1\n # flux is repeated to account for extra dimensionality of scattering xs\n tally_id = self._tally_ids[1]\n scattrr = tallies[tally_id].results[:,0,1]\n\n # Define target tally reshape dimensions for xs with incoming\n # and outgoing energies\n target_tally_shape = [nz, ny, nx, ng, ng, 1]\n\n # Reshape scattrr array to target shape. Swap x and z axes so that\n # shape is now [nx, ny, nz, ng, ng, 1]\n reshape_scattrr = np.swapaxes(scattrr.reshape(target_tally_shape),\n 0, 2)\n\n # Scattering rr is flipped in both incoming and outgoing energy axes\n # as tally results are given in reverse order of energy group\n reshape_scattrr = np.flip(reshape_scattrr, axis=3)\n reshape_scattrr = np.flip(reshape_scattrr, axis=4)\n\n # Bank scattering rr to scatt_rate\n self._scatt_rate = np.append(self._scatt_rate, reshape_scattrr,\n axis=5)\n\n # Compute scattering xs as aggregate of banked scatt_rate over tally\n # window divided by flux. Flux dimensionality increased to account for\n # extra dimensionality of scattering xs\n extended_flux = self._flux[:,:,:,:,np.newaxis]\n self._scattxs = np.divide(np.sum(self._scatt_rate, axis=5),\n extended_flux, where=extended_flux > 0,\n out=np.zeros_like(self._scattxs))\n\n # Get nu-fission rr from CMFD tally 1\n nfissrr = tallies[tally_id].results[:,1,1]\n num_realizations = tallies[tally_id].num_realizations\n\n # Reshape nfissrr array to target shape. Swap x and z axes so that\n # shape is now [nx, ny, nz, ng, ng, 1]\n reshape_nfissrr = np.swapaxes(nfissrr.reshape(target_tally_shape),\n 0, 2)\n\n # Nu-fission rr is flipped in both incoming and outgoing energy axes\n # as tally results are given in reverse order of energy group\n reshape_nfissrr = np.flip(reshape_nfissrr, axis=3)\n reshape_nfissrr = np.flip(reshape_nfissrr, axis=4)\n\n # Bank nu-fission rr to nfiss_rate\n self._nfiss_rate = np.append(self._nfiss_rate, reshape_nfissrr,\n axis=5)\n\n # Compute nu-fission xs as aggregate of banked nfiss_rate over tally\n # window divided by flux. 
Flux dimensionality increased to account for\n # extra dimensionality of nu-fission xs\n self._nfissxs = np.divide(np.sum(self._nfiss_rate, axis=5),\n extended_flux, where=extended_flux > 0,\n out=np.zeros_like(self._nfissxs))\n\n # Openmc source distribution is sum of nu-fission rr in incoming\n # energies\n openmc_src = np.sum(reshape_nfissrr, axis=3)\n\n # Bank OpenMC source distribution from current batch to\n # openmc_src_rate\n self._openmc_src_rate = np.append(self._openmc_src_rate, openmc_src,\n axis=4)\n\n # Compute source distribution over entire tally window\n self._openmc_src = np.sum(self._openmc_src_rate, axis=4)\n\n # Compute k_eff from source distribution\n self._keff_bal = (np.sum(self._openmc_src) / num_realizations /\n tally_windows)\n\n # Normalize openmc source distribution\n self._openmc_src /= np.sum(self._openmc_src) * self._norm\n\n # Get surface currents from CMFD tally 2\n tally_id = self._tally_ids[2]\n current = tallies[tally_id].results[:,0,1]\n\n # Define target tally reshape dimensions for current\n target_tally_shape = [nz, ny, nx, 12, ng, 1]\n\n # Reshape current array to target shape. Swap x and z axes so that\n # shape is now [nx, ny, nz, 12, ng, 1]\n reshape_current = np.swapaxes(current.reshape(target_tally_shape),\n 0, 2)\n\n # Current is flipped in energy axis as tally results are given in\n # reverse order of energy group\n reshape_current = np.flip(reshape_current, axis=4)\n\n # Bank current to current_rate\n self._current_rate = np.append(self._current_rate, reshape_current,\n axis=5)\n\n # Compute current as aggregate of banked current_rate over tally window\n self._current = np.where(is_accel[..., np.newaxis, np.newaxis],\n np.sum(self._current_rate, axis=5), 0.0)\n\n # Get p1 scatter rr from CMFD tally 3\n tally_id = self._tally_ids[3]\n p1scattrr = tallies[tally_id].results[:,0,1]\n\n # Define target tally reshape dimensions for p1 scatter tally\n target_tally_shape = [nz, ny, nx, 2, ng, 1]\n\n # Reshape and extract only p1 data from tally results as there is\n # no need for p0 data\n reshape_p1scattrr = np.swapaxes(p1scattrr.reshape(target_tally_shape),\n 0, 2)[:,:,:,1,:,:]\n\n # p1-scatter rr is flipped in energy axis as tally results are given in\n # reverse order of energy group\n reshape_p1scattrr = np.flip(reshape_p1scattrr, axis=3)\n\n # Bank p1-scatter rr to p1scatt_rate\n self._p1scatt_rate = np.append(self._p1scatt_rate, reshape_p1scattrr,\n axis=4)\n\n # Compute p1-scatter xs as aggregate of banked p1scatt_rate over tally\n # window divided by flux\n self._p1scattxs = np.divide(np.sum(self._p1scatt_rate, axis=4),\n self._flux, where=self._flux > 0,\n out=np.zeros_like(self._p1scattxs))\n\n if self._set_reference_params:\n # Set diffusion coefficients based on reference value\n self._diffcof = np.where(self._flux > 0,\n self._ref_d[None, None, None, :], 0.0)\n else:\n # Calculate and store diffusion coefficient\n with np.errstate(divide='ignore', invalid='ignore'):\n self._diffcof = np.where(self._flux > 0, 1.0 / (3.0 *\n (self._totalxs-self._p1scattxs)), 0.)\n\n def _compute_effective_downscatter(self):\n \"\"\"Changes downscatter rate for zero upscatter\"\"\"\n # Extract energy index\n ng = self._indices[3]\n\n # Return if not two groups\n if ng != 2:\n return\n\n # Extract cross sections and flux for each group\n flux1 = self._flux[:,:,:,0]\n flux2 = self._flux[:,:,:,1]\n sigt1 = self._totalxs[:,:,:,0]\n sigt2 = self._totalxs[:,:,:,1]\n\n # First energy index is incoming energy, second is outgoing energy\n sigs11 = 
self._scattxs[:,:,:,0,0]\n sigs21 = self._scattxs[:,:,:,1,0]\n sigs12 = self._scattxs[:,:,:,0,1]\n sigs22 = self._scattxs[:,:,:,1,1]\n\n # Compute absorption xs\n siga1 = sigt1 - sigs11 - sigs12\n siga2 = sigt2 - sigs22 - sigs21\n\n # Compute effective downscatter XS\n sigs12_eff = sigs12 - sigs21 * np.divide(flux2, flux1,\n where=flux1 > 0,\n out=np.zeros_like(flux2))\n\n # Recompute total cross sections and record\n self._totalxs[:,:,:,0] = siga1 + sigs11 + sigs12_eff\n self._totalxs[:,:,:,1] = siga2 + sigs22\n\n # Record effective dowmscatter xs\n self._scattxs[:,:,:,0,1] = sigs12_eff\n\n # Zero out upscatter cross section\n self._scattxs[:,:,:,1,0] = 0.0\n\n def _neutron_balance(self):\n \"\"\"Computes the RMS neutron balance over the CMFD mesh\"\"\"\n # Extract energy indices\n ng = self._indices[3]\n\n # Get number of accelerated regions\n num_accel = self._mat_dim\n\n # Get openmc k-effective\n keff = openmc.lib.keff()[0]\n\n # Define leakage in each mesh cell and energy group\n leakage = (((self._current[:,:,:,_CURRENTS['out_right'],:] -\n self._current[:,:,:,_CURRENTS['in_right'],:]) -\n (self._current[:,:,:,_CURRENTS['in_left'],:] -\n self._current[:,:,:,_CURRENTS['out_left'],:])) +\n ((self._current[:,:,:,_CURRENTS['out_front'],:] -\n self._current[:,:,:,_CURRENTS['in_front'],:]) -\n (self._current[:,:,:,_CURRENTS['in_back'],:] -\n self._current[:,:,:,_CURRENTS['out_back'],:])) +\n ((self._current[:,:,:,_CURRENTS['out_top'],:] -\n self._current[:,:,:,_CURRENTS['in_top'],:]) -\n (self._current[:,:,:,_CURRENTS['in_bottom'],:] -\n self._current[:,:,:,_CURRENTS['out_bottom'],:])))\n\n # Compute total rr\n interactions = self._totalxs * self._flux\n\n # Compute scattering rr by broadcasting flux in outgoing energy and\n # summing over incoming energy\n scattering = np.sum(self._scattxs * self._flux[:,:,:,:, np.newaxis],\n axis=3)\n\n # Compute fission rr by broadcasting flux in outgoing energy and\n # summing over incoming energy\n fission = np.sum(self._nfissxs * self._flux[:,:,:,:, np.newaxis],\n axis=3)\n\n # Compute residual\n res = leakage + interactions - scattering - (1.0 / keff) * fission\n\n # Normalize res by flux and bank res\n self._resnb = np.divide(res, self._flux, where=self._flux > 0,\n out=np.zeros_like(self._flux))\n\n # Calculate RMS and record for this batch\n self._balance.append(np.sqrt(\n np.sum(np.multiply(self._resnb, self._resnb)) /\n (ng * num_accel)))\n\n def _precompute_array_indices(self):\n \"\"\"Initializes cross section arrays and computes the indices\n used to populate dtilde and dhat\n\n \"\"\"\n # Extract spatial indices\n nx, ny, nz, ng = self._indices\n\n # Allocate dimensions for each mesh cell\n self._hxyz = np.zeros((nx, ny, nz, 3))\n self._hxyz[:] = openmc.lib.meshes[self._mesh_id].width\n\n # Allocate flux, cross sections and diffusion coefficient\n self._flux = np.zeros((nx, ny, nz, ng))\n self._totalxs = np.zeros((nx, ny, nz, ng))\n self._p1scattxs = np.zeros((nx, ny, nz, ng))\n self._scattxs = np.zeros((nx, ny, nz, ng, ng)) # Incoming, outgoing\n self._nfissxs = np.zeros((nx, ny, nz, ng, ng)) # Incoming, outgoing\n self._diffcof = np.zeros((nx, ny, nz, ng))\n\n # Allocate dtilde and dhat\n self._dtilde = np.zeros((nx, ny, nz, ng, 6))\n self._dhat = np.zeros((nx, ny, nz, ng, 6))\n\n # Set reference diffusion parameters\n if self._ref_d.size > 0:\n self._set_reference_params = True\n # Check length of reference diffusion parameters equal to number of\n # energy groups\n if self._ref_d.size != self._indices[3]:\n raise OpenMCError('Number of 
reference diffusion parameters '\n 'must equal number of CMFD energy groups')\n\n # Logical for determining whether region of interest is accelerated\n # region\n is_accel = self._coremap != _CMFD_NOACCEL\n # Logical for determining whether a zero flux \"albedo\" b.c. should be\n # applied\n is_zero_flux_alb = abs(self._albedo - _ZERO_FLUX) < _TINY_BIT\n x_inds, y_inds, z_inds = np.indices((nx, ny, nz))\n\n # Define slice equivalent to is_accel[0,:,:]\n slice_x = x_inds[:1,:,:]\n slice_y = y_inds[:1,:,:]\n slice_z = z_inds[:1,:,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._first_x_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[-1,:,:]\n slice_x = x_inds[-1:,:,:]\n slice_y = y_inds[-1:,:,:]\n slice_z = z_inds[-1:,:,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._last_x_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,0,:]\n slice_x = x_inds[:,:1,:]\n slice_y = y_inds[:,:1,:]\n slice_z = z_inds[:,:1,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._first_y_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,-1,:]\n slice_x = x_inds[:,-1:,:]\n slice_y = y_inds[:,-1:,:]\n slice_z = z_inds[:,-1:,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._last_y_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,:,0]\n slice_x = x_inds[:,:,:1]\n slice_y = y_inds[:,:,:1]\n slice_z = z_inds[:,:,:1]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._first_z_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,:,-1]\n slice_x = x_inds[:,:,-1:]\n slice_y = y_inds[:,:,-1:]\n slice_z = z_inds[:,:,-1:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._last_z_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[1:,:,:]\n slice_x = x_inds[1:,:,:]\n slice_y = y_inds[1:,:,:]\n slice_z = z_inds[1:,:,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._notfirst_x_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:-1,:,:]\n slice_x = x_inds[:-1,:,:]\n slice_y = y_inds[:-1,:,:]\n slice_z = z_inds[:-1,:,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._notlast_x_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,1:,:]\n slice_x = x_inds[:,1:,:]\n slice_y = y_inds[:,1:,:]\n slice_z = z_inds[:,1:,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._notfirst_y_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,:-1,:]\n slice_x = x_inds[:,:-1,:]\n slice_y = y_inds[:,:-1,:]\n slice_z = z_inds[:,:-1,:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._notlast_y_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,:,1:]\n slice_x = x_inds[:,:,1:]\n slice_y = y_inds[:,:,1:]\n slice_z = z_inds[:,:,1:]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._notfirst_z_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Define slice equivalent to is_accel[:,:,:-1]\n slice_x = 
x_inds[:,:,:-1]\n slice_y = y_inds[:,:,:-1]\n slice_z = z_inds[:,:,:-1]\n bndry_accel = is_accel[(slice_x, slice_y, slice_z)]\n self._notlast_z_accel = (slice_x[bndry_accel], slice_y[bndry_accel],\n slice_z[bndry_accel])\n\n # Store logical for whether neighboring cell is reflector region\n # in all directions\n adj_reflector_left = np.roll(self._coremap, 1, axis=0) == _CMFD_NOACCEL\n self._is_adj_ref_left = adj_reflector_left[\n self._notfirst_x_accel + (np.newaxis,)]\n\n adj_reflector_right = np.roll(self._coremap, -1, axis=0) == \\\n _CMFD_NOACCEL\n self._is_adj_ref_right = adj_reflector_right[\n self._notlast_x_accel + (np.newaxis,)]\n\n adj_reflector_back = np.roll(self._coremap, 1, axis=1) == \\\n _CMFD_NOACCEL\n self._is_adj_ref_back = adj_reflector_back[\n self._notfirst_y_accel + (np.newaxis,)]\n\n adj_reflector_front = np.roll(self._coremap, -1, axis=1) == \\\n _CMFD_NOACCEL\n self._is_adj_ref_front = adj_reflector_front[\n self._notlast_y_accel + (np.newaxis,)]\n\n adj_reflector_bottom = np.roll(self._coremap, 1, axis=2) == \\\n _CMFD_NOACCEL\n self._is_adj_ref_bottom = adj_reflector_bottom[\n self._notfirst_z_accel + (np.newaxis,)]\n\n adj_reflector_top = np.roll(self._coremap, -1, axis=2) == \\\n _CMFD_NOACCEL\n self._is_adj_ref_top = adj_reflector_top[\n self._notlast_z_accel + (np.newaxis,)]\n\n def _precompute_matrix_indices(self):\n \"\"\"Computes the indices and row/column data used to populate CMFD CSR\n matrices. These indices are used in _build_loss_matrix and\n _build_prod_matrix.\n\n \"\"\"\n # Extract energy group indices\n ng = self._indices[3]\n\n # Shift coremap in all directions to determine whether leakage term\n # should be defined for particular cell in matrix\n coremap_shift_left = np.pad(self._coremap, ((1,0),(0,0),(0,0)),\n mode='constant',\n constant_values=_CMFD_NOACCEL)[:-1,:,:]\n\n coremap_shift_right = np.pad(self._coremap, ((0,1),(0,0),(0,0)),\n mode='constant',\n constant_values=_CMFD_NOACCEL)[1:,:,:]\n\n coremap_shift_back = np.pad(self._coremap, ((0,0),(1,0),(0,0)),\n mode='constant',\n constant_values=_CMFD_NOACCEL)[:,:-1,:]\n\n coremap_shift_front = np.pad(self._coremap, ((0,0),(0,1),(0,0)),\n mode='constant',\n constant_values=_CMFD_NOACCEL)[:,1:,:]\n\n coremap_shift_bottom = np.pad(self._coremap, ((0,0),(0,0),(1,0)),\n mode='constant',\n constant_values=_CMFD_NOACCEL)[:,:,:-1]\n\n coremap_shift_top = np.pad(self._coremap, ((0,0),(0,0),(0,1)),\n mode='constant',\n constant_values=_CMFD_NOACCEL)[:,:,1:]\n\n # Create empty row and column vectors to store for loss matrix\n row = np.array([])\n col = np.array([])\n\n # Store all indices used to populate production and loss matrix\n is_accel = self._coremap != _CMFD_NOACCEL\n self._accel_idxs = np.where(is_accel)\n self._accel_neig_left_idxs = (np.where(is_accel &\n (coremap_shift_left != _CMFD_NOACCEL)))\n self._accel_neig_right_idxs = (np.where(is_accel &\n (coremap_shift_right != _CMFD_NOACCEL)))\n self._accel_neig_back_idxs = (np.where(is_accel &\n (coremap_shift_back != _CMFD_NOACCEL)))\n self._accel_neig_front_idxs = (np.where(is_accel &\n (coremap_shift_front != _CMFD_NOACCEL)))\n self._accel_neig_bot_idxs = (np.where(is_accel &\n (coremap_shift_bottom != _CMFD_NOACCEL)))\n self._accel_neig_top_idxs = (np.where(is_accel &\n (coremap_shift_top != _CMFD_NOACCEL)))\n\n for g in range(ng):\n # Extract row and column data of regions where a cell and its\n # neighbor to the left are both fuel regions\n idx_x = ng * (self._coremap[self._accel_neig_left_idxs]) + g\n idx_y = ng * 
(coremap_shift_left[self._accel_neig_left_idxs]) + g\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Extract row and column data of regions where a cell and its\n # neighbor to the right are both fuel regions\n idx_x = ng * (self._coremap[self._accel_neig_right_idxs]) + g\n idx_y = ng * (coremap_shift_right[self._accel_neig_right_idxs]) + g\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Extract row and column data of regions where a cell and its\n # neighbor to the back are both fuel regions\n idx_x = ng * (self._coremap[self._accel_neig_back_idxs]) + g\n idx_y = ng * (coremap_shift_back[self._accel_neig_back_idxs]) + g\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Extract row and column data of regions where a cell and its\n # neighbor to the front are both fuel regions\n idx_x = ng * (self._coremap[self._accel_neig_front_idxs]) + g\n idx_y = ng * (coremap_shift_front[self._accel_neig_front_idxs]) + g\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Extract row and column data of regions where a cell and its\n # neighbor to the bottom are both fuel regions\n idx_x = ng * (self._coremap[self._accel_neig_bot_idxs]) + g\n idx_y = ng * (coremap_shift_bottom[self._accel_neig_bot_idxs]) \\\n + g\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Extract row and column data of regions where a cell and its\n # neighbor to the top are both fuel regions\n idx_x = ng * (self._coremap[self._accel_neig_top_idxs]) + g\n idx_y = ng * (coremap_shift_top[self._accel_neig_top_idxs]) + g\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Extract all regions where a cell is a fuel region\n idx_x = ng * (self._coremap[self._accel_idxs]) + g\n idx_y = idx_x\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n for h in range(ng):\n if h != g:\n # Extract all regions where a cell is a fuel region\n idx_x = ng * (self._coremap[self._accel_idxs]) + g\n idx_y = ng * (self._coremap[self._accel_idxs]) + h\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Store row and col as rows and columns of production matrix\n self._loss_row = row\n self._loss_col = col\n\n # Create empty row and column vectors to store for production matrix\n row = np.array([], dtype=int)\n col = np.array([], dtype=int)\n\n for g in range(ng):\n for h in range(ng):\n # Extract all regions where a cell is a fuel region\n idx_x = ng * (self._coremap[self._accel_idxs]) + g\n idx_y = ng * (self._coremap[self._accel_idxs]) + h\n # Store rows, cols, and data to add to CSR matrix\n row = np.append(row, idx_x)\n col = np.append(col, idx_y)\n\n # Store row and col as rows and columns of production matrix\n self._prod_row = row\n self._prod_col = col\n\n def _compute_dtilde(self):\n \"\"\"Computes the diffusion coupling coefficient using a vectorized numpy\n approach. Aggregate values for the dtilde multidimensional array are\n populated by first defining values on the problem boundary, and then\n for all other regions. For indices not lying on a boundary, dtilde\n values are distinguished between regions that neighbor a reflector\n region and regions that don't neighbor a reflector\n\n \"\"\"\n # Logical for determining whether a zero flux \"albedo\" b.c. should be\n # applied\n is_zero_flux_alb = abs(self._albedo - _ZERO_FLUX) < _TINY_BIT\n\n # Define dtilde at left surface for all mesh cells on left boundary\n # Separate between zero flux b.c. 
and alebdo b.c.\n boundary = self._first_x_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dx = self._hxyz[boundary + (np.newaxis, 0)]\n if is_zero_flux_alb[0]:\n self._dtilde[boundary_grps + (0,)] = 2.0 * D / dx\n else:\n alb = self._albedo[0]\n self._dtilde[boundary_grps + (0,)] = ((2.0 * D * (1.0 - alb))\n / (4.0 * D * (1.0 + alb) +\n (1.0 - alb) * dx))\n\n # Define dtilde at right surface for all mesh cells on right boundary\n # Separate between zero flux b.c. and alebdo b.c.\n boundary = self._last_x_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dx = self._hxyz[boundary + (np.newaxis, 0)]\n if is_zero_flux_alb[1]:\n self._dtilde[boundary_grps + (1,)] = 2.0 * D / dx\n else:\n alb = self._albedo[1]\n self._dtilde[boundary_grps + (1,)] = ((2.0 * D * (1.0 - alb))\n / (4.0 * D * (1.0 + alb) +\n (1.0 - alb) * dx))\n\n # Define dtilde at back surface for all mesh cells on back boundary\n # Separate between zero flux b.c. and alebdo b.c.\n boundary = self._first_y_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dy = self._hxyz[boundary + (np.newaxis, 1)]\n if is_zero_flux_alb[2]:\n self._dtilde[boundary_grps + (2,)] = 2.0 * D / dy\n else:\n alb = self._albedo[2]\n self._dtilde[boundary_grps + (2,)] = ((2.0 * D * (1.0 - alb))\n / (4.0 * D * (1.0 + alb) +\n (1.0 - alb) * dy))\n\n # Define dtilde at front surface for all mesh cells on front boundary\n # Separate between zero flux b.c. and alebdo b.c.\n boundary = self._last_y_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dy = self._hxyz[boundary + (np.newaxis, 1)]\n if is_zero_flux_alb[3]:\n self._dtilde[boundary_grps + (3,)] = 2.0 * D / dy\n else:\n alb = self._albedo[3]\n self._dtilde[boundary_grps + (3,)] = ((2.0 * D * (1.0 - alb))\n / (4.0 * D * (1.0 + alb) +\n (1.0 - alb) * dy))\n\n # Define dtilde at bottom surface for all mesh cells on bottom boundary\n # Separate between zero flux b.c. and alebdo b.c.\n boundary = self._first_z_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dz = self._hxyz[boundary + (np.newaxis, 2)]\n if is_zero_flux_alb[4]:\n self._dtilde[boundary_grps + (4,)] = 2.0 * D / dz\n else:\n alb = self._albedo[4]\n self._dtilde[boundary_grps + (4,)] = ((2.0 * D * (1.0 - alb))\n / (4.0 * D * (1.0 + alb) +\n (1.0 - alb) * dz))\n\n # Define dtilde at top surface for all mesh cells on top boundary\n # Separate between zero flux b.c. 
and alebdo b.c.\n boundary = self._last_z_accel\n boundary_grps = boundary + (slice(None),)\n\n D = self._diffcof[boundary_grps]\n dz = self._hxyz[boundary + (np.newaxis, 2)]\n if is_zero_flux_alb[5]:\n self._dtilde[boundary_grps + (5,)] = 2.0 * D / dz\n else:\n alb = self._albedo[5]\n self._dtilde[boundary_grps + (5,)] = ((2.0 * D * (1 - alb))\n / (4.0 * D * (1.0 + alb) +\n (1.0 - alb) * dz))\n\n # Define reflector albedo for all cells on the left surface, in case\n # a cell borders a reflector region on the left\n current_in_left = self._current[:,:,:,_CURRENTS['in_left'],:]\n current_out_left = self._current[:,:,:,_CURRENTS['out_left'],:]\n ref_albedo = np.divide(current_in_left, current_out_left,\n where=current_out_left > 1.0e-10,\n out=np.ones_like(current_out_left))\n\n # Diffusion coefficient of neighbor to left\n neig_dc = np.roll(self._diffcof, 1, axis=0)\n # Cell dimensions of neighbor to left\n neig_hxyz = np.roll(self._hxyz, 1, axis=0)\n\n # Define dtilde at left surface for all mesh cells not on left boundary\n # Dtilde is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notfirst_x_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dx = self._hxyz[boundary + (np.newaxis, 0)]\n neig_D = neig_dc[boundary_grps]\n neig_dx = neig_hxyz[boundary + (np.newaxis, 0)]\n alb = ref_albedo[boundary_grps]\n is_adj_ref = self._is_adj_ref_left\n dtilde = np.where(is_adj_ref, (2.0 * D * (1.0 - alb)) /\n (4.0 * D * (1.0 + alb) + (1.0 - alb) * dx),\n (2.0 * D * neig_D) / (neig_dx * D + dx * neig_D))\n self._dtilde[boundary_grps + (0,)] = dtilde\n\n # Define reflector albedo for all cells on the right surface, in case\n # a cell borders a reflector region on the right\n current_in_right = self._current[:,:,:,_CURRENTS['in_right'],:]\n current_out_right = self._current[:,:,:,_CURRENTS['out_right'],:]\n ref_albedo = np.divide(current_in_right, current_out_right,\n where=current_out_right > 1.0e-10,\n out=np.ones_like(current_out_right))\n\n # Diffusion coefficient of neighbor to right\n neig_dc = np.roll(self._diffcof, -1, axis=0)\n # Cell dimensions of neighbor to right\n neig_hxyz = np.roll(self._hxyz, -1, axis=0)\n\n # Define dtilde at right surface for all mesh cells not on right\n # boundary. 
Dtilde is defined differently for regions that do and don't\n # neighbor reflector regions\n boundary = self._notlast_x_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dx = self._hxyz[boundary + (np.newaxis, 0)]\n neig_D = neig_dc[boundary_grps]\n neig_dx = neig_hxyz[boundary + (np.newaxis, 0)]\n alb = ref_albedo[boundary_grps]\n is_adj_ref = self._is_adj_ref_right\n dtilde = np.where(is_adj_ref, (2.0 * D * (1.0 - alb)) /\n (4.0 * D * (1.0 + alb) + (1.0 - alb) * dx),\n (2.0 * D * neig_D) / (neig_dx * D + dx * neig_D))\n self._dtilde[boundary_grps + (1,)] = dtilde\n\n # Define reflector albedo for all cells on the back surface, in case\n # a cell borders a reflector region on the back\n current_in_back = self._current[:,:,:,_CURRENTS['in_back'],:]\n current_out_back = self._current[:,:,:,_CURRENTS['out_back'],:]\n ref_albedo = np.divide(current_in_back, current_out_back,\n where=current_out_back > 1.0e-10,\n out=np.ones_like(current_out_back))\n\n # Diffusion coefficient of neighbor to back\n neig_dc = np.roll(self._diffcof, 1, axis=1)\n # Cell dimensions of neighbor to back\n neig_hxyz = np.roll(self._hxyz, 1, axis=1)\n\n # Define dtilde at back surface for all mesh cells not on back boundary\n # Dtilde is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notfirst_y_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dy = self._hxyz[boundary + (np.newaxis, 1)]\n neig_D = neig_dc[boundary_grps]\n neig_dy = neig_hxyz[boundary + (np.newaxis, 1)]\n alb = ref_albedo[boundary_grps]\n is_adj_ref = self._is_adj_ref_back\n dtilde = np.where(is_adj_ref, (2.0 * D * (1.0 - alb)) /\n (4.0 * D * (1.0 + alb) + (1.0 - alb) * dy),\n (2.0 * D * neig_D) / (neig_dy * D + dy * neig_D))\n self._dtilde[boundary_grps + (2,)] = dtilde\n\n # Define reflector albedo for all cells on the front surface, in case\n # a cell borders a reflector region in the front\n current_in_front = self._current[:,:,:,_CURRENTS['in_front'],:]\n current_out_front = self._current[:,:,:,_CURRENTS['out_front'],:]\n ref_albedo = np.divide(current_in_front, current_out_front,\n where=current_out_front > 1.0e-10,\n out=np.ones_like(current_out_front))\n\n # Diffusion coefficient of neighbor to front\n neig_dc = np.roll(self._diffcof, -1, axis=1)\n # Cell dimensions of neighbor to front\n neig_hxyz = np.roll(self._hxyz, -1, axis=1)\n\n # Define dtilde at front surface for all mesh cells not on front\n # boundary. 
Dtilde is defined differently for regions that do and don't\n # neighbor reflector regions\n boundary = self._notlast_y_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dy = self._hxyz[boundary + (np.newaxis, 1)]\n neig_D = neig_dc[boundary_grps]\n neig_dy = neig_hxyz[boundary + (np.newaxis, 1)]\n alb = ref_albedo[boundary_grps]\n is_adj_ref = self._is_adj_ref_front\n dtilde = np.where(is_adj_ref, (2.0 * D * (1.0 - alb)) /\n (4.0 * D * (1.0 + alb) + (1.0 - alb) * dy),\n (2.0 * D * neig_D) / (neig_dy * D + dy * neig_D))\n self._dtilde[boundary_grps + (3,)] = dtilde\n\n # Define reflector albedo for all cells on the bottom surface, in case\n # a cell borders a reflector region on the bottom\n current_in_bottom = self._current[:,:,:,_CURRENTS['in_bottom'],:]\n current_out_bottom = self._current[:,:,:,_CURRENTS['out_bottom'],:]\n ref_albedo = np.divide(current_in_bottom, current_out_bottom,\n where=current_out_bottom > 1.0e-10,\n out=np.ones_like(current_out_bottom))\n\n # Diffusion coefficient of neighbor to bottom\n neig_dc = np.roll(self._diffcof, 1, axis=2)\n # Cell dimensions of neighbor to bottom\n neig_hxyz = np.roll(self._hxyz, 1, axis=2)\n\n # Define dtilde at bottom surface for all mesh cells not on bottom\n # boundary. Dtilde is defined differently for regions that do and don't\n # neighbor reflector regions\n boundary = self._notfirst_z_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dz = self._hxyz[boundary + (np.newaxis, 2)]\n neig_D = neig_dc[boundary_grps]\n neig_dz = neig_hxyz[boundary + (np.newaxis, 2)]\n alb = ref_albedo[boundary_grps]\n is_adj_ref = self._is_adj_ref_bottom\n dtilde = np.where(is_adj_ref, (2.0 * D * (1.0 - alb)) /\n (4.0 * D * (1.0 + alb) + (1.0 - alb) * dz),\n (2.0 * D * neig_D) / (neig_dz * D + dz * neig_D))\n self._dtilde[boundary_grps + (4,)] = dtilde\n\n # Define reflector albedo for all cells on the top surface, in case\n # a cell borders a reflector region on the top\n current_in_top = self._current[:,:,:,_CURRENTS['in_top'],:]\n current_out_top = self._current[:,:,:,_CURRENTS['out_top'],:]\n ref_albedo = np.divide(current_in_top, current_out_top,\n where=current_out_top > 1.0e-10,\n out=np.ones_like(current_out_top))\n\n # Diffusion coefficient of neighbor to top\n neig_dc = np.roll(self._diffcof, -1, axis=2)\n # Cell dimensions of neighbor to top\n neig_hxyz = np.roll(self._hxyz, -1, axis=2)\n\n # Define dtilde at top surface for all mesh cells not on top boundary\n # Dtilde is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notlast_z_accel\n boundary_grps = boundary + (slice(None),)\n D = self._diffcof[boundary_grps]\n dz = self._hxyz[boundary + (np.newaxis, 2)]\n neig_D = neig_dc[boundary_grps]\n neig_dz = neig_hxyz[boundary + (np.newaxis, 2)]\n alb = ref_albedo[boundary_grps]\n is_adj_ref = self._is_adj_ref_top\n dtilde = np.where(is_adj_ref, (2.0 * D * (1.0 - alb)) /\n (4.0 * D * (1.0 + alb) + (1.0 - alb) * dz),\n (2.0 * D * neig_D) / (neig_dz * D + dz * neig_D))\n self._dtilde[boundary_grps + (5,)] = dtilde\n\n def _compute_dhat(self):\n \"\"\"Computes the nonlinear coupling coefficient using a vectorized numpy\n approach. Aggregate values for the dhat multidimensional array are\n populated by first defining values on the problem boundary, and then\n for all other regions. 
For indices not lying by a boundary, dhat values\n are distinguished between regions that neighbor a reflector region and\n regions that don't neighbor a reflector\n\n \"\"\"\n # Define current in each direction\n current_in_left = self._current[:,:,:,_CURRENTS['in_left'],:]\n current_out_left = self._current[:,:,:,_CURRENTS['out_left'],:]\n current_in_right = self._current[:,:,:,_CURRENTS['in_right'],:]\n current_out_right = self._current[:,:,:,_CURRENTS['out_right'],:]\n current_in_back = self._current[:,:,:,_CURRENTS['in_back'],:]\n current_out_back = self._current[:,:,:,_CURRENTS['out_back'],:]\n current_in_front = self._current[:,:,:,_CURRENTS['in_front'],:]\n current_out_front = self._current[:,:,:,_CURRENTS['out_front'],:]\n current_in_bottom = self._current[:,:,:,_CURRENTS['in_bottom'],:]\n current_out_bottom = self._current[:,:,:,_CURRENTS['out_bottom'],:]\n current_in_top = self._current[:,:,:,_CURRENTS['in_top'],:]\n current_out_top = self._current[:,:,:,_CURRENTS['out_top'],:]\n\n dx = self._hxyz[:,:,:,np.newaxis,0]\n dy = self._hxyz[:,:,:,np.newaxis,1]\n dz = self._hxyz[:,:,:,np.newaxis,2]\n dxdydz = np.prod(self._hxyz, axis=3)[:,:,:,np.newaxis]\n\n # Define net current on each face\n net_current_left = (current_in_left - current_out_left) / dxdydz * dx\n net_current_right = (current_out_right - current_in_right) / dxdydz * \\\n dx\n net_current_back = (current_in_back - current_out_back) / dxdydz * dy\n net_current_front = (current_out_front - current_in_front) / dxdydz * \\\n dy\n net_current_bottom = (current_in_bottom - current_out_bottom) / \\\n dxdydz * dz\n net_current_top = (current_out_top - current_in_top) / dxdydz * dz\n\n # Define flux in each cell\n cell_flux = self._flux / dxdydz\n # Extract indices of coremap that are accelerated\n is_accel = self._coremap != _CMFD_NOACCEL\n\n # Define dhat at left surface for all mesh cells on left boundary\n boundary = self._first_x_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_left[boundary_grps]\n dtilde = self._dtilde[boundary + (slice(None), 0)]\n flux = cell_flux[boundary_grps]\n self._dhat[boundary_grps + (0,)] = (net_current + dtilde * flux) / flux\n\n # Define dhat at right surface for all mesh cells on right boundary\n boundary = self._last_x_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_right[boundary_grps]\n dtilde = self._dtilde[boundary + (slice(None), 1)]\n flux = cell_flux[boundary_grps]\n self._dhat[boundary_grps + (1,)] = (net_current - dtilde * flux) / flux\n\n # Define dhat at back surface for all mesh cells on back boundary\n boundary = self._first_y_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_back[boundary_grps]\n dtilde = self._dtilde[boundary + (slice(None), 2)]\n flux = cell_flux[boundary_grps]\n self._dhat[boundary_grps + (2,)] = (net_current + dtilde * flux) / flux\n\n # Define dhat at front surface for all mesh cells on front boundary\n boundary = self._last_y_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_front[boundary_grps]\n dtilde = self._dtilde[boundary + (slice(None), 3)]\n flux = cell_flux[boundary_grps]\n self._dhat[boundary_grps + (3,)] = (net_current - dtilde * flux) / flux\n\n # Define dhat at bottom surface for all mesh cells on bottom boundary\n boundary = self._first_z_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_bottom[boundary_grps]\n dtilde = self._dtilde[boundary + (slice(None), 4)]\n flux = cell_flux[boundary_grps]\n 
self._dhat[boundary_grps + (4,)] = (net_current + dtilde * flux) / flux\n\n # Define dhat at top surface for all mesh cells on top boundary\n boundary = self._last_z_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_top[boundary_grps]\n dtilde = self._dtilde[boundary + (slice(None), 5)]\n flux = cell_flux[boundary_grps]\n self._dhat[boundary_grps + (5,)] = (net_current - dtilde * flux) / flux\n\n # Cell flux of neighbor to left\n neig_flux = np.roll(self._flux, 1, axis=0) / dxdydz\n\n # Define dhat at left surface for all mesh cells not on left boundary\n # Dhat is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notfirst_x_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_left[boundary_grps]\n dtilde = self._dtilde[boundary_grps + (0,)]\n flux = cell_flux[boundary_grps]\n flux_left = neig_flux[boundary_grps]\n is_adj_ref = self._is_adj_ref_left\n dhat = np.where(is_adj_ref, (net_current + dtilde * flux) / flux,\n (net_current - dtilde * (flux_left - flux)) /\n (flux_left + flux))\n self._dhat[boundary_grps + (0,)] = dhat\n\n # Cell flux of neighbor to right\n neig_flux = np.roll(self._flux, -1, axis=0) / dxdydz\n\n # Define dhat at right surface for all mesh cells not on right boundary\n # Dhat is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notlast_x_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_right[boundary_grps]\n dtilde = self._dtilde[boundary_grps + (1,)]\n flux = cell_flux[boundary_grps]\n flux_right = neig_flux[boundary_grps]\n is_adj_ref = self._is_adj_ref_right\n dhat = np.where(is_adj_ref, (net_current - dtilde * flux) / flux,\n (net_current + dtilde * (flux_right - flux)) /\n (flux_right + flux))\n self._dhat[boundary_grps + (1,)] = dhat\n\n # Cell flux of neighbor to back\n neig_flux = np.roll(self._flux, 1, axis=1) / dxdydz\n\n # Define dhat at back surface for all mesh cells not on back boundary\n # Dhat is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notfirst_y_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_back[boundary_grps]\n dtilde = self._dtilde[boundary_grps + (2,)]\n flux = cell_flux[boundary_grps]\n flux_back = neig_flux[boundary_grps]\n is_adj_ref = self._is_adj_ref_back\n dhat = np.where(is_adj_ref, (net_current + dtilde * flux) / flux,\n (net_current - dtilde * (flux_back - flux)) /\n (flux_back + flux))\n self._dhat[boundary_grps + (2,)] = dhat\n\n # Cell flux of neighbor to front\n neig_flux = np.roll(self._flux, -1, axis=1) / dxdydz\n\n # Define dhat at front surface for all mesh cells not on front boundary\n # Dhat is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notlast_y_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_front[boundary_grps]\n dtilde = self._dtilde[boundary_grps + (3,)]\n flux = cell_flux[boundary_grps]\n flux_front = neig_flux[boundary_grps]\n is_adj_ref = self._is_adj_ref_front\n dhat = np.where(is_adj_ref, (net_current - dtilde * flux) / flux,\n (net_current + dtilde * (flux_front - flux)) /\n (flux_front + flux))\n self._dhat[boundary_grps + (3,)] = dhat\n\n # Cell flux of neighbor to bottom\n neig_flux = np.roll(self._flux, 1, axis=2) / dxdydz\n\n # Define dhat at bottom surface for all mesh cells not on bottom\n # boundary. 
Dhat is defined differently for regions that do and don't\n # neighbor reflector regions\n boundary = self._notfirst_z_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_bottom[boundary_grps]\n dtilde = self._dtilde[boundary_grps + (4,)]\n flux = cell_flux[boundary_grps]\n flux_bottom = neig_flux[boundary_grps]\n is_adj_ref = self._is_adj_ref_bottom\n dhat = np.where(is_adj_ref, (net_current + dtilde * flux) / flux,\n (net_current - dtilde * (flux_bottom - flux)) /\n (flux_bottom + flux))\n self._dhat[boundary_grps + (4,)] = dhat\n\n # Cell flux of neighbor to top\n neig_flux = np.roll(self._flux, -1, axis=2) / dxdydz\n\n # Define dhat at top surface for all mesh cells not on top boundary\n # Dhat is defined differently for regions that do and don't neighbor\n # reflector regions\n boundary = self._notlast_z_accel\n boundary_grps = boundary + (slice(None),)\n net_current = net_current_top[boundary_grps]\n dtilde = self._dtilde[boundary_grps + (5,)]\n flux = cell_flux[boundary_grps]\n flux_top = neig_flux[boundary_grps]\n is_adj_ref = self._is_adj_ref_top\n dhat = np.where(is_adj_ref, (net_current - dtilde * flux) / flux,\n (net_current + dtilde * (flux_top - flux)) /\n (flux_top + flux))\n self._dhat[boundary_grps + (5,)] = dhat\n\n def _create_cmfd_tally(self):\n \"\"\"Creates all tallies in-memory that are used to solve CMFD problem\"\"\"\n # Create Mesh object based on CMFDMesh mesh_type, stored internally\n if self._mesh.mesh_type == 'regular':\n cmfd_mesh = openmc.lib.RegularMesh()\n # Set dimension and parameters of mesh object\n cmfd_mesh.dimension = self._mesh.dimension\n cmfd_mesh.set_parameters(lower_left=self._mesh.lower_left,\n upper_right=self._mesh.upper_right,\n width=self._mesh.width)\n elif self._mesh.mesh_type == 'rectilinear':\n cmfd_mesh = openmc.lib.RectilinearMesh()\n # Set grid of mesh object\n x_grid, y_grid, z_grid = self._mesh.grid\n cmfd_mesh.set_grid(x_grid, y_grid, z_grid)\n\n # Store id of mesh object\n self._mesh_id = cmfd_mesh.id\n\n # Create mesh Filter object, stored internally\n mesh_filter = openmc.lib.MeshFilter()\n # Set mesh for Mesh Filter\n mesh_filter.mesh = cmfd_mesh\n\n # Set up energy filters, if applicable\n if self._energy_filters:\n # Create Energy Filter object, stored internally\n energy_filter = openmc.lib.EnergyFilter()\n # Set bins for Energy Filter\n energy_filter.bins = self._egrid\n\n # Create Energy Out Filter object, stored internally\n energyout_filter = openmc.lib.EnergyoutFilter()\n # Set bins for Energy Filter\n energyout_filter.bins = self._egrid\n\n # Create Mesh Surface Filter object, stored internally\n meshsurface_filter = openmc.lib.MeshSurfaceFilter()\n # Set mesh for Mesh Surface Filter\n meshsurface_filter.mesh = cmfd_mesh\n\n # Create Legendre Filter object, stored internally\n legendre_filter = openmc.lib.LegendreFilter()\n # Set order for Legendre Filter\n legendre_filter.order = 1\n\n # Create CMFD tallies, stored internally\n n_tallies = 4\n self._tally_ids = []\n for i in range(n_tallies):\n cmfd_tally = openmc.lib.Tally()\n # Set nuclide bins\n cmfd_tally.nuclides = ['total']\n self._tally_ids.append(cmfd_tally.id)\n\n # Set attributes of CMFD flux, total tally\n if i == 0:\n # Set filters for tally\n if self._energy_filters:\n cmfd_tally.filters = [mesh_filter, energy_filter]\n else:\n cmfd_tally.filters = [mesh_filter]\n # Set scores, type, and estimator for tally\n cmfd_tally.scores = ['flux', 'total']\n cmfd_tally.type = 'volume'\n cmfd_tally.estimator = 'analog'\n\n # Set 
attributes of CMFD neutron production tally\n elif i == 1:\n # Set filters for tally\n if self._energy_filters:\n cmfd_tally.filters = [mesh_filter, energy_filter,\n energyout_filter]\n else:\n cmfd_tally.filters = [mesh_filter]\n # Set scores, type, and estimator for tally\n cmfd_tally.scores = ['nu-scatter', 'nu-fission']\n cmfd_tally.type = 'volume'\n cmfd_tally.estimator = 'analog'\n\n # Set attributes of CMFD surface current tally\n elif i == 2:\n # Set filters for tally\n if self._energy_filters:\n cmfd_tally.filters = [meshsurface_filter, energy_filter]\n else:\n cmfd_tally.filters = [meshsurface_filter]\n # Set scores, type, and estimator for tally\n cmfd_tally.scores = ['current']\n cmfd_tally.type = 'mesh-surface'\n cmfd_tally.estimator = 'analog'\n\n # Set attributes of CMFD P1 scatter tally\n elif i == 3:\n # Set filters for tally\n if self._energy_filters:\n cmfd_tally.filters = [mesh_filter, legendre_filter,\n energy_filter]\n else:\n cmfd_tally.filters = [mesh_filter, legendre_filter]\n # Set scores for tally\n cmfd_tally.scores = ['scatter']\n cmfd_tally.type = 'volume'\n cmfd_tally.estimator = 'analog'\n\n # Set all tallies to be active from beginning\n cmfd_tally.active = True\n\n # Initialize CMFD mesh and energy grid in C++ for CMFD reweight\n args = self._tally_ids[0], self._indices, self._norm\n openmc.lib._dll.openmc_initialize_mesh_egrid(*args)\n"
] | [
[
"numpy.log10",
"numpy.diff",
"numpy.allclose"
],
[
"numpy.radians",
"numpy.allclose",
"numpy.asarray",
"numpy.arange",
"numpy.ravel",
"numpy.array",
"numpy.sum"
],
[
"numpy.add.reduceat",
"numpy.swapaxes",
"numpy.allclose",
"numpy.linspace",
"numpy.unique",
"numpy.asarray",
"numpy.reshape",
"numpy.squeeze",
"numpy.arange",
"numpy.sqrt",
"numpy.nan_to_num",
"numpy.tile",
"numpy.zeros_like",
"numpy.searchsorted",
"numpy.zeros",
"numpy.where",
"numpy.vstack",
"numpy.isclose"
],
[
"numpy.product",
"numpy.cumsum",
"numpy.max",
"numpy.zeros_like",
"numpy.any",
"numpy.where",
"numpy.roll",
"numpy.swapaxes",
"numpy.ones_like",
"numpy.pad",
"numpy.save",
"numpy.zeros",
"numpy.log",
"numpy.multiply",
"scipy.sparse.csr_matrix",
"numpy.append",
"numpy.errstate",
"numpy.logical_and",
"scipy.sparse.save_npz",
"numpy.flip",
"numpy.array",
"numpy.sum",
"numpy.array_equal",
"numpy.indices",
"numpy.ones",
"numpy.argwhere",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
eric91sanchez/hair_seg | [
"4f688daac0ec4ea906ff0462ae51634293e35447"
] | [
"hair_seg/evaluate.py"
] | [
"\"\"\"\nEvaluate\n\"\"\"\n\nimport re\nimport math\nimport datetime\nimport random\nimport torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\n\nfrom loss import iou_loss, HairMattingLoss, acc_loss, F1_loss\nfrom utils import create_multi_figure\n\nUSE_CUDA = torch.cuda.is_available()\nDEVICE = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\n\n\ndef evalTest(test_data, model, args):\n testloader = DataLoader(test_data, batch_size=4, shuffle=False)\n hairmat_loss = HairMattingLoss(args.grad_lambda)\n\n total_loss, total_iou, total_acc, total_f1 = 0, 0, 0, 0\n for batch in testloader:\n image, mask = (i.to(DEVICE) for i in batch)\n\n pred = model(image)\n total_loss += hairmat_loss(pred, mask, image).item()\n iloss = iou_loss(pred, mask).item()\n total_iou += iloss\n aloss = acc_loss(pred, mask).item()\n total_acc += aloss\n floss = F1_loss(pred, mask).item()\n total_f1 += floss\n\n print(\"Testing Loss: \", total_loss / len(testloader))\n print(\"Testing IOU: \", total_iou / len(testloader))\n print(\"Testing Acc: \", total_acc / len(testloader))\n print(\"Testing F1: \", total_f1 / len(testloader))\n\n\ndef evaluateOne(img, model, absolute=True):\n img = img.to(DEVICE).unsqueeze(0)\n pred = model(img)\n\n if absolute:\n pred[pred > 0.5] = 1.0\n pred[pred <= 0.5] = 0.0\n else:\n pred[pred < 0.4] = 0\n # pred[pred < .90] = 0\n\n rows = [[img[0], pred[0]]]\n create_multi_figure(rows, dye=True)\n plt.savefig(\"result.jpg\")\n\n\ndef evaluate(test_data, model, num, absolute=True):\n rows = [None] * num\n for i in range(num):\n idx = random.randint(0, len(test_data) - 1)\n\n image, mask = (i.to(DEVICE).unsqueeze(0) for i in test_data[idx])\n pred = model(image)\n\n if absolute:\n pred[pred > 0.5] = 1.0\n pred[pred <= 0.5] = 0.0\n else:\n pred[pred < 0.4] = 0\n\n rows[i] = [image[0], mask[0], pred[0]] # get batch\n\n create_multi_figure(rows, dye=True)\n plt.savefig(\"result.jpg\")\n"
] | [
[
"torch.device",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.savefig",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pnijhara/improver | [
"5961a6fab9a79cd63a943eff07bf79d4e5f0ff03",
"5961a6fab9a79cd63a943eff07bf79d4e5f0ff03",
"5961a6fab9a79cd63a943eff07bf79d4e5f0ff03",
"5961a6fab9a79cd63a943eff07bf79d4e5f0ff03",
"a5c31be3430df429ae38e7c16e267fcbc2af1858"
] | [
"improver_tests/between_thresholds/test_between_thresholds.py",
"improver_tests/blending/weighted_blend/test_MergeCubesForWeightedBlending.py",
"improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py",
"improver/blending/calculate_weights_and_blend.py",
"improver/standardise.py"
] | [
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2020 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Tests for the OccurrenceBetweenThresholds plugin\"\"\"\n\nimport unittest\n\nimport iris\nimport numpy as np\nfrom iris.tests import IrisTest\n\nfrom improver.between_thresholds import OccurrenceBetweenThresholds\n\nfrom ..set_up_test_cubes import set_up_percentile_cube, set_up_probability_cube\n\n\nclass Test_process(IrisTest):\n \"\"\"Test the process method\"\"\"\n\n def setUp(self):\n \"\"\"Set up a test cube with probability data\"\"\"\n data = np.array(\n [\n [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[0.9, 0.9, 0.9], [0.8, 0.8, 0.8], [0.7, 0.7, 0.7]],\n [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3]],\n [[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [0.1, 0.2, 0.2]],\n ],\n dtype=np.float32,\n )\n temp_thresholds = np.array([279, 280, 281, 282], dtype=np.float32)\n vis_thresholds = np.array([100, 1000, 5000, 10000], dtype=np.float32)\n\n self.temp_cube = set_up_probability_cube(data, temp_thresholds)\n self.vis_cube = set_up_probability_cube(\n np.flip(data, axis=0),\n vis_thresholds,\n variable_name=\"visibility\",\n threshold_units=\"m\",\n spp__relative_to_threshold=\"below\",\n )\n\n # set up a cube of rainfall rates in m s-1 (~1e-8 values)\n self.precip_cube = self.temp_cube.copy()\n self.precip_cube.coord(\"air_temperature\").rename(\"rainfall_rate\")\n self.precip_cube.coord(\"rainfall_rate\").var_name = \"threshold\"\n self.precip_cube.coord(\"rainfall_rate\").points = np.array(\n [0, 0.25, 0.5, 1], dtype=np.float32\n )\n self.precip_cube.coord(\"rainfall_rate\").units = \"mm h-1\"\n self.precip_cube.coord(\"rainfall_rate\").convert_units(\"m s-1\")\n\n def test_above_threshold(self):\n \"\"\"Test values from an \"above threshold\" cube\"\"\"\n threshold_ranges = [[280, 281], [281, 282]]\n expected_data = np.array(\n [\n [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]],\n [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], 
[0.0, 0.0, 0.1]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), \"K\")\n result = plugin(self.temp_cube)\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertEqual(\n result.name(), \"probability_of_air_temperature_between_thresholds\"\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n thresh_coord = result.coord(\"air_temperature\")\n self.assertArrayAlmostEqual(thresh_coord.points, [281.0, 282.0])\n self.assertArrayAlmostEqual(thresh_coord.bounds, threshold_ranges)\n self.assertEqual(\n thresh_coord.attributes[\"spp__relative_to_threshold\"], \"between_thresholds\"\n )\n\n def test_below_threshold(self):\n \"\"\"Test values from a \"below threshold\" cube\"\"\"\n threshold_ranges = [[1000, 5000]]\n expected_data = np.array(\n [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), \"m\")\n result = plugin(self.vis_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertArrayAlmostEqual(result.coord(\"visibility\").points, [5000.0])\n self.assertArrayAlmostEqual(result.coord(\"visibility\").bounds, threshold_ranges)\n\n def test_skip_threshold(self):\n \"\"\"Test calculation works for non-adjacent thresholds\"\"\"\n threshold_ranges = [[100, 1000], [1000, 10000]]\n expected_data = np.array(\n [\n [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],\n [[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"m\")\n result = plugin(self.vis_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n def test_threshold_units(self):\n \"\"\"Test calculation works for thresholds specified in different units\n from the cube data\"\"\"\n threshold_ranges = [[0.1, 1], [1, 10]]\n expected_data = np.array(\n [\n [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],\n [[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"km\")\n result = plugin(self.vis_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n # check original cube units are not modified\n self.assertEqual(self.vis_cube.coord(\"visibility\").units, \"m\")\n # check output cube units match original cube\n self.assertEqual(result.coord(\"visibility\").units, \"m\")\n self.assertArrayAlmostEqual(result.coord(\"visibility\").points, [1000, 10000])\n\n def test_error_non_probability_cube(self):\n \"\"\"Test failure if cube doesn't contain probabilities\"\"\"\n perc_cube = set_up_percentile_cube(\n np.ones((3, 3, 3), dtype=np.float32),\n np.array((25, 50, 75), dtype=np.float32),\n )\n plugin = OccurrenceBetweenThresholds([[25, 50]], \"K\")\n msg = \"Input is not a probability cube\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin(perc_cube)\n\n def test_error_between_thresholds_cube(self):\n \"\"\"Test failure if cube isn't above or below threshold\"\"\"\n # use plugin to generate a \"between_thresholds\" cube...\n between_thresholds_cube = OccurrenceBetweenThresholds(\n [[280, 281], [281, 282]], \"K\"\n )(self.temp_cube)\n plugin = OccurrenceBetweenThresholds([[281, 282]], \"K\")\n msg = \"Input cube must contain\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin(between_thresholds_cube)\n\n def test_error_thresholds_unavailable(self):\n \"\"\"Test error if cube doesn't contain the required thresholds\"\"\"\n threshold_ranges = [[10, 100], [1000, 30000]]\n plugin = 
OccurrenceBetweenThresholds(threshold_ranges, \"m\")\n msg = (\n \"visibility threshold 10 m is not available\\n\"\n \"visibility threshold 30000 m is not available\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n plugin(self.vis_cube)\n\n def test_threshold_matching_tolerance(self):\n \"\"\"Test threshold matching succeeds for absolute values close to\n zero\"\"\"\n new_thresholds = np.array([272.15, 273.15, 274.15, 275.15], dtype=np.float32)\n self.temp_cube.coord(\"air_temperature\").points = new_thresholds\n threshold_ranges = [[-1, 0], [0, 2]]\n expected_data = np.array(\n [\n [[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.3, 0.3, 0.3]],\n [[0.9, 0.9, 0.9], [0.7, 0.7, 0.7], [0.6, 0.5, 0.5]],\n ],\n dtype=np.float32,\n )\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"degC\")\n result = plugin(self.temp_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n def test_thresholds_indistinguishable(self):\n \"\"\"Test behaviour in a case where cube extraction cannot work within a\n tolerance of 1e-5\"\"\"\n # set threshold ranges in m s-1\n points = self.precip_cube.coord(\"rainfall_rate\").points.copy()\n threshold_ranges = [[points[1], points[2]]]\n msg = \"Plugin cannot distinguish between thresholds at\"\n with self.assertRaisesRegex(ValueError, msg):\n OccurrenceBetweenThresholds(threshold_ranges, \"m s-1\")\n\n def test_original_units_indistinguishable(self):\n \"\"\"Test cubes where thresholds are indistinguisable in SI units can be\n correctly processed using threshold ranges specified in a unit with\n more than 1e-5 discrimination\"\"\"\n expected_data = np.array(\n [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32\n )\n threshold_ranges = [[0.25, 0.5]]\n plugin = OccurrenceBetweenThresholds(threshold_ranges, \"mm h-1\")\n result = plugin(self.precip_cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2020 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Unit tests for MergeCubesForWeightedBlending\"\"\"\n\nimport unittest\nfrom datetime import datetime as dt\n\nimport iris\nimport numpy as np\nfrom iris.tests import IrisTest\n\nfrom improver.blending.weighted_blend import MergeCubesForWeightedBlending\nfrom improver.utilities.warnings_handler import ManageWarnings\n\nfrom ...set_up_test_cubes import set_up_probability_cube, set_up_variable_cube\n\n\nclass Test__init__(IrisTest):\n \"\"\"Test the __init__ method\"\"\"\n\n def test_basic(self):\n \"\"\"Test default initialisation\"\"\"\n plugin = MergeCubesForWeightedBlending(\"realization\")\n self.assertEqual(plugin.blend_coord, \"realization\")\n self.assertIsNone(plugin.weighting_coord)\n self.assertIsNone(plugin.model_id_attr)\n\n def test_optional_args(self):\n \"\"\"Test model ID and weighting coordinate setting\"\"\"\n plugin = MergeCubesForWeightedBlending(\n \"model_id\",\n weighting_coord=\"forecast_period\",\n model_id_attr=\"mosg__model_configuration\",\n )\n self.assertEqual(plugin.weighting_coord, \"forecast_period\")\n self.assertEqual(plugin.model_id_attr, \"mosg__model_configuration\")\n\n def test_error_missing_model_id_attr(self):\n \"\"\"Test exception is raised if blending over model with no identifying\n attribute\"\"\"\n msg = \"model_id_attr required to blend over model_id\"\n with self.assertRaisesRegex(ValueError, msg):\n MergeCubesForWeightedBlending(\"model_id\")\n\n @ManageWarnings(record=True)\n def test_warning_unnecessary_model_id_attr(self, warning_list=None):\n \"\"\"Test warning if model_id_attr is set for non-model blending\"\"\"\n warning_msg = \"model_id_attr not required\"\n plugin = MergeCubesForWeightedBlending(\n \"realization\", model_id_attr=\"mosg__model_configuration\"\n )\n self.assertTrue(any(item.category == UserWarning for item in warning_list))\n self.assertTrue(any(warning_msg in 
str(item) for item in warning_list))\n self.assertIsNone(plugin.model_id_attr)\n\n\nclass Test__create_model_coordinates(IrisTest):\n \"\"\"Test the _create_model_coordinates method\"\"\"\n\n def setUp(self):\n \"\"\"Set up some probability cubes from different models\"\"\"\n data = np.array(\n [0.9 * np.ones((3, 3)), 0.5 * np.ones((3, 3)), 0.1 * np.ones((3, 3))],\n dtype=np.float32,\n )\n thresholds = np.array([273.0, 275.0, 277.0], dtype=np.float32)\n time_point = dt(2015, 11, 23, 7)\n\n # set up a MOGREPS-UK cube with 7 hour forecast period\n self.cube_enuk = set_up_probability_cube(\n data.copy(),\n thresholds,\n standard_grid_metadata=\"uk_ens\",\n time=time_point,\n frt=dt(2015, 11, 23, 0),\n )\n\n # set up a UKV cube with 4 hour forecast period\n self.cube_ukv = set_up_probability_cube(\n data.copy(),\n thresholds,\n standard_grid_metadata=\"uk_det\",\n time=time_point,\n frt=dt(2015, 11, 23, 3),\n )\n\n self.cubelist = iris.cube.CubeList([self.cube_enuk, self.cube_ukv])\n self.plugin = MergeCubesForWeightedBlending(\n \"model\",\n weighting_coord=\"forecast_period\",\n model_id_attr=\"mosg__model_configuration\",\n )\n\n def test_basic(self):\n \"\"\"Test model ID and model configuration coords are created and that\n the model_id_attr (in this case 'mosg__model_configuration') is\n correctly updated\"\"\"\n self.plugin._create_model_coordinates(self.cubelist)\n for cube in self.cubelist:\n cube_coords = [coord.name() for coord in cube.coords()]\n self.assertIn(\"model_id\", cube_coords)\n self.assertIn(\"model_configuration\", cube_coords)\n self.assertEqual(cube.attributes[\"mosg__model_configuration\"], \"blend\")\n\n def test_values(self):\n \"\"\"Test values of model coordinates are as expected\"\"\"\n expected_id = [0, 1000]\n expected_config = [\"uk_ens\", \"uk_det\"]\n self.plugin._create_model_coordinates(self.cubelist)\n for cube, m_id, m_conf in zip(self.cubelist, expected_id, expected_config):\n self.assertEqual(cube.coord(\"model_id\").points, [m_id])\n self.assertEqual(cube.coord(\"model_configuration\").points, [m_conf])\n\n def test_unmatched_model_id_attr(self):\n \"\"\"Test error if model_id_attr is not present on both input cubes\"\"\"\n self.cubelist[0].attributes.pop(\"mosg__model_configuration\")\n msg = \"Cannot create model ID coordinate for grid blending \"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin._create_model_coordinates(self.cubelist)\n\n def test_error_same_model(self):\n \"\"\"Test error if input cubes are from the same model\"\"\"\n new_cubelist = iris.cube.CubeList(\n [self.cube_enuk.copy(), self.cube_enuk.copy()]\n )\n msg = \"Cannot create model dimension\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin._create_model_coordinates(new_cubelist)\n\n\nclass Test_process(IrisTest):\n \"\"\"Test the process method\"\"\"\n\n def setUp(self):\n \"\"\"Set up some probability cubes from different models\"\"\"\n data = np.array(\n [0.9 * np.ones((3, 3)), 0.5 * np.ones((3, 3)), 0.1 * np.ones((3, 3))],\n dtype=np.float32,\n )\n thresholds = np.array([273.0, 275.0, 277.0], dtype=np.float32)\n time_point = dt(2015, 11, 23, 7)\n time_bounds = [dt(2015, 11, 23, 4), time_point]\n\n # set up a MOGREPS-UK cube with 7 hour forecast period\n self.cube_enuk = set_up_probability_cube(\n data.copy(),\n thresholds,\n standard_grid_metadata=\"uk_ens\",\n time=time_point,\n frt=dt(2015, 11, 23, 0),\n time_bounds=time_bounds,\n )\n\n # set up a UKV cube with 4 hour forecast period\n self.cube_ukv = set_up_probability_cube(\n data.copy(),\n 
thresholds,\n standard_grid_metadata=\"uk_det\",\n time=time_point,\n frt=dt(2015, 11, 23, 3),\n time_bounds=time_bounds,\n )\n\n self.cubelist = iris.cube.CubeList([self.cube_enuk, self.cube_ukv])\n\n # set up some non-UK test cubes\n cube_non_mo_ens = self.cube_enuk.copy()\n cube_non_mo_ens.attributes.pop(\"mosg__model_configuration\")\n cube_non_mo_ens.attributes[\"non_mo_model_config\"] = \"non_uk_ens\"\n cube_non_mo_det = self.cube_ukv.copy()\n cube_non_mo_det.attributes.pop(\"mosg__model_configuration\")\n cube_non_mo_det.attributes[\"non_mo_model_config\"] = \"non_uk_det\"\n\n self.non_mo_cubelist = iris.cube.CubeList([cube_non_mo_ens, cube_non_mo_det])\n\n # set up plugin for multi-model blending weighted by forecast period\n self.plugin = MergeCubesForWeightedBlending(\n \"model\",\n weighting_coord=\"forecast_period\",\n model_id_attr=\"mosg__model_configuration\",\n )\n\n def test_basic(self):\n \"\"\"Test single cube is returned unchanged\"\"\"\n cube = self.cube_enuk.copy()\n result = self.plugin.process(cube)\n self.assertArrayAlmostEqual(result.data, self.cube_enuk.data)\n self.assertEqual(result.metadata, self.cube_enuk.metadata)\n\n def test_single_item_list(self):\n \"\"\"Test cube from single item list is returned unchanged\"\"\"\n cubelist = iris.cube.CubeList([self.cube_enuk.copy()])\n result = self.plugin.process(cubelist)\n self.assertArrayAlmostEqual(result.data, self.cube_enuk.data)\n self.assertEqual(result.metadata, self.cube_enuk.metadata)\n\n def test_multi_model_merge(self):\n \"\"\"Test models merge OK and have expected model coordinates\"\"\"\n result = self.plugin.process(self.cubelist)\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertArrayEqual(result.coord(\"model_id\").points, [0, 1000])\n self.assertArrayEqual(\n result.coord(\"model_configuration\").points, [\"uk_ens\", \"uk_det\"]\n )\n\n def test_time_coords(self):\n \"\"\"Test merged cube has scalar time coordinates if weighting models\n by forecast period\"\"\"\n result = self.plugin.process(self.cubelist)\n # test resulting cube has single 4 hour (shorter) forecast period\n self.assertEqual(result.coord(\"forecast_period\").points, [4 * 3600])\n # check time and frt points are also consistent with the UKV input cube\n self.assertEqual(\n result.coord(\"time\").points, self.cube_ukv.coord(\"time\").points\n )\n self.assertEqual(\n result.coord(\"forecast_reference_time\").points,\n self.cube_ukv.coord(\"forecast_reference_time\").points,\n )\n\n def test_cycle_blend(self):\n \"\"\"Test merge for blending over forecast_reference_time\"\"\"\n cube = self.cube_ukv.copy()\n cube.coord(\"forecast_reference_time\").points = (\n cube.coord(\"forecast_reference_time\").points + 3600\n )\n cube.coord(\"forecast_period\").points = (\n cube.coord(\"forecast_reference_time\").points - 3600\n )\n plugin = MergeCubesForWeightedBlending(\"forecast_reference_time\")\n result = plugin.process([self.cube_ukv, cube])\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertIn(\n result.coord(\"forecast_reference_time\"), result.coords(dim_coords=True)\n )\n # check no model coordinates have been added\n with self.assertRaises(iris.exceptions.CoordinateNotFoundError):\n result.coord(\"model_id\")\n with self.assertRaises(iris.exceptions.CoordinateNotFoundError):\n result.coord(\"model_configuration\")\n\n def test_blend_coord_ascending(self):\n \"\"\"Test the order of the output blend coordinate is always ascending,\n independent of the input cube order\"\"\"\n frt = 
self.cube_ukv.coord(\"forecast_reference_time\").points[0]\n fp = self.cube_ukv.coord(\"forecast_period\").points[0]\n cube1 = self.cube_ukv.copy()\n cube1.coord(\"forecast_reference_time\").points = [frt + 3600]\n cube1.coord(\"forecast_period\").points = [fp - 3600]\n cube2 = self.cube_ukv.copy()\n cube2.coord(\"forecast_reference_time\").points = [frt + 7200]\n cube2.coord(\"forecast_period\").points = [fp - 7200]\n # input unordered cubes; expect ordered output\n expected_points = np.array([frt, frt + 3600, frt + 7200], dtype=np.int64)\n plugin = MergeCubesForWeightedBlending(\"forecast_reference_time\")\n result = plugin.process([cube1, self.cube_ukv, cube2])\n self.assertArrayEqual(\n result.coord(\"forecast_reference_time\").points, expected_points\n )\n\n def test_cycletime(self):\n \"\"\"Test merged cube has updated forecast reference time and forecast\n period if specified using the 'cycletime' argument\"\"\"\n result = self.plugin.process(self.cubelist, cycletime=\"20151123T0600Z\")\n # test resulting cube has forecast period consistent with cycletime\n self.assertEqual(result.coord(\"forecast_period\").points, [3600])\n self.assertEqual(\n result.coord(\"forecast_reference_time\").points,\n self.cube_ukv.coord(\"forecast_reference_time\").points + 3 * 3600,\n )\n # check validity time is unchanged\n self.assertEqual(\n result.coord(\"time\").points, self.cube_ukv.coord(\"time\").points\n )\n\n def test_non_mo_model_id(self):\n \"\"\"Test that a model ID attribute string can be specified when\n merging multi model cubes\"\"\"\n plugin = MergeCubesForWeightedBlending(\n \"model\", model_id_attr=\"non_mo_model_config\"\n )\n result = plugin.process(self.non_mo_cubelist)\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertArrayEqual(result.coord(\"model_id\").points, [0, 1000])\n\n def test_model_id_attr_mismatch(self):\n \"\"\"Test that when a model ID attribute string is specified that does\n not match the model ID attribute key name on both cubes to be merged,\n an error is thrown\"\"\"\n plugin = MergeCubesForWeightedBlending(\n \"model\", model_id_attr=\"non_matching_model_config\"\n )\n msg = \"Cannot create model ID coordinate\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin.process(self.non_mo_cubelist)\n\n def test_model_id_attr_mismatch_one_cube(self):\n \"\"\"Test that when a model ID attribute string is specified that only\n matches the model ID attribute key name on one of the cubes to be\n merged, an error is thrown\"\"\"\n self.non_mo_cubelist[1].attributes.pop(\"non_mo_model_config\")\n self.non_mo_cubelist[1].attributes[\"non_matching_model_config\"] = \"non_uk_det\"\n plugin = MergeCubesForWeightedBlending(\n \"model\", model_id_attr=\"non_matching_model_config\"\n )\n msg = \"Cannot create model ID coordinate\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin.process(self.non_mo_cubelist)\n\n def test_time_bounds_mismatch(self):\n \"\"\"Test failure for cycle blending when time bounds ranges are not\n matched (ie cycle blending different \"accumulation periods\")\"\"\"\n cube2 = self.cube_ukv.copy()\n cube2.coord(\"forecast_reference_time\").points = (\n cube2.coord(\"forecast_reference_time\").points + 3600\n )\n cube2.coord(\"time\").bounds = [\n cube2.coord(\"time\").bounds[0, 0] + 3600,\n cube2.coord(\"time\").bounds[0, 1],\n ]\n cube2.coord(\"forecast_period\").bounds = [\n cube2.coord(\"forecast_period\").bounds[0, 0] + 3600,\n cube2.coord(\"forecast_period\").bounds[0, 1],\n ]\n msg = \"Cube with mismatching time bounds ranges 
cannot be blended\"\n with self.assertRaisesRegex(ValueError, msg):\n MergeCubesForWeightedBlending(\"forecast_reference_time\").process(\n [self.cube_ukv, cube2]\n )\n\n def test_blend_coord_not_present(self):\n \"\"\"Test exception when blend coord is not present on inputs\"\"\"\n msg = \"realization coordinate is not present on all input cubes\"\n with self.assertRaisesRegex(ValueError, msg):\n MergeCubesForWeightedBlending(\"realization\").process(self.cubelist)\n\n def test_blend_realizations(self):\n \"\"\"Test processing works for merging over coordinates that don't\n require specific setup\"\"\"\n data = np.ones((1, 3, 3), dtype=np.float32)\n cube1 = set_up_variable_cube(data, realizations=np.array([0]))\n cube1 = iris.util.squeeze(cube1)\n cube2 = set_up_variable_cube(data, realizations=np.array([1]))\n cube2 = iris.util.squeeze(cube2)\n plugin = MergeCubesForWeightedBlending(\"realization\")\n result = plugin.process([cube1, cube2])\n self.assertIsInstance(result, iris.cube.Cube)\n self.assertArrayEqual(result.coord(\"realization\").points, np.array([0, 1]))\n self.assertEqual(result[0].metadata, cube1.metadata)\n self.assertEqual(result[1].metadata, cube2.metadata)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2020 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nUnit tests for the\n`ensemble_copula_coupling.ConvertLocationAndScaleParametersToPercentiles`\n\"\"\"\nimport unittest\n\nimport iris\nimport numpy as np\nfrom iris.cube import Cube\nfrom iris.tests import IrisTest\n\nfrom improver.ensemble_copula_coupling.ensemble_copula_coupling import (\n ConvertLocationAndScaleParametersToPercentiles as Plugin,\n)\nfrom improver.utilities.warnings_handler import ManageWarnings\n\nfrom ...set_up_test_cubes import set_up_variable_cube\nfrom .ecc_test_data import ECC_TEMPERATURE_REALIZATIONS, set_up_spot_test_cube\n\n\nclass Test__repr__(IrisTest):\n\n \"\"\"Test string representation of plugin.\"\"\"\n\n def test_basic(self):\n \"\"\"Test string representation\"\"\"\n expected_string = (\n \"<ConvertLocationAndScaleParametersToPercentiles: \"\n \"distribution: norm; shape_parameters: []>\"\n )\n result = str(Plugin())\n self.assertEqual(result, expected_string)\n\n\nclass Test__location_and_scale_parameters_to_percentiles(IrisTest):\n\n \"\"\"Test the _location_and_scale_parameters_to_percentiles plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Set up temperature cube.\"\"\"\n self.temperature_cube = set_up_variable_cube(ECC_TEMPERATURE_REALIZATIONS)\n self.data = np.array(\n [\n [\n [225.568115, 236.818115, 248.068115],\n [259.318115, 270.568115, 281.818115],\n [293.068115, 304.318115, 315.568115],\n ],\n [\n [229.483322, 240.733322, 251.983322],\n [263.233307, 274.483307, 285.733307],\n [296.983307, 308.233307, 319.483307],\n ],\n [\n [233.398529, 244.648529, 255.898529],\n [267.148499, 278.398499, 289.648499],\n [300.898499, 312.148499, 323.398499],\n ],\n ],\n dtype=np.float32,\n )\n\n self.location_parameter = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.MEAN\n )\n self.scale_parameter = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.VARIANCE\n )\n 
self.percentiles = [10, 50, 90]\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_check_data(self):\n \"\"\"\n Test that the plugin returns an Iris.cube.Cube matching the expected\n data values when a cubes containing location and scale parameters are\n passed in, which are equivalent to the ensemble mean and ensemble\n variance. The resulting data values are the percentiles, which have\n been generated.\n \"\"\"\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n self.location_parameter,\n self.scale_parameter,\n self.temperature_cube,\n self.percentiles,\n )\n self.assertIsInstance(result, Cube)\n np.testing.assert_allclose(result.data, self.data, rtol=1.0e-4)\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_masked_location_parameter(self):\n \"\"\"\n Test that the plugin returns the correctly masked data when\n given a location parameter that is masked.\n \"\"\"\n mask = np.array([[1, 0, 0], [0, 0, 0], [0, 1, 0]])\n expected_mask = np.broadcast_to(mask, (3, 3, 3))\n expected_data = np.ma.masked_array(self.data, mask=expected_mask)\n self.location_parameter.data = np.ma.masked_array(\n self.location_parameter.data, mask=mask\n )\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n self.location_parameter,\n self.scale_parameter,\n self.temperature_cube,\n self.percentiles,\n )\n np.testing.assert_allclose(result.data, expected_data, rtol=1.0e-4)\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_masked_scale_parameter(self):\n \"\"\"\n Test that the plugin returns the correctly masked data when\n given a scale parameter that is masked.\n \"\"\"\n mask = np.array([[0, 0, 0], [0, 0, 0], [1, 0, 1]])\n expected_mask = np.broadcast_to(mask, (3, 3, 3))\n expected_data = np.ma.masked_array(self.data, mask=expected_mask)\n self.scale_parameter.data = np.ma.masked_array(\n self.scale_parameter.data, mask=mask\n )\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n self.location_parameter,\n self.scale_parameter,\n self.temperature_cube,\n self.percentiles,\n )\n np.testing.assert_allclose(result.data, expected_data, rtol=1.0e-4)\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_both_masked(self):\n \"\"\"\n Test that the plugin returns the correctly masked data when\n both the scale and location parameters are masked.\n \"\"\"\n mask1 = np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n mask2 = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0]])\n expected_mask = np.broadcast_to(mask1 + mask2, (3, 3, 3))\n expected_data = np.ma.masked_array(self.data, mask=expected_mask)\n self.location_parameter.data = np.ma.masked_array(\n self.location_parameter.data, mask=mask1\n )\n self.scale_parameter.data = np.ma.masked_array(\n self.scale_parameter.data, mask=mask2\n )\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n self.location_parameter,\n self.scale_parameter,\n self.temperature_cube,\n self.percentiles,\n )\n np.testing.assert_allclose(result.data, expected_data, rtol=1.0e-4)\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_simple_data_truncnorm_distribution(self):\n \"\"\"\n Test that the plugin returns an iris.cube.Cube matching the expected\n data values when cubes containing the location parameter and scale\n parameter are passed in. 
In this test, the ensemble mean and variance\n is used as a proxy for the location and scale parameter. The resulting\n data values are the percentiles, which have been generated using a\n truncated normal distribution.\n \"\"\"\n data = np.array(\n [\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[2, 2, 2], [2, 2, 2], [2, 2, 2]],\n [[3, 3, 3], [3, 3, 3], [3, 3, 3]],\n ]\n )\n self.temperature_cube.data = data\n\n expected_data = np.array(\n [\n [\n [1.3042759, 1.3042759, 1.3042759],\n [1.3042759, 1.3042759, 1.3042759],\n [1.3042759, 1.3042759, 1.3042759],\n ],\n [\n [3.0300407, 3.0300407, 3.0300407],\n [3.0300407, 3.0300407, 3.0300407],\n [3.0300407, 3.0300407, 3.0300407],\n ],\n [\n [4.8261294, 4.8261294, 4.8261294],\n [4.8261294, 4.8261294, 4.8261294],\n [4.8261294, 4.8261294, 4.8261294],\n ],\n ]\n )\n\n # Use an adjusted version of the ensemble mean as a proxy for the\n # location parameter for the truncated normal distribution.\n current_forecast_predictor = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.MEAN\n )\n current_forecast_predictor.data = current_forecast_predictor.data + 1\n # Use an adjusted version of the ensemble variance as a proxy for the\n # scale parameter for the truncated normal distribution.\n current_forecast_variance = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.VARIANCE\n )\n current_forecast_variance.data = current_forecast_variance.data + 1\n plugin = Plugin(\n distribution=\"truncnorm\",\n shape_parameters=np.array([0, np.inf], dtype=np.float32),\n )\n result = plugin._location_and_scale_parameters_to_percentiles(\n current_forecast_predictor,\n current_forecast_variance,\n self.temperature_cube,\n self.percentiles,\n )\n self.assertIsInstance(result, Cube)\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_simple_data(self):\n \"\"\"\n Test that the plugin returns the expected values for the generated\n percentiles when an idealised set of data values between 1 and 3\n is used to create the mean (location parameter) and the variance\n (scale parameter).\n \"\"\"\n data = np.array(\n [\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[2, 2, 2], [2, 2, 2], [2, 2, 2]],\n [[3, 3, 3], [3, 3, 3], [3, 3, 3]],\n ]\n )\n self.temperature_cube.data = data\n\n expected_data = np.array(\n [\n [\n [0.71844843, 0.71844843, 0.71844843],\n [0.71844843, 0.71844843, 0.71844843],\n [0.71844843, 0.71844843, 0.71844843],\n ],\n [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],\n [\n [3.28155157, 3.28155157, 3.28155157],\n [3.28155157, 3.28155157, 3.28155157],\n [3.28155157, 3.28155157, 3.28155157],\n ],\n ]\n )\n\n current_forecast_predictor = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.MEAN\n )\n current_forecast_variance = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.VARIANCE\n )\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n current_forecast_predictor,\n current_forecast_variance,\n self.temperature_cube,\n self.percentiles,\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n @ManageWarnings(\n ignored_messages=[\n \"invalid value encountered\",\n \"Collapsing a non-contiguous coordinate.\",\n ],\n warning_types=[RuntimeWarning, UserWarning],\n )\n def test_if_identical_data(self):\n \"\"\"\n Test that the plugin returns the expected values, if every\n percentile has an identical value. 
This causes an issue because\n the default for the underlying scipy function is to yield a NaN for\n tied values. For this application, any NaN values are overwritten with\n the predicted mean value for all probability thresholds.\n \"\"\"\n data = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])\n # Repeat data in the realization dimension.\n data = np.repeat(data[np.newaxis, :, :], 3, axis=0)\n self.temperature_cube.data = data\n\n expected_data = np.array(\n [\n [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]],\n [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]],\n [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]],\n ]\n )\n\n current_forecast_predictor = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.MEAN\n )\n current_forecast_variance = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.VARIANCE\n )\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n current_forecast_predictor,\n current_forecast_variance,\n self.temperature_cube,\n self.percentiles,\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n @ManageWarnings(\n ignored_messages=[\n \"invalid value encountered\",\n \"Collapsing a non-contiguous coordinate.\",\n ],\n warning_types=[RuntimeWarning, UserWarning],\n )\n def test_if_nearly_identical_data(self):\n \"\"\"\n Test that the plugin returns the expected values, if every\n percentile has an identical value. This causes an issue because\n the default for the underlying scipy function is to yield a NaN for\n tied values. For this application, any NaN values are overwritten with\n the predicted mean value for all probability thresholds.\n \"\"\"\n data = np.array(\n [\n [[1.0, 1.0, 1.0], [4.0, 2.0, 2.0], [3.0, 3.0, 3.0]],\n [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]],\n [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]],\n ]\n )\n self.temperature_cube.data = data\n\n expected_data = np.array(\n [\n [[1.0, 1.0, 1.0], [1.18685838, 2.0, 2.0], [3.0, 3.0, 3.0]],\n [[1.0, 1.0, 1.0], [2.66666667, 2.0, 2.0], [3.0, 3.0, 3.0]],\n [[1.0, 1.0, 1.0], [4.14647495, 2.0, 2.0], [3.0, 3.0, 3.0]],\n ]\n )\n\n current_forecast_predictor = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.MEAN\n )\n current_forecast_variance = self.temperature_cube.collapsed(\n \"realization\", iris.analysis.VARIANCE\n )\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n current_forecast_predictor,\n current_forecast_variance,\n self.temperature_cube,\n self.percentiles,\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_many_percentiles(self):\n \"\"\"\n Test that the plugin returns an iris.cube.Cube if many percentiles\n are requested.\n \"\"\"\n percentiles = np.linspace(1, 99, num=1000, endpoint=True)\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n self.location_parameter,\n self.scale_parameter,\n self.temperature_cube,\n percentiles,\n )\n self.assertIsInstance(result, Cube)\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_negative_percentiles(self):\n \"\"\"\n Test that the plugin returns the expected values for the\n percentiles if negative probabilities are requested.\n \"\"\"\n percentiles = [-10, 10]\n msg = \"NaNs are present within the result for the\"\n with self.assertRaisesRegex(ValueError, msg):\n Plugin()._location_and_scale_parameters_to_percentiles(\n self.location_parameter,\n self.scale_parameter,\n 
self.temperature_cube,\n percentiles,\n )\n\n @ManageWarnings(ignored_messages=[\"Collapsing a non-contiguous coordinate.\"])\n def test_spot_forecasts_check_data(self):\n \"\"\"\n Test that the plugin returns an Iris.cube.Cube matching the expected\n data values when a cube containing mean (location parameter) and\n variance (scale parameter) is passed in. The resulting data values are\n the percentiles, which have been generated for a spot forecast.\n \"\"\"\n data = np.reshape(self.data, (3, 9))\n cube = set_up_spot_test_cube()\n\n current_forecast_predictor = cube.collapsed(\"realization\", iris.analysis.MEAN)\n current_forecast_variance = cube.collapsed(\n \"realization\", iris.analysis.VARIANCE\n )\n result = Plugin()._location_and_scale_parameters_to_percentiles(\n current_forecast_predictor,\n current_forecast_variance,\n cube,\n self.percentiles,\n )\n self.assertIsInstance(result, Cube)\n self.assertArrayAlmostEqual(result.data, data)\n\n\nclass Test_process(IrisTest):\n\n \"\"\"Test the process plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Set up temperature cube.\"\"\"\n self.cube = set_up_variable_cube(ECC_TEMPERATURE_REALIZATIONS)\n self.forecast_predictor = self.cube.collapsed(\"realization\", iris.analysis.MEAN)\n self.forecast_variance = self.cube.collapsed(\n \"realization\", iris.analysis.VARIANCE\n )\n self.no_of_percentiles = len(self.cube.coord(\"realization\").points)\n\n @ManageWarnings(\n ignored_messages=[\n \"Only a single cube so no differences\",\n \"Collapsing a non-contiguous coordinate.\",\n ]\n )\n def test_basic(self):\n \"\"\"Test that the plugin returns an Iris.cube.Cube.\"\"\"\n result = Plugin().process(\n self.forecast_predictor,\n self.forecast_variance,\n self.cube,\n no_of_percentiles=self.no_of_percentiles,\n )\n self.assertIsInstance(result, Cube)\n\n @ManageWarnings(\n ignored_messages=[\n \"Only a single cube so no differences\",\n \"Collapsing a non-contiguous coordinate.\",\n ]\n )\n def test_number_of_percentiles(self):\n \"\"\"\n Test that the plugin returns a cube with the expected number of\n percentiles.\n \"\"\"\n expected = np.array(\n [\n [\n [227.42273, 238.67273, 249.92273],\n [261.1727, 272.4227, 283.6727],\n [294.9227, 306.1727, 317.4227],\n ],\n [\n [229.48332, 240.73332, 251.98332],\n [263.2333, 274.4833, 285.7333],\n [296.9833, 308.2333, 319.4833],\n ],\n [\n [231.54391, 242.79391, 254.04391],\n [265.2939, 276.5439, 287.7939],\n [299.0439, 310.2939, 321.5439],\n ],\n ]\n )\n\n result = Plugin().process(\n self.forecast_predictor,\n self.forecast_variance,\n self.cube,\n no_of_percentiles=self.no_of_percentiles,\n )\n\n self.assertEqual(len(result.coord(\"percentile\").points), self.no_of_percentiles)\n self.assertArrayAlmostEqual(expected, result.data, decimal=4)\n\n @ManageWarnings(\n ignored_messages=[\n \"Only a single cube so no differences\",\n \"Collapsing a non-contiguous coordinate.\",\n ]\n )\n def test_list_of_percentiles(self):\n \"\"\"\n Test that the plugin returns a cube with the expected percentiles\n when a specific list of percentiles is provided.\n \"\"\"\n percentiles = [10, 50, 90]\n expected = np.array(\n [\n [\n [225.56812, 236.81812, 248.06812],\n [259.3181, 270.5681, 281.8181],\n [293.0681, 304.3181, 315.5681],\n ],\n [\n [229.48332, 240.73332, 251.98332],\n [263.2333, 274.4833, 285.7333],\n [296.9833, 308.2333, 319.4833],\n ],\n [\n [233.39853, 244.64853, 255.89853],\n [267.1485, 278.3985, 289.6485],\n [300.8985, 312.1485, 323.3985],\n ],\n ]\n )\n\n result = Plugin().process(\n self.forecast_predictor,\n 
self.forecast_variance,\n self.cube,\n percentiles=percentiles,\n )\n\n self.assertEqual(len(percentiles), len(result.coord(\"percentile\").points))\n self.assertArrayAlmostEqual(percentiles, result.coord(\"percentile\").points)\n self.assertArrayAlmostEqual(expected, result.data, decimal=4)\n\n @ManageWarnings(\n ignored_messages=[\n \"Only a single cube so no differences\",\n \"Collapsing a non-contiguous coordinate.\",\n ]\n )\n def test_multiple_keyword_arguments_error(self):\n \"\"\"\n Test that the plugin raises an error when both the no_of_percentiles\n keyword argument and the percentiles keyword argument are provided.\n \"\"\"\n percentiles = [10, 25, 50, 75, 90]\n msg = \"Please specify either the number of percentiles or\"\n with self.assertRaisesRegex(ValueError, msg):\n Plugin().process(\n self.forecast_predictor,\n self.forecast_variance,\n self.cube,\n no_of_percentiles=self.no_of_percentiles,\n percentiles=percentiles,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2020 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Plugin to calculate blend weights and blend data across a dimension\"\"\"\n\nimport warnings\n\nimport numpy as np\n\nfrom improver import BasePlugin\nfrom improver.blending.spatial_weights import SpatiallyVaryingWeightsFromMask\nfrom improver.blending.weighted_blend import (\n MergeCubesForWeightedBlending,\n WeightedBlendAcrossWholeDimension,\n)\nfrom improver.blending.weights import (\n ChooseDefaultWeightsLinear,\n ChooseDefaultWeightsNonLinear,\n ChooseWeightsLinear,\n)\nfrom improver.metadata.amend import amend_attributes\nfrom improver.metadata.forecast_times import rebadge_forecasts_as_latest_cycle\nfrom improver.utilities.spatial import (\n check_if_grid_is_equal_area,\n distance_to_number_of_grid_cells,\n)\n\n\nclass WeightAndBlend(BasePlugin):\n \"\"\"\n Wrapper class to calculate weights and blend data across cycles or models\n \"\"\"\n\n def __init__(\n self,\n blend_coord,\n wts_calc_method,\n weighting_coord=None,\n wts_dict=None,\n y0val=None,\n ynval=None,\n cval=None,\n inverse_ordering=False,\n ):\n \"\"\"\n Initialise central parameters\n\n Args:\n blend_coord (str):\n Coordinate over which blending will be performed (eg \"model\"\n for grid blending)\n wts_calc_method (str):\n Weights calculation method (\"linear\", \"nonlinear\" or \"dict\")\n weighting_coord (str):\n Coordinate over which linear weights should be calculated (from\n dictionary)\n wts_dict (dict):\n Dictionary containing parameters for linear weights calculation\n y0val (float):\n Relative weight of first file for default linear weights plugin\n ynval (float):\n Relative weight of last file for default linear weights plugin\n cval (float):\n Parameter for default non-linear weights plugin\n inverse_ordering (bool):\n Option to invert weighting order for non-linear weights plugin\n so that higher blend coordinate values get higher weights (eg\n 
if cycle blending over forecast reference time).\n \"\"\"\n self.blend_coord = blend_coord\n self.wts_calc_method = wts_calc_method\n self.weighting_coord = None\n\n if self.wts_calc_method == \"dict\":\n self.weighting_coord = weighting_coord\n self.wts_dict = wts_dict\n elif self.wts_calc_method == \"linear\":\n self.y0val = y0val\n self.ynval = ynval\n elif self.wts_calc_method == \"nonlinear\":\n self.cval = cval\n self.inverse_ordering = inverse_ordering\n else:\n raise ValueError(\n \"Weights calculation method '{}' unrecognised\".format(\n self.wts_calc_method\n )\n )\n\n def _calculate_blending_weights(self, cube):\n \"\"\"\n Wrapper for plugins to calculate blending weights by the appropriate\n method.\n\n Args:\n cube (iris.cube.Cube):\n Cube of input data to be blended\n\n Returns:\n iris.cube.Cube:\n Cube containing 1D array of weights for blending\n \"\"\"\n if self.wts_calc_method == \"dict\":\n if \"model\" in self.blend_coord:\n config_coord = \"model_configuration\"\n else:\n config_coord = self.blend_coord\n\n weights = ChooseWeightsLinear(\n self.weighting_coord, self.wts_dict, config_coord_name=config_coord\n )(cube)\n\n elif self.wts_calc_method == \"linear\":\n weights = ChooseDefaultWeightsLinear(y0val=self.y0val, ynval=self.ynval)(\n cube, self.blend_coord\n )\n\n elif self.wts_calc_method == \"nonlinear\":\n weights = ChooseDefaultWeightsNonLinear(self.cval)(\n cube, self.blend_coord, inverse_ordering=self.inverse_ordering\n )\n\n return weights\n\n def _update_spatial_weights(self, cube, weights, fuzzy_length):\n \"\"\"\n Update weights using spatial information\n\n Args:\n cube (iris.cube.Cube):\n Cube of input data to be blended\n weights (iris.cube.Cube):\n Initial 1D cube of weights scaled by self.weighting_coord\n fuzzy_length (float):\n Distance (in metres) over which to smooth weights at domain\n boundaries\n\n Returns:\n iris.cube.Cube:\n Updated 3D cube of spatially-varying weights\n \"\"\"\n check_if_grid_is_equal_area(cube)\n grid_cells = distance_to_number_of_grid_cells(\n cube, fuzzy_length, return_int=False\n )\n plugin = SpatiallyVaryingWeightsFromMask(grid_cells)\n weights = plugin(cube, weights, self.blend_coord)\n return weights\n\n def process(\n self,\n cubelist,\n cycletime=None,\n model_id_attr=None,\n spatial_weights=False,\n fuzzy_length=20000,\n attributes_dict=None,\n ):\n \"\"\"\n Merge a cubelist, calculate appropriate blend weights and compute the\n weighted mean. Returns a single cube collapsed over the dimension\n given by self.blend_coord.\n\n Args:\n cubelist (iris.cube.CubeList):\n List of cubes to be merged and blended\n cycletime (str):\n Forecast reference time to use for output cubes, in the format\n YYYYMMDDTHHMMZ. If not set, the latest of the input cube\n forecast reference times is used.\n model_id_attr (str):\n Name of the attribute by which to identify the source model and\n construct \"model\" coordinates for blending.\n spatial_weights (bool):\n If true, calculate spatial weights.\n fuzzy_length (float):\n Distance (in metres) over which to smooth spatial weights.\n Default is 20 km.\n attributes_dict (dict or None):\n Changes to cube attributes to be applied after blending\n\n Warns:\n UserWarning: If blending masked data without spatial weights.\n This has not been fully tested.\n \"\"\"\n # Prepare cubes for weighted blending, including creating model_id and\n # model_configuration coordinates for multi-model blending. The merged\n # cube has a monotonically ascending blend coordinate. 
Plugin raises an\n # error if blend_coord is not present on all input cubes.\n merger = MergeCubesForWeightedBlending(\n self.blend_coord,\n weighting_coord=self.weighting_coord,\n model_id_attr=model_id_attr,\n )\n cube = merger(cubelist, cycletime=cycletime)\n\n # if blend_coord has only one value (for example cycle blending with\n # only one cycle available), or is not present (case where only\n # one model has been provided for a model blend), update attributes\n # and ensure that the forecast reference time on the returned cube\n # is set to the current IMPROVER processing cycle.\n coord_names = [coord.name() for coord in cube.coords()]\n if (\n self.blend_coord not in coord_names\n or len(cube.coord(self.blend_coord).points) == 1\n ):\n result = cube.copy()\n if attributes_dict is not None:\n amend_attributes(result, attributes_dict)\n (result,) = rebadge_forecasts_as_latest_cycle([result], cycletime)\n\n # otherwise, calculate weights and blend across specified dimension\n else:\n # set up special treatment for model blending\n if \"model\" in self.blend_coord:\n self.blend_coord = \"model_id\"\n\n # calculate blend weights\n weights = self._calculate_blending_weights(cube)\n if spatial_weights:\n weights = self._update_spatial_weights(cube, weights, fuzzy_length)\n elif np.ma.is_masked(cube.data):\n # Raise warning if blending masked arrays using non-spatial weights.\n warnings.warn(\n \"Blending masked data without spatial weights has not been\"\n \" fully tested.\"\n )\n\n # blend across specified dimension\n BlendingPlugin = WeightedBlendAcrossWholeDimension(self.blend_coord)\n result = BlendingPlugin(\n cube,\n weights=weights,\n cycletime=cycletime,\n attributes_dict=attributes_dict,\n )\n\n return result\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2020 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Plugin to regrid cube data and standardise metadata\"\"\"\n\nimport warnings\n\nimport iris\nimport numpy as np\nfrom iris.analysis import Linear, Nearest\nfrom iris.exceptions import CoordinateNotFoundError\nfrom scipy.interpolate import griddata\n\nfrom improver import BasePlugin\nfrom improver.metadata.amend import amend_attributes\nfrom improver.metadata.check_datatypes import (\n check_units,\n get_required_dtype,\n get_required_units,\n)\nfrom improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS\nfrom improver.metadata.constants.mo_attributes import MOSG_GRID_ATTRIBUTES\nfrom improver.metadata.constants.time_types import TIME_COORDS\nfrom improver.threshold import BasicThreshold\nfrom improver.utilities.cube_checker import spatial_coords_match\nfrom improver.utilities.round import round_close\nfrom improver.utilities.spatial import OccurrenceWithinVicinity\n\n\ndef grid_contains_cutout(grid, cutout):\n \"\"\"\n Check that a spatial cutout is contained within a given grid\n\n Args:\n grid (iris.cube.Cube):\n A cube defining a data grid\n cutout (iris.cube.Cube):\n The cutout to search for within the grid\n\n Returns:\n bool:\n True if cutout is contained within grid, False otherwise\n \"\"\"\n if spatial_coords_match(grid, cutout):\n return True\n\n # check whether \"cutout\" coordinate points match a subset of \"grid\"\n # points on both axes\n for axis in [\"x\", \"y\"]:\n grid_coord = grid.coord(axis=axis)\n cutout_coord = cutout.coord(axis=axis)\n # check coordinate metadata\n if (\n cutout_coord.name() != grid_coord.name()\n or cutout_coord.units != grid_coord.units\n or cutout_coord.coord_system != grid_coord.coord_system\n ):\n return False\n\n # search for cutout coordinate points in larger grid\n cutout_start = cutout_coord.points[0]\n find_start = [\n 
np.isclose(cutout_start, grid_point) for grid_point in grid_coord.points\n ]\n if not np.any(find_start):\n return False\n\n start = find_start.index(True)\n end = start + len(cutout_coord.points)\n try:\n if not np.allclose(cutout_coord.points, grid_coord.points[start:end]):\n return False\n except ValueError:\n # raised by np.allclose if \"end\" index overshoots edge of grid\n # domain - slicing does not raise IndexError\n return False\n\n return True\n\n\nclass StandardiseMetadata(BasePlugin):\n \"\"\"Plugin to standardise cube metadata\"\"\"\n\n @staticmethod\n def _collapse_scalar_dimensions(cube):\n \"\"\"\n Demote any scalar dimensions (excluding \"realization\") on the input\n cube to auxiliary coordinates.\n\n Returns:\n iris.cube.Cube\n \"\"\"\n coords_to_collapse = []\n for coord in cube.coords(dim_coords=True):\n if len(coord.points) == 1 and \"realization\" not in coord.name():\n coords_to_collapse.append(coord)\n for coord in coords_to_collapse:\n cube = next(cube.slices_over(coord))\n return cube\n\n @staticmethod\n def _remove_scalar_coords(cube, coords_to_remove):\n \"\"\"Removes named coordinates from the input cube.\"\"\"\n for coord in coords_to_remove:\n try:\n cube.remove_coord(coord)\n except CoordinateNotFoundError:\n continue\n\n @staticmethod\n def _standardise_dtypes_and_units(cube):\n \"\"\"\n Modify input cube in place to conform to mandatory dtype and unit\n standards.\n\n Args:\n cube (iris.cube.Cube:\n Cube to be updated in place\n\n \"\"\"\n\n def as_correct_dtype(obj, required_dtype):\n \"\"\"\n Returns an object updated if necessary to the required dtype\n\n Args:\n obj (np.ndarray):\n The object to be updated\n required_dtype (np.dtype):\n The dtype required\n\n Returns:\n np.ndarray\n \"\"\"\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj\n\n cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))\n for coord in cube.coords():\n if coord.name() in TIME_COORDS and not check_units(coord):\n coord.convert_units(get_required_units(coord))\n req_dtype = get_required_dtype(coord)\n # ensure points and bounds have the same dtype\n if np.issubdtype(req_dtype, np.integer):\n coord.points = round_close(coord.points)\n coord.points = as_correct_dtype(coord.points, req_dtype)\n if coord.has_bounds():\n if np.issubdtype(req_dtype, np.integer):\n coord.bounds = round_close(coord.bounds)\n coord.bounds = as_correct_dtype(coord.bounds, req_dtype)\n\n def process(\n self,\n cube,\n new_name=None,\n new_units=None,\n coords_to_remove=None,\n attributes_dict=None,\n ):\n \"\"\"\n Perform compulsory and user-configurable metadata adjustments. The\n compulsory adjustments are to collapse any scalar dimensions apart from\n realization (which is expected always to be a dimension); to cast the cube\n data and coordinates into suitable datatypes; and to convert time-related\n metadata into the required units.\n\n Args:\n cube (iris.cube.Cube):\n Input cube to be standardised\n new_name (str or None):\n Optional rename for output cube\n new_units (str or None):\n Optional unit conversion for output cube\n coords_to_remove (list of str or None):\n Optional list of scalar coordinates to remove from output cube\n attributes_dict (dict or None):\n Optional dictionary of required attribute updates. 
Keys are\n attribute names, and values are the required value or \"remove\".\n\n Returns:\n iris.cube.Cube\n \"\"\"\n cube = self._collapse_scalar_dimensions(cube)\n\n if new_name:\n cube.rename(new_name)\n if new_units:\n cube.convert_units(new_units)\n if coords_to_remove:\n self._remove_scalar_coords(cube, coords_to_remove)\n if attributes_dict:\n amend_attributes(cube, attributes_dict)\n\n # this must be done after unit conversion as if the input is an integer\n # field, unit conversion outputs the new data as float64\n self._standardise_dtypes_and_units(cube)\n\n return cube\n\n\nclass RegridLandSea(BasePlugin):\n \"\"\"Regrid a field with the option to adjust the output so that regridded land\n points always take values from a land point on the source grid, and vice versa\n for sea points\"\"\"\n\n REGRID_REQUIRES_LANDMASK = {\n \"bilinear\": False,\n \"nearest\": False,\n \"nearest-with-mask\": True,\n }\n\n def __init__(\n self,\n regrid_mode=\"bilinear\",\n extrapolation_mode=\"nanmask\",\n landmask=None,\n landmask_vicinity=25000,\n ):\n \"\"\"\n Initialise regridding parameters\n\n Args:\n regrid_mode (str):\n Mode of interpolation in regridding. Valid options are \"bilinear\",\n \"nearest\" or \"nearest-with-mask\". The \"nearest-with-mask\" option\n triggers adjustment of regridded points to match source points in\n terms of land / sea type.\n extrapolation_mode (str):\n Mode to fill regions outside the domain in regridding.\n landmask (iris.cube.Cube or None):\n Land-sea mask (\"land_binary_mask\") on the input cube grid, with\n land points set to one and sea points set to zero. Required for\n \"nearest-with-mask\" regridding option.\n landmask_vicinity (float):\n Radius of vicinity to search for a coastline, in metres\n \"\"\"\n if regrid_mode not in self.REGRID_REQUIRES_LANDMASK:\n msg = \"Unrecognised regrid mode {}\"\n raise ValueError(msg.format(regrid_mode))\n if landmask is None and self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n msg = \"Regrid mode {} requires an input landmask cube\"\n raise ValueError(msg.format(regrid_mode))\n self.regrid_mode = regrid_mode\n self.extrapolation_mode = extrapolation_mode\n self.landmask_source_grid = landmask\n self.landmask_vicinity = None if landmask is None else landmask_vicinity\n self.landmask_name = \"land_binary_mask\"\n\n def _adjust_landsea(self, cube, target_grid):\n \"\"\"\n Adjust regridded data using differences between the target landmask\n and that obtained by regridding the source grid landmask, to ensure\n that the \"land\" or \"sea\" nature of the points in the regridded cube\n matches that of the target grid.\n\n Args:\n cube (iris.cube.Cube):\n Cube after initial regridding\n target_grid (iris.cube.Cube):\n Cube containing landmask data on the target grid\n\n Returns:\n iris.cube.Cube: Adjusted cube\n \"\"\"\n if self.landmask_name not in self.landmask_source_grid.name():\n msg = \"Expected {} in input_landmask cube but found {}\".format(\n self.landmask_name, repr(self.landmask_source_grid)\n )\n warnings.warn(msg)\n\n if self.landmask_name not in target_grid.name():\n msg = \"Expected {} in target_grid cube but found {}\".format(\n self.landmask_name, repr(target_grid)\n )\n warnings.warn(msg)\n\n return AdjustLandSeaPoints(vicinity_radius=self.landmask_vicinity)(\n cube, self.landmask_source_grid, target_grid\n )\n\n def _regrid_to_target(self, cube, target_grid, regridded_title):\n \"\"\"\n Regrid cube to target_grid, inherit grid attributes and update title\n\n Args:\n cube (iris.cube.Cube):\n Cube to be 
regridded\n target_grid (iris.cube.Cube):\n Data on the target grid. If regridding with mask, this cube\n should contain land-sea mask data to be used in adjusting land\n and sea points after regridding.\n regridded_title (str or None):\n New value for the \"title\" attribute to be used after\n regridding. If not set, a default value is used.\n\n Returns:\n iris.cube.Cube: Regridded cube with updated attributes\n \"\"\"\n regridder = Linear(extrapolation_mode=self.extrapolation_mode)\n if \"nearest\" in self.regrid_mode:\n regridder = Nearest(extrapolation_mode=self.extrapolation_mode)\n cube = cube.regrid(target_grid, regridder)\n\n if self.REGRID_REQUIRES_LANDMASK[self.regrid_mode]:\n cube = self._adjust_landsea(cube, target_grid)\n\n # identify grid-describing attributes on source cube that need updating\n required_grid_attributes = [\n attr for attr in cube.attributes if attr in MOSG_GRID_ATTRIBUTES\n ]\n # update attributes if available on target grid, otherwise remove\n for key in required_grid_attributes:\n if key in target_grid.attributes:\n cube.attributes[key] = target_grid.attributes[key]\n else:\n cube.attributes.pop(key)\n\n cube.attributes[\"title\"] = (\n MANDATORY_ATTRIBUTE_DEFAULTS[\"title\"]\n if regridded_title is None\n else regridded_title\n )\n\n return cube\n\n def process(self, cube, target_grid, regridded_title=None):\n \"\"\"\n Regrids cube onto spatial grid provided by target_grid\n\n Args:\n cube (iris.cube.Cube):\n Cube to be regridded\n target_grid (iris.cube.Cube):\n Data on the target grid. If regridding with mask, this cube\n should contain land-sea mask data to be used in adjusting land\n and sea points after regridding.\n regridded_title (str or None):\n New value for the \"title\" attribute to be used after\n regridding. 
If not set, a default value is used.\n\n Returns:\n iris.cube.Cube: Regridded cube with updated attributes\n \"\"\"\n # if regridding using a land-sea mask, check this covers the source\n # grid in the required coordinates\n if self.REGRID_REQUIRES_LANDMASK[self.regrid_mode]:\n if not grid_contains_cutout(self.landmask_source_grid, cube):\n raise ValueError(\"Source landmask does not match input grid\")\n return self._regrid_to_target(cube, target_grid, regridded_title)\n\n\nclass AdjustLandSeaPoints(BasePlugin):\n \"\"\"\n Replace data values at points where the nearest-regridding technique\n selects a source grid-point with an opposite land-sea-mask value to the\n target grid-point.\n The replacement data values are selected from a vicinity of points on the\n source-grid and the closest point of the correct mask is used.\n Where no match is found within the vicinity, the data value is not changed.\n \"\"\"\n\n def __init__(self, extrapolation_mode=\"nanmask\", vicinity_radius=25000.0):\n \"\"\"\n Initialise class\n\n Args:\n extrapolation_mode (str):\n Mode to use for extrapolating data into regions\n beyond the limits of the source_data domain.\n Available modes are documented in\n `iris.analysis <https://scitools.org.uk/iris/docs/latest/iris/\n iris/analysis.html#iris.analysis.Nearest>`_\n Defaults to \"nanmask\".\n vicinity_radius (float):\n Distance in metres to search for a sea or land point.\n \"\"\"\n self.input_land = None\n self.nearest_cube = None\n self.output_land = None\n self.output_cube = None\n self.regridder = Nearest(extrapolation_mode=extrapolation_mode)\n self.vicinity = OccurrenceWithinVicinity(vicinity_radius)\n\n def __repr__(self):\n \"\"\"\n Print a human-readable representation of the instantiated object.\n \"\"\"\n return \"<AdjustLandSeaPoints: regridder: {}; vicinity: {}>\".format(\n self.regridder, self.vicinity\n )\n\n def correct_where_input_true(self, selector_val):\n \"\"\"\n Replace points in the output_cube where output_land matches the\n selector_val and the input_land does not match, but has matching\n points in the vicinity, with the nearest matching point in the\n vicinity in the original nearest_cube.\n\n Updates self.output_cube.data\n\n Args:\n selector_val (int):\n Value of mask to replace if needed.\n Intended to be 1 for filling land points near the coast\n and 0 for filling sea points near the coast.\n \"\"\"\n # Find all points on output grid matching selector_val\n use_points = np.where(self.input_land.data == selector_val)\n\n # If there are no matching points on the input grid, no alteration can\n # be made. This tests the size of the y-coordinate of use_points.\n if use_points[0].size is 0:\n return\n\n # Get shape of output grid\n ynum, xnum = self.output_land.shape\n\n # Using only these points, extrapolate to fill domain using nearest\n # neighbour. 
This will generate a grid where the non-selector_val\n # points are filled with the nearest value in the same mask\n # classification.\n (y_points, x_points) = np.mgrid[0:ynum, 0:xnum]\n selector_data = griddata(\n use_points,\n self.nearest_cube.data[use_points],\n (y_points, x_points),\n method=\"nearest\",\n )\n\n # Identify nearby points on regridded input_land that match the\n # selector_value\n if selector_val > 0.5:\n thresholder = BasicThreshold(0.5)\n else:\n thresholder = BasicThreshold(0.5, comparison_operator=\"<=\")\n in_vicinity = self.vicinity(thresholder(self.input_land))\n\n # Identify those points sourced from the opposite mask that are\n # close to a source point of the correct mask\n (mismatch_points,) = np.logical_and(\n np.logical_and(\n self.output_land.data == selector_val,\n self.input_land.data != selector_val,\n ),\n in_vicinity.data > 0.5,\n )\n\n # Replace these points with the filled-domain data\n self.output_cube.data[mismatch_points] = selector_data[mismatch_points]\n\n def process(self, cube, input_land, output_land):\n \"\"\"\n Update cube.data so that output_land and sea points match an input_land\n or sea point respectively so long as one is present within the\n specified vicinity radius. Note that before calling this plugin the\n input land mask MUST be checked against the source grid, to ensure\n the grids match.\n\n Args:\n cube (iris.cube.Cube):\n Cube of data to be updated (on same grid as output_land).\n input_land (iris.cube.Cube):\n Cube of land_binary_mask data on the grid from which \"cube\" has\n been reprojected (it is expected that the iris.analysis.Nearest\n method would have been used). Land points should be set to one\n and sea points set to zero.\n This is used to determine where the input model data is\n representing land and sea points.\n output_land (iris.cube.Cube):\n Cube of land_binary_mask data on target grid.\n \"\"\"\n # Check cube and output_land are on the same grid:\n if not spatial_coords_match(cube, output_land):\n raise ValueError(\n \"X and Y coordinates do not match for cubes {}\"\n \"and {}\".format(repr(cube), repr(output_land))\n )\n self.output_land = output_land\n\n # Regrid input_land to output_land grid.\n self.input_land = input_land.regrid(self.output_land, self.regridder)\n\n # Slice over x-y grids for multi-realization data.\n result = iris.cube.CubeList()\n for xyslice in cube.slices([cube.coord(axis=\"y\"), cube.coord(axis=\"x\")]):\n\n # Store and copy cube ready for the output data\n self.nearest_cube = xyslice\n self.output_cube = self.nearest_cube.copy()\n\n # Update sea points that were incorrectly sourced from land points\n self.correct_where_input_true(0)\n\n # Update land points that were incorrectly sourced from sea points\n self.correct_where_input_true(1)\n\n result.append(self.output_cube)\n\n result = result.merge_cube()\n return result\n"
] | [
[
"numpy.array",
"numpy.flip",
"numpy.ones"
],
[
"numpy.array",
"numpy.ones"
],
[
"numpy.linspace",
"numpy.reshape",
"numpy.broadcast_to",
"numpy.ma.masked_array",
"numpy.repeat",
"numpy.array",
"numpy.testing.assert_allclose"
],
[
"numpy.ma.is_masked"
],
[
"numpy.allclose",
"numpy.issubdtype",
"numpy.any",
"scipy.interpolate.griddata",
"numpy.logical_and",
"numpy.where",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
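The tests in the preceding record exercise _location_and_scale_parameters_to_percentiles, which evaluates a distribution's percent point function at the requested percentiles. A minimal standalone sketch of that calculation with scipy (not the IMPROVER implementation; it assumes the scale parameter is already a standard deviation and ignores all cube metadata):

import numpy as np
from scipy.stats import norm, truncnorm

def location_scale_to_percentiles(location, scale, percentiles, bounds=None):
    # location, scale: arrays of the same shape; percentiles: values in (0, 100).
    # bounds: optional (lower, upper) pair to use a truncated normal instead,
    # mirroring the shape_parameters=[0, np.inf] case in the tests above.
    probs = np.asarray(percentiles, dtype=np.float64) / 100.0
    result = np.empty((len(probs),) + np.shape(location), dtype=np.float64)
    for i, p in enumerate(probs):
        if bounds is None:
            result[i] = norm.ppf(p, loc=location, scale=scale)
        else:
            a = (bounds[0] - location) / scale
            b = (bounds[1] - location) / scale
            result[i] = truncnorm.ppf(p, a, b, loc=location, scale=scale)
    return result

For example, location 2.0, scale 1.0 and percentiles [10, 50, 90] give approximately [0.718, 2.0, 3.282], in line with the idealised values checked in test_simple_data.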
lsqshr/pytorch-lightning | [
"c6b68883879e38719688865aceac746477f0a9b9"
] | [
"tests/core/test_datamodules.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pickle\nfrom argparse import ArgumentParser\nfrom typing import Any, Dict\nfrom unittest import mock\nfrom unittest.mock import call, PropertyMock\n\nimport pytest\nimport torch\n\nfrom pytorch_lightning import LightningDataModule, Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.utilities import AttributeDict\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom tests.helpers import BoringDataModule, BoringModel\nfrom tests.helpers.datamodules import ClassifDataModule\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.simple_models import ClassificationModel\nfrom tests.helpers.utils import reset_seed\n\n\[email protected](\"pytorch_lightning.trainer.trainer.Trainer.node_rank\", new_callable=PropertyMock)\[email protected](\"pytorch_lightning.trainer.trainer.Trainer.local_rank\", new_callable=PropertyMock)\ndef test_can_prepare_data(local_rank, node_rank):\n\n model = BoringModel()\n dm = BoringDataModule()\n trainer = Trainer()\n trainer.model = model\n trainer.datamodule = dm\n\n # 1 no DM\n # prepare_data_per_node = True\n # local rank = 0 (True)\n trainer.prepare_data_per_node = True\n\n dm.random_full = None\n dm._has_prepared_data = False\n local_rank.return_value = 0\n assert trainer.local_rank == 0\n assert trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is not None\n\n # local rank = 1 (False)\n dm.random_full = None\n dm._has_prepared_data = False\n local_rank.return_value = 1\n assert trainer.local_rank == 1\n assert not trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is None\n\n # prepare_data_per_node = False (prepare across all nodes)\n # global rank = 0 (True)\n dm.random_full = None\n dm._has_prepared_data = False\n trainer.prepare_data_per_node = False\n node_rank.return_value = 0\n local_rank.return_value = 0\n assert trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is not None\n\n # global rank = 1 (False)\n dm.random_full = None\n dm._has_prepared_data = False\n node_rank.return_value = 1\n local_rank.return_value = 0\n assert not trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is None\n\n node_rank.return_value = 0\n local_rank.return_value = 1\n assert not trainer.data_connector.can_prepare_data()\n\n trainer.data_connector.prepare_data()\n assert dm.random_full is None\n\n # 2 dm\n # prepar per node = True\n # local rank = 0 (True)\n trainer.prepare_data_per_node = True\n local_rank.return_value = 0\n\n # is_overridden prepare data = True\n # has been called\n # False\n dm._has_prepared_data = True\n assert not trainer.data_connector.can_prepare_data()\n\n # has not been called\n # True\n dm._has_prepared_data = False\n assert 
trainer.data_connector.can_prepare_data()\n\n # is_overridden prepare data = False\n # True\n dm.prepare_data = None\n assert trainer.data_connector.can_prepare_data()\n\n\ndef test_hooks_no_recursion_error():\n # hooks were appended in cascade every tine a new data module was instantiated leading to a recursion error.\n # See https://github.com/PyTorchLightning/pytorch-lightning/issues/3652\n class DummyDM(LightningDataModule):\n def setup(self, *args, **kwargs):\n pass\n\n def prepare_data(self, *args, **kwargs):\n pass\n\n for i in range(1005):\n dm = DummyDM()\n dm.setup()\n dm.prepare_data()\n\n\ndef test_helper_boringdatamodule():\n dm = BoringDataModule()\n dm.prepare_data()\n dm.setup()\n\n\ndef test_helper_boringdatamodule_with_verbose_setup():\n dm = BoringDataModule()\n dm.prepare_data()\n dm.setup(\"fit\")\n dm.setup(\"test\")\n\n\ndef test_data_hooks_called():\n dm = BoringDataModule()\n assert not dm.has_prepared_data\n assert not dm.has_setup_fit\n assert not dm.has_setup_test\n assert not dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.prepare_data()\n assert dm.has_prepared_data\n assert not dm.has_setup_fit\n assert not dm.has_setup_test\n assert not dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.setup()\n assert dm.has_prepared_data\n assert dm.has_setup_fit\n assert dm.has_setup_test\n assert dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.teardown()\n assert dm.has_prepared_data\n assert dm.has_setup_fit\n assert dm.has_setup_test\n assert dm.has_setup_validate\n assert not dm.has_setup_predict\n assert dm.has_teardown_fit\n assert dm.has_teardown_test\n assert dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n\[email protected](\"use_kwarg\", (False, True))\ndef test_data_hooks_called_verbose(use_kwarg):\n dm = BoringDataModule()\n dm.prepare_data()\n assert not dm.has_setup_fit\n assert not dm.has_setup_test\n assert not dm.has_setup_validate\n assert not dm.has_setup_predict\n assert not dm.has_teardown_fit\n assert not dm.has_teardown_test\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_predict\n\n dm.setup(stage=\"fit\") if use_kwarg else dm.setup(\"fit\")\n assert dm.has_setup_fit\n assert not dm.has_setup_validate\n assert not dm.has_setup_test\n assert not dm.has_setup_predict\n\n dm.setup(stage=\"validate\") if use_kwarg else dm.setup(\"validate\")\n assert dm.has_setup_fit\n assert dm.has_setup_validate\n assert not dm.has_setup_test\n assert not dm.has_setup_predict\n\n dm.setup(stage=\"test\") if use_kwarg else dm.setup(\"test\")\n assert dm.has_setup_fit\n assert dm.has_setup_validate\n assert dm.has_setup_test\n assert not dm.has_setup_predict\n\n dm.setup(stage=\"predict\") if use_kwarg else dm.setup(\"predict\")\n assert dm.has_setup_fit\n assert dm.has_setup_validate\n assert dm.has_setup_test\n assert dm.has_setup_predict\n\n dm.teardown(stage=\"fit\") if use_kwarg else dm.teardown(\"fit\")\n assert dm.has_teardown_fit\n assert not dm.has_teardown_validate\n assert not dm.has_teardown_test\n assert not dm.has_teardown_predict\n\n 
dm.teardown(stage=\"validate\") if use_kwarg else dm.teardown(\"validate\")\n assert dm.has_teardown_fit\n assert dm.has_teardown_validate\n assert not dm.has_teardown_test\n assert not dm.has_teardown_predict\n\n dm.teardown(stage=\"test\") if use_kwarg else dm.teardown(\"test\")\n assert dm.has_teardown_fit\n assert dm.has_teardown_validate\n assert dm.has_teardown_test\n assert not dm.has_teardown_predict\n\n dm.teardown(stage=\"predict\") if use_kwarg else dm.teardown(\"predict\")\n assert dm.has_teardown_fit\n assert dm.has_teardown_validate\n assert dm.has_teardown_test\n assert dm.has_teardown_predict\n\n\ndef test_dm_add_argparse_args(tmpdir):\n parser = ArgumentParser()\n parser = BoringDataModule.add_argparse_args(parser)\n args = parser.parse_args([\"--data_dir\", str(tmpdir)])\n assert args.data_dir == str(tmpdir)\n\n\ndef test_dm_init_from_argparse_args(tmpdir):\n parser = ArgumentParser()\n parser = BoringDataModule.add_argparse_args(parser)\n args = parser.parse_args([\"--data_dir\", str(tmpdir)])\n dm = BoringDataModule.from_argparse_args(args)\n dm.prepare_data()\n dm.setup()\n assert dm.data_dir == args.data_dir == str(tmpdir)\n\n\ndef test_dm_pickle_after_init():\n dm = BoringDataModule()\n pickle.dumps(dm)\n\n\ndef test_train_loop_only(tmpdir):\n reset_seed()\n\n dm = ClassifDataModule()\n model = ClassificationModel()\n\n model.validation_step = None\n model.validation_step_end = None\n model.validation_epoch_end = None\n model.test_step = None\n model.test_step_end = None\n model.test_epoch_end = None\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)\n\n # fit model\n trainer.fit(model, datamodule=dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.callback_metrics[\"train_loss\"] < 1.0\n\n\ndef test_train_val_loop_only(tmpdir):\n reset_seed()\n\n dm = ClassifDataModule()\n model = ClassificationModel()\n\n model.validation_step = None\n model.validation_step_end = None\n model.validation_epoch_end = None\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)\n\n # fit model\n trainer.fit(model, datamodule=dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.callback_metrics[\"train_loss\"] < 1.0\n\n\ndef test_dm_checkpoint_save(tmpdir):\n class CustomBoringModel(BoringModel):\n def validation_step(self, batch, batch_idx):\n out = super().validation_step(batch, batch_idx)\n self.log(\"early_stop_on\", out[\"x\"])\n return out\n\n class CustomBoringDataModule(BoringDataModule):\n def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n checkpoint[self.__class__.__name__] = self.__class__.__name__\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n self.checkpoint_state = checkpoint.get(self.__class__.__name__)\n\n reset_seed()\n dm = CustomBoringDataModule()\n model = CustomBoringModel()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=2,\n limit_val_batches=1,\n weights_summary=None,\n callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\")],\n )\n\n # fit model\n trainer.fit(model, dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]\n checkpoint = torch.load(checkpoint_path)\n assert dm.__class__.__name__ in checkpoint\n assert checkpoint[dm.__class__.__name__] == dm.__class__.__name__\n\n\ndef test_full_loop(tmpdir):\n reset_seed()\n\n 
dm = ClassifDataModule()\n model = ClassificationModel()\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None, deterministic=True)\n\n # fit model\n trainer.fit(model, dm)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert dm.trainer is not None\n\n # validate\n result = trainer.validate(model, dm)\n assert dm.trainer is not None\n assert result[0][\"val_acc\"] > 0.7\n\n # test\n result = trainer.test(model, dm)\n assert dm.trainer is not None\n assert result[0][\"test_acc\"] > 0.6\n\n\n@RunIf(min_gpus=1)\[email protected](\"pytorch_lightning.accelerators.accelerator.Accelerator.lightning_module\", new_callable=PropertyMock)\ndef test_dm_apply_batch_transfer_handler(get_module_mock):\n expected_device = torch.device(\"cuda\", 0)\n\n class CustomBatch:\n def __init__(self, data):\n self.samples = data[0]\n self.targets = data[1]\n\n class CurrentTestDM(LightningDataModule):\n rank = 0\n transfer_batch_to_device_hook_rank = None\n on_before_batch_transfer_hook_rank = None\n on_after_batch_transfer_hook_rank = None\n\n def on_before_batch_transfer(self, batch, dataloader_idx):\n assert dataloader_idx == 0\n self.on_before_batch_transfer_hook_rank = self.rank\n self.rank += 1\n batch.samples += 1\n return batch\n\n def on_after_batch_transfer(self, batch, dataloader_idx):\n assert dataloader_idx == 0\n assert batch.samples.device == batch.targets.device == expected_device\n self.on_after_batch_transfer_hook_rank = self.rank\n self.rank += 1\n batch.targets *= 2\n return batch\n\n def transfer_batch_to_device(self, batch, device, dataloader_idx):\n assert dataloader_idx == 0\n self.transfer_batch_to_device_hook_rank = self.rank\n self.rank += 1\n batch.samples = batch.samples.to(device)\n batch.targets = batch.targets.to(device)\n return batch\n\n dm = CurrentTestDM()\n model = BoringModel()\n\n batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))\n\n trainer = Trainer(gpus=1)\n # running .fit() would require us to implement custom data loaders, we mock the model reference instead\n get_module_mock.return_value = model\n if is_overridden(\"transfer_batch_to_device\", dm):\n model.transfer_batch_to_device = dm.transfer_batch_to_device\n\n model.on_before_batch_transfer = dm.on_before_batch_transfer\n model.transfer_batch_to_device = dm.transfer_batch_to_device\n model.on_after_batch_transfer = dm.on_after_batch_transfer\n\n batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)\n\n assert dm.on_before_batch_transfer_hook_rank == 0\n assert dm.transfer_batch_to_device_hook_rank == 1\n assert dm.on_after_batch_transfer_hook_rank == 2\n assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device\n assert torch.allclose(batch_gpu.samples.cpu(), torch.ones(5, 32))\n assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)\n\n\ndef test_dm_reload_dataloaders_every_n_epochs(tmpdir):\n \"\"\"\n Test datamodule, where trainer argument\n reload_dataloaders_every_n_epochs is set to a non negative integer\n \"\"\"\n\n class CustomBoringDataModule(BoringDataModule):\n def __init__(self):\n super().__init__()\n self._epochs_called_for = []\n\n def train_dataloader(self):\n assert self.trainer.current_epoch not in self._epochs_called_for\n self._epochs_called_for.append(self.trainer.current_epoch)\n return super().train_dataloader()\n\n dm = CustomBoringDataModule()\n model = BoringModel()\n\n model.validation_step = None\n model.validation_step_end = None\n 
model.validation_epoch_end = None\n model.test_step = None\n model.test_step_end = None\n model.test_epoch_end = None\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2)\n trainer.fit(model, dm)\n\n\nclass DummyDS(torch.utils.data.Dataset):\n def __getitem__(self, index):\n return 1\n\n def __len__(self):\n return 100\n\n\nclass DummyIDS(torch.utils.data.IterableDataset):\n def __iter__(self):\n yield 1\n\n\[email protected](\"iterable\", (False, True))\ndef test_dm_init_from_datasets_dataloaders(iterable):\n ds = DummyIDS if iterable else DummyDS\n\n train_ds = ds()\n dm = LightningDataModule.from_datasets(train_ds, batch_size=4, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.train_dataloader()\n dl_mock.assert_called_once_with(train_ds, batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True)\n assert dm.val_dataloader() is None\n assert dm.test_dataloader() is None\n\n train_ds_sequence = [ds(), ds()]\n dm = LightningDataModule.from_datasets(train_ds_sequence, batch_size=4, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.train_dataloader()\n dl_mock.assert_has_calls(\n [\n call(train_ds_sequence[0], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),\n call(train_ds_sequence[1], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),\n ]\n )\n assert dm.val_dataloader() is None\n assert dm.test_dataloader() is None\n\n valid_ds = ds()\n test_ds = ds()\n dm = LightningDataModule.from_datasets(val_dataset=valid_ds, test_dataset=test_ds, batch_size=2, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.val_dataloader()\n dl_mock.assert_called_with(valid_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)\n dm.test_dataloader()\n dl_mock.assert_called_with(test_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)\n assert dm.train_dataloader() is None\n\n valid_dss = [ds(), ds()]\n test_dss = [ds(), ds()]\n dm = LightningDataModule.from_datasets(train_ds, valid_dss, test_dss, batch_size=4, num_workers=0)\n with mock.patch(\"pytorch_lightning.core.datamodule.DataLoader\") as dl_mock:\n dm.val_dataloader()\n dm.test_dataloader()\n dl_mock.assert_has_calls(\n [\n call(valid_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n call(valid_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n call(test_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n call(test_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),\n ]\n )\n\n\nclass DataModuleWithHparams(LightningDataModule):\n def __init__(self, arg0, arg1, kwarg0=None):\n super().__init__()\n self.save_hyperparameters()\n\n\ndef test_simple_hyperparameters_saving():\n data = DataModuleWithHparams(10, \"foo\", kwarg0=\"bar\")\n assert data.hparams == AttributeDict({\"arg0\": 10, \"arg1\": \"foo\", \"kwarg0\": \"bar\"})\n"
] | [
[
"torch.device",
"torch.zeros",
"torch.ones",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
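The datamodule tests in the preceding record revolve around the prepare_data / setup / *_dataloader hook contract. A minimal sketch of a LightningDataModule following that contract (hypothetical random-tensor data, not one of the test helpers):

import torch
from torch.utils.data import DataLoader, TensorDataset
from pytorch_lightning import LightningDataModule

class RandomDataModule(LightningDataModule):
    def __init__(self, num_samples=64, batch_size=8):
        super().__init__()
        self.num_samples = num_samples
        self.batch_size = batch_size

    def prepare_data(self):
        # one-time, per-node work such as downloads; no state should be set here
        pass

    def setup(self, stage=None):
        # per-process work; build the datasets consumed by the dataloaders
        self.train_set = TensorDataset(torch.randn(self.num_samples, 32))
        self.val_set = TensorDataset(torch.randn(self.num_samples // 4, 32))

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=self.batch_size)

Passing such a datamodule to Trainer.fit(model, datamodule=dm) drives these hooks for you, which is the bookkeeping the has_setup_* and has_teardown_* assertions above track.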
bbo-lab/multitrackpy | [
"a25ebdb94969b0682c851ab69ba5895173b581d0"
] | [
"multitrackpy/mtt.py"
] | [
"import numpy as np\nimport h5py\nfrom pprint import pprint\n\ndef read_calib(mtt_path):\n mtt_file = h5py.File(mtt_path)\n\n istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)\n calind = np.squeeze(np.int32(mtt_file['mt']['calind']))[istracking] - 1\n\n mc = {\n 'Rglobal': np.asarray(mtt_file['mt']['mc']['Rglobal']).transpose((0, 2, 1)), # in reverse order in h5 file!\n 'Tglobal': np.asarray(mtt_file['mt']['mc']['Tglobal']),\n 'cal': []\n }\n\n for ci in calind:\n mc['cal'].append({\n 'scaling': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['scaling'][ci, 0]]).T[0],\n 'icent': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['icent'][ci, 0]]).T[0],\n 'distortion_coefs': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['distortion_coefs'][ci, 0]]),\n 'sensorsize': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['sensorsize'][ci, 0]]).T[0],\n 'scale_pixels': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['scale_pixels'][ci, 0]]),\n })\n\n # pprint(mc)\n return mc\n\n\ndef read_video_paths(vid_dir, mtt_path):\n mtt_file = h5py.File(mtt_path)\n istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)\n return [vid_dir + ''.join([chr(c) for c in mtt_file[mtt_file['mt']['vidname'][0, i]][:].T.astype(np.int)[0]]) for i\n in np.where(istracking)[0]]\n\n\ndef read_spacecoords(mtt_path):\n mtt_file = h5py.File(mtt_path)\n return np.asarray(mtt_file['mt']['objmodel']['space_coord'])\n\n\ndef read_frame_n(mtt_path):\n mtt_file = h5py.File(mtt_path)\n return len(mtt_file['mt']['t'])\n"
] | [
[
"numpy.asarray",
"numpy.int32",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
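read_calib and read_video_paths in the preceding record rely on h5py's handling of MATLAB v7.3 files, where cell arrays are stored as datasets of HDF5 object references that must be dereferenced through the open file handle. A minimal sketch of that access pattern (the file name is hypothetical; the group path follows the one used above):

import numpy as np
import h5py

with h5py.File("calibration_v73.mat", "r") as f:
    # dataset of object references, one per camera
    refs = f["mt"]["mc"]["cal"]["scaling"]
    # dereference each entry by indexing the file with the reference itself
    scaling = [np.asarray(f[ref]).T[0] for ref in refs[:, 0]]
    print(len(scaling), scaling[0].shape)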
Leofltt/rg_sound_generation | [
"8e79b4d9dce028def43284f80521a2ec61d0066c"
] | [
"members/amit/clf/data_generator_binary.py"
] | [
"import random\nimport shutil\nimport os\nimport numpy as np\nimport data_loader\nimport audio_processing\n\nfrom typing import Dict\nfrom loguru import logger\nfrom tqdm import tqdm\nfrom pprint import pprint\n\n\nclass DataGenerator:\n def __init__(self, conf: Dict, batch_size: int = 8):\n assert \"csv_file_path\" in conf\n assert \"base_dir\" in conf\n self.conf = conf.copy()\n self.batch_size = batch_size\n self.examples = data_loader.data_loader(conf)\n self.num_examples = len(self.examples)\n self.train = {0: [], 1: []}\n self.valid = {0: [], 1: []}\n self.train_counts = {0: 0, 1: 0}\n self.valid_counts = {0: 0, 1: 0}\n self.num_train = 0\n self.num_valid = 0\n self.classes = [0, 1]\n self.input_shapes = {\n \"spec\": (),\n \"hpss\": ()\n }\n logger.info(\"DataGenerator instantiated\")\n self.preprocess()\n logger.info(\"Preprocessing complete\")\n\n def preprocess(self):\n logger.info(\"Preprocessing examples\")\n logger.info(f\"{self.input_shapes['spec']} = Current input shape for spec\")\n\n folder = os.path.join(self.conf.get(\"preprocess_dir\"))\n\n if self.conf.get(\"reset_data\"):\n if os.path.isdir(folder):\n shutil.rmtree(folder)\n\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\n min_level = 50 - self.conf.get(\"threshold\")\n max_level = 50 + self.conf.get(\"threshold\")\n valid_split = int(self.conf.get(\"valid_split\") * 100)\n\n logger.info(f\"Min level {min_level}, Max level {max_level}\")\n\n for key, value in tqdm(self.examples.items()):\n audio_file_name = value[\"audio_file_name\"]\n file_path = os.path.join(self.conf.get(\"base_dir\"), f\"{audio_file_name}.wav\")\n current_class = 1\n\n for j, feature in enumerate(self.conf.get(\"features\")):\n current_val = int(value[feature])\n current_class = -1\n if current_val < min_level:\n current_class = 0\n elif current_val > max_level:\n current_class = 1\n\n if current_class == -1:\n continue\n\n target_file_path = os.path.join(self.conf.get(\"preprocess_dir\"), audio_file_name)\n\n if not os.path.isfile(f\"{target_file_path}.spec.npy\"):\n spec, hpss = audio_processing.get_features(file_path, self.conf)\n self.input_shapes[\"spec\"] = spec.shape\n self.input_shapes[\"hpss\"] = hpss.shape\n np.save(f\"{target_file_path}.spec\", spec)\n np.save(f\"{target_file_path}.hpss\", hpss)\n elif len(self.input_shapes[\"spec\"]) == 0:\n spec = np.load(f\"{target_file_path}.spec.npy\")\n hpss = np.load(f\"{target_file_path}.hpss.npy\")\n logger.info(\"Setting input shapes based on previous files\")\n logger.info(f\"{spec.shape}, {hpss.shape}\")\n self.input_shapes[\"spec\"] = spec.shape\n self.input_shapes[\"hpss\"] = hpss.shape\n\n if random.randint(0, 99) < valid_split:\n self.valid[current_class].append(target_file_path)\n self.valid_counts[current_class] += 1\n else:\n self.train[current_class].append(target_file_path)\n self.train_counts[current_class] += 1\n self.num_train = sum(list(self.train_counts.values()))\n self.num_valid = sum(list(self.train_counts.values()))\n\n logger.info(\"Class counts in training set\")\n pprint(self.train_counts)\n logger.info(\"Class counts in validation set\")\n pprint(self.valid_counts)\n\n def generator(self, set_name: str):\n assert set_name in [\"train\", \"valid\"], \"Set name must be either train or valid\"\n\n while True:\n spec_batch = np.zeros((self.batch_size,) + self.input_shapes[\"spec\"])\n hpss_batch = np.zeros((self.batch_size,) + self.input_shapes[\"hpss\"])\n y_batch = np.zeros((self.batch_size, ))\n current_set = eval(f\"self.{set_name}\")\n\n for i in range(0, 
self.batch_size):\n target_class = random.choice([0, 1])\n example_file = random.choice(current_set[target_class])\n example_spec = np.load(f\"{example_file}.spec.npy\") * self.conf.get(\"scale_factor\")\n example_hpss = np.load(f\"{example_file}.hpss.npy\") * self.conf.get(\"scale_factor\")\n spec_batch[i] = example_spec\n hpss_batch[i] = example_hpss\n y_batch[i] = target_class\n\n yield {\"spec\": spec_batch, \"hpss\": hpss_batch}, {\"output\": y_batch}\n"
] | [
[
"numpy.load",
"numpy.zeros",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
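The DataGenerator row above balances its two classes by drawing the class label first and only then picking an example from that class. A minimal standalone sketch of that sampling pattern follows; it is illustrative only (not part of the dataset row), and `paths_by_class`, `feature_shape`, and the random stand-in features are assumptions made so the sketch stays self-contained and runnable.

import random
import numpy as np

def balanced_batches(paths_by_class, batch_size=8, feature_shape=(64, 128)):
    # paths_by_class: dict {0: [...], 1: [...]} of preprocessed example ids (assumed layout)
    while True:
        x_batch = np.zeros((batch_size,) + feature_shape)
        y_batch = np.zeros((batch_size,))
        for i in range(batch_size):
            target_class = random.choice([0, 1])
            _example = random.choice(paths_by_class[target_class])
            # a real generator would np.load the saved .npy features for _example;
            # random values stand in here so the sketch stays self-contained
            x_batch[i] = np.random.rand(*feature_shape)
            y_batch[i] = target_class
        yield x_batch, y_batch

gen = balanced_batches({0: ["low_a", "low_b"], 1: ["high_a"]})
x, y = next(gen)
print(x.shape, y)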
bshrram/Graduation-Project---Omnidirectional-Conveyor-Table | [
"6414fbcb3d53f3c3351c25ac8b48aa73397c250d",
"6414fbcb3d53f3c3351c25ac8b48aa73397c250d"
] | [
"feedback_system/findTable.py",
"shortestPath.py"
] | [
"import numpy as np\nimport cv2 as cv\n\nflann_params= dict(algorithm = 6,\n table_number = 6, # 12\n key_size = 12, # 20\n multi_probe_level = 1) #2\n\n\ndef init_feature():\n \"\"\"initialize feature detector and matcher algorithm\n \"\"\"\n detector = cv.ORB_create(3000)\n norm = cv.NORM_HAMMING\n #matcher = cv.BFMatcher(norm)\n matcher = cv.FlannBasedMatcher(flann_params, {})\n return detector, matcher\n\n\ndef filter_matches(kp1, kp2, matches, ratio = 0.8):\n \"\"\"filter matches to keep strong matches only\n \"\"\"\n mkp1, mkp2 = [], []\n for m in matches:\n if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n m = m[0]\n mkp1.append( kp1[m.queryIdx] )\n mkp2.append( kp2[m.trainIdx] )\n p1 = np.float32([kp.pt for kp in mkp1])\n p2 = np.float32([kp.pt for kp in mkp2])\n kp_pairs = zip(mkp1, mkp2)\n return p1, p2, list(kp_pairs)\n\n\nc = []\ndef explore_match(win, img1, img2, kp_pairs, status = None, H = None):\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)\n vis[:h1, :w1, :3] = img1\n vis[:h2, w1:w1+w2, :3] = img2\n img3 = vis\n h3, w3 = img3.shape[:2]\n\n if H is not None:\n corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])\n corners1 = np.float32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))\n corners = np.int32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )\n c = corners\n cv.polylines(vis, [corners], True, (0, 0, 255))\n\n if status is None:\n status = np.ones(len(kp_pairs), np.bool_)\n \n p1, p2 = [], [] \n for kpp in kp_pairs:\n p1.append(np.int32(kpp[0].pt))\n p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))\n\n green = (0, 255, 0)\n red = (0, 0, 255)\n\n for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):\n if inlier:\n col = green\n cv.circle(vis, (x1, y1), 2, col, -1)\n cv.circle(vis, (x2, y2), 2, col, -1)\n else:\n col = red\n r = 2\n thickness = 3\n cv.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)\n cv.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)\n cv.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)\n cv.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)\n\n for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):\n if inlier:\n cv.line(vis, (x1, y1), (x2, y2), green)\n\n cv.imshow(win, vis)\n return corners1\n\n\nscale_percent =25\nimg1 = cv.imread(cv.samples.findFile('table7A.jpg'))\nwidth = int(img1.shape[1] * scale_percent / 100)\nheight = int(img1.shape[0] * scale_percent / 100)\n#img1 = cv.resize(img1, (width,height))\n\n\ndetector, matcher = init_feature()\n\n# apply orb on table image\nkp1, desc1 = detector.detectAndCompute(img1, None)\n\ndef getCorners(frame):\n \n # apply orb on frame\n kp2, desc2 = detector.detectAndCompute(frame, None)\n\n print('matching...')\n raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) \n #filter matches and keep strong matches\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n if len(p1) >= 4:\n # H: transformation matrix\n H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)\n print('%d / %d inliers/matched' % (np.sum(status), len(status)))\n else:\n H, status = None, None\n print('%d matches found, not enough for homography estimation' % len(p1))\n\n corners = explore_match('find_table', img1, frame, kp_pairs, status, H)\n return corners\n\ndef getTableFromFrame (corners, frame):\n h1, w1 = img1.shape[:2]\n h2, w2 = frame.shape[:2]\n vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)\n vis[:h1, :w1, :3] = img1\n vis[:h2, w1:w1+w2, :3] 
= frame\n pts1 = corners\n pts2 = np.float32([[0,0],[w1,0],[w1,h1], [0,h1]])\n M = cv.getPerspectiveTransform(pts1,pts2)\n # print((w1, h1))\n dst = cv.warpPerspective(vis, M,(w1,h1))\n return dst\n",
"import time\nimport sys\nsys.path.insert(1, './feedback_system')\nsys.path.insert(2, './control_system')\nfrom common import *\n\nfrom feedback_system.findTable import *\nfrom feedback_system.tracker import Tracker\nfrom feedback_system.detector import Detector\nfrom PID.pid_controller import PIDController\nimport numpy as np\nimport cv2 as cv\nimport argparse\nimport imutils\nfrom PathPlanning import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', type=str,\n help='Path to a video or a sequence of image.', default='data/videos/9.mp4')\nparser.add_argument('--algo', type=str,\n help='Background subtraction method (KNN, MOG2, COLOR).', default='COLOR')\nparser.add_argument('--train', type=str,\n help='Path to a video or a sequence of image.', default='data/videos/2.mp4')\nargs = parser.parse_args()\n\n\nif args.algo == 'COLOR':\n lower_blue = np.array([105, 50, 50])\n upper_blue = np.array([130, 220, 220])\n lower_black = np.array([95,45, 45 ])\n upper_black = np.array([140, 255, 255])\n detector = Detector(type=\"COLOR\", color=(lower_blue, upper_blue))\n\n\ndef followshortestPath(myTable, cell1, cell2):\n cell1 = [int(i) for i in cell1]\n cell2 = [int(i) for i in cell2]\n\n tracker = Tracker(160, 30, 10, 100)\n track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),\n (0, 255, 255), (255, 0, 255), (255, 127, 255),\n (127, 0, 255), (127, 0, 127)]\n detectorqr = cv.QRCodeDetector()\n #capture = cv.VideoCapture(cv.samples.findFileOrKeep(args.input))\n capture = cv.VideoCapture('http://192.168.43.1:8080/video')\n if not capture.isOpened:\n print('Unable to open: ' + args.input)\n exit(0)\n r= False\n frames = 0\n inf = 999999991\n corners = [[0, 0], [inf, 0], [inf, inf], [0, inf]]\n locations = pathCoordinates(dijPath(4, 10, cell1, cell2 ), myTable)\n endCells = list(map(myTable.getCellByLocation, locations))\n locations = smooth(locations)\n\n\n\n index = 0\n\n pastPos = (0, 0) # xpast, ypast\n dir = 1 # direction of rotate\n hang = 0\n hangFrames = 0\n\n endAngle = 90\n kp = 0.36\n ki = 0.40\n kd = 0.4\n umax = 150 # max controller output, (N)\n alpha = 0.8 # derivative filter smoothing factor\n pid = PIDController(kp = kp, ki = ki, kd = kd, max_windup = 200, u_bounds\n = [-umax, umax], alpha = alpha)\n\n pid.setTarget(endAngle)\n\n waitBox = 0\n count = 0\n t = 0\n y0 =0\n y1 = 0\n fps = 25\n fRotate = 1\n cornersList = []\n \n while True:\n keyboard = cv.waitKey(1)\n if keyboard == 'q' or keyboard == 27:\n for i in range(20):\n comCells = myTable.getCommonCells(myTable.cells[i])\n myTable.cells[i].stop(comCells)\n time.sleep(.2)\n break\n ret, frame = capture.read()\n if frame is None:\n break\n # frame = cv.resize(frame, (640, 360))\n #frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n frames = frames + 1\n if frames < 50:\n corners1 = getCorners(frame)\n cornersList.append(corners1)\n continue\n\n if frames < 120:\n continue\n\n if frames == 120:\n cornersMean = np.mean(cornersList, axis=0)\n cornersStd = np.std(cornersList, axis=0)\n for i in range(len(cornersList)):\n for j in range(len(cornersList[0])):\n for k in range(len(cornersList[0][0])):\n std = cornersStd[j][k]\n mean = cornersMean[j][k]\n if (cornersList[i][j][k] < mean - 2*std) or (cornersList[i][j][k] > mean + 2*std) : \n cornersList[i][j][k] = mean\n\n corners = np.mean(cornersList, axis=0)\n \n frame = getTableFromFrame(corners, frame)\n (centers, angles) = detector.Detect(frame)\n h1, w1 = frame.shape[:2]\n if len(centers) == 0:\n continue\n\n if waitBox:\n waitBox -= 1\n for i in 
range(20):\n comCells = myTable.getCommonCells(myTable.cells[i])\n myTable.cells[i].stop(comCells)\n continue\n \n centersMM = pixelToMm((float(centers[0][0]), float(centers[0][1])), w1, h1)\n angle = angles[0][0]\n\n h = [hang, hangFrames, dir]\n\n if index == -1:\n cell = myTable.getCellsByNearLocation(centersMM, 1)[0]\n waitBox = 30\n index = 0\n\n [index, hang, hangFrames] = myTable.followPath(locations, centersMM, angle, index, h)\n if hang: \n continue\n\n\n curPos = (centers[0][0], centers[0][1])\n [hang, hangFrames, dir] = myTable.isHanging(hang, hangFrames, curPos, pastPos, dir)\n pastPos = curPos\n \n\n # if (len(centers) > 0):\n # tracker.Update(centers)\n\n # for i in range(len(tracker.tracks)):\n # if (len(tracker.tracks[i].trace) > 1):\n # for j in range(len(tracker.tracks[i].trace)-1):\n # # Draw trace line\n # x1 = tracker.tracks[i].trace[j][0][0]\n # y1 = tracker.tracks[i].trace[j][1][0]\n # x2 = tracker.tracks[i].trace[j+1][0][0]\n # y2 = tracker.tracks[i].trace[j+1][1][0]\n # clr = tracker.tracks[i].track_id % 9\n # cv.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),\n # track_colors[clr], 2)\n\n # l = len(tracker.tracks[i].trace)\n # if (l > 1):\n # xcur = tracker.tracks[0].trace[l-1][0][0]\n # ycur = tracker.tracks[0].trace[l-1][1][0]\n # xp = tracker.tracks[0].trace[l-2][0][0]\n # yp = tracker.tracks[0].trace[l-2][1][0]\n # dis = calculateDistance((xcur, ycur), (xp, yp))\n # if (ref < 10):\n # count+=1\n # else:\n # count = 0\n\n # # Display the resulting tracking frame\n #cv.imshow('Tracking', frame)\n"
] | [
[
"numpy.array",
"numpy.int32",
"numpy.sum",
"numpy.float32"
],
[
"numpy.std",
"numpy.array",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
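The findTable.py entry above estimates the table's pose by filtering ORB matches and passing the surviving point pairs to cv.findHomography with RANSAC. A small synthetic sketch of that last step follows; it assumes opencv-python and numpy are installed, and the point sets and H_true are generated for illustration rather than taken from the repository's images.

import numpy as np
import cv2 as cv

# Synthetic check of the homography step used in getCorners: build points, warp them
# with a known H, add a little noise, then recover H with RANSAC.
H_true = np.array([[1.0, 0.05, 20.0],
                   [-0.03, 1.1, 5.0],
                   [1e-4, 0.0, 1.0]])
pts1 = (np.random.rand(50, 1, 2) * 400).astype(np.float32)
pts2 = cv.perspectiveTransform(pts1, H_true)
pts2 += np.random.randn(50, 1, 2).astype(np.float32) * 0.5

H_est, status = cv.findHomography(pts1, pts2, cv.RANSAC, 5.0)
print('%d / %d inliers/matched' % (int(status.sum()), len(status)))
print(np.round(H_est / H_est[2, 2], 3))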
valkenzz/Bigeleisen_KIE | [
"aa82ee63c77be2e9d0bd97702c297aa70dfaa362"
] | [
"Bigeleisen_KIE.py"
] | [
"#Importation : \r\nimport pandas as pd\r\nimport numpy as np\r\n################################################\r\n#Parameters : \r\n#Planck constant (J/Hz)\r\nh=6.62607004*10**-34\r\n#Boltzmann constant (J/K)\r\nkB=1.38064852*10**-23\r\n#Light velocity in vaccum (m/s)\r\nc=299792458.0\r\n\r\n####################################################################################\r\n#Functions:\r\n######################################################################################\r\n#We check for errors : \r\n \r\n#We check if all values are positiv for initial states\r\ndef CheckPositiv(Data):\r\n if len(Data)!=len([i for i in Data if (i>0)]):\r\n print(\"At least one initial state hasn't every frequency that are positiv\")\r\n \r\ndef error(isH,isD,tsH,tsD):\r\n CheckPositiv(isH)\r\n CheckPositiv(isD) \r\n \r\n#####################################################################################\r\n\r\n\r\n#Function which takes the lists of vibration frequencies of 2 states to give the product of the ratio of frequencies\r\ndef Operation(Data1,Data2):\r\n if len(Data1)!=len(Data2):\r\n print(\"The number of frequencies isn't the same for two same states\")\r\n return \r\n x=1\r\n for i in range(len(Data1)):\r\n x=x*Data1[i]/Data2[i]\r\n return x\r\n\r\n#Function which takes one list of vibration frequencies to give the sinh of Ui = h*x/(kB*T) according to the Biegelheisen equation\r\ndef Ui(Data,T):\r\n return pd.Series(Data).apply(lambda x : np.sinh(float(x)*((h*100*c)/(2.0*kB*float(T)))))\r\n\r\n#Function which takes in entry the lists of frequencies (cm-1) and the temperature (K) and gives the KIE\r\n#isH is the vibration frequencies of the molecule containing the light isotope at the initial state\r\n#isD is the vibration frequencies of the molecule containing the heavy isotope at the initial state\r\n#tsH is the vibration frequencies of the molecule containing the light isotope at the transition state\r\n#tsD is the vibration frequencies of the molecule containing the heavy isotope at the transition state\r\n#T is the temperature in Kelvin\r\ndef KIE(isH,isD,tsH,tsD,T):\r\n error(isH,isD,tsH,tsD)\r\n #We calculate the sinh of h*x/(kB*T)\r\n UisH=Ui(isH,T).tolist()\r\n UtsH=Ui(tsH,T).tolist()\r\n UisD=Ui(isD,T).tolist()\r\n UtsD=Ui(tsD,T).tolist()\r\n #######################\r\n #We begin to calculate the ratio of the two imaginary frequencies\r\n op1=tsH[0]/tsD[0]\r\n Result=op1\r\n #We calculate the second factor\r\n Result=Result*Operation(tsH[1:],tsD[1:])\r\n #We calculate the third factor\r\n Result=Result*Operation(isD,isH)\r\n #We calculate the fourth factor\r\n Result=Result*Operation(UtsD[1:],UtsH[1:])\r\n #We calculate the fifth factor\r\n Result=Result*Operation(UisH,UisD) \r\n return Result\r\n\r\n\r\n####################################################################################\r\n\r\n\r\n"
] | [
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
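The Bigeleisen_KIE.py entry above exposes a single KIE(isH, isD, tsH, tsD, T) function. A hedged usage sketch follows; the import path is an assumption (the file placed on the Python path), and the frequency lists are invented cm^-1 values for demonstration, not real vibrational data.

from Bigeleisen_KIE import KIE  # assumes the file above sits on the import path

# Invented frequencies (cm^-1) purely for demonstration; index 0 of the transition-state
# lists is read by the code as the imaginary mode, so only its ratio enters directly.
isH = [3000.0, 1450.0, 1200.0]   # light isotopologue, initial state
isD = [2200.0, 1050.0, 900.0]    # heavy isotopologue, initial state
tsH = [1500.0, 2900.0, 1400.0]   # transition state, light isotopologue
tsD = [1100.0, 2150.0, 1000.0]   # transition state, heavy isotopologue
print(KIE(isH, isD, tsH, tsD, T=298.15))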
apexnetai/cifar-10-guide | [
"7c76f310e93da3a229ce9d66defd770ee1c7dc56"
] | [
"cifar10/custom_models.py"
] | [
"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\n\nclass CustomResnetV1(nn.Module):\n\n def __init__(self):\n super(CustomResnetV1, self).__init__()\n self.resnet = torchvision.models.resnet18(pretrained=True)\n self.resnet.conv1 = nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0), bias=False)\n self.resnet.fc = nn.Linear(512, 256)\n\n self.bn1a = nn.BatchNorm1d(256)\n self.fc11 = nn.Linear(256, 256)\n self.fc12 = nn.Linear(256, 256)\n self.bn1b = nn.BatchNorm1d(256)\n self.fc13 = nn.Linear(256, 256)\n self.fc14 = nn.Linear(256, 256)\n self.bn1c = nn.BatchNorm1d(256)\n self.fc15 = nn.Linear(256, 256)\n self.fc16 = nn.Linear(256, 256)\n self.fc_down1 = nn.Linear(256, 128)\n\n self.bn2a = nn.BatchNorm1d(128)\n self.fc21 = nn.Linear(128, 128)\n self.fc22 = nn.Linear(128, 128)\n self.bn2b = nn.BatchNorm1d(128)\n self.fc23 = nn.Linear(128, 128)\n self.fc24 = nn.Linear(128, 128)\n self.bn2c = nn.BatchNorm1d(128)\n self.fc25 = nn.Linear(128, 128)\n self.fc26 = nn.Linear(128, 128)\n self.fc_down2 = nn.Linear(128, 64)\n\n self.bn3a = nn.BatchNorm1d(64)\n self.fc31 = nn.Linear(64, 64)\n self.fc32 = nn.Linear(64, 64)\n self.bn3b = nn.BatchNorm1d(64)\n self.fc33 = nn.Linear(64, 64)\n self.fc34 = nn.Linear(64, 64)\n self.bn3c = nn.BatchNorm1d(64)\n self.fc35 = nn.Linear(64, 64)\n self.fc36 = nn.Linear(64, 64)\n\n self.fc4 = nn.Linear(64, 10)\n\n #self.drop1 = nn.Dropout2d(0.5)\n\n def forward(self, x):\n x_ = F.relu(self.resnet(x))\n\n x = self.bn1a(x_)\n x = F.relu(self.fc11(x))\n x = F.relu(self.fc12(x))\n x_ = torch.add(x, x_)\n x = self.bn1b(x_)\n x = F.relu(self.fc13(x))\n x = F.relu(self.fc14(x))\n x_ = torch.add(x, x_)\n x = self.bn1c(x_)\n x = F.relu(self.fc15(x))\n x = F.relu(self.fc16(x))\n x_ = self.fc_down1(torch.add(x, x_))\n\n x = self.bn2a(x_)\n x = F.relu(self.fc21(x))\n x = F.relu(self.fc22(x))\n x_ = torch.add(x, x_)\n x = self.bn2b(x_)\n x = F.relu(self.fc23(x))\n x = F.relu(self.fc24(x))\n x_ = torch.add(x, x_)\n x = self.bn2c(x_)\n x = F.relu(self.fc25(x))\n x = F.relu(self.fc26(x))\n x_ = self.fc_down2(torch.add(x, x_))\n\n x = self.bn3a(x_)\n x = F.relu(self.fc31(x))\n x = F.relu(self.fc32(x))\n x_ = torch.add(x, x_)\n x = self.bn3b(x_)\n x = F.relu(self.fc33(x))\n x = F.relu(self.fc34(x))\n x_ = torch.add(x, x_)\n x = self.bn3c(x_)\n x = F.relu(self.fc35(x))\n x = F.relu(self.fc36(x))\n x_ = torch.add(x, x_)\n\n x = self.fc4(x_)\n\n return F.log_softmax(x, dim=1)\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.add",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
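A quick shape check for CustomResnetV1 from the cifar-10-guide entry above: the model maps a CIFAR-sized batch to per-class log-probabilities. The sketch assumes the file is importable as cifar10.custom_models and that torchvision can download the pretrained resnet18 weights it wraps.

import torch
from cifar10.custom_models import CustomResnetV1  # assumed module path

model = CustomResnetV1()   # fetches the pretrained resnet18 backbone on first use
model.eval()               # eval mode so the BatchNorm1d layers accept a small batch
with torch.no_grad():
    x = torch.randn(4, 3, 32, 32)   # CIFAR-10 sized inputs
    out = model(x)
print(out.shape)           # expected: torch.Size([4, 10]), rows are log-probabilities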
zxpzhong/DR_3DFM | [
"6ef7d0d86813f4cc407a0d1011a2623e4775fbee"
] | [
"utils/Finger/tool/tools.py"
] | [
"# 定义全局变量和方法\nimport numpy as np\nimport math\n# import process.process_finger_data as pfd\n\n# 目前选用的图片尺寸\ncur_pic_size = [640, 400]\n# cur_pic_size = [1280, 800]\n# 相机索引对应相机名称\ncamera_index_to_name = ['A', 'B', 'C', 'D', 'E', 'F']\n# 6个相机的外参\ncamera_a_outer_para = np.mat([[0.574322111, 0.771054881, 0.275006333, 0.93847817],\n [0.565423192, -0.130698104, -0.814379899, -0.36935905],\n [-0.591988790, 0.623211341, -0.511035123, 4.78810628],\n [0, 0, 0, 1]])\ncamera_b_outer_para = np.mat([[0.456023570, 0.727006744, 0.513326112, 1.72205846],\n [-0.146061166, 0.630108915, -0.762645980, -0.30452329],\n [-0.877900131, 0.272807532, 0.393531969, 5.53092307],\n [0, 0, 0, 1]])\ncamera_c_outer_para = np.mat([[0.609183831, 0.528225460, 0.591500569, 1.59956459],\n [-0.738350101, 0.649953779, 0.179997814, 0.5030131],\n [-0.289368602, -0.546386263, 0.785956655, 5.58635091],\n [0, 0, 0, 1]])\ncamera_d_outer_para = np.mat([[0.771746127, 0.478767298, 0.418556793, 0.955855425],\n [-0.476877262, 0.000270229651, 0.878969854, 0.477556906],\n [0.420708915, -0.877941799, 0.228521787, 4.61760675],\n [0, 0, 0, 1]])\ncamera_e_outer_para = np.mat([[0.788882832, 0.555210653, 0.263448302, 0.71648894],\n [0.159053746, -0.598545227, 0.785140445, 0.00777088],\n [0.593604063, -0.577481378, -0.560490387, 4.30437514],\n [0, 0, 0, 1]])\ncamera_f_outer_para = np.mat([[0.712321206, 0.689000523, 0.133704068, 1.13938413],\n [0.694227260, -0.719684989, 0.0101009224, -0.28640104],\n [0.103184351, 0.0856259076, -0.990969825, 4.49819911],\n [0, 0, 0, 1]])\n\n# 六个相机的内参\ncamera_a_inner_para = np.mat([[967.5377197, 0, 703.1273732, 0],\n [0, 967.9393921, 351.0187561, 0],\n [0, 0, 1, 0]])\ncamera_b_inner_para = np.mat([[963.2991943, 0, 589.8122291, 0],\n [0, 962.7422485, 412.5244055, 0],\n [0, 0, 1, 0]])\ncamera_c_inner_para = np.mat([[967.4086914, 0, 612.7826353, 0],\n [0, 968.0758667, 451.7366286, 0],\n [0, 0, 1, 0]])\ncamera_d_inner_para = np.mat([[961.0868530, 0, 692.7282436, 0],\n [0, 960.6126708, 417.4375162, 0],\n [0, 0, 1, 0]])\ncamera_e_inner_para = np.mat([[955.4882812, 0, 730.3056525, 0],\n [0, 953.7589722, 451.5117967, 0],\n [0, 0, 1, 0]])\ncamera_f_inner_para = np.mat([[962.0779419, 0, 595.2503222, 0],\n [0, 961.0998535, 396.8389609, 0],\n [0, 0, 1, 0]])\n\n# 六个相机的投影矩阵为 投影矩阵=内参x外参\n# 所有相机的投影矩阵放到一个三维矩阵里(1280x800)\nall_camera_projection_mat = [\n [[1.39434783e+02, 1.18422163e+03, -9.32437833e+01, 4.27466162e+03],\n [3.39496212e+02, 9.22510264e+01, -9.67653298e+02, 1.32319794e+03],\n [-5.91988790e-01, 6.23211341e-01, -5.11035123e-01, 4.78810628e+00]],\n [[-7.85090956e+01, 8.61230229e+02, 7.26596598e+02, 4.92106359e+03],\n [-5.02774485e+02, 7.19172239e+02, -5.71889964e+02, 1.98846331e+03],\n [-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]],\n [[4.12009678e+02, 1.76193887e+02, 1.05384338e+03, 4.97065152e+03],\n [-8.45497311e+02, 3.82381880e+02, 5.29296949e+02, 3.01051417e+03],\n [-2.89368602e-01, -5.46386263e-01, 7.85956655e-01, 5.58635091e+00]],\n [[1.03315200e+03, -1.48038125e+02, 5.60572927e+02, 4.11740670e+03],\n [-2.82474656e+02, -3.66226258e+02, 9.39743146e+02, 2.38630951e+03],\n [4.20708915e-01, -8.77941799e-01, 2.28521787e-01, 4.61760675e+00]],\n [[1.18728070e+03, 1.08759358e+02, -1.57607533e+02, 3.82810628e+03],\n [4.19718174e+02, -8.31607535e+02, 4.95766722e+02, 1.95088770e+03],\n [5.93604063e-01, -5.77481378e-01, -5.60490387e-01, 4.30437514e+00]],\n [[7.46729038e+02, 7.13841054e+02, -4.61241373e+02, 3.77373081e+03],\n [7.08169289e+02, -6.57709441e+02, -3.83547441e+02, 1.50980066e+03],\n [1.03184351e-01, 
8.56259076e-02, -9.90969825e-01, 4.49819911e+00]]\n]\n# camera_a_projection_mat = np.mat([[1.39434783e+02, 1.18422163e+03, -9.32437833e+01, 4.27466162e+03],\n# [3.39496212e+02, 9.22510264e+01, -9.67653298e+02, 1.32319794e+03],\n# [-5.91988790e-01, 6.23211341e-01, -5.11035123e-01, 4.78810628e+00]])\n#\n# camera_b_projection_mat = np.mat([[-7.85090956e+01, 8.61230229e+02, 7.26596598e+02, 4.92106359e+03],\n# [-5.02774485e+02, 7.19172239e+02, -5.71889964e+02, 1.98846331e+03],\n# [-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]])\n#\n# camera_c_projection_mat = np.mat([[4.12009678e+02, 1.76193887e+02, 1.05384338e+03, 4.97065152e+03],\n# [-8.45497311e+02, 3.82381880e+02, 5.29296949e+02, 3.01051417e+03],\n# [-2.89368602e-01, -5.46386263e-01, 7.85956655e-01, 5.58635091e+00]])\n#\n# camera_d_projection_mat = np.mat([[1.03315200e+03, -1.48038125e+02, 5.60572927e+02, 4.11740670e+03],\n# [-2.82474656e+02, -3.66226258e+02, 9.39743146e+02, 2.38630951e+03],\n# [4.20708915e-01, -8.77941799e-01, 2.28521787e-01, 4.61760675e+00]])\n#\n# camera_e_projection_mat = np.mat([[1.18728070e+03, 1.08759358e+02, -1.57607533e+02, 3.82810628e+03],\n# [4.19718174e+02, -8.31607535e+02, 4.95766722e+02, 1.95088770e+03],\n# [5.93604063e-01, -5.77481378e-01, -5.60490387e-01, 4.30437514e+00]])\n#\n# camera_f_projection_mat = np.mat([[7.46729038e+02, 7.13841054e+02, -4.61241373e+02, 3.77373081e+03],\n# [7.08169289e+02, -6.57709441e+02, -3.83547441e+02, 1.50980066e+03],\n# [1.03184351e-01, 8.56259076e-02, -9.90969825e-01, 4.49819911e+00]])\n\n# 将图片缩小为640*400后的相机内参为: 四个参数都除以二\ncamera_a_inner_para_640_400 = np.mat([[483.76885985, 0, 351.5636866, 0],\n [0, 483.96969605, 175.50937805, 0],\n [0, 0, 1, 0]])\ncamera_b_inner_para_640_400 = np.mat([[481.64959715, 0, 294.90611455, 0],\n [0, 481.37112425, 206.26220275, 0],\n [0, 0, 1, 0]])\ncamera_c_inner_para_640_400 = np.mat([[483.7043457, 0, 306.39131765, 0],\n [0, 484.03793335, 225.8683143, 0],\n [0, 0, 1, 0]])\ncamera_d_inner_para_640_400 = np.mat([[480.5434265, 0, 346.3641218, 0],\n [0, 480.3063354, 208.7187581, 0],\n [0, 0, 1, 0]])\ncamera_e_inner_para_640_400 = np.mat([[477.7441406, 0, 365.15282625, 0],\n [0, 476.8794861, 225.75589835, 0],\n [0, 0, 1, 0]])\ncamera_f_inner_para_640_400 = np.mat([[481.03897095, 0, 297.6251611, 0],\n [0, 480.54992675, 198.41948045, 0],\n [0, 0, 1, 0]])\n# 将图片resize为640*400后的投影矩阵\nall_camera_projection_mat_640_400 = [\n [[6.97173914e+01, 5.92110817e+02, - 4.66218917e+01, 2.13733081e+03],\n [1.69748106e+02, 4.61255132e+01, - 4.83826649e+02, 6.61598968e+02],\n [-5.91988790e-01, 6.23211341e-01, - 5.11035123e-01, 4.78810628e+00]],\n [[-3.92545478e+01, 4.30615115e+02, 3.63298299e+02, 2.46053180e+03],\n [-2.51387243e+02, 3.59586119e+02, - 2.85944982e+02, 9.94231657e+02],\n [-8.77900131e-01, 2.72807532e-01, 3.93531969e-01, 5.53092307e+00]],\n [[2.06004839e+02, 8.80969434e+01, 5.26921691e+02, 2.48532576e+03],\n [-4.22748655e+02, 1.91190940e+02, 2.64648475e+02, 1.50525708e+03],\n [-2.89368602e-01, - 5.46386263e-01, 7.85956655e-01, 5.58635091e+00]],\n [[5.16576002e+02, - 7.40190623e+01, 2.80286464e+02, 2.05870335e+03],\n [-1.41237328e+02, - 1.83113129e+02, 4.69871573e+02, 1.19315475e+03],\n [4.20708915e-01, - 8.77941799e-01, 2.28521787e-01, 4.61760675e+00]],\n [[5.93640352e+02, 5.43796790e+01, - 7.88037663e+01, 1.91405314e+03],\n [2.09859087e+02, - 4.15803768e+02, 2.47883361e+02, 9.75443850e+02],\n [5.93604063e-01, - 5.77481378e-01, - 5.60490387e-01, 4.30437514e+00]],\n [[3.73364519e+02, 3.56920527e+02, - 2.30620687e+02, 1.88686540e+03],\n 
[3.54084644e+02, - 3.28854721e+02, - 1.91773720e+02, 7.54900332e+02],\n [1.03184351e-01, 8.56259076e-02, - 9.90969825e-01, 4.49819911e+00]]\n]\n\n# 六个相机在世界坐标系下的坐标\ncameras_coordinate = [[2.50436065, -3.75589484, 1.88800446],\n [4.02581981, -2.56894275, -3.29281609],\n [1.01348544, 1.88043939, -5.4273143],\n [-2.45261002, 3.5962286, -1.87506165],\n [-3.12155638, 2.09254542, 2.21770186],\n [-1.07692383, -1.37631717, 4.3081322]]\n# 六个相机组成的空间平面方程参数 AX+BY+CZ+D=0\ncamera_plane_para = [19.467678495159983, 18.098947303577706, 10.253452426300939, 1.884526845005233]\n\n# 六个相机映射到同一平面后的相机坐标,这里选用的是BCD三个相机作为相机平面,因此只需要将AEF映射到平面\ncameras_coordinate_mapping = [[2.45592658, -3.80092362, 1.86249467],\n [4.02581981, -2.56894275, -3.29281609],\n [1.01348544, 1.88043939, -5.4273143],\n [-2.45261002, 3.5962286, -1.87506165],\n [-3.16297766, 2.05403639, 2.19588564],\n [-1.08130466, -1.38038999, 4.30582486]]\n\n# 六张bmp图片的像素信息,读取后放在全局变量中,避免每次都去重新读取\nbmp_pixel = [[], [], [], [], [], []]\n# 哈希表,存储顶点对应的像素uv信息\nmap_vertex_to_texture = dict()\n\n# 哈希表,存储三角面片顶点对应的vt的index(行数)\nmap_vertex_to_vt_index = dict()\n# 每个相机对应的三角面片 如faces_belong_camera_A=[[1,3,5],[2,3,5]...]\n# faces_belong_camera_A = []\n# faces_belong_camera_B = []\n# faces_belong_camera_C = []\n# faces_belong_camera_D = []\n# faces_belong_camera_E = []\n# faces_belong_camera_F = []\n\n# 所有相机对应的三角面片,A相机放在0索引,以此类推\nfaces_belong_camera = [[], [], [], [], [], []]\n\n# 所有相机对应的bmp应该crop出的范围,[Umin,Vmin,Umax,Vmax],初始化时给相反的最大最小值,这里取的10000和-100,因为不可能有超过这个范围的了\nbmp_crop_ranges = [[10000, 10000, -100, -100], [10000, 10000, -100, -100],\n [10000, 10000, -100, -100], [10000, 10000, -100, -100],\n [10000, 10000, -100, -100], [10000, 10000, -100, -100]]\n# 提前计算出crop的宽度u_width和高度v_height,先初始化为0\ncrops_width_and_height = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]\n# 在得到crops_width_and_height后,提前计算出各个相机crop出的图在png中v所占的范围比重(0-1),例如A:0-0.25,B:0.25-0.4...F:0.8-1\ncrops_v_scale_in_png = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]\n# uvmap_png的长度和宽度\nuv_map_size = [0, 0]\n\n# face的索引 寻找bug时使用\nface_index = 1\n\n\n# 打印数据点\ndef print_data_points(data_points):\n for li in data_points:\n print(li)\n\n\n# 计算两个向量的夹角的余弦\n# 公式为cos<a,b>=a.b/|a||b|. 
a.b=(x1x2+y1y2+z1z2) |a|=√(x1^2+y1^2+z1^2), |b|=√(x2^2+y2^2+z2^2).\ndef calculate_cosine(vector1, vector2):\n a = vector1[0] * vector2[0] + vector1[1] * vector2[1] + vector1[2] * vector2[2]\n b = math.sqrt(vector1[0] * vector1[0] + vector1[1] * vector1[1] + vector1[2] * vector1[2])\n c = math.sqrt(vector2[0] * vector2[0] + vector2[1] * vector2[1] + vector2[2] * vector2[2])\n res = a / (b * c)\n return res\n\n\n# 计算两个向量的向量积\n# AB=(x1,y1,z1) CD=(x2,y2,z2) cross(AB,CD)=(y1*z2-y2z1,z1x2-z2x1,x1y2-x2y1)\ndef calculate_vector_product(vector1, vector2):\n vector_product = [vector1[1] * vector2[2] - vector1[2] * vector2[1],\n vector1[2] * vector2[0] - vector1[0] * vector2[2],\n vector1[0] * vector2[1] - vector1[1] * vector2[0]]\n return vector_product\n\n\n# 点到空间平面的映射点(投影)\ndef get_mapping_point_in_camera_plane(point, camera_plane_para):\n a = camera_plane_para[0]\n b = camera_plane_para[1]\n c = camera_plane_para[2]\n d = camera_plane_para[3]\n x = point[0]\n y = point[1]\n z = point[2]\n # 避免重复计算,不知python是否已有优化\n a_ = a * a\n b_ = b * b\n c_ = c * c\n temp = a_ + b_ + c_\n x_ = ((b_ + c_) * x - a * (b * y + c * z + d)) / temp\n y_ = ((a_ + c_) * y - b * (a * x + c * z + d)) / temp\n z_ = ((a_ + b_) * z - c * (a * x + b * y + d)) / temp\n point_ = [x_, y_, z_]\n return point_\n\n\n# # 全局变量中部分数据的由来(在main函数中直接使用了)(因为外参已经固定,所以部分数据基本不会改变,减少计算量)\n# def pre_process():\n# # 求出六个相机在世界坐标系下的坐标\n# cameras_coordinate = pfd.get_cameras_coordinate()\n# # 求出相机参数平面\n# camera_plane_para = pfd.get_camera_plane(cameras_coordinate)\n# # 获取A,E,F的映射点\n# camera_a_point = get_mapping_point_in_camera_plane(cameras_coordinate[0], camera_plane_para)\n# camera_e_point = get_mapping_point_in_camera_plane(cameras_coordinate[4], camera_plane_para)\n# camera_f_point = get_mapping_point_in_camera_plane(cameras_coordinate[5], camera_plane_para)\n# # 六个相机归到一个平面之后的坐标:BCD不变,AEF映射到BCD平面\n# camera_point_mapping = [camera_a_point, cameras_coordinate[1], cameras_coordinate[2],\n# cameras_coordinate[3], camera_e_point, camera_f_point]\n# camera_point_mapping = np.array(camera_point_mapping)\n"
] | [
[
"numpy.mat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
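get_mapping_point_in_camera_plane in the tools.py entry above is the standard orthogonal projection of a point onto the plane AX+BY+CZ+D=0. The self-contained numpy check below uses the equivalent vector form, reusing the row's camera_plane_para and camera A coordinate purely as example inputs.

import numpy as np

# Orthogonal projection of a point onto the plane a*x + b*y + c*z + d = 0; the closed
# form in get_mapping_point_in_camera_plane expands to exactly this vector expression.
def project_onto_plane(point, plane):
    a, b, c, d = plane
    n = np.array([a, b, c], dtype=float)
    p = np.asarray(point, dtype=float)
    return p - (np.dot(n, p) + d) / np.dot(n, n) * n

camera_plane_para = [19.467678495159983, 18.098947303577706,
                     10.253452426300939, 1.884526845005233]
camera_a = [2.50436065, -3.75589484, 1.88800446]
mapped = project_onto_plane(camera_a, camera_plane_para)
print(mapped)  # close to the first entry of cameras_coordinate_mapping in the row
print(np.dot(camera_plane_para[:3], mapped) + camera_plane_para[3])  # residual ~ 0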
jojotenya/LAMOL | [
"03c31d9f0c7bf71295bc2d362ddf40a7656956e1"
] | [
"train.py"
] | [
"import torch\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nfrom pytorch_transformers import AdamW, WEIGHTS_NAME, WarmupLinearSchedule\nimport csv\nimport numpy as np\nimport os\nimport logging\nfrom fp16 import FP16_Module, FP16_Optimizer\nfrom parallel import DataParallelModel, DataParallelCriterion\nfrom collections import OrderedDict\nfrom utils import *\nfrom settings import args, TASK_DICT, init_logging, MODEL_CONFIG, MODEL_CLASS, SPECIAL_TOKENS, CONFIG_CLASS\nfrom settings import TOKENIZER, SPECIAL_TOKEN_IDS, FILL_VAL, SAVE_NAME, FINAL_SAVE_NAME, TOKENS_WEIGHT, CONFIG_NAME\nfrom scheduler import AnnealingLR\nfrom regularizers import REG_TYPES, REG_TYPE_KEYS, Weight_Regularized_AdamW, Weight_Regularized_SGD\nfrom torch.nn import CrossEntropyLoss\nlogger = logging.getLogger(__name__)\n\n\ndef train(task_ids, model):\n tasks = [args.tasks[task_id] for task_id in task_ids]\n\n logger.info(\"start to train { task: %s, seq train type: %s }\" % (tasks, args.seq_train_type))\n model_dir = get_model_dir(tasks)\n make_dir(model_dir)\n\n train_dataset = [TASK_DICT[t][\"train\"] for t in tasks]\n train_extra_data = []\n if \"lll\" in args.seq_train_type and task_ids[0] > 0 and not args.skip_tasks:\n prev_task = args.tasks[task_ids[0]-1]\n with torch.no_grad():\n create_extra_data(tasks[0], prev_task, model, train_extra_data)\n elif \"gem\" in args.seq_train_type and task_ids[0] > 0: \n get_real_data(tasks[0], train_extra_data, accum=False, encode=True)\n args.memory_data.append(train_extra_data)\n train_extra_data = []\n logger.info('extra training data size: {}'.format(len(train_extra_data)))\n\n if not model:\n # which_model_to_load = model_dir if os.path.isfile(os.path.join(model_dir, FINAL_SAVE_NAME)) else args.model_name\n model = MODEL_CLASS.from_pretrained(args.model_name).cuda()\n model.resize_token_embeddings(len(TOKENIZER))\n if not args.fp32:\n model = FP16_Module(model)\n\n gen_token = get_gen_token(tasks[0])\n TOKENIZER.add_tokens([gen_token])\n TOKENIZER.save_pretrained(model_dir)\n SPECIAL_TOKENS[tasks[0]] = gen_token\n SPECIAL_TOKEN_IDS[tasks[0]] = TOKENIZER.convert_tokens_to_ids(gen_token)\n logger.info('gen token = {} , gen token id = {}'.format(gen_token, SPECIAL_TOKEN_IDS[tasks[0]]))\n MODEL_CONFIG.vocab_size = len(TOKENIZER)\n MODEL_CONFIG.to_json_file(os.path.join(model_dir,CONFIG_NAME))\n global TOKENS_WEIGHT\n if len(TOKENIZER) != TOKENS_WEIGHT.shape[0]:\n TOKENS_WEIGHT = torch.cat((TOKENS_WEIGHT, torch.ones([1]).cuda()))\n\n if args.skip_tasks and len(tasks) == 1:\n logger.info(\"*********** skip task: {} ***********\".format(tasks[0]))\n if tasks[0] in args.skip_tasks:\n if len(args.skip_tasks) == 1:\n model_dir = get_model_dir(tasks)\n model_path = os.path.join(model_dir, FINAL_SAVE_NAME)\n config_path = os.path.join(model_dir,CONFIG_NAME)\n model_config = CONFIG_CLASS.from_json_file(config_path)\n model = MODEL_CLASS(model_config).cuda()\n state_dict = torch.load(model_path)\n model.load_state_dict(state_dict)\n if not args.fp32:\n model = FP16_Module(model)\n if args.seq_train_type in REG_TYPE_KEYS:\n logger.info(\"calulating reg_params ...\")\n train_qadata = QADataset(train_dataset, \"train\", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)\n max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)\n train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n parallel_model = DataParallelModel(WrapModel(model), args.device_ids)\n regularizer = REG_TYPES[args.seq_train_type](model, 
parallel_model, [train_dataloader], tasks[0])\n regularizer.task_start_do()\n regularizer.task_end_do()\n torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))\n logger.info(\"done reg_params!\")\n args.skip_tasks.remove(tasks[0])\n return model\n\n model.resize_token_embeddings(len(TOKENIZER))\n\n if not args.fp32: # again because resize_token_embeddings makes embedding layer fp32\n model = FP16_Module(model)\n\n parallel_model = DataParallelModel(WrapModel(model), args.device_ids)\n\n train_qadata = QADataset(train_dataset, \"train\", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)\n max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)\n train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n if not args.unbound and args.seq_train_type != \"multitask\":\n #n_train_epochs = TASK_DICT[tasks[0]][\"n_train_epochs\"]\n n_train_epochs = args.n_train_epochs[tasks[0]]\n else:\n n_train_epochs = args.n_train_epochs['_'.join(tasks)]\n n_train_optimization_steps = len(train_qadata) * n_train_epochs\n logger.info('len of train dataset: {} , max train batch size {} , num of opt steps: {}'.format(\n len(train_qadata), max_train_batch_size, n_train_optimization_steps))\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n if \"gem\" in args.seq_train_type:\n model.task_id = task_ids[0]\n if not hasattr(model, \"grad_dims\"):\n model.grad_dims = []\n for param in model.parameters():\n model.grad_dims.append(param.data.numel())\n if not hasattr(model, \"grads\"):\n model.grads = torch.zeros(sum(model.grad_dims),len(args.tasks))\n model.grads = model.grads.cuda()\n\n if args.seq_train_type in REG_TYPE_KEYS:\n optimizer = Weight_Regularized_AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n else:\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n if not args.fp32:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=None, dynamic_loss_scale=True,\n dynamic_loss_args={'scale_window': 100, 'min_scale': 1, 'delayed_shift': 2})\n\n scheduler = AnnealingLR(optimizer, start_lr=args.learning_rate, warmup_iter=int(args.n_warmup_ratio*len(train_qadata)),\n num_iters=int(n_train_optimization_steps), decay_style=args.decay_style)\n train_loss_fct = DataParallelCriterion(CrossEntropyLoss(ignore_index=FILL_VAL, weight=TOKENS_WEIGHT), args.device_ids)\n\n if args.seq_train_type in REG_TYPE_KEYS:\n copy_train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n prev_task = args.tasks[task_ids[0]-1]\n regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [copy_train_dataloader], tasks[0], prev_task)\n regularizer.task_start_do()\n\n tot_n_steps = 0\n train_once = TrainStep(model, optimizer, scheduler)\n if \"gem\" in args.seq_train_type and task_ids[0] != 0:\n gem_step = GEMStep(model, parallel_model, train_loss_fct, optimizer)\n model.train()\n for ep in range(n_train_epochs):\n cum_loss, cum_qa_loss, cum_lm_loss, cur_n_inputs = 0, 0, 0, 0\n for n_steps, (_, _, cqa, _, Y, gen_X, gen_Y) in enumerate(train_dataloader):\n\n n_inputs = sum(_cqa.shape[0] for _cqa in cqa)\n\n for i in range(len(cqa)):\n 
cqa[i] = (cqa[i].to(args.device_ids[i]),)\n Y[i] = Y[i].to(args.device_ids[i])\n gen_X[i] = (gen_X[i].to(args.device_ids[i]),)\n gen_Y[i] = gen_Y[i].to(args.device_ids[i])\n\n losses = get_losses(parallel_model, cqa, Y, gen_X, gen_Y, train_loss_fct)\n loss = sum(losses)\n if \"gem\" in args.seq_train_type and task_ids[0] != 0:\n gem_step(task_ids[0])\n train_once(loss, n_inputs)\n\n qa_loss = losses[0].item() * n_inputs\n lm_loss = losses[1].item() * n_inputs\n cum_loss += (qa_loss + lm_loss)\n cum_qa_loss += qa_loss\n cum_lm_loss += lm_loss\n cur_n_inputs += n_inputs\n\n if (n_steps + 1 ) % args.logging_steps == 0:\n logger.info('progress {:.3f} , lr {:.1E} , loss {:.3f} , qa loss {:.3f} , lm loss {:.3f} , avg batch size {:.1f}'.format(\n ep + cur_n_inputs/len(train_qadata), scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs,\n cur_n_inputs/(n_steps + 1)\n ))\n\n torch.save(model.state_dict(), os.path.join(model_dir, SAVE_NAME+str(ep+1)))\n tot_n_steps += (n_steps + 1)\n logger.info('epoch {}/{} done , tot steps {} , lr {:.1E} , loss {:.2f} , qa loss {:.2f} , lm loss {:.2f} , avg batch size {:.1f}'.format(\n ep+1, n_train_epochs, tot_n_steps, scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs, cur_n_inputs/(n_steps+1)\n ))\n\n # task end do for reg\n if args.seq_train_type in REG_TYPE_KEYS:\n regularizer.task_end_do()\n torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))\n\n return model\n\n\nif __name__ == '__main__':\n\n if not args.debug:\n logging.getLogger(\"pytorch_transformers\").setLevel(logging.WARNING)\n logging.getLogger(\"pytorch_transformers.tokenization_utils\").setLevel(logging.CRITICAL)\n\n make_dir(args.model_dir_root)\n\n init_logging(os.path.join(args.model_dir_root, 'log_train.txt'))\n logger.info('args = {}'.format(str(args)))\n\n model = None\n if args.seq_train_type == \"multitask\":\n model = train(list(range(len(args.tasks))), model)\n else:\n if args.unbound:\n TASK_DICT = lll_unbound_setting(split_size=args.unbound)\n for task_id in range(len(args.tasks)):\n model = train([task_id], model)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.no_grad",
"torch.ones",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
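train.py in the LAMOL entry above builds its optimizer from two parameter groups, excluding biases and LayerNorm parameters from weight decay. The standalone sketch below reproduces just that grouping pattern; it uses torch.optim.AdamW instead of the pytorch_transformers AdamW from the row to stay dependency-free, and TinyNet is a made-up module whose attribute names are chosen to match the no_decay patterns.

import torch
from torch import nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(16, 32)
        self.LayerNorm = nn.LayerNorm(32)  # attribute named to match the no_decay patterns
        self.fc2 = nn.Linear(32, 4)

    def forward(self, x):
        return self.fc2(self.LayerNorm(torch.relu(self.fc1(x))))

model = TinyNet()
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']  # same patterns as the row
params = list(model.named_parameters())
grouped = [
    {'params': [p for n, p in params if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},
    {'params': [p for n, p in params if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]
optimizer = torch.optim.AdamW(grouped, lr=1e-4, eps=1e-8)
print([len(g['params']) for g in grouped])  # -> [2, 4]: only the Linear weights decay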
RTHMaK/git-squash-master | [
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf",
"76c4c8437dd18114968e69a698f4581927fcdabf"
] | [
"scikit-learn-weighted_kde/examples/svm/plot_separating_hyperplane_unbalanced.py",
"scikit-learn-weighted_kde/examples/cluster/plot_face_segmentation.py",
"scikit-learn-weighted_kde/examples/feature_selection/plot_f_test_vs_mi.py",
"scikit-learn-weighted_kde/benchmarks/bench_lasso.py",
"scikit-learn-weighted_kde/examples/decomposition/plot_pca_vs_fa_model_selection.py",
"scikit-learn-weighted_kde/examples/applications/plot_stock_market.py",
"scikit-learn-weighted_kde/examples/applications/plot_tomography_l1_reconstruction.py",
"scikit-learn-weighted_kde/examples/ensemble/plot_voting_probas.py",
"scikit-learn-weighted_kde/examples/model_selection/plot_underfitting_overfitting.py",
"scikit-learn-weighted_kde/sklearn/linear_model/tests/test_ridge.py",
"scikit-learn-weighted_kde/examples/svm/plot_separating_hyperplane.py",
"scikit-learn-weighted_kde/examples/gaussian_process/plot_gpc_xor.py",
"scikit-learn-weighted_kde/sklearn/feature_selection/from_model.py",
"scikit-learn-weighted_kde/sklearn/utils/multiclass.py",
"scikit-learn-weighted_kde/sklearn/model_selection/tests/test_search.py"
] | [
"\"\"\"\n=================================================\nSVM: Separating hyperplane for unbalanced classes\n=================================================\n\nFind the optimal separating hyperplane using an SVC for classes that\nare unbalanced.\n\nWe first find the separating plane with a plain SVC and then plot\n(dashed) the separating hyperplane with automatically correction for\nunbalanced classes.\n\n.. currentmodule:: sklearn.linear_model\n\n.. note::\n\n This example will also work by replacing ``SVC(kernel=\"linear\")``\n with ``SGDClassifier(loss=\"hinge\")``. Setting the ``loss`` parameter\n of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour\n such as that of a SVC with a linear kernel.\n\n For example try instead of the ``SVC``::\n\n clf = SGDClassifier(n_iter=100, alpha=0.01)\n\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\n#from sklearn.linear_model import SGDClassifier\n\n# we create 40 separable points\nrng = np.random.RandomState(0)\nn_samples_1 = 1000\nn_samples_2 = 100\nX = np.r_[1.5 * rng.randn(n_samples_1, 2),\n 0.5 * rng.randn(n_samples_2, 2) + [2, 2]]\ny = [0] * (n_samples_1) + [1] * (n_samples_2)\n\n# fit the model and get the separating hyperplane\nclf = svm.SVC(kernel='linear', C=1.0)\nclf.fit(X, y)\n\nw = clf.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(-5, 5)\nyy = a * xx - clf.intercept_[0] / w[1]\n\n\n# get the separating hyperplane using weighted classes\nwclf = svm.SVC(kernel='linear', class_weight={1: 10})\nwclf.fit(X, y)\n\nww = wclf.coef_[0]\nwa = -ww[0] / ww[1]\nwyy = wa * xx - wclf.intercept_[0] / ww[1]\n\n# plot separating hyperplanes and samples\nh0 = plt.plot(xx, yy, 'k-', label='no weights')\nh1 = plt.plot(xx, wyy, 'k--', label='with weights')\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)\nplt.legend()\n\nplt.axis('tight')\nplt.show()\n",
"\"\"\"\n===================================================\nSegmenting the picture of a raccoon face in regions\n===================================================\n\nThis example uses :ref:`spectral_clustering` on a graph created from\nvoxel-to-voxel difference on an image to break this image into multiple\npartly-homogeneous regions.\n\nThis procedure (spectral clustering on an image) is an efficient\napproximate solution for finding normalized graph cuts.\n\nThere are two options to assign labels:\n\n* with 'kmeans' spectral clustering will cluster samples in the embedding space\n using a kmeans algorithm\n* whereas 'discrete' will iteratively search for the closest partition\n space to the embedding space.\n\"\"\"\nprint(__doc__)\n\n# Author: Gael Varoquaux <[email protected]>, Brian Cheung\n# License: BSD 3 clause\n\nimport time\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nfrom sklearn.feature_extraction import image\nfrom sklearn.cluster import spectral_clustering\nfrom sklearn.utils.testing import SkipTest\nfrom sklearn.utils.fixes import sp_version\n\nif sp_version < (0, 12):\n raise SkipTest(\"Skipping because SciPy version earlier than 0.12.0 and \"\n \"thus does not include the scipy.misc.face() image.\")\n\n\n# load the raccoon face as a numpy array\ntry:\n face = sp.face(gray=True)\nexcept AttributeError:\n # Newer versions of scipy have face in misc\n from scipy import misc\n face = misc.face(gray=True)\n\n# Resize it to 10% of the original size to speed up the processing\nface = sp.misc.imresize(face, 0.10) / 255.\n\n# Convert the image into a graph with the value of the gradient on the\n# edges.\ngraph = image.img_to_graph(face)\n\n# Take a decreasing function of the gradient: an exponential\n# The smaller beta is, the more independent the segmentation is of the\n# actual image. For beta=1, the segmentation is close to a voronoi\nbeta = 5\neps = 1e-6\ngraph.data = np.exp(-beta * graph.data / graph.data.std()) + eps\n\n# Apply spectral clustering (this step goes much faster if you have pyamg\n# installed)\nN_REGIONS = 25\n\n#############################################################################\n# Visualize the resulting regions\n\nfor assign_labels in ('kmeans', 'discretize'):\n t0 = time.time()\n labels = spectral_clustering(graph, n_clusters=N_REGIONS,\n assign_labels=assign_labels, random_state=1)\n t1 = time.time()\n labels = labels.reshape(face.shape)\n\n plt.figure(figsize=(5, 5))\n plt.imshow(face, cmap=plt.cm.gray)\n for l in range(N_REGIONS):\n plt.contour(labels == l, contours=1,\n colors=[plt.cm.spectral(l / float(N_REGIONS))])\n plt.xticks(())\n plt.yticks(())\n title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))\n print(title)\n plt.title(title)\nplt.show()\n",
"\"\"\"\n===========================================\nComparison of F-test and mutual information\n===========================================\n\nThis example illustrates the differences between univariate F-test statistics\nand mutual information.\n\nWe consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the\ntarget depends on them as follows:\n\ny = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third features is completely irrelevant.\n\nThe code below plots the dependency of y against individual x_i and normalized\nvalues of univariate F-tests statistics and mutual information.\n\nAs F-test captures only linear dependency, it rates x_1 as the most\ndiscriminative feature. On the other hand, mutual information can capture any\nkind of dependency between variables and it rates x_2 as the most\ndiscriminative feature, which probably agrees better with our intuitive\nperception for this example. Both methods correctly marks x_3 as irrelevant.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_selection import f_regression, mutual_info_regression\n\nnp.random.seed(0)\nX = np.random.rand(1000, 3)\ny = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)\n\nf_test, _ = f_regression(X, y)\nf_test /= np.max(f_test)\n\nmi = mutual_info_regression(X, y)\nmi /= np.max(mi)\n\nplt.figure(figsize=(15, 5))\nfor i in range(3):\n plt.subplot(1, 3, i + 1)\n plt.scatter(X[:, i], y)\n plt.xlabel(\"$x_{}$\".format(i + 1), fontsize=14)\n if i == 0:\n plt.ylabel(\"$y$\", fontsize=14)\n plt.title(\"F-test={:.2f}, MI={:.2f}\".format(f_test[i], mi[i]),\n fontsize=16)\nplt.show()\n\n",
"\"\"\"\nBenchmarks of Lasso vs LassoLars\n\nFirst, we fix a training set and increase the number of\nsamples. Then we plot the computation time as function of\nthe number of samples.\n\nIn the second benchmark, we increase the number of dimensions of the\ntraining set. Then we plot the computation time as function of\nthe number of dimensions.\n\nIn both cases, only 10% of the features are informative.\n\"\"\"\nimport gc\nfrom time import time\nimport numpy as np\n\nfrom sklearn.datasets.samples_generator import make_regression\n\n\ndef compute_bench(alpha, n_samples, n_features, precompute):\n lasso_results = []\n lars_lasso_results = []\n\n it = 0\n\n for ns in n_samples:\n for nf in n_features:\n it += 1\n print('==================')\n print('Iteration %s of %s' % (it, max(len(n_samples),\n len(n_features))))\n print('==================')\n n_informative = nf // 10\n X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,\n n_informative=n_informative,\n noise=0.1, coef=True)\n\n X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data\n\n gc.collect()\n print(\"- benchmarking Lasso\")\n clf = Lasso(alpha=alpha, fit_intercept=False,\n precompute=precompute)\n tstart = time()\n clf.fit(X, Y)\n lasso_results.append(time() - tstart)\n\n gc.collect()\n print(\"- benchmarking LassoLars\")\n clf = LassoLars(alpha=alpha, fit_intercept=False,\n normalize=False, precompute=precompute)\n tstart = time()\n clf.fit(X, Y)\n lars_lasso_results.append(time() - tstart)\n\n return lasso_results, lars_lasso_results\n\n\nif __name__ == '__main__':\n from sklearn.linear_model import Lasso, LassoLars\n import pylab as pl\n\n alpha = 0.01 # regularization parameter\n\n n_features = 10\n list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)\n lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,\n [n_features], precompute=True)\n\n pl.figure('scikit-learn LASSO benchmark results')\n pl.subplot(211)\n pl.plot(list_n_samples, lasso_results, 'b-',\n label='Lasso')\n pl.plot(list_n_samples, lars_lasso_results, 'r-',\n label='LassoLars')\n pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))\n pl.legend(loc='upper left')\n pl.xlabel('number of samples')\n pl.ylabel('Time (s)')\n pl.axis('tight')\n\n n_samples = 2000\n list_n_features = np.linspace(500, 3000, 5).astype(np.int)\n lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],\n list_n_features, precompute=False)\n pl.subplot(212)\n pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')\n pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')\n pl.title('%d samples, alpha=%s' % (n_samples, alpha))\n pl.legend(loc='upper left')\n pl.xlabel('number of features')\n pl.ylabel('Time (s)')\n pl.axis('tight')\n pl.show()\n",
"\"\"\"\n===============================================================\nModel selection with Probabilistic PCA and Factor Analysis (FA)\n===============================================================\n\nProbabilistic PCA and Factor Analysis are probabilistic models.\nThe consequence is that the likelihood of new data can be used\nfor model selection and covariance estimation.\nHere we compare PCA and FA with cross-validation on low rank data corrupted\nwith homoscedastic noise (noise variance\nis the same for each feature) or heteroscedastic noise (noise variance\nis the different for each feature). In a second step we compare the model\nlikelihood to the likelihoods obtained from shrinkage covariance estimators.\n\nOne can observe that with homoscedastic noise both FA and PCA succeed\nin recovering the size of the low rank subspace. The likelihood with PCA\nis higher than FA in this case. However PCA fails and overestimates\nthe rank when heteroscedastic noise is present. Under appropriate\ncircumstances the low rank models are more likely than shrinkage models.\n\nThe automatic estimation from\nAutomatic Choice of Dimensionality for PCA. NIPS 2000: 598-604\nby Thomas P. Minka is also compared.\n\n\"\"\"\n\n# Authors: Alexandre Gramfort\n# Denis A. Engemann\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import linalg\n\nfrom sklearn.decomposition import PCA, FactorAnalysis\nfrom sklearn.covariance import ShrunkCovariance, LedoitWolf\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\n\nprint(__doc__)\n\n###############################################################################\n# Create the data\n\nn_samples, n_features, rank = 1000, 50, 10\nsigma = 1.\nrng = np.random.RandomState(42)\nU, _, _ = linalg.svd(rng.randn(n_features, n_features))\nX = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)\n\n# Adding homoscedastic noise\nX_homo = X + sigma * rng.randn(n_samples, n_features)\n\n# Adding heteroscedastic noise\nsigmas = sigma * rng.rand(n_features) + sigma / 2.\nX_hetero = X + rng.randn(n_samples, n_features) * sigmas\n\n###############################################################################\n# Fit the models\n\nn_components = np.arange(0, n_features, 5) # options for n_components\n\n\ndef compute_scores(X):\n pca = PCA(svd_solver='full')\n fa = FactorAnalysis()\n\n pca_scores, fa_scores = [], []\n for n in n_components:\n pca.n_components = n\n fa.n_components = n\n pca_scores.append(np.mean(cross_val_score(pca, X)))\n fa_scores.append(np.mean(cross_val_score(fa, X)))\n\n return pca_scores, fa_scores\n\n\ndef shrunk_cov_score(X):\n shrinkages = np.logspace(-2, 0, 30)\n cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})\n return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))\n\n\ndef lw_score(X):\n return np.mean(cross_val_score(LedoitWolf(), X))\n\n\nfor X, title in [(X_homo, 'Homoscedastic Noise'),\n (X_hetero, 'Heteroscedastic Noise')]:\n pca_scores, fa_scores = compute_scores(X)\n n_components_pca = n_components[np.argmax(pca_scores)]\n n_components_fa = n_components[np.argmax(fa_scores)]\n\n pca = PCA(svd_solver='full', n_components='mle')\n pca.fit(X)\n n_components_pca_mle = pca.n_components_\n\n print(\"best n_components by PCA CV = %d\" % n_components_pca)\n print(\"best n_components by FactorAnalysis CV = %d\" % n_components_fa)\n print(\"best n_components by PCA MLE = %d\" % n_components_pca_mle)\n\n plt.figure()\n plt.plot(n_components, 
pca_scores, 'b', label='PCA scores')\n plt.plot(n_components, fa_scores, 'r', label='FA scores')\n plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')\n plt.axvline(n_components_pca, color='b',\n label='PCA CV: %d' % n_components_pca, linestyle='--')\n plt.axvline(n_components_fa, color='r',\n label='FactorAnalysis CV: %d' % n_components_fa,\n linestyle='--')\n plt.axvline(n_components_pca_mle, color='k',\n label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')\n\n # compare with other covariance estimators\n plt.axhline(shrunk_cov_score(X), color='violet',\n label='Shrunk Covariance MLE', linestyle='-.')\n plt.axhline(lw_score(X), color='orange',\n label='LedoitWolf MLE' % n_components_pca_mle, linestyle='-.')\n\n plt.xlabel('nb of components')\n plt.ylabel('CV scores')\n plt.legend(loc='lower right')\n plt.title(title)\n\nplt.show()\n",
"\"\"\"\n=======================================\nVisualizing the stock market structure\n=======================================\n\nThis example employs several unsupervised learning techniques to extract\nthe stock market structure from variations in historical quotes.\n\nThe quantity that we use is the daily variation in quote price: quotes\nthat are linked tend to cofluctuate during a day.\n\n.. _stock_market:\n\nLearning a graph structure\n--------------------------\n\nWe use sparse inverse covariance estimation to find which quotes are\ncorrelated conditionally on the others. Specifically, sparse inverse\ncovariance gives us a graph, that is a list of connection. For each\nsymbol, the symbols that it is connected too are those useful to explain\nits fluctuations.\n\nClustering\n----------\n\nWe use clustering to group together quotes that behave similarly. Here,\namongst the :ref:`various clustering techniques <clustering>` available\nin the scikit-learn, we use :ref:`affinity_propagation` as it does\nnot enforce equal-size clusters, and it can choose automatically the\nnumber of clusters from the data.\n\nNote that this gives us a different indication than the graph, as the\ngraph reflects conditional relations between variables, while the\nclustering reflects marginal properties: variables clustered together can\nbe considered as having a similar impact at the level of the full stock\nmarket.\n\nEmbedding in 2D space\n---------------------\n\nFor visualization purposes, we need to lay out the different symbols on a\n2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D\nembedding.\n\n\nVisualization\n-------------\n\nThe output of the 3 models are combined in a 2D graph where nodes\nrepresents the stocks and edges the:\n\n- cluster labels are used to define the color of the nodes\n- the sparse covariance model is used to display the strength of the edges\n- the 2D embedding is used to position the nodes in the plan\n\nThis example has a fair amount of visualization-related code, as\nvisualization is crucial here to display the graph. One of the challenge\nis to position the labels minimizing overlap. 
For this we use an\nheuristic based on the direction of the nearest neighbor along each\naxis.\n\"\"\"\nprint(__doc__)\n\n# Author: Gael Varoquaux [email protected]\n# License: BSD 3 clause\n\nimport datetime\n\nimport numpy as np\nimport matplotlib.pyplot as plt\ntry:\n from matplotlib.finance import quotes_historical_yahoo_ochl\nexcept ImportError:\n # quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4\n from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl\nfrom matplotlib.collections import LineCollection\nfrom sklearn import cluster, covariance, manifold\n\n###############################################################################\n# Retrieve the data from Internet\n\n# Choose a time period reasonably calm (not too long ago so that we get\n# high-tech firms, and before the 2008 crash)\nd1 = datetime.datetime(2003, 1, 1)\nd2 = datetime.datetime(2008, 1, 1)\n\n# kraft symbol has now changed from KFT to MDLZ in yahoo\nsymbol_dict = {\n 'TOT': 'Total',\n 'XOM': 'Exxon',\n 'CVX': 'Chevron',\n 'COP': 'ConocoPhillips',\n 'VLO': 'Valero Energy',\n 'MSFT': 'Microsoft',\n 'IBM': 'IBM',\n 'TWX': 'Time Warner',\n 'CMCSA': 'Comcast',\n 'CVC': 'Cablevision',\n 'YHOO': 'Yahoo',\n 'DELL': 'Dell',\n 'HPQ': 'HP',\n 'AMZN': 'Amazon',\n 'TM': 'Toyota',\n 'CAJ': 'Canon',\n 'MTU': 'Mitsubishi',\n 'SNE': 'Sony',\n 'F': 'Ford',\n 'HMC': 'Honda',\n 'NAV': 'Navistar',\n 'NOC': 'Northrop Grumman',\n 'BA': 'Boeing',\n 'KO': 'Coca Cola',\n 'MMM': '3M',\n 'MCD': 'Mc Donalds',\n 'PEP': 'Pepsi',\n 'MDLZ': 'Kraft Foods',\n 'K': 'Kellogg',\n 'UN': 'Unilever',\n 'MAR': 'Marriott',\n 'PG': 'Procter Gamble',\n 'CL': 'Colgate-Palmolive',\n 'GE': 'General Electrics',\n 'WFC': 'Wells Fargo',\n 'JPM': 'JPMorgan Chase',\n 'AIG': 'AIG',\n 'AXP': 'American express',\n 'BAC': 'Bank of America',\n 'GS': 'Goldman Sachs',\n 'AAPL': 'Apple',\n 'SAP': 'SAP',\n 'CSCO': 'Cisco',\n 'TXN': 'Texas instruments',\n 'XRX': 'Xerox',\n 'LMT': 'Lookheed Martin',\n 'WMT': 'Wal-Mart',\n 'WBA': 'Walgreen',\n 'HD': 'Home Depot',\n 'GSK': 'GlaxoSmithKline',\n 'PFE': 'Pfizer',\n 'SNY': 'Sanofi-Aventis',\n 'NVS': 'Novartis',\n 'KMB': 'Kimberly-Clark',\n 'R': 'Ryder',\n 'GD': 'General Dynamics',\n 'RTN': 'Raytheon',\n 'CVS': 'CVS',\n 'CAT': 'Caterpillar',\n 'DD': 'DuPont de Nemours'}\n\nsymbols, names = np.array(list(symbol_dict.items())).T\n\nquotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)\n for symbol in symbols]\n\nopen = np.array([q.open for q in quotes]).astype(np.float)\nclose = np.array([q.close for q in quotes]).astype(np.float)\n\n# The daily variations of the quotes are what carry most information\nvariation = close - open\n\n###############################################################################\n# Learn a graphical structure from the correlations\nedge_model = covariance.GraphLassoCV()\n\n# standardize the time series: using correlations rather than covariance\n# is more efficient for structure recovery\nX = variation.copy().T\nX /= X.std(axis=0)\nedge_model.fit(X)\n\n###############################################################################\n# Cluster using affinity propagation\n\n_, labels = cluster.affinity_propagation(edge_model.covariance_)\nn_labels = labels.max()\n\nfor i in range(n_labels + 1):\n print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))\n\n###############################################################################\n# Find a low-dimension embedding for visualization: find the best position of\n# the 
nodes (the stocks) on a 2D plane\n\n# We use a dense eigen_solver to achieve reproducibility (arpack is\n# initiated with random vectors that we don't control). In addition, we\n# use a large number of neighbors to capture the large-scale structure.\nnode_position_model = manifold.LocallyLinearEmbedding(\n n_components=2, eigen_solver='dense', n_neighbors=6)\n\nembedding = node_position_model.fit_transform(X.T).T\n\n###############################################################################\n# Visualization\nplt.figure(1, facecolor='w', figsize=(10, 8))\nplt.clf()\nax = plt.axes([0., 0., 1., 1.])\nplt.axis('off')\n\n# Display a graph of the partial correlations\npartial_correlations = edge_model.precision_.copy()\nd = 1 / np.sqrt(np.diag(partial_correlations))\npartial_correlations *= d\npartial_correlations *= d[:, np.newaxis]\nnon_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)\n\n# Plot the nodes using the coordinates of our embedding\nplt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,\n cmap=plt.cm.spectral)\n\n# Plot the edges\nstart_idx, end_idx = np.where(non_zero)\n#a sequence of (*line0*, *line1*, *line2*), where::\n# linen = (x0, y0), (x1, y1), ... (xm, ym)\nsegments = [[embedding[:, start], embedding[:, stop]]\n for start, stop in zip(start_idx, end_idx)]\nvalues = np.abs(partial_correlations[non_zero])\nlc = LineCollection(segments,\n zorder=0, cmap=plt.cm.hot_r,\n norm=plt.Normalize(0, .7 * values.max()))\nlc.set_array(values)\nlc.set_linewidths(15 * values)\nax.add_collection(lc)\n\n# Add a label to each node. The challenge here is that we want to\n# position the labels to avoid overlap with other labels\nfor index, (name, label, (x, y)) in enumerate(\n zip(names, labels, embedding.T)):\n\n dx = x - embedding[0]\n dx[index] = 1\n dy = y - embedding[1]\n dy[index] = 1\n this_dx = dx[np.argmin(np.abs(dy))]\n this_dy = dy[np.argmin(np.abs(dx))]\n if this_dx > 0:\n horizontalalignment = 'left'\n x = x + .002\n else:\n horizontalalignment = 'right'\n x = x - .002\n if this_dy > 0:\n verticalalignment = 'bottom'\n y = y + .002\n else:\n verticalalignment = 'top'\n y = y - .002\n plt.text(x, y, name, size=10,\n horizontalalignment=horizontalalignment,\n verticalalignment=verticalalignment,\n bbox=dict(facecolor='w',\n edgecolor=plt.cm.spectral(label / float(n_labels)),\n alpha=.6))\n\nplt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),\n embedding[0].max() + .10 * embedding[0].ptp(),)\nplt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),\n embedding[1].max() + .03 * embedding[1].ptp())\n\nplt.show()\n",
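The file above couples sparse inverse covariance estimation (graphical lasso) with affinity propagation on stock return variations. Its data loader (`matplotlib.finance`) and `GraphLassoCV` no longer exist under those names in current libraries, so the following is a minimal offline sketch of the same pairing on synthetic factor-driven series; the synthetic data, seed, and the `GraphicalLassoCV` rename are assumptions, not part of the original file.

import numpy as np
from sklearn.covariance import GraphicalLassoCV
from sklearn.cluster import AffinityPropagation

# Synthetic stand-in for the close-minus-open variations: 3 latent factors
# driving 15 "stocks" over 250 "days", standardized column-wise.
rng = np.random.RandomState(0)
X = rng.randn(250, 3) @ rng.randn(3, 15) + 0.3 * rng.randn(250, 15)
X /= X.std(axis=0)

# Sparse inverse covariance with built-in cross-validation over alpha.
edge_model = GraphicalLassoCV().fit(X)

# Cluster the variables from the estimated covariance structure.
labels = AffinityPropagation(affinity="precomputed",
                             random_state=0).fit_predict(edge_model.covariance_)
for k in range(labels.max() + 1):
    print("Cluster %d: columns %s" % (k, np.where(labels == k)[0]))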
"\"\"\"\n======================================================================\nCompressive sensing: tomography reconstruction with L1 prior (Lasso)\n======================================================================\n\nThis example shows the reconstruction of an image from a set of parallel\nprojections, acquired along different angles. Such a dataset is acquired in\n**computed tomography** (CT).\n\nWithout any prior information on the sample, the number of projections\nrequired to reconstruct the image is of the order of the linear size\n``l`` of the image (in pixels). For simplicity we consider here a sparse\nimage, where only pixels on the boundary of objects have a non-zero\nvalue. Such data could correspond for example to a cellular material.\nNote however that most images are sparse in a different basis, such as\nthe Haar wavelets. Only ``l/7`` projections are acquired, therefore it is\nnecessary to use prior information available on the sample (its\nsparsity): this is an example of **compressive sensing**.\n\nThe tomography projection operation is a linear transformation. In\naddition to the data-fidelity term corresponding to a linear regression,\nwe penalize the L1 norm of the image to account for its sparsity. The\nresulting optimization problem is called the :ref:`lasso`. We use the\nclass :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent\nalgorithm. Importantly, this implementation is more computationally efficient\non a sparse matrix, than the projection operator used here.\n\nThe reconstruction with L1 penalization gives a result with zero error\n(all pixels are successfully labeled with 0 or 1), even if noise was\nadded to the projections. In comparison, an L2 penalization\n(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling\nerrors for the pixels. Important artifacts are observed on the\nreconstructed image, contrary to the L1 penalization. 
Note in particular\nthe circular artifact separating the pixels in the corners, that have\ncontributed to fewer projections than the central disk.\n\"\"\"\n\nprint(__doc__)\n\n# Author: Emmanuelle Gouillart <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy import ndimage\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\nimport matplotlib.pyplot as plt\n\n\ndef _weights(x, dx=1, orig=0):\n x = np.ravel(x)\n floor_x = np.floor((x - orig) / dx)\n alpha = (x - orig - floor_x * dx) / dx\n return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))\n\n\ndef _generate_center_coordinates(l_x):\n X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)\n center = l_x / 2.\n X += 0.5 - center\n Y += 0.5 - center\n return X, Y\n\n\ndef build_projection_operator(l_x, n_dir):\n \"\"\" Compute the tomography design matrix.\n\n Parameters\n ----------\n\n l_x : int\n linear size of image array\n\n n_dir : int\n number of angles at which projections are acquired.\n\n Returns\n -------\n p : sparse matrix of shape (n_dir l_x, l_x**2)\n \"\"\"\n X, Y = _generate_center_coordinates(l_x)\n angles = np.linspace(0, np.pi, n_dir, endpoint=False)\n data_inds, weights, camera_inds = [], [], []\n data_unravel_indices = np.arange(l_x ** 2)\n data_unravel_indices = np.hstack((data_unravel_indices,\n data_unravel_indices))\n for i, angle in enumerate(angles):\n Xrot = np.cos(angle) * X - np.sin(angle) * Y\n inds, w = _weights(Xrot, dx=1, orig=X.min())\n mask = np.logical_and(inds >= 0, inds < l_x)\n weights += list(w[mask])\n camera_inds += list(inds[mask] + i * l_x)\n data_inds += list(data_unravel_indices[mask])\n proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))\n return proj_operator\n\n\ndef generate_synthetic_data():\n \"\"\" Synthetic binary data \"\"\"\n rs = np.random.RandomState(0)\n n_pts = 36.\n x, y = np.ogrid[0:l, 0:l]\n mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2\n mask = np.zeros((l, l))\n points = l * rs.rand(2, n_pts)\n mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1\n mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)\n res = np.logical_and(mask > mask.mean(), mask_outer)\n return res - ndimage.binary_erosion(res)\n\n\n# Generate synthetic images, and projections\nl = 128\nproj_operator = build_projection_operator(l, l / 7.)\ndata = generate_synthetic_data()\nproj = proj_operator * data.ravel()[:, np.newaxis]\nproj += 0.15 * np.random.randn(*proj.shape)\n\n# Reconstruction with L2 (Ridge) penalization\nrgr_ridge = Ridge(alpha=0.2)\nrgr_ridge.fit(proj_operator, proj.ravel())\nrec_l2 = rgr_ridge.coef_.reshape(l, l)\n\n# Reconstruction with L1 (Lasso) penalization\n# the best value of alpha was determined using cross validation\n# with LassoCV\nrgr_lasso = Lasso(alpha=0.001)\nrgr_lasso.fit(proj_operator, proj.ravel())\nrec_l1 = rgr_lasso.coef_.reshape(l, l)\n\nplt.figure(figsize=(8, 3.3))\nplt.subplot(131)\nplt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')\nplt.axis('off')\nplt.title('original image')\nplt.subplot(132)\nplt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')\nplt.title('L2 penalization')\nplt.axis('off')\nplt.subplot(133)\nplt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')\nplt.title('L1 penalization')\nplt.axis('off')\n\nplt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,\n right=1)\n\nplt.show()\n",
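Stripped of the tomography geometry, the claim in the docstring above is that an L1 penalty recovers a sparse signal from underdetermined linear measurements while an L2 penalty does not. A minimal sketch of that contrast with a random measurement matrix standing in for the projection operator; the sizes, noise level, and alpha values below are illustrative assumptions.

import numpy as np
from sklearn.linear_model import Lasso, Ridge

rng = np.random.RandomState(0)
n_features, n_measurements, n_nonzero = 200, 60, 8

# Sparse ground-truth signal and an underdetermined measurement matrix.
w_true = np.zeros(n_features)
w_true[rng.choice(n_features, n_nonzero, replace=False)] = rng.randn(n_nonzero)
A = rng.randn(n_measurements, n_features)
y = A @ w_true + 0.01 * rng.randn(n_measurements)

# L1 (Lasso) concentrates the coefficients on the true support;
# L2 (Ridge) spreads energy over all features.
w_l1 = Lasso(alpha=0.01).fit(A, y).coef_
w_l2 = Ridge(alpha=0.2).fit(A, y).coef_
print("L1 reconstruction error:", np.linalg.norm(w_l1 - w_true))
print("L2 reconstruction error:", np.linalg.norm(w_l2 - w_true))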
"\"\"\"\n===========================================================\nPlot class probabilities calculated by the VotingClassifier\n===========================================================\n\nPlot the class probabilities of the first sample in a toy dataset\npredicted by three different classifiers and averaged by the\n`VotingClassifier`.\n\nFirst, three examplary classifiers are initialized (`LogisticRegression`,\n`GaussianNB`, and `RandomForestClassifier`) and used to initialize a\nsoft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that\nthe predicted probabilities of the `RandomForestClassifier` count 5 times\nas much as the weights of the other classifiers when the averaged probability\nis calculated.\n\nTo visualize the probability weighting, we fit each classifier on the training\nset and plot the predicted class probabilities for the first sample in this\nexample dataset.\n\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier\n\nclf1 = LogisticRegression(random_state=123)\nclf2 = RandomForestClassifier(random_state=123)\nclf3 = GaussianNB()\nX = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])\ny = np.array([1, 1, 2, 2])\n\neclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],\n voting='soft',\n weights=[1, 1, 5])\n\n# predict class probabilities for all classifiers\nprobas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]\n\n# get class probabilities for the first sample in the dataset\nclass1_1 = [pr[0, 0] for pr in probas]\nclass2_1 = [pr[0, 1] for pr in probas]\n\n\n# plotting\n\nN = 4 # number of groups\nind = np.arange(N) # group positions\nwidth = 0.35 # bar width\n\nfig, ax = plt.subplots()\n\n# bars for classifier 1-3\np1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')\np2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')\n\n# bars for VotingClassifier\np3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')\np4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')\n\n# plot annotations\nplt.axvline(2.8, color='k', linestyle='dashed')\nax.set_xticks(ind + width)\nax.set_xticklabels(['LogisticRegression\\nweight 1',\n 'GaussianNB\\nweight 1',\n 'RandomForestClassifier\\nweight 5',\n 'VotingClassifier\\n(average probabilities)'],\n rotation=40,\n ha='right')\nplt.ylim([0, 1])\nplt.title('Class probabilities for sample 1 by different classifiers')\nplt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')\nplt.show()\n",
"\"\"\"\n============================\nUnderfitting vs. Overfitting\n============================\n\nThis example demonstrates the problems of underfitting and overfitting and\nhow we can use linear regression with polynomial features to approximate\nnonlinear functions. The plot shows the function that we want to approximate,\nwhich is a part of the cosine function. In addition, the samples from the\nreal function and the approximations of different models are displayed. The\nmodels have polynomial features of different degrees. We can see that a\nlinear function (polynomial with degree 1) is not sufficient to fit the\ntraining samples. This is called **underfitting**. A polynomial of degree 4\napproximates the true function almost perfectly. However, for higher degrees\nthe model will **overfit** the training data, i.e. it learns the noise of the\ntraining data.\nWe evaluate quantitatively **overfitting** / **underfitting** by using\ncross-validation. We calculate the mean squared error (MSE) on the validation\nset, the higher, the less likely the model generalizes correctly from the\ntraining data.\n\"\"\"\n\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_score\n\nnp.random.seed(0)\n\nn_samples = 30\ndegrees = [1, 4, 15]\n\ntrue_fun = lambda X: np.cos(1.5 * np.pi * X)\nX = np.sort(np.random.rand(n_samples))\ny = true_fun(X) + np.random.randn(n_samples) * 0.1\n\nplt.figure(figsize=(14, 5))\nfor i in range(len(degrees)):\n ax = plt.subplot(1, len(degrees), i + 1)\n plt.setp(ax, xticks=(), yticks=())\n\n polynomial_features = PolynomialFeatures(degree=degrees[i],\n include_bias=False)\n linear_regression = LinearRegression()\n pipeline = Pipeline([(\"polynomial_features\", polynomial_features),\n (\"linear_regression\", linear_regression)])\n pipeline.fit(X[:, np.newaxis], y)\n\n # Evaluate the models using crossvalidation\n scores = cross_val_score(pipeline, X[:, np.newaxis], y,\n scoring=\"mean_squared_error\", cv=10)\n\n X_test = np.linspace(0, 1, 100)\n plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\")\n plt.plot(X_test, true_fun(X_test), label=\"True function\")\n plt.scatter(X, y, label=\"Samples\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.xlim((0, 1))\n plt.ylim((-2, 2))\n plt.legend(loc=\"best\")\n plt.title(\"Degree {}\\nMSE = {:.2e}(+/- {:.2e})\".format(\n degrees[i], -scores.mean(), scores.std()))\nplt.show()\n",
"import numpy as np\nimport scipy.sparse as sp\nfrom scipy import linalg\nfrom itertools import product\n\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.utils.testing import assert_warns\n\nfrom sklearn import datasets\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import get_scorer\n\nfrom sklearn.linear_model.base import LinearRegression\nfrom sklearn.linear_model.ridge import ridge_regression\nfrom sklearn.linear_model.ridge import Ridge\nfrom sklearn.linear_model.ridge import _RidgeGCV\nfrom sklearn.linear_model.ridge import RidgeCV\nfrom sklearn.linear_model.ridge import RidgeClassifier\nfrom sklearn.linear_model.ridge import RidgeClassifierCV\nfrom sklearn.linear_model.ridge import _solve_cholesky\nfrom sklearn.linear_model.ridge import _solve_cholesky_kernel\nfrom sklearn.datasets import make_regression\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold\n\nfrom sklearn.utils import check_random_state\nfrom sklearn.datasets import make_multilabel_classification\n\ndiabetes = datasets.load_diabetes()\nX_diabetes, y_diabetes = diabetes.data, diabetes.target\nind = np.arange(X_diabetes.shape[0])\nrng = np.random.RandomState(0)\nrng.shuffle(ind)\nind = ind[:200]\nX_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]\n\niris = datasets.load_iris()\n\nX_iris = sp.csr_matrix(iris.data)\ny_iris = iris.target\n\nDENSE_FILTER = lambda X: X\nSPARSE_FILTER = lambda X: sp.csr_matrix(X)\n\n\ndef test_ridge():\n # Ridge regression convergence test using score\n # TODO: for this test to be robust, we should use a dataset instead\n # of np.random.\n rng = np.random.RandomState(0)\n alpha = 1.0\n\n for solver in (\"svd\", \"sparse_cg\", \"cholesky\", \"lsqr\", \"sag\"):\n # With more samples than features\n n_samples, n_features = 6, 5\n y = rng.randn(n_samples)\n X = rng.randn(n_samples, n_features)\n\n ridge = Ridge(alpha=alpha, solver=solver)\n ridge.fit(X, y)\n assert_equal(ridge.coef_.shape, (X.shape[1], ))\n assert_greater(ridge.score(X, y), 0.47)\n\n if solver in (\"cholesky\", \"sag\"):\n # Currently the only solvers to support sample_weight.\n ridge.fit(X, y, sample_weight=np.ones(n_samples))\n assert_greater(ridge.score(X, y), 0.47)\n\n # With more features than samples\n n_samples, n_features = 5, 10\n y = rng.randn(n_samples)\n X = rng.randn(n_samples, n_features)\n ridge = Ridge(alpha=alpha, solver=solver)\n ridge.fit(X, y)\n assert_greater(ridge.score(X, y), .9)\n\n if solver in (\"cholesky\", \"sag\"):\n # Currently the only solvers to support sample_weight.\n ridge.fit(X, y, sample_weight=np.ones(n_samples))\n assert_greater(ridge.score(X, y), 0.9)\n\n\ndef test_primal_dual_relationship():\n y = y_diabetes.reshape(-1, 1)\n coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])\n K = np.dot(X_diabetes, X_diabetes.T)\n dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])\n coef2 = np.dot(X_diabetes.T, dual_coef).T\n assert_array_almost_equal(coef, coef2)\n\n\ndef test_ridge_singular():\n # test on a singular matrix\n rng = np.random.RandomState(0)\n 
n_samples, n_features = 6, 6\n y = rng.randn(n_samples // 2)\n y = np.concatenate((y, y))\n X = rng.randn(n_samples // 2, n_features)\n X = np.concatenate((X, X), axis=0)\n\n ridge = Ridge(alpha=0)\n ridge.fit(X, y)\n assert_greater(ridge.score(X, y), 0.9)\n\n\ndef test_ridge_regression_sample_weights():\n rng = np.random.RandomState(0)\n\n for solver in (\"cholesky\", ):\n for n_samples, n_features in ((6, 5), (5, 10)):\n for alpha in (1.0, 1e-2):\n y = rng.randn(n_samples)\n X = rng.randn(n_samples, n_features)\n sample_weight = 1.0 + rng.rand(n_samples)\n\n coefs = ridge_regression(X, y,\n alpha=alpha,\n sample_weight=sample_weight,\n solver=solver)\n\n # Sample weight can be implemented via a simple rescaling\n # for the square loss.\n coefs2 = ridge_regression(\n X * np.sqrt(sample_weight)[:, np.newaxis],\n y * np.sqrt(sample_weight),\n alpha=alpha, solver=solver)\n assert_array_almost_equal(coefs, coefs2)\n\n\ndef test_ridge_sample_weights():\n # TODO: loop over sparse data as well\n\n rng = np.random.RandomState(0)\n param_grid = product((1.0, 1e-2), (True, False),\n ('svd', 'cholesky', 'lsqr', 'sparse_cg'))\n\n for n_samples, n_features in ((6, 5), (5, 10)):\n\n y = rng.randn(n_samples)\n X = rng.randn(n_samples, n_features)\n sample_weight = 1.0 + rng.rand(n_samples)\n\n for (alpha, intercept, solver) in param_grid:\n\n # Ridge with explicit sample_weight\n est = Ridge(alpha=alpha, fit_intercept=intercept, solver=solver)\n est.fit(X, y, sample_weight=sample_weight)\n coefs = est.coef_\n inter = est.intercept_\n\n # Closed form of the weighted regularized least square\n # theta = (X^T W X + alpha I)^(-1) * X^T W y\n W = np.diag(sample_weight)\n if intercept is False:\n X_aug = X\n I = np.eye(n_features)\n else:\n dummy_column = np.ones(shape=(n_samples, 1))\n X_aug = np.concatenate((dummy_column, X), axis=1)\n I = np.eye(n_features + 1)\n I[0, 0] = 0\n\n cf_coefs = linalg.solve(X_aug.T.dot(W).dot(X_aug) + alpha * I,\n X_aug.T.dot(W).dot(y))\n\n if intercept is False:\n assert_array_almost_equal(coefs, cf_coefs)\n else:\n assert_array_almost_equal(coefs, cf_coefs[1:])\n assert_almost_equal(inter, cf_coefs[0])\n\n\ndef test_ridge_shapes():\n # Test shape of coef_ and intercept_\n rng = np.random.RandomState(0)\n n_samples, n_features = 5, 10\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples)\n Y1 = y[:, np.newaxis]\n Y = np.c_[y, 1 + y]\n\n ridge = Ridge()\n\n ridge.fit(X, y)\n assert_equal(ridge.coef_.shape, (n_features,))\n assert_equal(ridge.intercept_.shape, ())\n\n ridge.fit(X, Y1)\n assert_equal(ridge.coef_.shape, (1, n_features))\n assert_equal(ridge.intercept_.shape, (1, ))\n\n ridge.fit(X, Y)\n assert_equal(ridge.coef_.shape, (2, n_features))\n assert_equal(ridge.intercept_.shape, (2, ))\n\n\ndef test_ridge_intercept():\n # Test intercept with multiple targets GH issue #708\n rng = np.random.RandomState(0)\n n_samples, n_features = 5, 10\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples)\n Y = np.c_[y, 1. 
+ y]\n\n ridge = Ridge()\n\n ridge.fit(X, y)\n intercept = ridge.intercept_\n\n ridge.fit(X, Y)\n assert_almost_equal(ridge.intercept_[0], intercept)\n assert_almost_equal(ridge.intercept_[1], intercept + 1.)\n\n\ndef test_toy_ridge_object():\n # Test BayesianRegression ridge classifier\n # TODO: test also n_samples > n_features\n X = np.array([[1], [2]])\n Y = np.array([1, 2])\n reg = Ridge(alpha=0.0)\n reg.fit(X, Y)\n X_test = [[1], [2], [3], [4]]\n assert_almost_equal(reg.predict(X_test), [1., 2, 3, 4])\n\n assert_equal(len(reg.coef_.shape), 1)\n assert_equal(type(reg.intercept_), np.float64)\n\n Y = np.vstack((Y, Y)).T\n\n reg.fit(X, Y)\n X_test = [[1], [2], [3], [4]]\n\n assert_equal(len(reg.coef_.shape), 2)\n assert_equal(type(reg.intercept_), np.ndarray)\n\n\ndef test_ridge_vs_lstsq():\n # On alpha=0., Ridge and OLS yield the same solution.\n\n rng = np.random.RandomState(0)\n # we need more samples than features\n n_samples, n_features = 5, 4\n y = rng.randn(n_samples)\n X = rng.randn(n_samples, n_features)\n\n ridge = Ridge(alpha=0., fit_intercept=False)\n ols = LinearRegression(fit_intercept=False)\n\n ridge.fit(X, y)\n ols.fit(X, y)\n assert_almost_equal(ridge.coef_, ols.coef_)\n\n ridge.fit(X, y)\n ols.fit(X, y)\n assert_almost_equal(ridge.coef_, ols.coef_)\n\n\ndef test_ridge_individual_penalties():\n # Tests the ridge object using individual penalties\n\n rng = np.random.RandomState(42)\n\n n_samples, n_features, n_targets = 20, 10, 5\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples, n_targets)\n\n penalties = np.arange(n_targets)\n\n coef_cholesky = np.array([\n Ridge(alpha=alpha, solver=\"cholesky\").fit(X, target).coef_\n for alpha, target in zip(penalties, y.T)])\n\n coefs_indiv_pen = [\n Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_\n for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]\n for coef_indiv_pen in coefs_indiv_pen:\n assert_array_almost_equal(coef_cholesky, coef_indiv_pen)\n\n # Test error is raised when number of targets and penalties do not match.\n ridge = Ridge(alpha=penalties[:-1])\n assert_raises(ValueError, ridge.fit, X, y)\n\n\ndef _test_ridge_loo(filter_):\n # test that can work with both dense or sparse matrices\n n_samples = X_diabetes.shape[0]\n\n ret = []\n\n ridge_gcv = _RidgeGCV(fit_intercept=False)\n ridge = Ridge(alpha=1.0, fit_intercept=False)\n\n # generalized cross-validation (efficient leave-one-out)\n decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)\n errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)\n values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)\n\n # brute-force leave-one-out: remove one example at a time\n errors2 = []\n values2 = []\n for i in range(n_samples):\n sel = np.arange(n_samples) != i\n X_new = X_diabetes[sel]\n y_new = y_diabetes[sel]\n ridge.fit(X_new, y_new)\n value = ridge.predict([X_diabetes[i]])[0]\n error = (y_diabetes[i] - value) ** 2\n errors2.append(error)\n values2.append(value)\n\n # check that efficient and brute-force LOO give same results\n assert_almost_equal(errors, errors2)\n assert_almost_equal(values, values2)\n\n # generalized cross-validation (efficient leave-one-out,\n # SVD variation)\n decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)\n errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)\n values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)\n\n # check that efficient and SVD efficient LOO give same results\n assert_almost_equal(errors, errors3)\n assert_almost_equal(values, values3)\n\n # 
check best alpha\n ridge_gcv.fit(filter_(X_diabetes), y_diabetes)\n alpha_ = ridge_gcv.alpha_\n ret.append(alpha_)\n\n # check that we get same best alpha with custom loss_func\n f = ignore_warnings\n scoring = make_scorer(mean_squared_error, greater_is_better=False)\n ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)\n f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)\n assert_equal(ridge_gcv2.alpha_, alpha_)\n\n # check that we get same best alpha with custom score_func\n func = lambda x, y: -mean_squared_error(x, y)\n scoring = make_scorer(func)\n ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)\n f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)\n assert_equal(ridge_gcv3.alpha_, alpha_)\n\n # check that we get same best alpha with a scorer\n scorer = get_scorer('mean_squared_error')\n ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)\n ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)\n assert_equal(ridge_gcv4.alpha_, alpha_)\n\n # check that we get same best alpha with sample weights\n ridge_gcv.fit(filter_(X_diabetes), y_diabetes,\n sample_weight=np.ones(n_samples))\n assert_equal(ridge_gcv.alpha_, alpha_)\n\n # simulate several responses\n Y = np.vstack((y_diabetes, y_diabetes)).T\n\n ridge_gcv.fit(filter_(X_diabetes), Y)\n Y_pred = ridge_gcv.predict(filter_(X_diabetes))\n ridge_gcv.fit(filter_(X_diabetes), y_diabetes)\n y_pred = ridge_gcv.predict(filter_(X_diabetes))\n\n assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,\n Y_pred, decimal=5)\n\n return ret\n\n\ndef _test_ridge_cv(filter_):\n ridge_cv = RidgeCV()\n ridge_cv.fit(filter_(X_diabetes), y_diabetes)\n ridge_cv.predict(filter_(X_diabetes))\n\n assert_equal(len(ridge_cv.coef_.shape), 1)\n assert_equal(type(ridge_cv.intercept_), np.float64)\n\n cv = KFold(5)\n ridge_cv.set_params(cv=cv)\n ridge_cv.fit(filter_(X_diabetes), y_diabetes)\n ridge_cv.predict(filter_(X_diabetes))\n\n assert_equal(len(ridge_cv.coef_.shape), 1)\n assert_equal(type(ridge_cv.intercept_), np.float64)\n\n\ndef _test_ridge_diabetes(filter_):\n ridge = Ridge(fit_intercept=False)\n ridge.fit(filter_(X_diabetes), y_diabetes)\n return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)\n\n\ndef _test_multi_ridge_diabetes(filter_):\n # simulate several responses\n Y = np.vstack((y_diabetes, y_diabetes)).T\n n_features = X_diabetes.shape[1]\n\n ridge = Ridge(fit_intercept=False)\n ridge.fit(filter_(X_diabetes), Y)\n assert_equal(ridge.coef_.shape, (2, n_features))\n Y_pred = ridge.predict(filter_(X_diabetes))\n ridge.fit(filter_(X_diabetes), y_diabetes)\n y_pred = ridge.predict(filter_(X_diabetes))\n assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,\n Y_pred, decimal=3)\n\n\ndef _test_ridge_classifiers(filter_):\n n_classes = np.unique(y_iris).shape[0]\n n_features = X_iris.shape[1]\n for reg in (RidgeClassifier(), RidgeClassifierCV()):\n reg.fit(filter_(X_iris), y_iris)\n assert_equal(reg.coef_.shape, (n_classes, n_features))\n y_pred = reg.predict(filter_(X_iris))\n assert_greater(np.mean(y_iris == y_pred), .79)\n\n cv = KFold(5)\n reg = RidgeClassifierCV(cv=cv)\n reg.fit(filter_(X_iris), y_iris)\n y_pred = reg.predict(filter_(X_iris))\n assert_true(np.mean(y_iris == y_pred) >= 0.8)\n\n\ndef _test_tolerance(filter_):\n ridge = Ridge(tol=1e-5, fit_intercept=False)\n ridge.fit(filter_(X_diabetes), y_diabetes)\n score = ridge.score(filter_(X_diabetes), y_diabetes)\n\n ridge2 = Ridge(tol=1e-3, fit_intercept=False)\n ridge2.fit(filter_(X_diabetes), y_diabetes)\n score2 = ridge2.score(filter_(X_diabetes), 
y_diabetes)\n\n assert_true(score >= score2)\n\n\ndef check_dense_sparse(test_func):\n # test dense matrix\n ret_dense = test_func(DENSE_FILTER)\n # test sparse matrix\n ret_sparse = test_func(SPARSE_FILTER)\n # test that the outputs are the same\n if ret_dense is not None and ret_sparse is not None:\n assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)\n\n\ndef test_dense_sparse():\n for test_func in (_test_ridge_loo,\n _test_ridge_cv,\n _test_ridge_diabetes,\n _test_multi_ridge_diabetes,\n _test_ridge_classifiers,\n _test_tolerance):\n yield check_dense_sparse, test_func\n\n\ndef test_ridge_cv_sparse_svd():\n X = sp.csr_matrix(X_diabetes)\n ridge = RidgeCV(gcv_mode=\"svd\")\n assert_raises(TypeError, ridge.fit, X)\n\n\ndef test_ridge_sparse_svd():\n X = sp.csc_matrix(rng.rand(100, 10))\n y = rng.rand(100)\n ridge = Ridge(solver='svd', fit_intercept=False)\n assert_raises(TypeError, ridge.fit, X, y)\n\n\ndef test_class_weights():\n # Test class weights.\n X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],\n [1.0, 1.0], [1.0, 0.0]])\n y = [1, 1, 1, -1, -1]\n\n reg = RidgeClassifier(class_weight=None)\n reg.fit(X, y)\n assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))\n\n # we give a small weights to class 1\n reg = RidgeClassifier(class_weight={1: 0.001})\n reg.fit(X, y)\n\n # now the hyperplane should rotate clock-wise and\n # the prediction on this point should shift\n assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([-1]))\n\n # check if class_weight = 'balanced' can handle negative labels.\n reg = RidgeClassifier(class_weight='balanced')\n reg.fit(X, y)\n assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))\n\n # class_weight = 'balanced', and class_weight = None should return\n # same values when y has equal number of all labels\n X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])\n y = [1, 1, -1, -1]\n reg = RidgeClassifier(class_weight=None)\n reg.fit(X, y)\n rega = RidgeClassifier(class_weight='balanced')\n rega.fit(X, y)\n assert_equal(len(rega.classes_), 2)\n assert_array_almost_equal(reg.coef_, rega.coef_)\n assert_array_almost_equal(reg.intercept_, rega.intercept_)\n\n\ndef test_class_weight_vs_sample_weight():\n \"\"\"Check class_weights resemble sample_weights behavior.\"\"\"\n for reg in (RidgeClassifier, RidgeClassifierCV):\n\n # Iris is balanced, so no effect expected for using 'balanced' weights\n reg1 = reg()\n reg1.fit(iris.data, iris.target)\n reg2 = reg(class_weight='balanced')\n reg2.fit(iris.data, iris.target)\n assert_almost_equal(reg1.coef_, reg2.coef_)\n\n # Inflate importance of class 1, check against user-defined weights\n sample_weight = np.ones(iris.target.shape)\n sample_weight[iris.target == 1] *= 100\n class_weight = {0: 1., 1: 100., 2: 1.}\n reg1 = reg()\n reg1.fit(iris.data, iris.target, sample_weight)\n reg2 = reg(class_weight=class_weight)\n reg2.fit(iris.data, iris.target)\n assert_almost_equal(reg1.coef_, reg2.coef_)\n\n # Check that sample_weight and class_weight are multiplicative\n reg1 = reg()\n reg1.fit(iris.data, iris.target, sample_weight ** 2)\n reg2 = reg(class_weight=class_weight)\n reg2.fit(iris.data, iris.target, sample_weight)\n assert_almost_equal(reg1.coef_, reg2.coef_)\n\n\ndef test_class_weights_cv():\n # Test class weights for cross validated ridge classifier.\n X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],\n [1.0, 1.0], [1.0, 0.0]])\n y = [1, 1, 1, -1, -1]\n\n reg = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])\n reg.fit(X, y)\n\n # we give a small weights to 
class 1\n reg = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])\n reg.fit(X, y)\n\n assert_array_equal(reg.predict([[-.2, 2]]), np.array([-1]))\n\n\ndef test_ridgecv_store_cv_values():\n # Test _RidgeCV's store_cv_values attribute.\n rng = rng = np.random.RandomState(42)\n\n n_samples = 8\n n_features = 5\n x = rng.randn(n_samples, n_features)\n alphas = [1e-1, 1e0, 1e1]\n n_alphas = len(alphas)\n\n r = RidgeCV(alphas=alphas, store_cv_values=True)\n\n # with len(y.shape) == 1\n y = rng.randn(n_samples)\n r.fit(x, y)\n assert_equal(r.cv_values_.shape, (n_samples, n_alphas))\n\n # with len(y.shape) == 2\n n_responses = 3\n y = rng.randn(n_samples, n_responses)\n r.fit(x, y)\n assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))\n\n\ndef test_ridgecv_sample_weight():\n rng = np.random.RandomState(0)\n alphas = (0.1, 1.0, 10.0)\n\n # There are different algorithms for n_samples > n_features\n # and the opposite, so test them both.\n for n_samples, n_features in ((6, 5), (5, 10)):\n y = rng.randn(n_samples)\n X = rng.randn(n_samples, n_features)\n sample_weight = 1.0 + rng.rand(n_samples)\n\n cv = KFold(5)\n ridgecv = RidgeCV(alphas=alphas, cv=cv)\n ridgecv.fit(X, y, sample_weight=sample_weight)\n\n # Check using GridSearchCV directly\n parameters = {'alpha': alphas}\n fit_params = {'sample_weight': sample_weight}\n gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,\n cv=cv)\n gs.fit(X, y)\n\n assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)\n assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)\n\n\ndef test_raises_value_error_if_sample_weights_greater_than_1d():\n # Sample weights must be either scalar or 1D\n\n n_sampless = [2, 3]\n n_featuress = [3, 2]\n\n rng = np.random.RandomState(42)\n\n for n_samples, n_features in zip(n_sampless, n_featuress):\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples)\n sample_weights_OK = rng.randn(n_samples) ** 2 + 1\n sample_weights_OK_1 = 1.\n sample_weights_OK_2 = 2.\n sample_weights_not_OK = sample_weights_OK[:, np.newaxis]\n sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]\n\n ridge = Ridge(alpha=1)\n\n # make sure the \"OK\" sample weights actually work\n ridge.fit(X, y, sample_weights_OK)\n ridge.fit(X, y, sample_weights_OK_1)\n ridge.fit(X, y, sample_weights_OK_2)\n\n def fit_ridge_not_ok():\n ridge.fit(X, y, sample_weights_not_OK)\n\n def fit_ridge_not_ok_2():\n ridge.fit(X, y, sample_weights_not_OK_2)\n\n assert_raise_message(ValueError,\n \"Sample weights must be 1D array or scalar\",\n fit_ridge_not_ok)\n\n assert_raise_message(ValueError,\n \"Sample weights must be 1D array or scalar\",\n fit_ridge_not_ok_2)\n\n\ndef test_sparse_design_with_sample_weights():\n # Sample weights must work with sparse matrices\n\n n_sampless = [2, 3]\n n_featuress = [3, 2]\n\n rng = np.random.RandomState(42)\n\n sparse_matrix_converters = [sp.coo_matrix,\n sp.csr_matrix,\n sp.csc_matrix,\n sp.lil_matrix,\n sp.dok_matrix\n ]\n\n sparse_ridge = Ridge(alpha=1., fit_intercept=False)\n dense_ridge = Ridge(alpha=1., fit_intercept=False)\n\n for n_samples, n_features in zip(n_sampless, n_featuress):\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples)\n sample_weights = rng.randn(n_samples) ** 2 + 1\n for sparse_converter in sparse_matrix_converters:\n X_sparse = sparse_converter(X)\n sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)\n dense_ridge.fit(X, y, sample_weight=sample_weights)\n\n assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,\n 
decimal=6)\n\n\ndef test_raises_value_error_if_solver_not_supported():\n # Tests whether a ValueError is raised if a non-identified solver\n # is passed to ridge_regression\n\n wrong_solver = \"This is not a solver (MagritteSolveCV QuantumBitcoin)\"\n\n exception = ValueError\n message = \"Solver %s not understood\" % wrong_solver\n\n def func():\n X = np.eye(3)\n y = np.ones(3)\n ridge_regression(X, y, alpha=1., solver=wrong_solver)\n\n assert_raise_message(exception, message, func)\n\n\ndef test_sparse_cg_max_iter():\n reg = Ridge(solver=\"sparse_cg\", max_iter=1)\n reg.fit(X_diabetes, y_diabetes)\n assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])\n\n\n@ignore_warnings\ndef test_n_iter():\n # Test that self.n_iter_ is correct.\n n_targets = 2\n X, y = X_diabetes, y_diabetes\n y_n = np.tile(y, (n_targets, 1)).T\n\n for max_iter in range(1, 4):\n for solver in ('sag', 'lsqr'):\n reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)\n reg.fit(X, y_n)\n assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))\n\n for solver in ('sparse_cg', 'svd', 'cholesky'):\n reg = Ridge(solver=solver, max_iter=1, tol=1e-1)\n reg.fit(X, y_n)\n assert_equal(reg.n_iter_, None)\n\n\ndef test_ridge_fit_intercept_sparse():\n X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,\n bias=10., random_state=42)\n X_csr = sp.csr_matrix(X)\n\n dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)\n sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)\n dense.fit(X, y)\n sparse.fit(X_csr, y)\n assert_almost_equal(dense.intercept_, sparse.intercept_)\n assert_array_almost_equal(dense.coef_, sparse.coef_)\n\n # test the solver switch and the corresponding warning\n sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)\n assert_warns(UserWarning, sparse.fit, X_csr, y)\n assert_almost_equal(dense.intercept_, sparse.intercept_)\n assert_array_almost_equal(dense.coef_, sparse.coef_)\n\n\ndef test_errors_and_values_helper():\n ridgecv = _RidgeGCV()\n rng = check_random_state(42)\n alpha = 1.\n n = 5\n y = rng.randn(n)\n v = rng.randn(n)\n Q = rng.randn(len(v), len(v))\n QT_y = Q.T.dot(y)\n G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y)\n\n # test that helper function behaves as expected\n out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y)\n np.testing.assert_array_equal(out, (c / G_diag) ** 2)\n np.testing.assert_array_equal(c, c)\n\n out, c_ = ridgecv._values(alpha, y, v, Q, QT_y)\n np.testing.assert_array_equal(out, y - (c / G_diag))\n np.testing.assert_array_equal(c_, c)\n\n\ndef test_errors_and_values_svd_helper():\n ridgecv = _RidgeGCV()\n rng = check_random_state(42)\n alpha = 1.\n for n, p in zip((5, 10), (12, 6)):\n y = rng.randn(n)\n v = rng.randn(p)\n U = rng.randn(n, p)\n UT_y = U.T.dot(y)\n G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y)\n\n # test that helper function behaves as expected\n out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y)\n np.testing.assert_array_equal(out, (c / G_diag) ** 2)\n np.testing.assert_array_equal(c, c)\n\n out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y)\n np.testing.assert_array_equal(out, y - (c / G_diag))\n np.testing.assert_array_equal(c_, c)\n\n\ndef test_ridge_classifier_no_support_multilabel():\n X, y = make_multilabel_classification(n_samples=10, random_state=0)\n assert_raises(ValueError, RidgeClassifier().fit, X, y)\n",
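Several tests above compare Ridge against the closed form of weighted, regularized least squares, theta = (X^T W X + alpha I)^(-1) X^T W y. A standalone sketch of that check outside the test harness; fit_intercept=False so no column augmentation is needed, and the data sizes and seed are arbitrary.

import numpy as np
from scipy import linalg
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
n_samples, n_features, alpha = 6, 5, 1.0
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weight = 1.0 + rng.rand(n_samples)

est = Ridge(alpha=alpha, fit_intercept=False)
est.fit(X, y, sample_weight=sample_weight)

# Closed form of the weighted, regularized least-squares solution.
W = np.diag(sample_weight)
cf_coefs = linalg.solve(X.T @ W @ X + alpha * np.eye(n_features),
                        X.T @ W @ y)
print(np.allclose(est.coef_, cf_coefs))  # expected: True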
"\"\"\"\n=========================================\nSVM: Maximum margin separating hyperplane\n=========================================\n\nPlot the maximum margin separating hyperplane within a two-class\nseparable dataset using a Support Vector Machine classifier with\nlinear kernel.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\n\n# we create 40 separable points\nnp.random.seed(0)\nX = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]\nY = [0] * 20 + [1] * 20\n\n# fit the model\nclf = svm.SVC(kernel='linear')\nclf.fit(X, Y)\n\n# get the separating hyperplane\nw = clf.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(-5, 5)\nyy = a * xx - (clf.intercept_[0]) / w[1]\n\n# plot the parallels to the separating hyperplane that pass through the\n# support vectors\nb = clf.support_vectors_[0]\nyy_down = a * xx + (b[1] - a * b[0])\nb = clf.support_vectors_[-1]\nyy_up = a * xx + (b[1] - a * b[0])\n\n# plot the line, the points, and the nearest vectors to the plane\nplt.plot(xx, yy, 'k-')\nplt.plot(xx, yy_down, 'k--')\nplt.plot(xx, yy_up, 'k--')\n\nplt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],\n s=80, facecolors='none')\nplt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)\n\nplt.axis('tight')\nplt.show()\n",
"\"\"\"\n========================================================================\nIllustration of Gaussian process classification (GPC) on the XOR dataset\n========================================================================\n\nThis example illustrates GPC on XOR data. Compared are a stationary, isotropic\nkernel (RBF) and a non-stationary kernel (DotProduct). On this particular\ndataset, the DotProduct kernel obtains considerably better results because the\nclass-boundaries are linear and coincide with the coordinate axes. In general,\nstationary kernels often obtain better results.\n\"\"\"\nprint(__doc__)\n\n# Authors: Jan Hendrik Metzen <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF, DotProduct\n\n\nxx, yy = np.meshgrid(np.linspace(-3, 3, 50),\n np.linspace(-3, 3, 50))\nrng = np.random.RandomState(0)\nX = rng.randn(200, 2)\nY = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)\n\n# fit the model\nplt.figure(figsize=(10, 5))\nkernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]\nfor i, kernel in enumerate(kernels):\n clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)\n\n # plot the decision function for each datapoint on the grid\n Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]\n Z = Z.reshape(xx.shape)\n\n plt.subplot(1, 2, i + 1)\n image = plt.imshow(Z, interpolation='nearest',\n extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)\n contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,\n linetypes='--')\n plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)\n plt.xticks(())\n plt.yticks(())\n plt.axis([-3, 3, -3, 3])\n plt.colorbar(image)\n plt.title(\"%s\\n Log-Marginal-Likelihood:%.3f\"\n % (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),\n fontsize=12)\n\nplt.tight_layout()\nplt.show()\n",
"# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena\n# License: BSD 3 clause\n\nimport numpy as np\n\nfrom .base import SelectorMixin\nfrom ..base import TransformerMixin, BaseEstimator, clone\nfrom ..externals import six\n\nfrom ..utils import safe_mask, check_array, deprecated\nfrom ..utils.validation import check_is_fitted\nfrom ..exceptions import NotFittedError\n\n\ndef _get_feature_importances(estimator):\n \"\"\"Retrieve or aggregate feature importances from estimator\"\"\"\n if hasattr(estimator, \"feature_importances_\"):\n importances = estimator.feature_importances_\n\n elif hasattr(estimator, \"coef_\"):\n if estimator.coef_.ndim == 1:\n importances = np.abs(estimator.coef_)\n\n else:\n importances = np.sum(np.abs(estimator.coef_), axis=0)\n\n else:\n raise ValueError(\n \"The underlying estimator %s has no `coef_` or \"\n \"`feature_importances_` attribute. Either pass a fitted estimator\"\n \" to SelectFromModel or call fit before calling transform.\"\n % estimator.__class__.__name__)\n\n return importances\n\n\ndef _calculate_threshold(estimator, importances, threshold):\n \"\"\"Interpret the threshold value\"\"\"\n\n if threshold is None:\n # determine default from estimator\n est_name = estimator.__class__.__name__\n if ((hasattr(estimator, \"penalty\") and estimator.penalty == \"l1\") or\n \"Lasso\" in est_name):\n # the natural default threshold is 0 when l1 penalty was used\n threshold = 1e-5\n else:\n threshold = \"mean\"\n\n if isinstance(threshold, six.string_types):\n if \"*\" in threshold:\n scale, reference = threshold.split(\"*\")\n scale = float(scale.strip())\n reference = reference.strip()\n\n if reference == \"median\":\n reference = np.median(importances)\n elif reference == \"mean\":\n reference = np.mean(importances)\n else:\n raise ValueError(\"Unknown reference: \" + reference)\n\n threshold = scale * reference\n\n elif threshold == \"median\":\n threshold = np.median(importances)\n\n elif threshold == \"mean\":\n threshold = np.mean(importances)\n\n else:\n raise ValueError(\"Expected threshold='mean' or threshold='median' \"\n \"got %s\" % threshold)\n\n else:\n threshold = float(threshold)\n\n return threshold\n\n\nclass _LearntSelectorMixin(TransformerMixin):\n # Note because of the extra threshold parameter in transform, this does\n # not naturally extend from SelectorMixin\n \"\"\"Transformer mixin selecting features based on importance weights.\n\n This implementation can be mixin on any estimator that exposes a\n ``feature_importances_`` or ``coef_`` attribute to evaluate the relative\n importance of individual features for feature selection.\n \"\"\"\n @deprecated('Support to use estimators as feature selectors will be '\n 'removed in version 0.19. Use SelectFromModel instead.')\n def transform(self, X, threshold=None):\n \"\"\"Reduce X to its most important features.\n\n Uses ``coef_`` or ``feature_importances_`` to determine the most\n important features. For models with a ``coef_`` for each class, the\n absolute sum over the classes is used.\n\n Parameters\n ----------\n X : array or scipy sparse matrix of shape [n_samples, n_features]\n The input samples.\n\n threshold : string, float or None, optional (default=None)\n The threshold value to use for feature selection. Features whose\n importance is greater or equal are kept while the others are\n discarded. If \"median\" (resp. \"mean\"), then the threshold value is\n the median (resp. the mean) of the feature importances. 
A scaling\n factor (e.g., \"1.25*mean\") may also be used. If None and if\n available, the object attribute ``threshold`` is used. Otherwise,\n \"mean\" is used by default.\n\n Returns\n -------\n X_r : array of shape [n_samples, n_selected_features]\n The input samples with only the selected features.\n \"\"\"\n check_is_fitted(self, ('coef_', 'feature_importances_'),\n all_or_any=any)\n\n X = check_array(X, 'csc')\n importances = _get_feature_importances(self)\n if len(importances) != X.shape[1]:\n raise ValueError(\"X has different number of features than\"\n \" during model fitting.\")\n\n if threshold is None:\n threshold = getattr(self, 'threshold', None)\n threshold = _calculate_threshold(self, importances, threshold)\n\n # Selection\n try:\n mask = importances >= threshold\n except TypeError:\n # Fails in Python 3.x when threshold is str;\n # result is array of True\n raise ValueError(\"Invalid threshold: all features are discarded.\")\n\n if np.any(mask):\n mask = safe_mask(X, mask)\n return X[:, mask]\n else:\n raise ValueError(\"Invalid threshold: all features are discarded.\")\n\n\nclass SelectFromModel(BaseEstimator, SelectorMixin):\n \"\"\"Meta-transformer for selecting features based on importance weights.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n estimator : object\n The base estimator from which the transformer is built.\n This can be both a fitted (if ``prefit`` is set to True)\n or a non-fitted estimator.\n\n threshold : string, float, optional default None\n The threshold value to use for feature selection. Features whose\n importance is greater or equal are kept while the others are\n discarded. If \"median\" (resp. \"mean\"), then the ``threshold`` value is\n the median (resp. the mean) of the feature importances. A scaling\n factor (e.g., \"1.25*mean\") may also be used. If None and if the\n estimator has a parameter penalty set to l1, either explicitly\n or implicitly (e.g, Lasso), the threshold is used is 1e-5.\n Otherwise, \"mean\" is used by default.\n\n prefit : bool, default False\n Whether a prefit model is expected to be passed into the constructor\n directly or not. 
If True, ``transform`` must be called directly\n and SelectFromModel cannot be used with ``cross_val_score``,\n ``GridSearchCV`` and similar utilities that clone the estimator.\n Otherwise train the model using ``fit`` and then ``transform`` to do\n feature selection.\n\n Attributes\n ----------\n `estimator_`: an estimator\n The base estimator from which the transformer is built.\n This is stored only when a non-fitted estimator is passed to the\n ``SelectFromModel``, i.e when prefit is False.\n\n `threshold_`: float\n The threshold value used for feature selection.\n \"\"\"\n def __init__(self, estimator, threshold=None, prefit=False):\n self.estimator = estimator\n self.threshold = threshold\n self.prefit = prefit\n\n def _get_support_mask(self):\n # SelectFromModel can directly call on transform.\n if self.prefit:\n estimator = self.estimator\n elif hasattr(self, 'estimator_'):\n estimator = self.estimator_\n else:\n raise ValueError(\n 'Either fit the model before transform or set \"prefit=True\"'\n ' while passing the fitted estimator to the constructor.')\n scores = _get_feature_importances(estimator)\n self.threshold_ = _calculate_threshold(estimator, scores,\n self.threshold)\n return scores >= self.threshold_\n\n def fit(self, X, y=None, **fit_params):\n \"\"\"Fit the SelectFromModel meta-transformer.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like, shape (n_samples,)\n The target values (integers that correspond to classes in\n classification, real numbers in regression).\n\n **fit_params : Other estimator specific parameters\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n if self.prefit:\n raise NotFittedError(\n \"Since 'prefit=True', call transform directly\")\n if not hasattr(self, \"estimator_\"):\n self.estimator_ = clone(self.estimator)\n self.estimator_.fit(X, y, **fit_params)\n return self\n\n def partial_fit(self, X, y=None, **fit_params):\n \"\"\"Fit the SelectFromModel meta-transformer only once.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like, shape (n_samples,)\n The target values (integers that correspond to classes in\n classification, real numbers in regression).\n\n **fit_params : Other estimator specific parameters\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n if self.prefit:\n raise NotFittedError(\n \"Since 'prefit=True', call transform directly\")\n if not hasattr(self, \"estimator_\"):\n self.estimator_ = clone(self.estimator)\n self.estimator_.partial_fit(X, y, **fit_params)\n return self\n",
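The threshold grammar implemented above (None, "median", "mean", or a scaled form such as "1.25*mean") is easiest to see in use. A small sketch selecting features from a random forest with a scaled-mean threshold; the dataset, estimator choice, and sizes are illustrative assumptions.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

X, y = make_classification(n_samples=200, n_features=20, n_informative=4,
                           random_state=0)

# Keep features whose importance exceeds 1.25 times the mean importance.
selector = SelectFromModel(
    RandomForestClassifier(n_estimators=100, random_state=0),
    threshold="1.25*mean")
X_reduced = selector.fit_transform(X, y)
print(X.shape, "->", X_reduced.shape)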
"\n# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi\n#\n# License: BSD 3 clause\n\"\"\"\nMulti-class / multi-label utility function\n==========================================\n\n\"\"\"\nfrom __future__ import division\nfrom collections import Sequence\nfrom itertools import chain\n\nfrom scipy.sparse import issparse\nfrom scipy.sparse.base import spmatrix\nfrom scipy.sparse import dok_matrix\nfrom scipy.sparse import lil_matrix\n\nimport numpy as np\n\nfrom ..externals.six import string_types\nfrom .validation import check_array\nfrom ..utils.fixes import bincount\nfrom ..utils.fixes import array_equal\n\n\ndef _unique_multiclass(y):\n if hasattr(y, '__array__'):\n return np.unique(np.asarray(y))\n else:\n return set(y)\n\n\ndef _unique_indicator(y):\n return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])\n\n\n_FN_UNIQUE_LABELS = {\n 'binary': _unique_multiclass,\n 'multiclass': _unique_multiclass,\n 'multilabel-indicator': _unique_indicator,\n}\n\n\ndef unique_labels(*ys):\n \"\"\"Extract an ordered array of unique labels\n\n We don't allow:\n - mix of multilabel and multiclass (single label) targets\n - mix of label indicator matrix and anything else,\n because there are no explicit labels)\n - mix of label indicator matrices of different sizes\n - mix of string and integer labels\n\n At the moment, we also don't allow \"multiclass-multioutput\" input type.\n\n Parameters\n ----------\n *ys : array-likes,\n\n Returns\n -------\n out : numpy array of shape [n_unique_labels]\n An ordered array of unique labels.\n\n Examples\n --------\n >>> from sklearn.utils.multiclass import unique_labels\n >>> unique_labels([3, 5, 5, 5, 7, 7])\n array([3, 5, 7])\n >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])\n array([1, 2, 3, 4])\n >>> unique_labels([1, 2, 10], [5, 11])\n array([ 1, 2, 5, 10, 11])\n \"\"\"\n if not ys:\n raise ValueError('No argument has been passed.')\n # Check that we don't mix label format\n\n ys_types = set(type_of_target(x) for x in ys)\n if ys_types == set([\"binary\", \"multiclass\"]):\n ys_types = set([\"multiclass\"])\n\n if len(ys_types) > 1:\n raise ValueError(\"Mix type of y not allowed, got types %s\" % ys_types)\n\n label_type = ys_types.pop()\n\n # Check consistency for the indicator format\n if (label_type == \"multilabel-indicator\" and\n len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]\n for y in ys)) > 1):\n raise ValueError(\"Multi-label binary indicator input with \"\n \"different numbers of labels\")\n\n # Get the unique set of labels\n _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)\n if not _unique_labels:\n raise ValueError(\"Unknown label type: %s\" % repr(ys))\n\n ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))\n\n # Check that we don't mix string type with number type\n if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):\n raise ValueError(\"Mix of label input types (string and number)\")\n\n return np.array(sorted(ys_labels))\n\n\ndef _is_integral_float(y):\n return y.dtype.kind == 'f' and np.all(y.astype(int) == y)\n\n\ndef is_multilabel(y):\n \"\"\" Check if ``y`` is in a multilabel format.\n\n Parameters\n ----------\n y : numpy array of shape [n_samples]\n Target values.\n\n Returns\n -------\n out : bool,\n Return ``True``, if ``y`` is in a multilabel format, else ```False``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.utils.multiclass import is_multilabel\n >>> is_multilabel([0, 1, 0, 1])\n False\n >>> is_multilabel([[1], [0, 2], []])\n False\n >>> 
is_multilabel(np.array([[1, 0], [0, 0]]))\n True\n >>> is_multilabel(np.array([[1], [0], [0]]))\n False\n >>> is_multilabel(np.array([[1, 0, 0]]))\n True\n \"\"\"\n if hasattr(y, '__array__'):\n y = np.asarray(y)\n if not (hasattr(y, \"shape\") and y.ndim == 2 and y.shape[1] > 1):\n return False\n\n if issparse(y):\n if isinstance(y, (dok_matrix, lil_matrix)):\n y = y.tocsr()\n return (len(y.data) == 0 or np.unique(y.data).size == 1 and\n (y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(np.unique(y.data))))\n else:\n labels = np.unique(y)\n\n return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(labels))\n\ndef check_classification_targets(y):\n \"\"\"Ensure that target y is of a non-regression type.\n\n Only the following target types (as defined in type_of_target) are allowed:\n 'binary', 'multiclass', 'multiclass-multioutput', \n 'multilabel-indicator', 'multilabel-sequences'\n\n Parameters\n ----------\n y : array-like\n \"\"\"\n y_type = type_of_target(y)\n if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', \n 'multilabel-indicator', 'multilabel-sequences']:\n raise ValueError(\"Unknown label type: %r\" % y_type)\n\n\n\ndef type_of_target(y):\n \"\"\"Determine the type of data indicated by target `y`\n\n Parameters\n ----------\n y : array-like\n\n Returns\n -------\n target_type : string\n One of:\n * 'continuous': `y` is an array-like of floats that are not all\n integers, and is 1d or a column vector.\n * 'continuous-multioutput': `y` is a 2d array of floats that are\n not all integers, and both dimensions are of size > 1.\n * 'binary': `y` contains <= 2 discrete values and is 1d or a column\n vector.\n * 'multiclass': `y` contains more than two discrete values, is not a\n sequence of sequences, and is 1d or a column vector.\n * 'multiclass-multioutput': `y` is a 2d array that contains more\n than two discrete values, is not a sequence of sequences, and both\n dimensions are of size > 1.\n * 'multilabel-indicator': `y` is a label indicator matrix, an array\n of two dimensions with at least two columns, and at most 2 unique\n values.\n * 'unknown': `y` is array-like but none of the above, such as a 3d\n array, sequence of sequences, or an array of non-sequence objects.\n\n Examples\n --------\n >>> import numpy as np\n >>> type_of_target([0.1, 0.6])\n 'continuous'\n >>> type_of_target([1, -1, -1, 1])\n 'binary'\n >>> type_of_target(['a', 'b', 'a'])\n 'binary'\n >>> type_of_target([1.0, 2.0])\n 'binary'\n >>> type_of_target([1, 0, 2])\n 'multiclass'\n >>> type_of_target([1.0, 0.0, 3.0])\n 'multiclass'\n >>> type_of_target(['a', 'b', 'c'])\n 'multiclass'\n >>> type_of_target(np.array([[1, 2], [3, 1]]))\n 'multiclass-multioutput'\n >>> type_of_target([[1, 2]])\n 'multiclass-multioutput'\n >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))\n 'continuous-multioutput'\n >>> type_of_target(np.array([[0, 1], [1, 1]]))\n 'multilabel-indicator'\n \"\"\"\n valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))\n and not isinstance(y, string_types))\n\n if not valid:\n raise ValueError('Expected array-like (array or non-string sequence), '\n 'got %r' % y)\n\n if is_multilabel(y):\n return 'multilabel-indicator'\n\n try:\n y = np.asarray(y)\n except ValueError:\n # Known to fail in numpy 1.3 for array of arrays\n return 'unknown'\n\n # The old sequence of sequences format\n try:\n if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)\n and not isinstance(y[0], string_types)):\n raise 
ValueError('You appear to be using a legacy multi-label data'\n ' representation. Sequence of sequences are no'\n ' longer supported; use a binary array or sparse'\n ' matrix instead.')\n except IndexError:\n pass\n\n # Invalid inputs\n if y.ndim > 2 or (y.dtype == object and len(y) and\n not isinstance(y.flat[0], string_types)):\n return 'unknown' # [[[1, 2]]] or [obj_1] and not [\"label_1\"]\n\n if y.ndim == 2 and y.shape[1] == 0:\n return 'unknown' # [[]]\n\n if y.ndim == 2 and y.shape[1] > 1:\n suffix = \"-multioutput\" # [[1, 2], [1, 2]]\n else:\n suffix = \"\" # [1, 2, 3] or [[1], [2], [3]]\n\n # check float and contains non-integer float values\n if y.dtype.kind == 'f' and np.any(y != y.astype(int)):\n # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]\n return 'continuous' + suffix\n\n if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):\n return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]\n else:\n return 'binary' # [1, 2] or [[\"a\"], [\"b\"]]\n\n\ndef _check_partial_fit_first_call(clf, classes=None):\n \"\"\"Private helper function for factorizing common classes param logic\n\n Estimators that implement the ``partial_fit`` API need to be provided with\n the list of possible classes at the first call to partial_fit.\n\n Subsequent calls to partial_fit should check that ``classes`` is still\n consistent with a previous value of ``clf.classes_`` when provided.\n\n This function returns True if it detects that this was the first call to\n ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also\n set on ``clf``.\n\n \"\"\"\n if getattr(clf, 'classes_', None) is None and classes is None:\n raise ValueError(\"classes must be passed on the first call \"\n \"to partial_fit.\")\n\n elif classes is not None:\n if getattr(clf, 'classes_', None) is not None:\n if not array_equal(clf.classes_, unique_labels(classes)):\n raise ValueError(\n \"`classes=%r` is not the same as on last call \"\n \"to partial_fit, was: %r\" % (classes, clf.classes_))\n\n else:\n # This is the first call to partial_fit\n clf.classes_ = unique_labels(classes)\n return True\n\n # classes is None and clf.classes_ has already previously been set:\n # nothing to do\n return False\n\n\ndef class_distribution(y, sample_weight=None):\n \"\"\"Compute class priors from multioutput-multiclass target data\n\n Parameters\n ----------\n y : array like or sparse matrix of size (n_samples, n_outputs)\n The labels for each example.\n\n sample_weight : array-like of shape = (n_samples,), optional\n Sample weights.\n\n Returns\n -------\n classes : list of size n_outputs of arrays of size (n_classes,)\n List of classes for each column.\n\n n_classes : list of integers of size n_outputs\n Number of classes in each column\n\n class_prior : list of size n_outputs of arrays of size (n_classes,)\n Class distribution of each column.\n\n \"\"\"\n classes = []\n n_classes = []\n class_prior = []\n\n n_samples, n_outputs = y.shape\n\n if issparse(y):\n y = y.tocsc()\n y_nnz = np.diff(y.indptr)\n\n for k in range(n_outputs):\n col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]\n # separate sample weights for zero and non-zero elements\n if sample_weight is not None:\n nz_samp_weight = np.asarray(sample_weight)[col_nonzero]\n zeros_samp_weight_sum = (np.sum(sample_weight) -\n np.sum(nz_samp_weight))\n else:\n nz_samp_weight = None\n zeros_samp_weight_sum = y.shape[0] - y_nnz[k]\n\n classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],\n return_inverse=True)\n 
class_prior_k = bincount(y_k, weights=nz_samp_weight)\n\n # An explicit zero was found, combine its weight with the weight\n # of the implicit zeros\n if 0 in classes_k:\n class_prior_k[classes_k == 0] += zeros_samp_weight_sum\n\n # If an there is an implicit zero and it is not in classes and\n # class_prior, make an entry for it\n if 0 not in classes_k and y_nnz[k] < y.shape[0]:\n classes_k = np.insert(classes_k, 0, 0)\n class_prior_k = np.insert(class_prior_k, 0,\n zeros_samp_weight_sum)\n\n classes.append(classes_k)\n n_classes.append(classes_k.shape[0])\n class_prior.append(class_prior_k / class_prior_k.sum())\n else:\n for k in range(n_outputs):\n classes_k, y_k = np.unique(y[:, k], return_inverse=True)\n classes.append(classes_k)\n n_classes.append(classes_k.shape[0])\n class_prior_k = bincount(y_k, weights=sample_weight)\n class_prior.append(class_prior_k / class_prior_k.sum())\n\n return (classes, n_classes, class_prior)\n",
"\"\"\"Test the search module\"\"\"\n\nfrom collections import Iterable, Sized\nfrom sklearn.externals.six.moves import cStringIO as StringIO\nfrom sklearn.externals.six.moves import xrange\nfrom itertools import chain, product\nimport pickle\nimport sys\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.fixes import sp_version\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_not_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import assert_false, assert_true\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_no_warnings\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.utils.mocking import CheckingClassifier, MockDataFrame\n\nfrom scipy.stats import bernoulli, expon, uniform\n\nfrom sklearn.externals.six.moves import zip\nfrom sklearn.base import BaseEstimator\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import make_blobs\nfrom sklearn.datasets import make_multilabel_classification\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import LeaveOneLabelOut\nfrom sklearn.model_selection import LeavePLabelOut\nfrom sklearn.model_selection import LabelKFold\nfrom sklearn.model_selection import LabelShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.model_selection import ParameterSampler\n\n# TODO Import from sklearn.exceptions once merged.\nfrom sklearn.base import ChangedBehaviorWarning\nfrom sklearn.model_selection._validation import FitFailedWarning\n\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.pipeline import Pipeline\n\n\n# Neither of the following two estimators inherit from BaseEstimator,\n# to test hyperparameter search on user-defined classifiers.\nclass MockClassifier(object):\n \"\"\"Dummy classifier to test the parameter search algorithms\"\"\"\n def __init__(self, foo_param=0):\n self.foo_param = foo_param\n\n def fit(self, X, Y):\n assert_true(len(X) == len(Y))\n return self\n\n def predict(self, T):\n return T.shape[0]\n\n predict_proba = predict\n decision_function = predict\n transform = predict\n\n def score(self, X=None, Y=None):\n if self.foo_param > 1:\n score = 1.\n else:\n score = 0.\n return score\n\n def get_params(self, deep=False):\n return {'foo_param': self.foo_param}\n\n def set_params(self, **params):\n self.foo_param = params['foo_param']\n return self\n\n\nclass LinearSVCNoScore(LinearSVC):\n \"\"\"An LinearSVC classifier that has no score method.\"\"\"\n @property\n def score(self):\n raise AttributeError\n\nX = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\ny = np.array([1, 1, 2, 2])\n\n\ndef assert_grid_iter_equals_getitem(grid):\n 
assert_equal(list(grid), [grid[i] for i in range(len(grid))])\n\n\ndef test_parameter_grid():\n # Test basic properties of ParameterGrid.\n params1 = {\"foo\": [1, 2, 3]}\n grid1 = ParameterGrid(params1)\n assert_true(isinstance(grid1, Iterable))\n assert_true(isinstance(grid1, Sized))\n assert_equal(len(grid1), 3)\n assert_grid_iter_equals_getitem(grid1)\n\n params2 = {\"foo\": [4, 2],\n \"bar\": [\"ham\", \"spam\", \"eggs\"]}\n grid2 = ParameterGrid(params2)\n assert_equal(len(grid2), 6)\n\n # loop to assert we can iterate over the grid multiple times\n for i in xrange(2):\n # tuple + chain transforms {\"a\": 1, \"b\": 2} to (\"a\", 1, \"b\", 2)\n points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)\n assert_equal(points,\n set((\"bar\", x, \"foo\", y)\n for x, y in product(params2[\"bar\"], params2[\"foo\"])))\n assert_grid_iter_equals_getitem(grid2)\n\n # Special case: empty grid (useful to get default estimator settings)\n empty = ParameterGrid({})\n assert_equal(len(empty), 1)\n assert_equal(list(empty), [{}])\n assert_grid_iter_equals_getitem(empty)\n assert_raises(IndexError, lambda: empty[1])\n\n has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])\n assert_equal(len(has_empty), 4)\n assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])\n assert_grid_iter_equals_getitem(has_empty)\n\n\ndef test_grid_search():\n # Test that the best estimator contains the right value for foo_param\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)\n # make sure it selects the smallest parameter in case of ties\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n grid_search.fit(X, y)\n sys.stdout = old_stdout\n assert_equal(grid_search.best_estimator_.foo_param, 2)\n\n for i, foo_i in enumerate([1, 2, 3]):\n assert_true(grid_search.grid_scores_[i][0]\n == {'foo_param': foo_i})\n # Smoke test the score etc:\n grid_search.score(X, y)\n grid_search.predict_proba(X)\n grid_search.decision_function(X)\n grid_search.transform(X)\n\n # Test exception handling on scoring\n grid_search.scoring = 'sklearn'\n assert_raises(ValueError, grid_search.fit, X, y)\n\n\n@ignore_warnings\ndef test_grid_search_no_score():\n # Test grid-search on classifier that has no score function.\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n clf_no_score = LinearSVCNoScore(random_state=0)\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')\n grid_search.fit(X, y)\n\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},\n scoring='accuracy')\n # smoketest grid search\n grid_search_no_score.fit(X, y)\n\n # check that best params are equal\n assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)\n # check that we can call score and that it gives the correct result\n assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))\n\n # giving no scoring function raises an error\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})\n assert_raise_message(TypeError, \"no scoring\", grid_search_no_score.fit,\n [[1]])\n\n\ndef test_grid_search_score_method():\n X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,\n random_state=0)\n clf = LinearSVC(random_state=0)\n grid = {'C': [.1]}\n\n search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)\n search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)\n search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,\n scoring='roc_auc').fit(X, y)\n 
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)\n\n # Check warning only occurs in situation where behavior changed:\n # estimator requires score method to compete with scoring parameter\n score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)\n score_accuracy = assert_warns(ChangedBehaviorWarning,\n search_accuracy.score, X, y)\n score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,\n X, y)\n score_auc = assert_warns(ChangedBehaviorWarning,\n search_auc.score, X, y)\n # ensure the test is sane\n assert_true(score_auc < 1.0)\n assert_true(score_accuracy < 1.0)\n assert_not_equal(score_auc, score_accuracy)\n\n assert_almost_equal(score_accuracy, score_no_scoring)\n assert_almost_equal(score_auc, score_no_score_auc)\n\n\ndef test_grid_search_labels():\n # Check if ValueError (when labels is None) propagates to GridSearchCV\n # And also check if labels is correctly passed to the cv object\n rng = np.random.RandomState(0)\n\n X, y = make_classification(n_samples=15, n_classes=2, random_state=0)\n labels = rng.randint(0, 3, 15)\n\n clf = LinearSVC(random_state=0)\n grid = {'C': [1]}\n\n label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),\n LabelShuffleSplit()]\n for cv in label_cvs:\n gs = GridSearchCV(clf, grid, cv=cv)\n assert_raise_message(ValueError,\n \"The labels parameter should not be None\",\n gs.fit, X, y)\n gs.fit(X, y, labels)\n\n non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]\n for cv in non_label_cvs:\n gs = GridSearchCV(clf, grid, cv=cv)\n # Should not raise an error\n gs.fit(X, y)\n\n\ndef test_trivial_grid_scores():\n # Test search over a \"grid\" with only one point.\n # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1]})\n grid_search.fit(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)\n random_search.fit(X, y)\n assert_true(hasattr(random_search, \"grid_scores_\"))\n\n\ndef test_no_refit():\n # Test that grid search can be used for model selection only\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)\n grid_search.fit(X, y)\n assert_true(hasattr(grid_search, \"best_params_\"))\n\n\ndef test_grid_search_error():\n # Test that grid search will capture errors on data with different\n # length\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, X_[:180], y_)\n\n\ndef test_grid_search_iid():\n # test the iid parameter\n # noise-free simple 2d-data\n X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,\n cluster_std=0.1, shuffle=False, n_samples=80)\n # split dataset into two folds that are not iid\n # first one contains data of all 4 blobs, second only from two.\n mask = np.ones(X.shape[0], dtype=np.bool)\n mask[np.where(y == 1)[0][::2]] = 0\n mask[np.where(y == 2)[0][::2]] = 0\n # this leads to perfect classification on one fold and a score of 1/3 on\n # the other\n svm = SVC(kernel='linear')\n # create \"cv\" for splits\n cv = [[mask, ~mask], [~mask, mask]]\n # once with iid=True (default)\n grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)\n grid_search.fit(X, y)\n first = grid_search.grid_scores_[0]\n assert_equal(first.parameters['C'], 1)\n assert_array_almost_equal(first.cv_validation_scores, [1, 1. 
/ 3.])\n # for first split, 1/4 of dataset is in test, for second 3/4.\n # take weighted average\n assert_almost_equal(first.mean_validation_score,\n 1 * 1. / 4. + 1. / 3. * 3. / 4.)\n\n # once with iid=False\n grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,\n iid=False)\n grid_search.fit(X, y)\n first = grid_search.grid_scores_[0]\n assert_equal(first.parameters['C'], 1)\n # scores are the same as above\n assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])\n # averaged score is just mean of scores\n assert_almost_equal(first.mean_validation_score,\n np.mean(first.cv_validation_scores))\n\n\ndef test_grid_search_one_grid_point():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n param_dict = {\"C\": [1.0], \"kernel\": [\"rbf\"], \"gamma\": [0.1]}\n\n clf = SVC()\n cv = GridSearchCV(clf, param_dict)\n cv.fit(X_, y_)\n\n clf = SVC(C=1.0, kernel=\"rbf\", gamma=0.1)\n clf.fit(X_, y_)\n\n assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)\n\n\ndef test_grid_search_bad_param_grid():\n param_dict = {\"C\": 1.0}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": []}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": np.ones(6).reshape(3, 2)}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n\ndef test_grid_search_sparse():\n # Test that grid search works with both dense and sparse matrices\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180].tocoo(), y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = cv.best_estimator_.C\n\n assert_true(np.mean(y_pred == y_pred2) >= .9)\n assert_equal(C, C2)\n\n\ndef test_grid_search_sparse_scoring():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = cv.best_estimator_.C\n\n assert_array_equal(y_pred, y_pred2)\n assert_equal(C, C2)\n # Smoke test the score\n # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),\n # cv.score(X_[:180], y[:180]))\n\n # test loss where greater is worse\n def f1_loss(y_true_, y_pred_):\n return -f1_score(y_true_, y_pred_)\n F1Loss = make_scorer(f1_loss, greater_is_better=False)\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)\n cv.fit(X_[:180], y_[:180])\n y_pred3 = cv.predict(X_[180:])\n C3 = cv.best_estimator_.C\n\n assert_equal(C, C3)\n assert_array_equal(y_pred, y_pred3)\n\n\ndef test_grid_search_precomputed_kernel():\n # Test that grid search works when the input features are given in the\n # form of a precomputed kernel matrix\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n # compute the training kernel matrix corresponding to the linear kernel\n K_train = np.dot(X_[:180], X_[:180].T)\n y_train = y_[:180]\n\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(K_train, y_train)\n\n 
assert_true(cv.best_score_ >= 0)\n\n # compute the test kernel matrix\n K_test = np.dot(X_[180:], X_[:180].T)\n y_test = y_[180:]\n\n y_pred = cv.predict(K_test)\n\n assert_true(np.mean(y_pred == y_test) >= 0)\n\n # test error is raised when the precomputed kernel is not array-like\n # or sparse\n assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)\n\n\ndef test_grid_search_precomputed_kernel_error_nonsquare():\n # Test that grid search returns an error with a non-square precomputed\n # training kernel matrix\n K_train = np.zeros((10, 20))\n y_train = np.ones((10, ))\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, K_train, y_train)\n\n\ndef test_grid_search_precomputed_kernel_error_kernel_function():\n # Test that grid search returns an error when using a kernel_function\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n kernel_function = lambda x1, x2: np.dot(x1, x2.T)\n clf = SVC(kernel=kernel_function)\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, X_, y_)\n\n\nclass BrokenClassifier(BaseEstimator):\n \"\"\"Broken classifier that cannot be fit twice\"\"\"\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y):\n assert_true(not hasattr(self, 'has_been_fit_'))\n self.has_been_fit_ = True\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n\n@ignore_warnings\ndef test_refit():\n # Regression test for bug in refitting\n # Simulates re-fitting a broken estimator; this used to break with\n # sparse SVMs.\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],\n scoring=\"precision\", refit=True)\n clf.fit(X, y)\n\n\ndef test_gridsearch_nd():\n # Pass X as list in GridSearchCV\n X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)\n y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)\n check_X = lambda x: x.shape[1:] == (5, 3, 2)\n check_y = lambda x: x.shape[1:] == (7, 11)\n clf = CheckingClassifier(check_X=check_X, check_y=check_y)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_4d, y_3d).score(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\ndef test_X_as_list():\n # Pass X as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))\n cv = KFold(n_folds=3)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X.tolist(), y).score(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\ndef test_y_as_list():\n # Pass y as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))\n cv = KFold(n_folds=3)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X, y.tolist()).score(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\n@ignore_warnings\ndef test_pandas_input():\n # check cross_val_score doesn't destroy pandas dataframe\n types = [(MockDataFrame, MockDataFrame)]\n try:\n from pandas import Series, DataFrame\n types.append((DataFrame, Series))\n except ImportError:\n pass\n\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n for InputFeatureType, TargetType in types:\n # X dataframe, y series\n X_df, y_ser = InputFeatureType(X), TargetType(y)\n check_df = lambda x: 
isinstance(x, InputFeatureType)\n check_series = lambda x: isinstance(x, TargetType)\n clf = CheckingClassifier(check_X=check_df, check_y=check_series)\n\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_df, y_ser).score(X_df, y_ser)\n grid_search.predict(X_df)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\ndef test_unsupervised_grid_search():\n # test grid-search with unsupervised estimator\n X, y = make_blobs(random_state=0)\n km = KMeans(random_state=0)\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),\n scoring='adjusted_rand_score')\n grid_search.fit(X, y)\n # ARI can find the right number :)\n assert_equal(grid_search.best_params_[\"n_clusters\"], 3)\n\n # Now without a score, and without y\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))\n grid_search.fit(X)\n assert_equal(grid_search.best_params_[\"n_clusters\"], 4)\n\n\ndef test_gridsearch_no_predict():\n # test grid-search with an estimator without predict.\n # slight duplication of a test from KDE\n def custom_scoring(estimator, X):\n return 42 if estimator.bandwidth == .1 else 0\n X, _ = make_blobs(cluster_std=.1, random_state=1,\n centers=[[0, 1], [1, 0], [0, 0]])\n search = GridSearchCV(KernelDensity(),\n param_grid=dict(bandwidth=[.01, .1, 1]),\n scoring=custom_scoring)\n search.fit(X)\n assert_equal(search.best_params_['bandwidth'], .1)\n assert_equal(search.best_score_, 42)\n\n\ndef test_param_sampler():\n # test basic properties of param sampler\n param_distributions = {\"kernel\": [\"rbf\", \"linear\"],\n \"C\": uniform(0, 1)}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=10, random_state=0)\n samples = [x for x in sampler]\n assert_equal(len(samples), 10)\n for sample in samples:\n assert_true(sample[\"kernel\"] in [\"rbf\", \"linear\"])\n assert_true(0 <= sample[\"C\"] <= 1)\n\n # test that repeated calls yield identical parameters\n param_distributions = {\"C\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=3, random_state=0)\n assert_equal([x for x in sampler], [x for x in sampler])\n\n if sp_version >= (0, 16):\n param_distributions = {\"C\": uniform(0, 1)}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=10, random_state=0)\n assert_equal([x for x in sampler], [x for x in sampler])\n\n\ndef test_randomized_search_grid_scores():\n # Make a dataset with a lot of noise to get various kind of prediction\n # errors across CV folds and parameter settings\n X, y = make_classification(n_samples=200, n_features=100, n_informative=3,\n random_state=0)\n\n # XXX: as of today (scipy 0.12) it's not possible to set the random seed\n # of scipy.stats distributions: the assertions in this test should thus\n # not depend on the randomization\n params = dict(C=expon(scale=10),\n gamma=expon(scale=0.1))\n n_cv_iter = 3\n n_search_iter = 30\n search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,\n param_distributions=params, iid=False)\n search.fit(X, y)\n assert_equal(len(search.grid_scores_), n_search_iter)\n\n # Check consistency of the structure of each cv_score item\n for cv_score in search.grid_scores_:\n assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)\n # Because we set iid to False, the mean_validation score is the\n # mean of the fold mean scores instead of the aggregate sample-wise\n # mean score\n assert_almost_equal(np.mean(cv_score.cv_validation_scores),\n 
cv_score.mean_validation_score)\n assert_equal(list(sorted(cv_score.parameters.keys())),\n list(sorted(params.keys())))\n\n # Check the consistency with the best_score_ and best_params_ attributes\n sorted_grid_scores = list(sorted(search.grid_scores_,\n key=lambda x: x.mean_validation_score))\n best_score = sorted_grid_scores[-1].mean_validation_score\n assert_equal(search.best_score_, best_score)\n\n tied_best_params = [s.parameters for s in sorted_grid_scores\n if s.mean_validation_score == best_score]\n assert_true(search.best_params_ in tied_best_params,\n \"best_params_={0} is not part of the\"\n \" tied best models: {1}\".format(\n search.best_params_, tied_best_params))\n\n\ndef test_grid_search_score_consistency():\n # test that correct scores are used\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n for score in ['f1', 'roc_auc']:\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)\n grid_search.fit(X, y)\n cv = StratifiedKFold(n_folds=3)\n for C, scores in zip(Cs, grid_search.grid_scores_):\n clf.set_params(C=C)\n scores = scores[2] # get the separate runs from grid scores\n i = 0\n for train, test in cv.split(X, y):\n clf.fit(X[train], y[train])\n if score == \"f1\":\n correct_score = f1_score(y[test], clf.predict(X[test]))\n elif score == \"roc_auc\":\n dec = clf.decision_function(X[test])\n correct_score = roc_auc_score(y[test], dec)\n assert_almost_equal(correct_score, scores[i])\n i += 1\n\n\ndef test_pickle():\n # Test that a fit search can be pickled\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)\n grid_search.fit(X, y)\n pickle.dumps(grid_search) # smoke test\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},\n refit=True, n_iter=3)\n random_search.fit(X, y)\n pickle.dumps(random_search) # smoke test\n\n\ndef test_grid_search_with_multioutput_data():\n # Test search with multi-output estimator\n\n X, y = make_multilabel_classification(return_indicator=True,\n random_state=0)\n\n est_parameters = {\"max_depth\": [1, 2, 3, 4]}\n cv = KFold(random_state=0)\n\n estimators = [DecisionTreeRegressor(random_state=0),\n DecisionTreeClassifier(random_state=0)]\n\n # Test with grid search cv\n for est in estimators:\n grid_search = GridSearchCV(est, est_parameters, cv=cv)\n grid_search.fit(X, y)\n for parameters, _, cv_validation_scores in grid_search.grid_scores_:\n est.set_params(**parameters)\n\n for i, (train, test) in enumerate(cv.split(X, y)):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n assert_almost_equal(correct_score,\n cv_validation_scores[i])\n\n # Test with a randomized search\n for est in estimators:\n random_search = RandomizedSearchCV(est, est_parameters,\n cv=cv, n_iter=3)\n random_search.fit(X, y)\n for parameters, _, cv_validation_scores in random_search.grid_scores_:\n est.set_params(**parameters)\n\n for i, (train, test) in enumerate(cv.split(X, y)):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n assert_almost_equal(correct_score,\n cv_validation_scores[i])\n\n\ndef test_predict_proba_disabled():\n # Test predict_proba when disabled on estimator.\n X = np.arange(20).reshape(5, -1)\n y = [0, 0, 1, 1, 1]\n clf = SVC(probability=False)\n gs = GridSearchCV(clf, {}, cv=2).fit(X, y)\n assert_false(hasattr(gs, \"predict_proba\"))\n\n\ndef test_grid_search_allows_nans():\n # Test GridSearchCV with Imputer\n X = np.arange(20, dtype=np.float64).reshape(5, -1)\n X[2, :] = np.nan\n 
y = [0, 0, 1, 1, 1]\n p = Pipeline([\n ('imputer', Imputer(strategy='mean', missing_values='NaN')),\n ('classifier', MockClassifier()),\n ])\n GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)\n\n\nclass FailingClassifier(BaseEstimator):\n \"\"\"Classifier that raises a ValueError on fit()\"\"\"\n\n FAILING_PARAMETER = 2\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y=None):\n if self.parameter == FailingClassifier.FAILING_PARAMETER:\n raise ValueError(\"Failing classifier failed as required\")\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n\ndef test_grid_search_failing_classifier():\n # GridSearchCV with on_error != 'raise'\n # Ensures that a warning is raised and score reset where appropriate.\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we only want to check that errors caused by fits\n # to individual folds will be caught and warnings raised instead. If\n # refit was done, then an exception would be raised on refit and not\n # caught by grid_search (expected behavior), and this would cause an\n # error in this test.\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=0.0)\n\n assert_warns(FitFailedWarning, gs.fit, X, y)\n\n # Ensure that grid scores were set to zero as required for those fits\n # that are expected to fail.\n assert all(np.all(this_point.cv_validation_scores == 0.0)\n for this_point in gs.grid_scores_\n if this_point.parameters['parameter'] ==\n FailingClassifier.FAILING_PARAMETER)\n\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=float('nan'))\n assert_warns(FitFailedWarning, gs.fit, X, y)\n assert all(np.all(np.isnan(this_point.cv_validation_scores))\n for this_point in gs.grid_scores_\n if this_point.parameters['parameter'] ==\n FailingClassifier.FAILING_PARAMETER)\n\n\ndef test_grid_search_failing_classifier_raise():\n # GridSearchCV with on_error == 'raise' raises the error\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we want to test the behaviour of the grid search part\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score='raise')\n\n # FailingClassifier issues a ValueError so this is what we look for.\n assert_raises(ValueError, gs.fit, X, y)\n\n\ndef test_parameters_sampler_replacement():\n # raise error if n_iter too large\n params = {'first': [0, 1], 'second': ['a', 'b', 'c']}\n sampler = ParameterSampler(params, n_iter=7)\n assert_raises(ValueError, list, sampler)\n # degenerates to GridSearchCV if n_iter the same as grid_size\n sampler = ParameterSampler(params, n_iter=6)\n samples = list(sampler)\n assert_equal(len(samples), 6)\n for values in ParameterGrid(params):\n assert_true(values in samples)\n\n # test sampling without replacement in a large grid\n params = {'a': range(10), 'b': range(10), 'c': range(10)}\n sampler = ParameterSampler(params, n_iter=99, random_state=42)\n samples = list(sampler)\n assert_equal(len(samples), 99)\n hashable_samples = [\"a%db%dc%d\" % (p['a'], p['b'], p['c'])\n for p in samples]\n assert_equal(len(set(hashable_samples)), 99)\n\n # doesn't go into infinite loops\n params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}\n sampler = ParameterSampler(params_distribution, n_iter=7)\n samples = list(sampler)\n 
assert_equal(len(samples), 7)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"matplotlib.pyplot.plot",
"sklearn.svm.SVC",
"matplotlib.pyplot.axis",
"numpy.random.RandomState",
"matplotlib.pyplot.show"
],
[
"sklearn.utils.testing.SkipTest",
"scipy.misc.imresize",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"sklearn.feature_extraction.image.img_to_graph",
"matplotlib.pyplot.title",
"scipy.face",
"scipy.misc.face",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"sklearn.cluster.spectral_clustering",
"matplotlib.pyplot.figure"
],
[
"sklearn.feature_selection.f_regression",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"sklearn.feature_selection.mutual_info_regression",
"numpy.sin",
"numpy.max",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"numpy.random.randn",
"numpy.random.rand",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.linspace",
"sklearn.linear_model.LassoLars",
"sklearn.linear_model.Lasso",
"sklearn.datasets.samples_generator.make_regression",
"numpy.sum"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axvline",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.title",
"sklearn.covariance.LedoitWolf",
"numpy.logspace",
"numpy.arange",
"sklearn.covariance.ShrunkCovariance",
"matplotlib.pyplot.plot",
"sklearn.decomposition.FactorAnalysis",
"matplotlib.pyplot.ylabel",
"numpy.argmax",
"matplotlib.pyplot.xlabel",
"numpy.random.RandomState",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.diag",
"sklearn.covariance.GraphLassoCV",
"matplotlib.pyplot.scatter",
"numpy.abs",
"sklearn.manifold.LocallyLinearEmbedding",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.clf",
"matplotlib.finance.quotes_historical_yahoo",
"sklearn.cluster.affinity_propagation",
"matplotlib.pyplot.axis",
"numpy.triu",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"scipy.ndimage.binary_erosion",
"numpy.linspace",
"numpy.random.randn",
"numpy.hstack",
"scipy.sparse.coo_matrix",
"numpy.arange",
"sklearn.linear_model.Lasso",
"numpy.sin",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.axis",
"numpy.ravel",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"sklearn.linear_model.Ridge",
"numpy.floor",
"matplotlib.pyplot.show",
"numpy.random.RandomState",
"numpy.logical_and",
"scipy.ndimage.gaussian_filter",
"numpy.cos"
],
[
"matplotlib.pyplot.legend",
"numpy.hstack",
"matplotlib.pyplot.axvline",
"sklearn.naive_bayes.GaussianNB",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"sklearn.ensemble.VotingClassifier",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.legend",
"sklearn.model_selection.cross_val_score",
"numpy.random.seed",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.cos",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.pipeline.Pipeline",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.setp",
"numpy.random.rand",
"sklearn.linear_model.LinearRegression",
"numpy.random.randn",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.diag",
"numpy.dot",
"sklearn.linear_model.ridge._RidgeGCV",
"sklearn.utils.testing.assert_array_almost_equal",
"numpy.sqrt",
"sklearn.utils.testing.assert_almost_equal",
"sklearn.linear_model.ridge.RidgeClassifier",
"sklearn.datasets.load_diabetes",
"sklearn.utils.testing.assert_raises",
"sklearn.model_selection.KFold",
"numpy.concatenate",
"sklearn.metrics.mean_squared_error",
"sklearn.utils.testing.assert_true",
"numpy.mean",
"sklearn.utils.testing.assert_warns",
"sklearn.linear_model.base.LinearRegression",
"sklearn.linear_model.ridge._solve_cholesky_kernel",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"sklearn.linear_model.ridge.RidgeClassifierCV",
"sklearn.linear_model.ridge.Ridge",
"sklearn.utils.testing.assert_raise_message",
"sklearn.linear_model.ridge._solve_cholesky",
"sklearn.datasets.load_iris",
"scipy.sparse.csr_matrix",
"sklearn.datasets.make_multilabel_classification",
"sklearn.metrics.make_scorer",
"sklearn.metrics.get_scorer",
"numpy.array",
"numpy.random.RandomState",
"sklearn.utils.testing.assert_equal",
"sklearn.linear_model.ridge.RidgeCV",
"numpy.tile",
"numpy.ones",
"numpy.testing.assert_array_equal",
"sklearn.datasets.make_regression",
"sklearn.linear_model.ridge.ridge_regression",
"sklearn.utils.check_random_state",
"numpy.vstack"
],
[
"matplotlib.pyplot.scatter",
"numpy.random.seed",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"sklearn.svm.SVC",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
],
[
"numpy.logical_xor",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.yticks",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"sklearn.gaussian_process.kernels.RBF",
"sklearn.gaussian_process.kernels.DotProduct",
"sklearn.gaussian_process.GaussianProcessClassifier",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xticks",
"numpy.random.RandomState",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.abs",
"numpy.median",
"numpy.mean",
"numpy.any"
],
[
"scipy.sparse.issparse",
"numpy.unique",
"numpy.asarray",
"numpy.diff",
"numpy.insert",
"numpy.sum"
],
[
"numpy.dot",
"sklearn.metrics.roc_auc_score",
"sklearn.utils.testing.assert_array_almost_equal",
"sklearn.datasets.make_classification",
"sklearn.utils.testing.assert_almost_equal",
"sklearn.cluster.KMeans",
"sklearn.utils.testing.assert_raises",
"sklearn.externals.six.moves.cStringIO",
"sklearn.model_selection.KFold",
"numpy.all",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.utils.testing.assert_true",
"numpy.mean",
"sklearn.svm.LinearSVC",
"sklearn.utils.testing.assert_warns",
"sklearn.metrics.f1_score",
"numpy.where",
"sklearn.datasets.make_blobs",
"sklearn.externals.six.moves.zip",
"sklearn.model_selection.LeaveOneLabelOut",
"numpy.arange",
"sklearn.utils.testing.assert_no_warnings",
"sklearn.externals.six.moves.xrange",
"sklearn.model_selection.StratifiedKFold",
"sklearn.preprocessing.Imputer",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.zeros",
"sklearn.utils.testing.assert_raise_message",
"sklearn.model_selection.LabelShuffleSplit",
"sklearn.model_selection.LeavePLabelOut",
"numpy.isnan",
"scipy.sparse.csr_matrix",
"sklearn.model_selection.ParameterGrid",
"sklearn.datasets.make_multilabel_classification",
"sklearn.metrics.make_scorer",
"sklearn.svm.SVC",
"sklearn.utils.mocking.CheckingClassifier",
"sklearn.utils.testing.assert_array_equal",
"sklearn.neighbors.KernelDensity",
"scipy.stats.uniform",
"numpy.random.RandomState",
"numpy.array",
"sklearn.utils.testing.assert_equal",
"sklearn.model_selection.GridSearchCV",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.model_selection.RandomizedSearchCV",
"sklearn.model_selection.ParameterSampler",
"sklearn.model_selection.LabelKFold",
"scipy.stats.expon",
"numpy.ones",
"sklearn.utils.testing.assert_not_equal",
"scipy.stats.bernoulli"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
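The record above embeds scikit-learn's target-type helpers (`is_multilabel`, `type_of_target`, `class_distribution`) and the grid-search test module, plus their extracted API calls and candidate scipy versions. Purely as an illustration of the decision rules shown in that embedded `type_of_target` code, here is a minimal numpy-only sketch; `simple_type_of_target` is a hypothetical name, it only handles dense inputs, and it is not a replacement for the scikit-learn helper.

```python
import numpy as np


def simple_type_of_target(y):
    """Rough restatement of the decision rules in the embedded type_of_target (dense inputs only)."""
    y = np.asarray(y)
    # label-indicator matrix: 2-D, more than one column, at most two integer-like values
    if y.ndim == 2 and y.shape[1] > 1 and y.dtype.kind in "biu" and len(np.unique(y)) <= 2:
        return "multilabel-indicator"
    suffix = "-multioutput" if (y.ndim == 2 and y.shape[1] > 1) else ""
    # float targets containing non-integer values are continuous
    if y.dtype.kind == "f" and np.any(y != y.astype(int)):
        return "continuous" + suffix
    if len(np.unique(y)) > 2 or (y.ndim == 2 and y.shape[1] > 1):
        return "multiclass" + suffix
    return "binary"


# mirrors the doctest examples already present in the record
assert simple_type_of_target([1, -1, -1, 1]) == "binary"
assert simple_type_of_target([0.1, 0.6]) == "continuous"
assert simple_type_of_target([1, 0, 2]) == "multiclass"
assert simple_type_of_target(np.array([[0, 1], [1, 1]])) == "multilabel-indicator"
assert simple_type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]])) == "continuous-multioutput"
```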
josehoras/Advanced-Lane-Finding | [
"e6b83d602eb89661d3bf0f4d257ed5af0f6a58bb"
] | [
"video_pipeline.py"
] | [
"import numpy as np\nimport pickle\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom moviepy.editor import VideoFileClip\nfrom image_thresholding import *\nfrom plotting_helpers import *\nfrom line_fit import *\nfrom Line import *\n\n\n# *** PIPELINE ***\ndef pipeline(img):\n global error_im, skipped_frames\n\n # 1. Correct distorsion\n # open distorsion matrix\n try:\n saved_dist = pickle.load(open('calibrate_camera.p', 'rb'), encoding='latin1')\n mtx = saved_dist['mtx']\n dist = saved_dist['dist']\n except (OSError, IOError): # No progress file yet available\n print(\"No saved distorsion data. Run camera_calibration.py\")\n # apply correction\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n\n # 2. Apply filters to get binary map\n ksize = 3\n gradx = abs_sobel_thresh(undist, orient='x', sobel_kernel=ksize, thresh=(10, 100))\n grady = abs_sobel_thresh(undist, orient='y', sobel_kernel=ksize, thresh=(5, 100))\n mag_bin = mag_thresh(undist, sobel_kernel=ksize, mag_thresh=(10, 200))\n dir_bin = dir_threshold(undist, sobel_kernel=15, thresh=(0.9, 1.2))\n hls_bin = hls_select(img, thresh=(50, 255))\n white_bin = white_select(img, thresh=195)\n yellow_bin = yellow_select(img)\n # combine filters to a final output\n combined = np.zeros_like(dir_bin)\n combined[((mag_bin == 1) & (dir_bin == 1) & (hls_bin == 1)) |\n ((white_bin == 1) | (yellow_bin == 1))] = 1\n\n # 3. Define trapezoid points on the road and transform perspective\n X = combined.shape[1]\n Y = combined.shape[0]\n src = np.float32(\n [[205, 720],\n [1075, 720],\n [700, 460],\n [580, 460]])\n dst = np.float32(\n [[300, 720],\n [980, 720],\n [980, 0],\n [300, 0]])\n # get perspective transformation matrix\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n # warp the result of binary thresholds\n warped = cv2.warpPerspective(combined, M, (X,Y), flags=cv2.INTER_LINEAR)\n\n # 4. Get polinomial fit of lines\n # if > 4 frames skipped (or first frame, as skipped_frames is initialized to 100) do full search\n if skipped_frames > 5:\n fit_method = \"Boxes\"\n leftx, lefty, rightx, righty, out_img = find_lane_pixels(warped)\n else:\n fit_method = \"Around fit\"\n leftx, lefty, rightx, righty, out_img = find_lane_around_fit(warped, left_lane.fit_x, right_lane.fit_x)\n\n # fit polynomials and sanity check\n try:\n left_fit, right_fit, left_px, right_px, ploty = fit(leftx, lefty, rightx, righty, warped.shape[0])\n detected, err_msg = sanity_chk(ploty, left_px, right_px)\n except:\n detected, err_msg = False, \"Empty data\"\n\n if detected: skipped_frames = 0\n else: skipped_frames += 1\n\n # 5. Calculate distance to center, curvature, and update Line objects\n if detected or (fit_method == \"Boxes\" and err_msg != \"Empty data\"):\n left_curv, right_curv = find_curv(ploty, left_fit, right_fit)\n left_lane.update(ploty, left_fit, left_px, left_curv)\n right_lane.update(ploty, right_fit, right_px, right_curv)\n lane_w = (right_lane.base_pos - left_lane.base_pos) * 3.7/700\n offset = (((right_lane.base_pos + left_lane.base_pos) - img.shape[1]) / 2) * 3.7/700\n\n # 6. 
Plot fitted lanes into original image\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_lane.fit_x, left_lane.fit_y]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_lane.fit_x, right_lane.fit_y])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))\n\n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n # if error save original img to check closely in image pipeline\n if 1 < skipped_frames < 3:\n mpimg.imsave(err_msg + \"_\" + str(error_im) + \".jpg\", img)\n error_im += 1\n\n # Add text\n road_curv = (left_lane.curv_avg + right_lane.curv_avg) // 2\n if road_curv > 2000:\n road_curv_text = \"Road curvature: straight\"\n else:\n road_curv_text = \"Road curvature: \" + str(road_curv) + \"m\"\n side = {True: \"left\", False: \"right\"}\n offset_txt = \"Car is {0:.2f}m {1:s} of center\".format(offset, side[offset > 0])\n\n for i, txt in enumerate([road_curv_text, offset_txt]):\n cv2.putText(result, txt, (75, 75 * (i+1)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)\n\n # Uncomment for debugging messages\n # lane_width_txt = \"Lane width: %.2f m\" % lane_w\n # for i, obj, txt in [(1, left_lane, \"Left\"), (2, right_lane, \"Right\")]:\n # if obj.curv_avg > 2000:\n # curv_txt = txt + \" curvature: straight\"\n # else:\n # curv_txt = txt + \" curvature: \" + str(int(obj.curv_avg)) + \"m\"\n # cv2.putText(result,curv_txt, (550, 50 * i), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n # cv2.putText(result, \"Skipped frames: \" + str(skipped_frames), (550,150), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n # cv2.putText(result, fit_method, (550, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n # if err_msg != \"\":\n # cv2.putText(result, \"Error!: \" + err_msg, (550, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)\n\n return result\n\n\n# *** MAIN ***\n# define global variables to use in the pipeline\nleft_lane = Line()\nright_lane = Line()\nerror_im = 1\nskipped_frames = 100\n# load video\nclip_name = \"challenge_video\"\nclip1 = VideoFileClip(clip_name + \".mp4\")#.subclip(0, 8)\n# run video through the pipeline and save output\nout_clip = clip1.fl_image(pipeline)\nout_clip.write_videofile(\"output_videos/\" + clip_name + \"_output.mp4\", audio=False)\n"
] | [
[
"numpy.hstack",
"numpy.dstack",
"numpy.int_",
"numpy.zeros_like",
"numpy.float32",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
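The Advanced-Lane-Finding record above boils down to a handful of numpy calls (vstack, transpose, flipud, hstack, int_) used to assemble the lane polygon before cv2.fillPoly draws it onto the warped frame. The following numpy-only sketch reproduces just that assembly step; the fit values are made-up stand-ins for the real polynomial fits, and the cv2 call is only referenced in a comment to keep the snippet self-contained.

```python
import numpy as np

ploty = np.linspace(0, 719, 720)          # pixel rows of the warped image
left_fit_x = 300 + 0.05 * ploty           # hypothetical stand-in for the fitted left-lane x-positions
right_fit_x = 980 - 0.05 * ploty          # hypothetical stand-in for the fitted right-lane x-positions

pts_left = np.array([np.transpose(np.vstack([left_fit_x, ploty]))])                # (1, 720, 2), top to bottom
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fit_x, ploty])))])   # (1, 720, 2), bottom to top
pts = np.hstack((pts_left, pts_right))    # (1, 1440, 2): one closed polygon outline

print(pts.shape)
# in the pipeline this is what cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0)) paints
```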
EdwardFerdian/4DFlowNet | [
"e9c8bf72660b41ef5c7b6c677a71283ead32bbab",
"e9c8bf72660b41ef5c7b6c677a71283ead32bbab"
] | [
"src/Network/SR4DFlowNet.py",
"src/Network/loss_utils.py"
] | [
"import tensorflow as tf\n\nclass SR4DFlowNet():\n def __init__(self, res_increase):\n self.res_increase = res_increase\n\n def build_network(self, u, v, w, u_mag, v_mag, w_mag, low_resblock=8, hi_resblock=4, channel_nr=64):\n channel_nr = 64\n\n speed = (u ** 2 + v ** 2 + w ** 2) ** 0.5\n mag = (u_mag ** 2 + v_mag ** 2 + w_mag ** 2) ** 0.5\n pcmr = mag * speed\n\n phase = tf.keras.layers.concatenate([u,v,w])\n pc = tf.keras.layers.concatenate([pcmr, mag, speed])\n \n pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')\n pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')\n\n phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')\n phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')\n\n concat_layer = tf.keras.layers.concatenate([phase, pc])\n concat_layer = conv3d(concat_layer, 1, channel_nr, 'SYMMETRIC', 'relu')\n concat_layer = conv3d(concat_layer, 3, channel_nr, 'SYMMETRIC', 'relu')\n \n # res blocks\n rb = concat_layer\n for i in range(low_resblock):\n rb = resnet_block(rb, \"ResBlock\", channel_nr, pad='SYMMETRIC')\n\n rb = upsample3d(rb, self.res_increase)\n \n # refinement in HR\n for i in range(hi_resblock):\n rb = resnet_block(rb, \"ResBlock\", channel_nr, pad='SYMMETRIC')\n\n # 3 separate path version\n u_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')\n u_path = conv3d(u_path, 3, 1, 'SYMMETRIC', None)\n\n v_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')\n v_path = conv3d(v_path, 3, 1, 'SYMMETRIC', None)\n\n w_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')\n w_path = conv3d(w_path, 3, 1, 'SYMMETRIC', None)\n \n\n b_out = tf.keras.layers.concatenate([u_path, v_path, w_path])\n\n return b_out\n\ndef upsample3d(input_tensor, res_increase):\n \"\"\"\n Resize the image by linearly interpolating the input\n using TF '``'resize_bilinear' function.\n\n :param input_tensor: 2D/3D image tensor, with shape:\n 'batch, X, Y, Z, Channels'\n :return: interpolated volume\n\n Original source: https://niftynet.readthedocs.io/en/dev/_modules/niftynet/layer/linear_resize.html\n \"\"\"\n \n # We need this option for the bilinear resize to prevent shifting bug\n align = True \n\n b_size, x_size, y_size, z_size, c_size = input_tensor.shape\n\n x_size_new, y_size_new, z_size_new = x_size * res_increase, y_size * res_increase, z_size * res_increase\n\n if res_increase == 1:\n # already in the target shape\n return input_tensor\n\n # resize y-z\n squeeze_b_x = tf.reshape(input_tensor, [-1, y_size, z_size, c_size], name='reshape_bx')\n resize_b_x = tf.compat.v1.image.resize_bilinear(squeeze_b_x, [y_size_new, z_size_new], align_corners=align)\n resume_b_x = tf.reshape(resize_b_x, [-1, x_size, y_size_new, z_size_new, c_size], name='resume_bx')\n\n # Reorient\n reoriented = tf.transpose(resume_b_x, [0, 3, 2, 1, 4])\n \n # squeeze and 2d resize\n squeeze_b_z = tf.reshape(reoriented, [-1, y_size_new, x_size, c_size], name='reshape_bz')\n resize_b_z = tf.compat.v1.image.resize_bilinear(squeeze_b_z, [y_size_new, x_size_new], align_corners=align)\n resume_b_z = tf.reshape(resize_b_z, [-1, z_size_new, y_size_new, x_size_new, c_size], name='resume_bz')\n \n output_tensor = tf.transpose(resume_b_z, [0, 3, 2, 1, 4])\n return output_tensor\n\n\ndef conv3d(x, kernel_size, filters, padding='SYMMETRIC', activation=None, initialization=None, use_bias=True):\n \"\"\"\n Based on: https://github.com/gitlimlab/CycleGAN-Tensorflow/blob/master/ops.py\n For tf padding, refer to: https://www.tensorflow.org/api_docs/python/tf/pad\n\n \"\"\"\n reg_l2 = tf.keras.regularizers.l2(5e-7)\n\n if padding == 
'SYMMETRIC' or padding == 'REFLECT':\n p = (kernel_size - 1) // 2\n x = tf.pad(x, [[0,0],[p,p],[p,p], [p,p],[0,0]], padding)\n x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x)\n else:\n assert padding in ['SAME', 'VALID']\n x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x)\n return x\n \n\ndef resnet_block(x, block_name='ResBlock', channel_nr=64, scale = 1, pad='SAME'):\n tmp = conv3d(x, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)\n tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)\n\n tmp = conv3d(tmp, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)\n\n tmp = x + tmp * scale\n tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)\n\n return tmp\n",
"import tensorflow as tf\nimport numpy as np\n\ndef create_divergence_kernels():\n \"\"\"\n Create kernels in 3 different direction to calculate central differences\n The kernels will filter out the x, y, z direction vector\n Representing the gradients for each direction\n \"\"\"\n kernel_x = np.zeros((3,3,3), dtype='float32')\n kernel_x[0,1,1] = 1 # filter x\n kernel_x[2,1,1] = -1 # filter x\n filter_x = tf.constant(kernel_x, dtype='float32')\n filter_x = tf.reshape(filter_x, [3, 3, 3, 1, 1])\n\n kernel_y = np.zeros((3,3,3), dtype='float32')\n kernel_y[1,0,1] = 1 # filter y\n kernel_y[1,2,1] = -1 # filter y\n filter_y = tf.constant(kernel_y, dtype='float32')\n filter_y = tf.reshape(filter_y, [3, 3, 3, 1, 1])\n\n kernel_z = np.zeros((3,3,3), dtype='float32')\n kernel_z[1,1,0] = 1 # filter z\n kernel_z[1,1,2] = -1 # filter z\n filter_z = tf.constant(kernel_z, dtype='float32')\n filter_z = tf.reshape(filter_z, [3, 3, 3, 1, 1])\n\n return (filter_x, filter_y, filter_z) \n\ndef calculate_gradient(image, kernel):\n \"\"\"\n Calculate the gradient (edge) of an image using a predetermined kernel\n \"\"\"\n # make sure it has 5 dimensions\n image = tf.expand_dims(image, 4)\n\n kernel_size = 3\n p = (kernel_size - 1) // 2\n image = tf.pad(image, [[0,0],[p,p],[p,p], [p,p],[0,0]], 'SYMMETRIC')\n\n conv = tf.nn.conv3d(image, kernel, strides=[1,1,1,1,1], padding='VALID')\n\n # remove the extra dimension\n conv = tf.squeeze(conv, 4)\n return conv\n\ndef calculate_divergence(u, v, w):\n \"\"\"\n Calculate divergence for the corresponding velocity component\n \"\"\"\n kernels = create_divergence_kernels()\n dudx = calculate_gradient(u, kernels[0])\n dvdy = calculate_gradient(v, kernels[1])\n dwdz = calculate_gradient(w, kernels[2])\n\n return (dudx, dvdy, dwdz)\n\ndef calculate_divergence_loss2(u, v, w, u_pred, v_pred, w_pred):\n (divpx, divpy, divpz) = calculate_divergence(u_pred, v_pred, w_pred)\n (divx, divy, divz) = calculate_divergence(u, v, w)\n \n return (divpx - divx) ** 2 + (divpy - divy) ** 2 + (divpz - divz) ** 2\n\ndef calculate_relative_error(u_pred, v_pred, w_pred, u_hi, v_hi, w_hi, binary_mask):\n # if epsilon is set to 0, we will get nan and inf\n epsilon = 1e-5\n\n u_diff = tf.square(u_pred - u_hi)\n v_diff = tf.square(v_pred - v_hi)\n w_diff = tf.square(w_pred - w_hi)\n\n diff_speed = tf.sqrt(u_diff + v_diff + w_diff)\n actual_speed = tf.sqrt(tf.square(u_hi) + tf.square(v_hi) + tf.square(w_hi)) \n\n # actual speed can be 0, resulting in inf\n relative_speed_loss = diff_speed / (actual_speed + epsilon)\n \n # Make sure the range is between 0 and 1\n relative_speed_loss = tf.clip_by_value(relative_speed_loss, 0., 1.)\n\n # Apply correction, only use the diff speed if actual speed is zero\n condition = tf.not_equal(actual_speed, tf.constant(0.))\n corrected_speed_loss = tf.where(condition, relative_speed_loss, diff_speed)\n\n multiplier = 1e4 # round it so we don't get any infinitesimal number\n corrected_speed_loss = tf.round(corrected_speed_loss * multiplier) / multiplier\n # print(corrected_speed_loss)\n \n # Apply mask\n # binary_mask_condition = (mask > threshold)\n binary_mask_condition = tf.equal(binary_mask, 1.0) \n corrected_speed_loss = tf.where(binary_mask_condition, corrected_speed_loss, tf.zeros_like(corrected_speed_loss))\n # print(found_indexes)\n\n # Calculate the mean from the total non zero accuracy, divided by the masked area\n # reduce first to the 'batch' axis\n mean_err = tf.reduce_sum(corrected_speed_loss, axis=[1,2,3]) / (tf.reduce_sum(binary_mask, axis=[1,2,3]) + 1) 
\n\n # now take the actual mean\n # mean_err = tf.reduce_mean(mean_err) * 100 # in percentage\n mean_err = mean_err * 100\n\n return mean_err"
] | [
[
"tensorflow.transpose",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.regularizers.l2",
"tensorflow.reshape",
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.layers.concatenate",
"tensorflow.compat.v1.image.resize_bilinear",
"tensorflow.pad"
],
[
"tensorflow.clip_by_value",
"tensorflow.constant",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.equal",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.zeros_like",
"tensorflow.nn.conv3d",
"tensorflow.pad",
"tensorflow.square",
"tensorflow.where",
"tensorflow.sqrt",
"tensorflow.round",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
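The 4DFlowNet record above pairs tf.nn.conv3d with the hand-built central-difference kernels from loss_utils.py. As a sanity sketch in plain numpy (no TensorFlow required), the snippet below checks that such a kernel, applied as a VALID cross-correlation (which is what tf.nn.conv3d computes), matches a slice-based central difference along x; the brute-force loop and function names here are illustrative, not part of the original code.

```python
import numpy as np


def central_diff_x(vol):
    # slice-based central difference along the first axis, cropped to the
    # interior so it lines up with a 3x3x3 VALID cross-correlation
    return vol[:-2, 1:-1, 1:-1] - vol[2:, 1:-1, 1:-1]


rng = np.random.default_rng(0)
u = rng.standard_normal((8, 8, 8)).astype("float32")

# same construction as create_divergence_kernels() in the record: +1/-1 along x
kernel_x = np.zeros((3, 3, 3), dtype="float32")
kernel_x[0, 1, 1] = 1.0
kernel_x[2, 1, 1] = -1.0

# brute-force VALID cross-correlation (no kernel flip, as in tf.nn.conv3d)
out = np.zeros((6, 6, 6), dtype="float32")
for i in range(6):
    for j in range(6):
        for k in range(6):
            out[i, j, k] = np.sum(u[i:i + 3, j:j + 3, k:k + 3] * kernel_x)

assert np.allclose(out, central_diff_x(u))
```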
kingmoon3/xalpha | [
"dd877c6bce1b85a4facd38de9dc35a7bf0acf1c6"
] | [
"xalpha/universal.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodules for universal fetcher that gives historical daily data and realtime data\nfor almost everything in the market\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nimport logging\nimport inspect\nfrom bs4 import BeautifulSoup\nfrom functools import wraps, lru_cache\nfrom uuid import uuid4\nfrom sqlalchemy import exc\nfrom dateutil.relativedelta import relativedelta\n\ntry:\n from jqdatasdk import (\n get_index_weights,\n query,\n get_fundamentals,\n valuation,\n get_query_count,\n finance,\n get_index_stocks,\n macro,\n get_price,\n )\n\n # 本地导入\nexcept ImportError:\n try:\n from jqdata import finance, macro # 云平台导入\n except ImportError:\n pass\n\nfrom xalpha.info import basicinfo, fundinfo, mfundinfo, get_fund_holdings\nfrom xalpha.indicator import indicator\nfrom xalpha.cons import (\n rget,\n rpost,\n rget_json,\n rpost_json,\n tz_bj,\n last_onday,\n region_trans,\n today_obj,\n _float,\n)\nfrom xalpha.provider import data_source\nfrom xalpha.exceptions import DataPossiblyWrong, ParserFailure\n\npd.options.mode.chained_assignment = None # turn off setwith copy warning\nthismodule = sys.modules[__name__]\nxamodule = sys.modules[\"xalpha\"]\nlogger = logging.getLogger(__name__)\n\n\ndef tomorrow_ts():\n dto = dt.datetime.now() + dt.timedelta(1)\n return dto.timestamp()\n\n\ndef has_weekday(start, end):\n for d in pd.date_range(start, end):\n if d.weekday() < 5:\n return True\n return False\n\n\ndef ts2pdts(ts):\n dto = dt.datetime.fromtimestamp(ts / 1000, tz=tz_bj).replace(tzinfo=None)\n return dto.replace(\n hour=0, minute=0, second=0, microsecond=0\n ) # 雪球美股数据时间戳是美国0点,按北京时区换回时间后,把时分秒扔掉就重合了\n\n\ndef decouple_code(code):\n \"\"\"\n decompose SH600000.A into SH600000, after\n\n :param code:\n :return: Tuple\n \"\"\"\n if len(code[1:].split(\".\")) > 1: # .SPI in US stock!\n type_ = code.split(\".\")[-1]\n code = \".\".join(code.split(\".\")[:-1])\n if type_.startswith(\"b\") or type_.startswith(\"B\"):\n type_ = \"before\"\n elif type_.startswith(\"a\") or type_.startswith(\"A\"):\n type_ = \"after\"\n elif type_.startswith(\"n\") or type_.startswith(\"N\"):\n type_ = \"normal\"\n else:\n logger.warning(\n \"unrecoginzed flag for adjusted factor %s, use default\" % type_\n )\n type_ = \"before\"\n else:\n type_ = \"before\"\n return code, type_\n\n\ndef lru_cache_time(ttl=None, maxsize=None):\n \"\"\"\n TTL support on lru_cache\n\n :param ttl: float or int, seconds\n :param maxsize: int, maxsize for lru_cache\n :return:\n \"\"\"\n\n def wrapper(func):\n # Lazy function that makes sure the lru_cache() invalidate after X secs\n @lru_cache(maxsize)\n def time_aware(_ttl, *args, **kwargs):\n return func(*args, **kwargs)\n\n setattr(thismodule, func.__name__ + \"_ttl\", time_aware)\n\n @wraps(func)\n def newfunc(*args, **kwargs):\n ttl_hash = round(time.time() / ttl)\n f_ttl = getattr(thismodule, func.__name__ + \"_ttl\")\n return f_ttl(ttl_hash, *args, **kwargs)\n\n return newfunc\n\n return wrapper\n\n\n# TODO: 缓存 token 的合适时间尺度\n@lru_cache_time(ttl=300)\ndef get_token():\n \"\"\"\n 获取雪球的验权 token,匿名也可获取,而且似乎永远恒定(大时间范围内会改变)\n\n :return:\n \"\"\"\n r = rget(\"https://xueqiu.com\", headers={\"user-agent\": \"Mozilla\"})\n return r.cookies[\"xq_a_token\"]\n\n\ndef get_historical_fromxq(code, count, type_=\"before\", full=False):\n \"\"\"\n\n :param code:\n :param count:\n :param type_: str. 
normal, before, after\n :param full:\n :return:\n \"\"\"\n url = \"https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={code}&begin={tomorrow}&period=day&type={type_}&count=-{count}\"\n if full:\n url += \"&indicator=kline,pe,pb,ps,pcf,market_capital,agt,ggt,balance\"\n # pe 是 TTM 数据\n r = rget_json(\n url.format(\n code=code, tomorrow=int(tomorrow_ts() * 1000), count=count, type_=type_\n ),\n cookies={\"xq_a_token\": get_token()},\n headers={\"user-agent\": \"Mozilla/5.0\"},\n )\n df = pd.DataFrame(data=r[\"data\"][\"item\"], columns=r[\"data\"][\"column\"])\n df[\"date\"] = (df[\"timestamp\"]).apply(ts2pdts) # reset hours to zero\n return df\n\n\n@lru_cache()\ndef get_industry_fromxq(code):\n \"\"\"\n part of symbols has empty industry information\n\n :param code:\n :return: dict\n \"\"\"\n url = (\n \"https://xueqiu.com/stock/industry/stockList.json?code=%s&type=1&size=100\"\n % code\n )\n r = rget_json(url, cookies={\"xq_a_token\": get_token()})\n return r\n\n\ndef get_historical_fromcninvesting(curr_id, st_date, end_date, app=False):\n data = {\n \"curr_id\": curr_id,\n # \"smlID\": smlID, # ? but seems to be fixed with curr_id, it turns out it doesn't matter\n \"st_date\": st_date, # %Y/%m/%d\n \"end_date\": end_date,\n \"interval_sec\": \"Daily\",\n \"sort_col\": \"date\",\n \"sort_ord\": \"DESC\",\n \"action\": \"historical_data\",\n }\n if not app: # fetch from web api\n r = rpost(\n \"https://cn.investing.com/instruments/HistoricalDataAjax\",\n data=data,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\\\n AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n else: # fetch from app api\n r = rpost(\n \"https://cnappapi.investing.com/instruments/HistoricalDataAjax\",\n data=data,\n headers={\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept-Language\": \"zh-cn\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0\",\n \"ccode\": \"CN\",\n #'ccode_time': '1585551041.986028',\n \"x-app-ver\": \"117\",\n \"x-meta-ver\": \"14\",\n \"x-os\": \"ios\",\n \"x-uuid\": str(uuid4()),\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n s = BeautifulSoup(r.text, \"lxml\")\n dfdict = {}\n cols = []\n for col in s.find_all(\"th\"):\n dfdict[str(col.contents[0])] = []\n cols.append(str(col.contents[0]))\n num_cols = len(cols)\n for i, td in enumerate(s.find_all(\"td\")[:-5]):\n if cols[i % num_cols] == \"日期\":\n dfdict[cols[i % num_cols]].append(\n dt.datetime.strptime(str(td.string), \"%Y年%m月%d日\")\n )\n else:\n dfdict[cols[i % num_cols]].append(str(td.string))\n return pd.DataFrame(dfdict)\n\n\ndef prettify(df):\n _map = {\n \"日期\": \"date\",\n \"收盘\": \"close\",\n \"开盘\": \"open\",\n \"高\": \"high\",\n \"低\": \"low\",\n \"涨跌幅\": \"percent\",\n \"交易量\": \"volume\",\n }\n df.rename(_map, axis=1, inplace=True)\n if len(df) > 1 and df.iloc[1][\"date\"] < df.iloc[0][\"date\"]:\n df = df[::-1]\n # df = df[[\"date\", \"open\", \"close\", \"high\", \"low\", \"percent\"]]\n df1 = df[[\"date\"]]\n for k in [\"open\", \"close\", \"high\", \"low\", \"volume\"]:\n if k in df.columns:\n df1[k] = df[k].apply(_float)\n df1[\"percent\"] = df[\"percent\"]\n return df1\n\n\ndef dstr2dobj(dstr):\n if len(dstr.split(\"/\")) > 1:\n d_obj = dt.datetime.strptime(dstr, \"%Y/%m/%d\")\n elif len(dstr.split(\".\")) > 1:\n d_obj = dt.datetime.strptime(dstr, 
\"%Y.%m.%d\")\n elif len(dstr.split(\"-\")) > 1:\n d_obj = dt.datetime.strptime(dstr, \"%Y-%m-%d\")\n else:\n d_obj = dt.datetime.strptime(dstr, \"%Y%m%d\")\n return d_obj\n\n\n@lru_cache(maxsize=1024)\ndef get_investing_id(suburl, app=False):\n if not app:\n url = \"https://cn.investing.com\"\n else:\n url = \"https://cnappapi.investing.com\"\n if not suburl.startswith(\"/\"):\n url += \"/\"\n url += suburl\n if not app:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36\"\n }\n else:\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept-Language\": \"zh-cn\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0\",\n \"ccode\": \"CN\",\n #'ccode_time': '1585551041.986028',\n \"x-app-ver\": \"117\",\n \"x-meta-ver\": \"14\",\n \"x-os\": \"ios\",\n \"x-uuid\": str(uuid4()),\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n r = rget(\n url,\n headers=headers,\n )\n s = BeautifulSoup(r.text, \"lxml\")\n pid = s.find(\"span\", id=\"last_last\")[\"class\"][-1].split(\"-\")[1]\n return pid\n\n\ndef _variate_ua():\n last = 20 + np.random.randint(20)\n ua = []\n ua.append(\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko)\"\n )\n ua.append(\n \"Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1\"\n )\n choice = np.random.randint(2)\n return ua[choice][:last]\n\n\n@lru_cache_time(ttl=120, maxsize=128)\ndef get_rmb(start=None, end=None, prev=360, currency=\"USD/CNY\"):\n \"\"\"\n 获取人民币汇率中间价, 该 API 官网数据源,稳定性很差\n\n :param start:\n :param end:\n :param prev:\n :param currency:\n :return: pd.DataFrame\n \"\"\"\n bl = [\"USD\", \"EUR\", \"100JPY\", \"HKD\", \"GBP\", \"AUD\", \"NZD\", \"SGD\", \"CHF\", \"CAD\"]\n al = [\n \"MYR\",\n \"RUB\",\n \"ZAR\",\n \"KRW\",\n \"AED\",\n \"SAR\",\n \"HUF\",\n \"PLN\",\n \"DKK\",\n \"SEK\",\n \"NOK\",\n \"TRY\",\n \"MXN\",\n \"THB\",\n ]\n is_inverse = False\n if (currency[:3] in al) or (currency[4:] in bl):\n is_inverse = True\n currency = currency[4:] + \"/\" + currency[:3]\n url = \"http://www.chinamoney.com.cn/ags/ms/cm-u-bk-ccpr/CcprHisNew?startDate={start_str}&endDate={end_str}¤cy={currency}&pageNum=1&pageSize=300\"\n if not end:\n end_obj = today_obj()\n else:\n end_obj = dstr2dobj(end)\n if not start:\n start_obj = end_obj - dt.timedelta(prev)\n else:\n start_obj = dstr2dobj(start)\n start_str = start_obj.strftime(\"%Y-%m-%d\")\n end_str = end_obj.strftime(\"%Y-%m-%d\")\n count = (end_obj - start_obj).days + 1\n rl = []\n # API 很奇怪,需要经常变 UA 才好用\n\n headers = {\n \"Referer\": \"http://www.chinamoney.com.cn/chinese/bkccpr/\",\n \"Origin\": \"http://www.chinamoney.com.cn\",\n \"Host\": \"www.chinamoney.com.cn\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n\n if count <= 360:\n headers.update({\"user-agent\": _variate_ua()})\n r = rpost_json(\n url.format(start_str=start_str, end_str=end_str, currency=currency),\n headers=headers,\n )\n rl.extend(r[\"records\"])\n else: # data more than 1 year cannot be fetched once due to API limitation\n sepo_obj = end_obj\n sepn_obj = sepo_obj - dt.timedelta(360)\n # sep0_obj = end_obj - dt.timedelta(361)\n while sepn_obj > start_obj: # [sepn sepo]\n headers.update({\"user-agent\": _variate_ua()})\n r = rpost_json(\n url.format(\n start_str=sepn_obj.strftime(\"%Y-%m-%d\"),\n 
end_str=sepo_obj.strftime(\"%Y-%m-%d\"),\n currency=currency,\n ),\n headers=headers,\n )\n rl.extend(r[\"records\"])\n\n sepo_obj = sepn_obj - dt.timedelta(1)\n sepn_obj = sepo_obj - dt.timedelta(360)\n headers.update({\"user-agent\": _variate_ua()})\n r = rpost_json(\n url.format(\n start_str=start_obj.strftime(\"%Y-%m-%d\"),\n end_str=sepo_obj.strftime(\"%Y-%m-%d\"),\n currency=currency,\n ),\n headers=headers,\n )\n rl.extend(r[\"records\"])\n data = {\"date\": [], \"close\": []}\n for d in rl:\n data[\"date\"].append(pd.Timestamp(d[\"date\"]))\n data[\"close\"].append(d[\"values\"][0])\n df = pd.DataFrame(data)\n df = df[::-1]\n df[\"close\"] = pd.to_numeric(df[\"close\"])\n if is_inverse:\n df[\"close\"] = 1 / df[\"close\"]\n return df\n\n\ndef get_fund(code):\n # 随意设置非空 path,防止嵌套缓存到 fundinfo\n if code[0] == \"F\":\n if code.startswith(\"F96\"):\n return get_historical_from_ttjj_oversea(code)\n else:\n df = fundinfo(code[1:], path=\"nobackend\", priceonly=True).price\n elif code[0] == \"T\":\n df = fundinfo(code[1:], path=\"nobackend\", priceonly=True).price\n df[\"netvalue\"] = df[\"totvalue\"]\n elif code[0] == \"M\":\n df = mfundinfo(code[1:], path=\"nobackend\").price\n else:\n raise ParserFailure(\"Unknown fund code %s\" % code)\n df[\"close\"] = df[\"netvalue\"]\n return df[[\"date\", \"close\"]]\n\n\ndef get_historical_from_ttjj_oversea(code, start=None, end=None):\n if code.startswith(\"F\"):\n code = code[1:]\n pagesize = (\n dt.datetime.strptime(end, \"%Y%m%d\") - dt.datetime.strptime(start, \"%Y%m%d\")\n ).days + 1\n r = rget_json(\n \"http://overseas.1234567.com.cn/overseasapi/OpenApiHander.ashx?api=HKFDApi&m=MethodJZ&hkfcode={hkfcode}&action=2&pageindex=0&pagesize={pagesize}&date1={startdash}&date2={enddash}&callback=\".format(\n hkfcode=get_hkfcode(code),\n pagesize=pagesize,\n startdash=start[:4] + \"-\" + start[4:6] + \"-\" + start[6:],\n enddash=end[:4] + \"-\" + end[4:6] + \"-\" + end[6:],\n )\n )\n datalist = {\"date\": [], \"close\": []}\n for dd in r[\"Data\"]:\n datalist[\"date\"].append(pd.to_datetime(dd[\"PDATE\"]))\n datalist[\"close\"].append(dd[\"NAV\"])\n df = pd.DataFrame(datalist)\n df = df[df[\"date\"] <= end]\n df = df[df[\"date\"] >= start]\n df = df.sort_values(\"date\", ascending=True)\n return df\n\n\ndef get_portfolio_fromttjj(code, start=None, end=None):\n startobj = dt.datetime.strptime(start, \"%Y%m%d\")\n endobj = dt.datetime.strptime(end, \"%Y%m%d\")\n if (endobj - startobj).days < 90:\n return None # note start is always 1.1 4.1 7.1 10.1 in incremental updates\n if code.startswith(\"F\"):\n code = code[1:]\n r = rget(\"http://fundf10.eastmoney.com/zcpz_{code}.html\".format(code=code))\n s = BeautifulSoup(r.text, \"lxml\")\n table = s.find(\"table\", class_=\"tzxq\")\n df = pd.read_html(str(table))[0]\n df[\"date\"] = pd.to_datetime(df[\"报告期\"])\n df[\"stock_ratio\"] = df[\"股票占净比\"].replace(\"---\", \"0%\").apply(lambda s: _float(s[:-1]))\n df[\"bond_ratio\"] = df[\"债券占净比\"].replace(\"---\", \"0%\").apply(lambda s: _float(s[:-1]))\n df[\"cash_ratio\"] = df[\"现金占净比\"].replace(\"---\", \"0%\").apply(lambda s: _float(s[:-1]))\n # df[\"dr_ratio\"] = df[\"存托凭证占净比\"].replace(\"---\", \"0%\").apply(lambda s: xa.cons._float(s[:-1]))\n df[\"assets\"] = df[\"净资产(亿元)\"]\n df = df[::-1]\n return df[[\"date\", \"stock_ratio\", \"bond_ratio\", \"cash_ratio\", \"assets\"]]\n\n\n# this is the most elegant approach to dispatch get_daily, the definition can be such simple\n# you actually don't need to bother on start end blah, everything is taken care of by 
``cahcedio``\n@data_source(\"jq\")\ndef get_fundshare_byjq(code, **kws):\n code = _inverse_convert_code(code)\n df = finance.run_query(\n query(finance.FUND_SHARE_DAILY)\n .filter(finance.FUND_SHARE_DAILY.code == code)\n .filter(finance.FUND_SHARE_DAILY.date >= kws[\"start\"])\n .filter(finance.FUND_SHARE_DAILY.date <= kws[\"end\"])\n .order_by(finance.FUND_SHARE_DAILY.date)\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df[[\"date\", \"shares\"]]\n return df\n\n\n@lru_cache(maxsize=1024)\ndef get_futu_id(code):\n r = rget(\"https://www.futunn.com/stock/{code}\".format(code=code))\n sind = r.text.find(\"securityId\")\n futuid = r.text[sind : sind + 30].split(\"=\")[1].split(\";\")[0].strip(\" \").strip(\"'\")\n sind = r.text.find(\"marketType\")\n market = r.text[sind : sind + 30].split(\"=\")[1].split(\";\")[0].strip().strip(\"''\")\n return futuid, market\n\n\ndef get_futu_historical(code, start=None, end=None):\n fid, market = get_futu_id(code)\n r = rget(\n \"https://www.futunn.com/new-quote/kline?security_id={fid}&type=2&market_type={market}\".format(\n fid=fid, market=market\n )\n )\n df = pd.DataFrame(r.json()[\"data\"][\"list\"])\n df[\"date\"] = df[\"k\"].map(\n lambda s: dt.datetime.fromtimestamp(s)\n .replace(hour=0, minute=0, second=0, microsecond=0)\n .replace(tzinfo=None)\n )\n df[\"open\"] = df[\"o\"] / 1000\n df[\"close\"] = df[\"c\"] / 1000\n df[\"high\"] = df[\"h\"] / 1000\n df[\"low\"] = df[\"l\"] / 1000\n df[\"volume\"] = df[\"v\"]\n df = df.drop([\"k\", \"t\", \"o\", \"c\", \"h\", \"l\", \"v\"], axis=1)\n return df\n\n\ndef get_historical_fromsp(code, start=None, end=None, region=\"us\", **kws):\n \"\"\"\n 标普官网数据源\n\n :param code:\n :param start:\n :param end:\n :param kws:\n :return:\n \"\"\"\n\n if code.startswith(\"SP\"):\n code = code[2:]\n if len(code.split(\".\")) > 1:\n col = code.split(\".\")[1]\n code = code.split(\".\")[0]\n else:\n col = \"1\"\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 300:\n flag = \"one\"\n elif fromnow < 1000:\n flag = \"three\"\n else:\n flag = \"ten\"\n url = \"https://{region}.spindices.com/idsexport/file.xls?\\\nselectedModule=PerformanceGraphView&selectedSubModule=Graph\\\n&yearFlag={flag}YearFlag&indexId={code}\".format(\n region=region, flag=flag, code=code\n )\n r = rget(\n url,\n headers={\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n },\n )\n df = pd.read_excel(r.content, engine=\"xlrd\")\n # print(df.iloc[:10])\n df = df.iloc[6:]\n df = df.dropna()\n df[\"close\"] = df[\"Unnamed: \" + col]\n df[\"date\"] = pd.to_datetime(df[\"Unnamed: 0\"])\n df = df[[\"date\", \"close\"]]\n return df\n\n\ndef get_historical_frombb(code, start=None, end=None, **kws):\n \"\"\"\n https://www.bloomberg.com/ 数据源, 试验性支持。\n 似乎有很严格的 IP 封禁措施, 且最新数据更新滞后,且国内会被 reset,似乎难以支持 T-1 净值预测。强烈建议从英为或雅虎能找到的标的,不要用彭博源,该 API 只能作为 last resort。\n\n :param code:\n :param start:\n :param end:\n :param kws:\n :return:\n \"\"\"\n if code.startswith(\"BB-\"):\n code = code[3:]\n # end_obj = dt.datetime.strptime(end, \"%Y%m%d\")\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 20:\n years = \"1_MONTH\"\n elif fromnow < 300:\n years = \"1_YEAR\"\n else:\n years = \"5_YEAR\"\n url = 
\"https://www.bloomberg.com/markets2/api/history/{code}/PX_LAST?\\\ntimeframe={years}&period=daily&volumePeriod=daily\".format(\n years=years, code=code\n )\n r = rget_json(\n url,\n headers={\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"referer\": \"https://www.bloomberg.com/quote/{code}\".format(code=code),\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"accept\": \"*/*\",\n },\n )\n df = pd.DataFrame(r[0][\"price\"])\n df[\"close\"] = df[\"value\"]\n df[\"date\"] = pd.to_datetime(df[\"dateTime\"])\n df = df[[\"date\", \"close\"]]\n return df\n\n\ndef get_historical_fromft(code, start, end, _type=\"indices\"):\n \"\"\"\n finance times 数据\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if not code.isdigit():\n code = get_ft_id(code, _type=_type)\n start = start.replace(\"/\", \"\").replace(\"-\", \"\")\n end = end.replace(\"/\", \"\").replace(\"-\", \"\")\n start = start[:4] + \"/\" + start[4:6] + \"/\" + start[6:]\n end = end[:4] + \"/\" + end[4:6] + \"/\" + end[6:]\n url = \"https://markets.ft.com/data/equities/ajax/\\\nget-historical-prices?startDate={start}&endDate={end}&symbol={code}\".format(\n code=code, start=start, end=end\n )\n r = rget_json(url, headers={\"user-agent\": \"Mozilla/5.0\"})\n b = BeautifulSoup(r[\"html\"], \"lxml\")\n data = {\"date\": [], \"open\": [], \"close\": [], \"high\": [], \"low\": []}\n for i, td in enumerate(b.findAll(\"td\")):\n if i % 6 == 0:\n s = td.find(\"span\").string.split(\",\")[1:]\n s = \",\".join(s)\n data[\"date\"].append(dt.datetime.strptime(s, \" %B %d, %Y\"))\n elif i % 6 == 1:\n data[\"open\"].append(_float(td.string))\n elif i % 6 == 2:\n data[\"high\"].append(_float(td.string))\n elif i % 6 == 3:\n data[\"low\"].append(_float(td.string))\n elif i % 6 == 4:\n data[\"close\"].append(_float(td.string))\n df = pd.DataFrame(data)\n df = df.iloc[::-1]\n return df\n\n\ndef get_historical_fromyh(code, start=None, end=None):\n \"\"\"\n 雅虎财经数据源,支持数据丰富,不限于美股。但存在部分历史数据缺失 NAN 或者周末进入交易日的现象,可能数据需要进一步清洗和处理。\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"YH-\"):\n code = code[3:]\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 20:\n range_ = \"1mo\"\n elif fromnow < 50:\n range_ = \"3mo\"\n elif fromnow < 150:\n range_ = \"6mo\"\n elif fromnow < 300:\n range_ = \"1y\"\n elif fromnow < 600:\n range_ = \"2y\"\n elif fromnow < 1500:\n range_ = \"5y\"\n else:\n range_ = \"10y\"\n url = \"https://query1.finance.yahoo.com/v8\\\n/finance/chart/{code}?region=US&lang=en-US&includePrePost=false\\\n&interval=1d&range={range_}&corsDomain=finance.yahoo.com&.tsrc=finance\".format(\n code=code, range_=range_\n )\n # 该 API 似乎也支持起止时间选择参数,period1=1427500800&period2=1585353600\n # 也可直接从历史数据页面爬取: https://finance.yahoo.com/quote/CSGOLD.SW/history?period1=1427500800&period2=1585353600&interval=1d&filter=history&frequency=1d\n r = rget_json(url)\n data = {}\n datel = []\n for t in r[\"chart\"][\"result\"][0][\"timestamp\"]:\n t = dt.datetime.fromtimestamp(t)\n if t.second != 0:\n t -= dt.timedelta(hours=8)\n datel.append(t.replace(tzinfo=None, hour=0, minute=0, second=0, microsecond=0))\n\n data[\"date\"] = datel\n for k in [\"close\", \"open\", \"high\", \"low\"]:\n data[k] = r[\"chart\"][\"result\"][0][\"indicators\"][\"quote\"][0][k]\n df = pd.DataFrame(data)\n return df\n\n\ndef get_historical_fromzzindex(code, start, 
end=None):\n \"\"\"\n 中证指数源\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"ZZ\"):\n code = code[2:]\n start_obj = dt.datetime.strptime(start, \"%Y%m%d\")\n fromnow = (today_obj() - start_obj).days\n if fromnow < 20:\n flag = \"1%E4%B8%AA%E6%9C%88\"\n elif fromnow < 60:\n flag = \"3%E4%B8%AA%E6%9C%88\" # 个月\n elif fromnow < 200:\n flag = \"1%E5%B9%B4\" # 年\n else:\n flag = \"5%E5%B9%B4\"\n r = rget_json(\n \"http://www.csindex.com.cn/zh-CN/indices/index-detail/\\\n{code}?earnings_performance={flag}&data_type=json\".format(\n code=code, flag=flag\n ),\n headers={\n \"Host\": \"www.csindex.com.cn\",\n \"Referer\": \"http://www.csindex.com.cn/zh-CN/indices/index-detail/{code}\".format(\n code=code\n ),\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n },\n )\n df = pd.DataFrame(r)\n df[\"date\"] = pd.to_datetime(df[\"tradedate\"])\n df[\"close\"] = df[\"tclose\"].apply(_float)\n return df[[\"date\", \"close\"]]\n\n\ndef get_historical_fromgzindex(code, start, end):\n \"\"\"\n 国证指数源\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"GZ\"):\n code = code[2:]\n start = start[:4] + \"-\" + start[4:6] + \"-\" + start[6:]\n end = end[:4] + \"-\" + end[4:6] + \"-\" + end[6:]\n params = {\n \"indexCode\": code,\n \"startDate\": start,\n \"endDate\": end,\n \"frequency\": \"Day\",\n }\n\n r = rget_json(\n \"http://hq.cnindex.com.cn/market/market/getIndexDailyDataWithDataFormat\",\n params=params,\n )\n df = pd.DataFrame(r[\"data\"][\"data\"], columns=r[\"data\"][\"item\"])\n\n df[\"date\"] = pd.to_datetime(df[\"timestamp\"])\n df = df[[\"date\", \"close\", \"open\", \"low\", \"high\", \"percent\", \"amount\", \"volume\"]]\n # TODO: 是否有这些列不全的国证指数?\n df = df[::-1]\n return df\n\n\ndef get_historical_fromhzindex(code, start, end):\n \"\"\"\n 华证指数源\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"HZ\"):\n code = code[2:]\n\n r = rget_json(\n \"http://www.chindices.com/index/values.val?code={code}\".format(code=code)\n )\n df = pd.DataFrame(r[\"data\"])\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df[[\"date\", \"price\", \"pctChange\"]]\n df.rename(columns={\"price\": \"close\", \"pctChange\": \"percent\"}, inplace=True)\n df = df[::-1]\n return df\n\n\ndef get_historical_fromesunny(code, start=None, end=None):\n \"\"\"\n 易盛商品指数\n\n :param code: eg. 
ESCI000201\n :param start: just placeholder\n :param end: just placeholder\n :return:\n \"\"\"\n # code\n if code.startswith(\"ESCI\"):\n code = code[4:] + \".ESCI\"\n r = rget(\n \"http://www.esunny.com.cn/chartES/csv/shareday/day_易盛指数_{code}.es\".format(\n code=code\n )\n )\n data = []\n for l in r.text.split(\"\\n\"):\n row = [s.strip() for s in l.split(\"|\")] # 开 高 低 收 结\n if len(row) > 1:\n data.append(row[:7])\n df = pd.DataFrame(\n data, columns=[\"date\", \"open\", \"high\", \"low\", \"close\", \"settlement\", \"amount\"]\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n for c in [\"open\", \"high\", \"low\", \"close\", \"settlement\", \"amount\"]:\n df[c] = df[c].apply(_float)\n return df\n\n\ndef get_historical_fromycharts(code, start, end, category, metric):\n params = {\n \"securities\": \"include:true,id:{code},,\".format(code=code),\n \"calcs\": \"include:true,id:{metric},,\".format(metric=metric),\n \"startDate\": start, # %m/%d/%Y\n \"endDate\": end, # %m/%d/%Y\n \"zoom\": \"custom\",\n }\n r = rget_json(\n \"https://ycharts.com/charts/fund_data.json\",\n params=params,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\\\n AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"Host\": \"ycharts.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Referer\": \"https://ycharts.com/{category}/{code}/chart/\".format(\n category=category, code=code\n ),\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n },\n )\n df = pd.DataFrame(\n data=r[\"chart_data\"][0][0][\"raw_data\"], columns=[\"timestamp\", \"close\"]\n )\n df[\"date\"] = (df[\"timestamp\"]).apply(ts2pdts)\n return df[[\"date\", \"close\"]]\n\n\n@lru_cache()\ndef get_bond_rates(rating, date=None):\n \"\"\"\n 获取各评级企业债的不同久期的预期利率\n\n :param rating: str. 
eg AAA, AA-, N for 中国国债\n :param date: %Y-%m-%d\n :return:\n \"\"\"\n rating = rating.strip()\n rating_uid = {\n \"N\": \"2c9081e50a2f9606010a3068cae70001\", # 国债\n \"AAA\": \"2c9081e50a2f9606010a309f4af50111\",\n \"AAA-\": \"8a8b2ca045e879bf014607ebef677f8e\",\n \"AA+\": \"2c908188138b62cd01139a2ee6b51e25\",\n \"AA\": \"2c90818812b319130112c279222836c3\",\n \"AA-\": \"8a8b2ca045e879bf014607f9982c7fc0\",\n \"A+\": \"2c9081e91b55cc84011be40946ca0925\",\n \"A\": \"2c9081e91e6a3313011e6d438a58000d\",\n \"A-\": \"8a8b2ca04142df6a014148ca880f3046\",\n \"A\": \"2c9081e91e6a3313011e6d438a58000d\",\n \"BBB+\": \"2c9081e91ea160e5011eab1f116c1a59\",\n \"BBB\": \"8a8b2ca0455847ac0145650780ad68fb\",\n \"BB\": \"8a8b2ca0455847ac0145650ba23b68ff\",\n \"B\": \"8a8b2ca0455847ac0145650c3d726901\",\n }\n # 上边字典不全,非常欢迎贡献 :)\n def _fetch(date):\n r = rpost(\n \"https://yield.chinabond.com.cn/cbweb-mn/yc/searchYc?\\\nxyzSelect=txy&&workTimes={date}&&dxbj=0&&qxll=0,&&yqqxN=N&&yqqxK=K&&\\\nycDefIds={uid}&&wrjxCBFlag=0&&locale=zh_CN\".format(\n uid=rating_uid.get(rating, rating), date=date\n ),\n )\n return r\n\n if not date:\n date = dt.datetime.today().strftime(\"%Y-%m-%d\")\n\n r = _fetch(date)\n while len(r.text.strip()) < 20: # 当天没有数据,非交易日\n date = last_onday(date).strftime(\"%Y-%m-%d\")\n r = _fetch(date)\n l = r.json()[0][\"seriesData\"]\n l = [t for t in l if t[1]]\n df = pd.DataFrame(l, columns=[\"year\", \"rate\"])\n return df\n\n\ndef get_bond_rates_range(rating, duration=3, freq=\"W-FRI\", start=None, end=None):\n l = []\n if rating.startswith(\"B-\"):\n rating = rating[2:]\n rs = rating.split(\".\")\n if len(rs) > 1:\n duration = float(rs[1])\n rating = rs[0]\n\n for d in pd.date_range(start, end, freq=freq):\n df = get_bond_rates(rating, d.strftime(\"%Y-%m-%d\"))\n l.append([d, df[df[\"year\"] <= duration].iloc[-1][\"rate\"]])\n return pd.DataFrame(l, columns=[\"date\", \"close\"])\n\n\n@data_source(\"jq\")\ndef get_macro(table, start, end, datecol=\"stat_year\"):\n df = macro.run_query(\n query(getattr(macro, table))\n .filter(getattr(getattr(macro, table), datecol) >= start)\n .filter(getattr(getattr(macro, table), datecol) <= end)\n .order_by(getattr(getattr(macro, table), datecol))\n )\n df[datecol] = pd.to_datetime(df[datecol])\n df[\"date\"] = df[datecol]\n return df\n\n\ndef set_handler(method=\"daily\", f=None):\n \"\"\"\n 为 ``get_daily``, ``get_bar`` 或 ``get_rt`` 设置 hook,优先按照函数 f 进行处理,若返回 None,再按一般情形处理\n\n :param method: str. daily, rt, bar\n :param f: func, default None.\n :return: None\n \"\"\"\n setattr(thismodule, \"get_\" + method + \"_handler\", f)\n\n\ndef _get_daily(\n code, start=None, end=None, prev=365, _from=None, wrapper=True, handler=True, **kws\n):\n \"\"\"\n universal fetcher for daily historical data of literally everything has a value in market.\n 数据来源包括但不限于天天基金,雪球,英为财情,外汇局官网,聚宽,标普官网,bloomberg,雅虎财经,ycharts等。\n\n :param code: str.\n\n 1. 对于沪深市场的股票,指数,ETF,LOF 场内基金,可转债和债券,直接使用其代码,主要开头需要包括 SH 或者 SZ。如果数字代码之后接 .A .B .N 分别代表后复权,前复权和不复权数据,不加后缀默认前复权。港股美股同理。\n\n 2. 对于香港市场的股票,指数,使用其数字代码,同时开头要添加 HK。\n\n 3. 对于美国市场的股票,指数,ETF 等,直接使用其字母缩写代码即可。\n\n 4. 对于人民币中间价数据,使用 \"USD/CNY\" 的形式,具体可能的值可在 http://www.chinamoney.com.cn/chinese/bkccpr/ 历史数据的横栏查询,注意日元需要用 100JPY/CNY.\n\n 5. 对于所有可以在 cn.investing.com 网站查到的金融产品,其代码可以是该网站对应的统一代码,或者是网址部分,比如 DAX 30 的概览页面为 https://cn.investing.com/indices/germany-30,那么对应代码即为 \"indices/germany-30\"。也可去网页 inspect 手动查找其内部代码(一般不需要自己做,推荐直接使用网页url作为 code 变量值),手动 inspect 加粗的实时价格,其对应的网页 span class 中的 pid 的数值即为内部代码。\n\n 6. 
对于国内发行的基金,使用基金代码,同时开头添加 F。若想考虑分红使用累计净值,则开头添加 T。\n\n 7. 对于国内发行的货币基金,使用基金代码,同时开头添加 M。(全部按照净值数据处理)\n\n 8. 形如 peb-000807.XSHG 或 peb-SH000807 格式的数据,可以返回每周的指数估值情况,需要 enable 聚宽数据源方可查看。\n\n 9. 形如 iw-000807.XSHG 或 iw-SH000807 格式的数据,可以返回每月的指数成分股和实时权重,需要 enable 聚宽数据源方可查看。\n\n 10. 形如 fs-SH501018 格式的数据,可以返回指定场内基金每日份额,需要 enable 聚宽数据源方可查看。\n\n 11. 形如 SP5475707.2 格式的数据,可以返回标普官网相关指数的日线数据(最近十年),id 5475707 部分可以从相关指数 export 按钮获取的链接中得到,小数点后的部分代表保存的列数。参考链接:https://us.spindices.com/indices/equity/sp-global-oil-index. 若SPC开头,则从中国网站获取。\n\n 12. 形如 BB-FGERBIU:ID 格式的数据,对应网页 https://www.bloomberg.com/quote/FGERBIU:ID,可以返回彭博的数据(最近五年)\n\n 13. 形如 sw-801720 格式的数据,可以返回对应申万行业的历史数据情况,需要 enable 聚宽数据源方可查看。\n\n 14. 形如 teb-SH000300 格式的数据,返回每周指数盈利和净资产总值数据(单位:亿人民币元),需要 enbale 聚宽数据方可查看。\n\n 15. 形如 YH-CSGOLD.SW 格式的数据,返回雅虎财经标的日线数据(最近十年)。代码来自标的网页 url:https://finance.yahoo.com/quote/CSGOLD.SW。\n\n 16. 形如 FT-22065529 格式的数据或 FT-INX:IOM,可以返回 financial times 的数据,推荐直接用后者。前者数字代码来源,打开浏览器 network 监视,切换图标时间轴时,会新增到 https://markets.ft.com/data/chartapi/series 的 XHR 请求,其 request payload 里的 [elements][symbol] 即为该指数对应数字。\n\n 17. 形如 FTC-WTI+Crude+Oil 格式的数据,开头可以是 FTC, FTE, FTX, FTF, FTB, FTI 对应 ft.com 子栏目 commdities,equities,currencies,funds,bonds,indicies。其中 FTI 和 FT 相同。\n\n 18. 形如 mcy-MAC_AREA_UNEMPLOY 格式的数据,返回相应的宏观数据,需要聚宽数据源。mcy,mcq,mcm 代表年度,季度和月度的数据,code 为表名,可以参考 https://www.joinquant.com/help/api/help?name=macroData\n\n 19. 形如 ZZ000905,ZZH30533 的代码,代表中证官网的指数,ZZ 之后接指数代码,注意有些指数代码里可能包含 H,历史数据最大到近五年。\n\n 20. 形如 GZB30018, GZ399299 格式的数据,代表国证系列指数, GZ 之后接指数代码,代码可能包含更多字母。\n\n 21. 形如 ESCI000201 格式的数据,易盛商品指数系列,参考 http://www.esunny.com.cn/index.php?a=lists&catid=60。\n\n 22. 形如 pt-F100032 格式的数据,返回指定基金每季度股票债券和现金的持仓比例\n\n 23. 形如 yc-companies/DBP,yc-companies/DBP/price 格式的数据,返回ycharts股票、ETF数据,对应网页 https://ycharts.com/companies/DBP/price,最后部分为数据含义,默认price,可选:net_asset_value(仅ETF可用)、total_return_price、total_return_forward_adjusted_price、average_volume_30,历史数据限制五年内。\n\n 24. 形如 yc-indices/^SPGSCICO,yc-indices/^SPGSCICO/level 格式的数据,返回ycharts指数数据,对应网页 https://ycharts.com/indices/%5ESPGSCICO/level,最后部分为数据含义,默认level,可选:total_return_forward_adjusted_price,历史数据限制五年内。\n\n 25. 形如 HZ999001 HZ999005 格式的数据,代表了华证系列指数 http://www.chindices.com/indicator.html#\n\n 26. 形如 B-AA+.3 格式的数据,代表了 AA+ 企业债三年久期利率数据 (每周)\n\n 27. 形如 fu-00700.HK 或 fu-BA.US 格式的数据,代表了来自 https://www.futunn.com/stock/BA-US 的日线行情数据\n\n :param start: str. \"20200101\", \"2020/01/01\", \"2020-01-01\" are all legal. The starting date of daily data.\n :param end: str. format is the same as start. The ending date of daily data.\n :param prev: Optional[int], default 365. If start is not specified, start = end-prev.\n :param _from: Optional[str]. 一般用户不需设定该选项。can be one of \"xueqiu\", \"zjj\", \"investing\", \"tiantianjijin\". Only used for debug to\n enforce data source. For common use, _from can be chosed automatically based on code in the run time.\n :param wrapper: bool. 一般用户不需设定该选项。\n :param handler: bool. Default True. 
若为 False,则 handler 钩子失效,用于钩子函数中的原函数嵌套调用。\n :return: pd.Dataframe.\n must include cols: date[pd.Timestamp],close[float64]。\n \"\"\"\n if handler:\n if getattr(thismodule, \"get_daily_handler\", None):\n args = inspect.getargvalues(inspect.currentframe())\n f = getattr(thismodule, \"get_daily_handler\")\n fr = f(**args.locals)\n if fr is not None:\n return fr\n\n if not end:\n end_obj = today_obj()\n else:\n end_obj = dstr2dobj(end)\n if not start:\n start_obj = end_obj - dt.timedelta(days=prev)\n else:\n start_obj = dstr2dobj(start)\n\n if not _from:\n if (code.startswith(\"SH\") or code.startswith(\"SZ\")) and code[2:8].isdigit():\n _from = \"xueqiu\"\n elif code.endswith(\"/CNY\") or code.startswith(\"CNY/\"):\n _from = \"zjj\"\n elif code.isdigit():\n _from = \"cninvesting\"\n elif code[0] in [\"F\", \"M\", \"T\"] and code[1:].isdigit():\n _from = \"ttjj\"\n elif code.startswith(\"HK\") and code[2:7].isdigit():\n _from = \"xueqiu\"\n code = code[2:]\n elif code.startswith(\"SP\") and code[2:].split(\".\")[0].isdigit():\n _from = \"SP\"\n elif code.startswith(\"SPC\") and code[3:].split(\".\")[0].isdigit():\n _from = \"SPC\"\n elif code.startswith(\"ZZ\") and code[4:].isdigit(): # 注意中证系列指数的代码里可能包含字母!\n _from = \"ZZ\"\n elif code.startswith(\"GZ\") and code[-3:].isdigit(): # 注意国证系列指数的代码里可能包含多个字母!\n _from = \"GZ\"\n elif code.startswith(\"HZ\") and code[2:].isdigit():\n _from = \"HZ\"\n elif code.startswith(\"ESCI\") and code[4:].isdigit():\n _from = \"ES\"\n elif code.startswith(\"yc-companies/\") or code.startswith(\"yc-indices/\"):\n _from = \"ycharts\"\n params = code.split(\"/\")\n code = params[1]\n category = params[0].split(\"-\")[1]\n if len(params) == 3:\n metric = params[2]\n else:\n if category == \"companies\":\n metric = \"price\"\n elif category == \"indices\":\n metric = \"level\"\n elif len(code.split(\"-\")) >= 2 and len(code.split(\"-\")[0]) <= 3:\n # peb-000807.XSHG\n _from = code.split(\"-\")[0]\n code = \"-\".join(code.split(\"-\")[1:])\n elif len(code[1:].split(\"/\")) == 2:\n _from = \"cninvesting\"\n code = get_investing_id(code)\n else:\n _from = \"xueqiu\" # 美股代码\n\n count = (today_obj() - start_obj).days + 1\n start_str = start_obj.strftime(\"%Y/%m/%d\")\n end_str = end_obj.strftime(\"%Y/%m/%d\")\n if _from in [\"cninvesting\", \"investing\", \"default\", \"IN\"]:\n df = get_historical_fromcninvesting(code, start_str, end_str)\n df = prettify(df)\n elif _from in [\"xueqiu\", \"xq\", \"snowball\", \"XQ\"]:\n code, type_ = decouple_code(code)\n\n df = get_historical_fromxq(code, count, type_=type_)\n df = prettify(df)\n elif _from in [\"zhongjianjia\", \"zjj\", \"chinamoney\", \"ZJJ\"]:\n df = get_rmb(start, end, prev, currency=code)\n elif _from in [\"ttjj\", \"tiantianjijin\", \"xalpha\", \"eastmoney\"]:\n if code.startswith(\"F96\"):\n df = get_historical_from_ttjj_oversea(code, start=start, end=end)\n else:\n df = get_fund(code)\n\n elif _from == \"peb\":\n if (\n code.startswith(\"SH000\")\n or code.startswith(\"SZ399\")\n or code.startswith(\"399\")\n or code.startswith(\"000\")\n ):\n df = _get_peb_range(code=code, start=start_str, end=end_str)\n elif code.startswith(\"F\"):\n df = get_fund_peb_range(code=code, start=start, end=end)\n else:\n df = get_stock_peb_range(code=code, start=start, end=end, wrapper=True)\n\n elif _from == \"iw\":\n df = _get_index_weight_range(code=code, start=start_str, end=end_str)\n\n elif _from == \"fs\":\n df = get_fundshare_byjq(code, start=start, end=end)\n\n elif _from == \"SP\":\n df = get_historical_fromsp(code, 
start=start, end=end)\n\n elif _from == \"SPC\":\n df = get_historical_fromsp(code[3:], start=start, end=end, region=\"chinese\")\n\n elif _from == \"BB\":\n df = get_historical_frombb(code, start=start, end=end)\n\n elif _from == \"ZZ\":\n df = get_historical_fromzzindex(code, start=start, end=end)\n\n elif _from == \"GZ\":\n df = get_historical_fromgzindex(code, start=start, end=end)\n\n elif _from == \"HZ\":\n df = get_historical_fromhzindex(code, start=start, end=end)\n\n elif _from == \"ES\":\n df = get_historical_fromesunny(code, start=start, end=end)\n\n elif _from == \"B\":\n df = get_bond_rates_range(code, start=start, end=end)\n\n elif _from == \"fu\":\n code = code.replace(\".\", \"-\")\n df = get_futu_historical(code, start=start, end=end)\n\n elif _from == \"ycharts\":\n df = get_historical_fromycharts(\n code,\n start=start_obj.strftime(\"%m/%d/%Y\"),\n end=end_obj.strftime(\"%m/%d/%Y\"),\n category=category,\n metric=metric,\n )\n\n elif _from == \"sw\":\n df = get_sw_from_jq(code, start=start, end=end)\n\n elif _from == \"teb\":\n df = get_teb_range(code, start=start, end=end)\n\n elif _from in [\"pt\", \"portfolio\"]:\n df = get_portfolio_fromttjj(code, start=start, end=end)\n\n elif _from == \"YH\":\n df = get_historical_fromyh(code, start=start, end=end)\n\n elif _from in [\"FT\", \"FTI\"]:\n df = get_historical_fromft(code, start=start, end=end)\n\n elif _from == \"FTE\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"equities\")\n\n elif _from == \"FTB\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"bonds\")\n\n elif _from == \"FTF\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"funds\")\n\n elif _from == \"FTX\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"currencies\")\n\n elif _from == \"FTC\":\n df = get_historical_fromft(code, start=start, end=end, _type=\"commodities\")\n\n elif _from == \"INA\": # investing app\n code = get_investing_id(code, app=True)\n df = get_historical_fromcninvesting(code, start_str, end_str, app=True)\n df = prettify(df)\n\n elif _from == \"mcy\":\n df = get_macro(code, start=start[:4], end=end[:4], datecol=\"stat_year\")\n\n elif _from == \"mcq\":\n df = get_macro(code, start=start, end=end, datecol=\"stat_quarter\")\n\n elif _from == \"mcm\":\n df = get_macro(code, start=start, end=end, datecol=\"stat_month\")\n\n elif _from == \"mcd\":\n df = get_macro(code, start=start, end=end, datecol=\"day\")\n\n else:\n raise ParserFailure(\"no such data source: %s\" % _from)\n\n if wrapper or len(df) == 0:\n return df\n else:\n df = df[df.date <= end_str]\n df = df[df.date >= start_str]\n return df\n\n\ndef get_xueqiu_rt(code, token=\"a664afb60c7036c7947578ac1a5860c4cfb6b3b5\"):\n if code.startswith(\"HK\") and code[2:].isdigit():\n code = code[2:]\n url = \"https://stock.xueqiu.com/v5/stock/quote.json?symbol={code}&extend=detail\"\n r = rget_json(\n url.format(code=code),\n cookies={\"xq_a_token\": token},\n headers={\"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\"},\n )\n n = r[\"data\"][\"quote\"][\"name\"]\n q = r[\"data\"][\"quote\"][\"current\"]\n try:\n q = _float(q)\n except TypeError: # 针对雪球实时在9点后开盘前可能出现其他情形的fixup, 效果待 check\n # 现在的怀疑是在9am 到9:15 am, 雪球 API current 字段返回 Null\n q = _float(r[\"data\"][\"quote\"][\"last_close\"])\n q_ext = r[\"data\"][\"quote\"].get(\"current_ext\", None)\n percent = r[\"data\"][\"quote\"][\"percent\"]\n try:\n percent = _float(percent)\n except:\n pass\n currency = r[\"data\"][\"quote\"][\"currency\"]\n market 
= r[\"data\"][\"market\"][\"region\"]\n timestr = dt.datetime.fromtimestamp(r[\"data\"][\"quote\"][\"time\"] / 1000).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n if r[\"data\"][\"quote\"].get(\"timestamp_ext\", None):\n time_ext = dt.datetime.fromtimestamp(\n r[\"data\"][\"quote\"][\"timestamp_ext\"] / 1000\n ).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n time_ext = None\n share = r[\"data\"][\"quote\"][\"total_shares\"]\n fshare = r[\"data\"][\"quote\"][\"float_shares\"]\n volume = r[\"data\"][\"quote\"][\"volume\"]\n return {\n \"name\": n,\n \"current\": q,\n \"percent\": percent,\n \"current_ext\": _float(q_ext) if q_ext else None,\n \"currency\": currency,\n \"market\": market, # HK, US, CN\n \"time\": timestr,\n \"time_ext\": time_ext,\n \"totshare\": share,\n \"floatshare\": fshare,\n \"volume\": volume,\n }\n\n\ndef get_cninvesting_rt(suburl, app=False):\n if not app:\n url = \"https://cn.investing.com\"\n else:\n url = \"https://cnappapi.investing.com\"\n if not suburl.startswith(\"/\"):\n url += \"/\"\n url += suburl\n if not app:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36\"\n }\n else:\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept-Language\": \"zh-cn\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0\",\n \"ccode\": \"CN\",\n #'ccode_time': '1585551041.986028',\n \"x-app-ver\": \"117\",\n \"x-meta-ver\": \"14\",\n \"x-os\": \"ios\",\n \"x-uuid\": str(uuid4()),\n \"Host\": \"cn.investing.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n\n r = rget(\n url,\n headers=headers,\n )\n s = BeautifulSoup(r.text, \"lxml\")\n last_last = s.find(\"span\", id=\"last_last\")\n q = _float(last_last.string)\n name = s.find(\"h1\").string.strip()\n ind = 0\n timestr = s.select('span[class*=\"ClockBigIcon\"]+span')[0].text\n l = s.find(\"div\", class_=\"lighterGrayFont\").contents\n for i, c in enumerate(l):\n if isinstance(c, str) and c.strip() == \"货币\":\n ind = i\n break\n if ind == 0:\n currency = None\n else:\n currency = l[ind - 1].string\n percent = _float(\n s.find(\"span\", attrs={\"dir\": \"ltr\", \"class\": \"parentheses\"}).string[:-1]\n )\n panhou = s.find(\"div\", class_=\"afterHoursInfo\")\n if panhou:\n q_ext = _float(panhou.find(\"span\").string)\n else:\n q_ext = None\n market = None\n for span in s.findAll(\"span\", class_=\"elp\"):\n if span.find(\"a\") and span.find(\"a\")[\"href\"].startswith(\"/markets\"):\n market = span.string\n market = region_trans.get(market, market)\n time_ext = s.select(\"div[class~=lastUpdated]\")\n if time_ext:\n time_ext = time_ext[0].text.strip()\n else:\n time_ext = None\n d = {\n \"name\": name,\n \"current\": q,\n \"current_ext\": q_ext,\n \"time\": timestr,\n \"time_ext\": time_ext,\n \"currency\": currency,\n \"percent\": percent,\n \"market\": market,\n }\n\n if suburl.startswith(\"commodities\"): # 商品期货展期日\n try:\n d[\"rollover\"] = s.select(\"span[class*=float_lang_base_2]\")[10].string\n d[\"lastrollover\"] = s.select(\"span[class*=float_lang_base_2]\")[13].string\n except (ValueError, IndexError, AttributeError):\n logger.warning(\"%s cannot extract rollover date\" % suburl)\n # in case some commodities with strong page structure\n return d\n\n\ndef get_rt_from_sina(code):\n if (\n code.startswith(\"SH\") or code.startswith(\"SZ\") or code.startswith(\"HK\")\n ) and code[2:].isdigit():\n tinycode = code[:2].lower() + code[2:]\n if 
code.startswith(\"HK\"): # 港股额外要求实时\n tinycode = \"rt_\" + tinycode\n else: # 美股\n tinycode = \"gb_\"\n if code.startswith(\".\"):\n code = code[1:]\n tinycode += code.lower()\n r = rget(\"https://hq.sinajs.cn/list={tinycode}\".format(tinycode=tinycode))\n l = r.text.split(\"=\")[1].split(\",\")\n d = {}\n d[\"name\"] = l[0].strip('\"')\n if (\n code.startswith(\"SH\") or code.startswith(\"SZ\") or code.startswith(\"HK\")\n ) and code[2:].isdigit():\n # TODO: 20200819: API seems changed a bit, index shift?\n # or things may get zero when the market is closed?\n if code.startswith(\"HK\"):\n d[\"current\"] = float(l[9]) # 英文股票名称占位\n d[\"currency\"] = \"HKD\"\n d[\"percent\"] = round(float(l[8]), 2)\n d[\"market\"] = \"HK\"\n d[\"time\"] = l[17] + \" \" + l[18]\n d[\"current_ext\"] = None\n\n else: # A 股\n d[\"current\"] = float(l[3])\n d[\"currency\"] = \"CNY\"\n d[\"percent\"] = round((float(l[3]) / float(l[2]) - 1) * 100, 2)\n d[\"market\"] = \"CN\"\n d[\"time\"] = l[-4] + \" \" + l[-3]\n for i in range(10, 19)[::2]:\n d[\"buy\" + str(int((i - 8) / 2))] = (l[i + 1], l[i])\n for i in range(20, 29)[::2]:\n d[\"sell\" + str(int((i - 18) / 2))] = (l[i + 1], l[i])\n d[\"current_ext\"] = None\n\n else:\n d[\"currency\"] = \"USD\"\n d[\"current\"] = float(l[1])\n d[\"percent\"] = float(l[2])\n d[\"current_ext\"] = _float(l[21]) if _float(l[21]) > 0 else None\n d[\"market\"] = \"US\"\n d[\"time\"] = l[3]\n return d\n\n\ndef make_ft_url(code, _type=\"indices\"):\n \"\"\"\n\n :param code:\n :param _type: indices, commodities, currencies, funds, equities, bonds\n :return:\n \"\"\"\n if _type == \"indices\":\n url = \"https://markets.ft.com/data/indices/tearsheet/summary?s={code}\".format(\n code=code\n )\n elif _type == \"commodities\":\n url = (\n \"https://markets.ft.com/data/commodities/tearsheet/summary?c={code}\".format(\n code=code\n )\n )\n elif _type == \"currencies\":\n url = (\n \"https://markets.ft.com/data/currencies/tearsheet/summary?s={code}\".format(\n code=code\n )\n )\n elif _type == \"funds\":\n url = \"https://markets.ft.com/data/funds/tearsheet/summary?s={code}\".format(\n code=code\n )\n elif _type == \"equities\":\n url = \"https://markets.ft.com/data/equities/tearsheet/summary?s={code}\".format(\n code=code\n )\n elif _type == \"bonds\":\n url = \"https://markets.ft.com/data/bonds/tearsheet/summary?s={code}\".format(\n code=code\n )\n else:\n raise ParserFailure(\"no reconginzed type for ft datasource: %s\" % _type)\n return url\n\n\n@lru_cache(maxsize=1024)\ndef get_ft_id(code, _type=\"indices\"):\n url = make_ft_url(code, _type=_type)\n r = rget(url)\n b = BeautifulSoup(r.text, \"lxml\")\n return eval(\n b.find(\"section\", class_=\"mod-tearsheet-add-to-watchlist\")[\"data-mod-config\"]\n )[\"xid\"]\n\n\ndef get_rt_from_ft(code, _type=\"indices\"):\n url = make_ft_url(code, _type=_type)\n r = rget(url)\n b = BeautifulSoup(r.text, \"lxml\")\n d = {}\n d[\"name\"] = b.find(\"h1\").string\n d[\"current\"] = _float(b.find(\"span\", class_=\"mod-ui-data-list__value\").string)\n d[\"percent\"] = _float(\n b.select(\"span[class^='mod-format--']\")[0].text.split(\"/\")[-1].strip()[:-1]\n )\n d[\"current_ext\"] = None\n d[\"market\"] = None\n d[\"currency\"] = b.find(\"span\", class_=\"mod-ui-data-list__label\").string.split(\"(\")[\n 1\n ][:-1]\n d[\"time\"] = b.find(\"div\", class_=\"mod-disclaimer\").string\n return d\n\n\ndef get_rt_from_ycharts(code):\n if code.startswith(\"yc-\"):\n code = code[3:]\n url = \"https://ycharts.com/\" + code\n r = rget(url)\n s = 
BeautifulSoup(r.text, \"lxml\")\n qdiv = s.select(\"div.index-rank.col-auto\") # current\n spans = [s for s in qdiv[0].contents if s != \"\\n\" and s.contents]\n d = {}\n d[\"name\"] = s.select(\"h1,h3[class=securityName]\")[0].text.strip()\n d[\"current\"], d[\"percent\"] = (\n _float(spans[0].string), # current,\n _float(spans[1].contents[-2].string[1:-1]), # percent\n )\n l = [\n c.strip()\n for c in s.select(\"span[class=index-info]\")[0].string.split(\"\\n\")\n if c.strip()\n ]\n d[\"time\"] = l[1]\n d[\"currency\"] = l[0].split(\" \")[0].strip()\n d[\"market\"] = None\n return d\n\n\n@lru_cache_time(ttl=300, maxsize=512)\ndef get_newest_netvalue(code):\n \"\"\"\n 防止天天基金总量 API 最新净值更新不及时,获取基金最新公布净值及对应日期, depracated, use get_rt(\"F501018\") instead\n\n :param code: six digits string for fund.\n :return: netvalue, %Y-%m-%d\n \"\"\"\n code = code[1:]\n r = rget(\"http://fund.eastmoney.com/{code}.html\".format(code=code))\n s = BeautifulSoup(r.text, \"lxml\")\n return (\n float(\n s.findAll(\"dd\", class_=\"dataNums\")[1]\n .find(\"span\", class_=\"ui-font-large\")\n .string\n ),\n str(s.findAll(\"dt\")[1]).split(\"(\")[1].split(\")\")[0][7:],\n )\n\n\n@lru_cache(maxsize=512)\ndef get_hkfcode(code):\n if code.startswith(\"F\"):\n code = code[1:]\n page = rget(\"http://overseas.1234567.com.cn/{code}\".format(code=code)).text\n page.find(\"hkfcode\")\n hkfcode = (\n page[page.find(\"hkfcode\") :]\n .split(\"=\")[1]\n .split(\";\")[0]\n .lstrip()\n .lstrip(\"'\")\n .strip(\"'\")\n )\n return hkfcode\n\n\ndef get_rt_from_ttjj_oversea(code):\n if code.startswith(\"F\"):\n code = code[1:]\n if not code.startswith(\"96\"):\n raise ValueError(\"%s is not an oversea fund\" % code)\n r = rget(\"http://overseas.1234567.com.cn/{code}.html\".format(code=code))\n r.encoding = \"utf-8\"\n s = BeautifulSoup(r.text, \"lxml\")\n start = s.select(\"dl.dataItem02\")[0].text\n start = start.split(\"(\")[1].split(\")\")[0]\n name = s.select(\"div[class='fundDetail-tit']\")[0].text.split(\"(\")[0].strip()\n name = name.split(\"(\")[0].strip()\n value = _float(s.select(\"span.ui-font-large.ui-num\")[0].text)\n date = (\n s.select(\"dl[class='dataItem01']\")[0]\n .find(\"p\")\n .text.split(\"(\")[-1]\n .split(\")\")[0]\n )\n infol = [\n r for r in s.select(\"div[class='infoOfFund']\")[0].text.split(\"\\n\") if r.strip()\n ]\n return {\n \"name\": name,\n \"time\": date,\n \"current\": value,\n \"market\": \"CN\",\n \"currency\": None, # 很可能存在非人民币计价的互认基金\n \"current_ext\": None,\n \"type\": infol[0].split(\":\")[1].strip(),\n \"scale\": infol[1].split(\":\")[1].strip(),\n \"manager\": infol[2].split(\":\")[1].strip(),\n \"startdate\": start,\n }\n\n\n@lru_cache_time(ttl=600, maxsize=512)\ndef get_rt_from_ttjj(code):\n code = code[1:]\n if code.startswith(\"96\"):\n return get_rt_from_ttjj_oversea(code)\n r = rget(\"http://fund.eastmoney.com/{code}.html\".format(code=code))\n r.encoding = \"utf-8\"\n s = BeautifulSoup(r.text, \"lxml\")\n name = s.select(\"div[style='float: left']\")[0].text.split(\"(\")[0]\n if s.findAll(\"dd\", class_=\"dataNums\")[1].find(\n \"span\", class_=\"ui-font-large\"\n ): # 非货币基金\n value, date = (\n float(\n s.findAll(\"dd\", class_=\"dataNums\")[1]\n .find(\"span\", class_=\"ui-font-large\")\n .string\n ),\n str(s.findAll(\"dt\")[1]).split(\"(\")[1].split(\")\")[0][7:],\n )\n estimate = s.select(\"span[id=gz_gsz]\")[0].text # after loading\n if estimate == \"--\":\n gsz = rget(\n \"http://fundgz.1234567.com.cn/js/{code}.js\".format(code=code),\n headers={\n \"Host\": 
\"fundgz.1234567.com.cn\",\n \"Referer\": \"http://fund.eastmoney.com/\",\n },\n )\n try: # in case eval error\n gsz_dict = eval(gsz.text[8:-2])\n estimate = _float(gsz_dict[\"gsz\"])\n estimate_time = gsz_dict[\"gztime\"]\n except:\n estimate = None\n else:\n try:\n estimate = _float(estimate)\n except ValueError:\n logger.warning(\"unrecognized estimate netvalue %s\" % estimate)\n estimate = None\n else:\n value, date = (\n s.findAll(\"dd\", class_=\"dataNums\")[1].text,\n str(s.findAll(\"dt\")[1]).split(\"(\")[1].split(\")\")[0],\n )\n estimate = None\n status = s.select(\"span[class='staticCell']\")[0].text.strip()\n tb = s.select(\"div.infoOfFund > table >tr>td\")\n infol = [i.text for i in tb]\n try:\n estimate_time\n except NameError:\n estimate_time = None\n return {\n \"name\": name,\n \"time\": date,\n \"current\": value,\n \"market\": \"CN\",\n \"currency\": \"CNY\",\n \"current_ext\": None,\n \"status\": status,\n \"type\": infol[0].split(\":\")[1].split(\"\\xa0\")[0],\n \"scale\": infol[1].split(\":\")[1],\n \"manager\": infol[2].split(\":\")[1],\n \"company\": infol[4].split(\":\")[1],\n \"estimate\": estimate,\n \"estimate_time\": estimate_time,\n }\n # 是否有美元份额计价的基金会出问题?\n\n\n@lru_cache(2048)\ndef get_fund_type(code):\n \"\"\"\n given fund code, return unified fund category which is extracted from get_rt(code)[\"type\"]\n\n :param code:\n :return: str.\n \"\"\"\n code = code[-6:]\n t = get_rt(\"F\" + code)[\"type\"]\n\n if t in [\"联接基金\", \"股票指数\"] or t.startswith(\"ETF\"):\n return \"指数基金\"\n elif t.startswith(\"QDII\"):\n return \"QDII\"\n elif t.startswith(\"股票\"):\n return \"股票基金\"\n elif t.startswith(\"混合\"):\n return \"混合基金\"\n elif t.startswith(\"债券\"):\n return \"债券基金\"\n elif t.startswith(\"货币\"):\n return \"货币基金\"\n else:\n return \"其他\"\n\n\ndef get_rt(\n code, _from=None, double_check=False, double_check_threhold=0.005, handler=True\n):\n \"\"\"\n universal fetcher for realtime price of literally everything.\n\n :param code: str. 规则同 :func:`get_daily`. 需要注意场外基金和外汇中间价是不支持实时行情的,因为其每日只有一个报价。对于 investing 的数据源,只支持网址格式代码。\n :param _from: Optional[str]. can be one of \"xueqiu\", \"investing\". Only used for debug to\n enfore data source. For common use, _from can be chosed automatically based on code in the run time.\n :param double_check: Optional[bool], default False. 如果设为 True,只适用于 A 股,美股,港股实时行情,会通过至少两个不同的数据源交叉验证,确保正确。\n 适用于需要自动交易等情形,防止实时数据异常。\n :param handler: bool. Default True. 
若为 False,则 handler 钩子失效,用于钩子函数中的嵌套。\n :return: Dict[str, Any].\n 包括 \"name\", \"current\", \"percent\" 三个必有项和 \"current_ext\"(盘后价格), \"currency\" (计价货币), \"market\" (发行市场), \"time\"(记录时间) 可能为 ``None`` 的选项。\n \"\"\"\n # 对于一些标的,get_rt 的主任务可能不是 current 价格,而是去拿 market currency 这些元数据\n # 现在用的新浪实时数据源延迟严重, double check 并不靠谱,港股数据似乎有15分钟延迟(已解决)\n # 雪球实时和新浪实时在9:00之后一段时间可能都有问题\n # FT 数据源有10到20分钟的延迟\n if handler:\n if getattr(thismodule, \"get_rt_handler\", None):\n args = inspect.getargvalues(inspect.currentframe())\n f = getattr(thismodule, \"get_rt_handler\")\n fr = f(**args.locals)\n if fr:\n return fr\n\n if not _from:\n # if code.startswith(\"HK\") and code[2:].isdigit():\n # _from = \"xueqiu\"\n if code.startswith(\"yc-\"):\n _from = \"ycharts\"\n elif len(code.split(\"-\")) >= 2 and len(code.split(\"-\")[0]) <= 3:\n _from = code.split(\"-\")[0]\n code = \"-\".join(code.split(\"-\")[1:])\n elif (code.startswith(\"F\") or code.startswith(\"T\")) and code[1:].isdigit():\n _from = \"ttjj\"\n elif len(code.split(\"/\")) > 1:\n _from = \"investing\"\n else: # 默认启用雪球实时,新浪纯指数行情不完整\n _from = \"xueqiu\"\n if _from in [\"cninvesting\", \"investing\"]:\n try:\n return get_cninvesting_rt(code)\n except Exception as e:\n logger.warning(\n \"Fails due to %s, now trying app source of investing.com\" % e.args[0]\n )\n return get_cninvesting_rt(code, app=True)\n elif double_check and _from in [\"xueqiu\", \"sina\"]:\n r1 = get_xueqiu_rt(code, token=get_token())\n r2 = get_rt_from_sina(code)\n if abs(r1[\"current\"] / r2[\"current\"] - 1) > double_check_threhold:\n raise DataPossiblyWrong(\"realtime data unmatch for %s\" % code)\n return r2\n elif _from in [\"xueqiu\", \"xq\", \"snowball\"]:\n try:\n return get_xueqiu_rt(code, token=get_token())\n except (IndexError, ValueError, AttributeError, TypeError) as e: # 默认雪球实时引入备份机制\n logging.warning(\n \"Fails due to %s, now trying backup data source from sina\" % e.args[0]\n )\n return get_rt_from_sina(code)\n elif _from in [\"sina\", \"sn\", \"xinlang\"]:\n try:\n return get_rt_from_sina(code)\n except (IndexError, ValueError, AttributeError, TypeError) as e: # 默认雪球实时引入备份机制\n logging.warning(\n \"Fails due to %s, now trying backup data source from xueqiu\" % e.args[0]\n )\n return get_xueqiu_rt(code, token=get_token())\n elif _from in [\"ttjj\"]:\n return get_rt_from_ttjj(code)\n elif _from in [\"FT\", \"ft\", \"FTI\"]:\n return get_rt_from_ft(code)\n elif _from == \"FTE\":\n return get_rt_from_ft(code, _type=\"equities\")\n elif _from == \"FTB\":\n return get_rt_from_ft(code, _type=\"bonds\")\n elif _from == \"FTF\":\n return get_rt_from_ft(code, _type=\"funds\")\n elif _from == \"FTX\":\n return get_rt_from_ft(code, _type=\"currencies\")\n elif _from == \"FTC\":\n return get_rt_from_ft(code, _type=\"commodities\")\n elif _from in [\"INA\"]: # investing app\n return get_cninvesting_rt(code, app=True)\n elif _from in [\"yc\", \"ycharts\"]:\n return get_rt_from_ycharts(code)\n else:\n raise ParserFailure(\"unrecoginzed _from for %s\" % _from)\n\n\nget_realtime = get_rt\nget_now = get_rt\n\n_cached_data = {}\n\n\ndef reset_cache():\n \"\"\"\n clear all cache of daily data in memory.\n\n :return: None.\n \"\"\"\n global _cached_data\n _cached_data = {}\n setattr(thismodule, \"cached_dict\", {})\n\n\ndef cached(s):\n \"\"\"\n **Deprecated**, use :func:`cachedio` instead, where ``backend=\"memory\"``.\n\n Usage as follows:\n\n .. 
code-block:: python\n\n @cached(\"20170101\")\n def get_daily(*args, **kws):\n return xa.get_daily(*args, **kws)\n\n Automatically cache the result in memory and avoid refetching\n :param s: str. eg. \"20160101\", the starting date of cached table.\n :return: wrapped function.\n \"\"\"\n\n def cached_start(f):\n @wraps(f)\n def wrapper(*args, **kws):\n print(\"cached function is deprecated, please instead use cachedio\")\n if args:\n code = args[0]\n else:\n code = kws.get(\"code\")\n start = kws.get(\"start\", None)\n end = kws.get(\"end\", None)\n prev = kws.get(\"prev\", None)\n if not prev:\n prev = 365\n if not end:\n end_obj = today_obj()\n else:\n end_obj = dstr2dobj(end)\n if not start:\n start_obj = end_obj - dt.timedelta(prev)\n else:\n start_obj = dstr2dobj(start)\n start_str = start_obj.strftime(\"%Y%m%d\")\n end_str = end_obj.strftime(\"%Y%m%d\")\n kws[\"start\"] = s\n kws[\"end\"] = dt.datetime.now().strftime(\"%Y%m%d\")\n global _cached_data\n _cached_data.setdefault(s, {})\n if code not in _cached_data[s]:\n df = f(*args, **kws)\n # print(\"cached %s\" % code)\n _cached_data[s][code] = df\n else:\n pass\n # print(\"directly call cache\")\n df = _cached_data[s][code]\n df = df[df[\"date\"] <= end_str]\n df = df[df[\"date\"] >= start_str]\n\n return df\n\n return wrapper\n\n return cached_start\n\n\ndef cachedio(**ioconf):\n \"\"\"\n 用法类似:func:`cached`,通用透明缓存器,用来作为 (code, start, end ...) -> pd.DataFrame 形式函数的缓存层,\n 避免重复爬取已有数据。\n\n :param **ioconf: 可选关键字参数 backend: csv or sql or memory,\n path: csv 文件夹或 sql engine, refresh True 会刷新结果,重新爬取, default False,\n prefix 是 key 前统一部分, 缓存 hash 标志\n :return:\n \"\"\"\n\n def cached(f):\n @wraps(f)\n def wrapper(*args, **kws):\n if args:\n code = args[0]\n else:\n code = kws.get(\"code\")\n date = ioconf.get(\"date\", \"date\") # 没利用上这个栏的名字变化\n precached = ioconf.get(\"precached\", None)\n precached = kws.get(\"precached\", precached)\n key = kws.get(\"key\", code)\n key = key.replace(\"/\", \" \")\n key_func = ioconf.get(\"key_func\", None)\n key_func = ioconf.get(\"keyfunc\", key_func)\n if key_func is not None:\n key = key_func(key)\n defaultend = ioconf.get(\"defaultend\", today_obj)\n defaultend = ioconf.get(\"default_end\", defaultend)\n defaultprev = ioconf.get(\"defaultprev\", 365)\n defaultprev = ioconf.get(\"default_prev\", defaultprev)\n if isinstance(defaultend, str):\n defaultend = defaultend.replace(\"/\", \"\").replace(\"-\", \"\")\n defaultend = dt.datetime.strptime(defaultend, \"%Y%m%d\")\n if callable(defaultend):\n defaultend = defaultend()\n start = kws.get(\"start\", None)\n end = kws.get(\"end\", None)\n prev = kws.get(\"prev\", None)\n prefix = ioconf.get(\"prefix\", \"\")\n key = prefix + key\n if precached:\n precached = precached.replace(\"/\", \"\").replace(\"-\", \"\")\n precached_obj = dt.datetime.strptime(precached, \"%Y%m%d\")\n if not prev:\n prev = defaultprev\n if not end:\n end_obj = defaultend\n else:\n end_obj = dt.datetime.strptime(\n end.replace(\"/\", \"\").replace(\"-\", \"\"), \"%Y%m%d\"\n )\n\n if not start:\n start_obj = end_obj - dt.timedelta(days=prev)\n else:\n start_obj = dt.datetime.strptime(\n start.replace(\"/\", \"\").replace(\"-\", \"\"), \"%Y%m%d\"\n )\n\n start_str = start_obj.strftime(\"%Y%m%d\")\n end_str = end_obj.strftime(\"%Y%m%d\")\n backend = ioconf.get(\"backend\")\n backend = kws.get(\"backend\", backend)\n # if backend == \"sql\": # reserved for case insensitive database settings\n # key = key.lower()\n refresh = ioconf.get(\"refresh\", False)\n refresh = 
kws.get(\"refresh\", refresh)\n fetchonly = ioconf.get(\"fetchonly\", False)\n fetchonly = ioconf.get(\"fetch_only\", fetchonly)\n fetchonly = kws.get(\"fetchonly\", fetchonly)\n fetchonly = kws.get(\"fetch_only\", fetchonly)\n path = ioconf.get(\"path\")\n path = kws.get(\"path\", path)\n kws[\"start\"] = start_str\n kws[\"end\"] = end_str\n if not backend:\n df = f(*args, **kws)\n df = df[df[\"date\"] <= kws[\"end\"]]\n df = df[df[\"date\"] >= kws[\"start\"]]\n return df\n else:\n if backend == \"csv\":\n key = key + \".csv\"\n if not getattr(thismodule, \"cached_dict\", None):\n setattr(thismodule, \"cached_dict\", {})\n if refresh:\n is_changed = True\n df0 = f(*args, **kws)\n\n else: # non refresh\n try:\n if backend == \"csv\":\n if key in getattr(thismodule, \"cached_dict\"):\n # 即使硬盘级别的缓存,也有内存层,加快读写速度\n df0 = getattr(thismodule, \"cached_dict\")[key]\n else:\n df0 = pd.read_csv(os.path.join(path, key))\n elif backend == \"sql\":\n if key in getattr(thismodule, \"cached_dict\"):\n df0 = getattr(thismodule, \"cached_dict\")[key]\n else:\n df0 = pd.read_sql(key, path)\n elif backend == \"memory\":\n df0 = getattr(thismodule, \"cached_dict\")[key]\n else:\n raise ValueError(\"no %s option for backend\" % backend)\n df0[date] = pd.to_datetime(df0[date])\n # 向前延拓\n is_changed = False\n if df0.iloc[0][date] > start_obj and not fetchonly:\n kws[\"start\"] = start_str\n kws[\"end\"] = (\n df0.iloc[0][date] - pd.Timedelta(days=1)\n ).strftime(\"%Y%m%d\")\n if has_weekday(kws[\"start\"], kws[\"end\"]):\n # 考虑到海外市场的不同情况,不用 opendate 判断,采取保守型判别\n df1 = f(*args, **kws)\n if df1 is not None and len(df1) > 0:\n df1 = df1[df1[\"date\"] <= kws[\"end\"]]\n if df1 is not None and len(df1) > 0:\n is_changed = True\n df0 = df1.append(df0, ignore_index=True, sort=False)\n # 向后延拓\n if df0.iloc[-1][date] < end_obj and not fetchonly:\n nextday_str = (\n df0.iloc[-1][date] + dt.timedelta(days=1)\n ).strftime(\"%Y%m%d\")\n if len(df0[df0[\"date\"] == df0.iloc[-1][\"date\"]]) == 1:\n kws[\"start\"] = (df0.iloc[-1][date]).strftime(\"%Y%m%d\")\n else: # 单日多行的表默认最后一日是准确的,不再刷新了\n kws[\"start\"] = nextday_str\n kws[\"end\"] = end_str\n if has_weekday(nextday_str, kws[\"end\"]): # 新更新的日期里有工作日\n df2 = f(*args, **kws)\n if df2 is not None and len(df2) > 0:\n df2 = df2[df2[\"date\"] >= kws[\"start\"]]\n if df2 is not None and len(df2) > 0:\n is_changed = True\n if (\n len(df0[df0[\"date\"] == df0.iloc[-1][\"date\"]])\n == 1\n ):\n df0 = df0.iloc[:-1]\n df0 = df0.append(df2, ignore_index=True, sort=False)\n # 注意这里抹去更新了原有最后一天的缓存,这是因为日线最新一天可能有实时数据污染\n\n except (FileNotFoundError, exc.ProgrammingError, KeyError) as e:\n if fetchonly:\n logger.error(\n \"no cache in backend for %s but you insist `fetchonly`\"\n % code\n )\n raise e\n if precached:\n if start_obj > precached_obj:\n kws[\"start\"] = precached\n if end_obj < today_obj():\n kws[\"end\"] = (\n today_obj() - dt.timedelta(days=1)\n ).strftime(\"%Y%m%d\")\n is_changed = True\n df0 = f(*args, **kws)\n\n if df0 is not None and len(df0) > 0 and is_changed:\n if backend == \"csv\":\n df0.to_csv(os.path.join(path, key), index=False)\n elif backend == \"sql\":\n df0.to_sql(key, con=path, if_exists=\"replace\", index=False)\n # elif backend == \"memory\":\n # 总是刷新内存层,即使是硬盘缓存\n d = getattr(thismodule, \"cached_dict\")\n d[key] = df0\n\n if df0 is not None and len(df0) > 0:\n df0 = df0[df0[\"date\"] <= end_str]\n df0 = df0[df0[\"date\"] >= start_str]\n\n return df0\n\n return wrapper\n\n return cached\n\n\ndef fetch_backend(key):\n prefix = ioconf.get(\"prefix\", \"\")\n key 
= prefix + key\n backend = ioconf.get(\"backend\")\n path = ioconf.get(\"path\")\n if backend == \"csv\":\n key = key + \".csv\"\n\n try:\n if backend == \"csv\":\n df0 = pd.read_csv(os.path.join(path, key))\n elif backend == \"sql\":\n df0 = pd.read_sql(key, path)\n else:\n raise ValueError(\"no %s option for backend\" % backend)\n\n return df0\n\n except (FileNotFoundError, exc.ProgrammingError, KeyError):\n return None\n\n\ndef save_backend(key, df, mode=\"a\", header=False):\n prefix = ioconf.get(\"prefix\", \"\")\n key = prefix + key\n backend = ioconf.get(\"backend\")\n path = ioconf.get(\"path\")\n if backend == \"csv\":\n key = key + \".csv\"\n\n if backend == \"csv\":\n if mode == \"a\":\n df.to_csv(os.path.join(path, key), index=False, header=header, mode=mode)\n else:\n df.to_csv(os.path.join(path, key), index=False, mode=mode)\n elif backend == \"sql\":\n if mode == \"a\":\n mode = \"append\"\n else:\n mode = \"replace\"\n df.to_sql(key, con=path, if_exists=mode, index=False)\n else:\n raise ValueError(\"no %s option for backend\" % backend)\n\n logger.debug(\"%s saved into backend successfully\" % key)\n\n\ndef check_cache(*args, omit_lines=0, **kws):\n if omit_lines == 0:\n assert (\n _get_daily(*args, wrapper=False, **kws)\n .reset_index(drop=True)\n .equals(get_daily(*args, **kws).reset_index(drop=True))\n )\n else:\n assert (\n _get_daily(*args, wrapper=False, **kws)\n .reset_index(drop=True)[:-omit_lines]\n .equals(get_daily(*args, **kws).reset_index(drop=True)[:-omit_lines])\n )\n\n\n@data_source(\"jq\")\ndef _get_index_weight_range(code, start, end):\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n start_obj = dt.datetime.strptime(start.replace(\"-\", \"\").replace(\"/\", \"\"), \"%Y%m%d\")\n end_obj = dt.datetime.strptime(end.replace(\"-\", \"\").replace(\"/\", \"\"), \"%Y%m%d\")\n start_m = start_obj.replace(day=1)\n if start_m < start_obj:\n start_m = start_m + relativedelta(months=1)\n end_m = end_obj.replace(day=1)\n if end_obj < end_m:\n end_m = end_m - relativedelta(months=1)\n d = start_m\n\n df = pd.DataFrame({\"code\": [], \"weight\": [], \"display_name\": [], \"date\": []})\n while True:\n if d > end_m:\n\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n return df\n logger.debug(\"fetch index weight on %s for %s\" % (d, code))\n df0 = get_index_weights(index_id=code, date=d.strftime(\"%Y-%m-%d\"))\n df0[\"code\"] = df0.index\n df = df.append(df0, ignore_index=True, sort=False)\n d = d + relativedelta(months=1)\n\n\n@data_source(\"jq\")\ndef _get_peb_range(code, start, end): # 盈利,净资产,总市值\n \"\"\"\n 获取指定指数一段时间内的 pe pb 值。\n\n :param code: 聚宽形式指数代码。\n :param start:\n :param end:\n :return: pd.DataFrame\n \"\"\"\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n data = {\"date\": [], \"pe\": [], \"pb\": []}\n for d in pd.date_range(start=start, end=end, freq=\"W-FRI\"):\n data[\"date\"].append(d)\n logger.debug(\"compute pe pb on %s\" % d)\n r = get_peb(code, date=d.strftime(\"%Y-%m-%d\"))\n data[\"pe\"].append(r[\"pe\"])\n data[\"pb\"].append(r[\"pb\"])\n return pd.DataFrame(data)\n\n\ndef get_stock_peb_range(code, start, end, wrapper=False):\n \"\"\"\n 获取股票历史 pe pb\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"HK\") and code[2:].isdigit():\n code = code[2:]\n count = (today_obj() - dt.datetime.strptime(start, \"%Y%m%d\")).days\n df = get_historical_fromxq(code, count, full=True)\n df = df[[\"date\", \"pe\", \"pb\", \"ps\"]]\n if not wrapper:\n df = df[df[\"date\"] >= start]\n 
df = df[df[\"date\"] <= end]\n return df\n\n\n@lru_cache()\ndef ttjjcode(code):\n \"\"\"\n 将天天基金的持仓股票代码或其他来源的代码标准化\n\n :param code: str.\n :return: str.\n \"\"\"\n code = code.strip()\n if code.endswith(\".HK\"):\n return \"HK\" + code[:-3]\n elif code.endswith(\".US\"):\n return code[:-3]\n elif code.isdigit() and len(code) == 5:\n return \"HK\" + code\n elif code.isdigit() and len(code) == 6:\n if (\n code.startswith(\"16\")\n or code.startswith(\"15\")\n or code.startswith(\"12\")\n or code.startswith(\"0\")\n or code.startswith(\"3\")\n ):\n # 注意这里只能对应个股,指数代码有重叠没有办法的事\n return \"SZ\" + code\n elif code.startswith(\"5\") or code.startswith(\"6\") or code.startswith(\"11\"):\n return \"SH\" + code\n else:\n logger.warning(\"unrecognized code format %s\" % code)\n return \"0\"\n else:\n logger.info(\"not so sure about code format %s, taken as US stock\" % code)\n return code\n\n\ndef get_fund_peb(code, date, threhold=0.3):\n \"\"\"\n 根据基金的股票持仓,获取对应日期的 pe,pb 估值\n\n :param code: str. 基金代码\n :param date:\n :param threhold: float, default 0.3. 为了计算快速,占比小于千分之三的股票将舍弃\n :return:\n \"\"\"\n if code.startswith(\"F\"):\n code = code[1:]\n date = date.replace(\"/\", \"\").replace(\"-\", \"\")\n d = dt.datetime.strptime(date, \"%Y%m%d\")\n if d.month > 3 and d.month < 8:\n year = d.year - 1\n season = 4\n elif d.month <= 3:\n year = d.year - 1\n season = 2\n else:\n year = d.year\n season = 2\n # season 只选 2,4, 具有更详细的持仓信息\n df = get_fund_holdings(code, year, season)\n if df is None:\n if season == 4:\n season = 2\n else:\n year -= 1\n season = 4\n df = get_fund_holdings(code, year, season)\n if df is None:\n logger.warning(\"%s seems has no holdings data in this time %s\" % (code, year))\n return {\"pe\": None, \"pb\": None}\n df = df[df[\"ratio\"] >= threhold]\n df[\"scode\"] = df[\"code\"].apply(ttjjcode)\n df = df[df[\"scode\"] != \"0\"]\n if len(df) == 0:\n return {\"pe\": None, \"pb\": None}\n\n pel, pbl = [], []\n for i, r in df.iterrows():\n try:\n fdf = get_daily(\"peb-\" + r[\"scode\"], end=date, prev=60)\n if len(fdf) == 0:\n # 已退市或改名\n logger.warning(\"%s: 无法获取,可能已退市,当时休市或改名\" % r[\"scode\"])\n pel.append(None)\n pbl.append(None)\n else:\n fdf = fdf.iloc[-1]\n pel.append(fdf[\"pe\"])\n pbl.append(fdf[\"pb\"])\n except (KeyError, TypeError, IndexError) as e:\n logger.warning(\n \"%s: 获取历史估值出现问题: %s, 可能由于网站故障或股票代码非中美市场\" % (r[\"scode\"], e.args[0])\n )\n pel.append(None)\n pbl.append(None)\n df[\"pe\"] = pel\n df[\"pb\"] = pbl\n r = {}\n pedf = df[~pd.isna(df[\"pe\"])]\n pbdf = df[~pd.isna(df[\"pb\"])]\n if len(pbdf) < 0.5 * len(df): # 有时候会有个别标的有pb值\n r[\"pb\"] = None\n else:\n pbdf[\"b\"] = pbdf[\"ratio\"] / (pbdf[\"pb\"] + 0.000001)\n r[\"pb\"] = pbdf.ratio.sum() / pbdf.b.sum()\n if len(pedf) == 0:\n r[\"pe\"] = None\n else:\n pedf[\"e\"] = pedf[\"ratio\"] / (pedf[\"pe\"] + 0.000001)\n r[\"pe\"] = pedf.ratio.sum() / pedf.e.sum()\n return r\n\n\ndef get_fund_peb_range(code, start, end):\n \"\"\"\n 获取一段时间的基金历史估值,每周五为频率\n\n :param code:\n :param start:\n :param end:\n :return:\n \"\"\"\n if code.startswith(\"F\"):\n code = code[1:]\n data = {\"date\": [], \"pe\": [], \"pb\": []}\n for d in pd.date_range(start=start, end=end, freq=\"W-FRI\"):\n data[\"date\"].append(d)\n r = get_fund_peb(code, date=d.strftime(\"%Y-%m-%d\"))\n data[\"pe\"].append(r[\"pe\"])\n data[\"pb\"].append(r[\"pb\"])\n return pd.DataFrame(data)\n\n\ndef set_backend(**ioconf):\n \"\"\"\n 设定 xalpha get_daily 函数的缓存后端,默认为内存。 ioconf 参数设置可参考 :func:`cachedio`\n\n :param ioconf:\n :return: None.\n \"\"\"\n\n if not ioconf:\n 
ioconf = {\"backend\": \"memory\"}\n get_daily = cachedio(**ioconf)(_get_daily)\n prefix = ioconf.get(\"prefix\", \"\")\n ioconf[\"prefix\"] = \"iw-\" + prefix\n get_index_weight_range = cachedio(**ioconf)(_get_index_weight_range)\n ioconf[\"prefix\"] = \"peb-\" + prefix\n get_peb_range = cachedio(**ioconf)(_get_peb_range)\n setattr(thismodule, \"get_daily\", get_daily)\n setattr(xamodule, \"get_daily\", get_daily)\n setattr(thismodule, \"get_index_weight_range\", get_index_weight_range)\n setattr(thismodule, \"get_peb_range\", get_peb_range)\n ioconf[\"prefix\"] = prefix\n setattr(thismodule, \"ioconf\", ioconf)\n\n\nset_backend()\n\n\n@data_source(\"jq\")\ndef get_peb(index, date=None, table=False):\n \"\"\"\n 获取指数在指定日期的 pe 和 pb。采用当时各公司的最新财报和当时的指数成分股权重加权计算。\n\n :param index: str. 聚宽形式的指数代码。\n :param date: str. %Y-%m-%d\n :param table: Optioanl[bool], default False. True 时返回整个计算的 DataFrame,用于 debug。\n :return: Dict[str, float]. 包含 pe 和 pb 值的字典。\n \"\"\"\n if len(index.split(\".\")) == 2:\n index = _convert_code(index)\n middle = dt.datetime.strptime(\n date.replace(\"/\", \"\").replace(\"-\", \"\"), \"%Y%m%d\"\n ).replace(day=1)\n iwdf = get_index_weight_range(\n index,\n start=(middle - dt.timedelta(days=10)).strftime(\"%Y-%m-%d\"),\n end=(middle + dt.timedelta(days=6)).strftime(\"%Y-%m-%d\"),\n )\n q = query(valuation).filter(valuation.code.in_(list(iwdf.code)))\n logger.debug(\"get_fundamentals on %s\" % (date))\n df = get_fundamentals(q, date=date)\n df = df.merge(iwdf, on=\"code\")\n df[\"e\"] = df[\"weight\"] / df[\"pe_ratio\"]\n df[\"b\"] = df[\"weight\"] / df[\"pb_ratio\"]\n df[\"p\"] = df[\"weight\"]\n tote = df.e.sum()\n totb = df.b.sum()\n if table:\n return df\n return {\n \"pe\": (round(100.0 / tote, 3) if tote != 0 else np.inf),\n \"pb\": (round(100.0 / totb, 3) if totb != 0 else np.inf),\n }\n\n\n@data_source(\"jq\")\ndef get_sw_from_jq(code, start=None, end=None, **kws):\n \"\"\"\n\n :param code: str. eg. 
801180 申万行业指数\n :param start:\n :param end:\n :param kws:\n :return:\n \"\"\"\n logger.debug(\"get sw data of %s\" % code)\n df = finance.run_query(\n query(finance.SW1_DAILY_VALUATION)\n .filter(finance.SW1_DAILY_VALUATION.date >= start)\n .filter(finance.SW1_DAILY_VALUATION.date <= end)\n .filter(finance.SW1_DAILY_VALUATION.code == code)\n .order_by(finance.SW1_DAILY_VALUATION.date.asc())\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n return df\n\n\n@data_source(\"jq\")\ndef get_teb(code, date):\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n sl = get_index_stocks(code, date=date)\n logger.debug(\"get fundamentals from jq for %s\" % code)\n df = get_fundamentals(query(valuation).filter(valuation.code.in_(sl)), date=date)\n df[\"e\"] = df[\"market_cap\"] / df[\"pe_ratio\"]\n df[\"b\"] = df[\"market_cap\"] / df[\"pb_ratio\"]\n return {\"e\": df[\"e\"].sum(), \"b\": df[\"b\"].sum(), \"m\": df[\"market_cap\"].sum()} # 亿人民币\n\n\ndef get_teb_range(code, start, end, freq=\"W-FRI\"):\n if len(code.split(\".\")) != 2:\n code = _inverse_convert_code(code)\n data = {\"date\": [], \"e\": [], \"b\": [], \"m\": []}\n for d in pd.date_range(start, end, freq=freq):\n data[\"date\"].append(d)\n r = get_teb(code, d.strftime(\"%Y-%m-%d\"))\n data[\"e\"].append(r[\"e\"])\n data[\"b\"].append(r[\"b\"])\n data[\"m\"].append(r[\"m\"])\n df = pd.DataFrame(data)\n return df\n\n\ndef _convert_code(code):\n \"\"\"\n 将聚宽形式的代码转化为 xalpha 形式\n\n :param code:\n :return:\n \"\"\"\n no, mk = code.split(\".\")\n if mk == \"XSHG\":\n return \"SH\" + no\n elif mk == \"XSHE\":\n return \"SZ\" + no\n\n\ndef _inverse_convert_code(code):\n \"\"\"\n 将 xalpha 形式的代码转化为聚宽形式\n\n :param code:\n :return:\n \"\"\"\n\n if code.startswith(\"SH\"):\n return code[2:] + \".XSHG\"\n elif code.startswith(\"SZ\"):\n return code[2:] + \".XSHE\"\n\n\n@lru_cache_time(ttl=60, maxsize=512)\ndef get_bar(\n code, prev=24, interval=3600, _from=None, handler=True, start=None, end=None\n):\n \"\"\"\n\n :param code: str. 支持雪球和英为的代码\n :param prev: points of data from now to back, often limited by API around several hundreds\n :param interval: float, seconds. need to match the corresponding API,\n typical values include 60, 300, 3600, 86400, 86400*7\n :param handler: bool. Default True. 
若为 False,则 handler 钩子失效,用于钩子函数中的嵌套。\n :return: pd.DataFrame\n \"\"\"\n if handler:\n if getattr(thismodule, \"get_bar_handler\", None):\n args = inspect.getargvalues(inspect.currentframe())\n f = getattr(thismodule, \"get_bar_handler\")\n fr = f(**args.locals)\n if fr is not None:\n return fr\n\n if not _from:\n if (\n (start is not None)\n and (end is not None)\n and (code.startswith(\"SH\") or code.startswith(\"SZ\"))\n ):\n _from = \"jq\"\n elif code.startswith(\"SH\") or code.startswith(\"SZ\"):\n _from = \"xueqiu\"\n elif code.isdigit():\n _from = \"cninvesting\"\n elif code.startswith(\"HK\") and code[2:7].isdigit():\n _from = \"xueqiu\"\n code = code[2:]\n elif len(code.split(\"-\")) >= 2 and len(code.split(\"-\")[0]) <= 3:\n _from = code.split(\"-\")[0]\n code = \"-\".join(code.split(\"-\")[1:])\n elif len(code.split(\"/\")) > 1:\n _from = \"cninvesting\"\n code = get_investing_id(code)\n else:\n _from = \"xueqiu\" # 美股\n if _from in [\"xq\", \"xueqiu\", \"XQ\"]:\n return get_bar_fromxq(code, prev, interval)\n elif _from in [\"IN\", \"cninvesting\", \"investing\"]:\n return get_bar_frominvesting(code, prev, interval)\n elif _from in [\"INA\"]:\n return get_bar_frominvesting(code, prev, interval)\n # 这里 investing app 源是 404,只能用网页源\n elif _from in [\"jq\"]:\n code, type_ = decouple_code(code)\n # 关于复权,聚宽各个时间密度的数据都有复权,雪球源日线以上的高频数据没有复权\n type_map = {\"after\": \"post\", \"before\": \"pre\", \"normal\": None}\n return get_bar_fromjq(\n code, start=start, end=end, interval=interval, fq=type_map[type_]\n )\n elif _from in [\"wsj\"]:\n return get_bar_fromwsj(code, interval=interval)[-prev:]\n else:\n raise ParserFailure(\"unrecoginized _from %s\" % _from)\n\n\n@data_source(\"jq\")\ndef get_bar_fromjq(code, start, end, interval, fq=\"pre\"):\n code = _inverse_convert_code(code)\n trans = {\n \"60\": \"1m\",\n \"120\": \"2m\",\n \"300\": \"5m\",\n \"900\": \"15m\",\n \"1800\": \"30m\",\n \"3600\": \"60m\",\n \"7200\": \"120m\",\n \"86400\": \"daily\",\n }\n interval = trans.get(str(interval), interval)\n logger.debug(\"calling ``get_price`` from jq with %s\" % code)\n return get_price(code, start_date=start, end_date=end, frequency=interval, fq=fq)\n\n\ndef get_bar_frominvesting(code, prev=120, interval=3600):\n \"\"\"\n get bar data beyond daily bar\n\n :param code: str. investing id or url\n :param prev: int, data points from now, max might be around 500, if exceed, only None is returnd\n :param interval: default 3600. 
optional 60, 300, 900, 1800, 18000, 86400, \"week\", \"month\"\n :return: pd.DataFrame or None if prev and interval unmatch the API\n \"\"\"\n if interval == \"day\":\n interval = 86400\n elif interval == \"hour\":\n interval = 3600\n elif interval == \"minute\":\n interval = 60\n elif interval == 86400 * 7:\n interval = \"week\"\n elif interval == 86400 * 30:\n interval = \"month\"\n if len(code.split(\"/\")) == 2:\n code = get_investing_id(code)\n\n url = \"https://cn.investing.com\"\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\\\n AppleWebKit/537.36 (KHTML, like Gecko)\",\n \"Host\": \"cn.investing.com\",\n \"Referer\": \"https://cn.investing.com/commodities/\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n\n r = rget(\n url\n + \"/common/modules/js_instrument_chart/api/data.php?pair_id={code}&pair_id_for_news={code}\\\n&chart_type=area&pair_interval={interval}&candle_count={prev}&events=yes&volume_series=yes&period=\".format(\n code=code, prev=str(prev), interval=str(interval)\n ),\n headers=headers,\n )\n if not r.text:\n return # None\n r = r.json()\n df = pd.DataFrame(r[\"candles\"], columns=[\"date\", \"close\", \"0\", \"1\"])\n df = df.drop([\"0\", \"1\"], axis=1)\n df[\"date\"] = df[\"date\"].apply(\n lambda t: dt.datetime.fromtimestamp(t / 1000, tz=tz_bj).replace(tzinfo=None)\n )\n return df\n\n\ndef get_bar_fromxq(code, prev, interval=3600):\n \"\"\"\n\n :param code:\n :param prev:\n :param interval: 1m, 5m, 15m, 30m, 60m, 120m, month, quarter, year, week, day\n :return:\n \"\"\"\n # max interval is also around 500\n trans = {\n \"60\": \"1m\",\n \"300\": \"5m\",\n \"900\": \"15m\",\n \"1800\": \"30m\",\n \"3600\": \"60m\",\n \"7200\": \"120m\",\n \"86400\": \"day\",\n \"604800\": \"week\",\n \"2592000\": \"month\",\n }\n code, type_ = decouple_code(code)\n interval = trans.get(str(interval), interval)\n url = \"https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={code}&begin={tomorrow}&period={interval}&type={type_}\\\n&count=-{prev}&indicator=kline,pe,pb,ps,pcf,market_capital,agt,ggt,balance\".format(\n code=code,\n tomorrow=int(tomorrow_ts() * 1000),\n prev=prev,\n interval=interval,\n type_=type_,\n )\n r = rget(\n url, headers={\"user-agent\": \"Mozilla/5.0\"}, cookies={\"xq_a_token\": get_token()}\n )\n if not r.text:\n return # None\n else:\n df = pd.DataFrame(r.json()[\"data\"][\"item\"], columns=r.json()[\"data\"][\"column\"])\n df[\"date\"] = df[\"timestamp\"].apply(\n lambda t: dt.datetime.fromtimestamp(t / 1000, tz=tz_bj).replace(tzinfo=None)\n )\n df = df[\n [\n \"date\",\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"volume\",\n \"turnoverrate\",\n \"percent\",\n ]\n ]\n return df\n\n\ndef get_bar_fromwsj(code, token=None, interval=3600):\n # proxy required\n # code = \"FUTURE/US/XNYM/CLM20\"\n # TODO: also not explore the code format here extensively\n trans = {\"3600\": \"1H\"}\n # TODO: there is other freq tags, but I have no time to explore them, contributions are welcome:)\n freq = trans.get(str(interval), interval)\n if not token:\n token = \"cecc4267a0194af89ca343805a3e57af\"\n # the thing I am concerned here is whether token is refreshed\n\n params = {\n \"json\": 
'{\"Step\":\"PT%s\",\"TimeFrame\":\"D5\",\"EntitlementToken\":\"%s\",\\\n\"IncludeMockTick\":true,\"FilterNullSlots\":false,\"FilterClosedPoints\":true,\"IncludeClosedSlots\":false,\\\n\"IncludeOfficialClose\":true,\"InjectOpen\":false,\"ShowPreMarket\":false,\"ShowAfterHours\":false,\\\n\"UseExtendedTimeFrame\":false,\"WantPriorClose\":true,\"IncludeCurrentQuotes\":false,\\\n\"ResetTodaysAfterHoursPercentChange\":false,\\\n\"Series\":[{\"Key\":\"%s\",\"Dialect\":\"Charting\",\"Kind\":\"Ticker\",\"SeriesId\":\"s1\",\"DataTypes\":[\"Last\"]}]}'\n % (freq, token, code),\n \"ckey\": token[:10],\n }\n r = rget_json(\n \"https://api-secure.wsj.net/api/michelangelo/timeseries/history\",\n params=params,\n headers={\n \"user-agent\": \"Mozilla/5.0\",\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Dylan2010.EntitlementToken\": token,\n \"Host\": \"api-secure.wsj.net\",\n \"Origin\": \"https://www.marketwatch.com\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"cross-site\",\n },\n )\n\n df = pd.DataFrame(\n {\n \"date\": r[\"TimeInfo\"][\"Ticks\"],\n \"close\": [n[0] for n in r[\"Series\"][0][\"DataPoints\"]],\n }\n )\n df[\"date\"] = pd.to_datetime(df[\"date\"] * 1000000) + pd.Timedelta(hours=8)\n df = df[df[\"close\"] > -100.0] # 存在未来数据占位符需要排除\n return df\n\n\nclass vinfo(basicinfo, indicator):\n \"\"\"\n vinfo is an info like class wrapper for get_daily, it behaves like info\n \"\"\"\n\n def __init__(\n self,\n code,\n name=None,\n start=None,\n end=None,\n rate=0,\n col=\"close\",\n normalization=True,\n **kws\n ):\n if not name:\n try:\n name = get_rt(code)[\"name\"]\n except:\n name = code\n self.name = name\n self.code = code\n self.start = start # None is one year ago\n self.end = end # None is yesterday\n df = get_daily(code, start=start, end=end)\n df[col] = pd.to_numeric(df[col]) # in case the col is not float\n df[\"totvalue\"] = df[col]\n if normalization:\n df[\"netvalue\"] = df[col] / df.iloc[0][col]\n else:\n df[\"netvalue\"] = df[col]\n self.price = df\n self.round_label = kws.get(\"round_label\", 0)\n self.dividend_label = kws.get(\"dividend_label\", 0)\n self.value_label = kws.get(\"value_label\", 1) # 默认按金额赎回\n self.specialdate = []\n self.fenhongdate = []\n self.zhesuandate = []\n self.rate = rate\n\n\nVInfo = vinfo\n"
] | [
[
"pandas.to_datetime",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.Timedelta",
"pandas.date_range",
"pandas.isna",
"pandas.Timestamp",
"pandas.to_numeric",
"pandas.read_sql",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
cs-giung/giung2 | [
"c8560fd1b56f20eb1f3cf57202975d8325b591f5",
"c8560fd1b56f20eb1f3cf57202975d8325b591f5"
] | [
"giung2/modeling/backbone/resnet.py",
"giung2/modeling/classifier/centroid.py"
] | [
"import torch\nimport torch.nn as nn\nfrom typing import Dict, List\nfrom functools import partial\n\nfrom fvcore.common.config import CfgNode\nfrom giung2.layers import *\n\n\n__all__ = [\n \"build_resnet_backbone\",\n]\n\n\nclass IdentityShortcut(nn.Module):\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n expansion: int,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(IdentityShortcut, self).__init__()\n self.identity = MaxPool2d(kernel_size=1, stride=stride)\n self.pad_size = expansion * planes - in_planes\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.identity(x)\n out = nn.functional.pad(out, (0, 0, 0, 0, 0, self.pad_size), mode=\"constant\", value=0)\n return out\n\n\nclass ProjectionShortcut(nn.Module):\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n expansion: int,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(ProjectionShortcut, self).__init__()\n self.conv = conv(in_channels=in_planes, out_channels=expansion*planes,\n kernel_size=1, stride=stride, padding=0, **kwargs)\n self.norm = norm(num_features=expansion*planes)\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.norm(self.conv(x, **kwargs), **kwargs)\n return out\n\n\nclass FirstBlock(nn.Module):\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n conv: nn.Module,\n conv_ksp: List[int],\n norm: nn.Module,\n relu: nn.Module,\n pool: nn.Module,\n pool_ksp: List[int],\n **kwargs\n ) -> None:\n super(FirstBlock, self).__init__()\n self.conv1 = conv(in_channels=in_planes, out_channels=planes,\n kernel_size=conv_ksp[0], stride=conv_ksp[1], padding=conv_ksp[2], **kwargs)\n self.norm1 = norm(num_features=planes)\n self.relu1 = relu()\n self.pool1 = pool(kernel_size=pool_ksp[0], stride=pool_ksp[1], padding=pool_ksp[2])\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.pool1(self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs), **kwargs)\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n shortcut: nn.Module,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(BasicBlock,self).__init__()\n self.conv1 = conv(in_channels=in_planes, out_channels=planes,\n kernel_size=3, stride=stride, padding=1, **kwargs)\n self.norm1 = norm(num_features=planes)\n self.relu1 = relu()\n self.conv2 = conv(in_channels=planes, out_channels=self.expansion*planes,\n kernel_size=3, stride=1, padding=1, **kwargs)\n self.norm2 = norm(num_features=self.expansion*planes)\n self.relu2 = relu()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = shortcut(\n in_planes, planes, stride, self.expansion, conv, norm, **kwargs\n )\n else:\n self.shortcut = Identity()\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs)\n out = self.relu2(self.norm2(self.conv2(out, **kwargs), **kwargs) + self.shortcut(x, **kwargs), **kwargs)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n stride: int,\n shortcut: nn.Module,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n 
super(Bottleneck,self).__init__()\n self.conv1 = conv(in_channels=in_planes, out_channels=planes,\n kernel_size=1, stride=1, padding=0, **kwargs)\n self.norm1 = norm(num_features=planes)\n self.relu1 = relu()\n self.conv2 = conv(in_channels=planes, out_channels=planes,\n kernel_size=3, stride=stride, padding=1, **kwargs)\n self.norm2 = norm(num_features=planes)\n self.relu2 = relu()\n self.conv3 = conv(in_channels=planes, out_channels=self.expansion*planes,\n kernel_size=1, stride=1, padding=0, **kwargs)\n self.norm3 = norm(num_features=self.expansion*planes)\n self.relu3 = relu()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = shortcut(\n in_planes, planes, stride, self.expansion, conv, norm, **kwargs\n )\n else:\n self.shortcut = Identity()\n\n def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:\n out = self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs)\n out = self.relu2(self.norm2(self.conv2(out, **kwargs), **kwargs), **kwargs)\n out = self.relu3(self.norm3(self.conv3(out, **kwargs), **kwargs) + self.shortcut(x, **kwargs), **kwargs)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(\n self,\n channels: int,\n in_planes: int,\n first_block: nn.Module,\n block: nn.Module,\n shortcut: nn.Module,\n num_blocks: List[int],\n widen_factor: int,\n conv: nn.Module = Conv2d,\n norm: nn.Module = BatchNorm2d,\n relu: nn.Module = ReLU,\n **kwargs\n ) -> None:\n super(ResNet, self).__init__()\n self.channels = channels\n self.in_planes = in_planes\n self._in_planes = in_planes\n self.first_block = first_block\n self.block = block\n self.shortcut = shortcut\n self.num_blocks = num_blocks\n self.widen_factor = widen_factor\n self.conv = conv\n self.norm = norm\n self.relu = relu\n\n _layers = [self.first_block(in_planes=self.channels, planes=self.in_planes, **kwargs)]\n\n _layers += self._make_layer(\n self.in_planes * self.widen_factor, self.num_blocks[0], stride=1, **kwargs\n )\n for idx, num_block in enumerate(self.num_blocks[1:], start=1):\n _layers += self._make_layer(\n self.in_planes * (2 ** idx) * self.widen_factor, num_block, stride=2, **kwargs\n )\n self.layers = nn.Sequential(*_layers)\n\n def _make_layer(self, planes: int, num_block: int, stride: int, **kwargs) -> List[nn.Module]:\n strides = [stride] + [1] * (num_block - 1)\n _layers = []\n for stride in strides:\n _layers.append(self.block(self._in_planes, planes, stride,\n self.shortcut, self.conv, self.norm, self.relu, **kwargs))\n self._in_planes = planes * self.block.expansion\n return _layers\n\n def forward(self, x: torch.Tensor, **kwargs) -> Dict[str, torch.Tensor]:\n\n outputs = dict()\n\n # intermediate feature maps\n for layer_idx, layer in enumerate(self.layers):\n x = layer(x, **kwargs)\n outputs[f\"layer{layer_idx}\"] = x\n\n # final feature vector\n x = nn.functional.adaptive_avg_pool2d(x, (1, 1))\n x = x.view(x.size(0), -1)\n outputs[\"features\"] = x\n\n return outputs\n\n\ndef build_resnet_backbone(cfg: CfgNode) -> nn.Module:\n\n # Conv2d layers may be replaced by its variations\n _conv_layers = cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS\n kwargs = {\n \"bias\": cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS_BIAS,\n \"same_padding\": cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS_SAME_PADDING,\n }\n if _conv_layers == \"Conv2d\":\n conv_layers = Conv2d\n elif _conv_layers == \"Conv2d_Bezier\":\n conv_layers = Conv2d_Bezier\n elif _conv_layers in [\"Conv2d_BatchEnsemble\", \"Conv2d_BatchEnsembleV2\",]:\n if cfg.MODEL.BATCH_ENSEMBLE.ENABLED is False:\n raise AssertionError(\n 
f\"Set MODEL.BATCH_ENSEMBLE.ENABLED=True to use {_conv_layers}\"\n )\n if _conv_layers == \"Conv2d_BatchEnsemble\":\n conv_layers = Conv2d_BatchEnsemble\n if _conv_layers == \"Conv2d_BatchEnsembleV2\":\n conv_layers = Conv2d_BatchEnsembleV2\n kwargs.update({\n \"ensemble_size\": cfg.MODEL.BATCH_ENSEMBLE.ENSEMBLE_SIZE,\n \"use_ensemble_bias\": cfg.MODEL.BATCH_ENSEMBLE.USE_ENSEMBLE_BIAS,\n \"alpha_initializer\": {\n \"initializer\": cfg.MODEL.BATCH_ENSEMBLE.ALPHA_INITIALIZER.NAME,\n \"init_values\": cfg.MODEL.BATCH_ENSEMBLE.ALPHA_INITIALIZER.VALUES,\n },\n \"gamma_initializer\": {\n \"initializer\": cfg.MODEL.BATCH_ENSEMBLE.GAMMA_INITIALIZER.NAME,\n \"init_values\": cfg.MODEL.BATCH_ENSEMBLE.GAMMA_INITIALIZER.VALUES,\n },\n })\n elif _conv_layers == \"Conv2d_Dropout\":\n if cfg.MODEL.DROPOUT.ENABLED is False:\n raise AssertionError(\n f\"Set MODEL.DROPOUT.ENABLED=True to use {_conv_layers}\"\n )\n conv_layers = Conv2d_Dropout\n kwargs.update({\n \"drop_p\": cfg.MODEL.DROPOUT.DROP_PROBABILITY,\n })\n elif _conv_layers == \"Conv2d_SpatialDropout\":\n if cfg.MODEL.SPATIAL_DROPOUT.ENABLED is False:\n raise AssertionError(\n f\"Set MODEL.SPATIAL_DROPOUT.ENABLED=True to use {_conv_layers}\"\n )\n conv_layers = Conv2d_SpatialDropout\n kwargs.update({\n \"drop_p\": cfg.MODEL.SPATIAL_DROPOUT.DROP_PROBABILITY,\n })\n elif _conv_layers == \"Conv2d_DropBlock\":\n if cfg.MODEL.DROP_BLOCK.ENABLED is False:\n raise AssertionError(\n f\"Set MODEL.DROP_BLOCK.ENABLED=True to use {_conv_layers}\"\n )\n conv_layers = Conv2d_DropBlock\n kwargs.update({\n \"drop_p\": cfg.MODEL.DROP_BLOCK.DROP_PROBABILITY,\n \"block_size\": cfg.MODEL.DROP_BLOCK.BLOCK_SIZE,\n \"use_shared_masks\": cfg.MODEL.DROP_BLOCK.USE_SHARED_MASKS,\n })\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.CONV_LAYERS: {_conv_layers}\"\n )\n\n # BatchNorm2d layers may be replaced by its variations\n _norm_layers = cfg.MODEL.BACKBONE.RESNET.NORM_LAYERS\n if _norm_layers == \"NONE\":\n norm_layers = Identity\n elif _norm_layers == \"BatchNorm2d\":\n norm_layers = BatchNorm2d\n elif _norm_layers == \"GroupNorm2d\":\n norm_layers = partial(GroupNorm2d, num_groups=cfg.MODEL.BACKBONE.RESNET.IN_PLANES // 2)\n elif _norm_layers == \"FilterResponseNorm2d\":\n norm_layers = FilterResponseNorm2d\n elif _norm_layers == \"FilterResponseNorm2d_Bezier\":\n norm_layers = FilterResponseNorm2d_Bezier\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.NORM_LAYERS: {_norm_layers}\"\n )\n\n # ReLU layers may be replaced by its variations\n _activations = cfg.MODEL.BACKBONE.RESNET.ACTIVATIONS\n if _activations == \"NONE\":\n activations = Identity\n elif _activations == \"ReLU\":\n activations = ReLU\n elif _activations == \"SiLU\":\n activations = SiLU\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.ACTIVATIONS: {_activations}\"\n )\n\n # specify the first block\n first_block = partial(\n FirstBlock,\n conv = conv_layers,\n conv_ksp = cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.CONV_KSP,\n norm = norm_layers if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_NORM_LAYER else Identity,\n relu = activations if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_ACTIVATION else Identity,\n pool = MaxPool2d if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_POOL_LAYER else Identity,\n pool_ksp = cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.POOL_KSP,\n )\n\n # specify block\n _block = cfg.MODEL.BACKBONE.RESNET.BLOCK\n if _block == \"BasicBlock\":\n block = BasicBlock\n elif _block == \"Bottleneck\":\n block = Bottleneck\n else:\n raise 
NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.BLOCK: {_block}\"\n )\n\n # specify shortcut\n _shortcut = cfg.MODEL.BACKBONE.RESNET.SHORTCUT\n if _shortcut == \"IdentityShortcut\":\n shortcut = IdentityShortcut\n elif _shortcut == \"ProjectionShortcut\":\n shortcut = ProjectionShortcut\n else:\n raise NotImplementedError(\n f\"Unknown MODEL.BACKBONE.RESNET.SHORTCUT: {_shortcut}\"\n )\n\n # build backbone\n backbone = ResNet(\n channels = cfg.MODEL.BACKBONE.RESNET.CHANNELS,\n in_planes = cfg.MODEL.BACKBONE.RESNET.IN_PLANES,\n first_block = first_block,\n block = block,\n shortcut = shortcut,\n num_blocks = cfg.MODEL.BACKBONE.RESNET.NUM_BLOCKS,\n widen_factor = cfg.MODEL.BACKBONE.RESNET.WIDEN_FACTOR,\n conv = conv_layers,\n norm = norm_layers,\n relu = activations,\n **kwargs\n )\n\n # initialize weights\n for m in backbone.modules():\n if isinstance(m, Conv2d):\n if isinstance(m.weight, nn.ParameterList):\n for idx in range(len(m.weight)):\n nn.init.kaiming_normal_(m.weight[idx], mode=\"fan_out\", nonlinearity=\"relu\")\n else:\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n\n return backbone\n",
"import torch\nimport torch.nn as nn\nfrom typing import Dict\n\nfrom fvcore.common.config import CfgNode\nfrom giung2.layers import *\n\n\n__all__ = [\n \"build_centroid_classifier\",\n]\n\n\nclass CentroidClassifier(nn.Module):\n\n def __init__(\n self,\n feature_dim: int,\n num_classes: int,\n **kwargs,\n ) -> None:\n super(CentroidClassifier, self).__init__()\n self.feature_dim = feature_dim\n self.num_classes = num_classes\n\n self.centers = nn.Parameter(\n torch.randn(self.num_classes, self.feature_dim)\n )\n\n def forward(self, x: torch.Tensor, **kwargs) -> Dict[str, torch.Tensor]:\n\n outputs = dict()\n\n # make predictions\n diff = torch.unsqueeze(x, dim=1) - torch.unsqueeze(self.centers, dim=0)\n dist = torch.sum(torch.mul(diff, diff), dim=-1)\n logits = -0.5 * dist\n\n outputs[\"logits\"] = logits\n outputs[\"confidences\"] = torch.softmax(outputs[\"logits\"], dim=1)\n outputs[\"log_confidences\"] = torch.log_softmax(outputs[\"logits\"], dim=1)\n\n return outputs\n\n\ndef build_centroid_classifier(cfg: CfgNode) -> nn.Module:\n\n kwargs = {}\n\n classifier = CentroidClassifier(\n feature_dim = cfg.MODEL.CLASSIFIER.SOFTMAX_CLASSIFIER.FEATURE_DIM,\n num_classes = cfg.MODEL.CLASSIFIER.SOFTMAX_CLASSIFIER.NUM_CLASSES,\n **kwargs\n )\n\n return classifier\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.pad",
"torch.nn.functional.adaptive_avg_pool2d"
],
[
"torch.softmax",
"torch.randn",
"torch.unsqueeze",
"torch.log_softmax",
"torch.mul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nasimanousheh/dipy | [
"9d20c911b4afe83e52ded698eff9ba0f0fafeca8",
"d737a6af80a184322e30de4760e8c205291dbed0"
] | [
"dipy/data/tests/test_fetcher.py",
"dipy/denoise/tests/test_lpca.py"
] | [
"import tempfile\nimport os.path as op\nimport sys\nimport os\nimport numpy.testing as npt\nfrom nibabel.tmpdirs import TemporaryDirectory\nimport dipy.data.fetcher as fetcher\nfrom dipy.data import SPHERE_FILES\nfrom threading import Thread\nif sys.version_info[0] < 3:\n from SimpleHTTPServer import SimpleHTTPRequestHandler # Python 2\n from SocketServer import TCPServer as HTTPServer\nelse:\n from http.server import HTTPServer, SimpleHTTPRequestHandler # Python 3\n\n\ndef test_check_md5():\n fd, fname = tempfile.mkstemp()\n stored_md5 = fetcher._get_file_md5(fname)\n # If all is well, this shouldn't return anything:\n npt.assert_equal(fetcher.check_md5(fname, stored_md5), None)\n # If None is provided as input, it should silently not check either:\n npt.assert_equal(fetcher.check_md5(fname, None), None)\n # Otherwise, it will raise its exception class:\n npt.assert_raises(fetcher.FetcherError, fetcher.check_md5, fname, 'foo')\n\n\ndef test_make_fetcher():\n symmetric362 = SPHERE_FILES['symmetric362']\n with TemporaryDirectory() as tmpdir:\n stored_md5 = fetcher._get_file_md5(symmetric362)\n\n # create local HTTP Server\n testfile_url = op.split(symmetric362)[0] + os.sep\n test_server_url = \"http://127.0.0.1:8000/\"\n print(testfile_url)\n print(symmetric362)\n current_dir = os.getcwd()\n # change pwd to directory containing testfile.\n os.chdir(testfile_url)\n server = HTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)\n server_thread = Thread(target=server.serve_forever)\n server_thread.deamon = True\n server_thread.start()\n\n # test make_fetcher\n sphere_fetcher = fetcher._make_fetcher(\"sphere_fetcher\",\n tmpdir, test_server_url,\n [op.split(symmetric362)[-1]],\n [\"sphere_name\"],\n md5_list=[stored_md5])\n\n sphere_fetcher()\n assert op.isfile(op.join(tmpdir, \"sphere_name\"))\n npt.assert_equal(fetcher._get_file_md5(op.join(tmpdir, \"sphere_name\")),\n stored_md5)\n\n # stop local HTTP Server\n server.shutdown()\n # change to original working directory\n os.chdir(current_dir)\n\n\ndef test_fetch_data():\n symmetric362 = SPHERE_FILES['symmetric362']\n with TemporaryDirectory() as tmpdir:\n md5 = fetcher._get_file_md5(symmetric362)\n bad_md5 = '8' * len(md5)\n\n newfile = op.join(tmpdir, \"testfile.txt\")\n # Test that the fetcher can get a file\n testfile_url = symmetric362\n print(testfile_url)\n testfile_dir, testfile_name = op.split(testfile_url)\n # create local HTTP Server\n test_server_url = \"http://127.0.0.1:8001/\" + testfile_name\n current_dir = os.getcwd()\n # change pwd to directory containing testfile.\n os.chdir(testfile_dir + os.sep)\n # use different port as shutdown() takes time to release socket.\n server = HTTPServer(('localhost', 8001), SimpleHTTPRequestHandler)\n server_thread = Thread(target=server.serve_forever)\n server_thread.deamon = True\n server_thread.start()\n\n files = {\"testfile.txt\": (test_server_url, md5)}\n fetcher.fetch_data(files, tmpdir)\n npt.assert_(op.exists(newfile))\n\n # Test that the file is replaced when the md5 doesn't match\n with open(newfile, 'a') as f:\n f.write(\"some junk\")\n fetcher.fetch_data(files, tmpdir)\n npt.assert_(op.exists(newfile))\n npt.assert_equal(fetcher._get_file_md5(newfile), md5)\n\n # Test that an error is raised when the md5 checksum of the download\n # file does not match the expected value\n files = {\"testfile.txt\": (test_server_url, bad_md5)}\n npt.assert_raises(fetcher.FetcherError,\n fetcher.fetch_data, files, tmpdir)\n\n # stop local HTTP Server\n server.shutdown()\n # change to original 
working directory\n os.chdir(current_dir)\n\n def test_dipy_home():\n test_path = 'TEST_PATH'\n if 'DIPY_HOME' in os.environ:\n old_home = os.environ['DIPY_HOME']\n del os.environ['DIPY_HOME']\n else:\n old_home = None\n\n reload(fetcher)\n\n npt.assert_string_equal(fetcher.dipy_home,\n op.join(os.path.expanduser('~'), '.dipy'))\n os.environ['DIPY_HOME'] = test_path\n reload(fetcher)\n npt.assert_string_equal(fetcher.dipy_home, test_path)\n\n # return to previous state\n if old_home:\n os.environ['DIPY_HOME'] = old_home\n",
"import numpy as np\nimport scipy as sp\nimport scipy.special as sps\nfrom numpy.testing import (run_module_suite,\n assert_,\n assert_equal,\n assert_raises,\n assert_array_almost_equal)\nfrom dipy.denoise.localpca import localpca\nfrom dipy.sims.voxel import multi_tensor\nfrom dipy.core.gradients import gradient_table, generate_bvecs\nfrom dipy.core.sphere import disperse_charges, HemiSphere\nfrom dipy.sims.voxel import multi_tensor\n\n\ndef rfiw_phantom(gtab, snr=None):\n \"\"\"rectangle fiber immersed in water\"\"\"\n # define voxel index\n slice_ind = np.zeros((10, 10, 8))\n slice_ind[4:7, 4:7, :] = 1\n slice_ind[4:7, 7, :] = 2\n slice_ind[7, 7, :] = 3\n slice_ind[7, 4:7, :] = 4\n slice_ind[7, 3, :] = 5\n slice_ind[4:7, 3, :] = 6\n slice_ind[3, 3, :] = 7\n slice_ind[3, 4:7, :] = 8\n slice_ind[3, 7, :] = 9\n\n # Define tisse diffusion parameters\n # Restricted diffusion\n ADr = 0.99e-3\n RDr = 0.0\n # Hindered diffusion\n ADh = 2.26e-3\n RDh = 0.87\n # S0 value for tissue\n S1 = 50\n # Fraction between Restricted and Hindered diffusion\n fia = 0.51\n\n # Define water diffusion\n Dwater = 3e-3\n S2 = 100 # S0 value for water\n\n # Define tissue volume fraction for each voxel type (in index order)\n f = np.array([0., 1., 0.6, 0.18, 0.30, 0.15, 0.50, 0.35, 0.70, 0.42])\n\n # Define S0 for each voxel (in index order)\n S0 = S1 * f + S2 * (1 - f)\n\n # multi tensor simulations assume that each water pull as constant S0\n # since I am assuming that tissue and water voxels have different S0,\n # tissue volume fractions have to be adjusted to the measured f values when\n # constant S0 are assumed constant. Doing this correction, simulations will\n # be analogous to simulates that S0 are different for each media. (For more\n # datails on this contact the phantom designer)\n f1 = f * S1 / S0\n\n mevals = np.array([[ADr, RDr, RDr], [ADh, RDh, RDh],\n [Dwater, Dwater, Dwater]])\n angles = [(0, 0, 1), (0, 0, 1), (0, 0, 1)]\n DWI = np.zeros(slice_ind.shape + (gtab.bvals.size, ))\n for i in range(10):\n fractions = [f1[i] * fia * 100, f1[i] *\n (1 - fia) * 100, (1 - f1[i]) * 100]\n sig, direction = multi_tensor(gtab, mevals, S0=S0[i], angles=angles,\n fractions=fractions, snr=None)\n DWI[slice_ind == i, :] = sig\n\n if snr is None:\n return DWI\n else:\n sigma = S2 * 1.0 / snr\n n1 = np.random.normal(0, sigma, size=DWI.shape)\n n2 = np.random.normal(0, sigma, size=DWI.shape)\n return [np.sqrt((DWI / np.sqrt(2) + n1)**2 +\n (DWI / np.sqrt(2) + n2)**2), sigma]\n\n\ndef gen_gtab():\n # generate a gradient table for phantom data\n directions8 = generate_bvecs(8)\n directions30 = generate_bvecs(30)\n directions60 = generate_bvecs(60)\n # Create full dataset parameters\n # (6 b-values = 0, 8 directions for b-value 300, 30 directions for b-value\n # 1000 and 60 directions for b-value 2000)\n bvals = np.hstack((np.zeros(6),\n 300 * np.ones(8),\n 1000 * np.ones(30),\n 2000 * np.ones(60)))\n bvecs = np.vstack((np.zeros((6, 3)),\n directions8, directions30, directions60))\n gtab = gradient_table(bvals, bvecs)\n return gtab\n\n\ndef test_lpca_static():\n S0 = 100 * np.ones((20, 20, 20, 20), dtype='f8')\n S0ns = localpca(S0, sigma=np.ones((20, 20, 20), dtype=np.float64))\n assert_array_almost_equal(S0, S0ns)\n\n\ndef test_lpca_random_noise():\n S0 = 100 + 2 * np.random.standard_normal((22, 23, 30, 20))\n S0ns = localpca(S0, sigma=np.std(S0))\n\n assert_(S0ns.min() > S0.min())\n assert_(S0ns.max() < S0.max())\n assert_equal(np.round(S0ns.mean()), 100)\n\n\ndef test_lpca_boundary_behaviour():\n # check is first 
slice is getting denoised or not ?\n S0 = 100 * np.ones((20, 20, 20, 20), dtype='f8')\n S0[:, :, 0, :] = S0[:, :, 0, :] + 2 * \\\n np.random.standard_normal((20, 20, 20))\n S0_first = S0[:, :, 0, :]\n S0ns = localpca(S0, sigma=np.std(S0))\n S0ns_first = S0ns[:, :, 0, :]\n rmses = np.sum(np.abs(S0ns_first - S0_first)) / \\\n (100.0 * 20.0 * 20.0 * 20.0)\n\n # shows that S0n_first is not very close to S0_first\n assert_(rmses > 0.0001)\n assert_equal(np.round(S0ns_first.mean()), 100)\n\n # Use a volume of sigma, instead of a scalar:\n sigma_vol = np.ones(S0.shape[:-1]) * np.std(S0)\n S0ns = localpca(S0, sigma=sigma_vol)\n\n rmses = np.sum(np.abs(S0ns_first - S0_first)) / \\\n (100.0 * 20.0 * 20.0 * 20.0)\n\n # shows that S0n_first is not very close to S0_first\n assert_(rmses > 0.0001)\n assert_equal(np.round(S0ns_first.mean()), 100)\n\n\ndef test_lpca_rmse():\n S0_w_noise = 100 + 2 * np.random.standard_normal((22, 23, 30, 20))\n rmse_w_noise = np.sqrt(np.mean((S0_w_noise - 100) ** 2))\n S0_denoised = localpca(S0_w_noise, sigma=np.std(S0_w_noise))\n rmse_denoised = np.sqrt(np.mean((S0_denoised - 100) ** 2))\n # Denoising should always improve the RMSE:\n assert_(rmse_denoised < rmse_w_noise)\n\n\ndef test_lpca_sharpness():\n S0 = np.ones((30, 30, 30, 20), dtype=np.float64) * 100\n S0[10:20, 10:20, 10:20, :] = 50\n S0[20:30, 20:30, 20:30, :] = 0\n S0 = S0 + 20 * np.random.standard_normal((30, 30, 30, 20))\n S0ns = localpca(S0, sigma=20.0)\n # check the edge gradient\n edgs = np.abs(np.mean(S0ns[8, 10:20, 10:20] - S0ns[12, 10:20, 10:20]) - 50)\n assert_(edgs < 2)\n\n\ndef test_lpca_dtype():\n # If out_dtype is not specified, we retain the original precision:\n S0 = 200 * np.ones((20, 20, 20, 3), dtype=np.float64)\n S0ns = localpca(S0, sigma=1)\n assert_equal(S0.dtype, S0ns.dtype)\n\n S0 = 200 * np.ones((20, 20, 20, 20), dtype=np.uint16)\n S0ns = localpca(S0, sigma=np.ones((20, 20, 20)))\n assert_equal(S0.dtype, S0ns.dtype)\n\n # If we set out_dtype, we get what we asked for:\n S0 = 200 * np.ones((20, 20, 20, 20), dtype=np.uint16)\n S0ns = localpca(S0, sigma=np.ones((20, 20, 20)),\n out_dtype=np.float32)\n assert_equal(np.float32, S0ns.dtype)\n\n # If we set a few entries to zero, this induces negative entries in the\n # Resulting denoised array:\n S0[5:8, 5:8, 5:8] = 0\n # But if we should always get all non-negative results:\n S0ns = localpca(S0, sigma=np.ones((20, 20, 20)), out_dtype=np.uint16)\n assert_(np.all(S0ns >= 0))\n # And no wrap-around to crazy high values:\n assert_(np.all(S0ns <= 200))\n\n\ndef test_lpca_wrong():\n S0 = np.ones((20, 20))\n assert_raises(ValueError, localpca, S0, sigma=1)\n\n\ndef test_phantom():\n gtab = gen_gtab()\n DWI_clean = rfiw_phantom(gtab, snr=None)\n DWI, sigma = rfiw_phantom(gtab, snr=30)\n # To test without rician correction\n temp = (DWI_clean / sigma)**2\n DWI_clean_wrc = (sigma * np.sqrt(np.pi / 2) * np.exp(-0.5 * temp) *\n ((1 + 0.5 * temp) * sps.iv(0, 0.25 * temp) + 0.5 * temp *\n sps.iv(1, 0.25 * temp))**2)\n\n DWI_den = localpca(DWI, sigma, patch_radius=3)\n rmse_den = np.sum(np.abs(DWI_clean - DWI_den)) / np.sum(np.abs(DWI_clean))\n rmse_noisy = np.sum(np.abs(DWI_clean - DWI)) / np.sum(np.abs(DWI_clean))\n\n rmse_den_wrc = np.sum(np.abs(DWI_clean_wrc - DWI_den)\n ) / np.sum(np.abs(DWI_clean_wrc))\n rmse_noisy_wrc = np.sum(np.abs(DWI_clean_wrc - DWI)) / \\\n np.sum(np.abs(DWI_clean_wrc))\n\n assert_(np.max(DWI_clean) / sigma < np.max(DWI_den) / sigma)\n assert_(np.max(DWI_den) / sigma < np.max(DWI) / sigma)\n assert_(rmse_den < rmse_noisy)\n 
assert_(rmse_den_wrc < rmse_noisy_wrc)\n\n # Check if the results of different PCA methods (eig, svd) are similar\n DWI_den_svd = localpca(DWI, sigma, pca_method='svd', patch_radius=3)\n assert_array_almost_equal(DWI_den, DWI_den_svd)\n\n assert_raises(ValueError, localpca, DWI, sigma, pca_method='empty')\n\n # Try this with a sigma volume, instead of a scalar\n sigma_vol = sigma * np.ones(DWI.shape[:-1])\n mask = np.zeros_like(DWI, dtype=bool)[..., 0]\n mask[2:-2, 2:-2, 2:-2] = True\n DWI_den = localpca(DWI, sigma_vol, mask, patch_radius=3)\n DWI_clean_masked = DWI_clean.copy()\n DWI_clean_masked[~mask] = 0\n DWI_masked = DWI.copy()\n DWI_masked[~mask] = 0\n rmse_den = np.sum(np.abs(DWI_clean_masked - DWI_den)) / np.sum(np.abs(\n DWI_clean_masked))\n rmse_noisy = np.sum(np.abs(DWI_clean_masked - DWI_masked)) / np.sum(np.abs(\n DWI_clean_masked))\n\n DWI_clean_wrc_masked = DWI_clean_wrc.copy()\n DWI_clean_wrc_masked[~mask] = 0\n rmse_den_wrc = np.sum(np.abs(DWI_clean_wrc_masked - DWI_den)\n ) / np.sum(np.abs(DWI_clean_wrc_masked))\n rmse_noisy_wrc = np.sum(np.abs(DWI_clean_wrc_masked - DWI_masked)) / \\\n np.sum(np.abs(DWI_clean_wrc_masked))\n\n assert_(np.max(DWI_clean) / sigma < np.max(DWI_den) / sigma)\n assert_(np.max(DWI_den) / sigma < np.max(DWI) / sigma)\n assert_(rmse_den < rmse_noisy)\n assert_(rmse_den_wrc < rmse_noisy_wrc)\n\n\ndef test_lpca_ill_conditioned():\n gtab = gen_gtab()\n DWI, sigma = rfiw_phantom(gtab, snr=30)\n assert_raises(ValueError, localpca, DWI, sigma, patch_radius=1)\n\n\ndef test_lpca_sigma_wrong_shape():\n gtab = gen_gtab()\n DWI, sigma = rfiw_phantom(gtab, snr=30)\n # If sigma is 3D but shape is not like DWI.shape[:-1], an error is raised:\n sigma = np.zeros((DWI.shape[0], DWI.shape[1] + 1, DWI.shape[2]))\n assert_raises(ValueError, localpca, DWI, sigma)\n\n\nif __name__ == '__main__':\n run_module_suite()\n"
] | [
[
"numpy.testing.assert_raises",
"numpy.testing.assert_string_equal"
],
[
"numpy.sqrt",
"numpy.all",
"numpy.max",
"numpy.mean",
"numpy.zeros_like",
"numpy.exp",
"numpy.testing.assert_equal",
"numpy.std",
"numpy.zeros",
"scipy.special.iv",
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.array",
"numpy.testing.run_module_suite",
"numpy.abs",
"numpy.random.standard_normal",
"numpy.ones",
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BendeguzToth/NeuralLanguageModel | [
"f4bb60375019acd57c7396768d62ad0f3166391c"
] | [
"Project/_visualize.py"
] | [
"\"\"\"\nIn this file we visualize the activations of\nparticular neurons, at different positions\nof a provided sample text.\n\"\"\"\n\n# Standard libraries\nimport json\nimport tkinter as tk\n\n# Third-party libraries\nimport numpy as np\n\n# Project files\nfrom layers import LSTM\n\n# SETUP\nMODEL = \"saves/ShakespeareNet.json\"\nLOOKUP_FILE = \"saves/ShakespeareLookup.json\"\nTEXT_FILE = \"saves/sample.txt\"\n\n\ndef main():\n with open(LOOKUP_FILE, 'r') as file:\n chars = json.load(file)\n\n # Here we make dictionaries that can be used to convert\n # between characters, integer id-s of characters, and one-hot\n # vectors that will be used to represent the characters.\n char_to_int = dict()\n int_to_char = dict()\n char_to_vec = dict()\n\n for i in range(len(chars)):\n char_to_int[chars[i]] = i\n int_to_char[i] = chars[i]\n vec = np.zeros((len(chars), 1))\n vec[i] = 1.\n char_to_vec[chars[i]] = vec\n\n # The length of the vector that represents a character\n # is equivalent to the number of different characters\n # in the text.\n EMBEDDING_LENGTH = len(chars)\n # Create the LSTM layers only. We don't use the Network class,\n # since we are only interested in the activations of the recurrent\n # layers.\n first_layer = LSTM(size=512, input_size=EMBEDDING_LENGTH, batch_size=1, backprop_depth=1, stateful=True)\n second_layer = LSTM(size=512, input_size=512, batch_size=1, backprop_depth=1, stateful=True)\n\n # Load the weights.\n with open(MODEL, 'r') as file:\n weights = json.load(file)\n first_layer.loadParams(weights[0])\n second_layer.loadParams(weights[1])\n\n # Loading in the file.\n with open(TEXT_FILE, 'r', encoding='utf8') as file:\n text = file.read()\n source = list(text)\n\n for i in range(len(source)):\n source[i] = char_to_vec[source[i]]\n\n # Feed the text to the network.\n # Here we look at the activation of the neurons of the\n # hidden state at the 2nd LSTM layer.\n # We take the first element of the output as there is only one\n # batch.\n out = second_layer.forward(first_layer.forward(np.array([source])))[0]\n\n # ###############---TKINTER---#############################################\n class Wrap:\n NEURON_INDEX = 0\n\n def showNeuron():\n for j in range(out.shape[0]):\n # We will leave the background of the newline characters white,\n # regardless of its activation. 
The reason for that is that the color\n # would fill the entire remainder of the line, which is very disturbing to look at.\n intensity = 255 if text[j] == '\\n' else 255 - int((out[j, Wrap.NEURON_INDEX, 0] + 1) * 127.5)\n text_box.tag_config(str(j), background=\"#%02x%02x%02x\" % (\n 255, intensity, intensity))\n\n def inputFromEntry(evt):\n Wrap.NEURON_INDEX = int(entry.get())\n entry.delete(0, \"end\")\n showNeuron()\n\n def nextButtonClicked():\n Wrap.NEURON_INDEX += 1\n entry.delete(0, \"end\")\n entry.insert(tk.INSERT, str(Wrap.NEURON_INDEX))\n showNeuron()\n\n # Making the tkinter window.\n root = tk.Tk()\n text_box = tk.Text(root, height=35)\n text_box.insert(tk.INSERT, text)\n text_box.pack()\n current_line = 1\n current_char = 0\n for i in range(out.shape[0]):\n text_box.tag_add(str(i), f\"{current_line}.{current_char}\")\n current_char += 1\n if text[i] == '\\n':\n current_line += 1\n current_char = 0\n\n # Making the entry box.\n entry = tk.Entry(root, width=5)\n entry.pack()\n entry.bind(\"<Return>\", inputFromEntry)\n\n # Buttons\n up = tk.Button(text=\"Next\", command=nextButtonClicked)\n up.pack()\n\n # Show the first neuron by default.\n showNeuron()\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vjaguilera/BERT4Rec | [
"8c460676af224c90c9cc89f1ba837b38f04e4210"
] | [
"gen_data_fin.py"
] | [
"# -*- coding: UTF-8 -*-\nimport os\nimport codecs\n\nimport collections\nimport random\n\nimport sys\n\nimport tensorflow as tf\n\nimport six\n\nfrom util import *\nfrom vocab import *\nimport pickle\nimport multiprocessing\nimport time\n\n\nrandom_seed = 12345\nshort_seq_prob = 0 # Probability of creating sequences which are shorter than the maximum length。\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"signature\", 'default', \"signature_name\")\n\nflags.DEFINE_integer(\n \"pool_size\", 10,\n \"multiprocesses pool size.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 200,\n \"max sequence length.\")\n\nflags.DEFINE_integer(\n \"max_predictions_per_seq\", 20,\n \"max_predictions_per_seq.\")\n\nflags.DEFINE_float(\n \"masked_lm_prob\", 0.15,\n \"Masked LM probability.\")\n\nflags.DEFINE_float(\n \"mask_prob\", 1.0,\n \"mask probabaility\")\n\nflags.DEFINE_integer(\n \"dupe_factor\", 10,\n \"Number of times to duplicate the input data (with different masks).\")\n\nflags.DEFINE_float(\"prop_sliding_window\", 0.1, \"sliding window step size.\")\n \nflags.DEFINE_string(\n \"data_dir\", './data/',\n \"data dir.\")\n\nflags.DEFINE_string(\n \"dataset_name\", 'ml-1m',\n \"dataset name.\")\n\n\ndef printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\nclass TrainingInstance(object):\n \"\"\"A single training instance (sentence pair).\"\"\"\n\n def __init__(self, info, tokens, masked_lm_positions, masked_lm_labels):\n self.info = info # info = [user]\n self.tokens = tokens\n self.masked_lm_positions = masked_lm_positions\n self.masked_lm_labels = masked_lm_labels\n\n def __str__(self):\n s = \"\"\n s += \"info: %s\\n\" % (\" \".join([printable_text(x) for x in self.info]))\n s += \"tokens: %s\\n\" % (\n \" \".join([printable_text(x) for x in self.tokens]))\n s += \"masked_lm_positions: %s\\n\" % (\n \" \".join([str(x) for x in self.masked_lm_positions]))\n s += \"masked_lm_labels: %s\\n\" % (\n \" \".join([printable_text(x) for x in self.masked_lm_labels]))\n s += \"\\n\"\n return s\n\n def __repr__(self):\n return self.__str__()\n\n\ndef write_instance_to_example_files(instances, max_seq_length,\n max_predictions_per_seq, vocab,\n output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n 
for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n try:\n input_ids = vocab.convert_tokens_to_ids(instance.tokens)\n except:\n print(instance)\n\n input_mask = [1] * len(input_ids)\n assert len(input_ids) <= max_seq_length\n\n input_ids += [0] * (max_seq_length - len(input_ids))\n input_mask += [0] * (max_seq_length - len(input_mask))\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = vocab.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n masked_lm_positions += [0] * (max_predictions_per_seq - len(masked_lm_positions))\n masked_lm_ids += [0] * (max_predictions_per_seq - len(masked_lm_ids))\n masked_lm_weights += [0.0] * (max_predictions_per_seq - len(masked_lm_weights))\n\n features = collections.OrderedDict()\n features[\"info\"] = create_int_feature(instance.info)\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"masked_lm_positions\"] = create_int_feature(\n masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n\n tf_example = tf.train.Example(\n features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\"%s: %s\" % (feature_name,\n \" \".join([str(x)\n for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", total_written)\n\n\ndef create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n\ndef create_float_feature(values):\n feature = tf.train.Feature(\n float_list=tf.train.FloatList(value=list(values)))\n return feature\n\n\ndef create_training_instances(all_documents_raw,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n rng,\n vocab,\n mask_prob,\n prop_sliding_window,\n pool_size,\n force_last=False):\n \"\"\"Create `TrainingInstance`s from raw text.\n PARAMS:\n - all_documents_raw (dict): Dict containing users as \n keys and item-list as value\n \"\"\"\n all_documents = {}\n\n # TEST\n if force_last:\n max_num_tokens = max_seq_length\n for user, item_seq in all_documents_raw.items():\n if len(item_seq) == 0:\n print(\"got empty seq:\" + user)\n continue\n all_documents[user] = [item_seq[-max_num_tokens:]]\n # Assign list of list from the last to the max_num_tokens\n\n # TRAIN\n else:\n max_num_tokens = max_seq_length # we need two sentence\n\n sliding_step = (int)(\n prop_sliding_window *\n max_num_tokens) if prop_sliding_window != -1.0 else max_num_tokens\n for user, item_seq in all_documents_raw.items():\n if len(item_seq) == 0:\n print(\"got empty seq:\" + user)\n continue\n\n #todo: add 
slide\n if len(item_seq) <= max_num_tokens:\n # All to token\n all_documents[user] = [item_seq]\n else:\n beg_idx = list(range(len(item_seq)-max_num_tokens, 0, -sliding_step))\n beg_idx.append(0)\n # Reverse ordered list with 0 appended\n all_documents[user] = [item_seq[i:i + max_num_tokens] for i in beg_idx[::-1]]\n\n instances = []\n\n # TEST\n if force_last:\n for user in all_documents:\n instances.extend(\n create_instances_from_document_test(\n all_documents, user, max_seq_length))\n print(\"num of instance:{}\".format(len(instances)))\n\n # TRAIN\n else:\n start_time = time.clock()\n pool = multiprocessing.Pool(processes=pool_size)\n instances = []\n print(\"Document quantity: {}\".format(len(all_documents)))\n\n def log_result(result):\n print(\"callback function result type: {}, size: {} \".format(type(result), len(result)))\n # RESULT CAN BE error_callback or the result of create_instances_threading\n instances.extend(result)\n # Add Training Instances to instances list if result is correct\n\n for step in range(dupe_factor):\n # Run a process async as a thread\n pool.apply_async(\n create_instances_threading, args=(\n all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, random.Random(random.randint(1,10000)),\n mask_prob, step, dupe_factor), callback=log_result)\n pool.close()\n pool.join()\n \n # Always masking the last item\n for user in all_documents:\n instances.extend(\n mask_last(\n all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, rng))\n\n print(\"num of instance:{}; time:{}\".format(len(instances), time.clock() - start_time))\n rng.shuffle(instances)\n return instances\n\n\ndef create_instances_threading(all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, rng,\n mask_prob, step, dupe_factor):\n cnt = 0\n start_time = time.clock()\n instances = []\n for user in all_documents:\n cnt += 1\n if cnt % 1000 == 0:\n print(\"step: {}/{}, name: {}, user: {}, time: {}\".format(step, dupe_factor, multiprocessing.current_process().name, cnt, time.clock()-start_time))\n start_time = time.clock()\n\n instances.extend(create_instances_from_document_train(\n all_documents, user, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab, rng,\n mask_prob))\n \n return instances\n\n\ndef mask_last(\n all_documents, user, max_seq_length, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, vocab, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[user]\n max_num_tokens = max_seq_length\n \n instances = []\n info = [int(user.split(\"_\")[1])]\n vocab_items = vocab.get_items()\n\n for tokens in document:\n assert len(tokens) >= 1 and len(tokens) <= max_num_tokens\n \n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions_force_last(tokens)\n instance = TrainingInstance(\n info=info,\n tokens=tokens,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n\n return instances\n\n\ndef create_instances_from_document_test(all_documents, user, max_seq_length):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[user]\n max_num_tokens = max_seq_length\n \n assert len(document) == 1 and len(document[0]) <= max_num_tokens\n \n tokens = document[0]\n assert len(tokens) >= 1\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = 
create_masked_lm_predictions_force_last(tokens)\n\n info = [int(user.split(\"_\")[1])]\n instance = TrainingInstance(\n info=info,\n tokens=tokens,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n\n return [instance]\n\n\ndef create_instances_from_document_train(\n all_documents, user, max_seq_length, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, vocab, rng, mask_prob):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[user]\n\n max_num_tokens = max_seq_length\n\n instances = []\n info = [int(user.split(\"_\")[1])]\n vocab_items = vocab.get_items()\n\n for tokens in document:\n assert len(tokens) >= 1 and len(tokens) <= max_num_tokens\n \n # Return the tokens, the masked positions and the masked labels\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq,\n vocab_items, rng, mask_prob)\n \n # Instantiate a TrainingInstance\n instance = TrainingInstance(\n info=info,\n tokens=tokens,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n\n return instances\n\n\nMaskedLmInstance = collections.namedtuple(\"MaskedLmInstance\",\n [\"index\", \"label\"])\n\n\ndef create_masked_lm_predictions_force_last(tokens):\n \"\"\"Creates the predictions for the masked LM objective, BUT JUST MASKING THE LAST ITEM\"\"\"\n\n last_index = -1\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[PAD]\" or token == '[NO_USE]':\n continue\n last_index = i\n\n assert last_index > 0\n\n output_tokens = list(tokens)\n output_tokens[last_index] = \"[MASK]\"\n\n masked_lm_positions = [last_index]\n masked_lm_labels = [tokens[last_index]]\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\ndef create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng,\n mask_prob):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token not in vocab_words:\n continue\n cand_indexes.append(i)\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n if index in covered_indexes:\n continue\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < mask_prob:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n # masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n masked_token = rng.choice(vocab_words) \n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\ndef gen_samples(data,\n output_filename,\n rng,\n vocab,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n mask_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n prop_sliding_window,\n pool_size,\n force_last=False):\n # create train 
instances\n instances = create_training_instances(\n data, max_seq_length, dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng, vocab, mask_prob, prop_sliding_window,\n pool_size, force_last)\n\n tf.logging.info(\"*** Writing to output files ***\")\n tf.logging.info(\" %s\", output_filename)\n\n # Write training instances\n write_instance_to_example_files(instances, max_seq_length,\n max_predictions_per_seq, vocab,\n [output_filename])\n\n\ndef main():\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)\n \n max_seq_length = FLAGS.max_seq_length\n max_predictions_per_seq = FLAGS.max_predictions_per_seq\n masked_lm_prob = FLAGS.masked_lm_prob\n mask_prob = FLAGS.mask_prob\n dupe_factor = FLAGS.dupe_factor\n prop_sliding_window = FLAGS.prop_sliding_window\n pool_size = FLAGS.pool_size\n\n output_dir = FLAGS.data_dir\n dataset_name = FLAGS.dataset_name\n version_id = FLAGS.signature\n print(version_id)\n print(output_dir)\n print(dataset_name)\n\n if not os.path.isdir(output_dir):\n print(output_dir + ' is not exist')\n print(os.getcwd())\n exit(1)\n\n dataset = data_partition(output_dir+dataset_name+'.txt')\n [user_train, user_valid, user_test, usernum, itemnum] = dataset\n cc = 0.0\n max_len = 0\n min_len = 100000\n for u in user_train:\n cc += len(user_train[u])\n max_len = max(len(user_train[u]), max_len)\n min_len = min(len(user_train[u]), min_len)\n\n print('average sequence length: %.2f' % (cc / len(user_train)))\n print('max:{}, min:{}'.format(max_len, min_len))\n\n print('len_train:{}, len_valid:{}, len_test:{}, usernum:{}, itemnum:{}'.\n format(\n len(user_train),\n len(user_valid), len(user_test), usernum, itemnum))\n\n for idx, u in enumerate(user_train):\n if idx < 10:\n print(user_train[u])\n print(user_valid[u])\n print(user_test[u])\n\n # put validate into train\n for u in user_train:\n if u in user_valid:\n user_train[u].extend(user_valid[u])\n\n # get the max index of the data\n user_train_data = {\n 'user_' + str(k): ['item_' + str(item) for item in v]\n for k, v in user_train.items() if len(v) > 0\n }\n user_test_data = {\n 'user_' + str(u):\n ['item_' + str(item) for item in (user_train[u] + user_test[u])]\n for u in user_train if len(user_train[u]) > 0 and len(user_test[u]) > 0\n }\n rng = random.Random(random_seed)\n\n vocab = FreqVocab(user_test_data)\n user_test_data_output = {\n k: [vocab.convert_tokens_to_ids(v)]\n for k, v in user_test_data.items()\n }\n\n print('begin to generate train')\n output_filename = output_dir + dataset_name + version_id + '.train.tfrecord'\n ## Generating training masked samples\n gen_samples(\n user_train_data,\n output_filename,\n rng,\n vocab,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n mask_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n prop_sliding_window,\n pool_size,\n force_last=False)\n print('train:{}'.format(output_filename))\n\n print('begin to generate test')\n output_filename = output_dir + dataset_name + version_id + '.test.tfrecord'\n ## Generating test masked samples\n ## force_last is True\n gen_samples(\n user_test_data,\n output_filename,\n rng,\n vocab,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n mask_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n -1.0,\n pool_size,\n force_last=True)\n print('test:{}'.format(output_filename))\n\n print('vocab_size:{}, user_size:{}, item_size:{}, item_with_other_size:{}'.\n format(vocab.get_vocab_size(),\n vocab.get_user_count(),\n vocab.get_item_count(),\n vocab.get_item_count() + vocab.get_special_token_count()))\n 
vocab_file_name = output_dir + dataset_name + version_id + '.vocab'\n print('vocab pickle file: ' + vocab_file_name)\n with open(vocab_file_name, 'wb') as output_file:\n pickle.dump(vocab, output_file, protocol=2)\n\n his_file_name = output_dir + dataset_name + version_id + '.his'\n print('test data pickle file: ' + his_file_name)\n with open(his_file_name, 'wb') as output_file:\n pickle.dump(user_test_data_output, output_file, protocol=2)\n print('done.')\n\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"tensorflow.logging.info",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.Features",
"tensorflow.compat.v1.logging.set_verbosity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
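Note on the data-generation file above: the heart of the script is how it turns a user's item sequence into masked-LM training instances (random masking for training, masking only the last item for evaluation). Below is a minimal, self-contained sketch of those two strategies in plain Python; the function names, the 0.8 mask probability, and the toy sequence are illustrative assumptions, not taken from the original file.

import random

def mask_random(tokens, vocab_items, masked_lm_prob=0.15, max_predictions=20,
                mask_prob=0.8, seed=12345):
    """Randomly mask a fraction of the items (illustrative re-implementation)."""
    rng = random.Random(seed)
    cand = [i for i, t in enumerate(tokens) if t in vocab_items]
    rng.shuffle(cand)
    num_to_predict = min(max_predictions, max(1, round(len(tokens) * masked_lm_prob)))
    out = list(tokens)
    picked = sorted(cand[:num_to_predict])
    labels = []
    for i in picked:
        if rng.random() < mask_prob:
            out[i] = "[MASK]"                    # replace with the mask token
        elif rng.random() < 0.5:
            pass                                 # keep the original item
        else:
            out[i] = rng.choice(vocab_items)     # replace with a random item
        labels.append(tokens[i])
    return out, picked, labels

def mask_last(tokens):
    """Mask only the final item, as the test instances do."""
    out = list(tokens)
    out[-1] = "[MASK]"
    return out, [len(tokens) - 1], [tokens[-1]]

if __name__ == "__main__":
    seq = ["item_3", "item_7", "item_1", "item_9"]
    print(mask_random(seq, vocab_items=seq))
    print(mask_last(seq))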
richardsfc/neural_rerendering_plus | [
"f5b2bd2ebe7e9657e3584612818eb0d137714276"
] | [
"layers.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom options import FLAGS as opts\nimport numpy as np\nimport tensorflow as tf\nfrom plyfile import PlyData, PlyElement\n\nclass LayerDescriptor(object):\n \n def __init__(self, name, m): \n with tf.variable_scope(name):\n plydata = PlyData.read(opts.descriptor_folder + '/fused.ply')\n shape = [(plydata.elements[0].count // opts.descriptor_div) + 1, m]\n self.dim = m\n with tf.device('/device:GPU:1'):\n self.descriptors = tf.get_variable('descriptors', shape=shape) # 0 index is the null descriptor\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n with tf.device('/device:GPU:1'):\n shape = x.get_shape().as_list()\n indices = tf.reshape(x[:, :, :, -1], shape=[-1, 1])\n indices = tf.compat.v1.cast(tf.math.ceil(tf.compat.v1.divide(indices, opts.descriptor_div)), tf.int64)\n D = tf.gather_nd(self.descriptors, indices)\n D = tf.reshape(D, shape=[-1, shape[1], shape[2], self.dim])\n return tf.compat.v1.concat([tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, opts.deep_buffer_nc]), D], axis=-1)\n\n\nclass LayerInstanceNorm(object):\n\n def __init__(self, scope_suffix='instance_norm'):\n curr_scope = tf.compat.v1.get_variable_scope().name\n self._scope = curr_scope + '/' + scope_suffix\n\n def __call__(self, x):\n with tf.compat.v1.variable_scope(self._scope, reuse=tf.compat.v1.AUTO_REUSE):\n return tf.contrib.layers.instance_norm(\n x, epsilon=1e-05, center=True, scale=True)\n\n\ndef layer_norm(x, scope='layer_norm'):\n return tf.contrib.layers.layer_norm(x, center=True, scale=True)\n\n\ndef pixel_norm(x):\n \"\"\"Pixel normalization.\n\n Args:\n x: 4D image tensor in B01C format.\n\n Returns:\n 4D tensor with pixel normalized channels.\n \"\"\"\n return x * tf.compat.v1.rsqrt(tf.compat.v1.reduce_mean(tf.compat.v1.square(x), [-1], keepdims=True) + 1e-8)\n\n\ndef global_avg_pooling(x):\n return tf.compat.v1.reduce_mean(x, axis=[1, 2], keepdims=True)\n\n\nclass FullyConnected(object):\n\n def __init__(self, n_out_units, scope_suffix='FC'):\n weight_init = tf.compat.v1.random_normal_initializer(mean=0., stddev=0.02)\n weight_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)\n\n curr_scope = tf.get_variable_scope().name\n self._scope = curr_scope + '/' + scope_suffix\n self.fc_layer = functools.partial(\n tf.layers.dense, units=n_out_units, kernel_initializer=weight_init,\n kernel_regularizer=weight_regularizer, use_bias=True)\n\n def __call__(self, x):\n with tf.compat.v1.variable_scope(self._scope, reuse=tf.AUTO_REUSE):\n return self.fc_layer(x)\n\n\ndef init_he_scale(shape, slope=1.0):\n \"\"\"He neural network random normal scaling for initialization.\n\n Args:\n shape: list of the dimensions of the tensor.\n slope: float, slope of the ReLu following the layer.\n\n Returns:\n a float, He's standard deviation.\n \"\"\"\n fan_in = np.prod(shape[:-1])\n return np.sqrt(2. / ((1. 
+ slope**2) * fan_in))\n\n\nclass LayerConv(object):\n \"\"\"Convolution layer with support for equalized learning.\"\"\"\n\n def __init__(self,\n name,\n w,\n n,\n stride,\n padding='SAME',\n use_scaling=False,\n relu_slope=1.):\n \"\"\"Layer constructor.\n\n Args:\n name: string, layer name.\n w: int or 2-tuple, width of the convolution kernel.\n n: 2-tuple of ints, input and output channel depths.\n stride: int or 2-tuple, stride for the convolution kernel.\n padding: string, the padding method. {SAME, VALID, REFLECT}.\n use_scaling: bool, whether to use weight norm and scaling.\n relu_slope: float, the slope of the ReLu following the layer.\n \"\"\"\n assert padding in ['SAME', 'VALID', 'REFLECT'], 'Error: unsupported padding'\n self._padding = padding\n with tf.compat.v1.variable_scope(name):\n if isinstance(stride, int):\n stride = [1, stride, stride, 1]\n else:\n assert len(stride) == 2, \"stride is either an int or a 2-tuple\"\n stride = [1, stride[0], stride[1], 1]\n if isinstance(w, int):\n w = [w, w]\n self.w = w\n shape = [w[0], w[1], n[0], n[1]]\n init_scale, pre_scale = init_he_scale(shape, relu_slope), 1.\n if use_scaling:\n init_scale, pre_scale = pre_scale, init_scale\n self._stride = stride\n self._pre_scale = pre_scale\n self._weight = tf.compat.v1.get_variable(\n 'weight',\n shape=shape,\n initializer=tf.compat.v1.random_normal_initializer(stddev=init_scale))\n self._bias = tf.compat.v1.get_variable(\n 'bias', shape=[n[1]], initializer=tf.compat.v1.zeros_initializer)\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n if self._padding != 'REFLECT':\n padding = self._padding\n else:\n padding = 'VALID'\n pad_top = self.w[0] // 2\n pad_left = self.w[1] // 2\n if (self.w[0] - self._stride[1]) % 2 == 0:\n pad_bottom = pad_top\n else:\n pad_bottom = self.w[0] - self._stride[1] - pad_top\n if (self.w[1] - self._stride[2]) % 2 == 0:\n pad_right = pad_left\n else:\n pad_right = self.w[1] - self._stride[2] - pad_left\n x = tf.compat.v1.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right],\n [0, 0]], mode='REFLECT')\n y = tf.compat.v1.nn.conv2d(x, self._weight, strides=self._stride, padding=padding)\n return self._pre_scale * y + self._bias\n\n\nclass LayerTransposedConv(object):\n \"\"\"Convolution layer with support for equalized learning.\"\"\"\n\n def __init__(self,\n name,\n w,\n n,\n stride,\n padding='SAME',\n use_scaling=False,\n relu_slope=1.):\n \"\"\"Layer constructor.\n\n Args:\n name: string, layer name.\n w: int or 2-tuple, width of the convolution kernel.\n n: 2-tuple int, [n_in_channels, n_out_channels]\n stride: int or 2-tuple, stride for the convolution kernel.\n padding: string, the padding method {SAME, VALID, REFLECT}.\n use_scaling: bool, whether to use weight norm and scaling.\n relu_slope: float, the slope of the ReLu following the layer.\n \"\"\"\n assert padding in ['SAME'], 'Error: unsupported padding for transposed conv'\n if isinstance(stride, int):\n stride = [1, stride, stride, 1]\n else:\n assert len(stride) == 2, \"stride is either an int or a 2-tuple\"\n stride = [1, stride[0], stride[1], 1]\n if isinstance(w, int):\n w = [w, w]\n self.padding = padding\n self.nc_in, self.nc_out = n\n self.stride = stride\n with tf.variable_scope(name):\n kernel_shape = [w[0], w[1], self.nc_out, self.nc_in]\n init_scale, pre_scale = init_he_scale(kernel_shape, relu_slope), 1.\n if use_scaling:\n init_scale, pre_scale = pre_scale, init_scale\n self._pre_scale = pre_scale\n self._weight = tf.get_variable(\n 'weight',\n shape=kernel_shape,\n 
initializer=tf.random_normal_initializer(stddev=init_scale))\n self._bias = tf.get_variable(\n 'bias', shape=[self.nc_out], initializer=tf.zeros_initializer)\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n x_shape = x.get_shape().as_list()\n batch_size = tf.shape(x)[0]\n stride_x, stride_y = self.stride[1], self.stride[2]\n output_shape = tf.stack([\n batch_size, x_shape[1] * stride_x, x_shape[2] * stride_y, self.nc_out])\n y = tf.nn.conv2d_transpose(\n x, filter=self._weight, output_shape=output_shape, strides=self.stride,\n padding=self.padding)\n return self._pre_scale * y + self._bias\n\n\nclass ResBlock(object):\n def __init__(self,\n name,\n nc,\n norm_layer_constructor,\n activation,\n padding='SAME',\n use_scaling=False,\n relu_slope=1.):\n \"\"\"Layer constructor.\"\"\"\n self.name = name\n conv2d = functools.partial(\n LayerConv, w=3, n=[nc, nc], stride=1, padding=padding,\n use_scaling=use_scaling, relu_slope=relu_slope)\n self.blocks = []\n with tf.variable_scope(self.name):\n with tf.variable_scope('res0'):\n self.blocks.append(\n LayerPipe([\n conv2d('res0_conv'),\n norm_layer_constructor('res0_norm'),\n activation\n ])\n )\n with tf.variable_scope('res1'):\n self.blocks.append(\n LayerPipe([\n conv2d('res1_conv'),\n norm_layer_constructor('res1_norm')\n ])\n )\n\n def __call__(self, x_init):\n \"\"\"Apply layer to tensor x.\"\"\"\n x = x_init\n for f in self.blocks:\n x = f(x)\n return x + x_init\n\n\nclass BasicBlock(object):\n def __init__(self,\n name,\n n,\n activation=functools.partial(tf.compat.v1.nn.leaky_relu, alpha=0.2),\n padding='SAME',\n use_scaling=True,\n relu_slope=1.):\n \"\"\"Layer constructor.\"\"\"\n self.name = name\n conv2d = functools.partial(\n LayerConv, stride=1, padding=padding,\n use_scaling=use_scaling, relu_slope=relu_slope)\n nc_in, nc_out = n # n is a 2-tuple\n with tf.compat.v1.variable_scope(self.name):\n self.path1_blocks = []\n with tf.compat.v1.variable_scope('bb_path1'):\n self.path1_blocks.append(\n LayerPipe([\n activation,\n conv2d('bb_conv0', w=3, n=[nc_in, nc_out]),\n activation,\n conv2d('bb_conv1', w=3, n=[nc_out, nc_out]),\n downscale\n ])\n )\n\n self.path2_blocks = []\n with tf.compat.v1.variable_scope('bb_path2'):\n self.path2_blocks.append(\n LayerPipe([\n downscale,\n conv2d('path2_conv', w=1, n=[nc_in, nc_out])\n ])\n )\n\n def __call__(self, x_init):\n \"\"\"Apply layer to tensor x.\"\"\"\n x1 = x_init\n x2 = x_init\n for f in self.path1_blocks:\n x1 = f(x1)\n for f in self.path2_blocks:\n x2 = f(x2)\n return x1 + x2\n\n\nclass LayerDense(object):\n \"\"\"Dense layer with a non-linearity.\"\"\"\n\n def __init__(self, name, n, use_scaling=False, relu_slope=1.):\n \"\"\"Layer constructor.\n\n Args:\n name: string, layer name.\n n: 2-tuple of ints, input and output widths.\n use_scaling: bool, whether to use weight norm and scaling.\n relu_slope: float, the slope of the ReLu following the layer.\n \"\"\"\n with tf.variable_scope(name):\n init_scale, pre_scale = init_he_scale(n, relu_slope), 1.\n if use_scaling:\n init_scale, pre_scale = pre_scale, init_scale\n self._pre_scale = pre_scale\n self._weight = tf.get_variable(\n 'weight',\n shape=n,\n initializer=tf.random_normal_initializer(stddev=init_scale))\n self._bias = tf.get_variable(\n 'bias', shape=[n[1]], initializer=tf.zeros_initializer)\n\n def __call__(self, x):\n \"\"\"Apply layer to tensor x.\"\"\"\n return self._pre_scale * tf.matmul(x, self._weight) + self._bias\n\n\nclass LayerPipe(object):\n \"\"\"Pipe a sequence of functions.\"\"\"\n\n def 
__init__(self, functions):\n \"\"\"Layer constructor.\n\n Args:\n functions: list, functions to pipe.\n \"\"\"\n self._functions = tuple(functions)\n\n def __call__(self, x, **kwargs):\n \"\"\"Apply pipe to tensor x and return result.\"\"\"\n del kwargs\n for f in self._functions:\n x = f(x)\n return x\n\n\ndef downscale(x, n=2):\n \"\"\"Box downscaling.\n\n Args:\n x: 4D image tensor.\n n: integer scale (must be a power of 2).\n\n Returns:\n 4D tensor of images down scaled by a factor n.\n \"\"\"\n if n == 1:\n return x\n return tf.compat.v1.nn.avg_pool(x, [1, n, n, 1], [1, n, n, 1], 'VALID')\n\n\ndef upscale(x, n):\n \"\"\"Box upscaling (also called nearest neighbors).\n\n Args:\n x: 4D image tensor in B01C format.\n n: integer scale (must be a power of 2).\n\n Returns:\n 4D tensor of images up scaled by a factor n.\n \"\"\"\n if n == 1:\n return x\n x_shape = tf.compat.v1.shape(x)\n height, width = x_shape[1], x_shape[2]\n return tf.compat.v1.image.resize_nearest_neighbor(x, [n * height, n * width])\n\n\ndef tile_and_concatenate(x, z, n_z):\n z = tf.compat.v1.reshape(z, shape=[-1, 1, 1, n_z])\n z = tf.compat.v1.tile(z, [1, tf.compat.v1.shape(x)[1], tf.compat.v1.shape(x)[2], 1])\n x = tf.compat.v1.concat([x, z], axis=-1)\n return x\n\n\ndef minibatch_mean_variance(x):\n \"\"\"Computes the variance average.\n\n This is used by the discriminator as a form of batch discrimination.\n\n Args:\n x: nD tensor for which to compute variance average.\n\n Returns:\n a scalar, the mean variance of variable x.\n \"\"\"\n mean = tf.compat.v1.reduce_mean(x, 0, keepdims=True)\n vals = tf.compat.v1.sqrt(tf.compat.v1.reduce_mean(tf.compat.v1.squared_difference(x, mean), 0) + 1e-8)\n vals = tf.compat.v1.reduce_mean(vals)\n return vals\n\n\ndef scalar_concat(x, scalar):\n \"\"\"Concatenate a scalar to a 4D tensor as an extra channel.\n\n Args:\n x: 4D image tensor in B01C format.\n scalar: a scalar to concatenate to the tensor.\n\n Returns:\n a 4D tensor with one extra channel containing the value scalar at\n every position.\n \"\"\"\n s = tf.compat.v1.shape(x)\n return tf.compat.v1.concat([x, tf.compat.v1.ones([s[0], s[1], s[2], 1]) * scalar], axis=3)\n"
] | [
[
"tensorflow.device",
"tensorflow.get_variable",
"numpy.sqrt",
"tensorflow.compat.v1.concat",
"tensorflow.stack",
"tensorflow.nn.conv2d_transpose",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.image.resize_nearest_neighbor",
"tensorflow.compat.v1.ones",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.nn.avg_pool",
"tensorflow.compat.v1.get_variable_scope",
"tensorflow.random_normal_initializer",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.divide",
"tensorflow.matmul",
"tensorflow.compat.v1.square",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.nn.conv2d",
"tensorflow.contrib.layers.instance_norm",
"tensorflow.compat.v1.squared_difference",
"tensorflow.compat.v1.random_normal_initializer",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.contrib.layers.layer_norm",
"numpy.prod",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.compat.v1.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
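One detail of layers.py worth highlighting: when use_scaling is set, LayerConv/LayerDense initialize the weights with standard deviation 1 and instead apply the He factor at call time through pre_scale, which matches the equalized-learning-rate idea popularized by progressive GANs. A numpy-only sketch of that swap follows; the helper names and shapes are illustrative, only the formula is taken from the file.

import numpy as np

def init_he_scale(shape, slope=1.0):
    """He standard deviation for a kernel of this shape (same formula as the file above)."""
    fan_in = np.prod(shape[:-1])
    return np.sqrt(2.0 / ((1.0 + slope ** 2) * fan_in))

def make_weight(shape, use_scaling, seed=0):
    """Return (weight, pre_scale); use_scaling moves the He factor from init to runtime."""
    rng = np.random.default_rng(seed)
    he = init_he_scale(shape)
    init_scale, pre_scale = (1.0, he) if use_scaling else (he, 1.0)
    return rng.normal(0.0, init_scale, size=shape), pre_scale

# The effective scale pre_scale * std(weight) is about the same either way.
for flag in (False, True):
    w, s = make_weight((3, 3, 64, 128), use_scaling=flag)
    print(flag, float(s * w.std()), init_he_scale((3, 3, 64, 128)))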
Abxhor/Coldairarrow | [
"3735beec8a6fa7ad9356375081229c68f0e83f3d"
] | [
"models/final_model.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Stacking of some good solutions.\nIMPORTANT:\nTo run this model you need run before the differents models.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndf1 = pd.read_csv('submission40.csv') # 0.309812 (public leaderboard)\ndf2 = pd.read_csv('submission41.csv') # 0.305985 (public leaderboard)\ndf3 = pd.read_csv('submission42.csv') # 0.313587 (public leaderboard)\ndf4 = pd.read_csv('submission45.csv') # 0.309749 (public leaderboard)\ndf5 = pd.read_csv('submission47.csv') # 0.306439 (public leaderboard)\n\ndf = pd.DataFrame()\n\ndf['y'] = 0.2*df1['y'] + 0.23*df2['y'] + 0.2*df3['y'] + 0.15*df4['y'] + 0.22*df5['y']\ndf.to_csv('submission53.csv') # 0.301697 (public leaderboard)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
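The final_model.py entry above is a weighted blend of five submission files rather than true stacking (there is no meta-learner). A small generalized sketch of the same idea is given below; the helper name, the sum-to-one check, and index=False are assumptions added for illustration.

import pandas as pd

def blend_submissions(files_and_weights, target_col="y", out_path="blend.csv"):
    """Weighted average of one target column across several submission CSVs."""
    assert abs(sum(w for _, w in files_and_weights) - 1.0) < 1e-9, "weights should sum to 1"
    blended = None
    for path, w in files_and_weights:
        col = pd.read_csv(path)[target_col]
        blended = w * col if blended is None else blended + w * col
    pd.DataFrame({target_col: blended}).to_csv(out_path, index=False)

# Same weights as the script above (0.20 + 0.23 + 0.20 + 0.15 + 0.22 = 1.0):
# blend_submissions([("submission40.csv", 0.20), ("submission41.csv", 0.23),
#                    ("submission42.csv", 0.20), ("submission45.csv", 0.15),
#                    ("submission47.csv", 0.22)], out_path="submission53.csv")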
vlbthambawita/singan-polyp-aug-exp | [
"b4ec5155f5c36a931fad022aec04dda6b3180b55"
] | [
"777_all_in_one_v1.py"
] | [
"#=========================================================\n# Developer: Vajira Thambawita\n# Reference: https://github.com/meetshah1995/pytorch-semseg\n#=========================================================\n\n\n\nimport argparse\nfrom datetime import datetime\nimport os\nimport copy\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n#Pytorch\nimport torch\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import models, transforms,datasets, utils\nfrom torchvision.utils import save_image\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.autograd import Variable\nfrom torchsummary import summary\n\nimport segmentation_models_pytorch as smp\n\n\nfrom data.dataset import Dataset\nfrom data.prepare_data import prepare_data, prepare_test_data\n#from data import PolypsDatasetWithGridEncoding\n#from data import PolypsDatasetWithGridEncoding_TestData\nimport pyra_pytorch as pyra\nfrom utils import dice_coeff, iou_pytorch, visualize\n\nimport segmentation_models_pytorch as smp\n\n\n#======================================\n# Get and set all input parameters\n#======================================\n\nparser = argparse.ArgumentParser()\n\n# Hardware\n#parser.add_argument(\"--device\", default=\"gpu\", help=\"Device to run the code\")\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"\")\n\n# Optional parameters to identify the experiments\nparser.add_argument(\"--exp_name\", type=str, help=\"A name to identify the experiment\", required=True)\n#parser.add_argument(\"--py_file\",default=os.path.abspath(__file__)) # store current python file\n\n\n# Directory and file handling\nparser.add_argument(\"--train_CSVs\", \n nargs=\"+\",\n default=None,\n help=\"CSV file list with image and mask paths\")\n\nparser.add_argument(\"--val_CSVs\",\n nargs=\"+\",\n default=None,\n help=\"CSV file list with image and mask paths\")\n\nparser.add_argument(\"--test_CSVs\",\n nargs=\"+\",\n default=None,\n help=\"CSV file list with image and mask paths\")\n\nparser.add_argument(\"--out_dir\", \n default=\"/work/vajira/DATA/sinGAN_polyps/sinGAN_exp_out/checkpoints\",\n help=\"Main output dierectory\")\n\nparser.add_argument(\"--tensorboard_dir\", \n default=\"/work/vajira/DATA/sinGAN_polyps/sinGAN_exp_out/tensorboard\",\n help=\"Folder to save output of tensorboard\")\n\nparser.add_argument(\"--test_out_dir\",\n default= \"/work/vajira/DATA/sinGAN_polyps/sinGAN_exp_out/test_samples\",\n help=\"Output folder for testing data\"\n) \n\nparser.add_argument(\"--best_checkpoint_name\", type=str, default=\"best_checkpoint.pth\", help=\"A name to save bet checkpoint\")\n\nparser.add_argument(\"--img_size\", type=int, default=128, help=\"Image height and width to resize\")\n\n\n# Action handling \nparser.add_argument(\"--num_epochs\", type=int, default=1, help=\"Numbe of epochs to train\")\nparser.add_argument(\"--start_epoch\", type=int, default=0, help=\"start epoch of training\")\nparser.add_argument(\"--num_test_samples\", type=int, default=5, help=\"Number of samples to test.\")\n\n# smp parameters\nparser.add_argument(\"--model\", help=\"The model to perform segmentation\", required=True)\nparser.add_argument(\"--encoder\", type=str, default='se_resnext50_32x4d', help=\"smp encoders\")\nparser.add_argument(\"--encoder_weights\", type=str, default='imagenet', help=\"encoder weights\")\nparser.add_argument(\"--classes\", default=[0,255], help=\"classes per 
pixel\")\nparser.add_argument(\"--activation\", type=str, default='softmax2d', help=\"last activation layers activation\")\n\n#PYRA\nparser.add_argument(\"--pyra\", type=bool, default=False, help=\"To enable PYRA grid encoding.\")\nparser.add_argument(\"--grid_sizes_train\", type=list, default=[256], help=\"Grid sizes to use in training\")\nparser.add_argument(\"--grid_sizes_val\", type=list, default=[256], help=\"Grid sizes to use in training\")\nparser.add_argument(\"--grid_sizes_test\", type=list, default=[256], help=\"Grid sizes to use in testing\")\nparser.add_argument(\"--in_channels\", type=int, default=3, help=\"Number of input channgels\")\n\n# Parameters\nparser.add_argument(\"--bs\", type=int, default=8, help=\"Mini batch size\")\nparser.add_argument(\"--val_bs\", type=int, default=1, help=\"Batch size\")\nparser.add_argument(\"--lr\", type=float, default=0.0001, help=\"Learning rate for training\")\nparser.add_argument(\"--lr_change_point\", type=int, default=50, help=\"After this point LR will be changed.\")\n\n\nparser.add_argument(\"--num_workers\", type=int, default=12, help=\"Number of workers in dataloader\")\nparser.add_argument(\"--weight_decay\", type=float, default=1e-5, help=\"weight decay of the optimizer\")\nparser.add_argument(\"--lr_sch_factor\", type=float, default=0.1, help=\"Factor to reduce lr in the scheduler\")\nparser.add_argument(\"--lr_sch_patience\", type=int, default=25, help=\"Num of epochs to be patience for updating lr\")\n\n\nparser.add_argument(\"--num_samples\", type=int, default=5, help=\"Number of samples to print from validation set\")\nparser.add_argument(\"action\", type=str, help=\"Select an action to run\", choices=[\"train\", \"retrain\", \"test\", \"check\", \"check_val\"])\nparser.add_argument(\"--checkpoint_interval\", type=int, default=25, help=\"Interval to save checkpoint models\")\n#parser.add_argument(\"--fold\", type=str, default=\"fold_1\", help=\"Select the validation fold\", choices=[\"fold_1\", \"fold_2\", \"fold_3\"])\n#parser.add_argument(\"--num_test\", default= 200, type=int, help=\"Number of samples to test set from 1k dataset\")\n#parser.add_argument(\"--model_path\", default=\"\", help=\"Model path to load weights\")\n#parser.add_argument(\"--num_of_samples\", default=30, type=int, help=\"Number of samples to validate (Montecalo sampling)\")\nparser.add_argument(\"--record_name\", type=str, default=\"VAL\", help=\"Some name to identify records in tensorboard output\")\n\nopt = parser.parse_args()\n\n\n#==========================================\n# Device handling\n#==========================================\ntorch.cuda.set_device(opt.device_id)\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nopt.device = DEVICE\n\n#===========================================\n# Folder handling\n#===========================================\n\n#make output folder if not exist\nos.makedirs(opt.out_dir, exist_ok=True)\n\n\n# make subfolder in the output folder \n#py_file_name = opt.py_file.split(\"/\")[-1] # Get python file name (soruce code name)\nCHECKPOINT_DIR = os.path.join(opt.out_dir, opt.exp_name + \"/checkpoints\")\nos.makedirs(CHECKPOINT_DIR, exist_ok=True)\n\n# make tensorboard subdirectory for the experiment\ntensorboard_exp_dir = os.path.join(opt.tensorboard_dir, opt.exp_name)\nos.makedirs( tensorboard_exp_dir, exist_ok=True)\n\n#==========================================\n# Tensorboard\n#==========================================\n# Initialize summary writer\nwriter = 
SummaryWriter(tensorboard_exp_dir)\n\n#==========================================\n# Prepare Data\n#==========================================\n\n\n#================================================\n# Train the model\n#================================================\ndef train_model(train_loader, valid_loader, model, loss, metrics, optimizer, opt):\n\n # create epoch runners \n # it is a simple loop of iterating over dataloader`s samples\n train_epoch = smp.utils.train.TrainEpoch(\n model, \n loss=loss, \n metrics=metrics, \n optimizer=optimizer,\n device=DEVICE,\n verbose=True,\n )\n\n valid_epoch = smp.utils.train.ValidEpoch(\n model, \n loss=loss, \n metrics=metrics, \n device=DEVICE,\n verbose=True,\n )\n\n\n\n max_score = 0\n\n best_chk_path = os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name)\n\n for i in range(opt.start_epoch + 1, opt.start_epoch + opt.num_epochs +1 ):\n \n print('\\nEpoch: {}'.format(i))\n train_logs = train_epoch.run(train_loader)\n valid_logs = valid_epoch.run(valid_loader)\n \n # do something (save model, change lr, etc.)\n if max_score < valid_logs['iou_score']:\n max_score = valid_logs['iou_score']\n torch.save({\"model\":model, \"epoch\": i}, best_chk_path)\n print('Best Model saved!')\n print(\"Testing....\")\n do_test(opt)\n print(\"Tested\")\n\n \n if i == opt.lr_change_point:\n optimizer.param_groups[0]['lr'] = 1e-5\n print('Decrease decoder learning rate to 1e-5!')\n\n # writing to logs to tensorboard\n for key, value in train_logs.items():\n writer.add_scalar(f\"Train/{key}\", value, i)\n\n for key, value in valid_logs.items():\n writer.add_scalar(f\"Valid/{key}\", value, i)\n\n\n \n\n\n# update here\n \n\n#==============================================\n# Heatmap generator from tensor\n#==============================================\ndef generate_heatmapts(img_tensor):\n print(img_tensor.shape)\n fig_list = []\n for n in range(img_tensor.shape[0]):\n img = img_tensor[n]\n img = img.squeeze(dim=0)\n img_np = img.detach().cpu().numpy()\n #img_np = np.transforms(img_np, (1,2,0))\n \n plt.imshow(img_np, cmap=\"hot\")\n fig = plt.gcf()\n fig_list.append(fig)\n # plt.clf()\n plt.close()\n\n return fig_list\n\n\n\n#===============================================\n# Prepare models\n#===============================================\ndef prepare_model(opt):\n # model = UNet(n_channels=4, n_classes=1) # 4 = 3 channels + 1 grid encode\n\n # create segmentation model with pretrained encoder\n model = getattr(smp, opt.model)(\n encoder_name=opt.encoder,\n in_channels=opt.in_channels, \n encoder_weights=opt.encoder_weights, \n classes=len(opt.classes), \n activation=opt.activation,\n )\n\n return model\n\n#====================================\n# Run training process\n#====================================\ndef run_train(opt):\n model = prepare_model(opt)\n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n\n train_loader, val_loader = prepare_data(opt, preprocessing_fn=None)\n\n loss = smp.utils.losses.DiceLoss(ignore_channels=[0])\n\n metrics = [\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[0]),\n ]\n\n optimizer = torch.optim.Adam([ \n dict(params=model.parameters(), lr=opt.lr),\n ])\n\n train_model(train_loader, val_loader, model, loss, metrics, optimizer, opt)\n#====================================\n# Re-train process\n#====================================\ndef run_retrain(opt):\n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n opt.start_epoch = 
checkpoint_dict[\"epoch\"]\n model = checkpoint_dict[\"model\"]\n\n print(\"Model epoch:\", checkpoint_dict[\"epoch\"])\n print(\"Model retrain started from epoch:\", opt.start_epoch)\n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n\n train_loader, val_loader = prepare_data(opt, preprocessing_fn)\n\n loss = smp.utils.losses.DiceLoss()\n\n metrics = [\n smp.utils.metrics.IoU(threshold=0.5),\n ]\n\n optimizer = torch.optim.Adam([ \n dict(params=model.parameters(), lr=opt.lr),\n ])\n\n train_model(train_loader, val_loader, model, loss, metrics, optimizer, opt)\n\n#=====================================\n# Check model\n#====================================\ndef check_model_graph():\n raise NotImplementedError\n\n\n#===================================\n# Inference from pre-trained models\n#===================================\n\ndef do_test(opt):\n\n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n test_epoch = checkpoint_dict[\"epoch\"]\n best_model = checkpoint_dict[\"model\"]\n\n print(\"Model best epoch:\", test_epoch)\n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n test_dataset = prepare_test_data(opt, preprocessing_fn=None)\n test_dataset_vis = prepare_test_data(opt, preprocessing_fn=None)\n \n \n for i in range(opt.num_test_samples):\n image, mask = test_dataset[i]\n image_vis, _ = test_dataset_vis[i]\n\n #print(image)\n\n mask_tensor = torch.from_numpy(mask).to(opt.device).unsqueeze(0)\n\n image_tensor = torch.from_numpy(image).to(opt.device).unsqueeze(0)\n pr_mask = best_model.predict(image_tensor)\n\n pr_mask = pr_mask.squeeze().cpu().numpy().round()\n\n fig = visualize(\n input_image_new=np.transpose(image_vis, (1,2,0)).astype(int),\n GT_mask_0=mask[0, :,:],\n Pred_mask_0 = pr_mask[0,:,:],\n GT_mask_1= mask[1,:,:],\n Pred_mask_1 = pr_mask[1, :,:]\n )\n\n fig.savefig(f\"./test_202_{i}.png\")\n writer.add_figure(f\"Test_sample/sample-{i}\", fig, global_step=test_epoch)\n\n\n\n\n\ndef check_test_score(opt):\n\n \n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n test_best_epoch = checkpoint_dict[\"epoch\"]\n best_model = checkpoint_dict[\"model\"]\n\n print(\"Model best epoch:\", test_best_epoch)\n \n \n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n test_dataset = prepare_test_data(opt, preprocessing_fn=None)\n \n test_dataloader = DataLoader(test_dataset, num_workers=48)\n\n loss = smp.utils.losses.DiceLoss()\n # Testing with two class layers\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=None),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-test-score\", str(logs), global_step=test_best_epoch)\n\n # Testing with only class layer 1 (polyps)\n loss = smp.utils.losses.DiceLoss(ignore_channels=[0])\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[0]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-test-score-ignore-channel-0\", str(logs), global_step=test_best_epoch)\n\n\n\n # Testing with 
only class layer 0 (BG)\n\n loss = smp.utils.losses.DiceLoss(ignore_channels=[1])\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[1]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-test-score-ignore-channel-1\", str(logs), global_step=test_best_epoch)\n\n\n\ndef check_val_full_score(opt):\n\n # changing test data files into val data\n\n #opt.test_CSVs = opt.val_CSVs\n\n #opt.record_name = \"VAL\"\n\n checkpoint_dict = torch.load(os.path.join(CHECKPOINT_DIR, opt.best_checkpoint_name))\n\n test_best_epoch = checkpoint_dict[\"epoch\"]\n best_model = checkpoint_dict[\"model\"]\n\n print(\"Model best epoch:\", test_best_epoch)\n \n \n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(opt.encoder, opt.encoder_weights)\n test_dataset = prepare_test_data(opt, preprocessing_fn=None)\n \n test_dataloader = DataLoader(test_dataset, num_workers=12)\n\n loss = smp.utils.losses.DiceLoss()\n # Testing with two class layers\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Fscore(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Accuracy(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Recall(threshold=0.5, ignore_channels=None),\n smp.utils.metrics.Precision(threshold=0.5, ignore_channels=None),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-scores-->{opt.record_name}\", str(logs), global_step=test_best_epoch)\n\n # Testing with only class layer 1 (polyps)\n loss = smp.utils.losses.DiceLoss(ignore_channels=[0])\n \n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Fscore(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Accuracy(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Recall(threshold=0.5, ignore_channels=[0]),\n smp.utils.metrics.Precision(threshold=0.5, ignore_channels=[0]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-val-scores-ignore-channel-0-->{opt.record_name}\", str(logs), global_step=test_best_epoch)\n\n\n\n # Testing with only class layer 0 (BG)\n\n loss = smp.utils.losses.DiceLoss(ignore_channels=[1])\n metrics = [\n #smp.utils.metrics.IoU(threshold=0.5),\n smp.utils.metrics.IoU(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Fscore(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Accuracy(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Recall(threshold=0.5, ignore_channels=[1]),\n smp.utils.metrics.Precision(threshold=0.5, ignore_channels=[1]),\n ]\n\n test_epoch = smp.utils.train.ValidEpoch(\n model=best_model,\n loss=loss,\n metrics=metrics,\n device=DEVICE,\n )\n\n logs = test_epoch.run(test_dataloader)\n print(\"logs=\", str(logs))\n writer.add_text(f\"{opt.exp_name}-val-scores-ignore-channel-1-->{opt.record_name}\", str(logs), global_step=test_best_epoch) \n\n\n\n\n\nif __name__ == \"__main__\":\n\n #data_loaders = prepare_data()\n 
print(vars(opt))\n print(\"Test OK\")\n\n # Train or retrain or inference\n if opt.action == \"train\":\n print(\"Training process is strted..!\")\n run_train(opt)\n pass\n\n elif opt.action == \"retrain\":\n print(\"Retrainning process is strted..!\")\n run_retrain(opt)\n pass\n\n elif opt.action == \"test\":\n print(\"Inference process is strted..!\")\n do_test(opt)\n print(\"Done\")\n\n elif opt.action == \"check\":\n check_test_score(opt)\n print(\"Check pass\")\n\n elif opt.action == \"check_val\":\n check_val_full_score(opt)\n\n # Finish tensorboard writer\n writer.close()\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"torch.cuda.set_device",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.transpose",
"matplotlib.pyplot.gcf",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"matplotlib.pyplot.close",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
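In the segmentation script above, the architecture is chosen at run time with getattr(smp, opt.model)(...), so --model can name any class exported by segmentation_models_pytorch (Unet, FPN, DeepLabV3Plus, ...). A minimal sketch of that pattern, assuming segmentation_models_pytorch is installed; the defaults mirror the script's flags but are otherwise illustrative.

import argparse
import segmentation_models_pytorch as smp

def build_model(name, encoder="se_resnext50_32x4d", encoder_weights="imagenet",
                in_channels=3, n_classes=2, activation="softmax2d"):
    """Look the architecture class up by name on the smp package and instantiate it."""
    arch = getattr(smp, name)  # e.g. smp.Unet, smp.FPN, smp.DeepLabV3Plus
    return arch(
        encoder_name=encoder,
        encoder_weights=encoder_weights,
        in_channels=in_channels,
        classes=n_classes,
        activation=activation,
    )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="Unet")  # same idea as the script's --model flag
    args = parser.parse_args()
    print(type(build_model(args.model)).__name__)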
MasaYan24/pytorch-lightning | [
"046ac714f6955ed14b831657ea1b7b16bc28ac93"
] | [
"pytorch_lightning/trainer/training_loop.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager, suppress\nfrom copy import copy, deepcopy\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.states import RunningStage, TrainerState\nfrom pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing\nfrom pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import recursive_detach\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(self, trainer, multiple_trainloader_mode):\n self.trainer = trainer\n self.early_stopping_accumulator = None\n self.checkpoint_accumulator = None\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self.automatic_optimization = True\n self._curr_step_result = None\n self._cur_grad_norm_dict = None\n self._multiple_trainloader_mode = multiple_trainloader_mode\n self._skip_backward = False\n self.trainer._multiple_trainloader_mode = multiple_trainloader_mode\n\n def on_trainer_init(\n self,\n max_epochs,\n min_epochs,\n max_steps,\n min_steps,\n num_sanity_val_steps,\n automatic_optimization,\n weights_summary,\n ):\n self.trainer.global_step = 0\n self.trainer.current_epoch = 0\n self.trainer.interrupted = False\n self.trainer.should_stop = False\n self.trainer._state = TrainerState.INITIALIZING\n\n self.trainer.total_batch_idx = 0\n self.trainer.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n self.automatic_optimization = automatic_optimization\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.trainer.max_steps = max_steps\n self.trainer.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n self.trainer.weights_summary = weights_summary\n if weights_summary is not None and weights_summary not in 
ModelSummary.MODES:\n raise MisconfigurationException(\n f\"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}\"\n )\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n def should_skip_training(self):\n should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps\n should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n # hook\n self.trainer.call_hook(\"on_train_start\")\n\n # provide rank to profiler\n self.trainer.profile_connector.on_train_start(self.trainer)\n\n def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # links data to the trainer\n self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)\n\n # check that model is configured correctly\n self.trainer.config_validator.verify_loop_configurations(model)\n\n # attach model log function to callback\n self.trainer.callback_connector.attach_model_logging_functions(model)\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n\n self._teardown_already_run = True\n\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.trainer.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.trainer.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. 
Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu\n # kill loggers\n if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n if self.trainer.global_rank == 0:\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator_backend.on_train_end()\n\n # clear mem\n if self.trainer._device_type == DeviceType.GPU:\n model = self.trainer.get_model()\n model.cpu()\n torch.cuda.empty_cache()\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def check_early_stopping_callback(self, should_update):\n # TODO bake this logic into the EarlyStopping callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.trainer.current_epoch = epoch\n\n model = self.trainer.get_model()\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # changing gradient according accumulation_scheduler\n self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # structured result accumulators for callbacks\n self.early_stopping_accumulator = Accumulator()\n self.checkpoint_accumulator = Accumulator()\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):\n # hook\n self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, model):\n if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n sample_output = opt_outputs[-1]\n\n # decide if we need to reduce at the end of the epoch 
automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n hook_overridden = (\n is_overridden(\"training_epoch_end\", model=self.trainer.get_model())\n or is_overridden(\"on_train_epoch_end\", model=self.trainer.get_model())\n )\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if not (hook_overridden or auto_reduce_tng_result):\n continue\n\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def get_optimizers_iterable(self):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n optimizers_loop_length = optimizer_freq_cumsum[-1]\n current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length\n\n # find optimzier index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n is_result_obj = isinstance(training_step_output, Result)\n\n if is_result_obj:\n training_step_output.detach()\n else:\n training_step_output.batch_loss = training_step_output.batch_loss.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.get_model()\n\n with self.trainer.profiler.profile(\"model_forward\"):\n args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n model_ref._results = Result()\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator_backend.training_step(args)\n self.trainer.accelerator_backend.post_training_step()\n\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n is_result_obj = isinstance(training_step_output, Result)\n\n if training_step_output_for_epoch_end is None:\n return None\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.trainer.train_loop.automatic_optimization:\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n 
if is_result_obj:\n closure_loss = training_step_output.minimize\n else:\n closure_loss = training_step_output.batch_loss\n\n closure_loss = closure_loss / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n hiddens=training_step_output.hiddens,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n # -----------------------------------------\n # process result return (DEPRECATE in 1.0)\n # -----------------------------------------\n if isinstance(training_step_output, Result):\n training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)\n return training_step_output_for_epoch_end, training_step_output\n\n # -----------------------------------------\n # process hybrid (1.0)\n # -----------------------------------------\n # no need for these checks in 1.0.0\n # TODO: remove checks in 1.0.0\n is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)\n is_1_0_output = is_tensor or (\"log\" not in training_step_output and \"progress_bar\" not in training_step_output)\n if is_1_0_output:\n return self._process_training_step_output_1_0(training_step_output, split_batch)\n\n # -----------------------------------------\n # process old dict (deprecate 1.0)\n # -----------------------------------------\n training_step_output = self.trainer.process_dict_result(training_step_output, train=True)\n\n training_step_output = AttributeDict(\n batch_loss=training_step_output[0],\n pbar_on_batch_end=training_step_output[1],\n log_metrics=training_step_output[2],\n callback_metrics=training_step_output[3],\n hiddens=training_step_output[4],\n )\n # if the user decides to finally reduce things in epoch_end, save raw output without graphs\n if isinstance(training_step_output_for_epoch_end, torch.Tensor):\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n else:\n training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_training_step_output_1_0(self, training_step_output, split_batch):\n result = self.trainer.get_model()._results\n\n loss = None\n hiddens = None\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n result[\"extra\"] = {}\n\n # map to results under the hood\n result.minimize = loss\n result.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end.cpu()\n\n # what flows back into the system\n training_step_output = result\n\n 
return training_step_output_for_epoch_end, training_step_output\n\n def _process_result(self, training_step_output, split_batch):\n training_step_output.track_batch_size(len(split_batch))\n m = \"\"\"\n TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.\n Use self.log and .write from the LightningModule to log metrics and write predictions.\n training_step can now only return a scalar (for the loss) or a dictionary with anything you want.\n\n Option 1:\n return loss\n\n Option 2:\n return {'loss': loss, 'anything_else': ...}\n\n Option 3:\n return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}\n \"\"\"\n rank_zero_warn(m)\n\n training_step_output_for_epoch_end = copy(training_step_output)\n training_step_output_for_epoch_end.detach()\n\n return training_step_output_for_epoch_end\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.get_model()\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer):\n # track gradient norms\n grad_norm_dic = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)\n self._cur_grad_norm_dict = grad_norm_dic\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.get_model()\n grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def process_hiddens(self, opt_closure_result):\n hiddens = opt_closure_result.hiddens\n if isinstance(opt_closure_result.training_step_output, Result):\n opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()\n return hiddens\n\n def tbptt_split_batch(self, batch):\n splits = [batch]\n if self.trainer.truncated_bptt_steps is not None:\n model_ref = self.trainer.get_model()\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output = [[] for _ in range(self.num_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n 
dataloader_idx = 0\n should_check_val = False\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n\n self.trainer.batch_idx = batch_idx\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n batch_end_outputs = self.process_train_step_outputs(\n batch_output.training_step_output_for_epoch_end,\n self.early_stopping_accumulator,\n self.checkpoint_accumulator,\n )\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED + CHECKPOINT CALLBACK\n # -----------------------------------------\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.run_evaluation()\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if (\n self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1\n and self._accumulated_batches_reached()\n ):\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.trainer.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if self._num_training_batches_reached(is_last_batch):\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n # epoch end hook\n self.run_on_epoch_end_hook(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(\n epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers\n )\n\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)\n if should_check_val:\n self.trainer.run_evaluation(on_epoch=True)\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)\n should_train_only = self.trainer.disable_validation or should_skip_eval\n\n if should_train_only:\n # update epoch level lr_schedulers\n self.trainer.optimizer_connector.update_learning_rates(interval='epoch')\n self.check_checkpoint_callback(True)\n self.check_early_stopping_callback(True)\n\n # increment the global step once\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dic = {}\n\n # 
bookkeeping\n self.trainer.hiddens = None\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]\n\n if batch is None:\n return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # lightning module hook\n splits = self.tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in self.prepare_optimizers():\n\n # toggle model params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # automatic_optimization=True: perform dpp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n\n else:\n if self.automatic_optimization:\n\n def train_step_and_backward_closure():\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n self._curr_step_result = self.training_step(\n split_batch, batch_idx, opt_idx, self.trainer.hiddens\n )\n\n if self._curr_step_result is None:\n # user decided to skip optimization\n # make sure to zero grad.\n continue\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # todo: Properly aggregate grad_norm accros opt_idx and split_idx\n grad_norm_dic = self._cur_grad_norm_dict\n self._cur_grad_norm_dict = None\n\n # update running loss + reset accumulated loss\n self.update_running_loss()\n\n result = AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:\n 
opt_closure_result = self._curr_step_result\n\n if opt_closure_result is not None:\n\n # cache metrics\n self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # track hiddens\n self.trainer.hiddens = self.process_hiddens(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n if self.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(opt_closure_result.loss)\n\n self._curr_step_result = None\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n self._curr_step_result = result\n\n if result is None:\n if self.automatic_optimization:\n self.warning_cache.warn(\"training_step returned None if it was on purpose, ignore this warning...\")\n return None\n\n if not self._skip_backward and self.trainer.train_loop.automatic_optimization:\n # backward pass\n with self.trainer.profiler.profile(\"model_backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(result.loss)\n\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.get_model().untoggle_optimizer(opt_idx)\n\n return result\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator_backend.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(interval=\"step\", monitor_metrics=monitor_metrics)\n\n def run_on_epoch_end_hook(self, epoch_output):\n # inform logger the batch loop has finished\n self.trainer.logger_connector.on_train_epoch_end()\n\n self.trainer.call_hook('on_train_epoch_end', epoch_output)\n self.trainer.call_hook('on_epoch_end')\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress 
global step according to grads progress\n if num_accumulated_batches_reached or num_training_batches_reached:\n self.trainer.global_step += 1\n\n def _accumulated_batches_reached(self):\n return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):\n # decide if we should run validation\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n can_check_val = self.trainer.enable_validation and is_val_check_epoch\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches\n\n should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop\n or is_last_batch_for_infinite_dataset\n ) if on_epoch else (is_val_check_batch and not epoch_end_val_check)\n\n return should_check_val and can_check_val\n\n def build_train_args(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_idx]\n\n if len(self.trainer.optimizers) > 1:\n if self.trainer.has_arg(\"training_step\", \"optimizer_idx\"):\n args.append(opt_idx)\n else:\n num_opts = len(self.trainer.optimizers)\n raise ValueError(\n f\"Your LightningModule defines {num_opts} optimizers but \"\n f'training_step is missing the \"optimizer_idx\" argument.'\n )\n\n # pass hiddens if using tbptt\n if self.trainer.truncated_bptt_steps is not None:\n args.append(hiddens)\n\n return args\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):\n \"\"\"\n Figure out what needs to be tracked/logged at the end of the epoch\n \"\"\"\n\n # the training step outputs a list per optimizer. 
The list contains the outputs at each time step\n # when no TBPTT is used, then the list has 1 item per batch\n # when TBPTT IS used, then the list has n items (1 per time step)\n batch_end_outputs = []\n for optimizer_idx_outputs in all_train_step_outputs:\n # extract one representative sample from each time step (1 if no tbptt) and 0th optimizer\n if len(optimizer_idx_outputs) == 0:\n continue\n\n sample_output = optimizer_idx_outputs[-1]\n\n # pull out callback info if available (ie: Results object)\n if isinstance(sample_output, dict) and \"early_stop_on\" in sample_output:\n early_stopping_accumulator.accumulate(sample_output[\"early_stop_on\"])\n\n if isinstance(sample_output, dict) and \"checkpoint_on\" in sample_output:\n checkpoint_accumulator.accumulate(sample_output[\"checkpoint_on\"])\n\n batch_end_outputs.append(optimizer_idx_outputs)\n\n return batch_end_outputs\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.get_model()\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self):\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n"
] | [
[
"numpy.argmax",
"torch.cuda.empty_cache",
"numpy.cumsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
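The row above lists `numpy.cumsum` and `numpy.argmax` among its extracted APIs; in the training-loop code they drive the optimizer-frequency round robin in `get_optimizers_iterable`. A minimal standalone sketch of that selection logic, with hypothetical frequencies and batch counter (not tied to any Trainer object):

```python
import numpy as np

# Hypothetical schedule: optimizer 0 runs for 2 batches, optimizer 1 for 3, repeating.
optimizer_frequencies = [2, 3]
total_batch_idx = 7  # assumed current batch counter

freq_cumsum = np.cumsum(optimizer_frequencies)   # [2, 5]
loop_length = freq_cumsum[-1]                    # 5 batches per cycle
place_in_loop = total_batch_idx % loop_length    # 7 % 5 = 2

# The first cumulative bound strictly greater than the current position
# identifies the optimizer that owns this batch.
opt_idx = int(np.argmax(freq_cumsum > place_in_loop))
print(opt_idx)  # -> 1 (positions 0-1 belong to optimizer 0, positions 2-4 to optimizer 1)
```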
accordproject/labs-cicero-classify | [
"3a52ebaf45252515c417bf94a05e33fc1c2628b8",
"3a52ebaf45252515c417bf94a05e33fc1c2628b8"
] | [
"Practice/adapter_roberta_v4/adapter_model.py",
"Practice/adapter_roberta/ner_dataset.py"
] | [
"import pandas as pd\nimport numpy as np\nimport torch\nprint(f\"Torch Version: {torch.__version__}\")\n\nimport transformers\nprint(f\"transformers (Adapter) Version: {transformers.__version__}\")\n\nfrom transformers import RobertaTokenizer\nimport numpy as np\n\ntokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n\nfrom transformers import RobertaTokenizer\n\ntokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n\ndef encode_batch(batch):\n \"\"\"Encodes a batch of input data using the model tokenizer.\"\"\"\n return tokenizer(batch[\"text\"], max_length=80, truncation=True, padding=\"max_length\")\n\ndata_path = \"./NER_multilabel_data_v4.csv\"\ndf = pd.read_csv(data_path)\n\nall_tags = df.newTag\n\nall_tags = set(all_tags)\n\nall_tags = \"|\".join(all_tags)\nall_tags = all_tags.split(\"|\")\nall_tags = set(all_tags)\nall_tags = list(all_tags)\n\nfrom ner_dataset import get_trainset_data_loader\n\nall_tags, trainset, trainloader = get_trainset_data_loader(tokenizer, BATCH_SIZE=128)\n\n\nfrom transformers import RobertaConfig, RobertaModelWithHeads\n\nconfig = RobertaConfig.from_pretrained(\n \"roberta-base\",\n num_labels=len(all_tags),\n label2id = trainset.label_map, \n id2label = trainset.id2label\n)\nmodel = RobertaModelWithHeads.from_pretrained(\n \"roberta-base\",\n config=config,\n)\n\nall_adapter_name = []\nfor tag in all_tags:\n adapter_name = f\"{tag}_0731\"\n name = model.load_adapter(f\"./save_adapters/{adapter_name}\")\n all_adapter_name.append(name)\n model.load_head(f\"./save_heads/{adapter_name}\")\n\nimport re\n\nparallel_text = \"','\".join(all_adapter_name)\nresult = re.findall(r'[;|(|)]',parallel_text)\nif len(result) != 0:\n raise(ValueError(\"Adapter Name must not contain \\\"\" + '\\\", \\\"'.join(result) + '\"'))\n\nfrom transformers.adapters.composition import Parallel\nparallel = eval(\"Parallel('\" + \"','\".join(all_adapter_name) + \"')\")\n\nmodel.set_active_adapters(parallel)\n\ndevice = \"cpu\"\n\ndef get_adapter_mapping(model):\n print(model.active_head)\n label_2_id_mapping = dict()\n id_2_label_mapping = dict()\n for i, head in enumerate(model.active_head):\n label_2_id_mapping[head] = i\n id_2_label_mapping[i] = head\n return label_2_id_mapping, id_2_label_mapping\n\n\n\ndef model_predict(model, sentence, device = \"cpu\"):\n tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])\n pos = torch.tensor([[0] * len(tokenized_sentence)])\n tags = torch.tensor([[1] * len(tokenized_sentence)])\n\n model = model.to(device)\n with torch.no_grad():\n outputs = model(input_ids=tokenized_sentence.to(device), \n token_type_ids=pos.to(device), \n attention_mask=tags.to(device))\n\n logits = outputs[1][0]\n\n return_tags_order = {}\n all_output = None\n for i, output in enumerate(outputs):\n\n return_tags_order[i] = (model.active_head[i])\n\n output = outputs[i][0]\n\n if all_output != None:\n all_output = torch.cat((all_output, output), dim=2)\n else:\n all_output = output\n all_output = torch.sigmoid(all_output)\n\n output_array = np.array(all_output)\n output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])\n\n label_confidences = []\n for label_confidence in list(output_array):\n label_confidences.append(list(label_confidence))\n\n #Drop Head and End since it is start/stop Token\n label_confidences = label_confidences[1:-1]\n\n max_value = np.array(label_confidences).argmax(axis=1)\n trans_func = np.vectorize(lambda x: model.active_head[x])\n out_labels = trans_func(max_value)\n\n out_sentence = 
tokenizer.tokenize(sentence)\n\n return out_sentence, out_labels, label_confidences, return_tags_order\n\ndevice = \"cpu\"\n\ndef get_adapter_mapping(model):\n print(model.active_head)\n label_2_id_mapping = dict()\n id_2_label_mapping = dict()\n for i, head in enumerate(model.active_head):\n label_2_id_mapping[head] = i\n id_2_label_mapping[i] = head\n return label_2_id_mapping, id_2_label_mapping\n\n\n\ndef model_predict(model, sentence, device = \"cpu\"):\n tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])\n pos = torch.tensor([[0] * len(tokenized_sentence)])\n tags = torch.tensor([[1] * len(tokenized_sentence)])\n\n model = model.to(device)\n with torch.no_grad():\n outputs = model(input_ids=tokenized_sentence.to(device), \n token_type_ids=pos.to(device), \n attention_mask=tags.to(device))\n\n logits = outputs[1][0]\n\n return_tags_order = {}\n all_output = None\n for i, output in enumerate(outputs):\n\n return_tags_order[i] = (model.active_head[i])\n\n output = outputs[i][0]\n\n if all_output != None:\n all_output = torch.cat((all_output, output), dim=2)\n else:\n all_output = output\n all_output = torch.sigmoid(all_output)\n\n output_array = np.array(all_output)\n output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])\n\n label_confidences = []\n for label_confidence in list(output_array):\n label_confidences.append(list(label_confidence))\n\n #Drop Head and End since it is start/stop Token\n label_confidences = label_confidences[1:-1]\n\n max_value = np.array(label_confidences).argmax(axis=1)\n trans_func = np.vectorize(lambda x: model.active_head[x])\n out_labels = trans_func(max_value)\n\n out_sentence = tokenizer.tokenize(sentence)\n\n return out_sentence, out_labels, label_confidences, return_tags_order",
"import pandas as pd\nimport numpy as np\nimport torch\n\n#data_path = \"./NER_multilabel_data_v2.csv\"\nBATCH_SIZE = 16\ndef get_trainset_data_loader(tokenizer, data_path = \"\",\n BATCH_SIZE = BATCH_SIZE):\n df = pd.read_csv(data_path)\n\n all_tags = df.newTag\n\n all_tags = set(all_tags)\n\n all_tags = \"|\".join(all_tags)\n all_tags = all_tags.split(\"|\")\n all_tags = set(all_tags)\n all_tags = list(all_tags)\n\n\n def process_csv(data_path):\n df = pd.read_csv(data_path)\n df.loc[:, \"Sentence #\"] = df[\"Sentence #\"].fillna(method=\"ffill\")\n sentences = df.groupby(\"Sentence #\")[\"Word\"].apply(list).values\n tags = df.groupby(\"Sentence #\")[\"newTag\"].apply(list).values\n return sentences, tags\n\n sentences, tags = process_csv(data_path)\n\n from torch.utils.data import Dataset\n from sklearn.preprocessing import OneHotEncoder\n\n\n\n\n class NER_Dataset(Dataset):\n # 讀取前處理後的 tsv 檔並初始化一些參數\n def __init__(self, mode, tokenizer, data_path, labels):\n assert mode in [\"train\", \"test\"] # 一般訓練你會需要 dev set\n self.mode = mode\n # 大數據你會需要用 iterator=True\n self.sentences, self.tags = process_csv(data_path)\n self.len = len(self.sentences)\n\n\n if mode != \"test\":\n self.label_map = {}\n for i in range(len(labels)):\n self.label_map[labels[i]] = i\n\n possible_labels = np.array(range(len(labels))).reshape(-1, 1)\n self.oneHotEncoder = OneHotEncoder()\n self.oneHotEncoder.fit(possible_labels)\n else:\n self.label_map = None\n\n self.tokenizer = tokenizer # 我們將使用 BERT tokenizer\n self.O_label = self.label_map[\"O\"]\n\n\n # 定義回傳一筆訓練 / 測試數據的函式\n def __getitem__(self, idx):\n if self.mode == \"test\":\n label_tensor = None\n else:\n label = [\"O\"] + self.tags[idx] + [\"O\"]\n\n label = np.array(label)\n label = label.reshape(-1,1)\n\n label = np.apply_along_axis(self.split_one_hot_multiTags, 1, label)\n label_tensor = torch.tensor(label, dtype = torch.float32)\n\n # 建立第一個句子的 BERT tokens 並加入分隔符號 [SEP]\n word_pieces = ['[CLS]']\n word_pieces += self.sentences[idx]\n word_pieces += ['[SEP]']\n\n ids = self.tokenizer.convert_tokens_to_ids(word_pieces)\n tokens_tensor = torch.tensor(ids)\n\n # 將第一句包含 [SEP] 的 token 位置設為 0\n segments_tensor = torch.zeros_like(tokens_tensor)\n\n return (tokens_tensor, segments_tensor, label_tensor)\n\n def __len__(self):\n return self.len\n\n def split_one_hot_multiTags(self, tags):\n # tags = ['B-org|Party|String']\n tags = tags[0]\n tags = tags.split(\"|\")\n\n\n tags_num = list(map(lambda x: self.label_map[x], tags))\n #[5, 20, 23]\n\n tags_num = np.array(tags_num).reshape(-1,1)\n\n tags_one_hot = self.oneHotEncoder.transform(tags_num).toarray()\n\n tags_one_hot = tags_one_hot.sum(axis = 0)\n\n #return torch.tensor(tags_one_hot, dtype = torch.float32)\n\n return tags_one_hot\n\n\n # 初始化一個專門讀取訓練樣本的 Dataset,使用中文 BERT 斷詞\n\n df = pd.read_csv(data_path)\n\n labels = np.unique(\"|\".join(list(df.newTag)).split(\"|\"))\n print(f\"labels: {labels}\")\n\n trainset = NER_Dataset(\"train\", tokenizer=tokenizer, data_path=data_path, labels= labels)\n\n from torch.utils.data import DataLoader, IterableDataset\n from torch.nn.utils.rnn import pad_sequence\n def create_mini_batch(samples):\n tokens_tensors = [s[0] for s in samples]\n segments_tensors = [s[1] for s in samples]\n\n # 測試集有 labels\n if samples[0][2] is not None:\n label_ids = [s[2] for s in samples]\n label_ids = pad_sequence(label_ids, \n batch_first=True)\n else:\n label_ids = None\n\n # zero pad 到同一序列長度\n tokens_tensors = pad_sequence(tokens_tensors, \n batch_first=True)\n segments_tensors = 
pad_sequence(segments_tensors, \n batch_first=True)\n\n # attention masks,將 tokens_tensors 裡頭不為 zero padding\n # 的位置設為 1 讓 BERT 只關注這些位置的 tokens\n masks_tensors = torch.zeros(tokens_tensors.shape, \n dtype=torch.long)\n masks_tensors = masks_tensors.masked_fill(\n tokens_tensors != 0, 1)\n\n return tokens_tensors, segments_tensors, masks_tensors, label_ids\n\n\n\n\n\n trainset.id2label = {}\n for key in trainset.label_map.keys():\n trainset.id2label[trainset.label_map[key]] = key\n\n\n trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, \n collate_fn=create_mini_batch)\n\n data = next(iter(trainloader))\n\n tokens_tensors, segments_tensors, \\\n masks_tensors, label_ids = data\n\n '''print(f\"\"\"\n tokens_tensors.shape = {tokens_tensors.shape} \n {tokens_tensors}\n ------------------------\n segments_tensors.shape = {segments_tensors.shape}\n {segments_tensors}\n ------------------------\n masks_tensors.shape = {masks_tensors.shape}\n {masks_tensors}\n ------------------------\n label_ids.shape = {label_ids.shape}\n {label_ids}\n \"\"\")'''\n \n trainset.id2label = {}\n for key in trainset.label_map.keys():\n trainset.id2label[trainset.label_map[key]] = key\n\n \n return all_tags, trainset, trainloader"
] | [
[
"torch.sigmoid",
"pandas.read_csv",
"torch.cat",
"numpy.vectorize",
"torch.no_grad",
"numpy.array"
],
[
"pandas.read_csv",
"torch.zeros",
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.DataLoader",
"torch.zeros_like",
"sklearn.preprocessing.OneHotEncoder",
"torch.tensor",
"numpy.apply_along_axis",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
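The adapter-RoBERTa row above extracts `sklearn.preprocessing.OneHotEncoder` and `numpy.apply_along_axis`, which together build the multi-hot targets in `split_one_hot_multiTags`: a pipe-separated tag string becomes several one-hot rows that are summed into one vector. A hedged sketch with an assumed toy label set (the real labels come from the CSV's `newTag` column):

```python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

# Assumed toy label set for illustration only.
labels = ["O", "B-org", "Party", "String"]
label_map = {label: i for i, label in enumerate(labels)}

encoder = OneHotEncoder()
encoder.fit(np.arange(len(labels)).reshape(-1, 1))

def split_one_hot_multi_tags(tag_string):
    """Turn 'B-org|Party|String' into a single multi-hot target vector."""
    ids = np.array([label_map[t] for t in tag_string.split("|")]).reshape(-1, 1)
    one_hot = encoder.transform(ids).toarray()   # one row per tag
    return one_hot.sum(axis=0)                   # collapse to multi-hot

print(split_one_hot_multi_tags("B-org|Party|String"))  # -> [0. 1. 1. 1.]
```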
falcon2212/detr-tensorflow | [
"119da1390a02b6013e7147d822e72c38fc3a2dd9"
] | [
"detr_tf/data/tfcsv.py"
] | [
"import tensorflow as tf\nfrom random import shuffle\nimport pandas as pd\nimport numpy as np\nimport imageio\nimport os\n\nfrom detr_tf.data import processing\nfrom detr_tf.data.transformation import detr_transform\nfrom detr_tf import bbox\n\n\ndef morethan1(img, tbbox, tclass):\n ret = False\n print(\"morethan1 \", tbbox.shape)\n try:\n ret = tbbox.shape[0] > 0\n except:\n ret = False\n return ret\n\n\ndef load_data_from_index(index, class_names, filenames, anns, config, augmentation, img_dir):\n # Open the image\n image = imageio.imread(config.datadir+img_dir+\"/\"+filenames[index])\n # Select all the annotatiom (bbox and class) on this image\n image_anns = anns[anns[\"filename\"] == filenames[index]]\n\n # Convert all string class to number (the target class)\n t_class = image_anns[\"class\"].map(\n lambda x: class_names.index(x)).to_numpy()\n # Select the width&height of each image (should be the same since all the ann belongs to the same image)\n width = image_anns[\"width\"].to_numpy()\n height = image_anns[\"height\"].to_numpy()\n # Select the xmin, ymin, xmax and ymax of each bbox, Then, normalized the bbox to be between and 0 and 1\n # Finally, convert the bbox from xmin,ymin,xmax,ymax to x_center,y_center,width,height\n bbox_list = image_anns[[\"xmin\", \"ymin\", \"xmax\", \"ymax\"]].to_numpy()\n bbox_list = bbox_list / [width[0], height[0], width[0], height[0]]\n t_bbox = bbox.xy_min_xy_max_to_xcycwh(bbox_list)\n\n # Transform and augment image with bbox and class if needed\n image, t_bbox, t_class = detr_transform(\n image, t_bbox, t_class, config, augmentation=augmentation)\n\n # Normalized image\n image = processing.normalized_images(image, config)\n\n return image.astype(np.float32), t_bbox.astype(np.float32), np.expand_dims(t_class, axis=-1).astype(np.int64)\n\n\ndef load_tfcsv_dataset(config, batch_size, augmentation=False, exclude=[], ann_dir=None, ann_file=None, img_dir=None):\n \"\"\" Load the hardhat dataset\n \"\"\"\n ann_dir = config.data.ann_dir if ann_dir is None else ann_dir\n ann_file = config.data.ann_file if ann_file is None else ann_file\n img_dir = config.data.img_dir if img_dir is None else img_dir\n\n anns = pd.read_csv(config.datadir+ann_file)\n for name in exclude:\n anns = anns[anns[\"class\"] != name]\n\n unique_class = anns[\"class\"].unique()\n unique_class.sort()\n\n # Set the background class to 0\n config.background_class = 0\n class_names = [\"background\"] + unique_class.tolist()\n\n filenames = anns[\"filename\"].unique().tolist()\n indexes = list(range(0, len(filenames)))\n shuffle(indexes)\n\n dataset = tf.data.Dataset.from_tensor_slices(indexes)\n dataset = dataset.map(lambda idx: processing.numpy_fc(\n idx, load_data_from_index,\n class_names=class_names, filenames=filenames, anns=anns, config=config, augmentation=augmentation, img_dir=img_dir), num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Filter labels to be sure to keep only sample with at least one bbox\n dataset = dataset.filter(\n lambda imgs, tbbox, tclass: tf.shape(tbbox)[0] > 0)\n\n # Pad bbox and labels\n dataset = dataset.map(processing.pad_labels,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Batch images\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n return dataset, class_names\n\n\n# print(config.data_dir)\n# train_iterator, class_names = load_tfcsv_dataset(\n# config=config, batch_size=config.batch_size, augmentation=True, img_dir=\"train\", ann_file=\"train/_annotations.csv\")\n# test_iterator, class_names = load_tfcsv_dataset(\n# 
config=config, batch_size=config.batch_size, augmentation=True, img_dir=\"test\", ann_file=\"test/_annotations.csv\")\n# print(test_iterator.cardinality())\n# print(train_iterator.cardinality())\n# # tmp = list(train_iterator)\n# # for i, _ in enumerate(train_iterator):\n# # print(i)\n# # print(int(None))\n"
] | [
[
"tensorflow.data.Dataset.from_tensor_slices",
"pandas.read_csv",
"numpy.expand_dims",
"tensorflow.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
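The detr-tensorflow row above pairs `tensorflow.data.Dataset.from_tensor_slices` with `tensorflow.shape` to drop samples whose annotation table contains no bounding boxes. A minimal sketch of that filter step under assumed toy data, with `tf.numpy_function` standing in for the CSV-backed loader (TF 2.x eager execution assumed):

```python
import numpy as np
import tensorflow as tf

# Toy bounding boxes keyed by sample index; index 1 has none and should be dropped.
boxes_per_index = {
    0: np.array([[0.5, 0.5, 0.2, 0.2]], np.float32),
    1: np.zeros((0, 4), np.float32),
    2: np.array([[0.1, 0.1, 0.3, 0.3], [0.7, 0.7, 0.2, 0.2]], np.float32),
}

def load_boxes(idx):
    # Stand-in for the numpy-side loader that reads an image and its annotations.
    return boxes_per_index[int(idx)]

dataset = tf.data.Dataset.from_tensor_slices(list(boxes_per_index.keys()))
dataset = dataset.map(
    lambda idx: tf.numpy_function(load_boxes, [idx], tf.float32),
    num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
# Keep only samples with at least one bounding box, as in load_tfcsv_dataset.
dataset = dataset.filter(lambda boxes: tf.shape(boxes)[0] > 0)

print(sum(1 for _ in dataset))  # -> 2
```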
tomAntoine/multi-UAV-simulator | [
"2fbd8b802ea1a5f388722714bac5563d0718b28f"
] | [
"Simulation_Python/scenarios.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nauthor: John Bass\nemail: [email protected]\nlicense: MIT\nPlease feel free to use and modify this, but keep the above information. Thanks!\n\nadaptation\nauthor: Tom Antoine and Alex Martinez\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport cProfile\nfrom trajectory import Trajectory\nfrom ctrl import Control\nfrom quadFiles.quad import Quadcopter\nfrom utils.windModel import Wind\nimport utils\nimport config\nimport mpl_toolkits.mplot3d.axes3d as p3\nfrom matplotlib.legend import Legend\nimport random\n\n\n\"\"\"\nThe variable “quad_id” is provided as an integer from 0 to the number of drones\nin the simulation.\n\nThe “mode” is provided as a string and it can be split into three categories,\ndepending whether or not they are associated with the agent, the target or both.\nThe latter are simple actions such as “takeoff”, “home”, “land”, “fall” or\n“charging”. Then, there are specific modes for agents like “guided” or “track”;\nand targets like “enemy”. The change of mode can be pre-defined or provided\nby the Mission planning and Task control subsystem. In the case of targets, the\ntransition is automated internally. They will be initialized in “enemy” mode and\nchanged into “neutralized” if the conditions are met to finally change into “fall”.\nIn the case of agents, the change of modes is performed externally after system\nintegration. However, due to the very intuitive transitions, some of them were\npredefined in sequences for the subsystem validation and verification. For this\nreason, “takeoff” and “land” mode were integrated at the beginning and end of\neach mission. Similarly, after an agent in “track” mode neutralized its target, or\na “guided” one has reached its goal position, the mode was switched to “home”.\nThe “id_targ” is a specific integer input associated to the mode “track”. It\ncorresponds to the target identification number and is assigned as -1 by default\nif any other mode is employed.\n\nThe “pos_goal” is a set of coordinates x, y and z in the global reference frame\nthat represent the goal position. It should be noted that although x and y are\nnot bounded, the z coordinate is restricted so that the drones cannot go through\nthe ground and by consistency with the guidance algorithms, it is defined as\nnegative. It should be noted that although this is an input from Mission planning\nand Task control subsystem it will be updated for specific modes such as\n“track”.\n\nThe “pos_obs” is a list of sets of coordinates x, y and z in the global reference\nframe corresponding to the static obstacles and therefore should be kept\nconstant for all the drones in the simulation environment. This information is\npredefined but will need to be provided by the Situation Awareness subsystem.\n\nThe “pos_ini” is a set of coordinates x, y and z in the global reference frame\nthat represent the initial position. It should be noted that as for the rest of\ncoordinates, the z coordinate is defined as negative.\n\nThe “color” is employed for the easy identification of the drones. It allows to\neasily verify the correct functioning of the algorithms.\n\nThe “ctrlType” xyz_pos by default.\n\nThe “trajSelect” minimum velocity, no yaw control, average speedby default.\n\nThe “Ti” input is given as a number and indicates the initial time for the\nsimulation. It is common for all drones and by default set at 0s.\n\nFor most modes, the “Tf” input is given as a number and corresponds to the\nfinal time of the simulation “Tf”. 
It is therefore employed for creating the\ntrajectories to reach goal position. However, in modes that require regular\nupdates as “track” or “guided”, it is substituted by the update time. In these\ncases, it should be slightly modified within drones. It is usually around 0.5s.\n\nThe numerical time step “numTimeStep” is employed for the trajectories.\n\n\"\"\"\n\n\n\ndef full_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[1, 5, -2], [8, 2, -8], [5, 8, -9], [0, 0, -2], [3, 3, -1],[3, 9, -17],[5, 7, -18],[0, 0, -10],[5, 10, -16],[10,10,-12],[13,13,-13]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [15,15,-15], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='guided', id_targ = -1, color = 'green', pos_ini = [0,3,0], pos_goal = [15,10,-15], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 0, color = 'pink', pos_ini = [3,0,0], pos_goal = [15,20,-15], pos_obs = pos_obs)\n quads = [quad0, quad1, quad2]\n return pos_obs,quads\n\ndef multi_waypoint_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[50,0,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [0,-17,-10], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [20,0,0], pos_goal = [-20,-15,-10], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'red', pos_ini = [-20,-10,0], pos_goal = [-10,0,-20], pos_obs = pos_obs)\n quads = [quad0, quad1, quad2]\n return pos_obs,quads\n\ndef static_OA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = []\n for i in range(30):\n pos_obs.append(random.sample(range(-10, 0), 3))\n pos_obs = np.array(pos_obs)\n print(pos_obs)\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [-10,-10,-10], pos_obs = pos_obs)\n quads = [quad0]\n return pos_obs,quads\n\ndef dynamic_CA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n #Tf =8s\n pos_obs = np.array([[50,0,0]])\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,10,-5],pos_goal = [30,10,-5], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [3,0,-5], pos_goal = [3,20,-5], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [8,0,-5], pos_goal = [8,20,-5], pos_obs = pos_obs)\n quad3 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [15,0,-5], pos_goal = [15,20,-5], pos_obs = pos_obs)\n quads = [quad0, quad1,quad2,quad3]\n return pos_obs,quads\n\ndef dynamic_CA_scenario_random_pos(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n #Tf =8s\n pos_obs = np.array([[50,0,0]])\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 
'blue', pos_ini = [0,10,-5],pos_goal = [30,10,-5], pos_obs = pos_obs)\n x, z = random.randint(3,17),-1*random.randint(1,8)\n quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)\n x, z = random.randint(3,17),-1*random.randint(1,8)\n quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)\n x, z = random.randint(3,17),-1*random.randint(1,8)\n quad3 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)\n quads = [quad0, quad1,quad2,quad3]\n return pos_obs,quads\n\ndef simple_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[-10,-10,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [15,15,-15], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [5,5,0], pos_goal = [2,2,-10], pos_obs = pos_obs)\n quads = [quad0, quad1]\n return pos_obs,quads\n\ndef multi_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[-10,-10,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [15,15,-15], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [4,0,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 0, color = 'green', pos_ini = [4,4,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quad3 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'green', pos_ini = [4,-4,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quads = [quad0, quad1, quad2, quad3]\n return pos_obs,quads\n\ndef tracking_loop_scenario(x,Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[x/2,x/2,-10]])\n quad0 = Quadcopter(Ti, Ts*99, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='track', id_targ = 1, color = 'blue', pos_ini = [0,0,-10], pos_goal = [0,x,-10], pos_obs = pos_obs)\n quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 2, color = 'green', pos_ini = [x,0,-10], pos_goal = [0,0,-10], pos_obs = pos_obs)\n quad2 = Quadcopter(Ti, Ts*101, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 3, color = 'orange', pos_ini = [x,x,-10],pos_goal = [x,0,-10], pos_obs = pos_obs)\n quad3 = Quadcopter(Ti, Ts*102, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'pink', pos_ini = [0,x,-10], pos_goal = [x,x,-10],pos_obs = pos_obs)\n quads = [quad0, quad1,quad2,quad3]\n return pos_obs,quads\n\ndef tracking_and_kill_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = np.array([[-10,-10,0]])\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [20,15,-20], pos_obs = pos_obs)\n quad1 = 
Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [5,0,0], pos_goal = [4,4,-10], pos_obs = pos_obs)\n quads = [quad0, quad1]\n return pos_obs,quads\n\ndef simple_guided_for_PF(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n pos_obs = []\n for i in range(20):\n pos_obs.append(random.sample(range(-10, 0), 3))\n pos_obs = np.array(pos_obs)\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [-10,-10,-10], pos_obs = pos_obs)\n quads = [quad0]\n return pos_obs,quads\n\ndef ROS_simu(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n fire_station=[]\n fire_truck=[]\n tree_1=[]\n tree_2=[]\n pos_obs=[]\n for i in range(20):\n x = random.sample(range(-10, 10), 1)[0]\n y = random.sample(range(-55, -45), 1)[0]\n z = random.sample(range(-12, 0), 1)[0]\n fire_station.append([x,y,z])\n \n for i in range(5):\n x = random.sample(range(-19, 21), 1)[0]\n y = random.sample(range(-55, -45), 1)[0]\n z = random.sample(range(-3, 0), 1)[0]\n fire_truck.append([x,y,z])\n\n for i in range(5):\n x = random.sample(range(-12, -8), 1)[0]\n y = random.sample(range(-42,-38), 1)[0]\n z = random.sample(range(-5, 0), 1)[0]\n tree_1.append([x,y,z])\n for i in range(5):\n x = random.sample(range(8, 12), 1)[0]\n y = random.sample(range(-42,-38), 1)[0]\n z = random.sample(range(-5, 0), 1)[0]\n tree_2.append([x,y,z])\n\n pos_obs = fire_station + fire_truck + tree_1 + tree_2\n pos_obs = np.array(pos_obs)\n quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [0,-100,-10], pos_obs = pos_obs)\n quads = [quad0]\n return(pos_obs,quads)\n\ndef real_map(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):\n\n xs = [-1,0,1]\n ys = [-1,0,1]\n zs = [0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10]\n tower = [[x,y,z] for x in xs for y in ys for z in zs]\n\n xs = [-20,5,10]\n ys = [5,-10,10]\n zs = [0,-1,-2,-3]\n trees = [[x,y,z] for x in xs for y in ys for z in zs]\n\n xs = [-20,5,10]\n ys = [5,-10,10]\n zs = [-4,-5]\n\n tops = []\n for i in range(3):\n x, y = xs[i], ys[i]\n for z in zs:\n tops = tops + [[x-1,y,z],[x+1,y,z],[x,y,z],[x,y-1,z],[x,y+1,z]]\n print(tops)\n\n pos_obs = np.array(tower + trees + tops)\n\n quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [-10,-10,-10], pos_obs = pos_obs)\n quads = [quad0]\n return pos_obs,quads\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
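The multi-UAV row above reduces to a single extracted API, `numpy.array`, used to pack randomly sampled obstacle coordinates into an array in scenarios such as `static_OA_scenario`. A small sketch of that obstacle-field generation with an assumed seed for reproducibility:

```python
import random
import numpy as np

random.seed(0)  # assumed seed, only so the example is reproducible

# Each obstacle gets three distinct coordinate values from [-10, -1];
# keeping z negative follows the scenario convention of obstacles above the ground.
pos_obs = np.array([random.sample(range(-10, 0), 3) for _ in range(30)])

print(pos_obs.shape)                  # -> (30, 3)
print(pos_obs.min(), pos_obs.max())   # all coordinates fall in [-10, -1]
```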
sunkr1995/genetic-drawing | [
"6e5cc755a55c1994770c3f18fb14f1cc651bb700"
] | [
"Sketch.py"
] | [
"'''\nAuthor: your name\nDate: 2021-07-02 17:20:23\nLastEditTime: 2021-07-08 16:28:05\nLastEditors: Please set LastEditors\nDescription: In User Settings Edit\nFilePath: /genetic-drawing/2.py\n'''\n#coding:utf-8\nimport cv2 \nimport math\nimport numpy as np\n \n \ndef dodgeNaive(image, mask):\n # determine the shape of the input image\n width, height = image.shape[:2]\n \n # prepare output argument with same size as image\n blend = np.zeros((width, height), np.uint8)\n \n for col in range(width):\n for row in range(height):\n # do for every pixel\n if mask[col, row] == 255:\n # avoid division by zero\n blend[col, row] = 255\n else:\n # shift image pixel value by 8 bits\n # divide by the inverse of the mask\n tmp = (image[col, row] << 8) / (255 - mask)\n # print('tmp={}'.format(tmp.shape))\n # make sure resulting value stays within bounds\n if tmp.any() > 255:\n tmp = 255\n blend[col, row] = tmp\n \n return blend\n \n \ndef dodgeV2(image, mask):\n return cv2.divide(image, 255 - mask, scale=256)\n \n \ndef burnV2(image, mask):\n return 255 - cv2.divide(255 - image, 255 - mask, scale=256)\n \n \ndef rgb_to_sketch(src_image_name, dst_image_name):\n img_rgb = cv2.imread(src_image_name)\n img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\n # 读取图片时直接转换操作\n # img_gray = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)\n \n img_gray_inv = 255 - img_gray\n img_blur = cv2.GaussianBlur(img_gray_inv, ksize=(21, 21),\n sigmaX=0, sigmaY=0)\n img_blend = dodgeV2(img_gray, img_blur)\n \n cv2.imshow('original', img_rgb)\n cv2.imshow('gray', img_gray)\n cv2.imshow('gray_inv', img_gray_inv)\n cv2.imshow('gray_blur', img_blur)\n cv2.imshow(\"pencil sketch\", img_blend)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite(dst_image_name, img_blend)\n \n \nif __name__ == '__main__':\n src_image_name = '02.jpg'\n dst_image_name = 'sketch_02.jpg'\n rgb_to_sketch(src_image_name, dst_image_name)\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
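The genetic-drawing row above implements a pencil-sketch effect whose core is the colour-dodge blend `dodgeV2`: divide the grayscale image by the blurred inverse. A hedged sketch of that blend on a synthetic flat-gray image instead of a file read with `cv2.imread`:

```python
import cv2
import numpy as np

img_gray = np.full((64, 64), 100, dtype=np.uint8)  # assumed flat-gray input
img_blur = cv2.GaussianBlur(255 - img_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)

# dodgeV2: brighten the base image according to the blurred, inverted mask.
sketch = cv2.divide(img_gray, 255 - img_blur, scale=256)

print(sketch.shape, sketch.dtype)  # -> (64, 64) uint8
```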
yyht/bert | [
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549",
"480c909e0835a455606e829310ff949c9dd23549"
] | [
"t2t_bert/utils/tensor2tensor/trax/rlax/ppo.py",
"BERT-keras-master/BERT-keras-master/transformer/embedding.py",
"bert-master/run_gaode_export.py",
"t2t_bert/distributed_multitask/embed_task.py",
"t2t_bert/task_module/global_batch_norm.py",
"t2t_bert/distributed_single_sentence_classification/model_mdd_distillation.py",
"t2t_bert/test/test_oqmrc_final.py",
"t2t_bert/pretrain_finetuning/generator_exporter.py",
"t2t_bert/model/bert/bert_electra_joint.py",
"t2t_bert/distributed_distillation/train_eval_estimator_fn.py",
"t2t_bert/pretrain_finetuning/generator_gumbel_normal.py",
"t2t_bert/loss/spectral_utils.py",
"t2t_bert/distributed_pair_sentence_classification/export.py",
"t2t_bert/dataset_generator/data_reader.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PPO in JAX.\n\nNotation:\n\nB, scalar - batch size\nT, scalar - number of time-steps in a trajectory, or the value of the padded\n time-step dimension.\nOBS, tuple - shape of a singular observation from the environment.\n Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3)\nA, scalar - Number of actions, assuming a discrete space.\n\nPolicy and Value function signatures:\n\nPolicy Function :: [B, T] + OBS -> [B, T, A]\nValue Function :: [B, T] + OBS -> [B, T, 1]\nPolicy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1])\n\ni.e. the policy net should take a batch of *trajectories* and at each time-step\nin each batch deliver a probability distribution over actions.\n\nNOTE: It doesn't return logits, rather the expectation is that it returns\nlog-probabilities instead.\n\nNOTE: The policy and value functions need to take care to not take into account\nfuture time-steps while deciding the actions (or value) for the current\ntime-step.\n\nPolicy and Value Function produces a tuple of the expected output of a policy\nfunction and a value function.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport os\nimport time\n\nfrom absl import logging\nimport cloudpickle as pickle\nimport gin\nimport gym\nfrom jax import grad\nfrom jax import jit\nfrom jax import lax\nfrom jax import numpy as np\nfrom jax import random as jax_random\nimport numpy as onp\nfrom tensor2tensor.envs import env_problem\nfrom tensor2tensor.envs import env_problem_utils\nfrom tensor2tensor.trax import jaxboard\nfrom tensor2tensor.trax import layers as tl\nfrom tensor2tensor.trax import optimizers as trax_opt\nfrom tensor2tensor.trax import trax\nfrom tensorflow.io import gfile\n\nDEBUG_LOGGING = False\nGAMMA = 0.99\nLAMBDA = 0.95\nEPSILON = 0.1\nEPOCHS = 50 # 100\nN_OPTIMIZER_STEPS = 100\nPRINT_EVERY_OPTIMIZER_STEP = 20\nBATCH_TRAJECTORIES = 32\n\n\ndef policy_and_value_net(rng_key,\n batch_observations_shape,\n observations_dtype,\n n_actions,\n bottom_layers_fn=(),\n two_towers=True):\n \"\"\"A policy and value net function.\"\"\"\n\n # Layers.\n\n # Now, with the current logits, one head computes action probabilities and the\n # other computes the value function.\n # NOTE: The LogSoftmax instead of the Softmax because of numerical stability.\n\n if two_towers:\n layers = [\n tl.Dup(),\n tl.Parallel(\n [bottom_layers_fn(), tl.Dense(n_actions), tl.LogSoftmax()],\n [bottom_layers_fn(), tl.Dense(1)],\n )\n ]\n else:\n layers = [\n bottom_layers_fn(),\n tl.Dup(),\n tl.Parallel(\n [tl.Dense(n_actions), tl.LogSoftmax()],\n [tl.Dense(1)],\n )\n ]\n net = tl.Model(layers)\n params = net.initialize(batch_observations_shape, observations_dtype, rng_key)\n return params, net\n\n\ndef optimizer_fn(net_params, step_size=1e-3):\n opt = trax_opt.Adam(step_size=step_size, b1=0.9, 
b2=0.999, eps=1e-08)\n opt_init = lambda x: (x, opt.tree_init(x))\n opt_update = lambda i, g, s: opt.tree_update(i, g, s[0], s[1])\n get_params = lambda x: x[0]\n opt_state = opt_init(net_params)\n return opt_state, opt_update, get_params\n\n\n# Should this be collect 'n' trajectories, or\n# Run the env for 'n' steps and take completed trajectories, or\n# Any other option?\ndef collect_trajectories(env,\n policy_fn,\n n_trajectories=1,\n policy=env_problem_utils.GUMBEL_SAMPLING,\n max_timestep=None,\n epsilon=0.1,\n reset=True,\n len_history_for_policy=32,\n rng=None):\n \"\"\"Collect trajectories with the given policy net and behaviour.\n\n Args:\n env: A gym env interface, for now this is not-batched.\n policy_fn: observations(B,T+1) -> log-probabs(B,T+1, A) callable.\n n_trajectories: int, number of trajectories.\n policy: string, \"greedy\", \"epsilon-greedy\", or \"categorical-sampling\" i.e.\n how to use the policy_fn to return an action.\n max_timestep: int or None, the index of the maximum time-step at which we\n return the trajectory, None for ending a trajectory only when env returns\n done.\n epsilon: float, the epsilon for `epsilon-greedy` policy.\n reset: bool, true if we want to reset the envs. The envs are also reset if\n max_max_timestep is None or < 0\n len_history_for_policy: int, the maximum history to keep for applying the\n policy on.\n rng: jax rng, splittable.\n\n Returns:\n A tuple (trajectory, number of trajectories that are done)\n trajectory: list of (observation, action, reward) tuples, where each element\n `i` is a tuple of numpy arrays with shapes as follows:\n observation[i] = (B, T_i + 1)\n action[i] = (B, T_i)\n reward[i] = (B, T_i)\n \"\"\"\n\n assert isinstance(env, env_problem.EnvProblem)\n # This is an env_problem, run its collect function.\n trajs, n_done, timing_info = env_problem_utils.play_env_problem_with_policy(\n env,\n policy_fn,\n num_trajectories=n_trajectories,\n max_timestep=max_timestep,\n policy_sampling=policy,\n eps=epsilon,\n reset=reset,\n len_history_for_policy=len_history_for_policy,\n rng=rng)\n # Skip returning raw_rewards here, since they aren't used.\n\n # t is the return value of Trajectory.as_numpy, so:\n # (observation, action, processed_reward, raw_reward, infos)\n return [(t[0], t[1], t[2], t[4]) for t in trajs], n_done, timing_info\n\n\n# This function can probably be simplified, ask how?\n# Can we do something much simpler than lax.pad, maybe np.pad?\n# Others?\n\n\ndef get_padding_value(dtype):\n \"\"\"Returns the padding value given a dtype.\"\"\"\n padding_value = None\n if dtype == np.uint8:\n padding_value = np.uint8(0)\n elif dtype == np.uint16:\n padding_value = np.uint16(0)\n elif dtype == np.float32 or dtype == np.float64:\n padding_value = 0.0\n else:\n padding_value = 0\n assert padding_value is not None\n return padding_value\n\n\n# TODO(afrozm): Use np.pad instead and make jittable?\ndef pad_trajectories(trajectories, boundary=20):\n \"\"\"Pad trajectories to a bucket length that is a multiple of boundary.\n\n Args:\n trajectories: list[(observation, actions, rewards)], where each observation\n is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the\n length of the list being B (batch size).\n boundary: int, bucket length, the actions and rewards are padded to integer\n multiples of boundary.\n\n Returns:\n tuple: (padding lengths, reward_mask, padded_observations, padded_actions,\n padded_rewards) where padded_observations is shaped (B, T+1) + OBS and\n padded_actions, padded_rewards & 
reward_mask are shaped (B, T).\n Where T is max(t) rounded up to an integer multiple of boundary.\n padded_length is how much padding we've added and\n reward_mask is 1s for actual rewards and 0s for the padding.\n \"\"\"\n\n # Let's compute max(t) over all trajectories.\n t_max = max(r.shape[0] for (_, _, r, _) in trajectories)\n\n # t_max is rounded to the next multiple of `boundary`\n boundary = int(boundary)\n bucket_length = boundary * int(np.ceil(float(t_max) / boundary))\n\n # So all obs will be padded to t_max + 1 and actions and rewards to t_max.\n padded_observations = []\n padded_actions = []\n padded_rewards = []\n padded_infos = collections.defaultdict(list)\n padded_lengths = []\n reward_masks = []\n\n for (o, a, r, i) in trajectories:\n # Determine the amount to pad, this holds true for obs, actions and rewards.\n num_to_pad = bucket_length + 1 - o.shape[0]\n padded_lengths.append(num_to_pad)\n if num_to_pad == 0:\n padded_observations.append(o)\n padded_actions.append(a)\n padded_rewards.append(r)\n reward_masks.append(onp.ones_like(r, dtype=np.int32))\n if i:\n for k, v in i.items():\n padded_infos[k].append(v)\n continue\n\n # First pad observations.\n padding_config = tuple([(0, num_to_pad, 0)] + [(0, 0, 0)] * (o.ndim - 1))\n\n padding_value = get_padding_value(o.dtype)\n action_padding_value = get_padding_value(a.dtype)\n reward_padding_value = get_padding_value(r.dtype)\n\n padded_obs = lax.pad(o, padding_value, padding_config)\n padded_observations.append(padded_obs)\n\n # Now pad actions and rewards.\n assert a.ndim == 1 and r.ndim == 1\n padding_config = ((0, num_to_pad, 0),)\n\n padded_action = lax.pad(a, action_padding_value, padding_config)\n padded_actions.append(padded_action)\n padded_reward = lax.pad(r, reward_padding_value, padding_config)\n padded_rewards.append(padded_reward)\n\n # Also create the mask to use later.\n reward_mask = onp.ones_like(r, dtype=np.int32)\n reward_masks.append(lax.pad(reward_mask, 0, padding_config))\n\n if i:\n for k, v in i.items():\n # Create a padding configuration for this value.\n padding_config = [(0, num_to_pad, 0)] + [(0, 0, 0)] * (v.ndim - 1)\n padded_infos[k].append(lax.pad(v, 0.0, tuple(padding_config)))\n\n # Now stack these padded_infos if they exist.\n stacked_padded_infos = None\n if padded_infos:\n stacked_padded_infos = {k: np.stack(v) for k, v in padded_infos.items()}\n\n return padded_lengths, np.stack(reward_masks), np.stack(\n padded_observations), np.stack(padded_actions), np.stack(\n padded_rewards), stacked_padded_infos\n\n\ndef rewards_to_go(rewards, mask, gamma=0.99):\n r\"\"\"Computes rewards to go.\n\n Reward to go is defined as follows, the discounted reward that we have to\n yet collect, going forward from this point, i.e.:\n\n r2g_t = \\sum_{l=0}^{\\infty} (\\gamma^{l} * reward_{t+l})\n\n Args:\n rewards: np.ndarray of shape (B, T) of rewards.\n mask: np.ndarray of shape (B, T) of mask for the rewards.\n gamma: float, discount factor.\n\n Returns:\n rewards to go, np.ndarray of shape (B, T).\n \"\"\"\n B, T = rewards.shape # pylint: disable=invalid-name,unused-variable\n\n masked_rewards = rewards * mask # (B, T)\n\n # The lax.scan version of this is slow, but we still show it here for\n # completeness.\n # rewards_rev = np.flip(masked_rewards, axis=1) # (B, T) flipped on time.\n # rrt = np.transpose(rewards_rev) # (T, B) transpose to scan over time.\n #\n # def discounting_add(carry, reward):\n # x = reward + (gamma * carry)\n # return x, x\n #\n # _, ys = lax.scan(discounting_add,\n # 
np.zeros_like(rrt[0], dtype=np.float32),\n # rrt.astype(np.float32))\n #\n # # ys is (T, B) and T is in reverse order.\n # return np.flip(np.transpose(ys), axis=1)\n\n # We use the following recurrence relation, derived from the equation above:\n #\n # r2g[t+1] = (r2g[t] - r[t]) / gamma\n #\n # This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..\n #\n # **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0\n # and gamma < 1.0, so the division keeps increasing.\n #\n # So we just run the recurrence in reverse, i.e.\n #\n # r2g[t] = r[t] + (gamma*r2g[t+1])\n #\n # This is much better, but might have lost updates since the (small) rewards\n # at earlier time-steps may get added to a (very?) large sum.\n\n # Compute r2g_{T-1} at the start and then compute backwards in time.\n r2gs = [masked_rewards[:, -1]]\n\n # Go from T-2 down to 0.\n for t in reversed(range(T - 1)):\n r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))\n\n # The list should have length T.\n assert T == len(r2gs)\n\n # First we stack them in the correct way to make it (B, T), but these are\n # still from newest (T-1) to oldest (0), so then we flip it on time axis.\n return np.flip(np.stack(r2gs, axis=1), axis=1)\n\n\n@jit\ndef value_loss_given_predictions(value_prediction,\n rewards,\n reward_mask,\n gamma=0.99,\n epsilon=0.2,\n value_prediction_old=None):\n \"\"\"Computes the value loss given the prediction of the value function.\n\n Args:\n value_prediction: np.ndarray of shape (B, T+1, 1)\n rewards: np.ndarray of shape (B, T) of rewards.\n reward_mask: np.ndarray of shape (B, T), the mask over rewards.\n gamma: float, discount factor.\n epsilon: float, clip-fraction, used if value_value_prediction_old isn't None\n value_prediction_old: np.ndarray of shape (B, T+1, 1) of value predictions\n using the old parameters. If provided, we incorporate this in the loss as\n well. This is from the OpenAI baselines implementation.\n\n Returns:\n The average L2 value loss, averaged over instances where reward_mask is 1.\n \"\"\"\n\n B, T = rewards.shape # pylint: disable=invalid-name\n assert (B, T) == reward_mask.shape\n assert (B, T + 1, 1) == value_prediction.shape\n\n value_prediction = np.squeeze(value_prediction, axis=2) # (B, T+1)\n value_prediction = value_prediction[:, :-1] * reward_mask # (B, T)\n r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, T)\n loss = (value_prediction - r2g)**2\n\n # From the baselines implementation.\n if value_prediction_old is not None:\n value_prediction_old = np.squeeze(value_prediction_old, axis=2) # (B, T+1)\n value_prediction_old = value_prediction_old[:, :-1] * reward_mask # (B, T)\n\n v_clipped = value_prediction_old + np.clip(\n value_prediction - value_prediction_old, -epsilon, epsilon)\n v_clipped_loss = (v_clipped - r2g)**2\n loss = np.maximum(v_clipped_loss, loss)\n\n # Take an average on only the points where mask != 0.\n return np.sum(loss) / np.sum(reward_mask)\n\n\ndef deltas(predicted_values, rewards, mask, gamma=0.99):\n r\"\"\"Computes TD-residuals from V(s) and rewards.\n\n Where a `delta`, i.e. a td-residual is defined as:\n\n delta_{b,t} = r_{b,t} + \\gamma * v_{b,t+1} - v_{b,t}.\n\n Args:\n predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was\n squeezed. 
These represent V(s_bt) for b < B and t < T+1\n rewards: ndarray of shape (B, T) of rewards.\n mask: ndarray of shape (B, T) of mask for rewards.\n gamma: float, discount factor.\n\n Returns:\n ndarray of shape (B, T) of one-step TD-residuals.\n \"\"\"\n\n # Predicted values at time t, cutting off the last to have shape (B, T).\n predicted_values_bt = predicted_values[:, :-1]\n # Predicted values at time t+1, by cutting off the first to have shape (B, T)\n predicted_values_btplus1 = predicted_values[:, 1:]\n # Return the deltas as defined above.\n return (rewards +\n (gamma * predicted_values_btplus1) - predicted_values_bt) * mask\n\n\ndef gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):\n r\"\"\"Computes the GAE advantages given the one step TD-residuals.\n\n The formula for a GAE advantage estimator is as follows:\n\n A_{bt} = \\sum_{l=0}^{\\infty}(\\gamma * \\lambda)^{l}(\\delta_{b,t+l}).\n\n Internally we just call rewards_to_go, since it is the same computation.\n\n Args:\n td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.\n mask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the\n case that the `td_deltas` are already masked correctly since they are\n produced by `deltas(...)`\n lambda_: float, lambda parameter for GAE estimators.\n gamma: float, lambda parameter for GAE estimators.\n\n Returns:\n GAE advantage estimates.\n \"\"\"\n\n return rewards_to_go(td_deltas, mask, lambda_ * gamma)\n\n\ndef chosen_probabs(probab_observations, actions):\n \"\"\"Picks out the probabilities of the actions along batch and time-steps.\n\n Args:\n probab_observations: ndarray of shape `[B, T+1, A]`, where\n probab_observations[b, t, i] contains the log-probability of action = i at\n the t^th time-step in the b^th trajectory.\n actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which\n action was chosen in the b^th trajectory's t^th time-step.\n\n Returns:\n `[B, T]` ndarray with the log-probabilities of the chosen actions.\n \"\"\"\n B, T = actions.shape # pylint: disable=invalid-name\n assert (B, T + 1) == probab_observations.shape[:2]\n return probab_observations[np.arange(B)[:, None], np.arange(T), actions]\n\n\ndef compute_probab_ratios(p_new, p_old, actions, reward_mask):\n \"\"\"Computes the probability ratios for each time-step in a trajectory.\n\n Args:\n p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy\n network assigns to all the actions at each time-step in each batch using\n the old parameters.\n p_old: ndarray of shape [B, T+1, A], same as above, but using old policy\n network parameters.\n actions: ndarray of shape [B, T] where each element is from [0, A).\n reward_mask: ndarray of shape [B, T] masking over probabilities.\n\n Returns:\n probab_ratios: ndarray of shape [B, T], where\n probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}\n \"\"\"\n\n B, T = actions.shape # pylint: disable=invalid-name\n assert (B, T + 1) == p_old.shape[:2]\n assert (B, T + 1) == p_new.shape[:2]\n\n logp_old = chosen_probabs(p_old, actions)\n logp_new = chosen_probabs(p_new, actions)\n\n assert (B, T) == logp_old.shape\n assert (B, T) == logp_new.shape\n\n # Since these are log-probabilities, we just subtract them.\n probab_ratios = np.exp(logp_new - logp_old) * reward_mask\n assert (B, T) == probab_ratios.shape\n return probab_ratios\n\n\ndef clipped_probab_ratios(probab_ratios, epsilon=0.2):\n return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon)\n\n\ndef clipped_objective(probab_ratios, 
advantages, reward_mask, epsilon=0.2):\n return np.minimum(\n probab_ratios * advantages,\n clipped_probab_ratios(probab_ratios, epsilon=epsilon) *\n advantages) * reward_mask\n\n\n@jit\ndef ppo_loss_given_predictions(log_probab_actions_new,\n log_probab_actions_old,\n value_predictions_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.2):\n \"\"\"PPO objective, with an eventual minus sign, given predictions.\"\"\"\n B, T = padded_rewards.shape # pylint: disable=invalid-name\n assert (B, T) == padded_actions.shape\n assert (B, T) == reward_mask.shape\n\n _, _, A = log_probab_actions_old.shape # pylint: disable=invalid-name\n assert (B, T + 1, 1) == value_predictions_old.shape\n assert (B, T + 1, A) == log_probab_actions_old.shape\n assert (B, T + 1, A) == log_probab_actions_new.shape\n\n # (B, T)\n td_deltas = deltas(\n np.squeeze(value_predictions_old, axis=2), # (B, T+1)\n padded_rewards,\n reward_mask,\n gamma=gamma)\n\n # (B, T)\n advantages = gae_advantages(\n td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)\n\n # Normalize the advantages.\n advantages = (advantages - np.mean(advantages)) / np.std(advantages)\n\n # (B, T)\n ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old,\n padded_actions, reward_mask)\n assert (B, T) == ratios.shape\n\n # (B, T)\n objective = clipped_objective(\n ratios, advantages, reward_mask, epsilon=epsilon)\n assert (B, T) == objective.shape\n\n # ()\n average_objective = np.sum(objective) / np.sum(reward_mask)\n\n # Loss is negative objective.\n return -average_objective\n\n\n@jit\ndef combined_loss_given_predictions(log_probab_actions_new,\n log_probab_actions_old,\n value_prediction_new,\n value_prediction_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.2,\n c1=1.0,\n c2=0.01):\n \"\"\"Computes the combined (clipped loss + value loss) given predictions.\"\"\"\n loss_value = value_loss_given_predictions(\n value_prediction_new,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n value_prediction_old=value_prediction_old,\n epsilon=epsilon)\n loss_ppo = ppo_loss_given_predictions(\n log_probab_actions_new,\n log_probab_actions_old,\n value_prediction_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon)\n entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask)\n return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo,\n loss_value, entropy_bonus)\n\n\[email protected](jit, static_argnums=(3,))\ndef combined_loss(new_params,\n log_probab_actions_old,\n value_predictions_old,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.2,\n c1=1.0,\n c2=0.01,\n rng=None):\n \"\"\"Computes the combined (clipped loss + value loss) given observations.\"\"\"\n log_probab_actions_new, value_predictions_new = policy_and_value_net_apply(\n padded_observations, new_params, rng=rng)\n\n # (combined_loss, ppo_loss, value_loss, entropy_bonus)\n return combined_loss_given_predictions(\n log_probab_actions_new,\n log_probab_actions_old,\n value_predictions_new,\n value_predictions_old,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon,\n c1=c1,\n c2=c2)\n\n\[email protected](jit, static_argnums=(2, 3, 4))\ndef policy_and_value_opt_step(i,\n opt_state,\n opt_update,\n get_params,\n policy_and_value_net_apply,\n 
log_probab_actions_old,\n value_predictions_old,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n c1=1.0,\n c2=0.01,\n gamma=0.99,\n lambda_=0.95,\n epsilon=0.1,\n rng=None):\n \"\"\"Policy and Value optimizer step.\"\"\"\n\n # Combined loss function given the new params.\n def policy_and_value_loss(params):\n \"\"\"Returns the combined loss given just parameters.\"\"\"\n (loss, _, _, _) = combined_loss(\n params,\n log_probab_actions_old,\n value_predictions_old,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n c1=c1,\n c2=c2,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon,\n rng=rng)\n return loss\n\n new_params = get_params(opt_state)\n g = grad(policy_and_value_loss)(new_params)\n # TODO(afrozm): Maybe clip gradients?\n return opt_update(i, g, opt_state)\n\n\ndef get_time(t1, t2=None):\n if t2 is None:\n t2 = time.time()\n return round((t2 - t1) * 1000, 2)\n\n\ndef approximate_kl(log_prob_new, log_prob_old, mask):\n \"\"\"Computes the approximate KL divergence between the old and new log-probs.\n\n Args:\n log_prob_new: (B, T+1, A) log probs new\n log_prob_old: (B, T+1, A) log probs old\n mask: (B, T)\n\n Returns:\n Approximate KL.\n \"\"\"\n diff = log_prob_old - log_prob_new\n # Cut the last time-step out.\n diff = diff[:, :-1]\n # Mask out the irrelevant part.\n diff *= mask[:, :, np.newaxis] # make mask (B, T, 1)\n # Average on non-masked part.\n return np.sum(diff) / np.sum(mask)\n\n\ndef masked_entropy(log_probs, mask):\n \"\"\"Computes the entropy for the given log-probs.\n\n Args:\n log_probs: (B, T+1, A) log probs\n mask: (B, T) mask.\n\n Returns:\n Entropy.\n \"\"\"\n # Cut the last time-step out.\n lp = log_probs[:, :-1]\n # Mask out the irrelevant part.\n lp *= mask[:, :, np.newaxis] # make mask (B, T, 1)\n p = np.exp(lp) * mask[:, :, np.newaxis] # (B, T, 1)\n # Average on non-masked part and take negative.\n return -(np.sum(lp * p) / np.sum(mask))\n\n\ndef evaluate_policy(eval_env,\n get_predictions,\n temperatures,\n max_timestep=20000,\n n_evals=1,\n len_history_for_policy=32,\n rng=None):\n \"\"\"Evaluate the policy.\"\"\"\n\n processed_reward_sums = collections.defaultdict(list)\n raw_reward_sums = collections.defaultdict(list)\n for eval_rng in jax_random.split(rng, num=n_evals):\n for temperature in temperatures:\n trajs, _, _ = env_problem_utils.play_env_problem_with_policy(\n eval_env,\n get_predictions,\n num_trajectories=eval_env.batch_size,\n max_timestep=max_timestep,\n reset=True,\n policy_sampling=env_problem_utils.GUMBEL_SAMPLING,\n temperature=temperature,\n rng=eval_rng,\n len_history_for_policy=len_history_for_policy)\n processed_reward_sums[temperature].extend(sum(traj[2]) for traj in trajs)\n raw_reward_sums[temperature].extend(sum(traj[3]) for traj in trajs)\n\n # Return the mean and standard deviation for each temperature.\n def compute_stats(reward_dict):\n return {\n temperature: {\"mean\": onp.mean(rewards), \"std\": onp.std(rewards)}\n for (temperature, rewards) in reward_dict.items()\n }\n return {\n \"processed\": compute_stats(processed_reward_sums),\n \"raw\": compute_stats(raw_reward_sums),\n }\n\n\ndef maybe_restore_params(output_dir, policy_and_value_net_params):\n \"\"\"Maybe restore the params from the checkpoint dir.\n\n Args:\n output_dir: Directory where saved model checkpoints are stored.\n policy_and_value_net_params: Default params, returned if model is'nt found.\n\n Returns:\n triple (restore (bool), params, iter(int)) where iter is the epoch 
from\n which we restored the params, 0 is restore = False.\n \"\"\"\n model_files = gfile.glob(os.path.join(output_dir, \"model-??????.pkl\"))\n for model_file in reversed(sorted(model_files)):\n logging.info(\"Trying to restore model from %s\", model_file)\n try:\n with gfile.GFile(model_file, \"rb\") as f:\n loaded_policy_and_value_net_params = pickle.load(f)\n policy_and_value_net_params = loaded_policy_and_value_net_params\n model_file_basename = os.path.basename(model_file) # model-??????.pkl\n i = int(filter(str.isdigit, model_file_basename))\n return True, policy_and_value_net_params, i\n except EOFError as e:\n logging.error(\"Unable to load model from: %s with %s\", model_file, e)\n # Try an older version.\n continue\n return False, policy_and_value_net_params, 0\n\n\ndef write_eval_reward_summaries(reward_stats_by_mode, summary_writer, epoch):\n \"\"\"Writes evaluation reward statistics to summary and logs them.\n\n Args:\n reward_stats_by_mode: Nested dict of structure:\n {\n \"raw\": {\n <temperature 1>: {\n \"mean\": <reward mean>,\n \"std\": <reward std>,\n },\n <temperature 2>: ...\n },\n \"processed\": ...\n }\n summary_writer: jaxboard.SummaryWriter.\n epoch: Current epoch number.\n \"\"\"\n for (reward_mode, reward_stats_by_temp) in reward_stats_by_mode.items():\n for (temperature, reward_stats) in reward_stats_by_temp.items():\n for (stat_name, stat) in reward_stats.items():\n summary_writer.scalar(\n \"eval/{reward_mode}_reward_{stat_name}/\"\n \"temperature_{temperature}\".format(reward_mode=reward_mode,\n stat_name=stat_name,\n temperature=temperature),\n stat, step=epoch)\n logging.info(\"Epoch [% 6d] Policy Evaluation (%s reward) \"\n \"[temperature %.2f] = %10.2f (+/- %.2f)\",\n epoch, reward_mode, temperature,\n reward_stats[\"mean\"], reward_stats[\"std\"])\n\n\[email protected](blacklist=[\"output_dir\"])\ndef training_loop(\n env,\n eval_env,\n env_name,\n policy_and_value_net_fn,\n policy_and_value_optimizer_fn,\n output_dir,\n epochs=EPOCHS,\n n_optimizer_steps=N_OPTIMIZER_STEPS,\n print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,\n target_kl=0.01,\n boundary=20,\n max_timestep=None,\n max_timestep_eval=20000,\n random_seed=None,\n gamma=GAMMA,\n lambda_=LAMBDA,\n epsilon=EPSILON,\n c1=1.0,\n c2=0.01,\n eval_every_n=1000,\n done_frac_for_policy_save=0.5,\n enable_early_stopping=True,\n n_evals=1,\n len_history_for_policy=4,\n eval_temperatures=(1.0, 0.5),\n):\n \"\"\"Runs the training loop for PPO, with fixed policy and value nets.\n\n Args:\n env: gym.Env to use for training.\n eval_env: gym.Env to use for evaluation.\n env_name: Name of the environment.\n policy_and_value_net_fn: Function defining the policy and value network.\n policy_and_value_optimizer_fn: Function defining the optimizer.\n output_dir: Output dir.\n epochs: Number of epochs to run for.\n n_optimizer_steps: Number of optimizer steps.\n print_every_optimizer_steps: How often to log during the policy optimization\n process.\n target_kl: Policy iteration early stopping.\n boundary: We pad trajectories at integer multiples of this number.\n max_timestep: If set to an integer, maximum number of time-steps in\n a trajectory. Used in the collect procedure.\n max_timestep_eval: If set to an integer, maximum number of time-steps in an\n evaluation trajectory. 
Used in the collect procedure.\n random_seed: Random seed.\n gamma: Reward discount factor.\n lambda_: N-step TD-error discount factor in GAE.\n epsilon: Random action probability in epsilon-greedy sampling.\n c1: Value loss coefficient.\n c2: Entropy loss coefficient.\n eval_every_n: How frequently to eval the policy.\n done_frac_for_policy_save: Fraction of the trajectories that should be done\n to checkpoint the policy.\n enable_early_stopping: Whether to enable early stopping.\n n_evals: Number of times to evaluate.\n len_history_for_policy: How much of history to give to the policy.\n eval_temperatures: Sequence of temperatures to try for categorical sampling\n during evaluation.\n \"\"\"\n gfile.makedirs(output_dir)\n\n # Create summary writers and history.\n train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, \"train\"))\n timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, \"timing\"))\n eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, \"eval\"))\n\n train_sw.text(\"env_name\", env_name)\n timing_sw.text(\"env_name\", env_name)\n eval_sw.text(\"env_name\", env_name)\n\n jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)\n\n # Batch Observations Shape = [1, 1] + OBS, because we will eventually call\n # policy and value networks on shape [B, T] +_OBS\n batch_observations_shape = (1, 1) + env.observation_space.shape\n observations_dtype = env.observation_space.dtype\n\n assert isinstance(env.action_space, gym.spaces.Discrete)\n n_actions = env.action_space.n\n\n jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)\n\n # Initialize the policy and value network.\n policy_and_value_net_params, policy_and_value_net_apply = (\n policy_and_value_net_fn(key1, batch_observations_shape,\n observations_dtype, n_actions))\n\n # Maybe restore the policy params. 
If there is nothing to restore, then\n # iteration = 0 and policy_and_value_net_params are returned as is.\n restore, policy_and_value_net_params, iteration = (\n maybe_restore_params(output_dir, policy_and_value_net_params))\n\n if restore:\n logging.info(\"Restored parameters from iteration [%d]\", iteration)\n # We should start from the next iteration.\n iteration += 1\n\n policy_and_value_net_apply = jit(policy_and_value_net_apply)\n\n # Initialize the optimizers.\n policy_and_value_optimizer = (\n policy_and_value_optimizer_fn(policy_and_value_net_params))\n (policy_and_value_opt_state, policy_and_value_opt_update,\n policy_and_value_get_params) = policy_and_value_optimizer\n\n n_trajectories_done = 0\n last_saved_at = 0\n\n logging.info(\"Starting the PPO training loop.\")\n for i in range(iteration, epochs):\n epoch_start_time = time.time()\n\n # Params we'll use to collect the trajectories.\n policy_and_value_net_params = policy_and_value_get_params(\n policy_and_value_opt_state)\n\n # A function to get the policy and value predictions.\n def get_predictions(observations, rng=None):\n \"\"\"Returns log-probs, value predictions and key back.\"\"\"\n key, key1 = jax_random.split(rng, num=2)\n\n log_probs, value_preds = policy_and_value_net_apply(\n observations, policy_and_value_net_params, rng=key1)\n\n return log_probs, value_preds, key\n\n # Evaluate the policy.\n policy_eval_start_time = time.time()\n if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):\n jax_rng_key, key = jax_random.split(jax_rng_key, num=2)\n\n logging.vlog(1, \"Epoch [% 6d] evaluating policy.\", i)\n\n reward_stats = evaluate_policy(\n eval_env,\n get_predictions,\n temperatures=eval_temperatures,\n max_timestep=max_timestep_eval,\n n_evals=n_evals,\n len_history_for_policy=len_history_for_policy,\n rng=key)\n write_eval_reward_summaries(reward_stats, eval_sw, epoch=i)\n policy_eval_time = get_time(policy_eval_start_time)\n\n trajectory_collection_start_time = time.time()\n logging.vlog(1, \"Epoch [% 6d] collecting trajectories.\", i)\n jax_rng_key, key = jax_random.split(jax_rng_key)\n trajs, n_done, timing_info = collect_trajectories(\n env,\n policy_fn=get_predictions,\n n_trajectories=env.batch_size,\n max_timestep=max_timestep,\n rng=key,\n len_history_for_policy=len_history_for_policy,\n reset=(i == 0) or restore,\n epsilon=(10.0 / (i + 10.0))) # this is a different epsilon.\n trajectory_collection_time = get_time(trajectory_collection_start_time)\n\n logging.vlog(1, \"Collecting trajectories took %0.2f msec.\",\n trajectory_collection_time)\n\n avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)\n max_reward = max(np.sum(traj[2]) for traj in trajs)\n min_reward = min(np.sum(traj[2]) for traj in trajs)\n\n train_sw.scalar(\"train/reward_mean_truncated\", avg_reward, step=i)\n\n logging.vlog(1, \"Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s\",\n avg_reward, max_reward, min_reward,\n [float(np.sum(traj[2])) for traj in trajs])\n\n logging.vlog(1,\n \"Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]\",\n float(sum(len(traj[0]) for traj in trajs)) / len(trajs),\n max(len(traj[0]) for traj in trajs),\n min(len(traj[0]) for traj in trajs))\n logging.vlog(2, \"Trajectory Lengths: %s\", [len(traj[0]) for traj in trajs])\n\n padding_start_time = time.time()\n (_, reward_mask, padded_observations, padded_actions,\n padded_rewards, padded_infos) = pad_trajectories(\n trajs, boundary=boundary)\n padding_time = get_time(padding_start_time)\n\n logging.vlog(1, \"Padding 
trajectories took %0.2f msec.\",\n get_time(padding_start_time))\n logging.vlog(1, \"Padded Observations' shape [%s]\",\n str(padded_observations.shape))\n logging.vlog(1, \"Padded Actions' shape [%s]\", str(padded_actions.shape))\n logging.vlog(1, \"Padded Rewards' shape [%s]\", str(padded_rewards.shape))\n\n # Some assertions.\n B, T = padded_actions.shape # pylint: disable=invalid-name\n assert (B, T) == padded_rewards.shape\n assert (B, T) == reward_mask.shape\n assert (B, T + 1) == padded_observations.shape[:2]\n assert (B, T + 1) + env.observation_space.shape == padded_observations.shape\n\n log_prob_recompute_start_time = time.time()\n assert (\"log_prob_actions\" in padded_infos and\n \"value_predictions\" in padded_infos)\n # These are the actual log-probabs and value predictions seen while picking\n # the actions.\n actual_log_probabs_traj = padded_infos[\"log_prob_actions\"]\n actual_value_predictions_traj = padded_infos[\"value_predictions\"]\n\n assert (B, T) == actual_log_probabs_traj.shape[:2]\n A = actual_log_probabs_traj.shape[2] # pylint: disable=invalid-name\n assert (B, T, 1) == actual_value_predictions_traj.shape\n\n # TODO(afrozm): log-probabs doesn't need to be (B, T+1, A) it can do with\n # (B, T, A), so make that change throughout.\n\n # NOTE: We don't have the log-probabs and value-predictions for the last\n # observation, so we re-calculate for everything, but use the original ones\n # for all but the last time-step.\n jax_rng_key, key = jax_random.split(jax_rng_key)\n log_probabs_traj, value_predictions_traj, _ = get_predictions(\n padded_observations, rng=key)\n\n assert (B, T + 1, A) == log_probabs_traj.shape\n assert (B, T + 1, 1) == value_predictions_traj.shape\n\n # Concatenate the last time-step's log-probabs and value predictions to the\n # actual log-probabs and value predictions and use those going forward.\n log_probabs_traj = np.concatenate(\n (actual_log_probabs_traj, log_probabs_traj[:, -1:, :]), axis=1)\n value_predictions_traj = np.concatenate(\n (actual_value_predictions_traj, value_predictions_traj[:, -1:, :]),\n axis=1)\n\n log_prob_recompute_time = get_time(log_prob_recompute_start_time)\n\n # Linear annealing from 0.1 to 0.0\n # epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -\n # (i /\n # (epochs - 1)))\n\n # Constant epsilon.\n epsilon_schedule = epsilon\n\n # Compute value and ppo losses.\n jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)\n logging.vlog(2, \"Starting to compute P&V loss.\")\n loss_compute_start_time = time.time()\n cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (\n combined_loss(\n policy_and_value_net_params,\n log_probabs_traj,\n value_predictions_traj,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon_schedule,\n c1=c1,\n c2=c2,\n rng=key1))\n loss_compute_time = get_time(loss_compute_start_time)\n logging.vlog(\n 1,\n \"Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.\",\n cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,\n get_time(loss_compute_start_time))\n\n jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)\n logging.vlog(1, \"Policy and Value Optimization\")\n optimization_start_time = time.time()\n keys = jax_random.split(key1, num=n_optimizer_steps)\n for j in range(n_optimizer_steps):\n k1, k2, k3 = jax_random.split(keys[j], num=3)\n t = time.time()\n # Update the optimizer state.\n policy_and_value_opt_state = 
policy_and_value_opt_step(\n j,\n policy_and_value_opt_state,\n policy_and_value_opt_update,\n policy_and_value_get_params,\n policy_and_value_net_apply,\n log_probabs_traj,\n value_predictions_traj,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n c1=c1,\n c2=c2,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon_schedule,\n rng=k1)\n\n # Compute the approx KL for early stopping.\n new_policy_and_value_net_params = policy_and_value_get_params(\n policy_and_value_opt_state)\n\n log_probab_actions_new, _ = policy_and_value_net_apply(\n padded_observations, new_policy_and_value_net_params, rng=k2)\n\n approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,\n reward_mask)\n\n early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl\n if early_stopping:\n logging.vlog(\n 1, \"Early stopping policy and value optimization at iter: %d, \"\n \"with approx_kl: %0.2f\", j, approx_kl)\n # We don't return right-away, we want the below to execute on the last\n # iteration.\n\n t2 = time.time()\n if (((j + 1) % print_every_optimizer_steps == 0) or\n (j == n_optimizer_steps - 1) or early_stopping):\n # Compute and log the loss.\n (loss_combined, loss_ppo, loss_value, entropy_bonus) = (\n combined_loss(\n new_policy_and_value_net_params,\n log_probabs_traj,\n value_predictions_traj,\n policy_and_value_net_apply,\n padded_observations,\n padded_actions,\n padded_rewards,\n reward_mask,\n gamma=gamma,\n lambda_=lambda_,\n epsilon=epsilon_schedule,\n c1=c1,\n c2=c2,\n rng=k3))\n logging.vlog(1, \"One Policy and Value grad desc took: %0.2f msec\",\n get_time(t, t2))\n logging.vlog(\n 1, \"Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->\"\n \" [%10.2f(%10.2f,%10.2f,%10.2f)]\", cur_combined_loss, loss_combined,\n loss_value, loss_ppo, entropy_bonus)\n\n if early_stopping:\n break\n\n optimization_time = get_time(optimization_start_time)\n\n logging.vlog(\n 1, \"Total Combined Loss reduction [%0.2f]%%\",\n (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))\n\n # Save parameters every time we see the end of at least a fraction of batch\n # number of trajectories that are done (not completed -- completed includes\n # truncated and done).\n # Also don't save too frequently, enforce a minimum gap.\n # Or if this is the last iteration.\n policy_save_start_time = time.time()\n n_trajectories_done += n_done\n # TODO(afrozm): Refactor to trax.save_state.\n if (((n_trajectories_done >= done_frac_for_policy_save * env.batch_size) and\n (i - last_saved_at > eval_every_n) and\n (((i + 1) % eval_every_n == 0))) or (i == epochs - 1)):\n logging.vlog(1, \"Epoch [% 6d] saving model.\", i)\n old_model_files = gfile.glob(os.path.join(output_dir, \"model-??????.pkl\"))\n params_file = os.path.join(output_dir, \"model-%06d.pkl\" % i)\n with gfile.GFile(params_file, \"wb\") as f:\n pickle.dump(policy_and_value_net_params, f)\n # Remove the old model files.\n for path in old_model_files:\n gfile.remove(path)\n # Reset this number.\n n_trajectories_done = 0\n last_saved_at = i\n policy_save_time = get_time(policy_save_start_time)\n\n epoch_time = get_time(epoch_start_time)\n\n logging.info(\n \"Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined\"\n \" Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]\", i, min_reward,\n max_reward, avg_reward, loss_combined, loss_value, loss_ppo,\n entropy_bonus)\n\n timing_dict = {\n \"epoch\": epoch_time,\n \"policy_eval\": policy_eval_time,\n \"trajectory_collection\": trajectory_collection_time,\n 
\"padding\": padding_time,\n \"log_prob_recompute\": log_prob_recompute_time,\n \"loss_compute\": loss_compute_time,\n \"optimization\": optimization_time,\n \"policy_save\": policy_save_time,\n }\n\n timing_dict.update(timing_info)\n\n for k, v in timing_dict.items():\n timing_sw.scalar(\"timing/%s\" % k, v, step=i)\n\n max_key_len = max(len(k) for k in timing_dict)\n timing_info_list = [\n \"%s : % 10.2f\" % (k.rjust(max_key_len + 1), v)\n for k, v in sorted(timing_dict.items())\n ]\n logging.info(\"Epoch [% 6d], Timings: \\n%s\", i, \"\\n\".join(timing_info_list))\n\n # Reset restore.\n restore = False\n\n # Flush summary writers once in a while.\n if (i + 1) % 1000 == 0 or i == epochs - 1:\n train_sw.flush()\n timing_sw.flush()\n eval_sw.flush()\n",
"import keras\nimport numpy as np\nfrom data.vocab import TextEncoder\n\n\ndef _get_pos_encoding_matrix(max_len: int, d_emb: int) -> np.array:\n pos_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)] if pos != 0 else np.zeros(d_emb) for pos in\n range(max_len)], dtype=np.float32)\n pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i\n pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1\n return pos_enc\n\n\n# NOTE that for vocab_size you should also add special_count\nclass Embedding(keras.layers.Layer):\n def __init__(self, output_dim: int = 768, dropout: float = 0.1, vocab_size: int = 30000 + TextEncoder.SPECIAL_COUNT,\n max_len: int = 512, trainable_pos_embedding: bool = True,\n use_one_dropout: bool = False, **kwargs):\n super().__init__(**kwargs)\n self.max_len = max_len\n self.use_one_dropout = use_one_dropout\n self.output_dim = output_dim\n self.dropout = dropout\n self.vocab_size = vocab_size\n self.trainable_pos_embedding = trainable_pos_embedding\n\n self.segment_emb = keras.layers.Embedding(TextEncoder.NUM_SEGMENTS, output_dim, input_length=max_len,\n name='SegmentEmbedding')\n if not trainable_pos_embedding:\n self.pos_emb = keras.layers.Embedding(max_len, output_dim, trainable=False, input_length=max_len,\n name='PositionEmbedding',\n weights=[_get_pos_encoding_matrix(max_len, output_dim)])\n else:\n self.pos_emb = keras.layers.Embedding(max_len, output_dim, input_length=max_len, name='PositionEmbedding')\n self.token_emb = keras.layers.Embedding(vocab_size, output_dim, input_length=max_len, name='TokenEmbedding')\n self.embedding_dropout = keras.layers.Dropout(dropout, name='EmbeddingDropOut')\n self.add_embeddings = keras.layers.Add(name='AddEmbeddings')\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][0], input_shape[0][1], self.output_dim\n\n def get_config(self):\n config = {\n 'max_len': self.max_len,\n 'use_one_dropout': self.use_one_dropout,\n 'output_dim': self.output_dim,\n 'dropout': self.dropout,\n 'vocab_size': self.vocab_size,\n 'trainable_pos_embedding': self.trainable_pos_embedding,\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def __call__(self, inputs, **kwargs):\n tokens, segment_ids, pos_ids = inputs\n segment_embedding = self.segment_emb(segment_ids)\n pos_embedding = self.pos_emb(pos_ids)\n token_embedding = self.token_emb(tokens)\n if self.use_one_dropout:\n return self.embedding_dropout(self.add_embeddings([segment_embedding, pos_embedding, token_embedding]))\n return self.add_embeddings([self.embedding_dropout(segment_embedding), self.embedding_dropout(pos_embedding),\n self.embedding_dropout(token_embedding)])\n",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport os\nimport modeling\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"buckets\", \"\", \"oss buckets\")\nflags.DEFINE_integer(\"task_index\", 0, \"Worker task index\")\nflags.DEFINE_string(\"ps_hosts\", \"\", \"ps hosts\")\nflags.DEFINE_string(\"worker_hosts\", \"\", \"worker hosts\")\n\n## Required parameters\nflags.DEFINE_string(\n \"tfrecord_lst\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. 
\"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\nflags.DEFINE_integer(\"num_gpus\", 8, \"Total batch size for training.\")\nflags.DEFINE_integer(\"num_accumulated_batches\", 1, \"Total batch size for training.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\nflags.DEFINE_float(\"weight_decay_rate\", 0.9, \"The initial learning rate for Adam.\")\nflags.DEFINE_float(\"warmup_proportion\", 0.1, \"The initial learning rate for Adam.\")\nflags.DEFINE_float(\"lr_decay_power\", 1.0, \"The initial learning rate for Adam.\")\nflags.DEFINE_float(\"layerwise_lr_decay_power\", 0.0, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\"train_examples\", 2321511.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\"log_step_count_steps\", 100,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\"num_labels\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\"keep_checkpoint_max\", 10,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"if_multisigmoid\", True, \"Whether to use TPU or GPU/CPU.\")\nflags.DEFINE_bool(\"if_grad_penalty\", True, \"Whether to use TPU or GPU/CPU.\")\nflags.DEFINE_bool(\"do_distributed_training\", False, \"Whether to use TPU or GPU/CPU.\")\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings,\n if_multisigmoid=False,\n if_grad_penalty=False):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n with tf.variable_scope(\"cls/gaode/classification\"):\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n if not if_multisigmoid:\n print(\"===softmax cross-entropy===\")\n probabilities = tf.nn.softmax(logits, axis=-1)\n elif if_multisigmoid:\n print(\"===multilabel-sigmoid===\")\n probabilities = tf.nn.sigmoid(logits)\n\n return (probabilities)\n\n\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps,\n use_one_hot_embeddings,\n if_multisigmoid=False,\n if_grad_penalty=False,\n num_towers=1):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for 
TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, [],\n num_labels, use_one_hot_embeddings,\n if_multisigmoid=if_multisigmoid,\n if_grad_penalty=if_grad_penalty)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n \n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n export_outputs={\n \"output\":tf.estimator.export.PredictOutput(\n {\"probabilities\": probabilities}\n )\n })\n return output_spec\n\n return model_fn\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n import os\n output_dir = os.path.join(FLAGS.buckets, FLAGS.output_dir)\n init_checkpoint = os.path.join(FLAGS.buckets, FLAGS.init_checkpoint)\n\n sess_config = tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=True)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=FLAGS.num_labels,\n init_checkpoint=init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=0,\n num_warmup_steps=0,\n use_one_hot_embeddings=True,\n if_multisigmoid=FLAGS.if_multisigmoid,\n if_grad_penalty=FLAGS.if_grad_penalty,\n num_towers=1)\n\n receiver_features = {\n \"input_ids\":tf.placeholder(tf.int32, [None, None], name='input_ids'),\n \"input_mask\":tf.placeholder(tf.int32, [None, None], name='input_mask'),\n \"segment_ids\":tf.placeholder(tf.int32, [None, None], name='segment_ids'),\n }\n\n def serving_input_receiver_fn():\n print(receiver_features, \"==input receiver_features==\")\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(receiver_features)()\n return input_fn\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n model_dir=output_dir)\n\n import os\n input_export_dir = os.path.join(output_dir, 'export_dir')\n\n export_dir = estimator.export_savedmodel(input_export_dir, \n serving_input_receiver_fn,\n checkpoint_path=init_checkpoint)\n\n print(\"===Succeeded in exporting saved model==={}\".format(export_dir))\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
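The runner above exports a SavedModel whose serving signature is keyed "output" and whose inputs are the three int32 placeholders declared in `receiver_features`. Below is a hedged TF 1.x sketch of querying such an export; the export directory path and the (1, 128) batch are hypothetical, and it assumes `tf.contrib.predictor` is available (TF 1.x only).

import numpy as np
import tensorflow as tf

# Hypothetical path: export_savedmodel writes a timestamped sub-directory under export_dir.
export_dir = "/path/to/output_dir/export_dir/1570000000"

# The "output" signature key matches the export_outputs dict built in model_fn above.
predict_fn = tf.contrib.predictor.from_saved_model(export_dir, signature_def_key="output")

batch = {
    "input_ids": np.zeros((1, 128), dtype=np.int32),
    "input_mask": np.ones((1, 128), dtype=np.int32),
    "segment_ids": np.zeros((1, 128), dtype=np.int32),
}
result = predict_fn(batch)
print(result["probabilities"].shape)  # (1, num_labels)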
"try:\n\tfrom distributed_single_sentence_classification.model_interface import model_zoo\n\tfrom distillation import distillation_utils\n\tfrom loss import loss_utils\nexcept:\n\tfrom distributed_single_sentence_classification.model_interface import model_zoo\n\tfrom distillation import distillation_utils\n\tfrom loss import loss_utils\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom model_io import model_io\nfrom task_module import classifier\nimport tensorflow as tf\nfrom metric import tf_metrics\nfrom task_module import pretrain\nfrom utils.bert import bert_utils\nfrom optimizer import distributed_optimizer as optimizer\nfrom utils.simclr import simclr_utils\n\ndef build_accuracy(logits, labels, mask, loss_type):\n\tmask = tf.cast(mask, tf.float32)\n\tif loss_type == 'contrastive_loss':\n\t\ttemp_sim = tf.subtract(tf.ones_like(logits), tf.rint(logits), name=\"temp_sim\") #auto threshold 0.5\n\t\tcorrect = tf.equal(\n\t\t\t\t\t\t\ttf.cast(temp_sim, tf.float32),\n\t\t\t\t\t\t\ttf.cast(labels, tf.float32)\n\t\t)\n\t\taccuracy = tf.reduce_sum(tf.cast(correct, tf.float32)*mask)/(1e-10+tf.reduce_sum(mask))\n\telif loss_type == 'exponent_neg_manhattan_distance_mse':\n\t\ttemp_sim = tf.rint(logits)\n\t\tcorrect = tf.equal(\n\t\t\t\t\t\t\ttf.cast(temp_sim, tf.float32),\n\t\t\t\t\t\t\ttf.cast(labels, tf.float32)\n\t\t)\n\t\taccuracy = tf.reduce_sum(tf.cast(correct, tf.float32)*mask)/(1e-10+tf.reduce_sum(mask))\n\treturn accuracy\n\ndef model_fn_builder(model,\n\t\t\t\t\tmodel_config,\n\t\t\t\t\tnum_labels,\n\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\tmodel_reuse=None,\n\t\t\t\t\tload_pretrained=True,\n\t\t\t\t\tmodel_io_config={},\n\t\t\t\t\topt_config={},\n\t\t\t\t\texclude_scope=\"\",\n\t\t\t\t\tnot_storage_params=[],\n\t\t\t\t\ttarget=\"a\",\n\t\t\t\t\tlabel_lst=None,\n\t\t\t\t\toutput_type=\"sess\",\n\t\t\t\t\ttask_layer_reuse=None,\n\t\t\t\t\t**kargs):\n\n\tdef model_fn(features, labels, mode):\n\n\t\ttask_type = kargs.get(\"task_type\", \"cls\")\n\n\t\tlabel_ids = tf.cast(features[\"{}_label_ids\".format(task_type)], tf.float32)\n\t\tif task_type in ['mnli', 'cmnli']:\n\t\t\tloss_mask = tf.cast(features[\"{}_loss_multipiler\".format(task_type)], tf.float32)\n\t\t\tnerual_label = tf.not_equal(\n\t\t\t\t\t\t\tlabel_ids,\n\t\t\t\t\t\t\ttf.zeros_like(label_ids)\n\t\t\t)\n\n\t\t\tpos_label = tf.equal(\n\t\t\t\t\t\t\tlabel_ids,\n\t\t\t\t\t\t\ttf.ones_like(label_ids)\n\t\t\t)\n\n\t\t\tneg_label = tf.not_equal(\n\t\t\t\t\t\t\tlabel_ids,\n\t\t\t\t\t\t\t2*tf.ones_like(label_ids)\n\t\t\t)\n\n\t\t\tloss_mask *= tf.cast(nerual_label, dtype=tf.float32) # make neural label\n\t\t\tlabel_ids *= tf.cast(neg_label, dtype=tf.float32)\n\n\t\telse:\n\t\t\tloss_mask = tf.cast(features[\"{}_loss_multipiler\".format(task_type)], tf.float32)\n\n\t\tnum_task = kargs.get('num_task', 1)\n\n\t\tmodel_io_fn = model_io.ModelIO(model_io_config)\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\tdropout_prob = model_config.dropout_prob\n\t\t\tis_training = True\n\t\telse:\n\t\t\tdropout_prob = 0.0\n\t\t\tis_training = False\n\n\t\tif model_io_config.fix_lm == True:\n\t\t\tscope = model_config.scope + \"_finetuning\"\n\t\telse:\n\t\t\tscope = model_config.scope\n\n\t\tif kargs.get(\"get_pooled_output\", \"pooled_output\") == \"pooled_output\":\n\t\t\tpooled_feature = model.get_pooled_output()\n\t\telif kargs.get(\"get_pooled_output\", \"task_output\") == \"task_output\":\n\t\t\tpooled_feature_dict = model.get_task_output()\n\t\t\tpooled_feature = pooled_feature_dict['pooled_feature']\n\n\t\tif 
kargs.get('apply_head_proj', False):\n\t\t\twith tf.variable_scope(scope+\"/head_proj\", reuse=tf.AUTO_REUSE):\n\t\t\t\tfeature_a = simclr_utils.projection_head(pooled_feature_dict['feature_a'], \n\t\t\t\t\t\t\t\t\t\tis_training, \n\t\t\t\t\t\t\t\t\t\thead_proj_dim=128,\n\t\t\t\t\t\t\t\t\t\tnum_nlh_layers=1,\n\t\t\t\t\t\t\t\t\t\thead_proj_mode='nonlinear',\n\t\t\t\t\t\t\t\t\t\tname='head_contrastive')\n\t\t\t\tpooled_feature_dict['feature_a'] = feature_a\n\n\t\t\twith tf.variable_scope(scope+\"/head_proj\", reuse=tf.AUTO_REUSE):\n\t\t\t\tfeature_b = simclr_utils.projection_head(pooled_feature_dict['feature_b'], \n\t\t\t\t\t\t\t\t\t\tis_training, \n\t\t\t\t\t\t\t\t\t\thead_proj_dim=128,\n\t\t\t\t\t\t\t\t\t\tnum_nlh_layers=1,\n\t\t\t\t\t\t\t\t\t\thead_proj_mode='nonlinear',\n\t\t\t\t\t\t\t\t\t\tname='head_contrastive')\n\t\t\t\tpooled_feature_dict['feature_b'] = feature_b\n\t\t\ttf.logging.info(\"****** apply contrastive feature projection *******\")\t\t\n\n\t\tloss = tf.constant(0.0)\n\n\t\tparams_size = model_io_fn.count_params(model_config.scope)\n\t\tprint(\"==total encoder params==\", params_size)\n\n\t\tif kargs.get(\"feature_distillation\", True):\n\t\t\tuniversal_feature_a = features.get(\"input_ids_a_features\", None)\n\t\t\tuniversal_feature_b = features.get(\"input_ids_b_features\", None)\n\t\t\t\n\t\t\tif universal_feature_a is None or universal_feature_b is None:\n\t\t\t\ttf.logging.info(\"****** not apply feature distillation *******\")\n\t\t\t\tfeature_loss = tf.constant(0.0)\n\t\t\telse:\n\t\t\t\tfeature_a = pooled_feature_dict['feature_a']\n\t\t\t\tfeature_a_shape = bert_utils.get_shape_list(feature_a, expected_rank=[2,3])\n\t\t\t\tpretrain_feature_a_shape = bert_utils.get_shape_list(universal_feature_a, expected_rank=[2,3])\n\t\t\t\tif feature_a_shape[-1] != pretrain_feature_a_shape[-1]:\n\t\t\t\t\twith tf.variable_scope(scope+\"/feature_proj\", reuse=tf.AUTO_REUSE):\n\t\t\t\t\t\tproj_feature_a = tf.layers.dense(feature_a, pretrain_feature_a_shape[-1])\n\t\t\t\t\t# with tf.variable_scope(scope+\"/feature_rec\", reuse=tf.AUTO_REUSE):\n\t\t\t\t\t# \tproj_feature_a_rec = tf.layers.dense(proj_feature_a, feature_a_shape[-1])\n\t\t\t\t\t# loss += tf.reduce_mean(tf.reduce_sum(tf.square(proj_feature_a_rec-feature_a), axis=-1))/float(num_task)\n\t\t\t\t\ttf.logging.info(\"****** apply auto-encoder for feature compression *******\")\n\t\t\t\telse:\n\t\t\t\t\tproj_feature_a = feature_a\n\t\t\t\tfeature_a_norm = tf.stop_gradient(tf.sqrt(tf.reduce_sum(tf.pow(proj_feature_a, 2), axis=-1, keepdims=True))+1e-20)\n\t\t\t\tproj_feature_a /= feature_a_norm\n\n\t\t\t\tfeature_b = pooled_feature_dict['feature_b'] \n\t\t\t\tif feature_a_shape[-1] != pretrain_feature_a_shape[-1]:\n\t\t\t\t\twith tf.variable_scope(scope+\"/feature_proj\", reuse=tf.AUTO_REUSE):\n\t\t\t\t\t\tproj_feature_b = tf.layers.dense(feature_b, pretrain_feature_a_shape[-1])\n\t\t\t\t\t# with tf.variable_scope(scope+\"/feature_rec\", reuse=tf.AUTO_REUSE):\n\t\t\t\t\t# \tproj_feature_b_rec = tf.layers.dense(proj_feature_b, feature_a_shape[-1])\n\t\t\t\t\t# loss += tf.reduce_mean(tf.reduce_sum(tf.square(proj_feature_b_rec-feature_b), axis=-1))/float(num_task)\n\t\t\t\t\ttf.logging.info(\"****** apply auto-encoder for feature compression *******\")\n\t\t\t\telse:\n\t\t\t\t\tproj_feature_b = feature_b\n\n\t\t\t\tfeature_b_norm = tf.stop_gradient(tf.sqrt(tf.reduce_sum(tf.pow(proj_feature_b, 2), axis=-1, keepdims=True))+1e-20)\n\t\t\t\tproj_feature_b /= feature_b_norm\n\n\t\t\t\tfeature_a_distillation = 
tf.reduce_mean(tf.square(universal_feature_a-proj_feature_a), axis=-1)\n\t\t\t\tfeature_b_distillation = tf.reduce_mean(tf.square(universal_feature_b-proj_feature_b), axis=-1)\n\n\t\t\t\tfeature_loss = tf.reduce_mean((feature_a_distillation + feature_b_distillation)/2.0)/float(num_task)\n\t\t\t\tloss += feature_loss\n\t\t\t\ttf.logging.info(\"****** apply prertained feature distillation *******\")\n\n\t\tif kargs.get(\"embedding_distillation\", True):\n\t\t\tword_embed = model.emb_mat\n\t\t\trandom_embed_shape = bert_utils.get_shape_list(word_embed, expected_rank=[2,3])\n\t\t\tprint(\"==random_embed_shape==\", random_embed_shape)\n\t\t\tpretrained_embed = kargs.get('pretrained_embed', None)\n\t\t\tif pretrained_embed is None:\n\t\t\t\ttf.logging.info(\"****** not apply prertained feature distillation *******\")\n\t\t\t\tembed_loss = tf.constant(0.0)\n\t\t\telse:\n\t\t\t\tpretrain_embed_shape = bert_utils.get_shape_list(pretrained_embed, expected_rank=[2,3])\n\t\t\t\tprint(\"==pretrain_embed_shape==\", pretrain_embed_shape)\n\t\t\t\tif random_embed_shape[-1] != pretrain_embed_shape[-1]:\n\t\t\t\t\twith tf.variable_scope(scope+\"/embedding_proj\", reuse=tf.AUTO_REUSE):\n\t\t\t\t\t\tproj_embed = tf.layers.dense(word_embed, pretrain_embed_shape[-1])\n\t\t\t\telse:\n\t\t\t\t\tproj_embed = word_embed\n\t\t\t\t\n\t\t\t\tembed_loss = tf.reduce_mean(tf.reduce_mean(tf.square(proj_embed-pretrained_embed), axis=-1))/float(num_task)\n\t\t\t\tloss += embed_loss\n\t\t\t\ttf.logging.info(\"****** apply prertained feature distillation *******\")\n\n\t\tif kargs.get('loss', 'contrastive_loss') == 'contrastive_loss':\n\n\t\t\tfeature_a = tf.nn.l2_normalize(1e-20+pooled_feature_dict['feature_a'], axis=-1)\n\t\t\tfeature_b = tf.nn.l2_normalize(1e-20+pooled_feature_dict['feature_b'], axis=-1)\n\n\t\t\t# feature_a = pooled_feature_dict['feature_a']\n\t\t\t# feature_b = pooled_feature_dict['feature_b']\n\n\t\t\tper_example_loss, logits = loss_utils.contrastive_loss(label_ids, \n\t\t\t\t\t\t\t\t\tfeature_a,\n\t\t\t\t\t\t\t\t\tfeature_b,\n\t\t\t\t\t\t\t\t\tkargs.get('margin', 1.0))\n\t\t\ttf.logging.info(\"****** contrastive_loss *******\")\n\t\telif kargs.get('loss', 'contrastive_loss') == 'exponent_neg_manhattan_distance_mse':\n\t\t\tfeature_a = tf.nn.l2_normalize(1e-20+pooled_feature_dict['feature_a'], axis=-1)\n\t\t\tfeature_b = tf.nn.l2_normalize(1e-20+pooled_feature_dict['feature_b'], axis=-1)\n\n\t\t\t# feature_a = pooled_feature_dict['feature_a']\n\t\t\t# feature_b = pooled_feature_dict['feature_b']\n\n\t\t\tper_example_loss, logits = loss_utils.exponent_neg_manhattan_distance(label_ids, \n\t\t\t\t\t\t\t\t\tfeature_a,\n\t\t\t\t\t\t\t\t\tfeature_b,\n\t\t\t\t\t\t\t\t\t'mse')\n\t\t\ttf.logging.info(\"****** exponent_neg_manhattan_distance_mse *******\")\n\t\telse:\n\t\t\tfeature_a = tf.nn.l2_normalize(1e-20+pooled_feature_dict['feature_a'], axis=-1)\n\t\t\tfeature_b = tf.nn.l2_normalize(1e-20+pooled_feature_dict['feature_b'], axis=-1)\n\n\t\t\t# feature_a = pooled_feature_dict['feature_a']\n\t\t\t# feature_b = pooled_feature_dict['feature_b']\n\n\t\t\tper_example_loss, logits = loss_utils.contrastive_loss(label_ids, \n\t\t\t\t\t\t\t\t\tfeature_a,\n\t\t\t\t\t\t\t\t\tfeature_b,\n\t\t\t\t\t\t\t\t\tkargs.get('margin', 1.0))\n\t\t\ttf.logging.info(\"****** contrastive_loss *******\")\n\t\t# loss_mask = tf.cast(features[\"{}_loss_multipiler\".format(task_type)], tf.float32)\n\n\t\tmasked_per_example_loss = per_example_loss * loss_mask\n\t\ttask_loss = tf.reduce_sum(masked_per_example_loss) / 
(1e-10+tf.reduce_sum(loss_mask))\n\t\tloss += task_loss\n\n\t\t# with tf.variable_scope(scope+\"/{}/classifier\".format(task_type), reuse=task_layer_reuse):\n\t\t\t\n\t\t# \tfeature_a = pooled_feature_dict['feature_a']\n\t\t# \tfeature_b = pooled_feature_dict['feature_a']\n\n\t\t# \tlogtis_feature = tf.concat([feature_a, feature_b], axis=-1)\n\n\t\t# \t(_, \n\t\t# \t\tcls_per_example_loss, \n\t\t# \t\tcls_logits) = classifier.classifier(model_config,\n\t\t# \t\t\t\t\t\t\t\t\tlogtis_feature,\n\t\t# \t\t\t\t\t\t\t\t\tnum_labels,\n\t\t# \t\t\t\t\t\t\t\t\tlabel_ids,\n\t\t# \t\t\t\t\t\t\t\t\tdropout_prob)\n\n\t\t# loss_mask = tf.cast(features[\"{}_loss_multipiler\".format(task_type)], tf.float32)\n\t\t# masked_per_example_loss = cls_per_example_loss * loss_mask\n\t\t# task_loss = tf.reduce_sum(masked_per_example_loss) / (1e-10+tf.reduce_sum(loss_mask))\n\t\t# loss += task_loss\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\tmulti_task_config = kargs.get(\"multi_task_config\", {})\n\t\t\tif multi_task_config[task_type].get(\"lm_augumentation\", False):\n\t\t\t\tprint(\"==apply lm_augumentation==\")\n\t\t\t\tmasked_lm_positions = features[\"masked_lm_positions\"]\n\t\t\t\tmasked_lm_ids = features[\"masked_lm_ids\"]\n\t\t\t\tmasked_lm_weights = features[\"masked_lm_weights\"]\n\t\t\t\t(masked_lm_loss,\n\t\t\t\tmasked_lm_example_loss, \n\t\t\t\tmasked_lm_log_probs) = pretrain.get_masked_lm_output(\n\t\t\t\t\t\t\t\t\t\t\t\tmodel_config, \n\t\t\t\t\t\t\t\t\t\t\t\tmodel.get_sequence_output(), \n\t\t\t\t\t\t\t\t\t\t\t\tmodel.get_embedding_table(),\n\t\t\t\t\t\t\t\t\t\t\t\tmasked_lm_positions, \n\t\t\t\t\t\t\t\t\t\t\t\tmasked_lm_ids, \n\t\t\t\t\t\t\t\t\t\t\t\tmasked_lm_weights,\n\t\t\t\t\t\t\t\t\t\t\t\treuse=model_reuse)\n\n\t\t\t\tmasked_lm_loss_mask = tf.expand_dims(loss_mask, -1) * tf.ones((1, multi_task_config[task_type][\"max_predictions_per_seq\"]))\n\t\t\t\tmasked_lm_loss_mask = tf.reshape(masked_lm_loss_mask, (-1, ))\n\n\t\t\t\tmasked_lm_label_weights = tf.reshape(masked_lm_weights, [-1])\n\t\t\t\tmasked_lm_loss_mask *= tf.cast(masked_lm_label_weights, tf.float32)\n\n\t\t\t\tmasked_lm_example_loss *= masked_lm_loss_mask# multiply task_mask\n\t\t\t\tmasked_lm_loss = tf.reduce_sum(masked_lm_example_loss) / (1e-10+tf.reduce_sum(masked_lm_loss_mask))\n\t\t\t\tloss += multi_task_config[task_type][\"masked_lm_loss_ratio\"]*masked_lm_loss\n\n\t\t\t\tmasked_lm_label_ids = tf.reshape(masked_lm_ids, [-1])\n\t\t\t\t\n\t\t\t\tprint(masked_lm_log_probs.get_shape(), \"===masked lm log probs===\")\n\t\t\t\tprint(masked_lm_label_ids.get_shape(), \"===masked lm ids===\")\n\t\t\t\tprint(masked_lm_label_weights.get_shape(), \"===masked lm mask===\")\n\n\t\t\t\tlm_acc = build_accuracy(masked_lm_log_probs, masked_lm_label_ids, masked_lm_loss_mask)\n\n\t\tif kargs.get(\"task_invariant\", \"no\") == \"yes\":\n\t\t\tprint(\"==apply task adversarial training==\")\n\t\t\twith tf.variable_scope(scope+\"/dann_task_invariant\", reuse=model_reuse):\n\t\t\t\t(_, \n\t\t\t\ttask_example_loss, \n\t\t\t\ttask_logits) = distillation_utils.feature_distillation(model.get_pooled_output(), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t1.0, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures[\"task_id\"], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkargs.get(\"num_task\", 7),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tdropout_prob, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tTrue)\n\t\t\t\tmasked_task_example_loss = loss_mask * task_example_loss\n\t\t\t\tmasked_task_loss = tf.reduce_sum(masked_task_example_loss) / (1e-10+tf.reduce_sum(loss_mask))\n\t\t\t\tloss += kargs.get(\"task_adversarial\", 
1e-2) * masked_task_loss\n\n\t\ttvars = model_io_fn.get_params(model_config.scope, \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\tmulti_task_config = kargs.get(\"multi_task_config\", {})\n\t\t\tif multi_task_config[task_type].get(\"lm_augumentation\", False):\n\t\t\t\tprint(\"==apply lm_augumentation==\")\n\t\t\t\tmasked_lm_pretrain_tvars = model_io_fn.get_params(\"cls/predictions\", \n\t\t\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\t\t\t\ttvars.extend(masked_lm_pretrain_tvars)\n\n\t\ttry:\n\t\t\tparams_size = model_io_fn.count_params(model_config.scope)\n\t\t\tprint(\"==total params==\", params_size)\n\t\texcept:\n\t\t\tprint(\"==not count params==\")\n\t\t# print(tvars)\n\t\tif load_pretrained == \"yes\":\n\t\t\tmodel_io_fn.load_pretrained(tvars, \n\t\t\t\t\t\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\t\t\t\t\t\texclude_scope=exclude_scope)\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\n\t\t\tacc = build_accuracy(logits, \n\t\t\t\t\t\t\t\tlabel_ids, \n\t\t\t\t\t\t\t\tloss_mask,\n\t\t\t\t\t\t\t\tloss_type=kargs.get('loss', 'contrastive_loss'))\n\n\t\t\treturn_dict = {\n\t\t\t\t\t\"loss\":loss, \n\t\t\t\t\t\"logits\":logits,\n\t\t\t\t\t\"task_num\":tf.reduce_sum(loss_mask),\n\t\t\t\t\t\"tvars\":tvars,\n\t\t\t\t\t\"positive_label\":tf.reduce_sum(label_ids*loss_mask)\n\t\t\t\t}\n\t\t\treturn_dict[\"{}_acc\".format(task_type)] = acc\n\t\t\tif kargs.get(\"task_invariant\", \"no\") == \"yes\":\n\t\t\t\treturn_dict[\"{}_task_loss\".format(task_type)] = masked_task_loss\n\t\t\t\ttask_acc = build_accuracy(task_logits, features[\"task_id\"], loss_mask)\n\t\t\t\treturn_dict[\"{}_task_acc\".format(task_type)] = task_acc\n\t\t\tif multi_task_config[task_type].get(\"lm_augumentation\", False):\n\t\t\t\treturn_dict[\"{}_masked_lm_loss\".format(task_type)] = masked_lm_loss\n\t\t\t\treturn_dict[\"{}_masked_lm_acc\".format(task_type)] = lm_acc\n\t\t\tif kargs.get(\"embedding_distillation\", True):\n\t\t\t\treturn_dict[\"embed_loss\"] = embed_loss*float(num_task)\n\t\t\telse:\n\t\t\t\treturn_dict[\"embed_loss\"] = task_loss\n\t\t\tif kargs.get(\"feature_distillation\", True):\n\t\t\t\treturn_dict[\"feature_loss\"] = feature_loss*float(num_task)\n\t\t\telse:\n\t\t\t\treturn_dict[\"feature_loss\"] = task_loss\n\t\t\treturn_dict[\"task_loss\"] = task_loss\n\t\t\treturn return_dict\n\t\telif mode == tf.estimator.ModeKeys.EVAL:\n\t\t\teval_dict = {\n\t\t\t\t\"loss\":loss, \n\t\t\t\t\"logits\":logits,\n\t\t\t\t\"feature\":model.get_pooled_output()\n\t\t\t}\n\t\t\tif kargs.get(\"adversarial\", \"no\") == \"adversarial\":\n\t\t\t\t eval_dict[\"task_logits\"] = task_logits\n\t\t\treturn eval_dict\n\treturn model_fn\n\n\n\t\t\n\n\t\t\t\t",
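Editor's note: the model_fn above ends by combining a margin-based contrastive loss over L2-normalized pooled features with a mask-weighted per-example reduction (`tf.reduce_sum(loss * mask) / (1e-10 + tf.reduce_sum(mask))`). The body of `loss_utils.contrastive_loss` is not included in this entry, so the sketch below assumes the standard Hadsell-style margin formulation; it is illustrative only, not the repository's implementation.

```python
import numpy as np

def l2_normalize(x, eps=1e-20):
    # Normalize each row to unit length, mirroring tf.nn.l2_normalize(..., axis=-1).
    return x / (np.linalg.norm(x, axis=-1, keepdims=True) + eps)

def contrastive_loss(labels, feat_a, feat_b, margin=1.0):
    # Assumed Hadsell-style margin loss on the Euclidean distance between paired embeddings:
    # label == 1 pulls a pair together, label == 0 pushes it apart up to `margin`.
    d = np.linalg.norm(feat_a - feat_b, axis=-1)
    pos = labels * np.square(d)
    neg = (1.0 - labels) * np.square(np.maximum(margin - d, 0.0))
    return 0.5 * (pos + neg), -d  # per-example loss, distance-based "logit" proxy

def masked_mean(per_example_loss, loss_mask, eps=1e-10):
    # Matches the task_loss reduction in the model_fn above.
    return np.sum(per_example_loss * loss_mask) / (eps + np.sum(loss_mask))

if __name__ == "__main__":
    rng = np.random.RandomState(0)
    fa = l2_normalize(rng.randn(4, 8))
    fb = l2_normalize(rng.randn(4, 8))
    labels = np.array([1., 0., 1., 0.])
    mask = np.array([1., 1., 0., 1.])          # third example excluded from this task
    per_ex, _ = contrastive_loss(labels, fa, fb, margin=1.0)
    print("task loss:", masked_mean(per_ex, mask))
```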
"# import tensorflow.compat.v1 as tf\nimport tensorflow as tf\nfrom tensorflow.python.tpu import tpu_function # pylint: disable=g-direct-tensorflow-import\n\nBATCH_NORM_EPSILON = 1e-5\n\n\nclass BatchNormalization(tf.layers.BatchNormalization):\n \"\"\"Batch Normalization layer that supports cross replica computation on TPU.\n This class extends the keras.BatchNormalization implementation by supporting\n cross replica means and variances. The base class implementation only computes\n moments based on mini-batch per replica (TPU core).\n For detailed information of arguments and implementation, refer to:\n https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization\n \"\"\"\n\n def __init__(self, fused=False, **kwargs):\n \"\"\"Builds the batch normalization layer.\n Arguments:\n fused: If `False`, use the system recommended implementation. Only support\n `False` in the current implementation.\n **kwargs: input augments that are forwarded to\n tf.layers.BatchNormalization.\n \"\"\"\n if fused in (True, None):\n raise ValueError('The TPU version of BatchNormalization does not support '\n 'fused=True.')\n super(BatchNormalization, self).__init__(fused=fused, **kwargs)\n\n def _cross_replica_average(self, t):\n \"\"\"Calculates the average value of input tensor across TPU replicas.\"\"\"\n num_shards = tpu_function.get_tpu_context().number_of_shards\n return tf.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super(BatchNormalization, self)._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if num_shards and num_shards > 1:\n # Each group has multiple replicas: here we compute group mean/variance by\n # aggregating per-replica mean/variance.\n group_mean = self._cross_replica_average(shard_mean)\n group_variance = self._cross_replica_average(shard_variance)\n\n # Group variance needs to also include the difference between shard_mean\n # and group_mean.\n mean_distance = tf.square(group_mean - shard_mean)\n group_variance += self._cross_replica_average(mean_distance)\n return (group_mean, group_variance)\n else:\n return (shard_mean, shard_variance)\n\n\ndef batch_norm_relu(FLAGS, inputs, is_training, relu=True, init_zero=False,\n center=True, scale=True, data_format='channels_last'):\n \"\"\"Performs a batch normalization followed by a ReLU.\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training: `bool` for whether the model is training.\n relu: `bool` if False, omits the ReLU operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n center: `bool` whether to add learnable bias factor.\n scale: `bool` whether to add learnable scaling factor.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == 'channels_first':\n axis = 1\n else:\n axis = -1\n\n if FLAGS.global_bn:\n bn_foo = BatchNormalization(\n axis=axis,\n momentum=FLAGS.batch_norm_decay,\n epsilon=BATCH_NORM_EPSILON,\n center=center,\n scale=scale,\n fused=False,\n 
gamma_initializer=gamma_initializer)\n inputs = bn_foo(inputs, training=is_training)\n else:\n inputs = tf.layers.batch_normalization(\n inputs=inputs,\n axis=axis,\n momentum=FLAGS.batch_norm_decay,\n epsilon=BATCH_NORM_EPSILON,\n center=center,\n scale=scale,\n training=is_training,\n fused=True,\n gamma_initializer=gamma_initializer)\n\n if relu:\n inputs = tf.nn.relu(inputs)\n return inputs",
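Editor's note: the `_moments` override above rebuilds the group variance from per-shard statistics by adding the cross-replica average of `(group_mean - shard_mean)^2` to the averaged shard variances. A small NumPy check of that identity, under the simplifying assumption that every replica holds an equally sized batch (which is what makes the plain average exact):

```python
import numpy as np

def group_moments_from_shards(shard_batches):
    # Per-shard moments, as each replica would compute them locally.
    shard_means = np.array([b.mean() for b in shard_batches])
    shard_vars = np.array([b.var() for b in shard_batches])

    # Cross-replica averaging (cross_replica_sum / num_shards in the layer above).
    group_mean = shard_means.mean()
    group_var = shard_vars.mean() + np.square(group_mean - shard_means).mean()
    return group_mean, group_var

if __name__ == "__main__":
    rng = np.random.RandomState(1)
    shards = [rng.randn(32) + i for i in range(4)]      # equal-sized per-replica batches
    gm, gv = group_moments_from_shards(shards)
    full = np.concatenate(shards)
    # With equal shard sizes the reconstruction matches the global moments exactly.
    assert np.allclose(gm, full.mean()) and np.allclose(gv, full.var())
    print("group mean/var:", gm, gv)
```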
"try:\n\tfrom .model_interface import model_zoo\nexcept:\n\tfrom model_interface import model_zoo\n\nimport tensorflow as tf\nimport numpy as np\nfrom bunch import Bunch\n\nfrom model_io import model_io\nfrom task_module import classifier\nimport tensorflow as tf\nfrom metric import tf_metrics\n\nfrom optimizer import distributed_optimizer as optimizer\nfrom model_io import model_io\n\nfrom distillation import knowledge_distillation as distill\n\ndef correlation(x, y):\n\tx = x - tf.reduce_mean(x, axis=-1, keepdims=True)\n\ty = y - tf.reduce_mean(y, axis=-1, keepdims=True)\n\tx = tf.nn.l2_normalize(x, -1)\n\ty = tf.nn.l2_normalize(y, -1)\n\treturn -tf.reduce_sum(x*y, axis=-1) # higher the better\n\ndef kd(x, y):\n\tx_prob = tf.nn.softmax(x)\n\tprint(x_prob.get_shape(), y.get_shape(), tf.reduce_sum(x_prob * y, axis=-1).get_shape())\n\treturn -tf.reduce_sum(x_prob * y, axis=-1) # higher the better\n\ndef mse(x, y):\n\tx = x - tf.reduce_mean(x, axis=-1, keepdims=True)\n\ty = y - tf.reduce_mean(y, axis=-1, keepdims=True)\n\treturn tf.reduce_sum((x-y)**2, axis=-1) # lower the better\n\ndef kd_distance(x, y, dist_type):\n\tif dist_type == \"person\":\n\t\treturn correlation(x,y)\n\telif dist_type == \"kd\":\n\t\treturn kd(x, y)\n\telif dist_type == \"mse\":\n\t\treturn mse(x, y)\n\ndef model_fn_builder(\n\t\t\t\t\tmodel_config,\n\t\t\t\t\tnum_labels,\n\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\tmodel_reuse=None,\n\t\t\t\t\tload_pretrained=True,\n\t\t\t\t\tmodel_io_config={},\n\t\t\t\t\topt_config={},\n\t\t\t\t\texclude_scope=\"\",\n\t\t\t\t\tnot_storage_params=[],\n\t\t\t\t\ttarget=\"a\",\n\t\t\t\t\tlabel_lst=None,\n\t\t\t\t\toutput_type=\"sess\",\n\t\t\t\t\t**kargs):\n\n\tdef model_fn(features, labels, mode):\n\n\t\tmodel_api = model_zoo(model_config)\n\n\t\tmodel = model_api(model_config, features, labels,\n\t\t\t\t\t\t\tmode, target, reuse=model_reuse)\n\n\t\tlabel_ids = features[\"label_ids\"]\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\tdropout_prob = model_config.dropout_prob\n\t\telse:\n\t\t\tdropout_prob = 0.0\n\n\t\tif model_io_config.fix_lm == True:\n\t\t\tscope = model_config.scope + \"_finetuning\"\n\t\telse:\n\t\t\tscope = model_config.scope\n\n\t\twith tf.variable_scope(scope, reuse=model_reuse):\n\t\t\t(loss, \n\t\t\t\tper_example_loss, \n\t\t\t\tlogits) = classifier.classifier(model_config,\n\t\t\t\t\t\t\t\t\t\t\tmodel.get_pooled_output(),\n\t\t\t\t\t\t\t\t\t\t\tnum_labels,\n\t\t\t\t\t\t\t\t\t\t\tlabel_ids,\n\t\t\t\t\t\t\t\t\t\t\tdropout_prob)\n\t\t\tlabel_loss = tf.reduce_sum(per_example_loss * features[\"label_ratio\"]) / (1e-10+tf.reduce_sum(features[\"label_ratio\"]))\n\t\t\ttf.get_variable_scope().reuse_variables()\n\n\t\t\t(tgt_loss, \n\t\t\t\ttgt_per_example_loss, \n\t\t\t\ttgt_logits) = classifier.classifier(model_config,\n\t\t\t\t\t\t\t\t\t\t\tfeatures[\"distillation_feature\"],\n\t\t\t\t\t\t\t\t\t\t\tnum_labels,\n\t\t\t\t\t\t\t\t\t\t\tlabel_ids,\n\t\t\t\t\t\t\t\t\t\t\tdropout_prob)\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\n\t\t\tdistillation_api = distill.KnowledgeDistillation(kargs.get(\"disitllation_config\", 
Bunch({\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"logits_ratio_decay\":\"constant\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"logits_ratio\":0.5,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"logits_decay_rate\":0.999,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"distillation\":['mdd'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"feature_ratio\":0.5,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"feature_ratio_decay\":\"constant\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"feature_decay_rate\":0.999,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"kd_type\":\"kd\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"scope\":scope\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t})))\n\t\t\t# get teacher logits\n\t\t\tteacher_logit = tf.log(features[\"label_probs\"]+1e-10)/kargs.get(\"temperature\", 2.0) # log_softmax logits\n\t\t\tstudent_logit = tf.nn.log_softmax(logits /kargs.get(\"temperature\", 2.0)) # log_softmax logits\n\n\t\t\tdistillation_features = {\n\t\t\t\t\"student_logits_tensor\":student_logit,\n\t\t\t\t\"teacher_logits_tensor\":teacher_logit,\n\t\t\t\t\"student_feature_tensor\":model.get_pooled_output(),\n\t\t\t\t\"teacher_feature_tensor\":features[\"distillation_feature\"],\n\t\t\t\t\"student_label\":tf.ones_like(label_ids, dtype=tf.int32),\n\t\t\t\t\"teacher_label\":tf.zeros_like(label_ids, dtype=tf.int32),\n\t\t\t\t\"logits_ratio\":kargs.get(\"logits_ratio\", 0.5),\n\t\t\t\t\"feature_ratio\":kargs.get(\"logits_ratio\", 0.5),\n\t\t\t\t\"distillation_ratio\":features[\"distillation_ratio\"],\n\t\t\t\t\"src_f_logit\":logits,\n\t\t\t\t\"tgt_f_logit\":tgt_logits,\n\t\t\t\t\"src_tensor\":model.get_pooled_output(),\n\t\t\t\t\"tgt_tensor\":features[\"distillation_feature\"]\n\t\t\t}\n\n\t\t\tdistillation_loss = distillation_api.distillation(distillation_features,\n\t\t\t\t\t\t\t\t\t\t2, dropout_prob,\n\t\t\t\t\t\t\t\t\t\tmodel_reuse,\n\t\t\t\t\t\t\t\t\t\topt_config.num_train_steps,\n\t\t\t\t\t\t\t\t\t\tfeature_ratio=10,\n\t\t\t\t\t\t\t\t\t\tlogits_ratio_decay=\"constant\",\n\t\t\t\t\t\t\t\t\t\tfeature_ratio_decay=\"constant\",\n\t\t\t\t\t\t\t\t\t\tfeature_decay_rate=0.999,\n\t\t\t\t\t\t\t\t\t\tlogits_decay_rate=0.999,\n\t\t\t\t\t\t\t\t\t\tlogits_ratio=0.5,\n\t\t\t\t\t\t\t\t\t\tscope=scope+\"/adv_classifier\",\n\t\t\t\t\t\t\t\t\t\tnum_classes=num_labels,\n\t\t\t\t\t\t\t\t\t\tgamma=kargs.get(\"gamma\", 4))\n\n\t\t\tloss = label_loss + distillation_loss[\"distillation_loss\"]\n\n\t\tmodel_io_fn = model_io.ModelIO(model_io_config)\n\n\t\ttvars = model_io_fn.get_params(model_config.scope, \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\t\tprint(tvars)\n\t\tif load_pretrained == \"yes\":\n\t\t\tmodel_io_fn.load_pretrained(tvars, \n\t\t\t\t\t\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\t\t\t\t\t\texclude_scope=exclude_scope)\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\n\t\t\toptimizer_fn = optimizer.Optimizer(opt_config)\n\n\t\t\tmodel_io_fn.print_params(tvars, string=\", trainable params\")\n\t\t\tupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\t\t\twith tf.control_dependencies(update_ops):\n\t\t\t\ttrain_op = optimizer_fn.get_train_op(loss, tvars, \n\t\t\t\t\t\t\t\topt_config.init_lr, \n\t\t\t\t\t\t\t\topt_config.num_train_steps,\n\t\t\t\t\t\t\t\t**kargs)\n\n\t\t\t\tmodel_io_fn.set_saver()\n\n\t\t\t\tif kargs.get(\"task_index\", 1) == 0 and kargs.get(\"run_config\", None):\n\t\t\t\t\ttraining_hooks = []\n\t\t\t\telif kargs.get(\"task_index\", 1) == 0:\n\t\t\t\t\tmodel_io_fn.get_hooks(kargs.get(\"checkpoint_dir\", None), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkargs.get(\"num_storage_steps\", 1000))\n\n\t\t\t\t\ttraining_hooks = model_io_fn.checkpoint_hook\n\t\t\t\telse:\n\t\t\t\t\ttraining_hooks = []\n\n\t\t\t\tif 
len(optimizer_fn.distributed_hooks) >= 1:\n\t\t\t\t\ttraining_hooks.extend(optimizer_fn.distributed_hooks)\n\t\t\t\tprint(training_hooks, \"==training_hooks==\", \"==task_index==\", kargs.get(\"task_index\", 1))\n\n\t\t\t\testimator_spec = tf.estimator.EstimatorSpec(mode=mode, \n\t\t\t\t\t\t\t\tloss=loss, train_op=train_op,\n\t\t\t\t\t\t\t\ttraining_hooks=training_hooks)\n\t\t\t\tif output_type == \"sess\":\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tpred_label = tf.argmax(distillation_loss[\"st_logits\"], axis=-1, output_type=tf.int32)\n\t\t\t\t\t\tcorrect = tf.equal(\n\t\t\t\t\t\t\ttf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),\n\t\t\t\t\t\t\ttf.cast(pred_label, tf.int32)\n\t\t\t\t\t\t)\n\t\t\t\t\t\tst_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\n\t\t\t\t\t\tpred_label = tf.argmax(distillation_loss[\"te_logits\"], axis=-1, output_type=tf.int32)\n\t\t\t\t\t\tcorrect = tf.equal(\n\t\t\t\t\t\t\ttf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),\n\t\t\t\t\t\t\ttf.cast(pred_label, tf.int32)\n\t\t\t\t\t\t)\n\t\t\t\t\t\tte_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tte_accuracy = tf.constant(0.0)\n\t\t\t\t\t\tst_accuracy = tf.constant(0.0)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tst_accuracy = tf.reduce_mean(distillation_loss[\"src_f1_prob\"])\t\t\t\t\t\t\n\t\t\t\t\t\tte_accuracy = tf.reduce_mean(distillation_loss[\"tgt_f1_prob\"])\n\t\t\t\t\texcept:\n\t\t\t\t\t\tte_accuracy = tf.constant(0.0)\n\t\t\t\t\t\tst_accuracy = tf.constant(0.0)\n\n\t\t\t\t\treturn {\n\t\t\t\t\t\t\"train\":{\n\t\t\t\t\t\t\t\t\t\t\"loss\":loss, \n\t\t\t\t\t\t\t\t\t\t\"logits\":logits,\n\t\t\t\t\t\t\t\t\t\t\"train_op\":train_op,\n\t\t\t\t\t\t\t\t\t\t\"cross_entropy\":label_loss,\n\t\t\t\t\t\t\t\t\t\t\"distillation_loss\":distillation_loss[\"distillation_loss\"],\n\t\t\t\t\t\t\t\t\t\t\"kd_num\":tf.reduce_sum(features[\"distillation_ratio\"]),\n\t\t\t\t\t\t\t\t\t\t\"ce_num\":tf.reduce_sum(features[\"label_ratio\"]),\n\t\t\t\t\t\t\t\t\t\t\"teacher_logit\":teacher_logit,\n\t\t\t\t\t\t\t\t\t\t\"student_logit\":student_logit,\n\t\t\t\t\t\t\t\t\t\t\"label_ratio\":features[\"label_ratio\"],\n\t\t\t\t\t\t\t\t\t\t\"distilaltion_logits_loss\":distillation_loss[\"distillation_logits_loss\"],\n\t\t\t\t\t\t\t\t\t\t\"distilaltion_feature_loss\":distillation_loss[\"distillation_feature_loss\"],\n\t\t\t\t\t\t\t\t\t\t\"distillation_loss\":distillation_loss[\"distillation_loss\"],\n\t\t\t\t\t\t\t\t\t\t\"st_accuracy\":st_accuracy,\n\t\t\t\t\t\t\t\t\t\t\"te_accuracy\":te_accuracy,\n\t\t\t\t\t\t\t\t\t\t\"mdd_loss\":distillation_loss[\"mdd_loss\"]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\"hooks\":training_hooks\n\t\t\t\t\t}\n\t\t\t\telif output_type == \"estimator\":\n\t\t\t\t\treturn estimator_spec\n\n\t\telif mode == tf.estimator.ModeKeys.PREDICT:\n\t\t\tprint(logits.get_shape(), \"===logits shape===\")\n\t\t\tpred_label = tf.argmax(logits, axis=-1, output_type=tf.int32)\n\t\t\tprob = tf.nn.softmax(logits)\n\t\t\tmax_prob = tf.reduce_max(prob, axis=-1)\n\t\t\t\n\t\t\testimator_spec = 
tf.estimator.EstimatorSpec(\n\t\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\t\t\t'pred_label':pred_label,\n\t\t\t\t\t\t\t\t\t\t\t\t\"max_prob\":max_prob\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\texport_outputs={\n\t\t\t\t\t\t\t\t\t\t\"output\":tf.estimator.export.PredictOutput(\n\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'pred_label':pred_label,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"max_prob\":max_prob\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\n\t\t\treturn estimator_spec\n\n\t\telif mode == tf.estimator.ModeKeys.EVAL:\n\t\t\tdef metric_fn(per_example_loss,\n\t\t\t\t\t\tlogits, \n\t\t\t\t\t\tlabel_ids):\n\t\t\t\t\"\"\"Computes the loss and accuracy of the model.\"\"\"\n\t\t\t\tsentence_log_probs = tf.reshape(\n\t\t\t\t\tlogits, [-1, logits.shape[-1]])\n\t\t\t\tsentence_predictions = tf.argmax(\n\t\t\t\t\tlogits, axis=-1, output_type=tf.int32)\n\t\t\t\tsentence_labels = tf.reshape(label_ids, [-1])\n\t\t\t\tsentence_accuracy = tf.metrics.accuracy(\n\t\t\t\t\tlabels=label_ids, predictions=sentence_predictions)\n\t\t\t\tsentence_mean_loss = tf.metrics.mean(\n\t\t\t\t\tvalues=per_example_loss)\n\t\t\t\tsentence_f = tf_metrics.f1(label_ids, \n\t\t\t\t\t\t\t\t\t\tsentence_predictions, \n\t\t\t\t\t\t\t\t\t\tnum_labels, \n\t\t\t\t\t\t\t\t\t\tlabel_lst, average=\"macro\")\n\n\t\t\t\teval_metric_ops = {\n\t\t\t\t\t\t\t\t\t\"f1\": sentence_f,\n\t\t\t\t\t\t\t\t\t\"acc\":sentence_accuracy\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\treturn eval_metric_ops\n\n\t\t\teval_metric_ops = metric_fn( \n\t\t\t\t\t\t\tper_example_loss,\n\t\t\t\t\t\t\tlogits, \n\t\t\t\t\t\t\tlabel_ids)\n\t\t\t\n\t\t\testimator_spec = tf.estimator.EstimatorSpec(mode=mode, \n\t\t\t\t\t\t\t\tloss=loss,\n\t\t\t\t\t\t\t\teval_metric_ops=eval_metric_ops)\n\n\t\t\tif output_type == \"sess\":\n\t\t\t\treturn {\n\t\t\t\t\t\"eval\":{\n\t\t\t\t\t\t\t\"per_example_loss\":per_example_loss,\n\t\t\t\t\t\t\t\"logits\":logits,\n\t\t\t\t\t\t\t\"loss\":tf.reduce_mean(per_example_loss)\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\telif output_type == \"estimator\":\n\t\t\t\treturn estimator_spec\n\t\telse:\n\t\t\traise NotImplementedError()\n\treturn model_fn\n\n",
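Editor's note: the training branch above mixes a label cross-entropy with a knowledge-distillation term built from a temperature-scaled teacher (`tf.log(probs + 1e-10) / T`) and a temperature-scaled student (`tf.nn.log_softmax(logits / T)`). The weighting applied inside `distill.KnowledgeDistillation` is not shown in this entry, so the sketch below only reproduces the assumed softened cross-entropy between the two, with a placeholder 0.5 ratio standing in for `logits_ratio`.

```python
import numpy as np

def log_softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=axis, keepdims=True))

def softmax(x, axis=-1):
    return np.exp(log_softmax(x, axis=axis))

def kd_cross_entropy(student_logits, teacher_probs, temperature=2.0, eps=1e-10):
    # Teacher side: log(probs)/T as in the model_fn; student side: log_softmax(logits/T).
    teacher_log = np.log(teacher_probs + eps) / temperature
    student_log = log_softmax(student_logits / temperature)
    # Re-normalize the softened teacher and take the per-example cross-entropy.
    return -np.sum(softmax(teacher_log) * student_log, axis=-1)

if __name__ == "__main__":
    rng = np.random.RandomState(2)
    student = rng.randn(4, 3)
    teacher = softmax(rng.randn(4, 3))
    label_loss = rng.rand(4).mean()                 # stand-in for the CE term
    distill_loss = kd_cross_entropy(student, teacher).mean()
    print("total:", label_loss + 0.5 * distill_loss)  # 0.5 ~ the default logits_ratio
```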
"import sys,os\nsys.path.append(\"..\")\nfrom model_io import model_io\nimport numpy as np\nimport tensorflow as tf\nfrom example import bert_classifier\nfrom bunch import Bunch\nfrom example import feature_writer, write_to_tfrecords, classifier_processor\nfrom data_generator import tokenization\nfrom data_generator import tf_data_utils\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"eval_data_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\n \"output_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"config_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"result_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"vocab_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"label_id\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_integer(\n \"max_length\", 128,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"train_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"dev_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"model_output\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"gpu_id\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_integer(\n \"epoch\", 5,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_integer(\n \"num_classes\", 3,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_integer(\n \"train_size\", 249847,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_integer(\n \"batch_size\", 16,\n \"Input TF example files (can be a glob or comma separated).\")\n\ngraph = tf.Graph()\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nwith graph.as_default():\n import json\n \n # config = json.load(open(\"/data/xuht/bert/chinese_L-12_H-768_A-12/bert_config.json\", \"r\"))\n # init_checkpoint = \"/data/xuht/bert/chinese_L-12_H-768_A-12/bert_model.ckpt\"\n\n config = json.load(open(FLAGS.config_file))\n init_checkpoint = FLAGS.init_checkpoint\n\n# init_checkpoint = \"/data/xuht/ai_challenge_cqmrc/bert/concat/model/oqmrc.ckpt\"\n config = Bunch(config)\n config.use_one_hot_embeddings = True\n config.scope = \"bert\"\n config.dropout_prob = 0.1\n config.label_type = \"single_label\"\n \n os.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpu_id\n sess = tf.Session()\n\n num_train_steps = int(\n FLAGS.train_size / FLAGS.batch_size * FLAGS.epoch)\n num_warmup_steps = int(num_train_steps * 0.1)\n\n num_storage_steps = int(FLAGS.train_size / FLAGS.batch_size)\n \n opt_config = Bunch({\"init_lr\":1e-5, \n \"num_train_steps\":num_train_steps,\n \"num_warmup_steps\":num_warmup_steps})\n\n model_io_config = Bunch({\"fix_lm\":False})\n \n model_io_fn = model_io.ModelIO(model_io_config)\n \n num_choice = FLAGS.num_classes\n max_seq_length = FLAGS.max_length\n\n model_train_fn = 
bert_classifier.multichoice_model_fn_builder(config, num_choice, init_checkpoint, \n reuse=None, \n load_pretrained=True,\n model_io_fn=model_io_fn,\n model_io_config=model_io_config, \n opt_config=opt_config)\n \n model_eval_fn = bert_classifier.multichoice_model_fn_builder(config, num_choice, init_checkpoint, \n reuse=True, \n load_pretrained=True,\n model_io_fn=model_io_fn,\n model_io_config=model_io_config, \n opt_config=opt_config)\n \n def metric_fn(features, logits, loss):\n print(logits.get_shape(), \"===logits shape===\")\n pred_label = tf.argmax(logits, axis=-1, output_type=tf.int32)\n prob = tf.nn.softmax(logits)\n accuracy = correct = tf.equal(\n tf.cast(pred_label, tf.int32),\n tf.cast(features[\"label_ids\"], tf.int32)\n )\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n return {\"accuracy\":accuracy, \"loss\":loss, \"pred_label\":pred_label, \"label_ids\":features[\"label_ids\"]}\n \n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length*num_choice], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length*num_choice], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length*num_choice], tf.int64),\n \"label_ids\":\n tf.FixedLenFeature([], tf.int64),\n }\n \n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\n \"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n for name in [\"input_ids\", \"input_mask\", \"segment_ids\"]:\n example[name] = tf.reshape(example[name], [-1, max_seq_length])\n return example \n\n params = Bunch({})\n params.epoch = FLAGS.epoch\n params.batch_size = FLAGS.batch_size\n train_features = tf_data_utils.train_input_fn(FLAGS.train_file,\n _decode_record, name_to_features, params)\n eval_features = tf_data_utils.eval_input_fn(FLAGS.dev_file,\n _decode_record, name_to_features, params)\n \n [train_op, train_loss, train_per_example_loss, train_logits] = model_train_fn(train_features, [], tf.estimator.ModeKeys.TRAIN)\n [_, eval_loss, eval_per_example_loss, eval_logits] = model_eval_fn(eval_features, [], tf.estimator.ModeKeys.EVAL)\n result = metric_fn(eval_features, eval_logits, eval_loss)\n \n model_io_fn.set_saver()\n \n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n sess.run(init_op)\n \n def eval_fn(result):\n i = 0\n total_accuracy = 0\n label, label_id = [], []\n while True:\n try:\n eval_result = sess.run(result)\n total_accuracy += eval_result[\"accuracy\"]\n label_id.extend(eval_result[\"label_ids\"])\n label.extend(eval_result[\"pred_label\"])\n i += 1\n except tf.errors.OutOfRangeError:\n print(\"End of dataset\")\n break\n f1 = f1_score(label_id, label, average=\"macro\")\n accuracy = accuracy_score(label_id, label)\n print(\"test accuracy accuracy {} {}, f1 {}\".format(total_accuracy/i, \n accuracy, f1))\n return total_accuracy/ i, f1\n \n def train_fn(op, loss):\n i = 0\n cnt = 0\n total_loss = 0.0\n while True:\n try:\n [_, train_loss] = sess.run([op, loss])\n total_loss += train_loss\n i += 1\n cnt += 1\n if np.mod(i, num_storage_steps) == 0:\n print(total_loss/cnt)\n model_io_fn.save_model(sess, FLAGS.model_output+\"/oqmrc_{}.ckpt\".format(int(i/num_storage_steps)))\n total_loss = 0\n cnt = 0\n except tf.errors.OutOfRangeError:\n break\n \n 
print(\"===========begin to train============\") \n train_fn(train_op, train_loss)\n print(\"===========begin to eval============\")\n eval_fn(result)\n model_io_fn.save_model(sess, FLAGS.model_output+\"/oqmrc.ckpt\")\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"eval_data_file\")\n flags.mark_flag_as_required(\"output_file\")\n flags.mark_flag_as_required(\"config_file\")\n flags.mark_flag_as_required(\"init_checkpoint\")\n flags.mark_flag_as_required(\"result_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"train_file\")\n flags.mark_flag_as_required(\"dev_file\")\n flags.mark_flag_as_required(\"max_length\")\n flags.mark_flag_as_required(\"model_output\")\n flags.mark_flag_as_required(\"gpu_id\")\n flags.mark_flag_as_required(\"epoch\")\n flags.mark_flag_as_required(\"num_classes\")\n tf.app.run()",
"import tensorflow as tf\nimport numpy as np\n\nfrom task_module import pretrain, classifier, pretrain_albert\nimport tensorflow as tf\n\ntry:\n\tfrom distributed_single_sentence_classification.model_interface import model_zoo\nexcept:\n\tfrom distributed_single_sentence_classification.model_interface import model_zoo\n\nfrom pretrain_finetuning.token_generator import token_generator, random_input_ids_generation\n\nfrom utils.bert import bert_utils\nfrom model_io import model_io\n\nimport copy\n\ndef model_fn_builder(\n\t\t\t\t\tmodel_config,\n\t\t\t\t\tnum_labels,\n\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\tmodel_reuse=None,\n\t\t\t\t\tload_pretrained=True,\n\t\t\t\t\tmodel_io_config={},\n\t\t\t\t\topt_config={},\n\t\t\t\t\texclude_scope=\"\",\n\t\t\t\t\tnot_storage_params=[],\n\t\t\t\t\ttarget=\"a\",\n\t\t\t\t\t**kargs):\n\n\tmodel_config = copy.deepcopy(model_config)\n\tif kargs.get(\"sharing_mode\", \"none\") == \"none\":\n\t\t\"\"\"\n\t\t'generator/' + model_config.scope\n\t\t\"\"\"\n\t\tmodel_config.scope = exclude_scope + '/' + model_config.scope\n\t\tgenerator_scope_prefix = exclude_scope\n\t\texclude_scope = exclude_scope\n\t\ttf.logging.info(\"****** generator parameter *******\")\n\telif kargs.get(\"sharing_mode\", \"none\") == \"all_sharing\":\n\t\tgenerator_scope_prefix = None\n\t\texclude_scope = ''\n\t\ttf.logging.info(\"****** generator parameter sharing with discriminator *******\")\n\n\tdef model_fn(features, labels, mode, params):\n\n\t\tmodel_api = model_zoo(model_config)\n\n\t\tmodel = model_api(model_config, features, labels,\n\t\t\t\t\t\t\tmode, target, reuse=tf.AUTO_REUSE,\n\t\t\t\t\t\t\t**kargs)\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\tdropout_prob = model_config.dropout_prob\n\t\telse:\n\t\t\tdropout_prob = 0.0\n\n\t\tif model_io_config.fix_lm == True:\n\t\t\tscope = model_config.scope + \"_finetuning\"\n\t\telse:\n\t\t\tscope = model_config.scope\n\t\t\n\t\t(nsp_loss, \n\t\t nsp_per_example_loss, \n\t\t nsp_log_prob) = pretrain.get_next_sentence_output(model_config,\n\t\t\t\t\t\t\t\t\t\tmodel.get_pooled_output(),\n\t\t\t\t\t\t\t\t\t\tfeatures['next_sentence_labels'],\n\t\t\t\t\t\t\t\t\t\treuse=tf.AUTO_REUSE,\n\t\t\t\t\t\t\t\t\t\tscope=generator_scope_prefix)\n\n\t\tif model_config.model_type == 'bert':\n\t\t\tmasked_lm_fn = pretrain.get_masked_lm_output\n\t\t\tseq_masked_lm_fn = pretrain.seq_mask_masked_lm_output\n\t\t\tprint(\"==apply bert masked lm==\")\n\t\telif model_config.model_type == 'albert':\n\t\t\tmasked_lm_fn = pretrain_albert.get_masked_lm_output\n\t\t\tseq_masked_lm_fn = pretrain_albert.seq_mask_masked_lm_output\n\t\t\tprint(\"==apply albert masked lm==\")\n\t\telse:\n\t\t\tmasked_lm_fn = pretrain.get_masked_lm_output\n\t\t\tseq_masked_lm_fn = pretrain_albert.seq_mask_masked_lm_output\n\t\t\tprint(\"==apply bert masked lm==\")\n\n\t\t(_,\n\t\t\t_, \n\t\t\tmasked_lm_log_probs,\n\t\t\t_) = seq_masked_lm_fn(model_config, \n\t\t\t\t\t\t\t\t\t\tmodel.get_sequence_output(), \n\t\t\t\t\t\t\t\t\t\tmodel.get_embedding_table(),\n\t\t\t\t\t\t\t\t\t\tfeatures['input_mask'], \n\t\t\t\t\t\t\t\t\t\tfeatures['input_ori_ids'], \n\t\t\t\t\t\t\t\t\t\tfeatures['input_ids'],\n\t\t\t\t\t\t\t\t\t\tfeatures['input_mask'],\n\t\t\t\t\t\t\t\t\t\treuse=tf.AUTO_REUSE,\n\t\t\t\t\t\t\t\t\t\tembedding_projection=model.get_embedding_projection_table(),\n\t\t\t\t\t\t\t\t\t\tscope=generator_scope_prefix)\n\n\t\tprint(model_config.lm_ratio, '==mlm lm_ratio==')\n\t\t# loss = model_config.lm_ratio * masked_lm_loss + 0.0 * nsp_loss\n\n\t\tmodel_io_fn = 
model_io.ModelIO(model_io_config)\n\n\t\tpretrained_tvars = model_io_fn.get_params(model_config.scope, \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\tif generator_scope_prefix:\n\t\t\t\"\"\"\n\t\t\t\"generator/cls/predictions\"\n\t\t\t\"\"\"\n\t\t\tlm_pretrain_tvars = model_io_fn.get_params(generator_scope_prefix+\"/cls/predictions\", \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\t\tnsp_pretrain_vars = model_io_fn.get_params(generator_scope_prefix+\"/cls/seq_relationship\",\n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\t\telse:\n\t\t\tlm_pretrain_tvars = model_io_fn.get_params(\"cls/predictions\", \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\t\tnsp_pretrain_vars = model_io_fn.get_params(\"cls/seq_relationship\",\n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\tpretrained_tvars.extend(lm_pretrain_tvars)\n\t\tpretrained_tvars.extend(nsp_pretrain_vars)\n\t\ttvars = pretrained_tvars\n\n\t\tprint('==generator parameters==', tvars)\n\n\t\tif load_pretrained == \"yes\":\n\t\t\tuse_tpu = 1 if kargs.get('use_tpu', False) else 0\n\t\t\tscaffold_fn = model_io_fn.load_pretrained(tvars, \n\t\t\t\t\t\t\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\t\t\t\t\t\t\texclude_scope=exclude_scope,\n\t\t\t\t\t\t\t\t\t\t\tuse_tpu=use_tpu)\n\t\telse:\n\t\t\tscaffold_fn = None\n\n\t\treturn_dict = {\n\t\t\t\t\t\"masked_lm_log_probs\":masked_lm_log_probs,\n\t\t\t\t\t\"tvars\":tvars\n\t\t\t\t}\n\t\treturn return_dict\n\treturn model_fn\n\t\t\n",
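Editor's note: this generator model_fn returns only `masked_lm_log_probs` and its variable list; the imported `token_generator` utilities that consume those log-probabilities are not included in this entry. The sketch below is therefore an assumption about the usual ELECTRA-style next step, sampling replacement tokens at masked positions and labeling the ones that differ from the originals, not the repository's own sampler.

```python
import numpy as np

def sample_replacements(masked_lm_log_probs, input_ori_ids, mask_positions, rng):
    # masked_lm_log_probs: [num_masked, vocab] log-probabilities from the generator.
    # Replace each masked position with a token drawn from the generator's distribution.
    probs = np.exp(masked_lm_log_probs)
    probs /= probs.sum(axis=-1, keepdims=True)
    fake_ids = input_ori_ids.copy()
    for row, pos in enumerate(mask_positions):
        fake_ids[pos] = rng.choice(probs.shape[-1], p=probs[row])
    # Discriminator labels: 1 where the sampled token differs from the original.
    is_fake = (fake_ids != input_ori_ids).astype(np.int32)
    return fake_ids, is_fake

if __name__ == "__main__":
    rng = np.random.RandomState(3)
    log_probs = np.log(np.full((2, 5), 0.2))        # uniform generator over a 5-token vocab
    ori = np.array([1, 2, 3, 4, 0])
    fake, labels = sample_replacements(log_probs, ori, mask_positions=[1, 3], rng=rng)
    print(fake, labels)
```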
"import numpy as np\nimport tensorflow as tf\nfrom utils.bert import bert_utils\nfrom utils.bert import bert_modules\nimport copy\n\n\nclass Bert(object):\n\t\"\"\"\n\tdefault scope: bert\n\t\"\"\"\n\tdef __init__(self, config, *args, **kargs):\n\t\tself.config = copy.deepcopy(config)\n\t\ttf.logging.info(\" begin to build {}\".format(self.config.get(\"scope\", \"bert\")))\n\n\tdef build_embedder(self, input_ids, token_type_ids, \n\t\t\t\t\t\t\t\t\thidden_dropout_prob, \n\t\t\t\t\t\t\t\t\tattention_probs_dropout_prob,\n\t\t\t\t\t\t\t\t\t**kargs):\n\n\t\treuse = kargs[\"reuse\"]\n\n\t\tembedding_table_adv = kargs.get('embedding_table_adv', None)\n\t\tprint(embedding_table_adv, \"==embedding-adv\")\n\n\t\tif self.config.get(\"embedding\", \"none_factorized\") == \"none_factorized\":\n\t\t\tprojection_width = self.config.hidden_size\n\t\t\ttf.logging.info(\"==not using embedding factorized==\")\n\t\telse:\n\t\t\tprojection_width = self.config.get('embedding_size', self.config.hidden_size)\n\t\t\ttf.logging.info(\"==using embedding factorized: embedding size: %s==\", str(projection_width))\n\n\t\tif self.config.get('embedding_scope', None):\n\t\t\tembedding_scope = self.config['embedding_scope']\n\t\t\tother_embedding_scope = self.config['embedding_scope'] #self.config.get(\"scope\", \"bert\")\n\t\t\ttf.logging.info(\"==using embedding scope of original model_config.embedding_scope: %s, other_embedding_scope:%s ==\", embedding_scope, other_embedding_scope)\n\t\telse:\n\t\t\tembedding_scope = self.config.get(\"scope\", \"bert\")\n\t\t\tother_embedding_scope = self.config.get(\"scope\", \"bert\")\n\t\t\ttf.logging.info(\"==using embedding scope of original model_config.embedding_scope: %s, other_embedding_scope:%s ==\", embedding_scope, other_embedding_scope)\n\n\t\twith tf.variable_scope(embedding_scope, reuse=reuse):\n\t\t\twith tf.variable_scope(\"embeddings\"):\n\t\t\t\t# Perform embedding lookup on the word ids.\n\n\t\t\t\tinput_shape = bert_utils.get_shape_list(input_ids, expected_rank=[2,3])\n\t\t\t\tif len(input_shape) == 3:\n\t\t\t\t\ttf.logging.info(\"****** 3D embedding matmul *******\")\n\t\t\t\t\t(self.embedding_output_word, self.embedding_table) = bert_modules.gumbel_embedding_lookup(\n\t\t\t\t\t\t\tinput_ids=input_ids,\n\t\t\t\t\t\t\tvocab_size=self.config.vocab_size,\n\t\t\t\t\t\t\tembedding_size=projection_width,\n\t\t\t\t\t\t\tinitializer_range=self.config.initializer_range,\n\t\t\t\t\t\t\tword_embedding_name=\"word_embeddings\",\n\t\t\t\t\t\t\tuse_one_hot_embeddings=self.config.use_one_hot_embeddings,\n\t\t\t\t\t\t\tembedding_table_adv=embedding_table_adv)\n\t\t\t\telif len(input_shape) == 2:\n\t\t\t\t\t(self.embedding_output_word, self.embedding_table) = bert_modules.embedding_lookup(\n\t\t\t\t\t\tinput_ids=input_ids,\n\t\t\t\t\t\tvocab_size=self.config.vocab_size,\n\t\t\t\t\t\tembedding_size=projection_width,\n\t\t\t\t\t\tinitializer_range=self.config.initializer_range,\n\t\t\t\t\t\tword_embedding_name=\"word_embeddings\",\n\t\t\t\t\t\tuse_one_hot_embeddings=self.config.use_one_hot_embeddings,\n\t\t\t\t\t\tembedding_table_adv=embedding_table_adv)\n\t\t\t\telse:\n\t\t\t\t\t(self.embedding_output_word, self.embedding_table) = 
bert_modules.embedding_lookup(\n\t\t\t\t\t\tinput_ids=input_ids,\n\t\t\t\t\t\tvocab_size=self.config.vocab_size,\n\t\t\t\t\t\tembedding_size=projection_width,\n\t\t\t\t\t\tinitializer_range=self.config.initializer_range,\n\t\t\t\t\t\tword_embedding_name=\"word_embeddings\",\n\t\t\t\t\t\tuse_one_hot_embeddings=self.config.use_one_hot_embeddings,\n\t\t\t\t\t\tembedding_table_adv=embedding_table_adv)\n\n\t\t\t\t# if kargs.get(\"perturbation\", None):\n\t\t\t\t# \tself.embedding_output_word += kargs[\"perturbation\"]\n\t\t\t\t# \ttf.logging.info(\" add word pertubation for robust learning \")\n\n\t\twith tf.variable_scope(other_embedding_scope, reuse=reuse):\n\t\t\twith tf.variable_scope(\"embeddings\"):\n\n\t\t\t\tif kargs.get(\"reuse_mask\", False):\n\t\t\t\t\tdropout_name = other_embedding_scope + \"/embeddings\"\n\t\t\t\t\ttf.logging.info(\"****** reuse mask: %s *******\".format(dropout_name))\n\t\t\t\telse:\n\t\t\t\t\tdropout_name = None\n\n\t\t\t\t# Add positional embeddings and token type embeddings, then layer\n\t\t\t\t# normalize and perform dropout.\n\t\t\t\ttf.logging.info(\"==using segment type embedding ratio: %s==\", str(self.config.get(\"token_type_ratio\", 1.0)))\n\t\t\t\tself.embedding_output = bert_modules.embedding_postprocessor(\n\t\t\t\t\t\tinput_tensor=self.embedding_output_word,\n\t\t\t\t\t\tuse_token_type=True,\n\t\t\t\t\t\ttoken_type_ids=token_type_ids,\n\t\t\t\t\t\ttoken_type_vocab_size=self.config.type_vocab_size,\n\t\t\t\t\t\ttoken_type_embedding_name=\"token_type_embeddings\",\n\t\t\t\t\t\tuse_position_embeddings=True,\n\t\t\t\t\t\tposition_embedding_name=\"position_embeddings\",\n\t\t\t\t\t\tinitializer_range=self.config.initializer_range,\n\t\t\t\t\t\tmax_position_embeddings=self.config.max_position_embeddings,\n\t\t\t\t\t\tdropout_prob=hidden_dropout_prob,\n\t\t\t\t\t\ttoken_type_ratio=self.config.get(\"token_type_ratio\", 1.0),\n\t\t\t\t\t\tdropout_name=dropout_name)\n\n\tdef build_encoder(self, input_ids, input_mask, \n\t\t\t\t\t\t\t\t\thidden_dropout_prob, \n\t\t\t\t\t\t\t\t\tattention_probs_dropout_prob,\n\t\t\t\t\t\t\t\t\tembedding_output=None,\n\t\t\t\t\t\t\t\t\t**kargs):\n\t\treuse = kargs[\"reuse\"]\n\t\tinput_shape = bert_utils.get_shape_list(input_ids, expected_rank=[2,3])\n\t\tbatch_size = input_shape[0]\n\t\tseq_length = input_shape[1]\n\n\t\twith tf.variable_scope(self.config.get(\"scope\", \"bert\"), reuse=reuse):\n\t\t\twith tf.variable_scope(\"encoder\"):\n\t\t\t\t# This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n\t\t\t\t# mask of shape [batch_size, seq_length, seq_length] which is used\n\t\t\t\t# for the attention scores.\n\t\t\t\tinput_shape = bert_utils.get_shape_list(input_ids, expected_rank=[2,3])\n\t\t\t\tif len(input_shape) == 3:\n\t\t\t\t\ttmp_input_ids = tf.argmax(input_ids, axis=-1)\n\t\t\t\telse:\n\t\t\t\t\ttmp_input_ids = input_ids\n\t\t\t\tattention_mask = bert_modules.create_attention_mask_from_input_mask(\n\t\t\t\t\t\ttmp_input_ids, input_mask)\n\n\t\t\t\tseq_type = kargs.get('seq_type', \"None\")\n\n\t\t\t\tif seq_type == \"seq2seq\":\n\t\t\t\t\tif kargs.get(\"mask_type\", \"left2right\") == \"left2right\":\n\t\t\t\t\t\tmask_sequence = input_mask\n\t\t\t\t\t\ttf.logging.info(\"==apply left2right LM model with casual mask==\")\n\t\t\t\t\telif kargs.get(\"mask_type\", \"left2right\") == \"seq2seq\":\n\t\t\t\t\t\ttoken_type_ids = kargs.get(\"token_type_ids\", None)\n\t\t\t\t\t\ttf.logging.info(\"==apply left2right LM model with conditional casual mask==\")\n\t\t\t\t\t\tif token_type_ids is 
None:\n\t\t\t\t\t\t\ttoken_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\t\t\t\t\t\t\ttf.logging.info(\"==conditional mask is set to 0 and degenerate to left2right LM model==\")\n\t\t\t\t\t\tmask_sequence = token_type_ids\n\t\t\t\t\tattention_mask = bert_utils.generate_seq2seq_mask(attention_mask, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tmask_sequence,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tseq_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t**kargs)\n\t\t\t\telse:\n\t\t\t\t\ttf.logging.info(\"==apply bi-directional LM model with bi-directional mask==\")\n\n\t\t\t\tif kargs.get('attention_type', 'efficient_attention') == 'normal_attention':\n\t\t\t\t\ttf.logging.info(\"****** normal attention *******\")\n\t\t\t\t\ttransformer_model = bert_modules.transformer_model\n\t\t\t\telif kargs.get('attention_type', 'efficient_attention') == 'efficient_attention':\n\t\t\t\t\ttf.logging.info(\"****** efficient attention *******\")\n\t\t\t\t\ttransformer_model = bert_modules.transformer_efficient_model\n\t\t\t\telif kargs.get('attention_type', 'efficient_attention') == 'rezero_transformer':\n\t\t\t\t\ttransformer_model = bert_modules.transformer_rezero_model\n\t\t\t\t\ttf.logging.info(\"****** rezero_transformer *******\")\n\t\t\t\telse:\n\t\t\t\t\ttf.logging.info(\"****** normal attention *******\")\n\t\t\t\t\ttransformer_model = bert_modules.transformer_model\n\n\t\t\t\tif kargs.get(\"reuse_mask\", False):\n\t\t\t\t\tdropout_name = self.config.get(\"scope\", \"bert\") + \"/encoder\"\n\t\t\t\t\ttf.logging.info(\"****** reuse mask: %s *******\".format(dropout_name))\n\t\t\t\telse:\n\t\t\t\t\tdropout_name = None\n\n\t\t\t\t# Run the stacked transformer.\n\t\t\t\t# `sequence_output` shape = [batch_size, seq_length, hidden_size].\n\t\t\t\t[self.all_encoder_layers,\n\t\t\t\tself.all_attention_scores,\n\t\t\t\tself.all_value_outputs] = transformer_model(\n\t\t\t\t\t\tinput_tensor=self.embedding_output,\n\t\t\t\t\t\tattention_mask=attention_mask,\n\t\t\t\t\t\thidden_size=self.config.hidden_size,\n\t\t\t\t\t\tnum_hidden_layers=self.config.num_hidden_layers,\n\t\t\t\t\t\tnum_attention_heads=self.config.num_attention_heads,\n\t\t\t\t\t\tintermediate_size=self.config.intermediate_size,\n\t\t\t\t\t\tintermediate_act_fn=bert_modules.get_activation(self.config.hidden_act),\n\t\t\t\t\t\thidden_dropout_prob=hidden_dropout_prob,\n\t\t\t\t\t\tattention_probs_dropout_prob=attention_probs_dropout_prob,\n\t\t\t\t\t\tinitializer_range=self.config.initializer_range,\n\t\t\t\t\t\tdo_return_all_layers=True,\n\t\t\t\t\t\tattention_fixed_size=self.config.get('attention_fixed_size', None),\n\t\t\t\t\t\tdropout_name=dropout_name)\n\n\tdef build_pooler(self, *args,**kargs):\n\t\treuse = kargs[\"reuse\"]\n\t\tlayer_num = kargs.get(\"layer_num\", -1)\n\t\twith tf.variable_scope(self.config.get(\"scope\", \"bert\"), reuse=reuse):\n\t\t\t# self.sequence_output = self.all_encoder_layers[-1]\n\t\t\tself.sequence_output = self.get_encoder_layers(layer_num)\n\n\t\t\t# The \"pooler\" converts the encoded sequence tensor of shape\n\t\t\t# [batch_size, seq_length, hidden_size] to a tensor of shape\n\t\t\t# [batch_size, hidden_size]. This is necessary for segment-level\n\t\t\t# (or segment-pair-level) classification tasks where we need a fixed\n\t\t\t# dimensional representation of the segment.\n\t\t\twith tf.variable_scope(\"pooler\"):\n\t\t\t\t# We \"pool\" the model by simply taking the hidden state corresponding\n\t\t\t\t# to the first token. 
We assume that this has been pre-trained\n\t\t\t\tfirst_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n\t\t\t\tself.pooled_output = tf.layers.dense(\n\t\t\t\t\t\tfirst_token_tensor,\n\t\t\t\t\t\tself.config.hidden_size,\n\t\t\t\t\t\tactivation=tf.tanh,\n\t\t\t\t\t\tkernel_initializer=bert_modules.create_initializer(self.config.initializer_range))\n\t\n\tdef get_multihead_attention(self, **kargs):\n\t\treturn self.all_attention_scores\n\t\n\tdef get_pooled_output(self, **kargs):\n\t\treturn self.pooled_output\n\n\tdef get_value_layer(self, **kargs):\n\t\treturn self.all_value_outputs\n\n\tdef get_embedding_projection_table(self, **kargs):\n\t\treturn None\n\n\tdef get_sequence_output(self, **kargs):\n\t\t\"\"\"Gets final hidden layer of encoder.\n\n\t\tReturns:\n\t\t\tfloat Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n\t\t\tto the final hidden of the transformer encoder.\n\t\t\"\"\"\n\t\treturn self.sequence_output\n\n\tdef get_all_encoder_layers(self, **kargs):\n\t\treturn self.all_encoder_layers\n\n\tdef get_embedding_output(self, **kargs):\n\t\treturn self.embedding_output_word\n\n\tdef get_embedding_table(self, **kargs):\n\t\treturn self.embedding_table\n\n\tdef get_encoder_layers(self, layer_num, **kargs):\n\t\tif layer_num >= 0 and layer_num <= len(self.all_encoder_layers) - 1:\n\t\t\tprint(\"==get encoder layer==\", layer_num)\n\t\t\treturn self.all_encoder_layers[layer_num]\n\t\telse:\n\t\t\treturn self.all_encoder_layers[-1]\n",
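Editor's note: `build_pooler` above squeezes out the first token's hidden state and passes it through a dense tanh layer to obtain a fixed-size segment representation. A few lines sketching the same pooling in NumPy, assuming a `[batch, seq_len, hidden]` encoder output and arbitrary illustrative weights:

```python
import numpy as np

def pool_first_token(sequence_output, kernel, bias):
    # sequence_output: [batch, seq_len, hidden]; take the hidden state of token 0
    # (the tf.squeeze(sequence_output[:, 0:1, :], axis=1) step) and apply dense + tanh.
    first_token = sequence_output[:, 0, :]
    return np.tanh(first_token @ kernel + bias)

if __name__ == "__main__":
    rng = np.random.RandomState(4)
    hidden = 8
    seq = rng.randn(2, 5, hidden)
    pooled = pool_first_token(seq, rng.randn(hidden, hidden) * 0.02, np.zeros(hidden))
    print(pooled.shape)   # (2, 8)
```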
"# -*- coding: utf-8 -*-\nimport tensorflow as tf\n\nfrom optimizer import distributed_optimizer as optimizer\nfrom data_generator import distributed_tf_data_utils as tf_data_utils\n\n# try:\n# \tfrom .bert_model_fn import model_fn_builder\n# \tfrom .bert_model_fn import rule_model_fn_builder\n# \tfrom .classifynet_model_fn import \n# except:\n# \tfrom bert_model_fn import model_fn_builder\n# \tfrom bert_model_fn import rule_model_fn_builder\n\n# try:\n# \tfrom .model_fn import model_fn_builder\n# \tfrom .model_interface import model_config_parser\n# \tfrom .model_data_interface import data_interface\n# \tfrom .model_distillation_fn import model_fn_builder as model_distillation_fn\n# except:\n# \tfrom model_fn import model_fn_builder\n# \tfrom model_interface import model_config_parser\n# \tfrom model_data_interface import data_interface\n# \tfrom model_distillation_fn import model_fn_builder as model_distillation_fn\n\ntry:\n\t# from .model_fn import model_fn_builder\n\tfrom distributed_single_sentence_classification.model_interface import model_config_parser\n\tfrom distributed_single_sentence_classification.model_data_interface import data_interface\n\tfrom distributed_single_sentence_classification.model_fn_interface import model_fn_interface\n\t# from .model_distillation_fn import model_fn_builder as model_distillation_fn\nexcept:\n\t# from model_fn import model_fn_builder\n\tfrom distributed_single_sentence_classification.model_interface import model_config_parser\n\tfrom distributed_single_sentence_classification.model_data_interface import data_interface\n\t# from model_distillation_fn import model_fn_builder as model_distillation_fn\n\tfrom distributed_single_sentence_classification.model_fn_interface import model_fn_interface\n\ntry:\n\tfrom .distillation_model_fn import distillation_model_fn\nexcept:\n\tfrom distillation_model_fn import distillation_model_fn\n\ntry:\n\tfrom .distillation_pretrain_model_fn import distillation_model_fn\nexcept:\n\tfrom distillation_pretrain_model_fn import distillation_model_fn\n\nimport numpy as np\nimport tensorflow as tf\nfrom bunch import Bunch\nfrom model_io import model_io\nimport json\n\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n\ntry:\n\timport paisoar as pai\nexcept Exception as e:\n\tpai = None\n\ntry:\n\timport horovod.tensorflow as hvd\nexcept Exception as e:\n\thvd = None\n\nimport time, os, sys\n\ndef train_eval_fn(FLAGS,\n\t\t\t\tworker_count, \n\t\t\t\ttask_index, \n\t\t\t\tis_chief, \n\t\t\t\ttarget,\n\t\t\t\tinit_checkpoint,\n\t\t\t\ttrain_file,\n\t\t\t\tdev_file,\n\t\t\t\tcheckpoint_dir,\n\t\t\t\tis_debug,\n\t\t\t\t**kargs):\n\n\tgraph = tf.Graph()\n\twith graph.as_default():\n\t\timport json\n\t\t\t\t\n\t\t# config = json.load(open(FLAGS.config_file, \"r\"))\n\n\t\t# config = Bunch(config)\n\t\t# config.use_one_hot_embeddings = True\n\t\t# config.scope = \"bert\"\n\t\t# config.dropout_prob = 0.1\n\t\t# config.label_type = \"single_label\"\n\n\t\t# config.model = FLAGS.model_type\n\n\t\tconfig = model_config_parser(FLAGS)\n\t\t\n\t\tif FLAGS.if_shard == \"0\":\n\t\t\ttrain_size = FLAGS.train_size\n\t\t\tepoch = int(FLAGS.epoch / worker_count)\n\t\telif FLAGS.if_shard == \"1\":\n\t\t\tprint(\"==number of gpus==\", kargs.get('num_gpus', 1))\n\t\t\ttrain_size = int(FLAGS.train_size/worker_count/kargs.get('num_gpus', 1))\n\t\t\t# train_size = int(FLAGS.train_size)\n\t\t\tepoch = FLAGS.epoch\n\t\telse:\n\t\t\ttrain_size = int(FLAGS.train_size/worker_count)\n\t\t\tepoch 
= FLAGS.epoch\n\n\t\tinit_lr = FLAGS.init_lr\n\n\t\tdistillation_dict = json.load(tf.gfile.Open(FLAGS.distillation_config))\n\t\tdistillation_config = Bunch(json.load(tf.gfile.Open(FLAGS.multi_task_config)))\n\n\t\twarmup_ratio = config.get('warmup', 0.1)\n\n\t\tnum_train_steps = int(\n\t\t\ttrain_size / FLAGS.batch_size * epoch)\n\t\tif config.get('ln_type', 'postln') == 'postln':\n\t\t\tnum_warmup_steps = int(num_train_steps * warmup_ratio)\n\t\telif config.get('ln_type', 'preln') == 'postln':\n\t\t\tnum_warmup_steps = 0\n\t\telse:\n\t\t\tnum_warmup_steps = int(num_train_steps * warmup_ratio)\n\t\tprint('==num warmup steps==', num_warmup_steps)\n\n\t\tnum_storage_steps = min([int(train_size / FLAGS.batch_size), 10000 ])\n\t\tif num_storage_steps <= 100:\n\t\t\tnum_storage_steps = 500\n\n\t\tnum_eval_steps = int(FLAGS.eval_size / FLAGS.batch_size)\n\n\t\tif is_debug == \"0\":\n\t\t\tnum_storage_steps = 2\n\t\t\tnum_eval_steps = 10\n\t\t\tnum_train_steps = 10\n\t\tprint(\"num_train_steps {}, num_eval_steps {}, num_storage_steps {}\".format(num_train_steps, num_eval_steps, num_storage_steps))\n\n\t\tprint(\" model type {}\".format(FLAGS.model_type))\n\n\t\tprint(num_train_steps, num_warmup_steps, \"=============\", kargs.get('num_gpus', 1), '==number of gpus==')\n\n\t\tif worker_count*kargs.get(\"num_gpus\", 1) >= 2:\n\t\t\tclip_norm_scale = 1.0\n\t\t\tlr_scale = 0.8\n\t\telse:\n\t\t\tclip_norm_scale = 1.0\n\t\t\tlr_scale = 1.0\n\t\tlr = init_lr*worker_count*kargs.get(\"num_gpus\", 1)*lr_scale\n\t\tif lr >= 1e-3:\n\t\t\tlr = 1e-3\n\t\tprint('==init lr==', lr)\n\t\t\n\t\topt_config = Bunch({\"init_lr\":lr, \n\t\t\t\t\t\t\t\"num_train_steps\":num_train_steps,\n\t\t\t\t\t\t\t\"num_warmup_steps\":num_warmup_steps,\n\t\t\t\t\t\t\t\"worker_count\":worker_count,\n\t\t\t\t\t\t\t\"gpu_count\":worker_count*kargs.get(\"num_gpus\", 1),\n\t\t\t\t\t\t\t\"opt_type\":FLAGS.opt_type,\n\t\t\t\t\t\t\t\"is_chief\":is_chief,\n\t\t\t\t\t\t\t\"train_op\":kargs.get(\"train_op\", \"adam\"),\n\t\t\t\t\t\t\t\"decay\":kargs.get(\"decay\", \"no\"),\n\t\t\t\t\t\t\t\"warmup\":kargs.get(\"warmup\", \"no\"),\n\t\t\t\t\t\t\t\"clip_norm\":config.get(\"clip_norm\", 1.0),\n\t\t\t\t\t\t\t\"grad_clip\":config.get(\"grad_clip\", \"global_norm\"),\n\t\t\t\t\t\t\t\"epoch\":FLAGS.epoch,\n\t\t\t\t\t\t\t\"strategy\":FLAGS.distribution_strategy})\n\n\t\tanneal_config = Bunch({\n\t\t\t\t\t\"initial_value\":1.0,\n\t\t\t\t\t\"num_train_steps\":num_train_steps\n\t\t\t})\n\n\t\tmodel_io_config = Bunch({\"fix_lm\":False})\n\t\tmodel_io_fn = model_io.ModelIO(model_io_config)\n\t\t\n\t\tnum_classes = FLAGS.num_classes\n\n\t\tif FLAGS.opt_type == \"hvd\" and hvd:\n\t\t\tcheckpoint_dir = checkpoint_dir if task_index == 0 else None\n\t\telif FLAGS.opt_type == \"all_reduce\":\n\t\t\tcheckpoint_dir = checkpoint_dir\n\t\telif FLAGS.opt_type == \"collective_reduce\":\n\t\t\tcheckpoint_dir = checkpoint_dir if task_index == 0 else None\n\t\telif FLAGS.opt_type == \"ps\" or FLAGS.opt_type == \"ps_sync\":\n\t\t\tcheckpoint_dir = checkpoint_dir if task_index == 0 else None\n\t\tprint(\"==checkpoint_dir==\", checkpoint_dir, is_chief)\n\n\t\tmodel_config_dict = {}\n\t\tnum_labels_dict = {}\n\t\tinit_checkpoint_dict = {}\n\t\tload_pretrained_dict = {}\n\t\texclude_scope_dict = {}\n\t\tnot_storage_params_dict = {}\n\t\ttarget_dict = {}\n\n\t\tfor task_type in FLAGS.multi_task_type.split(\",\"):\n\t\t\tprint(\"==task type==\", task_type)\n\t\t\tmodel_config_dict[task_type] = model_config_parser(Bunch(distillation_config[task_type]))\n\t\t\tprint(task_type, 
distillation_config[task_type], '=====task model config======')\n\t\t\tnum_labels_dict[task_type] = distillation_config[task_type][\"num_labels\"]\n\t\t\tinit_checkpoint_dict[task_type] = os.path.join(FLAGS.buckets, distillation_config[task_type][\"init_checkpoint\"])\n\t\t\tload_pretrained_dict[task_type] = distillation_config[task_type][\"load_pretrained\"]\n\t\t\texclude_scope_dict[task_type] = distillation_config[task_type][\"exclude_scope\"]\n\t\t\tnot_storage_params_dict[task_type] = distillation_config[task_type][\"not_storage_params\"]\n\t\t\ttarget_dict[task_type] = distillation_config[task_type][\"target\"]\n\n\t\tmodel_fn = distillation_model_fn(model_config_dict,\n\t\t\t\t\tnum_labels_dict,\n\t\t\t\t\tinit_checkpoint_dict,\n\t\t\t\t\tload_pretrained_dict,\n\t\t\t\t\tmodel_io_config=model_io_config,\n\t\t\t\t\topt_config=opt_config,\n\t\t\t\t\texclude_scope_dict=exclude_scope_dict,\n\t\t\t\t\tnot_storage_params_dict=not_storage_params_dict,\n\t\t\t\t\ttarget_dict=target_dict,\n\t\t\t\t\toutput_type=\"estimator\",\n\t\t\t\t\tdistillation_config=distillation_dict,\n\t\t\t\t\t**kargs)\n\n\t\t# name_to_features = data_interface(FLAGS)\n\n\t\tname_to_features = {\n\t\t\t\t\"input_ids_a\":\n\t\t\t\t\t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64),\n\t\t\t\t# \"input_mask_a\":\n\t\t\t\t# \t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64),\n\t\t\t\t# \"segment_ids_a\":\n\t\t\t\t# \t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64),\n\t\t\t\t\"input_ids_b\":\n\t\t\t\t\t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64),\n\t\t\t\t# \"input_mask_b\":\n\t\t\t\t# \t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64),\n\t\t\t\t# \"segment_ids_b\":\n\t\t\t\t# \t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64)\n\t\t\t\t}\n\n\t\t# name_to_features = {\n\t\t# \t\t\t\"input_ids\":\n\t\t# \t\t\t\t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64),\n\t\t# \t\t\t\"input_mask\":\n\t\t# \t\t\t\t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64),\n\t\t# \t\t\t\"segment_ids\":\n\t\t# \t\t\t\t\ttf.FixedLenFeature([FLAGS.max_length], tf.int64)\n\t\t# \t}\n\n\t\tdef _decode_record(record, name_to_features):\n\t\t\t\"\"\"Decodes a record to a TensorFlow example.\n\t\t\t\"\"\"\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example\n\n\t\tdef _decode_batch_record(record, name_to_features):\n\t\t\texample = tf.parse_example(record, name_to_features)\n\t\t\t# for name in list(example.keys()):\n\t\t\t# \tt = example[name]\n\t\t\t# \tif t.dtype == tf.int64:\n\t\t\t# \t\tt = tf.to_int32(t)\n\t\t\t# \texample[name] = t\n\n\t\t\treturn example\n\n\t\tparams = Bunch({})\n\t\tparams.epoch = FLAGS.epoch\n\t\tparams.batch_size = FLAGS.batch_size\n\n\t\tif kargs.get(\"run_config\", None):\n\t\t\tif kargs.get(\"parse_type\", \"parse_single\") == \"parse_single\":\n\t\t\t\ttrain_features = lambda: tf_data_utils.all_reduce_train_input_fn(train_file,\n\t\t\t\t\t\t\t\t\t\t\t_decode_record, name_to_features, params, if_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\t\t\t\teval_features = lambda: tf_data_utils.all_reduce_eval_input_fn(dev_file,\n\t\t\t\t\t\t\t\t\t\t\t_decode_record, name_to_features, params, 
if_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\t\t\telif kargs.get(\"parse_type\", \"parse_single\") == \"parse_batch\":\n\t\t\t\tprint(\"==apply parse example==\")\n\t\t\t\ttrain_features = lambda: tf_data_utils.all_reduce_train_batch_input_fn(train_file,\n\t\t\t\t\t\t\t\t\t\t\t_decode_batch_record, name_to_features, params, if_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\t\t\t\teval_features = lambda: tf_data_utils.all_reduce_eval_batch_input_fn(dev_file,\n\t\t\t\t\t\t\t\t\t\t\t_decode_batch_record, name_to_features, params, if_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\t\t\telif kargs.get(\"parse_type\", \"parse_single\") == \"parse_batch_multi_task\":\n\t\t\t\tdata_prior = [float(item) for item in FLAGS.data_prior.split(',')]\n\t\t\t\ttrain_features = lambda: tf_data_utils.all_reduce_multitask_train_batch_input_fn_sample(\n\t\t\t\t\t\t\t\t\t\ttrain_file,\n\t\t\t\t\t\t\t\t\t\t_decode_record, \n\t\t\t\t\t\t\t\t\t\tname_to_features, \n\t\t\t\t\t\t\t\t\t\tparams, \n\t\t\t\t\t\t\t\t\t\tdata_prior=data_prior,\n\t\t\t\t\t\t\t\t\t\tif_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\n\t\t\t\teval_features = lambda: tf_data_utils.all_reduce_eval_batch_input_fn(dev_file,\n\t\t\t\t\t\t\t\t\t\t\t_decode_batch_record, name_to_features, params, if_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\n\t\telse:\n\t\t\ttrain_features = lambda: tf_data_utils.train_input_fn(train_file,\n\t\t\t\t\t\t\t\t\t\t_decode_record, name_to_features, params, if_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\n\t\t\teval_features = lambda: tf_data_utils.eval_input_fn(dev_file,\n\t\t\t\t\t\t\t\t\t\t_decode_record, name_to_features, params, if_shard=FLAGS.if_shard,\n\t\t\t\t\t\t\t\t\t\tworker_count=worker_count,\n\t\t\t\t\t\t\t\t\t\ttask_index=task_index)\n\t\t\n\t\ttrain_hooks = []\n\t\teval_hooks = []\n\n\t\tsess_config = tf.ConfigProto(allow_soft_placement=False,\n\t\t\t\t\t\t\t\t\tlog_device_placement=False)\n\t\tif FLAGS.opt_type == \"ps\" or FLAGS.opt_type == \"ps_sync\":\n\t\t\tprint(\"==no need for hook==\")\n\t\telif FLAGS.opt_type == \"pai_soar\" and pai:\n\t\t\tprint(\"no need for hook\")\n\t\telif FLAGS.opt_type == \"hvd\" and hvd:\n\t\t\tsess_config.gpu_options.allow_growth = True\n\t\t\tsess_config.gpu_options.visible_device_list = str(hvd.local_rank())\n\t\t\tprint(\"==no need fo hook==\")\n\t\telse:\n\t\t\tprint(\"==no need for hooks==\")\n\n\t\tif kargs.get(\"run_config\", None):\n\t\t\trun_config = kargs.get(\"run_config\", None)\n\t\t\trun_config = run_config.replace(save_checkpoints_steps=num_storage_steps)\n\t\t\tprint(\"==run config==\", run_config.save_checkpoints_steps)\n\t\telse:\n\t\t\trun_config = tf.estimator.RunConfig(model_dir=checkpoint_dir, \n\t\t\t\t\t\t\t\t\t\t\tsave_checkpoints_steps=num_storage_steps,\n\t\t\t\t\t\t\t\t\t\t\tsession_config=sess_config)\n\n\t\tif kargs.get(\"profiler\", \"profiler\") == \"profiler\":\n\t\t\tif checkpoint_dir:\n\t\t\t\thooks = tf.train.ProfilerHook(\n\t\t\t\t\t\t\tsave_steps=100,\n\t\t\t\t\t\t\tsave_secs=None,\n\t\t\t\t\t\t\toutput_dir=os.path.join(checkpoint_dir, 
\"profiler\"),\n\t\t\t\t\t)\n\t\t\t\ttrain_hooks.append(hooks)\n\t\t\t\tprint(\"==add profiler hooks==\")\n\n\t\tmodel_estimator = tf.estimator.Estimator(\n\t\t\t\t\t\tmodel_fn=model_fn,\n\t\t\t\t\t\tmodel_dir=checkpoint_dir,\n\t\t\t\t\t\tconfig=run_config)\n\n\t\ttrain_being_time = time.time()\n\t\ttf.logging.info(\"==training distribution_strategy=={}\".format(kargs.get(\"distribution_strategy\", \"MirroredStrategy\")))\n\t\tif kargs.get(\"distribution_strategy\", \"MirroredStrategy\") == \"MirroredStrategy\":\n\t\t\tprint(\"==apply single machine multi-card training==\")\n\n\t\t\ttrain_spec = tf.estimator.TrainSpec(input_fn=train_features, \n\t\t\t\t\t\t\t\t\t\t\tmax_steps=num_train_steps)\n\n\t\t\teval_spec = tf.estimator.EvalSpec(input_fn=eval_features, \n\t\t\t\t\t\t\t\t\t\t\tsteps=num_eval_steps)\n\t\t\t\n\t\t\tmodel_estimator.train(input_fn=train_features,\n\t\t\t\t\t\t\tmax_steps=num_train_steps,\n\t\t\t\t\t\t\thooks=train_hooks)\n\t\t\t# tf.estimator.train(model_estimator, train_spec)\n\n\t\t\ttrain_end_time = time.time()\n\t\t\tprint(\"==training time==\", train_end_time - train_being_time)\n\t\t\ttf.logging.info(\"==training time=={}\".format(train_end_time - train_being_time))\n\t\t\teval_results = model_estimator.evaluate(input_fn=eval_features, steps=num_eval_steps)\n\t\t\tprint(eval_results)\n\t\t\t\n\t\telif kargs.get(\"distribution_strategy\", \"MirroredStrategy\") in [\"ParameterServerStrategy\", \"CollectiveAllReduceStrategy\"]: \n\t\t\tprint(\"==apply multi-machine machine multi-card training==\")\n\t\t\ttry:\n\t\t\t\tprint(os.environ['TF_CONFIG'], \"==tf_run_config==\")\n\t\t\texcept:\n\t\t\t\tprint(\"==not tf config==\")\n\t\t\ttrain_spec = tf.estimator.TrainSpec(input_fn=train_features, \n\t\t\t\t\t\t\t\t\t\t\tmax_steps=num_train_steps)\n\n\t\t\teval_spec = tf.estimator.EvalSpec(input_fn=eval_features, \n\t\t\t\t\t\t\t\t\t\t\tsteps=num_eval_steps)\n\n\t\t\t# tf.estimator.train(model_estimator, train_spec) # tf 1.12 doesn't need evaluate\n\n\t\t\ttf.estimator.train_and_evaluate(model_estimator, train_spec, eval_spec)\n\t\t\t# train_end_time = time.time()\n\t\t\t# print(\"==training time==\", train_end_time - train_being_time)\n\n\t\t\n\t\t",
"import tensorflow as tf\nimport numpy as np\n\nfrom task_module import pretrain, classifier, pretrain_albert\nimport tensorflow as tf\n\ntry:\n\tfrom distributed_single_sentence_classification.model_interface import model_zoo\nexcept:\n\tfrom distributed_single_sentence_classification.model_interface import model_zoo\n\nfrom pretrain_finetuning.token_generator import random_input_ids_generation\nfrom pretrain_finetuning.token_generator_gumbel import token_generator_gumbel_normal\n\nfrom utils.bert import bert_utils\nfrom model_io import model_io\n\nimport copy\n\ndef model_fn_builder(\n\t\t\t\t\tmodel_config,\n\t\t\t\t\tnum_labels,\n\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\tmodel_reuse=None,\n\t\t\t\t\tload_pretrained=True,\n\t\t\t\t\tmodel_io_config={},\n\t\t\t\t\topt_config={},\n\t\t\t\t\texclude_scope=\"\",\n\t\t\t\t\tnot_storage_params=[],\n\t\t\t\t\ttarget=\"a\",\n\t\t\t\t\t**kargs):\n\n\tmodel_config = copy.deepcopy(model_config)\n\tif kargs.get(\"sharing_mode\", \"none\") == \"none\":\n\t\t\"\"\"\n\t\t'generator/' + model_config.scope\n\t\t\"\"\"\n\t\tmodel_config.scope = exclude_scope + '/' + model_config.scope\n\t\tgenerator_scope_prefix = exclude_scope\n\t\texclude_scope = exclude_scope\n\t\ttf.logging.info(\"****** generator parameter *******\")\n\telif kargs.get(\"sharing_mode\", \"none\") == \"all_sharing\":\n\t\tgenerator_scope_prefix = None\n\t\texclude_scope = ''\n\t\ttf.logging.info(\"****** generator parameter sharing with discriminator *******\")\n\n\tdef model_fn(features, labels, mode, params):\n\n\t\tmodel_api = model_zoo(model_config)\n\n\t\tif kargs.get('random_generator', '1') == '1':\n\t\t\tif mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:\n\t\t\t\tinput_ori_ids = features['input_ori_ids']\n\n\t\t\t\t[output_ids, \n\t\t\t\tsampled_binary_mask] = random_input_ids_generation(model_config,\n\t\t\t\t\t\t\t\t\t\t\tfeatures['input_ori_ids'],\n\t\t\t\t\t\t\t\t\t\t\tfeatures['input_mask'],\n\t\t\t\t\t\t\t\t\t\t\t**kargs)\n\t\t\t\tfeatures['input_ids'] = tf.identity(output_ids)\n\t\t\t\ttf.logging.info(\"****** do random generator *******\")\n\t\t\telse:\n\t\t\t\tsampled_binary_mask = None\n\t\t\t\toutput_ids = tf.identity(features['input_ids'])\n\t\telse:\n\t\t\tsampled_binary_mask = None\n\t\t\toutput_ids = tf.identity(features['input_ids'])\n\n\t\tmodel = model_api(model_config, features, labels,\n\t\t\t\t\t\t\tmode, target, reuse=tf.AUTO_REUSE,\n\t\t\t\t\t\t\t**kargs)\n\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\tdropout_prob = model_config.dropout_prob\n\t\telse:\n\t\t\tdropout_prob = 0.0\n\n\t\tif model_io_config.fix_lm == True:\n\t\t\tscope = model_config.scope + \"_finetuning\"\n\t\telse:\n\t\t\tscope = model_config.scope\n\t\t\n\t\t(nsp_loss, \n\t\t nsp_per_example_loss, \n\t\t nsp_log_prob) = pretrain.get_next_sentence_output(model_config,\n\t\t\t\t\t\t\t\t\t\tmodel.get_pooled_output(),\n\t\t\t\t\t\t\t\t\t\tfeatures['next_sentence_labels'],\n\t\t\t\t\t\t\t\t\t\treuse=tf.AUTO_REUSE,\n\t\t\t\t\t\t\t\t\t\tscope=generator_scope_prefix)\n\n\t\tmasked_lm_positions = features[\"masked_lm_positions\"]\n\t\tmasked_lm_ids = features[\"masked_lm_ids\"]\n\t\tmasked_lm_weights = features[\"masked_lm_weights\"]\n\n\t\tif model_config.model_type == 'bert':\n\t\t\tmasked_lm_fn = pretrain.get_masked_lm_output\n\t\t\tseq_masked_lm_fn = pretrain.seq_mask_masked_lm_output\n\t\t\tprint(\"==apply bert masked lm==\")\n\t\telif model_config.model_type == 'albert':\n\t\t\tmasked_lm_fn = pretrain_albert.get_masked_lm_output\n\t\t\tseq_masked_lm_fn = 
pretrain_albert.seq_mask_masked_lm_output\n\t\t\tprint(\"==apply albert masked lm==\")\n\t\telse:\n\t\t\tmasked_lm_fn = pretrain.get_masked_lm_output\n\t\t\tseq_masked_lm_fn = pretrain_albert.seq_mask_masked_lm_output\n\t\t\tprint(\"==apply bert masked lm==\")\n\n\t\tif sampled_binary_mask is not None:\n\t\t\t(masked_lm_loss,\n\t\t\tmasked_lm_example_loss, \n\t\t\tmasked_lm_log_probs,\n\t\t\tmasked_lm_mask) = seq_masked_lm_fn(model_config, \n\t\t\t\t\t\t\t\t\t\tmodel.get_sequence_output(), \n\t\t\t\t\t\t\t\t\t\tmodel.get_embedding_table(),\n\t\t\t\t\t\t\t\t\t\tfeatures['input_mask'], \n\t\t\t\t\t\t\t\t\t\tfeatures['input_ori_ids'], \n\t\t\t\t\t\t\t\t\t\tfeatures['input_ids'],\n\t\t\t\t\t\t\t\t\t\tsampled_binary_mask,\n\t\t\t\t\t\t\t\t\t\treuse=tf.AUTO_REUSE,\n\t\t\t\t\t\t\t\t\t\tembedding_projection=model.get_embedding_projection_table(),\n\t\t\t\t\t\t\t\t\t\tscope=generator_scope_prefix)\n\t\t\tmasked_lm_ids = features['input_ori_ids']\n\t\telse:\n\t\t\t(masked_lm_loss,\n\t\t\tmasked_lm_example_loss, \n\t\t\tmasked_lm_log_probs,\n\t\t\tmasked_lm_mask) = masked_lm_fn(\n\t\t\t\t\t\t\t\t\t\t\tmodel_config, \n\t\t\t\t\t\t\t\t\t\t\tmodel.get_sequence_output(), \n\t\t\t\t\t\t\t\t\t\t\tmodel.get_embedding_table(),\n\t\t\t\t\t\t\t\t\t\t\tmasked_lm_positions, \n\t\t\t\t\t\t\t\t\t\t\tmasked_lm_ids, \n\t\t\t\t\t\t\t\t\t\t\tmasked_lm_weights,\n\t\t\t\t\t\t\t\t\t\t\treuse=tf.AUTO_REUSE,\n\t\t\t\t\t\t\t\t\t\t\tembedding_projection=model.get_embedding_projection_table(),\n\t\t\t\t\t\t\t\t\t\t\tscope=generator_scope_prefix)\n\t\tprint(model_config.lm_ratio, '==mlm lm_ratio==')\n\t\tloss = model_config.lm_ratio * masked_lm_loss + 0.0 * nsp_loss\n\n\t\tsampled_ids = token_generator_gumbel_normal(model_config, \n\t\t\t\t\t\t\t\t\tmodel.get_sequence_output(), \n\t\t\t\t\t\t\t\t\tmodel.get_embedding_table(), \n\t\t\t\t\t\t\t\t\tfeatures['input_ids'], \n\t\t\t\t\t\t\t\t\tfeatures['input_ori_ids'],\n\t\t\t\t\t\t\t\t\tfeatures['input_mask'],\t\n\t\t\t\t\t\t\t\t\tembedding_projection=model.get_embedding_projection_table(),\n\t\t\t\t\t\t\t\t\tscope=generator_scope_prefix,\n\t\t\t\t\t\t\t\t\tmask_method='only_mask',\n\t\t\t\t\t\t\t\t\t**kargs)\n\n\t\tif model_config.get('gen_sample', 1) == 1:\n\t\t\tinput_ids = features['input_ori_ids']\n\t\t\tinput_mask = features['input_mask']\n\t\t\tsegment_ids = features['segment_ids']\n\n\t\tmodel_io_fn = model_io.ModelIO(model_io_config)\n\n\t\tpretrained_tvars = model_io_fn.get_params(model_config.scope, \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\tif generator_scope_prefix:\n\t\t\t\"\"\"\n\t\t\t\"generator/cls/predictions\"\n\t\t\t\"\"\"\n\t\t\tlm_pretrain_tvars = model_io_fn.get_params(generator_scope_prefix+\"/cls/predictions\", \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\t\tnsp_pretrain_vars = model_io_fn.get_params(generator_scope_prefix+\"/cls/seq_relationship\",\n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\t\telse:\n\t\t\tlm_pretrain_tvars = model_io_fn.get_params(\"cls/predictions\", \n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\t\tnsp_pretrain_vars = model_io_fn.get_params(\"cls/seq_relationship\",\n\t\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\t\t# embedding_tvars = model_io_fn.get_params(model_config.get('embedding_scope', 'bert')+\"/embeddings\", \n\t\t# \t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\n\t\t# pretrained_tvars.extend(lm_pretrain_tvars)\n\t\t# pretrained_tvars.extend(embedding_tvars)\n\t\t# tvars = pretrained_tvars\n\n\t\tif 
model_config.get('embedding_scope', None) is not None:\n\t\t\tembedding_tvars = model_io_fn.get_params(model_config.get('embedding_scope', 'bert')+\"/embeddings\", \n\t\t\t\t\t\t\t\t\tnot_storage_params=not_storage_params)\n\t\t\tpretrained_tvars.extend(embedding_tvars)\n\n\t\tpretrained_tvars.extend(lm_pretrain_tvars)\n\t\tpretrained_tvars.extend(nsp_pretrain_vars)\n\t\ttvars = pretrained_tvars\n\n\t\tprint('==generator parameters==', tvars)\n\n\t\tif load_pretrained == \"yes\":\n\t\t\tuse_tpu = 1 if kargs.get('use_tpu', False) else 0\n\t\t\tscaffold_fn = model_io_fn.load_pretrained(tvars, \n\t\t\t\t\t\t\t\t\t\t\tinit_checkpoint,\n\t\t\t\t\t\t\t\t\t\t\texclude_scope=exclude_scope,\n\t\t\t\t\t\t\t\t\t\t\tuse_tpu=use_tpu,\n\t\t\t\t\t\t\t\t\t\t\trestore_var_name=model_config.get('restore_var_name', []))\n\t\telse:\n\t\t\tscaffold_fn = None\n\n\t\t# tf.add_to_collection(\"generator_loss\", masked_lm_loss)\n\t\treturn_dict = {\n\t\t\t\t\t\"loss\":loss, \n\t\t\t\t\t\"tvars\":tvars,\n\t\t\t\t\t\"model\":model,\n\t\t\t\t\t\"sampled_ids\":sampled_ids, # batch x gen_sample, seg_length\n\t\t\t\t\t\"sampled_input_ids\":input_ids, # batch x gen_sample, seg_length,\n\t\t\t\t\t\"sampled_input_mask\":input_mask,\n\t\t\t\t\t\"sampled_segment_ids\":segment_ids,\n\t\t\t\t\t\"masked_lm_ids\":masked_lm_ids,\n\t\t\t\t\t\"masked_lm_weights\":masked_lm_mask,\n\t\t\t\t\t\"masked_lm_log_probs\":masked_lm_log_probs,\n\t\t\t\t\t\"masked_lm_example_loss\":masked_lm_example_loss,\n\t\t\t\t\t\"next_sentence_example_loss\":nsp_per_example_loss,\n\t\t\t\t\t\"next_sentence_log_probs\":nsp_log_prob, \n\t\t\t\t\t\"next_sentence_labels\":features['next_sentence_labels'],\n\t\t\t\t\t\"output_ids\":output_ids,\n\t\t\t\t\t\"sampled_binary_mask\":sampled_binary_mask\n\t\t\t\t}\n\t\treturn return_dict\n\treturn model_fn\n\t\t\n",
"# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Keras-like layers and utilities that implement Spectral Normalization.\nBased on \"Spectral Normalization for Generative Adversarial Networks\" by Miyato,\net al in ICLR 2018. https://openreview.net/pdf?id=B1QRgziT-\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numbers\nimport re\n\nfrom six.moves import range\n# import tensorflow.compat.v1 as tf\nimport tensorflow as tf\n\n__all__ = [\n 'compute_spectral_norm',\n 'spectral_normalize',\n 'spectral_norm_regularizer',\n 'spectral_normalization_custom_getter',\n]\n\n# tf.bfloat16 should work, but tf.matmul converts those to tf.float32 which then\n# can't directly be assigned back to the tf.bfloat16 variable.\n_OK_DTYPES_FOR_SPECTRAL_NORM = (tf.float16, tf.float32, tf.float64)\n_PERSISTED_U_VARIABLE_SUFFIX = 'spectral_norm_u'\n\n\"\"\"\nsn_gettr = tfgan.features.spectral_normalization_custom_getter\noutput_size = 100\ndef generator(x):\n with tf.variable_scope(\n 'gen',\n custom_getter=sn_gettr(training=training),\n reuse=tf.compat.v1.AUTO_REUSE):\n return tf.compat.v1.layers.dense(\n x,\n units=output_size,\n kernel_initializer=tf.compat.v1.truncated_normal_initializer(),\n bias_initializer=tf.compat.v1.truncated_normal_initializer())\n\"\"\"\n\ndef compute_spectral_norm(w_tensor, power_iteration_rounds=1,\n training=True, name=None):\n \"\"\"Estimates the largest singular value in the weight tensor.\n **NOTE**: When `training=True`, repeatedly running inference actually changes\n the variables, since the spectral norm is repeatedly approximated by a power\n iteration method.\n Args:\n w_tensor: The weight matrix whose spectral norm should be computed.\n power_iteration_rounds: The number of iterations of the power method to\n perform. A higher number yields a better approximation.\n training: Whether to update the spectral normalization on variable\n access. This is useful to turn off during eval, for example, to not affect\n the graph during evaluation.\n name: An optional scope name.\n Returns:\n The largest singular value (the spectral norm) of w.\n Raises:\n ValueError: If TF is executing eagerly.\n ValueError: If called within a distribution strategy that is not supported.\n \"\"\"\n with tf.variable_scope(name, 'spectral_norm'):\n # The paper says to flatten convnet kernel weights from\n # (C_out, C_in, KH, KW) to (C_out, C_in * KH * KW). But TensorFlow's Conv2D\n # kernel weight shape is (KH, KW, C_in, C_out), so it should be reshaped to\n # (KH * KW * C_in, C_out), and similarly for other layers that put output\n # channels as last dimension.\n # n.b. 
this means that w here is equivalent to w.T in the paper.\n w = tf.reshape(w_tensor, (-1, w_tensor.get_shape()[-1]))\n\n # Persisted approximation of first left singular vector of matrix `w`.\n # Requires an appropriate aggregation method since we explicitly control\n # updates.\n replica_context = tf.distribute.get_replica_context()\n if replica_context is None: # cross repica strategy.\n # TODO(joelshor): Determine appropriate aggregation method.\n raise ValueError(\"spectral norm isn't supported in cross-replica \"\n \"distribution strategy.\")\n elif not tf.distribute.has_strategy(): # default strategy.\n aggregation = None\n else:\n aggregation = tf.VariableAggregation.ONLY_FIRST_REPLICA\n\n u_var = tf.get_variable(\n _PERSISTED_U_VARIABLE_SUFFIX,\n shape=(w.shape[0], 1),\n dtype=w.dtype,\n initializer=tf.initializers.random_normal(),\n trainable=False,\n aggregation=aggregation)\n u = u_var\n\n # Use power iteration method to approximate spectral norm.\n for _ in range(power_iteration_rounds):\n # `v` approximates the first right singular vector of matrix `w`.\n v = tf.nn.l2_normalize(tf.matmul(a=w, b=u, transpose_a=True))\n u = tf.nn.l2_normalize(tf.matmul(w, v))\n\n # Update persisted approximation.\n if training:\n with tf.control_dependencies([u_var.assign(u, name='update_u')]):\n u = tf.identity(u)\n\n u = tf.stop_gradient(u)\n v = tf.stop_gradient(v)\n\n # Largest singular value of `w`.\n spectral_norm = tf.matmul(tf.matmul(a=u, b=w, transpose_a=True), v)\n spectral_norm.shape.assert_is_fully_defined()\n spectral_norm.shape.assert_is_compatible_with([1, 1])\n\n return spectral_norm[0][0]\n\n\ndef spectral_normalize(w,\n power_iteration_rounds=1,\n equality_constrained=True,\n training=True,\n name=None):\n \"\"\"Normalizes a weight matrix by its spectral norm.\n **NOTE**: When `training=True`, repeatedly running inference actually changes\n the variables, since the spectral norm is repeatedly approximated by a power\n iteration method.\n Args:\n w: The weight matrix to be normalized.\n power_iteration_rounds: The number of iterations of the power method to\n perform. A higher number yields a better approximation.\n equality_constrained: If set to `True` will normalize the matrix such that\n its spectral norm is equal to 1, otherwise, will normalize the matrix such\n that its norm is at most 1.\n training: Whether to update the spectral normalization on variable\n access. This is useful to turn off during eval, for example, to not affect\n the graph during evaluation.\n name: An optional scope name.\n Returns:\n The input weight matrix, normalized so that its spectral norm is at most\n one.\n \"\"\"\n with tf.variable_scope(name, 'spectral_normalize'):\n normalization_factor = compute_spectral_norm(\n w, power_iteration_rounds=power_iteration_rounds, training=training)\n if not equality_constrained:\n normalization_factor = tf.maximum(1., normalization_factor)\n w_normalized = w / normalization_factor\n return tf.reshape(w_normalized, w.get_shape())\n\n\ndef spectral_norm_regularizer(scale, power_iteration_rounds=1,\n training=True, scope=None):\n \"\"\"Returns a function that can be used to apply spectral norm regularization.\n Small spectral norms enforce a small Lipschitz constant, which is necessary\n for Wasserstein GANs.\n **NOTE**: Repeatedly running inference actually changes the variables, since\n the spectral norm is repeatedly approximated by a power iteration method.\n Args:\n scale: A scalar multiplier. 
0.0 disables the regularizer.\n power_iteration_rounds: The number of iterations of the power method to\n perform. A higher number yields a better approximation.\n training: Whether to update the spectral normalization on variable\n access. This is useful to turn off during eval, for example, to not affect\n the graph during evaluation.\n scope: An optional scope name.\n Returns:\n A function with the signature `sn(weights)` that applies spectral norm\n regularization.\n Raises:\n ValueError: If scale is negative or if scale is not a float.\n \"\"\"\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % scale)\n if isinstance(scale, numbers.Real):\n if scale < 0.0:\n raise ValueError(\n 'Setting a scale less than 0 on a regularizer: %g' % scale)\n if scale == 0.0:\n tf.logging.info('Scale of 0 disables regularizer.')\n return lambda _: None\n\n def sn(weights, name=None):\n \"\"\"Applies spectral norm regularization to weights.\"\"\"\n with tf.name_scope(scope, 'SpectralNormRegularizer', [weights]) as name:\n scale_t = tf.convert_to_tensor(\n value=scale, dtype=weights.dtype.base_dtype, name='scale')\n return tf.multiply(\n scale_t,\n compute_spectral_norm(\n weights, power_iteration_rounds=power_iteration_rounds,\n training=training),\n name=name)\n\n return sn\n\n\ndef _default_name_filter(name):\n \"\"\"A filter function to identify common names of weight variables.\n Args:\n name: The variable name.\n Returns:\n Whether `name` is a standard name for a weight/kernel variables used in the\n Keras, tf.layers, tf.contrib.layers or tf.contrib.slim libraries.\n \"\"\"\n match = re.match(r'(.*\\/)?(depthwise_|pointwise_)?(weights|kernel)$', name)\n return match is not None\n\n\ndef spectral_normalization_custom_getter(name_filter=_default_name_filter,\n power_iteration_rounds=1,\n equality_constrained=True,\n training=True):\n \"\"\"Custom getter that performs Spectral Normalization on a weight tensor.\n Specifically it divides the weight tensor by its largest singular value. This\n is intended to stabilize GAN training, by making the discriminator satisfy a\n local 1-Lipschitz constraint.\n Based on [Spectral Normalization for Generative Adversarial Networks][sn-gan].\n [sn-gan]: https://openreview.net/forum?id=B1QRgziT-\n To reproduce an SN-GAN, apply this custom_getter to every weight tensor of\n your discriminator. The last dimension of the weight tensor must be the number\n of output channels.\n Apply this to layers by supplying this as the `custom_getter` of a\n `tf.variable_scope`. For example:\n with tf.variable_scope(\n 'discriminator', custom_getter=spectral_normalization_custom_getter()):\n net = discriminator_fn(net)\n It is important to carefully select to which weights you want to apply\n Spectral Normalization. In general you want to normalize the kernels of\n convolution and dense layers, but you do not want to normalize biases. You\n also want to avoid normalizing batch normalization (and similar) variables,\n but in general such layers play poorly with Spectral Normalization, since the\n gamma can cancel out the normalization in other layers. By default we supply a\n filter that matches the kernel variable names of the dense and convolution\n layers of the tf.layers, tf.contrib.layers, tf.keras and tf.contrib.slim\n libraries. If you are using anything else you'll need a custom `name_filter`.\n This custom getter internally creates a variable used to compute the spectral\n norm by power iteration. 
It will update every time the variable is accessed,\n which means the normalized discriminator weights may change slightly whilst\n training the generator. Whilst unusual, this matches how the paper's authors\n implement it, and in general additional rounds of power iteration can't hurt.\n IMPORTANT: Keras does not respect the custom_getter supplied by the\n VariableScope, so this approach won't work for Keras layers. For Keras layers\n each layer needs to have Spectral Normalization explicitly applied. This\n should be accomplished using code like:\n my_layer = tf.keras.layers.SomeLayer()\n layer.build(inputs.shape)\n layer.kernel = spectral_normalize(layer.kernel)\n outputs = layer.apply(inputs)\n Args:\n name_filter: Optionally, a method that takes a Variable name as input and\n returns whether this Variable should be normalized.\n power_iteration_rounds: The number of iterations of the power method to\n perform per step. A higher number yields a better approximation of the\n true spectral norm.\n equality_constrained: If set to `True` will normalize the matrix such that\n its spectral norm is equal to 1, otherwise, will normalize the matrix such\n that its norm is at most 1.\n training: Whether to update the spectral normalization on variable\n access. This is useful to turn off during eval, for example, to not affect\n the graph during evaluation.\n Returns:\n A custom getter function that applies Spectral Normalization to all\n Variables whose names match `name_filter`.\n Raises:\n ValueError: If name_filter is not callable.\n \"\"\"\n if not callable(name_filter):\n raise ValueError('name_filter must be callable')\n\n def _internal_getter(getter, name, *args, **kwargs):\n \"\"\"A custom getter function that applies Spectral Normalization.\n Args:\n getter: The true getter to call.\n name: Name of new/existing variable, in the same format as\n tf.get_variable.\n *args: Other positional arguments, in the same format as tf.get_variable.\n **kwargs: Keyword arguments, in the same format as tf.get_variable.\n Returns:\n The return value of `getter(name, *args, **kwargs)`, spectrally\n normalized.\n Raises:\n ValueError: If used incorrectly, or if `dtype` is not supported.\n \"\"\"\n if not name_filter(name):\n return getter(name, *args, **kwargs)\n\n if name.endswith(_PERSISTED_U_VARIABLE_SUFFIX):\n raise ValueError(\n 'Cannot apply Spectral Normalization to internal variables created '\n 'for Spectral Normalization. Tried to normalized variable [%s]' %\n name)\n\n if kwargs['dtype'] not in _OK_DTYPES_FOR_SPECTRAL_NORM:\n raise ValueError('Disallowed data type {}'.format(kwargs['dtype']))\n\n # This layer's weight Variable/PartitionedVariable.\n w_tensor = getter(name, *args, **kwargs)\n\n if len(w_tensor.get_shape()) < 2:\n raise ValueError(\n 'Spectral norm can only be applied to multi-dimensional tensors')\n\n return spectral_normalize(\n w_tensor,\n power_iteration_rounds=power_iteration_rounds,\n equality_constrained=equality_constrained,\n training=training,\n name=(name + '/spectral_normalize'))\n\n return _internal_getter",
"try:\n\tfrom distributed_single_sentence_classification.model_interface import model_config_parser\n\tfrom distributed_single_sentence_classification.model_data_interface import data_interface_server\n\tfrom distributed_single_sentence_classification.model_fn_interface import model_fn_interface\nexcept:\n\tfrom distributed_single_sentence_classification.model_interface import model_config_parser\n\tfrom distributed_single_sentence_classification.model_data_interface import data_interface_server\n\tfrom distributed_single_sentence_classification.model_fn_interface import model_fn_interface\n\nimport json\n\nimport numpy as np\nimport tensorflow as tf\nfrom bunch import Bunch\nfrom model_io import model_io\nimport json, os\n\ndef export_model(FLAGS,\n\t\t\t\tinit_checkpoint,\n\t\t\t\tcheckpoint_dir,\n\t\t\t\texport_dir,\n\t\t\t\t**kargs):\n\n\tconfig = model_config_parser(FLAGS)\n\topt_config = Bunch({})\n\tanneal_config = Bunch({})\n\tmodel_io_config = Bunch({\"fix_lm\":False})\n\n\twith tf.gfile.Open(FLAGS.label_id, \"r\") as frobj:\n\t\tlabel_dict = json.load(frobj)\n\n\tnum_classes = FLAGS.num_classes\n\n\tdef serving_input_receiver_fn():\n\t\treceiver_features = data_interface_server(FLAGS)\n\t\tprint(receiver_features, \"==input receiver_features==\")\n\t\tinput_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(receiver_features)()\n\t\treturn input_fn\n\n\tmodel_fn_builder = model_fn_interface(FLAGS)\n\tmodel_fn = model_fn_builder(config, num_classes, init_checkpoint, \n\t\t\t\t\t\t\t\t\t\t\tmodel_reuse=None, \n\t\t\t\t\t\t\t\t\t\t\tload_pretrained=FLAGS.load_pretrained,\n\t\t\t\t\t\t\t\t\t\t\topt_config=opt_config,\n\t\t\t\t\t\t\t\t\t\t\tmodel_io_config=model_io_config,\n\t\t\t\t\t\t\t\t\t\t\texclude_scope=\"\",\n\t\t\t\t\t\t\t\t\t\t\tnot_storage_params=[],\n\t\t\t\t\t\t\t\t\t\t\ttarget=kargs.get(\"input_target\", \"\"),\n\t\t\t\t\t\t\t\t\t\t\toutput_type=\"estimator\",\n\t\t\t\t\t\t\t\t\t\t\tcheckpoint_dir=checkpoint_dir,\n\t\t\t\t\t\t\t\t\t\t\tnum_storage_steps=100,\n\t\t\t\t\t\t\t\t\t\t\ttask_index=0,\n\t\t\t\t\t\t\t\t\t\t\tanneal_config=anneal_config,\n\t\t\t\t\t\t\t\t\t\t\t**kargs)\n\n\testimator = tf.estimator.Estimator(\n\t\t\t\tmodel_fn=model_fn,\n\t\t\t\tmodel_dir=checkpoint_dir)\n\n\texport_dir = estimator.export_savedmodel(export_dir, \n\t\t\t\t\t\t\t\t\tserving_input_receiver_fn,\n\t\t\t\t\t\t\t\t\tcheckpoint_path=init_checkpoint)\n\tprint(\"===Succeeded in exporting saved model==={}\".format(export_dir))\n\n\n",
"# -*- coding: utf-8 -*-\nfrom data_generator import data_processor\nimport collections\nimport re\nimport tensorflow as tf\nfrom data_generator import tokenization\nfrom hanziconv import HanziConv\nimport random\nimport platform\nimport unicodedata\n\ndef full2half(text):\n\ttext = unicodedata.normalize('NFKC', text)\n\treturn text\n\ndef clean(text):\n\ttext = text.strip()\n\ttext = tokenization.convert_to_unicode(text)\n\ttext = HanziConv.toSimplified(text)\n\ttext = full2half(text)\n\ttext = re.sub(u\"\\\\#.*?#|\\\\|.*?\\\\||\\\\[.*?]\", \"\", text)\n\ttext = re.sub(u\"\\\\s*\", \"\", text)\n\treturn text\n\nExampleInstance = collections.namedtuple(\"ExampleInstance\",\n\t\t\t\t\t\t\t\t\t\t [\"guid\", \"text_a\",\n\t\t\t\t\t\t\t\t\t\t \"text_b\",\n\t\t\t\t\t\t\t\t\t\t \"label\"])\n\nclass SentenceProcessor(data_processor.DataProcessor): \n\tdef get_labels(self, label_file):\n\t\timport json\n\t\twith tf.gfile.Open(label_file, \"r\") as frobj:\n\t\t\tlabel = json.load(frobj)\n\t\tself.label2id = label[\"label2id\"]\n\t\tself.id2label = label[\"id2label\"]\n\t\n\tdef _read_data(self, input_file):\n\t\twith tf.gfile.Open(input_file, \"r\") as f:\n\t\t\tlines = []\n\t\t\tfor line in f:\n\t\t\t\tcontent = line.strip()\n\t\t\t\tlines.append(content)\n\t\t\treturn lines\n\n\tdef _create_examples(self, lines,\n\t\t\t\t\t\t\t\tLABEL_SPLITTER=u\"__label__\"):\n\t\tre_pattern = u\"({}{})\".format(LABEL_SPLITTER, \"\\\\d+\")\n\t\tlabel_pattern = u\"(?<={})(\\\\d+)\".format(LABEL_SPLITTER)\n\t\t\n\t\texamples = []\n\t\tfor (i, line) in enumerate(lines):\n\t\t\ttry:\n\t\t\t\tguid = i\n\t\t\t\telement_list = re.split(re_pattern, line)\n\t\t\t\ttext_a = clean(element_list[-1])\n\n\t\t\t\tinput_labels = []\n\t\t\t\tfor l in re.finditer(label_pattern, line):\n\t\t\t\t\tinput_labels.append(l.group())\n\n\t\t\t\tinput_labels = [label.strip() for label in input_labels if label.strip() in list(self.label2id.keys())]\n\t\t\t\texamples.append(ExampleInstance(\n \tguid=guid,\n\t\t\t\t\t\ttext_a=text_a,\n\t\t\t\t\t\ttext_b=None,\n\t\t\t\t\t\tlabel=input_labels))\n\t\t\texcept:\n\t\t\t\tprint(line, i)\n\t\treturn examples\n\n\tdef get_train_examples(self, train_file, is_shuffle=True):\n\t\tdata = self._read_data(train_file)\n\t\texamples = self._create_examples(data)\n\t\tif is_shuffle:\n\t\t\trandom.shuffle(examples)\n\t\treturn examples\n\n\tdef get_dev_examples(self, dev_file, is_shuffle=False):\n\t\tdata = self._read_data(dev_file)\n\t\texamples = self._create_examples(data)\n\t\tif is_shuffle:\n\t\t\trandom.shuffle(examples)\n\t\treturn examples\n\n\tdef get_test_examples(self, test_file):\n\t\tdata = self._read_data(test_file)\n\t\texamples = self._create_examples(data)\n\t\treturn examples\n\nclass SentencePairProcessor(data_processor.DataProcessor): \n\tdef get_labels(self, label_file):\n\t\timport json\n\t\twith tf.gfile.Open(label_file, \"r\") as frobj:\n\t\t\tlabel = json.load(frobj)\n\t\tself.label2id = label[\"label2id\"]\n\t\tself.id2label = label[\"id2label\"]\n\t\n\tdef _read_data(self, input_file):\n\t\timport json\n\t\tdata = []\n\t\twith tf.gfile.Open(input_file, \"r\") as frobj:\n\t\t\tfor line in frobj:\n\t\t\t\tdata.append(json.loads(line.strip()))\n\t\treturn data\n\n\tdef _create_examples(self, data):\n\t\texamples = []\n\t\tfor index in range(len(data)):\n\t\t\tcontent = data[index]\n\t\t\ttry:\n\t\t\t\tguid = int(content[\"ID\"])\n\t\t\texcept:\n\t\t\t\tguid = index\n\t\t\ttry:\n\t\t\t\ttext_a = clean(content[\"sentence1\"])\n\t\t\t\ttext_b = 
clean(content[\"sentence2\"])\n\t\t\texcept:\n\t\t\t\tprint(content[\"sentence1\"], content[\"sentence2\"], index)\n\t\t\ttry:\n\t\t\t\tlabel = content[\"gold_label\"]\n\t\t\texcept:\n\t\t\t\tlabel = \"0\"\n\t\t\tif isinstance(text_a, str) and isinstance(text_b, str) or isinstance(text_a, unicode) and isinstance(text_b, unicode):\n\t\t\t\texamples.append(ExampleInstance(\n \tguid=guid,\n\t\t\t\t\t\ttext_a=text_a,\n\t\t\t\t\t\ttext_b=text_b,\n\t\t\t\t\t\tlabel=[label]))\n\t\treturn examples\n\n\tdef get_train_examples(self, train_file, is_shuffle=True):\n\t\tdata = self._read_data(train_file)\n\t\texamples = self._create_examples(data)\n\t\tif is_shuffle:\n\t\t\trandom.shuffle(examples)\n\t\treturn examples\n\n\tdef get_dev_examples(self, dev_file, is_shuffle=False):\n\t\tdata = self._read_data(dev_file)\n\t\texamples = self._create_examples(data)\n\t\tif is_shuffle:\n\t\t\trandom.shuffle(examples)\n\t\treturn examples\n\n\tdef get_test_examples(self, test_file):\n\t\tdata = self._read_data(test_file)\n\t\texamples = self._create_examples(data)\n\t\treturn examples"
] | [
[
"numpy.ones_like",
"tensorflow.io.gfile.GFile",
"tensorflow.io.gfile.makedirs",
"numpy.std",
"numpy.mean",
"tensorflow.io.gfile.remove"
],
[
"numpy.zeros",
"numpy.cos",
"numpy.power",
"numpy.sin"
],
[
"tensorflow.nn.bias_add",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.estimator.Estimator",
"tensorflow.nn.sigmoid",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.estimator.export.build_raw_serving_input_receiver_fn",
"tensorflow.zeros_initializer",
"tensorflow.train.init_from_checkpoint",
"tensorflow.placeholder",
"tensorflow.trainable_variables",
"tensorflow.truncated_normal_initializer",
"tensorflow.ConfigProto",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.variable_scope",
"tensorflow.nn.dropout",
"tensorflow.app.run"
],
[
"tensorflow.nn.l2_normalize",
"tensorflow.constant",
"tensorflow.rint",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.pow",
"tensorflow.cast",
"tensorflow.ones_like",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.layers.dense",
"tensorflow.zeros_like",
"tensorflow.logging.info",
"tensorflow.square",
"tensorflow.variable_scope"
],
[
"tensorflow.nn.relu",
"tensorflow.layers.batch_normalization",
"tensorflow.python.tpu.tpu_function.get_tpu_context",
"tensorflow.zeros_initializer",
"tensorflow.cast",
"tensorflow.square",
"tensorflow.ones_initializer",
"tensorflow.tpu.cross_replica_sum"
],
[
"tensorflow.metrics.accuracy",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.get_collection",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.argmax",
"tensorflow.nn.l2_normalize",
"tensorflow.metrics.mean",
"tensorflow.zeros_like",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.log",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
],
[
"tensorflow.Graph",
"tensorflow.nn.softmax",
"tensorflow.local_variables_initializer",
"tensorflow.FixedLenFeature",
"numpy.mod",
"sklearn.metrics.accuracy_score",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"sklearn.metrics.f1_score",
"tensorflow.to_int32",
"tensorflow.parse_single_example",
"tensorflow.argmax",
"tensorflow.app.run"
],
[
"tensorflow.logging.info"
],
[
"tensorflow.zeros",
"tensorflow.squeeze",
"tensorflow.logging.info",
"tensorflow.variable_scope",
"tensorflow.argmax"
],
[
"tensorflow.Graph",
"tensorflow.estimator.Estimator",
"tensorflow.FixedLenFeature",
"tensorflow.gfile.Open",
"tensorflow.parse_example",
"tensorflow.ConfigProto",
"tensorflow.estimator.TrainSpec",
"tensorflow.estimator.RunConfig",
"tensorflow.estimator.EvalSpec",
"tensorflow.parse_single_example",
"tensorflow.to_int32",
"tensorflow.estimator.train_and_evaluate"
],
[
"tensorflow.identity",
"tensorflow.logging.info"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.matmul",
"tensorflow.distribute.get_replica_context",
"tensorflow.maximum",
"tensorflow.identity",
"tensorflow.stop_gradient",
"tensorflow.distribute.has_strategy",
"tensorflow.initializers.random_normal",
"tensorflow.logging.info",
"tensorflow.name_scope",
"tensorflow.variable_scope"
],
[
"tensorflow.estimator.export.build_raw_serving_input_receiver_fn",
"tensorflow.estimator.Estimator",
"tensorflow.gfile.Open"
],
[
"tensorflow.gfile.Open"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
viitormiiguel/AnalysisFinancial | [
"21d19c4eb200655ffd8605d4c38ab280a4552384"
] | [
"Results/ResultUniLex.py"
] | [
"import nltk\r\nimport csv\r\nimport datetime\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nnow = datetime.datetime.now()\r\ntoday = now.strftime(\"%Y-%m-%d\")\r\n\r\ndInfoMoney = 'C:/Users/vitor/Documents/GetDataset/Infomoney/'\r\ndInvesting = 'C:/Users/vitor/Documents/GetDataset/Investing.com/'\r\ndTrading = 'C:/Users/vitor/Documents/GetDataset/TradingView/'\r\n\r\n# Resultados Investing.com\r\nr_investing = open(dInvesting + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')\r\n# r_investing = open(dInvesting + today +'/polarityUniLexNo.csv', 'r', encoding='utf8')\r\nposInv = 0 \r\nneuInv = 0 \r\nnegInv = 0\r\nfor t in r_investing.readlines():\r\n if 'Positivo' in t:\r\n posInv += 1\r\n if 'Neutro' in t:\r\n neuInv += 1\r\n if 'Negativo' in t:\r\n negInv += 1\r\nprint('Investing Pos ', posInv)\r\nprint('Investing Neu ', neuInv)\r\nprint('Investing Neg ', negInv)\r\n\r\n# Resultados InfoMoney\r\nr_infomoney = open(dInfoMoney + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')\r\n# r_infomoney = open(dInfoMoney + today +'/polarityUniLexNo.csv', 'r', encoding='utf8')\r\nposInf = 0\r\nneuInf = 0 \r\nnegInf = 0\r\nfor t in r_infomoney.readlines():\r\n if 'Positivo' in t:\r\n posInf += 1\r\n if 'Neutro' in t:\r\n neuInf += 1\r\n if 'Negativo' in t:\r\n negInf += 1\r\nprint('InfoMoney Pos ', posInf)\r\nprint('InfoMoney Neu ', neuInf)\r\nprint('InfoMoney Neg ', negInf)\r\n\r\n# Resultados TradingView\r\nr_tradingview = open(dTrading + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')\r\n# r_tradingview = open(dTrading + today +'/polarityUniLexNo.csv', 'r', encoding='utf8')\r\nposTrd = 0\r\nneuTrd = 0 \r\nnegTrd = 0\r\nfor t in r_tradingview.readlines():\r\n if 'Positivo' in t:\r\n posTrd += 1\r\n if 'Neutro' in t:\r\n neuTrd += 1\r\n if 'Negativo' in t:\r\n negTrd += 1\r\nprint('TradingView Pos ', posTrd)\r\nprint('TradingView Neu ', neuTrd)\r\nprint('TradingView Neg ', negTrd)\r\n\r\nraw_data = {'Fonte de Dados': ['Investing.com', 'InfoMoney', 'TradingView'],\r\n 'Pos': [posInv, posInf, posTrd],\r\n 'Neu': [neuInv, neuInf, neuTrd],\r\n 'Neg': [negInv, negInf, negTrd]}\r\ndf = pd.DataFrame(raw_data, columns = ['Fonte de Dados', 'Pos', 'Neu', 'Neg'])\r\ndf\r\n\r\n# Setting the positions and width for the bars\r\npos = list(range(len(df['Pos']))) \r\nwidth = 0.25 \r\nfig, ax = plt.subplots(figsize=(10,5))\r\n\r\n# Create a bar with pre_score data, # in position pos,\r\nplt.bar(pos, df['Pos'], width, alpha=0.5, color='#EE3224', label=df['Fonte de Dados'][0]) \r\n\r\n# Create a bar with mid_score data, # in position pos + some width buffer,\r\nplt.bar([p + width for p in pos], df['Neu'], width, alpha=0.5, color='#F78F1E', label=df['Fonte de Dados'][1]) \r\n\r\n# Create a bar with post_score data, # in position pos + some width buffer,\r\nplt.bar([p + width*2 for p in pos], df['Neg'], width, alpha=0.5, color='#FFC222', label=df['Fonte de Dados'][2]) \r\n\r\nax.set_title(\"OpLexicon sem Pré-Processamento\")\r\nax.set_ylabel('N° de Textos')\r\nax.set_xticks([p + 1 * width for p in pos])\r\nax.set_xticklabels(df['Fonte de Dados'])\r\n\r\nplt.xlim(min(pos)-width, max(pos)+width*4)\r\nplt.ylim([0, max(df['Pos'] + df['Neu'] + df['Neg'])] )\r\n\r\nplt.legend(['Positivo', 'Neutro', 'Negativo'], loc='upper left')\r\nplt.grid()\r\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gruber-sciencelab/MAPP | [
"81563f676b284c5b283a193a698ce618c044d3b5"
] | [
"modules/REPORT_RESULTS/scripts/select-motifs.py"
] | [
"\"\"\"\n##############################################################################\n#\n# Select top N distinct motifs with highest (statistically significant)\n# activity Z-score (for every site separately)\n#\n# AUTHOR: Maciej_Bak\n# AFFILIATION: University_of_Basel\n# AFFILIATION: Swiss_Institute_of_Bioinformatics\n# CONTACT: [email protected]\n# CREATED: 04-06-2020\n# LICENSE: Apache_2.0\n#\n##############################################################################\n\"\"\"\n\n# imports\nimport time\nimport logging\nimport logging.handlers\nfrom argparse import ArgumentParser, RawTextHelpFormatter\nimport os\nimport pandas as pd\n\n\ndef parse_arguments():\n \"\"\"Parser of the command-line arguments.\"\"\"\n parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n dest=\"verbosity\",\n choices=(\"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"CRITICAL\"),\n default=\"ERROR\",\n help=\"Verbosity/Log level. Defaults to ERROR\",\n )\n parser.add_argument(\n \"-l\", \"--logfile\", dest=\"logfile\", help=\"Store log to this file.\"\n )\n parser.add_argument(\n \"--topN-motifs\",\n dest=\"N\",\n default=1000000, # by default: effectively select all stat. sign. motifs\n required=False,\n help=\"Number of top motifs to select.\",\n )\n parser.add_argument(\n \"--infile-splicing-3ss\",\n dest=\"results_3ss\",\n required=True,\n help=\"Annotated results table (3ss).\",\n )\n parser.add_argument(\n \"--infile-splicing-5ss\",\n dest=\"results_5ss\",\n required=True,\n help=\"Annotated results table (5ss).\",\n )\n parser.add_argument(\n \"--infile-polyadenylation-pas\",\n dest=\"results_pas\",\n required=True,\n help=\"Annotated results table (pas).\",\n )\n parser.add_argument(\n \"--outfile-splicing-3ss-motifs\",\n dest=\"motifs_3ss\",\n required=True,\n help=\"Path for the text file with top motifs (3ss).\",\n )\n parser.add_argument(\n \"--outfile-splicing-5ss-motifs\",\n dest=\"motifs_5ss\",\n required=True,\n help=\"Path for the text file with top motifs (5ss).\",\n )\n parser.add_argument(\n \"--outfile-polyadenylation-pas-motifs\",\n dest=\"motifs_pas\",\n required=True,\n help=\"Path for the text file with top motifs (pas).\",\n )\n return parser\n\n\n##############################################################################\n\n\ndef main():\n \"\"\"Main body of the script.\"\"\"\n\n df = pd.read_csv(options.results_3ss, sep=\"\\t\", index_col=0)\n df = df[df[\"significance-marker\"]]\n motifs = []\n for ID, row in df.iterrows():\n if len(motifs) == int(options.N):\n break\n m = ID.split(\"|\")[-1]\n if m not in motifs:\n motifs.append(m)\n with open(options.motifs_3ss, \"w\") as f:\n for m in motifs:\n f.write(m + os.linesep)\n\n df = pd.read_csv(options.results_5ss, sep=\"\\t\", index_col=0)\n df = df[df[\"significance-marker\"]]\n motifs = []\n for ID, row in df.iterrows():\n if len(motifs) == int(options.N):\n break\n m = ID.split(\"|\")[-1]\n if m not in motifs:\n motifs.append(m)\n with open(options.motifs_5ss, \"w\") as f:\n for m in motifs:\n f.write(m + os.linesep)\n\n df = pd.read_csv(options.results_pas, sep=\"\\t\", index_col=0)\n df = df[df[\"significance-marker\"]]\n motifs = []\n for ID, row in df.iterrows():\n if len(motifs) == int(options.N):\n break\n m = ID.split(\"|\")[-1]\n if m not in motifs:\n motifs.append(m)\n with open(options.motifs_pas, \"w\") as f:\n for m in motifs:\n f.write(m + 
os.linesep)\n\n\n##############################################################################\n\nif __name__ == \"__main__\":\n\n try:\n # parse the command-line arguments\n options = parse_arguments().parse_args()\n\n # set up logging during the execution\n formatter = logging.Formatter(\n fmt=\"[%(asctime)s] %(levelname)s - %(message)s\",\n datefmt=\"%d-%b-%Y %H:%M:%S\",\n )\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n logger = logging.getLogger(\"logger\")\n logger.setLevel(logging.getLevelName(options.verbosity))\n logger.addHandler(console_handler)\n if options.logfile is not None:\n logfile_handler = logging.handlers.RotatingFileHandler(\n options.logfile, maxBytes=50000, backupCount=2\n )\n logfile_handler.setFormatter(formatter)\n logger.addHandler(logfile_handler)\n\n # execute the body of the script\n start_time = time.time()\n logger.info(\"Starting script\")\n main()\n seconds = time.time() - start_time\n\n # log the execution time\n minutes, seconds = divmod(seconds, 60)\n hours, minutes = divmod(minutes, 60)\n logger.info(\n \"Successfully finished in {hours}h:{minutes}m:{seconds}s\",\n hours=int(hours),\n minutes=int(minutes),\n seconds=int(seconds) if seconds > 1.0 else 1,\n )\n # log the exception in case it happens\n except Exception as e:\n logger.exception(str(e))\n raise e\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
KevBarbour/cryptobot | [
"57239c83ca5dd84d2a0e273f20782cf608ce99ba"
] | [
"top10losers/top10losers.py"
] | [
"#!/usr/bin/env python\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nfrom twython import Twython\nimport numpy as np\n\napiKey = '...'\napiSecret = '...'\naccessToken = '...'\naccessTokenSecret = '...'\n\n#BeautifulSoup scraping algorythm\nurl = 'https://coinmarketcap.com'\nsoup = BeautifulSoup(requests.get(url).text, 'lxml')\nL=[]\n#H =[\"Rank\",\"Name\",\"M Cap\",\"$/1\", \"HURR\", \"DURR\", \"24 hr\"] \nF=0\n\nfor tr in soup.select('#currencies tr'):\n if not tr.select('td'):\n continue\n\n for i, td in enumerate(tr.select('td')[:7]) :\n txt = td.text.replace('\\n',' ').replace('*', '').replace('%','').replace('.com','').replace('chain','').replace('coin','').strip()\n L.append(txt)\n \n #dictates how many lines will be read\n F=F+1 \n if F>99:\n break\n \n #reshapes array to only include necessary columns and re orders them\nA = np.reshape(L, (100,7)) \nPerm = [1,3,6,2,4,5,0]\nA = A[:, Perm]\nA = np.delete(A, (1,3,4,5,6), 1)\n\n#sorting array based on percent change\nA = sorted(A,key=lambda x: (float(x[1])))\nA = A[:10]\n\n#write table to a python file and re reads it, possibly poor method\nwith open(\"output10losers.txt\", \"w\") as txt_file:\n for line in A:\n txt_file.write(\"#\" + \" \".join(line) + \"%\" + \"\\n\" )\n\nT = open(\"output10losers.txt\", \"r\")\n\nfinaltweet = T.read()\ntweetStr = \"Top 10 #Crypto Losers 24hrs:\" + \"\\n\" + finaltweet\n\n#twitter API commands\napi = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)\napi.update_status(status=tweetStr)\nprint(\"Tweeted: \" + tweetStr)\n\n"
] | [
[
"numpy.reshape",
"numpy.delete"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
threefoldo/allennlp | [
"9fcc79566cc148cce9f967a7962ac03bc300f011"
] | [
"allennlp/nn/util.py"
] | [
"\"\"\"\nAssorted utilities for working with neural networks in AllenNLP.\n\"\"\"\n# pylint: disable=too-many-lines\nfrom collections import defaultdict\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar\nimport logging\nimport math\nimport warnings\n\nimport torch\n\nfrom allennlp.common.checks import ConfigurationError\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nT = TypeVar('T')\n\n\ndef has_tensor(obj) -> bool:\n \"\"\"\n Given a possibly complex data structure,\n check if it has any torch.Tensors in it.\n \"\"\"\n if isinstance(obj, torch.Tensor):\n return True\n elif isinstance(obj, dict):\n return any(has_tensor(value) for value in obj.values())\n elif isinstance(obj, (list, tuple)):\n return any(has_tensor(item) for item in obj)\n else:\n return False\n\n\ndef move_to_device(obj, cuda_device: int):\n \"\"\"\n Given a structure (possibly) containing Tensors on the CPU,\n move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).\n \"\"\"\n if cuda_device < 0 or not has_tensor(obj):\n return obj\n elif isinstance(obj, torch.Tensor):\n return obj.cuda(cuda_device)\n elif isinstance(obj, dict):\n return {key: move_to_device(value, cuda_device) for key, value in obj.items()}\n elif isinstance(obj, list):\n return [move_to_device(item, cuda_device) for item in obj]\n elif isinstance(obj, tuple):\n return tuple([move_to_device(item, cuda_device) for item in obj])\n else:\n return obj\n\n\ndef batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],\n remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:\n \"\"\"\n Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,\n and returns a single dictionary with all tensors with the same key batched together.\n\n Parameters\n ----------\n tensor_dicts : ``List[Dict[str, torch.Tensor]]``\n The list of tensor dictionaries to batch.\n remove_trailing_dimension : ``bool``\n If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being\n batched, and remove it if we find it.\n \"\"\"\n key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)\n for tensor_dict in tensor_dicts:\n for key, tensor in tensor_dict.items():\n key_to_tensors[key].append(tensor)\n batched_tensors = {}\n for key, tensor_list in key_to_tensors.items():\n batched_tensor = torch.stack(tensor_list)\n if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):\n batched_tensor = batched_tensor.squeeze(-1)\n batched_tensors[key] = batched_tensor\n return batched_tensors\n\n\ndef get_lengths_from_binary_sequence_mask(mask: torch.Tensor):\n \"\"\"\n Compute sequence lengths for each batch element in a tensor using a\n binary mask.\n\n Parameters\n ----------\n mask : torch.Tensor, required.\n A 2D binary mask of shape (batch_size, sequence_length) to\n calculate the per-batch sequence lengths from.\n\n Returns\n -------\n A torch.LongTensor of shape (batch_size,) representing the lengths\n of the sequences in the batch.\n \"\"\"\n return mask.long().sum(-1)\n\n\ndef get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:\n \"\"\"\n Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch\n element, this function returns a ``(batch_size, max_length)`` mask variable. 
For example, if\n our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return\n ``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.\n\n We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``\n because it lets us avoid finding the max, then copying that value from the GPU to the CPU so\n that we can use it to construct a new tensor.\n \"\"\"\n # (batch_size, max_length)\n ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)\n range_tensor = ones.cumsum(dim=1)\n return (sequence_lengths.unsqueeze(1) >= range_tensor).long()\n\n\ndef sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):\n \"\"\"\n Sort a batch first tensor by some specified lengths.\n\n Parameters\n ----------\n tensor : torch.FloatTensor, required.\n A batch first Pytorch tensor.\n sequence_lengths : torch.LongTensor, required.\n A tensor representing the lengths of some dimension of the tensor which\n we want to sort by.\n\n Returns\n -------\n sorted_tensor : torch.FloatTensor\n The original tensor sorted along the batch dimension with respect to sequence_lengths.\n sorted_sequence_lengths : torch.LongTensor\n The original sequence_lengths sorted by decreasing size.\n restoration_indices : torch.LongTensor\n Indices into the sorted_tensor such that\n ``sorted_tensor.index_select(0, restoration_indices) == original_tensor``\n permuation_index : torch.LongTensor\n The indices used to sort the tensor. This is useful if you want to sort many\n tensors using the same ordering.\n \"\"\"\n\n if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):\n raise ConfigurationError(\"Both the tensor and sequence lengths must be torch.Tensors.\")\n\n sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)\n sorted_tensor = tensor.index_select(0, permutation_index)\n\n index_range = sequence_lengths.new_tensor(torch.arange(0, len(sequence_lengths)))\n # This is the equivalent of zipping with index, sorting by the original\n # sequence lengths and returning the now sorted indices.\n _, reverse_mapping = permutation_index.sort(0, descending=False)\n restoration_indices = index_range.index_select(0, reverse_mapping)\n return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index\n\n\ndef get_final_encoder_states(encoder_outputs: torch.Tensor,\n mask: torch.Tensor,\n bidirectional: bool = False) -> torch.Tensor:\n \"\"\"\n Given the output from a ``Seq2SeqEncoder``, with shape ``(batch_size, sequence_length,\n encoding_dim)``, this method returns the final hidden state for each element of the batch,\n giving a tensor of shape ``(batch_size, encoding_dim)``. This is not as simple as\n ``encoder_outputs[:, -1]``, because the sequences could have different lengths. We use the\n mask (which has shape ``(batch_size, sequence_length)``) to find the final state for each batch\n instance.\n\n Additionally, if ``bidirectional`` is ``True``, we will split the final dimension of the\n ``encoder_outputs`` into two and assume that the first half is for the forward direction of the\n encoder and the second half is for the backward direction. We will concatenate the last state\n for each encoder dimension, giving ``encoder_outputs[:, -1, :encoding_dim/2]`` concated with\n ``encoder_outputs[:, 0, encoding_dim/2:]``.\n \"\"\"\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). 
We\n # are assuming sequences are right padded.\n # Shape: (batch_size,)\n last_word_indices = mask.sum(1).long() - 1\n batch_size, _, encoder_output_dim = encoder_outputs.size()\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\n # Shape: (batch_size, 1, encoder_output_dim)\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\n if bidirectional:\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\n return final_encoder_output\n\n\ndef get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):\n \"\"\"\n Computes and returns an element-wise dropout mask for a given tensor, where\n each element in the mask is dropped out with probability dropout_probability.\n Note that the mask is NOT applied to the tensor - the tensor is passed to retain\n the correct CUDA tensor type for the mask.\n\n Parameters\n ----------\n dropout_probability : float, required.\n Probability of dropping a dimension of the input.\n tensor_for_masking : torch.Tensor, required.\n\n\n Returns\n -------\n A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).\n This scaling ensures expected values and variances of the output of applying this mask\n and the original tensor are the same.\n \"\"\"\n binary_mask = tensor_for_masking.new_tensor(torch.rand(tensor_for_masking.size()) > dropout_probability)\n # Scale mask by 1/keep_prob to preserve output statistics.\n dropout_mask = binary_mask.float().div(1.0 - dropout_probability)\n return dropout_mask\n\n\ndef masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:\n \"\"\"\n ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be\n masked. This performs a softmax on just the non-masked portions of ``vector``. Passing\n ``None`` in for the mask is also acceptable; you'll just get a regular softmax.\n\n ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is\n broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will\n unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,\n do it yourself before passing the mask into this function.\n\n In the case that the input vector is completely masked, this function returns an array\n of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model\n that uses categorical cross-entropy loss.\n \"\"\"\n if mask is None:\n result = torch.nn.functional.softmax(vector, dim=dim)\n else:\n mask = mask.float()\n while mask.dim() < vector.dim():\n mask = mask.unsqueeze(1)\n # To limit numerical errors from large vector elements outside the mask, we zero these out.\n result = torch.nn.functional.softmax(vector * mask, dim=dim)\n result = result * mask\n result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)\n return result\n\n\ndef masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:\n \"\"\"\n ``torch.nn.functional.log_softmax(vector)`` does not work if some elements of ``vector`` should be\n masked. This performs a log_softmax on just the non-masked portions of ``vector``. 
Passing\n ``None`` in for the mask is also acceptable; you'll just get a regular log_softmax.\n\n ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is\n broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will\n unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,\n do it yourself before passing the mask into this function.\n\n In the case that the input vector is completely masked, the return value of this function is\n arbitrary, but not ``nan``. You should be masking the result of whatever computation comes out\n of this in that case, anyway, so the specific values returned shouldn't matter. Also, the way\n that we deal with this case relies on having single-precision floats; mixing half-precision\n floats with fully-masked vectors will likely give you ``nans``.\n\n If your logits are all extremely negative (i.e., the max value in your logit vector is -50 or\n lower), the way we handle masking here could mess you up. But if you've got logit values that\n extreme, you've got bigger problems than this.\n \"\"\"\n if mask is not None:\n mask = mask.float()\n while mask.dim() < vector.dim():\n mask = mask.unsqueeze(1)\n # vector + mask.log() is an easy way to zero out masked elements in logspace, but it\n # results in nans when the whole vector is masked. We need a very small value instead of a\n # zero in the mask for these cases. log(1 + 1e-45) is still basically 0, so we can safely\n # just add 1e-45 before calling mask.log(). We use 1e-45 because 1e-46 is so small it\n # becomes 0 - this is just the smallest value we can actually use.\n vector = vector + (mask + 1e-45).log()\n return torch.nn.functional.log_softmax(vector, dim=dim)\n\n\ndef masked_max(vector: torch.Tensor,\n mask: torch.Tensor,\n dim: int,\n keepdim: bool = False,\n min_val: float = -1e7) -> torch.Tensor:\n \"\"\"\n To calculate max along certain dimensions on masked values\n\n Parameters\n ----------\n vector : ``torch.Tensor``\n The vector to calculate max, assume unmasked parts are already zeros\n mask : ``torch.Tensor``\n The mask of the vector. It must be broadcastable with vector.\n dim : ``int``\n The dimension to calculate max\n keepdim : ``bool``\n Whether to keep dimension\n min_val : ``float``\n The minimal value for paddings\n\n Returns\n -------\n A ``torch.Tensor`` of including the maximum values.\n \"\"\"\n one_minus_mask = (1.0 - mask).byte()\n replaced_vector = vector.masked_fill(one_minus_mask, min_val)\n max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)\n return max_value\n\n\ndef masked_mean(vector: torch.Tensor,\n mask: torch.Tensor,\n dim: int,\n keepdim: bool = False,\n eps: float = 1e-8) -> torch.Tensor:\n \"\"\"\n To calculate mean along certain dimensions on masked values\n\n Parameters\n ----------\n vector : ``torch.Tensor``\n The vector to calculate mean.\n mask : ``torch.Tensor``\n The mask of the vector. 
It must be broadcastable with vector.\n dim : ``int``\n The dimension to calculate mean\n keepdim : ``bool``\n Whether to keep dimension\n eps : ``float``\n A small value to avoid zero division problem.\n\n Returns\n -------\n A ``torch.Tensor`` of including the mean values.\n \"\"\"\n one_minus_mask = (1.0 - mask).byte()\n replaced_vector = vector.masked_fill(one_minus_mask, 0.0)\n\n value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)\n value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim)\n return value_sum / value_count.clamp(min=eps)\n\n\ndef viterbi_decode(tag_sequence: torch.Tensor,\n transition_matrix: torch.Tensor,\n tag_observations: Optional[List[int]] = None):\n \"\"\"\n Perform Viterbi decoding in log space over a sequence given a transition matrix\n specifying pairwise (transition) potentials between tags and a matrix of shape\n (sequence_length, num_tags) specifying unary potentials for possible tags per\n timestep.\n\n Parameters\n ----------\n tag_sequence : torch.Tensor, required.\n A tensor of shape (sequence_length, num_tags) representing scores for\n a set of tags over a given sequence.\n transition_matrix : torch.Tensor, required.\n A tensor of shape (num_tags, num_tags) representing the binary potentials\n for transitioning between a given pair of tags.\n tag_observations : Optional[List[int]], optional, (default = None)\n A list of length ``sequence_length`` containing the class ids of observed\n elements in the sequence, with unobserved elements being set to -1. Note that\n it is possible to provide evidence which results in degenerate labellings if\n the sequences of tags you provide as evidence cannot transition between each\n other, or those transitions are extremely unlikely. In this situation we log a\n warning, but the responsibility for providing self-consistent evidence ultimately\n lies with the user.\n\n Returns\n -------\n viterbi_path : List[int]\n The tag indices of the maximum likelihood tag sequence.\n viterbi_score : torch.Tensor\n The score of the viterbi path.\n \"\"\"\n sequence_length, num_tags = list(tag_sequence.size())\n if tag_observations:\n if len(tag_observations) != sequence_length:\n raise ConfigurationError(\"Observations were provided, but they were not the same length \"\n \"as the sequence. Found sequence of length: {} and evidence: {}\"\n .format(sequence_length, tag_observations))\n else:\n tag_observations = [-1 for _ in range(sequence_length)]\n\n path_scores = []\n path_indices = []\n\n if tag_observations[0] != -1:\n one_hot = torch.zeros(num_tags)\n one_hot[tag_observations[0]] = 100000.\n path_scores.append(one_hot)\n else:\n path_scores.append(tag_sequence[0, :])\n\n # Evaluate the scores for all possible paths.\n for timestep in range(1, sequence_length):\n # Add pairwise potentials to current scores.\n summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix\n scores, paths = torch.max(summed_potentials, 0)\n\n # If we have an observation for this timestep, use it\n # instead of the distribution over tags.\n observation = tag_observations[timestep]\n # Warn the user if they have passed\n # invalid/extremely unlikely evidence.\n if tag_observations[timestep - 1] != -1:\n if transition_matrix[tag_observations[timestep - 1], observation] < -10000:\n logger.warning(\"The pairwise potential between tags you have passed as \"\n \"observations is extremely unlikely. 
Double check your evidence \"\n \"or transition potentials!\")\n if observation != -1:\n one_hot = torch.zeros(num_tags)\n one_hot[observation] = 100000.\n path_scores.append(one_hot)\n else:\n path_scores.append(tag_sequence[timestep, :] + scores.squeeze())\n path_indices.append(paths.squeeze())\n\n # Construct the most likely sequence backwards.\n viterbi_score, best_path = torch.max(path_scores[-1], 0)\n viterbi_path = [int(best_path.numpy())]\n for backward_timestep in reversed(path_indices):\n viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))\n # Reverse the backward path.\n viterbi_path.reverse()\n return viterbi_path, viterbi_score\n\n\ndef get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],\n num_wrapping_dims: int = 0) -> torch.LongTensor:\n \"\"\"\n Takes the dictionary of tensors produced by a ``TextField`` and returns a mask\n with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields``\n wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields``\n is given by ``num_wrapping_dims``.\n\n If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``.\n If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra\n dimensions, so the shape will be ``(batch_size, ..., num_tokens)``.\n\n There could be several entries in the tensor dictionary with different shapes (e.g., one for\n word ids, one for character ids). In order to get a token mask, we use the tensor in\n the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``,\n if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``,\n and use it for the mask. If instead it has three dimensions, we assume it has shape\n ``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce\n the mask. Most frequently this will be a character id tensor, but it could also be a\n featurized representation of each token, etc.\n\n If the input ``text_field_tensors`` contains the \"mask\" key, this is returned instead of inferring the mask.\n\n TODO(joelgrus): can we change this?\n NOTE: Our functions for generating masks create torch.LongTensors, because using\n torch.ByteTensors makes it easy to run into overflow errors\n when doing mask manipulation, such as summing to get the lengths of sequences - see below.\n >>> mask = torch.ones([260]).byte()\n >>> mask.sum() # equals 260.\n >>> var_mask = torch.autograd.V(mask)\n >>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.\n \"\"\"\n if \"mask\" in text_field_tensors:\n return text_field_tensors[\"mask\"]\n\n tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]\n tensor_dims.sort(key=lambda x: x[0])\n\n smallest_dim = tensor_dims[0][0] - num_wrapping_dims\n if smallest_dim == 2:\n token_tensor = tensor_dims[0][1]\n return (token_tensor != 0).long()\n elif smallest_dim == 3:\n character_tensor = tensor_dims[0][1]\n return ((character_tensor > 0).long().sum(dim=-1) > 0).long()\n else:\n raise ValueError(\"Expected a tensor with dimension 2 or 3, found {}\".format(smallest_dim))\n\n\ndef last_dim_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Takes a tensor with 3 or more dimensions and does a masked softmax over the last dimension. 
We\n assume the tensor has shape ``(batch_size, ..., sequence_length)`` and that the mask (if given)\n has shape ``(batch_size, sequence_length)``.\n\n .. deprecated:: 0.6.1\n ``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` in version\n 0.6.1. It will be removed in version 0.8.\n \"\"\"\n warnings.warn(\"``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` \"\n \"in version 0.6.1. It will be removed in version 0.8.\", DeprecationWarning)\n return masked_softmax(tensor, mask, dim=-1)\n\n\ndef last_dim_log_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Takes a tensor with 3 or more dimensions and does a masked log softmax over the last dimension.\n We assume the tensor has shape ``(batch_size, ..., sequence_length)`` and that the mask (if given)\n has shape ``(batch_size, sequence_length)``.\n\n .. deprecated:: 0.6.1\n ``last_dim_log_softmax`` was deprecated in favor of just using ``masked_log_softmax`` in\n version 0.6.1. It will be removed in version 0.8.\n \"\"\"\n warnings.warn(\"``last_dim_log_softmax`` was deprecated in favor of just using \"\n \"``masked_log_softmax`` in version 0.6.1. It will be removed in version 0.8.\",\n DeprecationWarning)\n return masked_log_softmax(tensor, mask, dim=-1)\n\n\ndef weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an\n \"attention\" vector), and returns a weighted sum of the rows in the matrix. This is the typical\n computation performed after an attention mechanism.\n\n Note that while we call this a \"matrix\" of vectors and an attention \"vector\", we also handle\n higher-order tensors. We always sum over the second-to-last dimension of the \"matrix\", and we\n assume that all dimensions in the \"matrix\" prior to the last dimension are matched in the\n \"vector\". Non-matched dimensions in the \"vector\" must be `directly after the batch dimension`.\n\n For example, say I have a \"matrix\" with dimensions ``(batch_size, num_queries, num_words,\n embedding_dim)``. The attention \"vector\" then must have at least those dimensions, and could\n have more. 
Both:\n\n - ``(batch_size, num_queries, num_words)`` (distribution over words for each query)\n - ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a\n query for each document)\n\n are valid input \"vectors\", producing tensors of shape:\n ``(batch_size, num_queries, embedding_dim)`` and\n ``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.\n \"\"\"\n # We'll special-case a few settings here, where there are efficient (but poorly-named)\n # operations in pytorch that already do the computation we need.\n if attention.dim() == 2 and matrix.dim() == 3:\n return attention.unsqueeze(1).bmm(matrix).squeeze(1)\n if attention.dim() == 3 and matrix.dim() == 3:\n return attention.bmm(matrix)\n if matrix.dim() - 1 < attention.dim():\n expanded_size = list(matrix.size())\n for i in range(attention.dim() - matrix.dim() + 1):\n matrix = matrix.unsqueeze(1)\n expanded_size.insert(i + 1, attention.size(i + 1))\n matrix = matrix.expand(*expanded_size)\n intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix\n return intermediate.sum(dim=-2)\n\n\ndef sequence_cross_entropy_with_logits(logits: torch.FloatTensor,\n targets: torch.LongTensor,\n weights: torch.FloatTensor,\n batch_average: bool = None,\n average: str = \"batch\",\n label_smoothing: float = None) -> torch.FloatTensor:\n \"\"\"\n Computes the cross entropy loss of a sequence, weighted with respect to\n some user provided weights. Note that the weighting here is not the same as\n in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting\n classes; here we are weighting the loss contribution from particular elements\n in the sequence. This allows loss computations for models which use padding.\n\n Parameters\n ----------\n logits : ``torch.FloatTensor``, required.\n A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)\n which contains the unnormalized probability for each class.\n targets : ``torch.LongTensor``, required.\n A ``torch.LongTensor`` of size (batch, sequence_length) which contains the\n index of the true class for each corresponding step.\n weights : ``torch.FloatTensor``, required.\n A ``torch.FloatTensor`` of size (batch, sequence_length)\n batch_average : bool, optional, (default = None).\n A bool indicating whether the loss should be averaged across the batch,\n or returned as a vector of losses per batch element.\n\n .. deprecated:: 0.6.2\n ``batch_average`` was deprecated and replaced with\n the more general ``average`` in version 0.6.2. It will be removed\n in version 0.8.\n\n average: str, optional (default = \"batch\")\n If \"batch\", average the loss across the batches. If \"token\", average\n the loss across each item in the input. If ``None``, return a vector\n of losses per batch element.\n label_smoothing : ``float``, optional (default = None)\n Whether or not to apply label smoothing to the cross-entropy loss.\n For example, with a label smoothing value of 0.2, a 4 class classifcation\n target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was\n the correct label.\n\n Returns\n -------\n A torch.FloatTensor representing the cross entropy loss.\n If ``average==\"batch\"`` or ``average==\"token\"``, the returned loss is a scalar.\n If ``average is None``, the returned loss is a vector of shape (batch_size,).\n\n \"\"\"\n if batch_average is not None:\n # Maintain old behavior\n if batch_average:\n warnings.warn(\"batch_average=True was deprecated and replaced \"\n \"with average='batch' in version 0.6.2. 
It will be \"\n \"removed in version 0.8.\", DeprecationWarning)\n average = \"batch\"\n else:\n warnings.warn(\"batch_average=False was deprecated and replaced \"\n \"with average=None in version 0.6.2. It will be \"\n \"removed in version 0.8.\", DeprecationWarning)\n average = None\n if average not in {None, \"token\", \"batch\"}:\n raise ValueError(\"Got average f{average}, expected one of \"\n \"None, 'token', or 'batch'\")\n\n # shape : (batch * sequence_length, num_classes)\n logits_flat = logits.view(-1, logits.size(-1))\n # shape : (batch * sequence_length, num_classes)\n log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)\n # shape : (batch * max_len, 1)\n targets_flat = targets.view(-1, 1).long()\n\n if label_smoothing is not None and label_smoothing > 0.0:\n num_classes = logits.size(-1)\n smoothing_value = label_smoothing / num_classes\n # Fill all the correct indices with 1 - smoothing value.\n one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)\n smoothed_targets = one_hot_targets + smoothing_value\n negative_log_likelihood_flat = - log_probs_flat * smoothed_targets\n negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)\n else:\n # Contribution to the negative log likelihood only comes from the exact indices\n # of the targets, as the target distributions are one-hot. Here we use torch.gather\n # to extract the indices of the num_classes dimension which contribute to the loss.\n # shape : (batch * sequence_length, 1)\n negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)\n # shape : (batch, sequence_length)\n negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())\n # shape : (batch, sequence_length)\n negative_log_likelihood = negative_log_likelihood * weights.float()\n\n if average == \"batch\":\n # shape : (batch_size,)\n per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)\n num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)\n return per_batch_loss.sum() / num_non_empty_sequences\n elif average == \"token\":\n return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)\n else:\n # shape : (batch_size,)\n per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)\n return per_batch_loss\n\n\ndef replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:\n \"\"\"\n Replaces all masked values in ``tensor`` with ``replace_with``. ``mask`` must be broadcastable\n to the same shape as ``tensor``. We require that ``tensor.dim() == mask.dim()``, as otherwise we\n won't know which dimensions of the mask to unsqueeze.\n\n This just does ``tensor.masked_fill()``, except the pytorch method fills in things with a mask\n value of 1, where we want the opposite. You can do this in your own code with\n ``tensor.masked_fill((1 - mask).byte(), replace_with)``.\n \"\"\"\n if tensor.dim() != mask.dim():\n raise ConfigurationError(\"tensor.dim() (%d) != mask.dim() (%d)\" % (tensor.dim(), mask.dim()))\n return tensor.masked_fill((1 - mask).byte(), replace_with)\n\n\ndef tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:\n \"\"\"\n A check for tensor equality (by value). We make sure that the tensors have the same shape,\n then check all of the entries in the tensor for equality. 
We additionally allow the input\n tensors to be lists or dictionaries, where we then do the above check on every position in the\n list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we\n just defer to their equality check.\n\n This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods\n easier, in a way that's really only intended to be useful for tests.\n \"\"\"\n # pylint: disable=too-many-return-statements\n if isinstance(tensor1, (list, tuple)):\n if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):\n return False\n return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])\n elif isinstance(tensor1, dict):\n if not isinstance(tensor2, dict):\n return False\n if tensor1.keys() != tensor2.keys():\n return False\n return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])\n elif isinstance(tensor1, torch.Tensor):\n if not isinstance(tensor2, torch.Tensor):\n return False\n if tensor1.size() != tensor2.size():\n return False\n return ((tensor1 - tensor2).abs().float() < tolerance).all()\n else:\n try:\n return tensor1 == tensor2\n except RuntimeError:\n print(type(tensor1), type(tensor2))\n raise\n\n\ndef device_mapping(cuda_device: int):\n \"\"\"\n In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),\n you have to supply a `map_location` function. Call this with\n the desired `cuda_device` to get the function that `torch.load()` needs.\n \"\"\"\n\n def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage: # pylint: disable=unused-argument\n if cuda_device >= 0:\n return storage.cuda(cuda_device)\n else:\n return storage\n\n return inner_device_mapping\n\n\ndef combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:\n \"\"\"\n Combines a list of tensors using element-wise operations and concatenation, specified by a\n ``combination`` string. The string refers to (1-indexed) positions in the input tensor list,\n and looks like ``\"1,2,1+2,3-1\"``.\n\n We allow the following kinds of combinations: ``x``, ``x*y``, ``x+y``, ``x-y``, and ``x/y``,\n where ``x`` and ``y`` are positive integers less than or equal to ``len(tensors)``. Each of\n the binary operations is performed elementwise. You can give as many combinations as you want\n in the ``combination`` string. For example, for the input string ``\"1,2,1*2\"``, the result\n would be ``[1;2;1*2]``, as you would expect, where ``[;]`` is concatenation along the last\n dimension.\n\n If you have a fixed, known way to combine tensors that you use in a model, you should probably\n just use something like ``torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])``. 
This\n function adds some complexity that is only necessary if you want the specific combination used\n to be `configurable`.\n\n If you want to do any element-wise operations, the tensors involved in each element-wise\n operation must have the same shape.\n\n This function also accepts ``x`` and ``y`` in place of ``1`` and ``2`` in the combination\n string.\n \"\"\"\n if len(tensors) > 9:\n raise ConfigurationError(\"Double-digit tensor lists not currently supported\")\n combination = combination.replace('x', '1').replace('y', '2')\n to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(',')]\n return torch.cat(to_concatenate, dim=-1)\n\n\ndef _rindex(sequence: Sequence[T], obj: T) -> int:\n \"\"\"\n Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a\n ValueError if there is no such item.\n\n Parameters\n ----------\n sequence : ``Sequence[T]``\n obj : ``T``\n\n Returns\n -------\n zero-based index associated to the position of the last item equal to obj\n \"\"\"\n for i in range(len(sequence) - 1, -1, -1):\n if sequence[i] == obj:\n return i\n\n raise ValueError(f\"Unable to find {obj} in sequence {sequence}.\")\n\n\ndef _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:\n if combination.isdigit():\n index = int(combination) - 1\n return tensors[index]\n else:\n if len(combination) != 3:\n raise ConfigurationError(\"Invalid combination: \" + combination)\n first_tensor = _get_combination(combination[0], tensors)\n second_tensor = _get_combination(combination[2], tensors)\n operation = combination[1]\n if operation == '*':\n return first_tensor * second_tensor\n elif operation == '/':\n return first_tensor / second_tensor\n elif operation == '+':\n return first_tensor + second_tensor\n elif operation == '-':\n return first_tensor - second_tensor\n else:\n raise ConfigurationError(\"Invalid operation: \" + operation)\n\n\ndef combine_tensors_and_multiply(combination: str,\n tensors: List[torch.Tensor],\n weights: torch.nn.Parameter) -> torch.Tensor:\n \"\"\"\n Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.\n This is a separate function from ``combine_tensors`` because we try to avoid instantiating\n large intermediate tensors during the combination, which is possible because we know that we're\n going to be multiplying by a weight vector in the end.\n\n Parameters\n ----------\n combination : ``str``\n Same as in :func:`combine_tensors`\n tensors : ``List[torch.Tensor]``\n A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)\n positions in this list of tensors. These tensors are all expected to have either three or\n four dimensions, with the final dimension being an embedding. If there are four\n dimensions, one of them must have length 1.\n weights : ``torch.nn.Parameter``\n A vector of weights to use for the combinations. 
This should have shape (combined_dim,),\n as calculated by :func:`get_combined_dim`.\n \"\"\"\n if len(tensors) > 9:\n raise ConfigurationError(\"Double-digit tensor lists not currently supported\")\n combination = combination.replace('x', '1').replace('y', '2')\n pieces = combination.split(',')\n tensor_dims = [tensor.size(-1) for tensor in tensors]\n combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]\n dims_so_far = 0\n to_sum = []\n for piece, combination_dim in zip(pieces, combination_dims):\n weight = weights[dims_so_far:(dims_so_far + combination_dim)]\n dims_so_far += combination_dim\n to_sum.append(_get_combination_and_multiply(piece, tensors, weight))\n result = to_sum[0]\n for result_piece in to_sum[1:]:\n result = result + result_piece\n return result\n\n\ndef _get_combination_and_multiply(combination: str,\n tensors: List[torch.Tensor],\n weight: torch.nn.Parameter) -> torch.Tensor:\n if combination.isdigit():\n index = int(combination) - 1\n return torch.matmul(tensors[index], weight)\n else:\n if len(combination) != 3:\n raise ConfigurationError(\"Invalid combination: \" + combination)\n first_tensor = _get_combination(combination[0], tensors)\n second_tensor = _get_combination(combination[2], tensors)\n operation = combination[1]\n if operation == '*':\n if first_tensor.dim() > 4 or second_tensor.dim() > 4:\n raise ValueError(\"Tensors with dim > 4 not currently supported\")\n if first_tensor.dim() == 4:\n expanded_dim = _rindex(first_tensor.size(), 1)\n first_tensor = first_tensor.squeeze(expanded_dim)\n if second_tensor.dim() == 4:\n expanded_dim = _rindex(second_tensor.size(), 1)\n second_tensor = second_tensor.squeeze(expanded_dim)\n intermediate = first_tensor * weight\n return torch.matmul(intermediate, second_tensor.transpose(-1, -2)).squeeze(-1)\n elif operation == '/':\n if first_tensor.dim() > 4 or second_tensor.dim() > 4:\n raise ValueError(\"Tensors with dim > 4 not currently supported\")\n if first_tensor.dim() == 4:\n expanded_dim = _rindex(first_tensor.size(), 1)\n first_tensor = first_tensor.squeeze(expanded_dim)\n if second_tensor.dim() == 4:\n expanded_dim = _rindex(second_tensor.size(), 1)\n second_tensor = second_tensor.squeeze(expanded_dim)\n intermediate = first_tensor * weight\n return torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2)).squeeze(-1)\n elif operation == '+':\n return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)\n elif operation == '-':\n return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)\n else:\n raise ConfigurationError(\"Invalid operation: \" + operation)\n\n\ndef get_combined_dim(combination: str, tensor_dims: List[int]) -> int:\n \"\"\"\n For use with :func:`combine_tensors`. This function computes the resultant dimension when\n calling ``combine_tensors(combination, tensors)``, when the tensor dimension is known. 
This is\n necessary for knowing the sizes of weight matrices when building models that use\n ``combine_tensors``.\n\n Parameters\n ----------\n combination : ``str``\n A comma-separated list of combination pieces, like ``\"1,2,1*2\"``, specified identically to\n ``combination`` in :func:`combine_tensors`.\n tensor_dims : ``List[int]``\n A list of tensor dimensions, where each dimension is from the `last axis` of the tensors\n that will be input to :func:`combine_tensors`.\n \"\"\"\n if len(tensor_dims) > 9:\n raise ConfigurationError(\"Double-digit tensor lists not currently supported\")\n combination = combination.replace('x', '1').replace('y', '2')\n return sum([_get_combination_dim(piece, tensor_dims) for piece in combination.split(',')])\n\n\ndef _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:\n if combination.isdigit():\n index = int(combination) - 1\n return tensor_dims[index]\n else:\n if len(combination) != 3:\n raise ConfigurationError(\"Invalid combination: \" + combination)\n first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)\n second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)\n operation = combination[1]\n if first_tensor_dim != second_tensor_dim:\n raise ConfigurationError(\"Tensor dims must match for operation \\\"{}\\\"\".format(operation))\n return first_tensor_dim\n\n\ndef logsumexp(tensor: torch.Tensor,\n dim: int = -1,\n keepdim: bool = False) -> torch.Tensor:\n \"\"\"\n A numerically stable computation of logsumexp. This is mathematically equivalent to\n `tensor.exp().sum(dim, keep=keepdim).log()`. This function is typically used for summing log\n probabilities.\n\n Parameters\n ----------\n tensor : torch.FloatTensor, required.\n A tensor of arbitrary size.\n dim : int, optional (default = -1)\n The dimension of the tensor to apply the logsumexp to.\n keepdim: bool, optional (default = False)\n Whether to retain a dimension of size one at the dimension we reduce over.\n \"\"\"\n max_score, _ = tensor.max(dim, keepdim=keepdim)\n if keepdim:\n stable_vec = tensor - max_score\n else:\n stable_vec = tensor - max_score.unsqueeze(dim)\n return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()\n\n\ndef get_device_of(tensor: torch.Tensor) -> int:\n \"\"\"\n Returns the device of the tensor.\n \"\"\"\n if not tensor.is_cuda:\n return -1\n else:\n return tensor.get_device()\n\n\ndef flatten_and_batch_shift_indices(indices: torch.Tensor,\n sequence_length: int) -> torch.Tensor:\n \"\"\"\n This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size\n ``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size\n ``(batch_size, sequence_length, embedding_size)``. This function returns a vector that\n correctly indexes into the flattened target. The sequence length of the target must be\n provided to compute the appropriate offsets.\n\n .. 
code-block:: python\n\n indices = torch.ones([2,3], dtype=torch.long)\n # Sequence length of the target tensor.\n sequence_length = 10\n shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)\n # Indices into the second element in the batch are correctly shifted\n # to take into account that the target tensor will be flattened before\n # the indices are applied.\n assert shifted_indices == [1, 1, 1, 11, 11, 11]\n\n Parameters\n ----------\n indices : ``torch.LongTensor``, required.\n sequence_length : ``int``, required.\n The length of the sequence the indices index into.\n This must be the second dimension of the tensor.\n\n Returns\n -------\n offset_indices : ``torch.LongTensor``\n \"\"\"\n # Shape: (batch_size)\n offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length\n for _ in range(len(indices.size()) - 1):\n offsets = offsets.unsqueeze(1)\n\n # Shape: (batch_size, d_1, ..., d_n)\n offset_indices = indices + offsets\n\n # Shape: (batch_size * d_1 * ... * d_n)\n offset_indices = offset_indices.view(-1)\n return offset_indices\n\n\ndef batched_index_select(target: torch.Tensor,\n indices: torch.LongTensor,\n flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:\n \"\"\"\n The given ``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into the sequence\n dimension (dimension 2) of the target, which has size ``(batch_size, sequence_length,\n embedding_size)``.\n\n This function returns selected values in the target with respect to the provided indices, which\n have size ``(batch_size, d_1, ..., d_n, embedding_size)``. This can use the optionally\n precomputed :func:`~flattened_indices` with size ``(batch_size * d_1 * ... * d_n)`` if given.\n\n An example use case of this function is looking up the start and end indices of spans in a\n sequence tensor. This is used in the\n :class:`~allennlp.models.coreference_resolution.CoreferenceResolver`. Model to select\n contextual word representations corresponding to the start and end indices of mentions. The key\n reason this can't be done with basic torch functions is that we want to be able to use look-up\n tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know\n a-priori how many spans we are looking up).\n\n Parameters\n ----------\n target : ``torch.Tensor``, required.\n A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).\n This is the tensor to be indexed.\n indices : ``torch.LongTensor``\n A tensor of shape (batch_size, ...), where each element is an index into the\n ``sequence_length`` dimension of the ``target`` tensor.\n flattened_indices : Optional[torch.Tensor], optional (default = None)\n An optional tensor representing the result of calling :func:~`flatten_and_batch_shift_indices`\n on ``indices``. This is helpful in the case that the indices can be flattened once and\n cached for many batch lookups.\n\n Returns\n -------\n selected_targets : ``torch.Tensor``\n A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices\n extracted from the batch flattened target tensor.\n \"\"\"\n if flattened_indices is None:\n # Shape: (batch_size * d_1 * ... * d_n)\n flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))\n\n # Shape: (batch_size * sequence_length, embedding_size)\n flattened_target = target.view(-1, target.size(-1))\n\n # Shape: (batch_size * d_1 * ... 
* d_n, embedding_size)\n flattened_selected = flattened_target.index_select(0, flattened_indices)\n selected_shape = list(indices.size()) + [target.size(-1)]\n # Shape: (batch_size, d_1, ..., d_n, embedding_size)\n selected_targets = flattened_selected.view(*selected_shape)\n return selected_targets\n\n\ndef flattened_index_select(target: torch.Tensor,\n indices: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n The given ``indices`` of size ``(set_size, subset_size)`` specifies subsets of the ``target``\n that each of the set_size rows should select. The `target` has size\n ``(batch_size, sequence_length, embedding_size)``, and the resulting selected tensor has size\n ``(batch_size, set_size, subset_size, embedding_size)``.\n\n Parameters\n ----------\n target : ``torch.Tensor``, required.\n A Tensor of shape (batch_size, sequence_length, embedding_size).\n indices : ``torch.LongTensor``, required.\n A LongTensor of shape (set_size, subset_size). All indices must be < sequence_length\n as this tensor is an index into the sequence_length dimension of the target.\n\n Returns\n -------\n selected : ``torch.Tensor``, required.\n A Tensor of shape (batch_size, set_size, subset_size, embedding_size).\n \"\"\"\n if indices.dim() != 2:\n raise ConfigurationError(\"Indices passed to flattened_index_select had shape {} but \"\n \"only 2 dimensional inputs are supported.\".format(indices.size()))\n # Shape: (batch_size, set_size * subset_size, embedding_size)\n flattened_selected = target.index_select(1, indices.view(-1))\n\n # Shape: (batch_size, set_size, subset_size, embedding_size)\n selected = flattened_selected.view(target.size(0), indices.size(0), indices.size(1), -1)\n return selected\n\n\ndef get_range_vector(size: int, device: int) -> torch.Tensor:\n \"\"\"\n Returns a range vector with the desired size, starting at 0. The CUDA implementation\n is meant to avoid copy data from CPU to GPU.\n \"\"\"\n if device > -1:\n return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1\n else:\n return torch.arange(0, size, dtype=torch.long)\n\n\ndef bucket_values(distances: torch.Tensor,\n num_identity_buckets: int = 4,\n num_total_buckets: int = 10) -> torch.Tensor:\n \"\"\"\n Places the given values (designed for distances) into ``num_total_buckets``semi-logscale\n buckets, with ``num_identity_buckets`` of these capturing single values.\n\n The default settings will bucket values into the following buckets:\n [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].\n\n Parameters\n ----------\n distances : ``torch.Tensor``, required.\n A Tensor of any size, to be bucketed.\n num_identity_buckets: int, optional (default = 4).\n The number of identity buckets (those only holding a single value).\n num_total_buckets : int, (default = 10)\n The total number of buckets to bucket values into.\n\n Returns\n -------\n A tensor of the same shape as the input, containing the indices of the buckets\n the values were placed in.\n \"\"\"\n # Chunk the values into semi-logscale buckets using .floor().\n # This is a semi-logscale bucketing because we divide by log(2) after taking the log.\n # We do this to make the buckets more granular in the initial range, where we expect\n # most values to fall. 
We then add (num_identity_buckets - 1) because we want these indices\n # to start _after_ the fixed number of buckets which we specified would only hold single values.\n logspace_index = (distances.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)\n # create a mask for values which will go into single number buckets (i.e not a range).\n use_identity_mask = (distances <= num_identity_buckets).long()\n use_buckets_mask = 1 + (-1 * use_identity_mask)\n # Use the original values if they are less than num_identity_buckets, otherwise\n # use the logspace indices.\n combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index\n # Clamp to put anything > num_total_buckets into the final bucket.\n return combined_index.clamp(0, num_total_buckets - 1)\n\n\ndef add_sentence_boundary_token_ids(tensor: torch.Tensor,\n mask: torch.Tensor,\n sentence_begin_token: Any,\n sentence_end_token: Any) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Add begin/end of sentence tokens to the batch of sentences.\n Given a batch of sentences with size ``(batch_size, timesteps)`` or\n ``(batch_size, timesteps, dim)`` this returns a tensor of shape\n ``(batch_size, timesteps + 2)`` or ``(batch_size, timesteps + 2, dim)`` respectively.\n\n Returns both the new tensor and updated mask.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps)`` or ``(batch_size, timesteps, dim)``\n mask : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps)``\n sentence_begin_token: Any (anything that can be broadcast in torch for assignment)\n For 2D input, a scalar with the <S> id. For 3D input, a tensor with length dim.\n sentence_end_token: Any (anything that can be broadcast in torch for assignment)\n For 2D input, a scalar with the </S> id. For 3D input, a tensor with length dim.\n\n Returns\n -------\n tensor_with_boundary_tokens : ``torch.Tensor``\n The tensor with the appended and prepended boundary tokens. 
If the input was 2D,\n it has shape (batch_size, timesteps + 2) and if the input was 3D, it has shape\n (batch_size, timesteps + 2, dim).\n new_mask : ``torch.Tensor``\n The new mask for the tensor, taking into account the appended tokens\n marking the beginning and end of the sentence.\n \"\"\"\n # TODO: matthewp, profile this transfer\n sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()\n tensor_shape = list(tensor.data.shape)\n new_shape = list(tensor_shape)\n new_shape[1] = tensor_shape[1] + 2\n tensor_with_boundary_tokens = tensor.new_zeros(*new_shape)\n if len(tensor_shape) == 2:\n tensor_with_boundary_tokens[:, 1:-1] = tensor\n tensor_with_boundary_tokens[:, 0] = sentence_begin_token\n for i, j in enumerate(sequence_lengths):\n tensor_with_boundary_tokens[i, j + 1] = sentence_end_token\n new_mask = (tensor_with_boundary_tokens != 0).long()\n elif len(tensor_shape) == 3:\n tensor_with_boundary_tokens[:, 1:-1, :] = tensor\n for i, j in enumerate(sequence_lengths):\n tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token\n tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token\n new_mask = ((tensor_with_boundary_tokens > 0).long().sum(dim=-1) > 0).long()\n else:\n raise ValueError(\"add_sentence_boundary_token_ids only accepts 2D and 3D input\")\n\n return tensor_with_boundary_tokens, new_mask\n\n\ndef remove_sentence_boundaries(tensor: torch.Tensor,\n mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Remove begin/end of sentence embeddings from the batch of sentences.\n Given a batch of sentences with size ``(batch_size, timesteps, dim)``\n this returns a tensor of shape ``(batch_size, timesteps - 2, dim)`` after removing\n the beginning and end sentence markers. The sentences are assumed to be padded on the right,\n with the beginning of each sentence assumed to occur at index 0 (i.e., ``mask[:, 0]`` is assumed\n to be 1).\n\n Returns both the new tensor and updated mask.\n\n This function is the inverse of ``add_sentence_boundary_token_ids``.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps, dim)``\n mask : ``torch.Tensor``\n A tensor of shape ``(batch_size, timesteps)``\n\n Returns\n -------\n tensor_without_boundary_tokens : ``torch.Tensor``\n The tensor after removing the boundary tokens of shape ``(batch_size, timesteps - 2, dim)``\n new_mask : ``torch.Tensor``\n The new mask for the tensor of shape ``(batch_size, timesteps - 2)``.\n \"\"\"\n # TODO: matthewp, profile this transfer\n sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()\n tensor_shape = list(tensor.data.shape)\n new_shape = list(tensor_shape)\n new_shape[1] = tensor_shape[1] - 2\n tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)\n new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.long)\n for i, j in enumerate(sequence_lengths):\n if j > 2:\n tensor_without_boundary_tokens[i, :(j - 2), :] = tensor[i, 1:(j - 1), :]\n new_mask[i, :(j - 2)] = 1\n\n return tensor_without_boundary_tokens, new_mask\n\n\ndef add_positional_features(tensor: torch.Tensor,\n min_timescale: float = 1.0,\n max_timescale: float = 1.0e4):\n # pylint: disable=line-too-long\n \"\"\"\n Implements the frequency-based positional encoding described\n in `Attention is all you Need\n <https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .\n\n Adds sinusoids of different frequencies to a ``Tensor``. 
A sinusoid of a\n different frequency and phase is added to each dimension of the input ``Tensor``.\n This allows the attention heads to use absolute and relative positions.\n\n The number of timescales is equal to hidden_dim / 2 within the range\n (min_timescale, max_timescale). For each timescale, the two sinusoidal\n signals sin(timestep / timescale) and cos(timestep / timescale) are\n generated and concatenated along the hidden_dim dimension.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``\n a Tensor with shape (batch_size, timesteps, hidden_dim).\n min_timescale : ``float``, optional (default = 1.0)\n The smallest timescale to use.\n max_timescale : ``float``, optional (default = 1.0e4)\n The largest timescale to use.\n\n Returns\n -------\n The input tensor augmented with the sinusoidal frequencies.\n \"\"\"\n _, timesteps, hidden_dim = tensor.size()\n\n timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()\n # We're generating both cos and sin frequencies,\n # so half for each.\n num_timescales = hidden_dim // 2\n timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()\n\n log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(num_timescales - 1)\n inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)\n\n # Broadcasted multiplication - shape (timesteps, num_timescales)\n scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)\n # shape (timesteps, 2 * num_timescales)\n sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)\n if hidden_dim % 2 != 0:\n # if the number of dimensions is odd, the cos and sin\n # timescales had size (hidden_dim - 1) / 2, so we need\n # to add a row of zeros to make up the difference.\n sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)\n return tensor + sinusoids.unsqueeze(0)\n"
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.zeros",
"torch.sin",
"torch.sum",
"torch.gather",
"torch.zeros_like",
"torch.cuda.LongTensor",
"torch.exp",
"torch.matmul",
"torch.arange",
"torch.stack",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
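For reference, a minimal sketch of how the masked-softmax recipe implemented in the nn.util code above behaves, using only plain torch; the tensor values and shapes are illustrative assumptions, not part of the dataset row:

    import torch

    # A (2, 4) score matrix and a 0/1 padding mask; the second row has one padded slot.
    scores = torch.tensor([[1.0, 2.0, 3.0, 4.0],
                           [2.0, 1.0, 0.5, -1.0]])
    mask = torch.tensor([[1, 1, 1, 1],
                         [1, 1, 1, 0]]).float()

    # Same steps as masked_softmax above: softmax over the masked scores,
    # zero out the masked positions, then renormalise.
    probs = torch.nn.functional.softmax(scores * mask, dim=-1)
    probs = probs * mask
    probs = probs / (probs.sum(dim=-1, keepdim=True) + 1e-13)

    print(probs.sum(dim=-1))  # each row sums to ~1.0; the padded slot gets probability 0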
DylanHooz/uestc_yolov3 | [
"72ed60aaf68a0ab2dbc8d4dfad7bddffce826dde"
] | [
"train.py"
] | [
"\"\"\"\r\nRetrain the YOLO model for your own dataset.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport keras.backend as K\r\nfrom keras.layers import Input, Lambda\r\nfrom keras.models import Model\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\r\n\r\nfrom yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\r\nfrom yolo3.utils import get_random_data\r\n\r\n\r\ndef _main():\r\n annotation_path = '2007_trainval.txt'\r\n log_dir = 'logs/000/'\r\n classes_path = 'model_data/helmet_classes.txt'\r\n anchors_path = 'model_data/helmet_anchors.txt'\r\n class_names = get_classes(classes_path)\r\n num_classes = len(class_names)\r\n anchors = get_anchors(anchors_path)\r\n\r\n input_shape = (416,416) # multiple of 32, hw\r\n\r\n is_tiny_version = len(anchors)==6 # default setting\r\n if is_tiny_version:\r\n model = create_tiny_model(input_shape, anchors, num_classes,\r\n freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')\r\n else:\r\n model = create_model(input_shape, anchors, num_classes,\r\n freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze\r\n\r\n logging = TensorBoard(log_dir=log_dir)\r\n checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\r\n monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)\r\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)\r\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)\r\n\r\n val_split = 0.1\r\n with open(annotation_path) as f:\r\n lines = f.readlines()\r\n np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n num_val = int(len(lines)*val_split)\r\n num_train = len(lines) - num_val\r\n\r\n # Train with frozen layers first, to get a stable loss.\r\n # Adjust num epochs to your dataset. 
This step is enough to obtain a not bad model.\r\n if True:\r\n model.compile(optimizer=Adam(lr=1e-3), loss={\r\n # use custom yolo_loss Lambda layer.\r\n 'yolo_loss': lambda y_true, y_pred: y_pred})\r\n\r\n batch_size = 32\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\r\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\r\n steps_per_epoch=max(1, num_train//batch_size),\r\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\r\n validation_steps=max(1, num_val//batch_size),\r\n epochs=50,\r\n initial_epoch=0,\r\n callbacks=[logging, checkpoint])\r\n model.save_weights(log_dir + 'trained_weights_stage_1.h5')\r\n\r\n # Unfreeze and continue training, to fine-tune.\r\n # Train longer if the result is not good.\r\n if True:\r\n for i in range(len(model.layers)):\r\n model.layers[i].trainable = True\r\n model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change\r\n print('Unfreeze all of the layers.')\r\n\r\n batch_size = 16 # note that more GPU memory is required after unfreezing the body\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\r\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\r\n steps_per_epoch=max(1, num_train//batch_size),\r\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\r\n validation_steps=max(1, num_val//batch_size),\r\n epochs=100,\r\n initial_epoch=50,\r\n callbacks=[logging, checkpoint, reduce_lr, early_stopping])\r\n model.save_weights(log_dir + 'trained_weights_final.h5')\r\n\r\n # Further training if needed.\r\n\r\n\r\ndef get_classes(classes_path):\r\n '''loads the classes'''\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\ndef get_anchors(anchors_path):\r\n '''loads the anchors from a file'''\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [float(x) for x in anchors.split(',')]\r\n return np.array(anchors).reshape(-1, 2)\r\n\r\n\r\ndef create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,\r\n weights_path='model_data/yolo_weights.h5'):\r\n '''create the training model'''\r\n K.clear_session() # get a new session\r\n image_input = Input(shape=(None, None, 3))\r\n h, w = input_shape\r\n num_anchors = len(anchors)\r\n\r\n y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \\\r\n num_anchors//3, num_classes+5)) for l in range(3)]\r\n\r\n model_body = yolo_body(image_input, num_anchors//3, num_classes)\r\n print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))\r\n\r\n if load_pretrained:\r\n model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)\r\n print('Load weights {}.'.format(weights_path))\r\n if freeze_body in [1, 2]:\r\n # Freeze darknet53 body or freeze all but 3 output layers.\r\n num = (185, len(model_body.layers)-3)[freeze_body-1]\r\n for i in range(num): model_body.layers[i].trainable = False\r\n print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))\r\n\r\n model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\r\n arguments={'anchors': anchors, 
'num_classes': num_classes, 'ignore_thresh': 0.5})(\r\n [*model_body.output, *y_true])\r\n model = Model([model_body.input, *y_true], model_loss)\r\n\r\n return model\r\n\r\ndef create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,\r\n weights_path='model_data/tiny_yolo_weights.h5'):\r\n '''create the training model, for Tiny YOLOv3'''\r\n K.clear_session() # get a new session\r\n image_input = Input(shape=(None, None, 3))\r\n h, w = input_shape\r\n num_anchors = len(anchors)\r\n\r\n y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \\\r\n num_anchors//2, num_classes+5)) for l in range(2)]\r\n\r\n model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)\r\n print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))\r\n\r\n if load_pretrained:\r\n model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)\r\n print('Load weights {}.'.format(weights_path))\r\n if freeze_body in [1, 2]:\r\n # Freeze the darknet body or freeze all but 2 output layers.\r\n num = (20, len(model_body.layers)-2)[freeze_body-1]\r\n for i in range(num): model_body.layers[i].trainable = False\r\n print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))\r\n\r\n model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\r\n arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(\r\n [*model_body.output, *y_true])\r\n model = Model([model_body.input, *y_true], model_loss)\r\n\r\n return model\r\n\r\ndef data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):\r\n '''data generator for fit_generator'''\r\n n = len(annotation_lines)\r\n i = 0\r\n while True:\r\n image_data = []\r\n box_data = []\r\n for b in range(batch_size):\r\n if i==0:\r\n np.random.shuffle(annotation_lines)\r\n image, box = get_random_data(annotation_lines[i], input_shape, random=True)\r\n image_data.append(image)\r\n box_data.append(box)\r\n i = (i+1) % n\r\n image_data = np.array(image_data)\r\n box_data = np.array(box_data)\r\n y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)\r\n yield [image_data, *y_true], np.zeros(batch_size)\r\n\r\ndef data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):\r\n n = len(annotation_lines)\r\n if n==0 or batch_size<=0: return None\r\n return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)\r\n\r\nif __name__ == '__main__':\r\n _main()\r\n\r\n"
] | [
[
"numpy.random.shuffle",
"numpy.array",
"numpy.zeros",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tyburam/python-machine-learning | [
"7cb346c99d24e959c1af63532603dd118558b16f"
] | [
"sigmoid.py"
] | [
"#!/usr/bin/python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef sigmoid(z):\n return 1.0 / (1.0 + np.exp(-z))\n\nz = np.arange(-7, 7, 0.01)\nphi_z = sigmoid(z)\n\nplt.plot(z, phi_z)\nplt.axvline(0.0, color = 'k')\nplt.axhspan(0.0, 1.0, facecolor = '1.0', alpha = 1.0, ls = 'dotted')\nplt.axhline(0.5, ls = 'dotted', color = 'k')\nplt.yticks([0.0, 0.5, 1.0])\nplt.ylim(-0.1, 1.1)\nplt.xlabel('z')\nplt.ylabel('$\\phi (z)$')\nplt.show()"
] | [
[
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axhline",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.axhspan",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kalyanvasudev/pyrobot | [
"839ab89a5b3cdd6af9b1e884fa8e8f0007497e32",
"839ab89a5b3cdd6af9b1e884fa8e8f0007497e32",
"839ab89a5b3cdd6af9b1e884fa8e8f0007497e32"
] | [
"src/pyrobot/locobot/camera.py",
"tests/test_arm_controls.py",
"robots/LoCoBot/locobot_navigation/orb_slam2_ros/scripts/orb_slam2_ros/vslam.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport rospkg\nimport threading\nimport yaml\nfrom copy import deepcopy\n\nimport message_filters\nimport numpy as np\nimport pyrobot.util as prutil\nimport rospy\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom pyrobot.core import Camera\nfrom sensor_msgs.msg import CameraInfo\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import JointState\nfrom std_msgs.msg import Float64\nfrom tf import TransformListener\n\n\ndef constrain_within_range(value, MIN, MAX):\n return min(max(value, MIN), MAX)\n\n\ndef is_within_range(value, MIN, MAX):\n return (value <= MAX) and (value >= MIN)\n\n\nclass SimpleCamera(Camera):\n \"\"\"\n This is camera class that interfaces with the Realsense\n camera on the locobot and locobot-lite.\n This class does not have the pan and tilt actuation\n capabilities for the camera.\n \"\"\"\n\n def __init__(self, configs):\n \"\"\"\n Constructor of the SimpleCamera class.\n\n :param configs: Camera specific configuration object\n\n :type configs: YACS CfgNode\n \"\"\"\n super(SimpleCamera, self).__init__(configs=configs)\n self.cv_bridge = CvBridge()\n self.camera_info_lock = threading.RLock()\n self.camera_img_lock = threading.RLock()\n self._tf_listener = TransformListener()\n self.rgb_img = None\n self.depth_img = None\n self.camera_info = None\n self.camera_P = None\n rospy.Subscriber(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,\n CameraInfo,\n self._camera_info_callback)\n\n rgb_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_RGB_STREAM\n self.rgb_sub = message_filters.Subscriber(rgb_topic, Image)\n depth_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_DEPTH_STREAM\n self.depth_sub = message_filters.Subscriber(depth_topic, Image)\n img_subs = [self.rgb_sub, self.depth_sub]\n self.sync = message_filters.ApproximateTimeSynchronizer(img_subs,\n queue_size=10,\n slop=0.2)\n self.sync.registerCallback(self._sync_callback)\n depth_threshold = (self.configs.BASE.VSLAM.DEPTH_MIN,\n self.configs.BASE.VSLAM.DEPTH_MAX)\n cfg_filename = self.configs.BASE.VSLAM.CFG_FILENAME\n self.depth_cam = DepthImgProcessor(subsample_pixs=1,\n depth_threshold=depth_threshold,\n cfg_filename=cfg_filename)\n self.cam_cf = self.configs.BASE.VSLAM.RGB_CAMERA_CENTER_FRAME\n self.base_f = self.configs.BASE.VSLAM.VSLAM_BASE_FRAME\n\n def _sync_callback(self, rgb, depth):\n self.camera_img_lock.acquire()\n try:\n self.rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb, \"bgr8\")\n self.rgb_img = self.rgb_img[:, :, ::-1]\n self.depth_img = self.cv_bridge.imgmsg_to_cv2(depth, \"passthrough\")\n except CvBridgeError as e:\n rospy.logerr(e)\n self.camera_img_lock.release()\n\n def _camera_info_callback(self, msg):\n self.camera_info_lock.acquire()\n self.camera_info = msg\n self.camera_P = np.array(msg.P).reshape((3, 4))\n self.camera_info_lock.release()\n\n def get_rgb(self):\n '''\n This function returns the RGB image perceived by the camera.\n\n :rtype: np.ndarray or None\n '''\n self.camera_img_lock.acquire()\n rgb = deepcopy(self.rgb_img)\n self.camera_img_lock.release()\n return rgb\n\n def get_depth(self):\n '''\n This function returns the depth image perceived by the camera.\n\n :rtype: np.ndarray or None\n '''\n self.camera_img_lock.acquire()\n depth = deepcopy(self.depth_img)\n self.camera_img_lock.release()\n return depth\n\n def get_rgb_depth(self):\n '''\n This function returns both the RGB and 
depth\n images perceived by the camera.\n\n :rtype: np.ndarray or None\n '''\n self.camera_img_lock.acquire()\n rgb = deepcopy(self.rgb_img)\n depth = deepcopy(self.depth_img)\n self.camera_img_lock.release()\n return rgb, depth\n\n def get_intrinsics(self):\n \"\"\"\n This function returns the camera intrinsics.\n\n :rtype: np.ndarray\n \"\"\"\n if self.camera_P is None:\n return self.camera_P\n self.camera_info_lock.acquire()\n P = deepcopy(self.camera_P)\n self.camera_info_lock.release()\n return P[:3, :3]\n\n def get_current_pcd(self, in_cam=True):\n \"\"\"\n Return the point cloud at current time step (one frame only)\n\n :param in_cam: return points in camera frame,\n otherwise, return points in base frame\n\n :type in_cam: bool\n :returns: tuple (pts, colors)\n\n pts: point coordinates in world frame (shape: :math:`[N, 3]`)\n\n colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)\n :rtype: tuple(np.ndarray, np.ndarray)\n \"\"\"\n trans, rot, T = self.get_link_transform(self.cam_cf,\n self.base_f)\n base2cam_trans = np.array(trans).reshape(-1, 1)\n base2cam_rot = np.array(rot)\n rgb_im, depth_im = self.get_rgb_depth()\n pcd_in_cam, colors = self.depth_cam.get_pcd_ic(depth_im=depth_im,\n rgb_im=rgb_im)\n pts = pcd_in_cam[:3, :].T\n if in_cam:\n return pts, colors\n pts = np.dot(pts, base2cam_rot.T)\n pts = pts + base2cam_trans.T\n return pts, colors\n\n def pix_to_3dpt(self, rs, cs, in_cam=False):\n \"\"\"\n Get the 3D points of the pixels in RGB images.\n\n :param rs: rows of interest in the RGB image.\n It can be a list or 1D numpy array\n which contains the row indices.\n The default value is None,\n which means all rows.\n :param cs: columns of interest in the RGB image.\n It can be a list or 1D numpy array\n which contains the column indices.\n The default value is None,\n which means all columns.\n :param in_cam: return points in camera frame,\n otherwise, return points in base frame\n\n :type rs: list or np.ndarray\n :type cs: list or np.ndarray\n :type in_cam: bool\n\n :returns: tuple (pts, colors)\n\n pts: point coordinates in world frame\n (shape: :math:`[N, 3]`)\n\n colors: rgb values for pts_in_cam\n (shape: :math:`[N, 3]`)\n\n :rtype: tuple(np.ndarray, np.ndarray)\n \"\"\"\n trans, rot, T = self.get_link_transform(self.cam_cf,\n self.base_f)\n base2cam_trans = np.array(trans).reshape(-1, 1)\n base2cam_rot = np.array(rot)\n rgb_im, depth_im = self.get_rgb_depth()\n pcd_in_cam = self.depth_cam.get_pix_3dpt(depth_im=depth_im,\n rs=rs,\n cs=cs)\n pts = pcd_in_cam[:3, :].T\n colors = rgb_im[rs, cs].reshape(-1, 3)\n if in_cam:\n return pts, colors\n pts = np.dot(pts, base2cam_rot.T)\n pts = pts + base2cam_trans.T\n return pts, colors\n\n def get_link_transform(self, src, tgt):\n \"\"\"\n Returns the latest transformation from the\n target_frame to the source frame,\n i.e., the transform of source frame w.r.t\n target frame. 
If the returned\n transform is applied to data, it will transform\n data in the source_frame into\n the target_frame\n\n For more information, please refer to\n http://wiki.ros.org/tf/Overview/Using%20Published%20Transforms\n\n :param src: source frame\n :param tgt: target frame\n :type src: string\n :type tgt: string\n\n :returns: tuple(trans, rot, T)\n\n trans: translational vector (shape: :math:`[3,]`)\n\n rot: rotation matrix (shape: :math:`[3, 3]`)\n\n T: transofrmation matrix (shape: :math:`[4, 4]`)\n :rtype: tuple(np.ndarray, np.ndarray, np.ndarray)\n \"\"\"\n trans, quat = prutil.get_tf_transform(self._tf_listener,\n tgt,\n src)\n rot = prutil.quat_to_rot_mat(quat)\n T = np.eye(4)\n T[:3, :3] = rot\n T[:3, 3] = trans\n return trans, rot, T\n\n\nclass LoCoBotCamera(SimpleCamera):\n \"\"\"\n This is camera class that interfaces with the Realsense\n camera and the pan and tilt joints on the robot.\n \"\"\"\n\n def __init__(self, configs):\n \"\"\"\n Constructor of the LoCoBotCamera class.\n\n :param configs: Object containing configurations for camera,\n pan joint and tilt joint.\n\n :type configs: YACS CfgNode\n \"\"\"\n use_camera = rospy.get_param('use_camera', False)\n use_sim = rospy.get_param('use_sim', False)\n use_camera = use_camera or use_sim\n if not use_camera:\n rospy.logwarn('Neither use_camera, nor use_sim, is not set'\n ' to True when the LoCoBot driver is launched.'\n 'You may not be able to command the camera'\n ' correctly using PyRobot!!!')\n return\n super(LoCoBotCamera, self).__init__(configs=configs)\n\n rospy.Subscriber(self.configs.ARM.ROSTOPIC_JOINT_STATES,\n JointState,\n self._camera_pose_callback)\n\n self.set_pan_pub = rospy.Publisher(\n self.configs.CAMERA.ROSTOPIC_SET_PAN, Float64, queue_size=1)\n self.set_tilt_pub = rospy.Publisher(\n self.configs.CAMERA.ROSTOPIC_SET_TILT, Float64, queue_size=1)\n self.pan = None\n self.tilt = None\n self.tol = 0.01\n\n def _camera_pose_callback(self, msg):\n if 'head_pan_joint' in msg.name:\n pan_id = msg.name.index('head_pan_joint')\n self.pan = msg.position[pan_id]\n if 'head_tilt_joint' in msg.name:\n tilt_id = msg.name.index('head_tilt_joint')\n self.tilt = msg.position[tilt_id]\n\n @property\n def state(self):\n \"\"\"\n Return the current pan and tilt joint angles of the robot camera.\n\n :return:\n pan_tilt: A list the form [pan angle, tilt angle]\n :rtype: list\n \"\"\"\n return self.get_state()\n\n def get_state(self):\n \"\"\"\n Return the current pan and tilt joint angles of the robot camera.\n\n :return:\n pan_tilt: A list the form [pan angle, tilt angle]\n :rtype: list\n \"\"\"\n return [self.pan, self.tilt]\n\n def get_pan(self):\n \"\"\"\n Return the current pan joint angle of the robot camera.\n\n :return:\n pan: Pan joint angle\n :rtype: float\n \"\"\"\n return self.pan\n\n def get_tilt(self):\n \"\"\"\n Return the current tilt joint angle of the robot camera.\n\n :return:\n tilt: Tilt joint angle\n :rtype: float\n \"\"\"\n return self.tilt\n\n def set_pan(self, pan, wait=True):\n \"\"\"\n Sets the pan joint angle to the specified value.\n\n :param pan: value to be set for pan joint\n :param wait: wait until the pan angle is set to\n the target angle.\n\n :type pan: float\n :type wait: bool\n \"\"\"\n pan = constrain_within_range(np.mod(pan + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_PAN,\n self.configs.CAMERA.MAX_PAN)\n self.set_pan_pub.publish(pan)\n if wait:\n for i in range(30):\n rospy.sleep(0.1)\n if np.fabs(self.get_pan() - pan) < self.tol:\n break\n\n def set_tilt(self, tilt, 
wait=True):\n \"\"\"\n Sets the tilt joint angle to the specified value.\n\n :param tilt: value to be set for the tilt joint\n :param wait: wait until the tilt angle is set to\n the target angle.\n\n :type tilt: float\n :type wait: bool\n \"\"\"\n tilt = constrain_within_range(np.mod(tilt + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_TILT,\n self.configs.CAMERA.MAX_TILT)\n self.set_tilt_pub.publish(tilt)\n if wait:\n for i in range(30):\n rospy.sleep(0.1)\n if np.fabs(self.get_tilt() - tilt) < self.tol:\n break\n\n def set_pan_tilt(self, pan, tilt, wait=True):\n \"\"\"\n Sets both the pan and tilt joint angles to the specified values.\n\n :param pan: value to be set for pan joint\n :param tilt: value to be set for the tilt joint\n :param wait: wait until the pan and tilt angles are set to\n the target angles.\n\n :type pan: float\n :type tilt: float\n :type wait: bool\n \"\"\"\n pan = constrain_within_range(np.mod(pan + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_PAN,\n self.configs.CAMERA.MAX_PAN)\n tilt = constrain_within_range(np.mod(tilt + np.pi,\n 2 * np.pi) - np.pi,\n self.configs.CAMERA.MIN_TILT,\n self.configs.CAMERA.MAX_TILT)\n self.set_pan_pub.publish(pan)\n self.set_tilt_pub.publish(tilt)\n if wait:\n for i in range(30):\n rospy.sleep(0.1)\n if np.fabs(self.get_pan() - pan) < self.tol and \\\n np.fabs(self.get_tilt() - tilt) < self.tol:\n break\n\n def reset(self):\n \"\"\"\n This function resets the pan and tilt joints by actuating\n them to their home configuration.\n \"\"\"\n self.set_pan_tilt(self.configs.CAMERA.RESET_PAN,\n self.configs.CAMERA.RESET_TILT)\n\n\nclass DepthImgProcessor:\n \"\"\"\n This class transforms the depth image and rgb image to point cloud\n \"\"\"\n\n def __init__(self, subsample_pixs=1, depth_threshold=(0, 1.5),\n cfg_filename='realsense_d435.yaml'):\n \"\"\"\n The constructor for :class:`DepthImgProcessor` class.\n\n :param subsample_pixs: sample rows and columns for the images\n :param depth_threshold: minimum and maximum of valid depth values\n :param cfg_filename: configuration file name for ORB-SLAM2\n\n :type subsample_pixs: int\n :type depth_threshold: tuple\n :type cfg_filename: string\n \"\"\"\n assert (type(depth_threshold) is tuple and\n 0 < len(depth_threshold) < 3) or \\\n (depth_threshold is None)\n self.subsample_pixs = subsample_pixs\n self.depth_threshold = depth_threshold\n self.cfg_data = self.read_cfg(cfg_filename)\n self.intrinsic_mat = self.get_intrinsic()\n self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)\n\n img_pixs = np.mgrid[0: self.cfg_data['Camera.height']: subsample_pixs,\n 0: self.cfg_data['Camera.width']: subsample_pixs]\n img_pixs = img_pixs.reshape(2, -1)\n img_pixs[[0, 1], :] = img_pixs[[1, 0], :]\n self.uv_one = np.concatenate((img_pixs,\n np.ones((1, img_pixs.shape[1]))))\n self.uv_one_in_cam = np.dot(self.intrinsic_mat_inv, self.uv_one)\n\n def get_pix_3dpt(self, depth_im, rs, cs):\n \"\"\"\n :param depth_im: depth image (shape: :math:`[H, W]`)\n :param rs: rows of interest. It can be a list or 1D numpy array\n which contains the row indices. The default value is None,\n which means all rows.\n :param cs: columns of interest. 
It can be a list or 1D numpy array\n which contains the column indices.\n The default value is None,\n which means all columns.\n :type depth_im: np.ndarray\n :type rs: list or np.ndarray\n :type cs: list or np.ndarray\n\n :return: 3D point coordinates of the pixels in\n camera frame (shape: :math:`[4, N]`)\n :rtype np.ndarray\n \"\"\"\n assert isinstance(rs,\n int) or isinstance(rs,\n list) or isinstance(rs,\n np.ndarray)\n assert isinstance(cs,\n int) or isinstance(cs,\n list) or isinstance(cs,\n np.ndarray)\n if isinstance(rs, int):\n rs = [rs]\n if isinstance(cs, int):\n cs = [cs]\n if isinstance(rs, np.ndarray):\n rs = rs.flatten()\n if isinstance(cs, np.ndarray):\n cs = cs.flatten()\n depth_im = depth_im[rs, cs]\n depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])\n img_pixs = np.stack((rs, cs)).reshape(2, -1)\n img_pixs[[0, 1], :] = img_pixs[[1, 0], :]\n uv_one = np.concatenate((img_pixs,\n np.ones((1, img_pixs.shape[1]))))\n uv_one_in_cam = np.dot(self.intrinsic_mat_inv, uv_one)\n pts_in_cam = np.multiply(uv_one_in_cam, depth)\n pts_in_cam = np.concatenate((pts_in_cam,\n np.ones((1, pts_in_cam.shape[1]))),\n axis=0)\n return pts_in_cam\n\n def get_pcd_ic(self, depth_im, rgb_im=None):\n \"\"\"\n Returns the point cloud (filtered by minimum\n and maximum depth threshold)\n in camera's coordinate frame\n\n :param depth_im: depth image (shape: :math:`[H, W]`)\n :param rgb_im: rgb image (shape: :math:`[H, W, 3]`)\n\n :type depth_im: np.ndarray\n :type rgb_im: np.ndarray\n\n :returns: tuple (pts_in_cam, rgb_im)\n\n pts_in_cam: point coordinates in\n camera frame (shape: :math:`[4, N]`)\n\n rgb: rgb values for pts_in_cam (shape: :math:`[N, 3]`)\n :rtype tuple(np.ndarray, np.ndarray)\n \"\"\"\n # pcd in camera from depth\n depth_im = depth_im[0::self.subsample_pixs, 0::self.subsample_pixs]\n rgb_im = rgb_im[0::self.subsample_pixs, 0::self.subsample_pixs]\n depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])\n rgb = None\n if rgb_im is not None:\n rgb = rgb_im.reshape(-1, 3)\n if self.depth_threshold is not None:\n valid = depth > self.depth_threshold[0]\n if len(self.depth_threshold) > 1:\n valid = np.logical_and(valid,\n depth < self.depth_threshold[1])\n uv_one_in_cam = self.uv_one_in_cam[:, valid]\n depth = depth[valid]\n rgb = rgb[valid]\n else:\n uv_one_in_cam = self.uv_one_in_cam\n pts_in_cam = np.multiply(uv_one_in_cam, depth)\n pts_in_cam = np.concatenate((pts_in_cam,\n np.ones((1, pts_in_cam.shape[1]))),\n axis=0)\n return pts_in_cam, rgb\n\n def get_pcd_iw(self, pts_in_cam, extrinsic_mat):\n \"\"\"\n Returns the point cloud in the world coordinate frame\n\n :param pts_in_cam: point coordinates in\n camera frame (shape: :math:`[4, N]`)\n :param extrinsic_mat: extrinsic matrix for\n the camera (shape: :math:`[4, 4]`)\n\n :type pts_in_cam: np.ndarray\n :type extrinsic_mat: np.ndarray\n\n :return: point coordinates in\n ORB-SLAM2's world frame (shape: :math:`[N, 3]`)\n :rtype: np.ndarray\n \"\"\"\n # pcd in world\n pts_in_world = np.dot(extrinsic_mat, pts_in_cam)\n pts_in_world = pts_in_world[:3, :].T\n return pts_in_world\n\n def read_cfg(self, cfg_filename):\n \"\"\"\n Reads the configuration file\n\n :param cfg_filename: configuration file name for ORB-SLAM2\n\n :type cfg_filename: string\n\n :return: configurations in the configuration file\n :rtype: dict\n \"\"\"\n rospack = rospkg.RosPack()\n slam_pkg_path = rospack.get_path('orb_slam2_ros')\n cfg_path = os.path.join(slam_pkg_path,\n 'cfg',\n cfg_filename)\n with open(cfg_path, 'r') as f:\n 
for i in range(1):\n f.readline()\n cfg_data = yaml.load(f)\n return cfg_data\n\n def get_intrinsic(self):\n \"\"\"\n Returns the instrinsic matrix of the camera\n\n :return: the intrinsic matrix (shape: :math:`[3, 3]`)\n :rtype: np.ndarray\n \"\"\"\n fx = self.cfg_data['Camera.fx']\n fy = self.cfg_data['Camera.fy']\n cx = self.cfg_data['Camera.cx']\n cy = self.cfg_data['Camera.cy']\n Itc = np.array([[fx, 0, cx],\n [0, fy, cy],\n [0, 0, 1]])\n return Itc\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport pytest\nimport time\n\nimport numpy as np\nimport pyrobot.util as prutil\nimport tf.transformations as tft\nfrom pyrobot import Robot\n\n\[email protected](scope=\"module\")\ndef create_robot():\n return Robot('locobot', use_base=False)\n\n\[email protected](\"position\", [[0.40, 0.72, -0.47, -1.4, 0.92],\n [-0.67, 0, 0.23, 1, -0.70]])\[email protected](\"plan\", ['True', 'False'])\ndef test_position_control(create_robot, position, plan):\n bot = create_robot\n bot.arm.go_home()\n time.sleep(1)\n bot.arm.set_joint_positions(position, plan=plan)\n time.sleep(1)\n joint_angles = bot.arm.get_joint_angles()\n ag_error = np.fabs(joint_angles.flatten() - np.array(position).flatten())\n assert np.max(ag_error) < 0.06\n\n\npositions = [np.array([0.279, 0.176, 0.217]),\n np.array([0.339, 0.0116, 0.255])]\norientations = [np.array([[0.5380200, -0.6650449, 0.5179283],\n [0.4758410, 0.7467951, 0.4646209],\n [-0.6957800, -0.0035238, 0.7182463]]),\n np.array([0.245, 0.613, -0.202, 0.723])]\n\n\[email protected](\"position, orientation\", [(positions[0],\n orientations[0]),\n (positions[1],\n orientations[1])])\[email protected](\"numerical\", ['True', 'False'])\ndef test_ee_pose_control(create_robot, position, orientation, numerical):\n bot = create_robot\n bot.arm.go_home()\n time.sleep(1)\n bot.arm.set_ee_pose(position=position,\n orientation=orientation,\n numerical=numerical)\n time.sleep(1)\n trans, rot, quat = bot.arm.pose_ee\n pos_error = np.linalg.norm(position.flatten() - trans.flatten())\n if orientation.size == 4:\n tgt_quat = orientation.flatten()\n elif orientation.size == 3:\n tgt_quat = prutil.euler_to_quat(orientation)\n elif orientation.size == 9:\n tgt_quat = prutil.rot_mat_to_quat(orientation)\n else:\n raise TypeError('Orientation must be in one of the following forms:'\n 'rotation matrix, euler angles, or quaternion')\n quat_diff = tft.quaternion_multiply(tft.quaternion_inverse(tgt_quat), quat)\n rot_similarity = quat_diff[3]\n assert rot_similarity > 0.98 and pos_error < 0.02\n\n\nposition = [np.array([0.28, 0.17, 0.22]),\n np.array([0.28, -0.17, 0.22]),\n np.array([0.09, 0.25, 0.34])]\npitch = [0.5, 0.5, np.pi / 2]\nroll = [None, 0.5, np.pi / 4]\ntgt_pos = [np.array([[0.2792254], [0.1694128], [0.2174248]]),\n np.array([[0.2793616], [-0.1692725], [0.2174295]]),\n np.array([[0.0973994], [0.2505398], [0.3373943]])]\ntgt_quat = [np.array([-0.0930975, 0.2367324, 0.3539513, 0.900005]),\n np.array([0.3134469, 0.1415047, -0.4016185, 0.8487815]),\n np.array([-0.1477051, 0.6930908, 0.1401051, 0.6915048])]\n\n\[email protected](\"position, \"\n \"pitch, \"\n \"roll, \"\n \"tgt_pos, \"\n \"tgt_quat\", [(position[i], pitch[i],\n roll[i], tgt_pos[i],\n tgt_quat[i]) for i in range(len(position))])\ndef test_ee_pitch_control(create_robot, position, pitch,\n roll, tgt_pos, tgt_quat):\n bot = create_robot\n bot.arm.go_home()\n time.sleep(1)\n bot.arm.set_ee_pose_pitch_roll(position=position,\n pitch=pitch,\n roll=roll,\n numerical=False,\n plan=False)\n time.sleep(1)\n trans, rot, quat = bot.arm.pose_ee\n pos_error = np.linalg.norm(position.flatten() - tgt_pos.flatten())\n quat_diff = tft.quaternion_multiply(tft.quaternion_inverse(tgt_quat), quat)\n rot_similarity = quat_diff[3]\n\n assert rot_similarity > 0.98 and pos_error < 0.01\n\n\[email protected](\"plan\", ['True', 'False'])\ndef 
test_ee_xyz_control(create_robot, plan):\n bot = create_robot\n bot.arm.go_home()\n time.sleep(1)\n tgt_trans, tgt_rot, tgt_quat = bot.arm.pose_ee\n displacement = np.array([0, 0, -0.15])\n tgt_pos = tgt_trans.flatten() + displacement.flatten()\n bot.arm.move_ee_xyz(displacement, plan=plan)\n time.sleep(1)\n trans, rot, quat = bot.arm.pose_ee\n pos_error = np.linalg.norm(tgt_pos.flatten() - trans.flatten())\n quat_diff = tft.quaternion_multiply(tft.quaternion_inverse(tgt_quat), quat)\n rot_similarity = quat_diff[3]\n\n rot_thresh = 0.98\n pos_thresh = 0.01\n assert rot_similarity > rot_thresh and pos_error < pos_thresh\n\n\ndef test_gripper_control(create_robot):\n bot = create_robot\n bot.arm.go_home()\n time.sleep(1)\n bot.gripper.open()\n time.sleep(1)\n g_state = bot.gripper.get_gripper_state()\n assert g_state == 0\n\n bot.gripper.close()\n time.sleep(1)\n g_state = bot.gripper.get_gripper_state()\n assert g_state == 3\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nInterface for visual slam (ORB-SLAM2)\n\"\"\"\n\nimport os\nimport threading\nfrom os.path import expanduser\n\nimport numpy as np\nimport rospy\nimport tf\nimport tf.transformations\nfrom geometry_msgs.msg import PoseStamped\nfrom nav_msgs.msg import OccupancyGrid\nfrom orb_slam2_ros.msg import Traj\n\nfrom pcdlib import PointCloudProcessor\n\n\nclass VisualSLAM(object):\n \"\"\"\n This class is used for fetching camera pose and base pose,\n and reconstructing the 3D world that the robot has seen so far.\n The engine behind this class is ORB-SLAM2.\n\n The origin of the fixed world coordinate frame is the base_link\n position in the first frame (when the program is just turned on).\n \"\"\"\n\n def __init__(self,\n map_img_dir=os.path.join(expanduser(\"~\"), '.ros/Imgs'),\n cam_pose_tp='/orb_slam2_rgbd/slam/camera_pose',\n cam_traj_tp='/orb_slam2_rgbd/slam/camera_traj',\n base_frame='/base_link',\n camera_frame='/camera_color_optical_frame',\n map_resolution=0.02,\n z_min=0.1,\n z_max=0.8,\n obstacle_cost=100,\n occ_map_rate=0.0,\n x_min=-5, y_min=-5, x_max=5, y_max=5):\n \"\"\"\n The constructor for :class:`VisualSLAM` class.\n\n :param map_img_dir: parent directory of the saved\n RGB images and depth images\n\n :type map_img_dir: string\n\n \"\"\"\n self.map_img_dir = map_img_dir\n self.cam_pose_tp = cam_pose_tp\n self.cam_traj_tp = cam_traj_tp\n self.base_frame = base_frame\n self.camera_frame = camera_frame\n self.map_resultion = map_resolution\n self.z_min = z_min\n self.z_max = z_max\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n\n self.obstacle_cost = obstacle_cost\n rgb_dir = os.path.join(self.map_img_dir, 'RGBImgs')\n depth_dir = os.path.join(self.map_img_dir, 'DepthImgs')\n pcd_args = {\n 'rgb_dir': rgb_dir,\n 'depth_dir': depth_dir,\n 'cfg_filename': 'realsense_d435.yaml',\n 'subsample_pixs': 1,\n 'depth_threshold': (0.2, 1.5),\n 'camera_traj_topic': self.cam_traj_tp\n }\n self._pcd_processor = PointCloudProcessor(**pcd_args)\n self._cam_pose_in_cam = None\n self._cam_traj_in_cam = []\n self._cam_traj_lock = threading.Lock()\n self._cam_pose_sub = rospy.Subscriber(self.cam_pose_tp,\n PoseStamped,\n self._cam_pose_callback)\n self._cam_traj_sub = rospy.Subscriber(self.cam_traj_tp,\n Traj,\n self.cam_traj_callback)\n self.occ_map_pub = rospy.Publisher('/occupancy_map', OccupancyGrid, queue_size=1)\n self.occ_map_msg = self._init_occupancy_map()\n self.tf_listener = tf.TransformListener()\n rospy.sleep(1)\n\n trans, rot, T = self.get_link_transform(self.camera_frame,\n self.base_frame)\n self._ini_base2cam_T = T\n self._ini_base2cam_trans = np.array(trans).reshape(-1, 1)\n self._ini_base2cam_rot = np.array(rot)\n if occ_map_rate > 0:\n rospy.Timer(rospy.Duration(int(np.ceil(1.0 / occ_map_rate))),\n self._callback_occ_pub)\n\n @property\n def camera_pose(self):\n \"\"\"\n Returns the camera pose in the world frame\n\n :returns: (trans, rot, cam_pose_in_world)\n trans: translational vector (shape: :math:`[3,]`)\n rot: rotation matrix (shape: :math:`[3, 3]`)\n cam_pose_in_world: homogeneous camera pose (shape: :math:`[4, 4]`)\n :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray)\n \"\"\"\n if self._cam_pose_in_cam is None:\n return None, None, None\n pos, ori = self._parse_pose_msg(self._cam_pose_in_cam)\n T = np.eye(4)\n T[:3, :3] = ori\n T[:3, 3] = pos\n 
cam_pose_in_world = self.cam_to_world(T)\n trans = cam_pose_in_world[:3, 3]\n rot = cam_pose_in_world[:3, :3]\n return trans, rot, cam_pose_in_world\n\n @property\n def camera_traj(self):\n \"\"\"\n Returns the camera trajectory in the world frame\n\n :return: the camera trajectory in homogeneous form (shape: :math:`[N, 4, 4]`)\n :rtype: numpy.ndarray\n \"\"\"\n cam_traj = []\n self._cam_traj_lock.acquire()\n for i in range(len(self._cam_traj_in_cam)):\n pos, ori = self._parse_pose_msg(self._cam_traj_in_cam[i])\n T = np.eye(4)\n T[:3, :3] = ori\n T[:3, 3] = pos\n cam_pose_in_world = self.cam_to_world(T)\n cam_traj.append(cam_pose_in_world.copy())\n self._cam_traj_lock.release()\n return np.asarray(cam_traj)\n\n @property\n def base_pose(self):\n \"\"\"\n Returns the base pose in the world frame\n\n :returns: (trans, rot, base_pose)\n trans: translational vector (shape: :math:`[3,]`)\n rot: rotation matrix (shape: :math:`[3, 3]`)\n base_pose: homogeneous base pose (shape: :math:`[4, 4]`)\n :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray)\n \"\"\"\n # gives the base pose as a tuple (x, y, yaw)\n trans, rot, T = self.get_link_transform(self.base_frame,\n self.camera_frame)\n _, _, camera_pose = self.camera_pose\n if camera_pose is None:\n return None, None, None\n base_pose = np.dot(camera_pose, T)\n rot = base_pose[:3, :3]\n trans = base_pose[:3, 3]\n return trans, rot, base_pose\n\n @property\n def base_pose_xyyaw(self):\n \"\"\"\n Returns the (x, y, yaw) of the base. Note that here we assume\n that the robot moves on a flat floor\n\n :returns: (x, y, yaw)\n :rtype: (float, float, float)\n \"\"\"\n trans, rot, T = self.base_pose\n if T is None:\n return None, None, None\n angle = np.arctan2(rot[1, 0], rot[0, 0])\n # angle, axis, _ = tf.transformations.rotation_from_matrix(T)\n x = trans[0]\n y = trans[1]\n yaw = angle\n return x, y, yaw\n\n def cam_to_world(self, pose_in_cam):\n \"\"\"\n Convert the pose in the first camera frame to fixed world frame\n\n :param pose_in_cam: pose in the first camera frame,\n (shape: :math:`[4, None]`)\n :type pose_in_cam: numpy.ndarray\n :return: pose in the world frame (shape: :math:`[4, None]`)\n :rtype: numpy.ndarray\n \"\"\"\n pose_in_world = np.dot(self._ini_base2cam_T, pose_in_cam)\n return pose_in_world\n\n def get_3d_map(self):\n \"\"\"\n Fetch the 3D point cloud that the robot has seen so far\n\n :return: (pts, colors)\n pts: points of the point cloud (shape: :math:`[N, 3]`)\n colors: color of the points (shape: :math:`[N, 3]`)\n :rtype: (numpy.ndarray, numpy.ndarray)\n \"\"\"\n points, colors = self._pcd_processor.get_current_pcd()\n if points is None:\n return None, None\n\n pts = np.dot(points, self._ini_base2cam_rot.T)\n pts = pts + self._ini_base2cam_trans.T\n\n # points = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1)\n # pts_in_world = np.dot(points, self._ini_base2cam_T.T)\n # pts = pts_in_world[:, :3]\n\n # points = points.T\n # homo_pts = np.concatenate((points, np.ones((1, points.shape[1]))),\n # axis=0)\n # pts_in_world = self.cam_to_world(homo_pts)\n # pts = pts_in_world[:3, :].T\n return pts, colors\n\n def _calc_grid_bds(self, points):\n if points.size == 0 or points is None:\n xMin = self.x_min\n xMax = self.x_max\n yMin = self.y_min\n yMax = self.y_max\n return\n mins = np.amin(points, axis=0)\n maxs = np.amax(points, axis=0)\n\n xMax = max(maxs[0], self.x_max)\n yMin = min(mins[1], self.y_min)\n xMin = min(mins[0], self.x_min)\n yMax = max(maxs[1], self.y_max)\n\n return xMin, yMin, xMax, yMax\n\n def 
get_occupancy_map(self):\n pts, colors = self.get_3d_map()\n if pts is None or pts.shape[0] < 1 or pts.size == 0:\n xcells = (int((self.x_max - self.x_min) / self.map_resultion)) + 1\n ycells = (int((self.y_max - self.y_min) / self.map_resultion)) + 1\n occ_map = np.zeros(xcells * ycells)\n return occ_map, xcells, ycells, self.x_min, self.y_min\n\n (self.x_min, self.y_min, self.x_max, self.y_max) = self._calc_grid_bds(pts)\n\n xcells = int(np.ceil((self.x_max - self.x_min) / self.map_resultion))\n ycells = int(np.ceil((self.y_max - self.y_min) / self.map_resultion))\n\n filter_idx = np.logical_and(pts[:, 2] >= self.z_min,\n pts[:, 2] <= self.z_max)\n\n pts = pts[filter_idx, :2]\n map_mins = np.array([self.x_min, self.y_min])\n pts = np.floor((pts - map_mins) / self.map_resultion).astype(int)\n pts = pts[:, 1] * xcells + pts[:, 0]\n occ_map = np.zeros(xcells * ycells)\n occ_map[pts] = self.obstacle_cost\n return occ_map, xcells, ycells, self.x_min, self.y_min\n\n def _pub_occupancy_map(self):\n occ_map, width, height, x_min, y_min = self.get_occupancy_map()\n if occ_map is None:\n return\n self.occ_map_msg.header.seq = self.occ_map_msg.header.seq + 1\n self.occ_map_msg.header.stamp = rospy.Time.now()\n self.occ_map_msg.info.map_load_time = rospy.Time.now()\n self.occ_map_msg.info.resolution = self.map_resultion\n self.occ_map_msg.info.width = width\n self.occ_map_msg.info.height = height\n self.occ_map_msg.info.origin.position.x = x_min\n self.occ_map_msg.info.origin.position.y = y_min\n self.occ_map_msg.data = occ_map\n self.occ_map_pub.publish(self.occ_map_msg)\n\n def _callback_occ_pub(self, event):\n self._pub_occupancy_map()\n\n def _parse_pose_msg(self, pose):\n \"\"\"\n Convert the ros topic pose (geometry_msgs/Pose)\n into translational vector\n and the rotational vector\n\n :param pose: pose\n :type pose: geometry_msgs.Pose\n\n :return: (pos, ori)\n pos: translational vector (shape: :math:`[3, ]`)\n ori: rotational vector (shape: :math:`[3, 3]`)\n :rtype: (numpy.ndarray, numpy.ndarray)\n \"\"\"\n pos = np.array([pose.pose.position.x,\n pose.pose.position.y,\n pose.pose.position.z])\n quat = np.array([pose.pose.orientation.x,\n pose.pose.orientation.y,\n pose.pose.orientation.z,\n pose.pose.orientation.w,\n ])\n ori = tf.transformations.quaternion_matrix(quat)[:3, :3]\n return pos, ori\n\n def _cam_pose_callback(self, pose):\n \"\"\"\n Store the pose from the pose subscriber\n\n :param pose: pose\n :type pose: geometry_msgs.Pose\n \"\"\"\n self._cam_pose_in_cam = pose\n\n def cam_traj_callback(self, traj_data):\n \"\"\"\n Store the trajectory from the trajectory subscriber\n\n :param traj_data: trajectory data\n :type traj_data: orb_slam2_ros.msg.Traj\n \"\"\"\n self._cam_traj_lock.acquire()\n self._cam_traj_in_cam = traj_data.poses\n self._cam_traj_lock.release()\n\n def get_link_transform(self, src, tgt):\n \"\"\"\n Returns the latest transformation from the target_frame to the source frame,\n i.e., the transform of source frame w.r.t target frame. 
If the returned\n transform is applied to data, it will transform data in the source_frame into\n the target_frame\n\n For more information, please refer to\n http://wiki.ros.org/tf/Overview/Using%20Published%20Transforms\n\n :param src: source frame\n :param tgt: target frame\n :type src: string\n :type tgt: string\n\n :returns: (trans, rot, T)\n trans: translational vector (shape: :math:`[3,]`)\n rot: rotation matrix (shape: :math:`[3, 3]`)\n T: transofrmation matrix (shape: :math:`[4, 4]`)\n :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray)\n \"\"\"\n trans, quat = self._get_tf_transform(self.tf_listener,\n tgt,\n src)\n rot = tf.transformations.quaternion_matrix(quat)[:3, :3]\n T = np.eye(4)\n T[:3, :3] = rot\n T[:3, 3] = trans\n return trans, rot, T\n\n def _init_occupancy_map(self):\n grid = OccupancyGrid()\n grid.header.seq = 1\n grid.header.frame_id = '/map'\n grid.info.origin.position.z = 0\n grid.info.origin.orientation.x = 0\n grid.info.origin.orientation.y = 0\n grid.info.origin.orientation.z = 0\n grid.info.origin.orientation.w = 1\n return grid\n\n def _get_tf_transform(self, tf_listener, tgt_frame, src_frame):\n \"\"\"\n Uses ROS TF to lookup the current transform from tgt_frame to src_frame,\n If the returned transform is applied to data, it will transform data in\n the src_frame into the tgt_frame\n\n :param tgt_frame: target frame\n :param src_frame: source frame\n :type tgt_frame: string\n :type src_frame: string\n\n :returns: trans, translation (x,y,z)\n :rtype: tuple (of floats)\n :returns: quat, rotation as a quaternion (x,y,z,w)\n :rtype: tuple (of floats)\n \"\"\"\n try:\n tf_listener.waitForTransform(tgt_frame, src_frame,\n rospy.Time(0),\n rospy.Duration(3))\n (trans, quat) = tf_listener.lookupTransform(tgt_frame,\n src_frame,\n rospy.Time(0))\n except (tf.LookupException,\n tf.ConnectivityException,\n tf.ExtrapolationException):\n raise RuntimeError('Cannot fetch the transform from'\n ' {0:s} to {1:s}'.format(tgt_frame, src_frame))\n return trans, quat\n\n\ndef main():\n rospy.init_node('vslam', anonymous=True)\n vslam = VisualSLAM()\n rate = rospy.Rate(0.5)\n while not rospy.is_shutdown():\n rate.sleep()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.dot",
"numpy.multiply",
"numpy.logical_and",
"numpy.linalg.inv",
"numpy.eye",
"numpy.stack",
"numpy.ones",
"numpy.mod",
"numpy.array"
],
[
"numpy.max",
"numpy.array"
],
[
"numpy.dot",
"numpy.amax",
"numpy.logical_and",
"numpy.asarray",
"numpy.amin",
"numpy.eye",
"numpy.arctan2",
"numpy.ceil",
"numpy.floor",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
j-varun/enas | [
"1a19ccbd7c06168ae51e0de2986b30ea01cce070"
] | [
"enas/cifar10/data_utils.py"
] | [
"import os\nimport sys\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import _pickle as pickle\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef _read_data(data_path, train_files):\n \"\"\"Reads CIFAR-10 format data. Always returns NHWC format.\n\n Returns:\n images: np tensor of size [N, H, W, C]\n labels: np tensor of size [N]\n \"\"\"\n images, labels = [], []\n for file_name in train_files:\n print(file_name)\n full_name = os.path.join(data_path, file_name)\n with open(full_name, 'rb') as finp:\n data = pickle.load(finp, encoding='bytes')\n batch_images = data[b\"data\"].astype(np.float32) / 255.0\n batch_labels = np.array(data[b\"labels\"], dtype=np.int32)\n images.append(batch_images)\n labels.append(batch_labels)\n images = np.concatenate(images, axis=0)\n labels = np.concatenate(labels, axis=0)\n images = np.reshape(images, [-1, 3, 32, 32])\n images = np.transpose(images, [0, 2, 3, 1])\n\n return images, labels\n\ndef _read_fmnist_data(data_path):\n \"\"\"Reads Fashion-Mnist data. Returns NHWC format.\n\n Returns:\n images: np tensor of size [N, H, W, C]\n labels: np tensor of size [N]\n \"\"\"\n images, labels = {},{}\n data = input_data.read_data_sets(data_path)\n images[\"train\"] = data.train.images.reshape(-1, 1, 28, 28) / 255.0\n images[\"test\"] = data.test.images.reshape(-1, 1, 28, 28) / 255.0\n\n images[\"train\"] = np.transpose(images[\"train\"], [0, 2, 3, 1])\n images[\"test\"] = np.transpose(images[\"test\"], [0, 2, 3, 1])\n\n labels[\"train\"] = np.array(data.train.labels, dtype = np.int32)\n labels[\"test\"] = np.array(data.test.labels, dtype = np.int32)\n print(\"Read and processed data..\")\n print(labels[\"test\"])\n\n return images, labels\n\n\ndef valid_split_data(images, labels, num_valids=5000):\n if num_valids:\n images[\"valid\"] = images[\"train\"][-num_valids:]\n labels[\"valid\"] = labels[\"train\"][-num_valids:]\n\n images[\"train\"] = images[\"train\"][:-num_valids]\n labels[\"train\"] = labels[\"train\"][:-num_valids]\n else:\n images[\"valid\"], labels[\"valid\"] = None, None\n return images, labels\n\ndef read_data(data_path, num_valids=5000, dataset = \"cifar\"):\n print(\"-\" * 80)\n print(\"Reading data\")\n print(os.getcwd())\n\n images, labels = {}, {}\n if(dataset == \"fmnist\"):\n print(\"Fashion-Mnist\")\n images, labels = _read_fmnist_data(data_path)\n images, labels = valid_split_data(images, labels, num_valids)\n return images, labels\n\n if dataset == \"stacking\":\n images[\"path\"] = data_path\n return images, labels\n else:\n train_files = [\n \"data_batch_1\",\n \"data_batch_2\",\n \"data_batch_3\",\n \"data_batch_4\",\n \"data_batch_5\",\n ]\n test_file = [\n \"test_batch\",\n ]\n images[\"train\"], labels[\"train\"] = _read_data(data_path, train_files)\n\n images, labels = valid_split_data(images, labels, num_valids)\n\n images[\"test\"], labels[\"test\"] = _read_data(data_path, test_file)\n\n print(\"Prepropcess: [subtract mean], [divide std]\")\n mean = np.mean(images[\"train\"], axis=(0, 1, 2), keepdims=True)\n std = np.std(images[\"train\"], axis=(0, 1, 2), keepdims=True)\n\n print(\"mean: {}\".format(np.reshape(mean * 255.0, [-1])))\n print(\"std: {}\".format(np.reshape(std * 255.0, [-1])))\n\n images[\"train\"] = (images[\"train\"] - mean) / std\n if num_valids:\n images[\"valid\"] = (images[\"valid\"] - mean) / std\n images[\"test\"] = (images[\"test\"] - mean) / std\n\n return images, labels\n\n"
] | [
[
"numpy.reshape",
"numpy.concatenate",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"numpy.std",
"numpy.mean",
"numpy.transpose",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
minhhoangbui/PICK-pytorch | [
"c74d2d1e5d1f8c7e837ea9776146bc84a7ecf30a"
] | [
"src/runner/trainer.py"
] | [
"# -*- coding: utf-8 -*-\n# @Author: Wenwen Yu\n# @Created Time: 7/12/2020 9:50 PM\n\nimport os\nimport numpy as np\nfrom numpy import inf\n\nimport torch\nimport torch.distributed as dist\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom src.utils import inf_loop\nfrom src.utils.metrics import MetricTracker, SpanBasedF1MetricTracker\nfrom torch.utils.tensorboard import SummaryWriter\n# from src.logger import TensorboardWriter\nfrom src.utils.utils import to_union\n\n\nclass Trainer:\n \"\"\"\n Trainer class\n \"\"\"\n\n def __init__(self, model, optimizer, config, data_loader, iob_labels_vocab_cls,\n valid_data_loader=None, lr_scheduler=None, max_len_step=None):\n \"\"\"\n :param model:\n :param optimizer:\n :param config:\n :param data_loader:\n :param iob_labels_vocab_cls\n :param valid_data_loader:\n :param lr_scheduler:\n :param max_len_step: controls number of batches(steps) in each epoch.\n \"\"\"\n self.config = config\n self.iob_labels_vocab_cls = iob_labels_vocab_cls\n self.distributed = config['distributed']\n if self.distributed:\n self.local_master = (config['local_rank'] == 0)\n self.global_master = (dist.get_rank() == 0)\n else:\n self.local_master = True\n self.global_master = True\n self.logger = config.get_logger('trainer', config['trainer']['log_verbosity']) if self.local_master else None\n\n # setup GPU device if available, move model into configured device\n self.device, self.device_ids = self._prepare_device(config['local_rank'], config['local_world_size'])\n self.model = model.to(self.device)\n\n self.optimizer = optimizer\n\n cfg_trainer = config['trainer']\n self.epochs = cfg_trainer['epochs']\n self.save_period = cfg_trainer['save_period']\n monitor_open = cfg_trainer['monitor_open']\n if monitor_open:\n self.monitor = cfg_trainer.get('monitor', 'off')\n else:\n self.monitor = 'off'\n\n # configuration to monitor model performance and save best\n if self.monitor == 'off':\n self.monitor_mode = 'off'\n self.monitor_best = 0\n else:\n self.monitor_mode, self.monitor_metric = self.monitor.split()\n assert self.monitor_mode in ['min', 'max']\n\n self.monitor_best = inf if self.monitor_mode == 'min' else -inf\n self.early_stop = cfg_trainer.get('early_stop', inf)\n self.early_stop = inf if self.early_stop == -1 else self.early_stop\n\n self.start_epoch = 1\n\n if self.local_master:\n self.checkpoint_dir = config.save_dir\n # setup visualization writer instance\n # self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])\n self.writer = SummaryWriter(config.tensorboard_dir)\n # load checkpoint for resume training\n if config.resume is not None:\n self._resume_checkpoint(config.resume)\n\n # load checkpoint following load to multi-gpu, avoid 'module.' 
prefix\n if self.config['trainer']['sync_batch_norm'] and self.distributed:\n self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)\n\n if self.distributed:\n self.model = DDP(self.model, device_ids=self.device_ids, output_device=self.device_ids[0],\n find_unused_parameters=True)\n\n self.data_loader = data_loader\n if max_len_step is None: # max length of iteration step of every epoch\n # epoch-based training\n self.len_step = len(self.data_loader)\n else:\n # iteration-based training\n self.data_loader = inf_loop(data_loader)\n self.len_step = max_len_step\n self.valid_data_loader = valid_data_loader\n self.do_validation = self.valid_data_loader is not None\n self.lr_scheduler = lr_scheduler\n\n log_step = self.config['trainer']['log_step_interval']\n self.log_step = log_step if log_step != -1 and 0 < log_step < self.len_step else int(\n np.sqrt(data_loader.batch_size))\n\n self.val_epoch_interval = self.config['trainer']['val_epoch_interval']\n\n self.gl_loss_lambda = self.config['trainer']['gl_loss_lambda']\n\n self.train_loss_metrics = MetricTracker('loss', 'gl_loss', 'crf_loss',\n writer=self.writer if self.local_master else None)\n self.valid_f1_metrics = SpanBasedF1MetricTracker(iob_labels_vocab_cls)\n\n def train(self):\n \"\"\"\n Full training logic, including train and validation.\n \"\"\"\n\n if self.distributed:\n dist.barrier() # Syncing machines before training\n\n not_improved_count = 0\n val_result_dict = None\n if self.config['evaluate_only']:\n print(\"------Evaluation only------\")\n val_result_dict = self._valid_epoch(0)\n val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)\n\n self.logger_info('[Step Validation] Epoch:[{}/{}]] \\n{}'.\n format(0, self.epochs, val_res))\n return\n for epoch in range(self.start_epoch, self.epochs + 1):\n\n # ensure distribute worker sample different data,\n # set different random seed by passing epoch to sampler\n if self.distributed:\n self.data_loader.sampler.set_epoch(epoch)\n result_dict = self._train_epoch(epoch)\n\n # print logged information to the screen\n if self.do_validation:\n val_result_dict = result_dict['val_result_dict']\n val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)\n else:\n val_res = ''\n # every epoch log information\n self.logger_info('[Epoch Validation] Epoch:[{}/{}] Total Loss: {:.6f} '\n 'GL_Loss: {:.6f} CRF_Loss: {:.6f} \\n{}'.\n format(epoch, self.epochs, result_dict['loss'],\n result_dict['gl_loss'] * self.gl_loss_lambda,\n result_dict['crf_loss'], val_res))\n\n # evaluate model performance according to configured metric, check early stop, and\n # save best checkpoint as model_best\n best = False\n if self.monitor_mode != 'off' and self.do_validation:\n best, not_improved_count = self._is_best_monitor_metric(best, not_improved_count, val_result_dict)\n if not_improved_count > self.early_stop:\n self.logger_info(\"Validation performance didn't improve for {} epochs. 
\"\n \"Training stops.\".format(self.early_stop))\n break\n\n if epoch % self.save_period == 0:\n self._save_checkpoint(epoch, save_best=best)\n\n def _is_best_monitor_metric(self, best, not_improved_count, val_result_dict):\n \"\"\"\n monitor metric\n :param best:\n :param not_improved_count:\n :param val_result_dict:\n :return:\n \"\"\"\n entity_name, metric = self.monitor_metric.split('-')\n val_monitor_metric_res = val_result_dict[entity_name][metric]\n try:\n # check whether model performance improved or not, according to specified metric(monitor_metric)\n improved = (self.monitor_mode == 'min' and val_monitor_metric_res <= self.monitor_best) or \\\n (self.monitor_mode == 'max' and val_monitor_metric_res >= self.monitor_best)\n except KeyError:\n self.logger_warning(\"Warning: Metric '{}' is not found. \"\n \"Model performance monitoring is disabled.\".format(self.monitor_metric))\n self.monitor_mode = 'off'\n improved = False\n if improved:\n self.monitor_best = val_monitor_metric_res\n not_improved_count = 0\n best = True\n else:\n not_improved_count += 1\n return best, not_improved_count\n\n def _train_epoch(self, epoch):\n \"\"\"\n Training logic for an epoch\n :param epoch: Integer, current training epoch.\n :return: A log dict that contains average loss and metric in this epoch.\n \"\"\"\n self.model.train()\n self.train_loss_metrics.reset()\n # step iteration start ##\n for step_idx, input_data_item in enumerate(self.data_loader):\n step_idx += 1\n\n for key, input_value in input_data_item.items():\n if input_value is not None and isinstance(input_value, torch.Tensor):\n input_data_item[key] = input_value.to(self.device, non_blocking=True)\n if self.config['trainer']['anomaly_detection']:\n # This mode will increase the runtime and should only be enabled for debugging\n with torch.autograd.detect_anomaly():\n self.optimizer.zero_grad()\n # model forward\n output = self.model(**input_data_item)\n # calculate loss\n gl_loss = output['gl_loss']\n crf_loss = output['crf_loss']\n total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)\n # backward\n total_loss.backward()\n # self.average_gradients(self.model)\n self.optimizer.step()\n else:\n self.optimizer.zero_grad()\n # model forward\n output = self.model(**input_data_item)\n # calculate loss\n gl_loss = output['gl_loss']\n crf_loss = output['crf_loss']\n total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)\n # backward\n total_loss.backward()\n # self.average_gradients(self.model)\n self.optimizer.step()\n\n # Use a barrier() to make sure that all process have finished forward and backward\n if self.distributed:\n dist.barrier()\n # obtain the sum of all total_loss at all processes\n dist.all_reduce(total_loss, op=dist.reduce_op.SUM)\n\n size = dist.get_world_size()\n else:\n size = 1\n gl_loss /= size # averages gl_loss across the whole world\n crf_loss /= size # averages crf_loss across the whole world\n\n # calculate average loss across the batch size\n avg_gl_loss = torch.mean(gl_loss)\n avg_crf_loss = torch.mean(crf_loss)\n avg_loss = avg_crf_loss + self.gl_loss_lambda * avg_gl_loss\n # update metrics\n # self.writer.set_step((epoch - 1) * self.len_step + step_idx - 1) if self.local_master else None\n self.train_loss_metrics.update('loss', avg_loss.item(), epoch)\n self.train_loss_metrics.update('gl_loss', avg_gl_loss.item() * self.gl_loss_lambda, epoch)\n self.train_loss_metrics.update('crf_loss', avg_crf_loss.item(), epoch)\n\n # log messages\n if step_idx % self.log_step == 0:\n 
self.logger_info('Train Epoch:[{}/{}] Step:[{}/{}] Total Loss: {:.6f} GL_Loss: {:.6f} CRF_Loss: {:.6f}'.\n format(epoch, self.epochs, step_idx, self.len_step,\n avg_loss.item(), avg_gl_loss.item() * self.gl_loss_lambda, avg_crf_loss.item()))\n\n # decide whether continue iter\n if step_idx == self.len_step + 1:\n break\n\n # step iteration end ##\n\n # do validation after val_step_interval iteration\n if self.do_validation and epoch % self.val_epoch_interval == 0:\n val_result_dict = self._valid_epoch(epoch)\n self.logger_info('[Step Validation] Epoch:[{}/{}]] \\n{}'.\n format(epoch, self.epochs, self.len_step,\n SpanBasedF1MetricTracker.dict2str(val_result_dict)))\n\n # check if best metric, if true, then save as model_best checkpoint.\n best, not_improved_count = self._is_best_monitor_metric(False, 0, val_result_dict)\n if best:\n self._save_checkpoint(epoch, best)\n\n # {'loss': avg_loss, 'gl_loss': avg_gl_loss, 'crf_loss': avg_crf_loss}\n log = self.train_loss_metrics.result()\n\n # do validation after training an epoch\n if self.do_validation:\n val_result_dict = self._valid_epoch(epoch)\n log['val_result_dict'] = val_result_dict\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n self.model.train()\n\n return log\n\n def _valid_epoch(self, epoch):\n \"\"\"\n Validate after training an epoch or regular step, this is a time-consuming procedure if validation data is big.\n :param epoch: Integer, current training epoch.\n :return: A dict that contains information about validation\n \"\"\"\n\n self.model.eval()\n self.valid_f1_metrics.reset()\n with torch.no_grad():\n for step_idx, input_data_item in enumerate(self.valid_data_loader):\n for key, input_value in input_data_item.items():\n if input_value is not None and isinstance(input_value, torch.Tensor):\n input_data_item[key] = input_value.to(self.device, non_blocking=True)\n\n output = self.model(**input_data_item)\n logits = output['logits']\n new_mask = output['new_mask']\n if hasattr(self.model, 'module'):\n # List[(List[int], torch.Tensor)] contain the tag indices of the maximum likelihood tag sequence.\n # and the score of the viterbi path.\n best_paths = self.model.module.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,\n logits_batch_first=True)\n else:\n best_paths = self.model.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,\n logits_batch_first=True)\n\n predicted_tags = []\n for path, score in best_paths:\n predicted_tags.append(path)\n\n # self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + step_idx, 'valid') \\\n # if self.local_master else None\n\n # calculate and update f1 metrics\n # (B, N*T, out_dim)\n predicted_tags_hard_prob = logits * 0\n for i, instance_tags in enumerate(predicted_tags):\n for j, tag_id in enumerate(instance_tags):\n predicted_tags_hard_prob[i, j, tag_id] = 1\n\n golden_tags = input_data_item['iob_tags_label']\n mask = input_data_item['mask']\n\n union_iob_tags = to_union(golden_tags, mask, self.iob_labels_vocab_cls)\n\n if self.distributed:\n dist.barrier() #\n self.valid_f1_metrics.update(predicted_tags_hard_prob.long(), union_iob_tags, new_mask)\n\n # add histogram of model parameters to the tensorboard\n # for name, p in self.model.named_parameters():\n # self.writer.add_histogram(name, p, bins='auto')\n\n f1_result_dict = self.valid_f1_metrics.result()\n\n overall_dict = f1_result_dict['overall']\n if self.local_master:\n for key, value in overall_dict.items():\n self.writer.add_scalar(key, value, epoch)\n\n return f1_result_dict\n\n @staticmethod\n def 
average_gradients(model):\n \"\"\"\n Gradient averaging\n :param model:\n :return:\n \"\"\"\n size = float(dist.get_world_size())\n for param in model.parameters():\n dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)\n param.grad.data /= size\n\n def logger_info(self, msg):\n self.logger.info(msg) if self.local_master else None\n\n def logger_warning(self, msg):\n self.logger.warning(msg) if self.local_master else None\n\n def _prepare_device(self, local_rank, local_world_size):\n \"\"\"\n setup GPU device if available, move model into configured device\n :param local_rank:\n :param local_world_size:\n :return:\n \"\"\"\n if self.distributed:\n n_gpu_per_process = torch.cuda.device_count() // local_world_size\n device_ids = list(range(local_rank * n_gpu_per_process, (local_rank + 1) * n_gpu_per_process))\n\n if torch.cuda.is_available() and local_rank != -1:\n torch.cuda.set_device(device_ids[0]) # device_ids[0] =local_rank if local_world_size = n_gpu per node\n device = 'cuda'\n self.logger_info(\n f\"[Process {os.getpid()}] world_size = {dist.get_world_size()}, \"\n + f\"rank = {dist.get_rank()}, n_gpu/process = {n_gpu_per_process}, device_ids = {device_ids}\"\n )\n else:\n self.logger_warning('Training will be using CPU!')\n device = 'cpu'\n device = torch.device(device)\n return device, device_ids\n else:\n n_gpu = torch.cuda.device_count()\n n_gpu_use = local_world_size\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger_warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger_warning(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n\n list_ids = list(range(n_gpu_use))\n if n_gpu_use > 0:\n torch.cuda.set_device(list_ids[0]) # only use first available gpu as devices\n self.logger_warning(f'Training is using GPU {list_ids[0]}!')\n device = 'cuda'\n else:\n self.logger_warning('Training is using CPU!')\n device = 'cpu'\n device = torch.device(device)\n return device, list_ids\n\n def _save_checkpoint(self, epoch, save_best=False):\n \"\"\"\n Saving checkpoints\n :param epoch: current epoch number\n :param save_best: if True, rename the saved checkpoint to 'model_best.pth'\n :return:\n \"\"\"\n # only local master process do save model\n if not self.local_master:\n return\n\n if hasattr(self.model, 'module'):\n arch = type(self.model.module).__name__\n state_dict = self.model.module.state_dict()\n else:\n arch = type(self.model).__name__\n state_dict = self.model.state_dict()\n state = {\n 'arch': arch,\n 'epoch': epoch,\n 'state_dict': state_dict,\n 'optimizer': self.optimizer.state_dict(),\n 'monitor_best': self.monitor_best,\n 'config': self.config\n }\n if save_best:\n best_path = str(self.checkpoint_dir / 'model_best.pth')\n torch.save(state, best_path)\n self.logger_info(\"Saving current best: model_best.pth ...\")\n else:\n filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))\n torch.save(state, filename)\n self.logger_info(\"Saving checkpoint: {} ...\".format(filename))\n\n def _resume_checkpoint(self, resume_path):\n \"\"\"\n Resume from saved checkpoints\n :param resume_path: Checkpoint path to be resumed\n :return:\n \"\"\"\n resume_path = str(resume_path)\n self.logger_info(\"Loading checkpoint: {} ...\".format(resume_path))\n # map_location = {'cuda:%d' % 0: 'cuda:%d' % self.config['local_rank']}\n checkpoint = torch.load(resume_path, 
map_location=self.device)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n\n # load architecture params from checkpoint.\n if checkpoint['config']['model_arch'] != self.config['model_arch']:\n self.logger_warning(\"Warning: Architecture configuration given in config file is different from that of \"\n \"checkpoint. This may yield an exception while state_dict is being loaded.\")\n self.model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger_warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n \"Optimizer parameters not being resumed.\")\n else:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger_info(\"Checkpoint loaded. Resume training from epoch {}\".format(self.start_epoch))\n"
] | [
[
"torch.mean",
"numpy.sqrt",
"torch.cuda.set_device",
"torch.load",
"torch.cuda.device_count",
"torch.sum",
"torch.distributed.barrier",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.autograd.detect_anomaly",
"torch.distributed.all_reduce",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
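The trainer code embedded in the row above averages gradients by hand with dist.all_reduce and divides by the world size before stepping the optimizer. As a point of comparison, here is a minimal, self-contained sketch of that gradient-averaging step; it assumes torch.distributed has already been initialised elsewhere (e.g. via dist.init_process_group) and uses dist.ReduceOp.SUM, the current spelling of the deprecated dist.reduce_op.SUM that appears in the vendored code. The names are illustrative and not part of the original repository.

import torch
import torch.distributed as dist

def average_gradients(model: torch.nn.Module) -> None:
    """All-reduce each parameter gradient and divide by the number of processes."""
    world_size = float(dist.get_world_size())
    for param in model.parameters():
        if param.grad is None:
            continue  # skip frozen or unused parameters
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
        param.grad.data /= world_size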
Toulik1729231/WebScraping1-Using-Python | [
"42562c66c905f925ea0848b8ae7dfbca6b5a1afd"
] | [
"scrap_players.py"
] | [
"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom logger_impl import *\r\nimport MongoDao\r\nimport pandas as pd\r\nimport time\r\n\r\npayload = {'key': 'ac9e8cf2dec81949d9ee1235ed6ae3fb', 'url':\r\n'https://httpbin.org/ip'}\r\n\r\n\r\n\r\ndef scrapData(scorecardSoup, matchId, matchDesc, matchTypeText, pageUrl, season, Date, venue):\r\n\r\n #pageUrl = \"http://www.espncricinfo.com/series/11422/scorecard/858491/bangladesh-vs-pakistan-only-t20i-pakistan-tour-of-bangladesh-2015\"\r\n try:\r\n \"\"\"page = urllib.request.urlopen(pageUrl)\r\n\r\n ## get match-id and match-name from url\r\n pageUrlArr = pageUrl.split('/')\r\n matchId = pageUrlArr[len(pageUrlArr ) - 2]\r\n matchDesc = pageUrlArr[len(pageUrlArr ) - 1] \"\"\"\r\n #soup = BeautifulSoup(page, 'html.parser')\r\n soup = scorecardSoup\r\n\r\n #print(\"page html: \", soup.prettify())\r\n scorecardDiv = soup.find_all('article', class_='sub-module scorecard')\r\n playerBatsmanDict = {}\r\n playerBowlerDict = {}\r\n batsmanScorecardParam = ['run_scored', 'balls_faced','M', '4s', '6s', 'strike_rate']\r\n bowlerScorecardParam = ['O', 'M', 'R', 'W', 'Econ', 'WD', 'NB']\r\n\r\n teamList = []\r\n teamIDList = []\r\n inningsTeam = []\r\n## print(len(scorecardDiv))\r\n #creating playing team list\r\n for scorecardVal in scorecardDiv:\r\n #print(scorecardVal)\r\n team = scorecardVal.find('h2').get_text()\r\n if matchTypeText == 'Tests':\r\n team = str(team).replace('1st Innings', '').replace('2nd Innings', '')\r\n else:\r\n team = str(team).replace('Innings', '')\r\n if team.strip() in teamList:\r\n break\r\n teamList.append(team.strip())\r\n count = {teamList[0]:0,teamList[1]:0}\r\n\r\n for team in teamList:\r\n word = team.split(' ')\r\n if len(word) == 1:\r\n id_ = team[:3]\r\n teamIDList.append(id_)\r\n else:\r\n id_ = ''\r\n for x in word:\r\n id_ = id_ + x[0]\r\n teamIDList.append(id_)\r\n\r\n for scorecardVal in scorecardDiv:\r\n team = scorecardVal.find('h2').get_text()\r\n inn = ''\r\n if matchTypeText == 'Tests':\r\n inn = ' '.join(str(team).split(' ')[-2:])\r\n team = str(team).replace('1st Innings', '').replace('2nd Innings', '')\r\n else:\r\n team = str(team).replace('Innings', '')\r\n team = team.strip()\r\n count[team] += 1\r\n## print(count)\r\n logger.info(\"team: \" + team)\r\n #print(\"batsman div: \", scorecardVal)\r\n batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')\r\n batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')\r\n## for bt in batsmanListNotBatted:\r\n## print(bt.get('href'))\r\n## print(bt.get_text())\r\n for batsman in batsmanList:\r\n batsmanDict = {}\r\n #print(\"batsman data: \", batsman)\r\n batsmanAnchor = batsman.find('div', class_=\"cell batsmen\").find('a')\r\n batsmanLink = batsmanAnchor.get('href')\r\n batsmanName = batsmanAnchor.get_text()\r\n\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n #print(\"batsman Name: \", batsmanName, \" batsmanId: \", cricInfoBatsmanId)\r\n batsmanDict['short_name'] = batsmanName\r\n batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n \r\n\r\n #print(\"batsmanDiv: \", batsmanDiv.get_text())\r\n try:\r\n commentry = batsman.find('div', class_=\"cell commentary\").find('a').get_text()\r\n batsmanDict['commentry'] = commentry\r\n except AttributeError as ae:\r\n batsmanDict['commentry'] = ''\r\n\r\n #print(\"batsman commentry: \", commentry)\r\n 
#print(\"commentryDiv: \", commentryDiv.get_text())\r\n batsmanStatsList = batsman.find_all('div', class_=\"cell runs\")\r\n ctr = 0\r\n tempList = []\r\n for batsmanStats in batsmanStatsList:\r\n #print(\"anchor: \", batsmanStats.get_text())\r\n #param = batsmanScorecardParam[ctr]\r\n #ctr += 1\r\n #batsmanDict[param] = batsmanStats.get_text()\r\n tempList.append(batsmanStats.get_text())\r\n \r\n if len(tempList) == 6:\r\n batsmanDict['run_scored'] = tempList[0]\r\n batsmanDict['balls_faced'] = tempList[1]\r\n batsmanDict['M'] = tempList[2]\r\n batsmanDict['4s'] = tempList[3]\r\n batsmanDict['6s'] = tempList[4]\r\n batsmanDict['strike_rate'] = tempList[5]\r\n else:\r\n batsmanDict['run_scored'] = tempList[0]\r\n batsmanDict['balls_faced'] = tempList[1]\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = tempList[2]\r\n batsmanDict['6s'] = tempList[3]\r\n batsmanDict['strike_rate'] = tempList[4]\r\n\r\n \r\n \r\n batsmanDict['innings'] = inn\r\n key = cricInfoBatsmanId# + \"_\" + team\r\n if matchTypeText == 'Tests':\r\n key = key + inn[0]\r\n playerBatsmanDict[key] = batsmanDict\r\n \r\n #break\r\n## print(batsmanListNotBatted)\r\n\r\n for batsmen in batsmanListNotBatted:\r\n batsmanDict={}\r\n batsmanLink = batsmen.get('href')\r\n batsmanName = batsmen.get_text()\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n batsmanDict['short_name'] = batsmanName\r\n batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n batsmanDict['run_scored'] = '-'\r\n batsmanDict['balls_faced'] = '-'\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = '-'\r\n batsmanDict['6s'] = '-'\r\n batsmanDict['strike_rate'] = '-'\r\n batsmanDict['innings'] = inn\r\n key = cricInfoBatsmanId# + \"_\" + team\r\n #print('id : ',cricInfoBatsmanId)\r\n #print('key : ',key)\r\n #print(batsmanDict)\r\n if matchTypeText == 'Tests':\r\n key = key+inn[0]\r\n playerBatsmanDict[key] = batsmanDict\r\n #print('Dict added : ',playerBatsmanDict[key])\r\n\r\n bowlersTR = scorecardVal.find('tbody').find_all('tr')\r\n #print(\"bowler section: \", bowlersTR)\r\n for bowlerRow in bowlersTR:\r\n bowlersTD = bowlerRow.find_all('td')\r\n bowlerAnchor = bowlersTD[0].find('a')\r\n bowlerLink = bowlerAnchor.get('href')\r\n bowlerName = bowlerAnchor.get_text()\r\n #print(\"bowler name: \", bowlerName, \" link: \", bowlerLink)\r\n bowlerLinkArr = str(bowlerLink).split('/')\r\n cricInfoBowlerId = bowlerLinkArr[len(bowlerLinkArr) - 1]\r\n cricInfoBowlerId = str(cricInfoBowlerId).replace('.html', '')\r\n logger.info(\"bowlersTD: \" + str(bowlersTD))\r\n logger.info(\"length bowlersTD: \" + str(len(bowlersTD)))\r\n if len(bowlersTD) == 13:\r\n overs = bowlersTD[2].find(text=True)\r\n maidens = bowlersTD[3].find(text=True)\r\n runs = bowlersTD[4].find(text=True)\r\n wickets = bowlersTD[5].find(text=True)\r\n economy = bowlersTD[6].find(text=True)\r\n dotBalls = bowlersTD[7].find(text=True)\r\n ballerFours = bowlersTD[8].find(text=True)\r\n ballerSixes = bowlersTD[9].find(text=True)\r\n wideBalls = bowlersTD[10].find(text=True)\r\n noBalls = bowlersTD[11].find(text=True)\r\n \r\n else:\r\n overs = bowlersTD[2].find(text=True)\r\n maidens = bowlersTD[3].find(text=True)\r\n runs = bowlersTD[4].find(text=True)\r\n wickets = bowlersTD[5].find(text=True)\r\n economy = bowlersTD[6].find(text=True)\r\n dotBalls = 0\r\n ballerFours = 0\r\n ballerSixes = 0\r\n wideBalls = bowlersTD[7].find(text=True)\r\n 
noBalls = bowlersTD[8].find(text=True)\r\n \r\n## print('o'+overs)\r\n## print(maidens)\r\n## print(runs)\r\n## print(wickets)\r\n## print(economy)\r\n## print(dotBalls)\r\n## print(ballerFours)\r\n## print(ballerSixes)\r\n## print(wideBalls)\r\n## print(noBalls) \r\n \r\n \r\n #['O', 'M', 'R', 'W', 'Econ', 'WD', 'NB']\r\n bowlerDict = {}\r\n bowlerDict['short_name'] = bowlerName\r\n bowlerDict['player_cric_info_link'] = bowlerLink\r\n if '.' in overs:\r\n oversArr = overs.split('.')\r\n totalBalls: int = int(oversArr[0]) * 6\r\n totalBalls += int(oversArr[1])\r\n else:\r\n totalBalls: int = int(overs) * 6\r\n\r\n # getting the bowling team name\r\n if team == teamList[0]:\r\n bowlingTeam = teamList[1]\r\n else:\r\n bowlingTeam = teamList[0]\r\n\r\n bowlerDict['team'] = bowlingTeam\r\n bowlerDict['balls_bowled'] = totalBalls\r\n bowlerDict['maiden_overs'] = maidens\r\n bowlerDict['runs_given'] = runs\r\n bowlerDict['wicket'] = wickets\r\n bowlerDict['econ'] = economy\r\n bowlerDict['dot_delivery'] = dotBalls\r\n bowlerDict['four_delivery'] = ballerFours\r\n bowlerDict['six_delivery'] = ballerSixes\r\n bowlerDict['wide_balls'] = wideBalls\r\n bowlerDict['no_balls'] = noBalls\r\n bowlerDict['innings'] = inn\r\n #print(overs, maidens, runs, wickets, economy, wideBalls, noBalls)\r\n key = cricInfoBowlerId# + \"_\" + team\r\n if matchTypeText == 'Tests':\r\n key = key+inn[0]\r\n playerBowlerDict[key] = bowlerDict\r\n\r\n #print(\"batsmanDict: \", playerBatsmanDict)\r\n #print(\"bowlerDict: \", playerBowlerDict)\r\n\r\n if matchTypeText == 'Tests' and ((count[teamList[0]] == 2 and count[teamList[1]] == 1) or (count[teamList[0]] == 1 and count[teamList[1]] == 2)):\r\n # if \r\n missing = ''\r\n if count[teamList[0]] == 1:\r\n missing = teamList[0]\r\n elif count[teamList[1]] == 1:\r\n missing = teamList[1]\r\n\r\n for scorecardVal in scorecardDiv:\r\n team = scorecardVal.find('h2').get_text()\r\n inn = ' '.join(str(team).split(' ')[-2:])\r\n team = str(team).replace('1st Innings', '').replace('2nd Innings', '')\r\n team = team.strip()\r\n if team == missing:\r\n batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')\r\n batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')\r\n for batsman in batsmanList:\r\n batsmanDict = {}\r\n batsmanAnchor = batsman.find('div', class_=\"cell batsmen\").find('a')\r\n batsmanLink = batsmanAnchor.get('href')\r\n batsmanName = batsmanAnchor.get_text()\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n batsmanDict['short_name'] = batsmanName\r\n batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n batsmanDict['run_scored'] = '-'\r\n batsmanDict['balls_faced'] = '-'\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = '-'\r\n batsmanDict['6s'] = '-'\r\n batsmanDict['strike_rate'] = '-'\r\n batsmanDict['innings'] = '2nd Innings'\r\n## print(batsmanList)\r\n key = cricInfoBatsmanId\r\n batsmanDict['commentry'] = '-'\r\n if matchTypeText == 'Tests':\r\n key = key+'2'\r\n playerBatsmanDict[key] = batsmanDict\r\n\r\n for batsmen in batsmanListNotBatted:\r\n batsmanLink = batsmen.get('href')\r\n batsmanName = batsmen.get_text()\r\n batsmanLinkArr = str(batsmanLink).split('/')\r\n cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]\r\n cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')\r\n batsmanDict['short_name'] = batsmanName\r\n 
batsmanDict['player_cric_info_link'] = batsmanLink\r\n batsmanDict['team'] = team\r\n batsmanDict['run_scored'] = '-'\r\n batsmanDict['balls_faced'] = '-'\r\n batsmanDict['M'] = '-'\r\n batsmanDict['4s'] = '-'\r\n batsmanDict['6s'] = '-'\r\n batsmanDict['strike_rate'] = '-'\r\n batsmanDict['innings'] = '2nd Innings'\r\n key = cricInfoBatsmanId# + \"_\" + team\r\n if matchTypeText == 'Tests':\r\n key = key+'2'\r\n playerBatsmanDict[key] = batsmanDict\r\n \r\n # checking batsman in bowler map, if found add them in playerBatsmanDict\r\n if matchTypeText == 'Tests':\r\n for batsmanKey, batsmanValue in playerBatsmanDict.items():\r\n if batsmanKey in playerBowlerDict:\r\n if playerBatsmanDict[batsmanKey]['innings'] == playerBowlerDict[batsmanKey]['innings']:\r\n bowlerData = playerBowlerDict[batsmanKey]\r\n fianlDict = {**batsmanValue, **bowlerData}\r\n playerBatsmanDict[batsmanKey] = fianlDict\r\n del playerBowlerDict[batsmanKey]\r\n else: \r\n for batsmanKey, batsmanValue in playerBatsmanDict.items():\r\n if batsmanKey in playerBowlerDict:\r\n bowlerData = playerBowlerDict[batsmanKey]\r\n fianlDict = {**batsmanValue, **bowlerData}\r\n playerBatsmanDict[batsmanKey] = fianlDict\r\n del playerBowlerDict[batsmanKey]\r\n\r\n## print(\"after merging batsmanDict: \", playerBatsmanDict)\r\n## print(\"after merging bowlerDict: \", playerBowlerDict)\r\n playerFinalDict = {**playerBatsmanDict, **playerBowlerDict}\r\n\r\n## \r\n## print(\"Player final dict: \", playerFinalDict)\r\n \r\n ##TODO mark player as 'Batsman', 'Bowler', 'WicketKeeper', 'All rounder'\r\n pno = 0\r\n for playerKey, playerValue in playerFinalDict.items():\r\n flag = True\r\n while flag:\r\n try:\r\n pno+=1\r\n if pno <= 5:\r\n shortName = playerValue['short_name']\r\n playerDict = playerFinalDict[playerKey] \r\n if '†' in shortName:\r\n #checking for WicketKeeper positio\r\n playerDict['Position'] = \"WK\"\r\n elif 'econ' in playerDict:\r\n playerDict['Position'] = \"Bowler\"\r\n else:\r\n playerDict['Position'] = \"Batsman\"\r\n #print('Pno : ' + str(pno))\r\n playerDict['match_id'] = matchId + '_' + playerDict['innings'][:2]\r\n playerDict['match_desc'] = matchDesc\r\n playerDict['match_type_text'] = matchTypeText +' '+ playerDict['innings']\r\n playerDict['season'] = season\r\n playerDict['MatchURL'] = pageUrl\r\n playerDict['Match_start_Date'] = Date\r\n playerDict['Venue'] = venue\r\n if playerDict['team'] == teamList[0]:\r\n playerDict['TeamID'] = teamIDList[0]\r\n playerDict['OpponentID'] = teamIDList[1]\r\n else:\r\n playerDict['TeamID'] = teamIDList[1]\r\n playerDict['OpponentID'] = teamIDList[0]\r\n url = playerDict['player_cric_info_link']\r\n page = requests.get(url,params = payload).text\r\n soup = BeautifulSoup(page,'html.parser')\r\n pees = soup.find_all('p',class_='ciPlayerinformationtxt')\r\n val = []\r\n key = []\r\n for pee in pees:\r\n key.append(pee.find('b').get_text())\r\n val.append(pee.find('span').get_text())\r\n if \"Full name\" in key:\r\n playerDict['Player_Full_Name'] = val[key.index(\"Full name\")]\r\n else:\r\n playerDict['Player_Full_Name'] = '-'\r\n if 'Born' in key:\r\n playerDict['date,place_of_birth'] = val[key.index('Born')].replace('\\n','').strip()\r\n else:\r\n playerDict['date,place_of_birth'] = '-'\r\n if 'Nickname' in key:\r\n playerDict['Player_Nickname'] = val[key.index('Nickname')]\r\n else:\r\n playerDict['Player_Nickname'] = '-'\r\n \r\n \r\n ## playerDict['Player_Full_Name'] = data[0]\r\n ## playerDict['data,place_of_birth'] = data[1][1:]\r\n ## if data[4] == None:\r\n ## 
playerDict['Player_Nickname'] = '-'\r\n ## else:\r\n ## playerDict['Player_Nickname'] = data[4]\r\n \r\n\r\n #DOB_PlaceOB = soup.fin_next('p',class_='ciPlayerinformationtxt').find('span').get_text()\r\n \r\n \r\n \r\n # below adding missed parameters in player's dict with default 0 value\r\n if not 'run_scored' in playerDict:\r\n playerDict['run_scored'] = \"-\"\r\n\r\n if not 'balls_faced' in playerDict:\r\n playerDict['balls_faced'] = \"-\"\r\n\r\n if not 'strike_rate' in playerDict:\r\n playerDict['strike_rate'] = \"-\"\r\n\r\n if not 'balls_bowled' in playerDict:\r\n playerDict['balls_bowled'] = \"-\"\r\n\r\n if not 'maiden_overs' in playerDict:\r\n playerDict['maiden_overs'] = \"-\"\r\n if not 'runs_given' in playerDict:\r\n playerDict['runs_given'] = \"-\"\r\n if not 'wicket' in playerDict:\r\n playerDict['wicket'] = \"-\"\r\n if not 'econ' in playerDict:\r\n playerDict['econ'] = \"-\"\r\n if not 'wide_balls' in playerDict:\r\n playerDict['wide_balls'] = \"-\"\r\n if not 'no_balls' in playerDict:\r\n playerDict['no_balls'] = \"-\"\r\n flag = False\r\n else:\r\n pno = 0\r\n time.sleep(10)\r\n \r\n \r\n except Exception as e:\r\n print('pausing scrapping for 5 mins : '+str(e))\r\n time.sleep(300)\r\n flag = True\r\n \r\n\r\n \r\n # print(\"Player final dict 2: \", playerFinalDict)\r\n\r\n for key, val in playerFinalDict.items():\r\n val['cric_info_id'] = key\r\n val['_id'] = key + \"-\" + matchId\r\n #print(key)\r\n #MongoDao.insertToPlayerStats(val)\r\n \r\n\r\n logger.info(\"players inserted successfully for url: \" + pageUrl)\r\n #MongoDao.insertToProcessedUrls(pageUrl)\r\n #print(playerFinalDict.key())\r\n df = pd.DataFrame(playerFinalDict)\r\n return df\r\n\r\n except Exception as e:\r\n logger.error(\"ERROR while processing URL: \" + pageUrl)\r\n logger.exception(\"message\")\r\n print(\"Scrapping : \"+str(e))\r\n #print((\"ERROR while processing URL: \" + pageUrl))\r\n\r\n\r\n\r\n#scrapODI_T20Data('', '', '', \"T20\", '', '')\r\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
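The scrap_players.py row above builds per-player dictionaries by walking ESPNcricinfo scorecard markup with requests and BeautifulSoup. The sketch below isolates the core pattern it relies on (fetch a page, select divs by CSS class, pull player names and links out of the anchors). The "cell batsmen" class is taken from the vendored code, so treat it, along with the URL handling, as an assumption about the scraped site rather than a tested interface.

import requests
from bs4 import BeautifulSoup

def scrape_player_links(page_url: str) -> dict:
    """Return {player_name: href} for every batsman anchor on a scorecard page."""
    html = requests.get(page_url, timeout=30).text
    soup = BeautifulSoup(html, "html.parser")
    players = {}
    for cell in soup.find_all("div", class_="cell batsmen"):  # class name as used in the code above
        anchor = cell.find("a")
        if anchor is not None:
            players[anchor.get_text(strip=True)] = anchor.get("href")
    return players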
portugueslab/scikit-image | [
"0fa3bcb118bb208a0cc7d3e8b96cd96c1ce7a75b",
"0fa3bcb118bb208a0cc7d3e8b96cd96c1ce7a75b",
"0fa3bcb118bb208a0cc7d3e8b96cd96c1ce7a75b",
"0fa3bcb118bb208a0cc7d3e8b96cd96c1ce7a75b"
] | [
"skimage/future/graph/graph_cut.py",
"skimage/morphology/watershed.py",
"skimage/transform/tests/test_warps.py",
"doc/examples/color_exposure/plot_adapt_rgb.py"
] | [
"try:\n import networkx as nx\nexcept ImportError:\n from ..._shared.utils import warn\n warn('RAGs require networkx')\nimport numpy as np\nfrom . import _ncut\nfrom . import _ncut_cy\nfrom scipy.sparse import linalg\n\n\ndef cut_threshold(labels, rag, thresh, in_place=True):\n \"\"\"Combine regions separated by weight less than threshold.\n\n Given an image's labels and its RAG, output new labels by\n combining regions whose nodes are separated by a weight less\n than the given threshold.\n\n Parameters\n ----------\n labels : ndarray\n The array of labels.\n rag : RAG\n The region adjacency graph.\n thresh : float\n The threshold. Regions connected by edges with smaller weights are\n combined.\n in_place : bool\n If set, modifies `rag` in place. The function will remove the edges\n with weights less that `thresh`. If set to `False` the function\n makes a copy of `rag` before proceeding.\n\n Returns\n -------\n out : ndarray\n The new labelled array.\n\n Examples\n --------\n >>> from skimage import data, segmentation\n >>> from skimage.future import graph\n >>> img = data.astronaut()\n >>> labels = segmentation.slic(img)\n >>> rag = graph.rag_mean_color(img, labels)\n >>> new_labels = graph.cut_threshold(labels, rag, 10)\n\n References\n ----------\n .. [1] Alain Tremeau and Philippe Colantoni\n \"Regions Adjacency Graph Applied To Color Image Segmentation\"\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274\n\n \"\"\"\n if not in_place:\n rag = rag.copy()\n\n # Because deleting edges while iterating through them produces an error.\n to_remove = [(x, y) for x, y, d in rag.edges(data=True)\n if d['weight'] >= thresh]\n rag.remove_edges_from(to_remove)\n\n comps = nx.connected_components(rag)\n\n # We construct an array which can map old labels to the new ones.\n # All the labels within a connected component are assigned to a single\n # label in the output.\n map_array = np.arange(labels.max() + 1, dtype=labels.dtype)\n for i, nodes in enumerate(comps):\n for node in nodes:\n for label in rag.node[node]['labels']:\n map_array[label] = i\n\n return map_array[labels]\n\n\ndef cut_normalized(labels, rag, thresh=0.001, num_cuts=10, in_place=True,\n max_edge=1.0):\n \"\"\"Perform Normalized Graph cut on the Region Adjacency Graph.\n\n Given an image's labels and its similarity RAG, recursively perform\n a 2-way normalized cut on it. All nodes belonging to a subgraph\n that cannot be cut further are assigned a unique label in the\n output.\n\n Parameters\n ----------\n labels : ndarray\n The array of labels.\n rag : RAG\n The region adjacency graph.\n thresh : float\n The threshold. A subgraph won't be further subdivided if the\n value of the N-cut exceeds `thresh`.\n num_cuts : int\n The number or N-cuts to perform before determining the optimal one.\n in_place : bool\n If set, modifies `rag` in place. For each node `n` the function will\n set a new attribute ``rag.node[n]['ncut label']``.\n max_edge : float, optional\n The maximum possible value of an edge in the RAG. This corresponds to\n an edge between identical regions. This is used to put self\n edges in the RAG.\n\n Returns\n -------\n out : ndarray\n The new labeled array.\n\n Examples\n --------\n >>> from skimage import data, segmentation\n >>> from skimage.future import graph\n >>> img = data.astronaut()\n >>> labels = segmentation.slic(img)\n >>> rag = graph.rag_mean_color(img, labels, mode='similarity')\n >>> new_labels = graph.cut_normalized(labels, rag)\n\n References\n ----------\n .. 
[1] Shi, J.; Malik, J., \"Normalized cuts and image segmentation\",\n Pattern Analysis and Machine Intelligence,\n IEEE Transactions on, vol. 22, no. 8, pp. 888-905, August 2000.\n\n \"\"\"\n if not in_place:\n rag = rag.copy()\n\n for node in rag.nodes():\n rag.add_edge(node, node, weight=max_edge)\n\n _ncut_relabel(rag, thresh, num_cuts)\n\n map_array = np.zeros(labels.max() + 1, dtype=labels.dtype)\n # Mapping from old labels to new\n for n, d in rag.nodes(data=True):\n map_array[d['labels']] = d['ncut label']\n\n return map_array[labels]\n\n\ndef partition_by_cut(cut, rag):\n \"\"\"Compute resulting subgraphs from given bi-parition.\n\n Parameters\n ----------\n cut : array\n A array of booleans. Elements set to `True` belong to one\n set.\n rag : RAG\n The Region Adjacency Graph.\n\n Returns\n -------\n sub1, sub2 : RAG\n The two resulting subgraphs from the bi-partition.\n \"\"\"\n # `cut` is derived from `D` and `W` matrices, which also follow the\n # ordering returned by `rag.nodes()` because we use\n # nx.to_scipy_sparse_matrix.\n\n # Example\n # rag.nodes() = [3, 7, 9, 13]\n # cut = [True, False, True, False]\n # nodes1 = [3, 9]\n # nodes2 = [7, 10]\n\n nodes1 = [n for i, n in enumerate(rag.nodes()) if cut[i]]\n nodes2 = [n for i, n in enumerate(rag.nodes()) if not cut[i]]\n\n sub1 = rag.subgraph(nodes1)\n sub2 = rag.subgraph(nodes2)\n\n return sub1, sub2\n\n\ndef get_min_ncut(ev, d, w, num_cuts):\n \"\"\"Threshold an eigenvector evenly, to determine minimum ncut.\n\n Parameters\n ----------\n ev : array\n The eigenvector to threshold.\n d : ndarray\n The diagonal matrix of the graph.\n w : ndarray\n The weight matrix of the graph.\n num_cuts : int\n The number of evenly spaced thresholds to check for.\n\n Returns\n -------\n mask : array\n The array of booleans which denotes the bi-partition.\n mcut : float\n The value of the minimum ncut.\n \"\"\"\n mcut = np.inf\n mn = ev.min()\n mx = ev.max()\n\n # If all values in `ev` are equal, it implies that the graph can't be\n # further sub-divided. In this case the bi-partition is the the graph\n # itself and an empty set.\n min_mask = np.zeros_like(ev, dtype=np.bool)\n if np.allclose(mn, mx):\n return min_mask, mcut\n\n # Refer Shi & Malik 2001, Section 3.1.3, Page 892\n # Perform evenly spaced n-cuts and determine the optimal one.\n for t in np.linspace(mn, mx, num_cuts, endpoint=False):\n mask = ev > t\n cost = _ncut.ncut_cost(mask, d, w)\n if cost < mcut:\n min_mask = mask\n mcut = cost\n\n return min_mask, mcut\n\n\ndef _label_all(rag, attr_name):\n \"\"\"Assign a unique integer to the given attribute in the RAG.\n\n This function assumes that all labels in `rag` are unique. It\n picks up a random label from them and assigns it to the `attr_name`\n attribute of all the nodes.\n\n rag : RAG\n The Region Adjacency Graph.\n attr_name : string\n The attribute to which a unique integer is assigned.\n \"\"\"\n node = min(rag.nodes())\n new_label = rag.node[node]['labels'][0]\n for n, d in rag.nodes(data=True):\n d[attr_name] = new_label\n\n\ndef _ncut_relabel(rag, thresh, num_cuts):\n \"\"\"Perform Normalized Graph cut on the Region Adjacency Graph.\n\n Recursively partition the graph into 2, until further subdivision\n yields a cut greater than `thresh` or such a cut cannot be computed.\n For such a subgraph, indices to labels of all its nodes map to a single\n unique value.\n\n Parameters\n ----------\n labels : ndarray\n The array of labels.\n rag : RAG\n The region adjacency graph.\n thresh : float\n The threshold. 
A subgraph won't be further subdivided if the\n value of the N-cut exceeds `thresh`.\n num_cuts : int\n The number or N-cuts to perform before determining the optimal one.\n map_array : array\n The array which maps old labels to new ones. This is modified inside\n the function.\n \"\"\"\n d, w = _ncut.DW_matrices(rag)\n m = w.shape[0]\n\n if m > 2:\n d2 = d.copy()\n # Since d is diagonal, we can directly operate on its data\n # the inverse of the square root\n d2.data = np.reciprocal(np.sqrt(d2.data, out=d2.data), out=d2.data)\n\n # Refer Shi & Malik 2001, Equation 7, Page 891\n vals, vectors = linalg.eigsh(d2 * (d - w) * d2, which='SM',\n k=min(100, m - 2))\n\n # Pick second smallest eigenvector.\n # Refer Shi & Malik 2001, Section 3.2.3, Page 893\n vals, vectors = np.real(vals), np.real(vectors)\n index2 = _ncut_cy.argmin2(vals)\n ev = vectors[:, index2]\n\n cut_mask, mcut = get_min_ncut(ev, d, w, num_cuts)\n if (mcut < thresh):\n # Sub divide and perform N-cut again\n # Refer Shi & Malik 2001, Section 3.2.5, Page 893\n sub1, sub2 = partition_by_cut(cut_mask, rag)\n\n _ncut_relabel(sub1, thresh, num_cuts)\n _ncut_relabel(sub2, thresh, num_cuts)\n return\n\n # The N-cut wasn't small enough, or could not be computed.\n # The remaining graph is a region.\n # Assign `ncut label` by picking any label from the existing nodes, since\n # `labels` are unique, `new_label` is also unique.\n _label_all(rag, 'ncut label')\n",
"\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\nOriginally part of CellProfiler, code licensed under both GPL and BSD licenses.\nWebsite: http://www.cellprofiler.org\n\nCopyright (c) 2003-2009 Massachusetts Institute of Technology\nCopyright (c) 2009-2011 Broad Institute\nAll rights reserved.\n\nOriginal author: Lee Kamentsky\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . import _watershed\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n if not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n markers = regular_seeds(image.shape, markers)\n elif markers.shape != image.shape:\n raise ValueError(\"`markers` (shape {}) must have same shape \"\n \"as `image` (shape {})\".format(markers.shape, image.shape))\n if mask is not None and mask.shape != image.shape:\n raise ValueError(\"`mask` must have same shape as `image`\")\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef _validate_connectivity(image_dim, connectivity, offset):\n \"\"\"Convert any valid connectivity to a structuring element and offset.\n\n Parameters\n ----------\n image_dim : int\n The number of dimensions of the input image.\n connectivity : int, array, or None\n The neighborhood connectivity. An integer is interpreted as in\n ``scipy.ndimage.generate_binary_structure``, as the maximum number\n of orthogonal steps to reach a neighbor. An array is directly\n interpreted as a structuring element and its shape is validated against\n the input image shape. 
``None`` is interpreted as a connectivity of 1.\n offset : tuple of int, or None\n The coordinates of the center of the structuring element.\n\n Returns\n -------\n c_connectivity : array of bool\n The structuring element corresponding to the input `connectivity`.\n offset : array of int\n The offset corresponding to the center of the structuring element.\n\n Raises\n ------\n ValueError:\n If the image dimension and the connectivity or offset dimensions don't\n match.\n \"\"\"\n if connectivity is None:\n connectivity = 1\n if np.isscalar(connectivity):\n c_connectivity = ndi.generate_binary_structure(image_dim, connectivity)\n else:\n c_connectivity = np.array(connectivity, bool)\n if c_connectivity.ndim != image_dim:\n raise ValueError(\"Connectivity dimension must be same as image\")\n if offset is None:\n if any([x % 2 == 0 for x in c_connectivity.shape]):\n raise ValueError(\"Connectivity array must have an unambiguous \"\n \"center\")\n offset = np.array(c_connectivity.shape) // 2\n return c_connectivity, offset\n\n\ndef _compute_neighbors(image, structure, offset):\n \"\"\"Compute neighborhood as an array of linear offsets into the image.\n\n These are sorted according to Euclidean distance from the center (given\n by `offset`), ensuring that immediate neighbors are visited first.\n \"\"\"\n structure[tuple(offset)] = 0 # ignore the center; it's not a neighbor\n locations = np.transpose(np.nonzero(structure))\n sqdistances = np.sum((locations - offset)**2, axis=1)\n neighborhood = (np.ravel_multi_index(locations.T, image.shape) -\n np.ravel_multi_index(offset, image.shape)).astype(np.int32)\n sorted_neighborhood = neighborhood[np.argsort(sqdistances)]\n return sorted_neighborhood\n\n\ndef watershed(image, markers, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image: ndarray (2-D, 3-D, ...) of integers\n Data array where the lowest value points are labeled first.\n markers: int, or ndarray of int, same shape as `image`\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker.\n connectivity: ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset: array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask: ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. The line has the label 0.\n\n Returns\n -------\n out: ndarray\n A labeled matrix of the same type and shape as markers\n\n See also\n --------\n skimage.segmentation.random_walker: random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. 
The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. DOI:10.1109/ICPR.2014.181\n https://www.tu-chemnitz.de/etit/proaut/forschung/rsrc/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> local_maxi = peak_local_max(distance, labels=image,\n ... footprint=np.ones((3, 3)),\n ... indices=False)\n >>> markers = ndi.label(local_maxi)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _compute_neighbors(image, connectivity, offset)\n marker_locations = np.flatnonzero(output).astype(np.int32)\n image_strides = np.array(image.strides, dtype=np.int32) // image.itemsize\n\n _watershed.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n",
"import numpy as np\nfrom scipy.ndimage import map_coordinates\n\nfrom skimage.transform._warps import _stackcopy\nfrom skimage.transform import (warp, warp_coords, rotate, resize, rescale,\n AffineTransform,\n ProjectiveTransform,\n SimilarityTransform,\n downscale_local_mean)\nfrom skimage import transform as tf, data, img_as_float\nfrom skimage.color import rgb2gray\n\nfrom skimage._shared import testing\nfrom skimage._shared.testing import (assert_almost_equal, assert_equal,\n test_parallel)\nfrom skimage._shared._warnings import expected_warnings\n\n\nnp.random.seed(0)\n\n\ndef test_stackcopy():\n layers = 4\n x = np.empty((3, 3, layers))\n y = np.eye(3, 3)\n _stackcopy(x, y)\n for i in range(layers):\n assert_almost_equal(x[..., i], y)\n\n\ndef test_warp_tform():\n x = np.zeros((5, 5), dtype=np.double)\n x[2, 2] = 1\n theta = - np.pi / 2\n tform = SimilarityTransform(scale=1, rotation=theta, translation=(0, 4))\n\n x90 = warp(x, tform, order=1)\n assert_almost_equal(x90, np.rot90(x))\n\n x90 = warp(x, tform.inverse, order=1)\n assert_almost_equal(x90, np.rot90(x))\n\n\ndef test_warp_callable():\n x = np.zeros((5, 5), dtype=np.double)\n x[2, 2] = 1\n refx = np.zeros((5, 5), dtype=np.double)\n refx[1, 1] = 1\n\n def shift(xy):\n return xy + 1\n\n outx = warp(x, shift, order=1)\n assert_almost_equal(outx, refx)\n\n\n@test_parallel()\ndef test_warp_matrix():\n x = np.zeros((5, 5), dtype=np.double)\n x[2, 2] = 1\n refx = np.zeros((5, 5), dtype=np.double)\n refx[1, 1] = 1\n\n matrix = np.array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])\n\n # _warp_fast\n outx = warp(x, matrix, order=1)\n assert_almost_equal(outx, refx)\n # check for ndimage.map_coordinates\n outx = warp(x, matrix, order=5)\n\n\ndef test_warp_nd():\n for dim in range(2, 8):\n shape = dim * (5,)\n\n x = np.zeros(shape, dtype=np.double)\n x_c = dim * (2,)\n x[x_c] = 1\n refx = np.zeros(shape, dtype=np.double)\n refx_c = dim * (1,)\n refx[refx_c] = 1\n\n coord_grid = dim * (slice(0, 5, 1),)\n coords = np.array(np.mgrid[coord_grid]) + 1\n\n outx = warp(x, coords, order=0, cval=0)\n\n assert_almost_equal(outx, refx)\n\n\ndef test_warp_clip():\n x = np.zeros((5, 5), dtype=np.double)\n x[2, 2] = 1\n\n with expected_warnings(['The default mode', 'The default multichannel']):\n outx = rescale(x, 3, order=3, clip=False, anti_aliasing=False)\n assert outx.min() < 0\n\n with expected_warnings(['The default mode', 'The default multichannel']):\n outx = rescale(x, 3, order=3, clip=True, anti_aliasing=False)\n assert_almost_equal(outx.min(), 0)\n assert_almost_equal(outx.max(), 1)\n\n\ndef test_homography():\n x = np.zeros((5, 5), dtype=np.double)\n x[1, 1] = 1\n theta = -np.pi / 2\n M = np.array([[np.cos(theta), - np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 4],\n [0, 0, 1]])\n\n x90 = warp(x,\n inverse_map=ProjectiveTransform(M).inverse,\n order=1)\n assert_almost_equal(x90, np.rot90(x))\n\n\ndef test_rotate():\n x = np.zeros((5, 5), dtype=np.double)\n x[1, 1] = 1\n x90 = rotate(x, 90)\n assert_almost_equal(x90, np.rot90(x))\n\n\ndef test_rotate_resize():\n x = np.zeros((10, 10), dtype=np.double)\n\n x45 = rotate(x, 45, resize=False)\n assert x45.shape == (10, 10)\n\n x45 = rotate(x, 45, resize=True)\n # new dimension should be d = sqrt(2 * (10/2)^2)\n assert x45.shape == (14, 14)\n\n\ndef test_rotate_center():\n x = np.zeros((10, 10), dtype=np.double)\n x[4, 4] = 1\n refx = np.zeros((10, 10), dtype=np.double)\n refx[2, 5] = 1\n x20 = rotate(x, 20, order=0, center=(0, 0))\n assert_almost_equal(x20, refx)\n x0 = rotate(x20, -20, order=0, 
center=(0, 0))\n assert_almost_equal(x0, x)\n\n\ndef test_rotate_resize_center():\n x = np.zeros((10, 10), dtype=np.double)\n x[0, 0] = 1\n\n ref_x45 = np.zeros((14, 14), dtype=np.double)\n ref_x45[6, 0] = 1\n ref_x45[7, 0] = 1\n\n x45 = rotate(x, 45, resize=True, center=(3, 3), order=0)\n # new dimension should be d = sqrt(2 * (10/2)^2)\n assert x45.shape == (14, 14)\n assert_equal(x45, ref_x45)\n\n\ndef test_rescale():\n # same scale factor\n x = np.zeros((5, 5), dtype=np.double)\n x[1, 1] = 1\n with expected_warnings(['The default mode', 'The default multichannel']):\n scaled = rescale(x, 2, order=0, anti_aliasing=False)\n ref = np.zeros((10, 10))\n ref[2:4, 2:4] = 1\n assert_almost_equal(scaled, ref)\n\n # different scale factors\n x = np.zeros((5, 5), dtype=np.double)\n x[1, 1] = 1\n with expected_warnings(['The default mode', 'The default multichannel']):\n scaled = rescale(x, (2, 1), order=0, anti_aliasing=False)\n ref = np.zeros((10, 5))\n ref[2:4, 1] = 1\n assert_almost_equal(scaled, ref)\n\n\ndef test_rescale_invalid_scale():\n x = np.zeros((10, 10, 3))\n with testing.raises(ValueError):\n rescale(x, (2, 2), multichannel=False)\n with testing.raises(ValueError):\n rescale(x, (2, 2, 2), multichannel=True)\n\n\ndef test_rescale_multichannel():\n # 1D + channels\n x = np.zeros((8, 3), dtype=np.double)\n scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 3))\n # 2D\n scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 6))\n\n # 2D + channels\n x = np.zeros((8, 8, 3), dtype=np.double)\n scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 16, 3))\n # 3D\n scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 16, 6))\n\n # 3D + channels\n x = np.zeros((8, 8, 8, 3), dtype=np.double)\n scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 16, 16, 3))\n # 4D\n scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 16, 16, 6))\n\n\ndef test_rescale_multichannel_defaults():\n # ensure multichannel=None matches the previous default behaviour\n\n # 2D: multichannel should default to False\n x = np.zeros((8, 3), dtype=np.double)\n with expected_warnings(['The default mode', 'The default multichannel']):\n scaled = rescale(x, 2, order=0, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 6))\n\n # 3D: multichannel should default to True\n x = np.zeros((8, 8, 3), dtype=np.double)\n with expected_warnings(['The default mode', 'The default multichannel']):\n scaled = rescale(x, 2, order=0, anti_aliasing=False)\n assert_equal(scaled.shape, (16, 16, 3))\n\n\ndef test_resize2d():\n x = np.zeros((5, 5), dtype=np.double)\n x[1, 1] = 1\n with expected_warnings(['The default mode']):\n resized = resize(x, (10, 10), order=0, anti_aliasing=False)\n ref = np.zeros((10, 10))\n ref[2:4, 2:4] = 1\n assert_almost_equal(resized, ref)\n\n\ndef test_resize3d_keep():\n # keep 3rd dimension\n x = np.zeros((5, 5, 3), dtype=np.double)\n x[1, 1, :] = 1\n with expected_warnings(['The default mode']):\n resized = resize(x, (10, 10), order=0, anti_aliasing=False)\n with testing.raises(ValueError):\n # output_shape too short\n resize(x, (10, ), order=0, anti_aliasing=False)\n ref = np.zeros((10, 10, 3))\n ref[2:4, 2:4, :] = 1\n assert_almost_equal(resized, ref)\n with expected_warnings(['The default 
mode']):\n resized = resize(x, (10, 10, 3), order=0, anti_aliasing=False)\n assert_almost_equal(resized, ref)\n\n\ndef test_resize3d_resize():\n # resize 3rd dimension\n x = np.zeros((5, 5, 3), dtype=np.double)\n x[1, 1, :] = 1\n with expected_warnings(['The default mode']):\n resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False)\n ref = np.zeros((10, 10, 1))\n ref[2:4, 2:4] = 1\n assert_almost_equal(resized, ref)\n\n\ndef test_resize3d_2din_3dout():\n # 3D output with 2D input\n x = np.zeros((5, 5), dtype=np.double)\n x[1, 1] = 1\n with expected_warnings(['The default mode']):\n resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False)\n ref = np.zeros((10, 10, 1))\n ref[2:4, 2:4] = 1\n assert_almost_equal(resized, ref)\n\n\ndef test_resize2d_4d():\n # resize with extra output dimensions\n x = np.zeros((5, 5), dtype=np.double)\n x[1, 1] = 1\n out_shape = (10, 10, 1, 1)\n resized = resize(x, out_shape, order=0, anti_aliasing=False)\n ref = np.zeros(out_shape)\n ref[2:4, 2:4, ...] = 1\n assert_almost_equal(resized, ref)\n\n\ndef test_resize_nd():\n for dim in range(1, 6):\n shape = 2 + np.arange(dim) * 2\n x = np.ones(shape)\n out_shape = np.asarray(shape) * 1.5\n resized = resize(x, out_shape, order=0, mode='reflect',\n anti_aliasing=False)\n expected_shape = 1.5 * shape\n assert_equal(resized.shape, expected_shape)\n assert np.all(resized == 1)\n\n\ndef test_resize3d_bilinear():\n # bilinear 3rd dimension\n x = np.zeros((5, 5, 2), dtype=np.double)\n x[1, 1, 0] = 0\n x[1, 1, 1] = 1\n resized = resize(x, (10, 10, 1), order=1, mode='constant',\n anti_aliasing=False)\n ref = np.zeros((10, 10, 1))\n ref[1:5, 1:5, :] = 0.03125\n ref[1:5, 2:4, :] = 0.09375\n ref[2:4, 1:5, :] = 0.09375\n ref[2:4, 2:4, :] = 0.28125\n assert_almost_equal(resized, ref)\n\n\ndef test_swirl():\n image = img_as_float(data.checkerboard())\n\n swirl_params = {'radius': 80, 'rotation': 0, 'order': 2, 'mode': 'reflect'}\n\n with expected_warnings(['Bi-quadratic.*bug']):\n swirled = tf.swirl(image, strength=10, **swirl_params)\n unswirled = tf.swirl(swirled, strength=-10, **swirl_params)\n\n assert np.mean(np.abs(image - unswirled)) < 0.01\n\n swirl_params.pop('mode')\n\n with expected_warnings(['Bi-quadratic.*bug', 'default']):\n swirled = tf.swirl(image, strength=10, **swirl_params)\n unswirled = tf.swirl(swirled, strength=-10, **swirl_params)\n\n assert np.mean(np.abs(image[1:-1, 1:-1] - unswirled[1:-1, 1:-1])) < 0.01\n\n\ndef test_const_cval_out_of_range():\n img = np.random.randn(100, 100)\n cval = - 10\n warped = warp(img, AffineTransform(translation=(10, 10)), cval=cval)\n assert np.sum(warped == cval) == (2 * 100 * 10 - 10 * 10)\n\n\ndef test_warp_identity():\n img = img_as_float(rgb2gray(data.astronaut()))\n assert len(img.shape) == 2\n assert np.allclose(img, warp(img, AffineTransform(rotation=0)))\n assert not np.allclose(img, warp(img, AffineTransform(rotation=0.1)))\n rgb_img = np.transpose(np.asarray([img, np.zeros_like(img), img]),\n (1, 2, 0))\n warped_rgb_img = warp(rgb_img, AffineTransform(rotation=0.1))\n assert np.allclose(rgb_img, warp(rgb_img, AffineTransform(rotation=0)))\n assert not np.allclose(rgb_img, warped_rgb_img)\n # assert no cross-talk between bands\n assert np.all(0 == warped_rgb_img[:, :, 1])\n\n\ndef test_warp_coords_example():\n image = data.astronaut().astype(np.float32)\n assert 3 == image.shape[2]\n tform = SimilarityTransform(translation=(0, -10))\n coords = warp_coords(tform, (30, 30, 3))\n map_coordinates(image[:, :, 0], coords[:2])\n\n\ndef test_downsize():\n x = 
np.zeros((10, 10), dtype=np.double)\n x[2:4, 2:4] = 1\n scaled = resize(x, (5, 5), order=0, anti_aliasing=False)\n assert_equal(scaled.shape, (5, 5))\n assert_equal(scaled[1, 1], 1)\n assert_equal(scaled[2:, :].sum(), 0)\n assert_equal(scaled[:, 2:].sum(), 0)\n\n\ndef test_downsize_anti_aliasing():\n x = np.zeros((10, 10), dtype=np.double)\n x[2, 2] = 1\n scaled = resize(x, (5, 5), order=1, anti_aliasing=True)\n assert_equal(scaled.shape, (5, 5))\n assert np.all(scaled[:3, :3] > 0)\n assert_equal(scaled[3:, :].sum(), 0)\n assert_equal(scaled[:, 3:].sum(), 0)\n\n\ndef test_downsize_anti_aliasing_invalid_stddev():\n x = np.zeros((10, 10), dtype=np.double)\n with testing.raises(ValueError):\n resize(x, (5, 5), order=0, anti_aliasing=True, anti_aliasing_sigma=-1)\n with expected_warnings([\"Anti-aliasing standard deviation greater\"]):\n resize(x, (5, 15), order=0, anti_aliasing=True,\n anti_aliasing_sigma=(1, 1), mode=\"reflect\")\n resize(x, (5, 15), order=0, anti_aliasing=True,\n anti_aliasing_sigma=(0, 1), mode=\"reflect\")\n\n\ndef test_downscale():\n x = np.zeros((10, 10), dtype=np.double)\n x[2:4, 2:4] = 1\n scaled = rescale(x, 0.5, order=0, anti_aliasing=False)\n assert_equal(scaled.shape, (5, 5))\n assert_equal(scaled[1, 1], 1)\n assert_equal(scaled[2:, :].sum(), 0)\n assert_equal(scaled[:, 2:].sum(), 0)\n\n\ndef test_downscale_anti_aliasing():\n x = np.zeros((10, 10), dtype=np.double)\n x[2, 2] = 1\n scaled = rescale(x, 0.5, order=1, anti_aliasing=True)\n assert_equal(scaled.shape, (5, 5))\n assert np.all(scaled[:3, :3] > 0)\n assert_equal(scaled[3:, :].sum(), 0)\n assert_equal(scaled[:, 3:].sum(), 0)\n\n\ndef test_downscale_local_mean():\n image1 = np.arange(4 * 6).reshape(4, 6)\n out1 = downscale_local_mean(image1, (2, 3))\n expected1 = np.array([[4., 7.],\n [16., 19.]])\n assert_equal(expected1, out1)\n\n image2 = np.arange(5 * 8).reshape(5, 8)\n out2 = downscale_local_mean(image2, (4, 5))\n expected2 = np.array([[14., 10.8],\n [8.5, 5.7]])\n assert_equal(expected2, out2)\n\n\ndef test_invalid():\n with testing.raises(ValueError):\n warp(np.ones((4, 3, 3, 3)),\n SimilarityTransform())\n\n\ndef test_inverse():\n tform = SimilarityTransform(scale=0.5, rotation=0.1)\n inverse_tform = SimilarityTransform(matrix=np.linalg.inv(tform.params))\n image = np.arange(10 * 10).reshape(10, 10).astype(np.double)\n assert_equal(warp(image, inverse_tform), warp(image, tform.inverse))\n\n\ndef test_slow_warp_nonint_oshape():\n image = np.random.rand(5, 5)\n\n with testing.raises(ValueError):\n warp(image, lambda xy: xy,\n output_shape=(13.1, 19.5))\n\n warp(image, lambda xy: xy, output_shape=(13.0001, 19.9999))\n\n\ndef test_keep_range():\n image = np.linspace(0, 2, 25).reshape(5, 5)\n\n with expected_warnings(['The default mode', 'The default multichannel']):\n out = rescale(image, 2, preserve_range=False, clip=True, order=0,\n anti_aliasing=False)\n assert out.min() == 0\n assert out.max() == 2\n\n with expected_warnings(['The default mode', 'The default multichannel']):\n out = rescale(image, 2, preserve_range=True, clip=True, order=0,\n anti_aliasing=False)\n assert out.min() == 0\n assert out.max() == 2\n\n with expected_warnings(['The default mode', 'The default multichannel']):\n out = rescale(image.astype(np.uint8), 2, preserve_range=False,\n anti_aliasing=False,\n clip=True, order=0)\n assert out.min() == 0\n assert out.max() == 2 / 255.0\n",
"\"\"\"\n=========================================\nAdapting gray-scale filters to RGB images\n=========================================\n\nThere are many filters that are designed to work with gray-scale images but not\nwith color images. To simplify the process of creating functions that can adapt\nto RGB images, scikit-image provides the ``adapt_rgb`` decorator.\n\nTo actually use the ``adapt_rgb`` decorator, you have to decide how you want to\nadapt the RGB image for use with the gray-scale filter. There are two\npre-defined handlers:\n\n``each_channel``\n Pass each of the RGB channels to the filter one-by-one, and stitch the\n results back into an RGB image.\n``hsv_value``\n Convert the RGB image to HSV and pass the value channel to the filter.\n The filtered result is inserted back into the HSV image and converted\n back to RGB.\n\nBelow, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale\nfilters:\n\"\"\"\nfrom skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value\nfrom skimage import filters\n\n\n@adapt_rgb(each_channel)\ndef sobel_each(image):\n return filters.sobel(image)\n\n\n@adapt_rgb(hsv_value)\ndef sobel_hsv(image):\n return filters.sobel(image)\n\n######################################################################\n# We can use these functions as we would normally use them, but now they work\n# with both gray-scale and color images. Let's plot the results with a color\n# image:\n\nfrom skimage import data\nfrom skimage.exposure import rescale_intensity\nimport matplotlib.pyplot as plt\n\nimage = data.astronaut()\n\nfig = plt.figure(figsize=(14, 7))\nax_each = fig.add_subplot(121, adjustable='box-forced')\nax_hsv = fig.add_subplot(122, sharex=ax_each, sharey=ax_each,\n adjustable='box-forced')\n\n# We use 1 - sobel_each(image)\n# but this will not work if image is not normalized\nax_each.imshow(rescale_intensity(1 - sobel_each(image)))\nax_each.set_xticks([]), ax_each.set_yticks([])\nax_each.set_title(\"Sobel filter computed\\n on individual RGB channels\")\n\n# We use 1 - sobel_hsv(image) but this will not work if image is not normalized\nax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))\nax_hsv.set_xticks([]), ax_hsv.set_yticks([])\nax_hsv.set_title(\"Sobel filter computed\\n on (V)alue converted image (HSV)\")\n\n######################################################################\n# Notice that the result for the value-filtered image preserves the color of\n# the original image, but channel filtered image combines in a more\n# surprising way. In other common cases, smoothing for example, the channel\n# filtered image will produce a better result than the value-filtered image.\n#\n# You can also create your own handler functions for ``adapt_rgb``. 
To do so,\n# just create a function with the following signature::\n#\n# def handler(image_filter, image, *args, **kwargs):\n# # Manipulate RGB image here...\n# image = image_filter(image, *args, **kwargs)\n# # Manipulate filtered image here...\n# return image\n#\n# Note that ``adapt_rgb`` handlers are written for filters where the image is\n# the first argument.\n#\n# As a very simple example, we can just convert any RGB image to grayscale\n# and then return the filtered result:\n\nfrom skimage.color import rgb2gray\n\n\ndef as_gray(image_filter, image, *args, **kwargs):\n gray_image = rgb2gray(image)\n return image_filter(gray_image, *args, **kwargs)\n\n######################################################################\n# It's important to create a signature that uses ``*args`` and ``**kwargs``\n# to pass arguments along to the filter so that the decorated function is\n# allowed to have any number of positional and keyword arguments.\n#\n# Finally, we can use this handler with ``adapt_rgb`` just as before:\n\n\n@adapt_rgb(as_gray)\ndef sobel_gray(image):\n return filters.sobel(image)\n\nfig = plt.figure(figsize=(7, 7))\nax = fig.add_subplot(111, sharex=ax_each, sharey=ax_each,\n adjustable='box-forced')\n\n# We use 1 - sobel_gray(image)\n# but this will not work if image is not normalized\nax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)\nax.set_xticks([]), ax.set_yticks([])\nax.set_title(\"Sobel filter computed\\n on the converted grayscale image\")\n\nplt.show()\n\n######################################################################\n#\n# .. note::\n#\n# A very simple check of the array shape is used for detecting RGB\n# images, so ``adapt_rgb`` is not recommended for functions that support\n# 3D volumes or color images in non-RGB spaces.\n"
] | [
[
"numpy.sqrt",
"numpy.allclose",
"numpy.linspace",
"numpy.real",
"numpy.zeros_like"
],
[
"numpy.pad",
"numpy.nonzero",
"scipy.ndimage.generate_binary_structure",
"numpy.ones",
"numpy.flatnonzero",
"numpy.isscalar",
"numpy.ravel_multi_index",
"numpy.argsort",
"numpy.array",
"numpy.sum"
],
[
"numpy.linspace",
"numpy.asarray",
"numpy.all",
"numpy.random.randn",
"numpy.zeros_like",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"numpy.sin",
"numpy.zeros",
"numpy.rot90",
"numpy.linalg.inv",
"scipy.ndimage.map_coordinates",
"numpy.random.rand",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.random.seed",
"numpy.cos",
"numpy.ones",
"numpy.empty"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
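The scikit-image row above vendors watershed.py, whose docstring example separates two overlapping circles by flooding the negated distance transform from markers placed at its maxima. A runnable version of that example is sketched below; it avoids the indices=False argument to peak_local_max used in the vendored docstring (removed in later scikit-image releases) and instead builds the marker array from the returned coordinates. Import paths assume a recent scikit-image, where watershed lives in skimage.segmentation.

import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.segmentation import watershed  # skimage.morphology.watershed in the vendored release

# Two overlapping circles, as in the docstring example.
x, y = np.indices((80, 80))
mask_circle1 = (x - 28) ** 2 + (y - 28) ** 2 < 16 ** 2
mask_circle2 = (x - 44) ** 2 + (y - 52) ** 2 < 20 ** 2
image = np.logical_or(mask_circle1, mask_circle2)

# Markers at the maxima of the distance to the background.
distance = ndi.distance_transform_edt(image)
coords = peak_local_max(distance, footprint=np.ones((3, 3)), labels=image)
marker_mask = np.zeros(distance.shape, dtype=bool)
marker_mask[tuple(coords.T)] = True
markers, _ = ndi.label(marker_mask)

# One label per circle; pixels outside `image` stay 0.
labels = watershed(-distance, markers, mask=image)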
ibianka/HARK | [
"8678dbab0a0ace1520ac8f7ff5b33765122619f4"
] | [
"DCT-Copula-Illustration.py"
] | [
"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Dimensionality Reduction in [Bayer and Luetticke (2018)](https://cepr.org/active/publications/discussion_papers/dp.php?dpno=13071)\n#\n# [](https://mybinder.org/v2/gh/econ-ark/HARK/BayerLuetticke?filepath=HARK%2FBayerLuetticke%2FDCT-Copula-Illustration.ipynb)\n#\n# This companion to the [main notebook](TwoAsset.ipynb) explains in more detail how the authors reduce the dimensionality of their problem\n#\n# - Based on original slides by Christian Bayer and Ralph Luetticke \n# - Original Jupyter notebook by Seungcheol Lee \n# - Further edits by Chris Carroll, Tao Wang \n#\n\n# %% [markdown]\n# ### Preliminaries\n#\n# In Steady-state Equilibrium (StE) in the model, in any given period, a consumer in state $s$ (which comprises liquid assets $m$, illiquid assets $k$, and human capital $\\newcommand{hLev}{h}\\hLev$) has two key choices:\n# 1. To adjust ('a') or not adjust ('n') their holdings of illiquid assets $k$\n# 1. Contingent on that choice, decide the level of consumption, yielding consumption functions:\n# * $c_n(s)$ - nonadjusters\n# * $c_a(s)$ - adjusters\n#\n# The usual envelope theorem applies here, so marginal value wrt the liquid asset equals marginal utility with respect to consumption:\n# $[\\frac{d v}{d m} = \\frac{d u}{d c}]$.\n# In practice, the authors solve their problem using the marginal value of money $\\texttt{Vm} = dv/dm$, but because the marginal utility function is invertible it is trivial to recover $\\texttt{c}$ from $(u^{\\prime})^{-1}(\\texttt{Vm} )$. 
The consumption function is therefore computed from the $\\texttt{Vm}$ function\n\n# %% {\"code_folding\": [0]}\n# Setup stuff\n\n# This is a jupytext paired notebook that autogenerates a corresponding .py file\n# which can be executed from a terminal command line via \"ipython [name].py\"\n# But a terminal does not permit inline figures, so we need to test jupyter vs terminal\n# Google \"how can I check if code is executed in the ipython notebook\"\ndef in_ipynb():\n try:\n if str(type(get_ipython())) == \"<class 'ipykernel.zmqshell.ZMQInteractiveShell'>\":\n return True\n else:\n return False\n except NameError:\n return False\n\n# Determine whether to make the figures inline (for spyder or jupyter)\n# vs whatever is the automatic setting that will apply if run from the terminal\nif in_ipynb():\n # %matplotlib inline generates a syntax error when run from the shell\n # so do this instead\n get_ipython().run_line_magic('matplotlib', 'inline') \nelse:\n get_ipython().run_line_magic('matplotlib', 'auto') \n \n# The tools for navigating the filesystem\nimport sys\nimport os\n\n# Find pathname to this file:\nmy_file_path = os.path.dirname(os.path.abspath(\"TwoAsset.ipynb\"))\n\n# Relative directory for pickled code\ncode_dir = os.path.join(my_file_path, \"BayerLuetticke_code/TwoAssetCode\") \n\nsys.path.insert(0, code_dir)\nsys.path.insert(0, my_file_path)\n\n# %% {\"code_folding\": []}\n# Load precalculated Stationary Equilibrium (StE) object EX3SS\n\nimport pickle\nos.chdir(code_dir) # Go to the directory with pickled code\n\n## EX3SS_20.p is the information in the stationary equilibrium \n## (20: the number of illiquid and liquid weath gridpoints)\n### The comments above are original, but it seems that there are 30 not 20 points now\n\nEX3SS=pickle.load(open(\"EX3SS_20.p\", \"rb\"))\n\n# %% [markdown]\n# ### Dimensions\n#\n# The imported StE solution to the problem represents the functions at a set of gridpoints of\n# * liquid assets ($n_m$ points), illiquid assets ($n_k$), and human capital ($n_h$)\n# * In the code these are $\\{\\texttt{nm,nk,nh}\\}$\n#\n# So even if the grids are fairly sparse for each state variable, the total number of combinations of the idiosyncratic state gridpoints is large: $n = n_m \\times n_k \\times n_h$. So, e.g., $\\bar{c}$ is a set of size $n$ containing the level of consumption at each possible _combination_ of gridpoints.\n#\n# In the \"real\" micro problem, it would almost never happen that a continuous variable like $m$ would end up being exactly equal to one of the prespecified gridpoints. But the functions need to be evaluated at such non-grid points. This is addressed by linear interpolation. 
That is, if, say, the grid had $m_{8} = 40$ and $m_{9} = 50$ then and a consumer ended up with $m = 45$ then the approximation is that $\\tilde{c}(45) = 0.5 \\bar{c}_{8} + 0.5 \\bar{c}_{9}$.\n#\n\n# %% {\"code_folding\": []}\n# Show dimensions of the consumer's problem (state space)\n\nprint('c_n is of dimension: ' + str(EX3SS['mutil_c_n'].shape))\nprint('c_a is of dimension: ' + str(EX3SS['mutil_c_a'].shape))\n\nprint('Vk is of dimension:' + str(EX3SS['Vk'].shape))\nprint('Vm is of dimension:' + str(EX3SS['Vm'].shape))\n\nprint('For convenience, these are all constructed from the same exogenous grids:')\nprint(str(len(EX3SS['grid']['m']))+' gridpoints for liquid assets;')\nprint(str(len(EX3SS['grid']['k']))+' gridpoints for illiquid assets;')\nprint(str(len(EX3SS['grid']['h']))+' gridpoints for individual productivity.')\nprint('')\nprint('Therefore, the joint distribution is of size: ')\nprint(str(EX3SS['mpar']['nm'])+\n ' * '+str(EX3SS['mpar']['nk'])+\n ' * '+str(EX3SS['mpar']['nh'])+\n ' = '+ str(EX3SS['mpar']['nm']*EX3SS['mpar']['nk']*EX3SS['mpar']['nh']))\n\n\n# %% [markdown]\n# ### Dimension Reduction\n#\n# The authors use different dimensionality reduction methods for the consumer's problem and the distribution across idiosyncratic states\n\n# %% [markdown]\n# #### Representing the consumer's problem with Basis Functions\n#\n# The idea is to find an efficient \"compressed\" representation of our functions (e.g., the consumption function), which BL do using tools originally developed for image compression. The analogy to image compression is that nearby pixels are likely to have identical or very similar colors, so we need only to find an efficient way to represent how the colors _change_ from one pixel to nearby ones. Similarly, consumption at a given point $s_{i}$ is likely to be close to consumption point at another point $s_{j}$ that is \"close\" in the state space (similar wealth, income, etc), so a function that captures that similarity efficiently can preserve most of the information without keeping all of the points.\n#\n# Like linear interpolation, the [DCT transformation](https://en.wikipedia.org/wiki/Discrete_cosine_transform) is a method of representing a continuous function using a finite set of numbers. It uses a set of independent [basis functions](https://en.wikipedia.org/wiki/Basis_function) to do this.\n#\n# But it turns out that some of those basis functions are much more important than others in representing the steady-state functions. Dimension reduction is accomplished by basically ignoring all basis functions that make \"small enough\" contributions to the representation of the function. \n#\n# ##### When might this go wrong?\n#\n# Suppose the consumption function changes in a recession in ways that change behavior radically at some states. Like, suppose unemployment almost never happens in steady state, but it can happen in temporary recessions. Suppose further that, even for employed people, in a recession, _worries_ about unemployment cause many of them to prudently withdraw some of their illiquid assets -- behavior opposite of what people in the same state would be doing during expansions. 
In that case, the basis functions that represented the steady state function would have had no incentive to be able to represent well the part of the space that is never seen in steady state, so any functions that might help do so might well have been dropped in the dimension reduction stage.\n#\n# On the whole, it seems unlikely that this kind of thing is a major problem, because the vast majority of the variation that people experience is idiosyncratic. There is always unemployment, for example; it just moves up and down a bit with aggregate shocks, but since the experience of unemployment is in fact well represented in the steady state the method should have no trouble capturing it.\n#\n# Where the method might have more trouble is in representing economies in which there are multiple equilibria in which behavior is quite different.\n\n# %% [markdown]\n# #### For the distribution of agents across states: Copula\n#\n# The other tool the authors use is the [\"copula\"](https://en.wikipedia.org/wiki/Copula_(probability_theory)), which allows us to represent the distribution of people across idiosyncratic states efficiently\n#\n# The copula is computed from the joint distribution of states in StE and will be used to transform the [marginal distributions](https://en.wikipedia.org/wiki/Marginal_distribution) back to joint distributions. (For an illustration of how the assumptions used when modeling asset price distributions using copulas can fail see [Salmon](https://www.wired.com/2009/02/wp-quant/))\n#\n# * A copula is a representation of the joint distribution expressed using a mapping between the uniform joint CDF and the marginal distributions of the variables\n# \n# * The crucial assumption is that what aggregate shocks do is to squeeze or distort the steady state distribution, but leave the rank structure of the distribution the same\n# * An example of when this might not hold is the following. Suppose that in expansions, the people at the top of the distribution of illiquid assets (the top 1 percent, say) are also at the top 1 percent of liquid assets. But in recessions the bottom 99 percent get angry at the top 1 percent of illiquid asset holders and confiscate part of their liquid assets (the illiquid assets can't be confiscated quickly because they are illiquid). Now the people in the top 99 percent of illiquid assets might be in the _bottom_ 1 percent of liquid assets.\n# \n# - In this case we just need to represent how the mapping from ranks into levels of assets\n#\n# - This reduces the number of points for which we need to track transitions from $3600 = 30 \\times 30 \\times 4$ to $64 = 30+30+4$. Or the total number of points we need to contemplate goes from $3600^2 \\approx 13 $million to $64^2=4096$. 
\n\n# %% {\"code_folding\": []}\n# Get some specs about the copula, which is precomputed in the EX3SS object\n\nprint('The copula consists of two parts: gridpoints and values at those gridpoints:'+ \\\n '\\n gridpoints have dimensionality of '+str(EX3SS['Copula']['grid'].shape) + \\\n '\\n where the first element is total number of gridpoints' + \\\n '\\n and the second element is number of idiosyncratic state variables' + \\\n '\\n whose values also are of dimension of '+str(EX3SS['Copula']['value'].shape[0]) + \\\n '\\n each entry of which is the probability that all three of the'\n '\\n state variables are below the corresponding point.')\n\n\n# %% {\"code_folding\": []}\n## Import necessary libraries\n\nfrom __future__ import print_function\nimport sys \nsys.path.insert(0,'../')\n\nimport numpy as np\nfrom numpy.linalg import matrix_rank\nimport scipy as sc\nfrom scipy.stats import norm \nfrom scipy.interpolate import interp1d, interp2d, griddata, RegularGridInterpolator, interpn\nimport multiprocessing as mp\nfrom multiprocessing import Pool, cpu_count, Process\nfrom math import ceil\nimport math as mt\nfrom scipy import sparse as sp # used to work with sparse matrices\nfrom scipy import linalg #linear algebra \nfrom math import log, cos, pi, sqrt\nimport time\nfrom SharedFunc3 import Transition, ExTransitions, GenWeight, MakeGridkm, Tauchen, Fastroot\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport scipy.io #scipy input and output\nimport scipy.fftpack as sf # scipy discrete fourier transforms\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom matplotlib import cm\n\nimport seaborn as sns\n\nimport copy as cp\n\n\n# %% {\"code_folding\": []}\n## State reduction and discrete cosine transformation\n\nclass StateReduc_Dct:\n \n def __init__(self, par, mpar, grid, Output, targets, Vm, Vk, \n joint_distr, Copula, c_n_guess, c_a_guess, psi_guess,\n m_n_star, m_a_star, cap_a_star, mutil_c_n, mutil_c_a,mutil_c, P_H):\n \n self.par = par # Parameters of the theoretical model\n self.mpar = mpar # Parameters of the numerical representation\n self.grid = grid # Discrete grid\n self.Output = Output # Results of the calculations\n self.targets = targets # Like, debt-to-GDP ratio or other desiderata\n self.Vm = Vm # Marginal value from liquid cash-on-hand\n self.Vk = Vk # Marginal value of capital\n self.joint_distr = joint_distr # Multidimensional histogram\n self.Copula = Copula # Encodes rank marginal correlation of joint distribution\n self.mutil_c = mutil_c # Marginal utility of consumption\n self.P_H = P_H # Transition matrix for macro states (not including distribution)\n \n \n def StateReduc(self):\n \"\"\"\n input\n -----\n self: dict, stored results from a StE \n \n output\n ------\n Newly generated\n ===============\n X_ss: ndarray, stacked states, including \n Y_ss: ndarray, controls \n Gamma_state: ndarray, marginal distributions of individual states \n grid: ndarray, discrete grids\n targets: ndarray, debt-to-GDP ratio or other desiderata\n P_H: transition probability of\n indexMUdct: ndarray, indices selected after dct operation on marginal utility of consumption\n indexVKdct: ndarray, indices selected after dct operation on marginal value of capital\n State: ndarray, dimension equal to reduced states\n State_m: ndarray, dimension equal to reduced states\n Contr: ndarray, dimension equal to reduced controls\n Contr_m: ndarray, dimension equal to reduced controls\n \n Passed down from the 
input\n ==========================\n Copula: dict, grids and values\n joint_distr: ndarray, nk x nm x nh\n Output: dict, outputs from the model \n par: dict, parameters of the theoretical model\n mpar:dict, parameters of the numerical representation\n aggrshock: string, type of aggregate shock used to purturb the StE \n \"\"\"\n \n # Inverse of CRRA on x for utility and marginal utility\n invutil = lambda x : ((1-self.par['xi'])*x)**(1./(1-self.par['xi'])) \n invmutil = lambda x : (1./x)**(1./self.par['xi']) \n \n # X=States\n # Marg dist of liquid assets summing over pty and illiquid assets k\n Xss=np.asmatrix(np.concatenate((np.sum(np.sum(self.joint_distr.copy(),axis=1),axis =1), \n np.transpose(np.sum(np.sum(self.joint_distr.copy(),axis=0),axis=1)),# marg dist k\n np.sum(np.sum(self.joint_distr.copy(),axis=1),axis=0), # marg dist pty (\\approx income)\n [np.log(self.par['RB'])],[ 0.]))).T # Given the constant interest rate\n \n # Y=\"controls\" (according to this literature's odd terminology)\n # c = invmarg(marg(c)), so first bit gets consumption policy function\n Yss=np.asmatrix(np.concatenate((invmutil(self.mutil_c.copy().flatten(order = 'F')),\\\n invmutil(self.Vk.copy().flatten(order = 'F')),\n [np.log(self.par['Q'])], # Question: Price of the illiquid asset, right?\n [ np.log(self.par['PI'])], # Inflation\n [ np.log(self.Output)], \n [np.log(self.par['G'])], # Gov spending\n [np.log(self.par['W'])], # Wage\n [np.log(self.par['R'])], # Nominal R\n [np.log(self.par['PROFITS'])], \n [np.log(self.par['N'])], # Hours worked\n [np.log(self.targets['T'])], # Taxes\n [np.log(self.grid['K'])], # Kapital\n [np.log(self.targets['B'])]))).T # Government debt\n \n # Mapping for Histogram\n # Gamma_state matrix reduced set of states\n # nm = number of gridpoints for liquid assets\n # nk = number of gridpoints for illiquid assets\n # nh = number of gridpoints for human capital (pty)\n Gamma_state = np.zeros( # Create zero matrix of size [nm + nk + nh,nm + nk + nh - 4]\n (self.mpar['nm']+self.mpar['nk']+self.mpar['nh'],\n self.mpar['nm']+self.mpar['nk']+self.mpar['nh'] - 4)) \n # Question: Why 4? 4 = 3+1, 3: sum to 1 for m, k, h and 1: for entrepreneurs \n\n # Impose adding-up conditions: \n # In each of the block matrices, probabilities must add to 1\n \n for j in range(self.mpar['nm']-1): # np.squeeze reduces one-dimensional matrix to vector\n Gamma_state[0:self.mpar['nm'],j] = -np.squeeze(Xss[0:self.mpar['nm']])\n Gamma_state[j,j]=1. - Xss[j] # \n Gamma_state[j,j]=Gamma_state[j,j] - np.sum(Gamma_state[0:self.mpar['nm'],j])\n bb = self.mpar['nm'] # Question: bb='bottom base'? because bb shorter to type than self.mpar['nm'] everywhere\n\n for j in range(self.mpar['nk']-1):\n Gamma_state[bb+np.arange(0,self.mpar['nk'],1), bb+j-1] = -np.squeeze(Xss[bb+np.arange(0,self.mpar['nk'],1)])\n Gamma_state[bb+j,bb-1+j] = 1. - Xss[bb+j] \n Gamma_state[bb+j,bb-1+j] = (Gamma_state[bb+j,bb-1+j] - \n np.sum(Gamma_state[bb+np.arange(0,self.mpar['nk']),bb-1+j]))\n bb = self.mpar['nm'] + self.mpar['nk']\n\n for j in range(self.mpar['nh']-2): \n # Question: Why -2? 1 for h sum to 1 and 1 for entrepreneur Some other symmetry/adding-up condition.\n Gamma_state[bb+np.arange(0,self.mpar['nh']-1,1), bb+j-2] = -np.squeeze(Xss[bb+np.arange(0,self.mpar['nh']-1,1)])\n Gamma_state[bb+j,bb-2+j] = 1. 
- Xss[bb+j]\n Gamma_state[bb+j,bb-2+j] = Gamma_state[bb+j,bb-2+j] - np.sum(Gamma_state[bb+np.arange(0,self.mpar['nh']-1,1),bb-2+j])\n\n # Number of other state variables not including the gridded -- here, just the interest rate \n self.mpar['os'] = len(Xss) - (self.mpar['nm']+self.mpar['nk']+self.mpar['nh'])\n # For each gridpoint there are two \"regular\" controls: consumption and illiquid saving\n # Counts the number of \"other\" controls (PROFITS, Q, etc)\n self.mpar['oc'] = len(Yss) - 2*(self.mpar['nm']*self.mpar['nk']*self.mpar['nh'])\n \n aggrshock = self.par['aggrshock']\n accuracy = self.par['accuracy']\n \n # Do the dct on the steady state marginal utility\n # Returns an array of indices for the used basis vectors\n indexMUdct = self.do_dct(invmutil(self.mutil_c.copy().flatten(order='F')),\n self.mpar,accuracy)\n\n # Do the dct on the steady state marginal value of capital\n # Returns an array of indices for the used basis vectors\n indexVKdct = self.do_dct(invmutil(self.Vk.copy()),self.mpar,accuracy)\n \n # Calculate the numbers of states and controls\n aux = np.shape(Gamma_state)\n self.mpar['numstates'] = np.int64(aux[1] + self.mpar['os'])\n self.mpar['numcontrols'] = np.int64(len(indexMUdct) + \n len(indexVKdct) + \n self.mpar['oc'])\n \n # Size of the reduced matrices to be used in the Fsys\n # Set to zero because in steady state they are zero\n State = np.zeros((self.mpar['numstates'],1))\n State_m = State\n Contr = np.zeros((self.mpar['numcontrols'],1))\n Contr_m = Contr\n \n return {'Xss': Xss, 'Yss':Yss, 'Gamma_state': Gamma_state, \n 'par':self.par, 'mpar':self.mpar, 'aggrshock':aggrshock,\n 'Copula':self.Copula,'grid':self.grid,'targets':self.targets,'P_H':self.P_H, \n 'joint_distr': self.joint_distr, 'Output': self.Output, 'indexMUdct':indexMUdct, 'indexVKdct':indexVKdct,\n 'State':State, 'State_m':State_m, 'Contr':Contr, 'Contr_m':Contr_m}\n\n # Discrete cosine transformation magic happens here\n # sf is scipy.fftpack tool\n def do_dct(self, obj, mpar, level):\n \"\"\"\n input\n -----\n obj: ndarray nm x nk x nh \n dimension of states before dct \n mpar: dict\n parameters in the numerical representaion of the model, e.g. 
nm, nk and nh\n level: float \n accuracy level for dct \n output\n ------\n index_reduced: ndarray n_dct x 1 \n an array of indices that select the needed grids after dct\n \n \"\"\"\n obj = np.reshape(obj.copy(),(mpar['nm'],mpar['nk'],mpar['nh']),order='F')\n X1 = sf.dct(obj,norm='ortho',axis=0) # dct is operated along three dimensions axis=0/1/2\n X2 = sf.dct(X1.copy(),norm='ortho',axis=1)\n X3 = sf.dct(X2.copy(),norm='ortho',axis=2)\n\n # Pick the coefficients that are big\n XX = X3.flatten(order='F') \n ind = np.argsort(abs(XX.copy()))[::-1]\n # i will \n i = 1 \n # Sort from smallest (=best) to biggest (=worst)\n # and count how many are 'good enough to keep'\n while linalg.norm(XX[ind[:i]].copy())/linalg.norm(XX) < level:\n i += 1 \n \n needed = i # Question:Isn't this counting the ones that are NOT needed?\n \n index_reduced = np.sort(ind[:i]) # Retrieve the good \n \n return index_reduced\n\n# %% {\"code_folding\": []}\n## Choose an aggregate shock to perturb(one of three shocks: MP, TFP, Uncertainty)\n\nEX3SS['par']['aggrshock'] = 'MP'\nEX3SS['par']['rhoS'] = 0.0 # Persistence of variance\nEX3SS['par']['sigmaS'] = 0.001 # STD of variance shocks\n\n#EX3SS['par']['aggrshock'] = 'TFP'\n#EX3SS['par']['rhoS'] = 0.95\n#EX3SS['par']['sigmaS'] = 0.0075\n \n#EX3SS['par']['aggrshock'] = 'Uncertainty'\n#EX3SS['par']['rhoS'] = 0.84 # Persistence of variance\n#EX3SS['par']['sigmaS'] = 0.54 # STD of variance shocks\n\n# %% {\"code_folding\": []}\n## Choose an accuracy of approximation with DCT\n### Determines number of basis functions chosen -- enough to match this accuracy\n### EX3SS is precomputed steady-state pulled in above\nEX3SS['par']['accuracy'] = 0.99999 \n\n# %% {\"code_folding\": []}\n## Implement state reduction and DCT\n### Do state reduction on steady state\nEX3SR=StateReduc_Dct(**EX3SS) # Takes StE result as input and get ready to invoke state reduction operation\nSR=EX3SR.StateReduc() # StateReduc is operated \n\n# %% {\"code_folding\": [0]}\n# Measuring the effectiveness of the state reduction\n\nprint('What are the results from the state reduction?')\n#print('Newly added attributes after the operation include \\n'+str(set(SR.keys())-set(EX3SS.keys())))\n\nprint('\\n')\n\nprint('To achieve an accuracy of '+str(EX3SS['par']['accuracy'])+'\\n') \n\nprint('The dimension of the policy functions is reduced to '+str(SR['indexMUdct'].shape[0]) \\\n +' from '+str(EX3SS['mpar']['nm']*EX3SS['mpar']['nk']*EX3SS['mpar']['nh'])\n )\nprint('The dimension of the marginal value functions is reduced to '+str(SR['indexVKdct'].shape[0]) \\\n + ' from ' + str(EX3SS['Vk'].shape))\nprint('The total number of control variables is '+str(SR['Contr'].shape[0])+'='+str(SR['indexMUdct'].shape[0]) + \\\n '+'+str(SR['indexVKdct'].shape[0])+'+ # of other macro controls')\nprint('\\n')\nprint('The copula represents the joint distribution with a vector of size '+str(SR['Gamma_state'].shape) )\nprint('The dimension of states including exogenous state, is ' +str(SR['Xss'].shape[0]))\n\nprint('It simply stacks all grids of different\\\n \\n state variables regardless of their joint distributions.\\\n \\n This is due to the assumption that the rank order remains the same.')\nprint('The total number of state variables is '+str(SR['State'].shape[0]) + '='+\\\n str(SR['Gamma_state'].shape[1])+'+ the number of macro states (like the interest rate)')\n\n\n# %% [markdown]\n# ### Graphical Illustration\n#\n# #### Policy/value functions\n#\n# Taking the consumption function as an example, we plot consumption by adjusters 
and non-adjusters over a range of $k$ and $m$ that encompasses x percent of the mass of the distribution function. \n#\n# We plot the functions for the top and bottom values of the wage $h$ distribution\n#\n\n# %% {\"code_folding\": []}\n## Graphical illustration\n\nxi = EX3SS['par']['xi']\ninvmutil = lambda x : (1./x)**(1./xi) \n\n### convert marginal utilities back to consumption function\nmut_StE = EX3SS['mutil_c']\nmut_n_StE = EX3SS['mutil_c_n'] # marginal utility of non-adjusters\nmut_a_StE = EX3SS['mutil_c_a'] # marginal utility of adjusters \n\nc_StE = invmutil(mut_StE)\ncn_StE = invmutil(mut_n_StE)\nca_StE = invmutil(mut_a_StE)\n\n\n### grid values \ndim_StE = mut_StE.shape\nmgrid = EX3SS['grid']['m']\nkgrid = EX3SS['grid']['k']\nhgrid = EX3SS['grid']['h']\n\n\n# %% {\"code_folding\": []}\n## define some functions to be used next\n\ndef dct3d(x):\n x0=sf.dct(x.copy(),axis=0,norm='ortho')\n x1=sf.dct(x0.copy(),axis=1,norm='ortho')\n x2=sf.dct(x1.copy(),axis=2,norm='ortho')\n return x2\n\ndef idct3d(x):\n x2 = sf.idct(x.copy(),axis=2,norm='ortho')\n x1 = sf.idct(x2.copy(),axis=1,norm='ortho')\n x0 = sf.idct(x1.copy(),axis=0,norm='ortho') \n return x0\n\ndef DCTApprox(fullgrids,dct_index):\n dim=fullgrids.shape\n dctcoefs = dct3d(fullgrids)\n dctcoefs_rdc = np.zeros(dim)\n dctcoefs_rdc[dct_index]=dctcoefs[dct_index]\n approxgrids = idct3d(dctcoefs_rdc)\n return approxgrids\n\n# %% [markdown]\n# Depending on the accuracy level, the DCT operation choses the necessary number of basis functions used to approximate consumption function at the full grids. This is illustrated in the p31-p34 in this [slides](https://www.dropbox.com/s/46fdxh0aphazm71/presentation_method.pdf?dl=0). We show this for both 1-dimensional (m or k) or 2-dimenstional grids (m and k) in the following. 
\n\n# %% {\"code_folding\": []}\n## 2D graph of consumption function: c(m) fixing k and h\n\n\n## list of accuracy levels \nAccuracy_BL = 0.99999 # From BL\nAccuracy_Less0 = 0.999\nAccuracy_Less1 = 0.99\nAccuracy_Less2 = 0.95\n\nacc_lst = np.array([Accuracy_BL,Accuracy_Less0,Accuracy_Less1,Accuracy_Less2])\n\n## c(m) fixing k and h\nfig = plt.figure(figsize=(8,8))\nfig.suptitle('c at full grids and c approximated by DCT in different accuracy levels' \n '\\n non-adjusters, fixing k and h',\n fontsize=(13))\nfig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)\n\nfor idx in range(len(acc_lst)):\n EX3SS_cp =cp.deepcopy(EX3SS) \n EX3SS_cp['par']['accuracy'] = acc_lst[idx]\n EX3SR_cp=StateReduc_Dct(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation\n SR_cp=EX3SR_cp.StateReduc()\n mut_rdc_idx_flt_cp = SR_cp['indexMUdct']\n mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')\n nb_bf_cp = len(mut_rdc_idx_cp[0])\n print(str(nb_bf_cp) +\" basis functions used.\")\n c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)\n c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)\n cn_diff_cp = c_n_approx_cp-cn_StE\n \n # choose the fix grid of h and k\n hgrid_fix=2 # fix level of h as an example \n kgrid_fix=10 # fix level of k as an example\n \n # get the corresponding c function approximated by dct\n cVec = c_a_approx_cp[:,kgrid_fix,hgrid_fix]\n \n ## plots \n ax = fig.add_subplot(2,2,idx+1)\n ax.plot(mgrid,cVec,label='c approximated by DCT')\n ax.plot(mgrid,ca_StE[:,kgrid_fix,hgrid_fix],'--',label='c at full grids')\n ax.plot(mgrid,cVec,'r*')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel(r'$c(m)$',fontsize=13)\n ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))\n ax.legend(loc=0)\n\n# %% {\"code_folding\": []}\n## 2D graph of consumption function: c(k) fixing m and h\n\nfig = plt.figure(figsize=(8,8))\nfig.suptitle('c at full grids and c approximated by DCT in different accuracy levels' \n '\\n non-adjusters, fixing m and h',\n fontsize=(13))\nfig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)\n\nfor idx in range(len(acc_lst)):\n EX3SS_cp =cp.deepcopy(EX3SS)\n EX3SS_cp['par']['accuracy'] = acc_lst[idx]\n EX3SR_cp=StateReduc_Dct(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation\n SR_cp=EX3SR_cp.StateReduc()\n mut_rdc_idx_flt_cp= SR_cp['indexMUdct']\n mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')\n nb_bf_cp = len(mut_rdc_idx_cp[0])\n print(str(nb_bf_cp) +\" basis functions used.\")\n c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)\n c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)\n cn_diff_cp = c_n_approx_cp-cn_StE\n \n # choose the fix grid of h and m \n hgrid_fix=2 # fix level of h as an example \n mgrid_fix=10 # fix level of k as an example\n \n # get the corresponding c function approximated by dct\n cVec = c_n_approx_cp[mgrid_fix,:,hgrid_fix]\n\n ## plots \n ax = fig.add_subplot(2,2,idx+1)\n ax.plot(kgrid,cVec,label='c approximated by DCT')\n ax.plot(kgrid,cn_StE[mgrid_fix,:,hgrid_fix],'--',label='c at full grids')\n ax.plot(kgrid,cVec,'r*')\n ax.set_xlabel('k',fontsize=13)\n ax.set_ylabel(r'$c(k)$',fontsize=13)\n ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))\n ax.legend(loc=0)\n\n# %% {\"code_folding\": []}\n# Restore the solution corresponding to the original BL accuracy\n\nEX3SS['par']['accuracy'] = Accuracy_BL \nEX3SR=StateReduc_Dct(**EX3SS) # Takes StE result as input and get ready to invoke 
state reduction operation\nSR=EX3SR.StateReduc() # StateReduc is operated \n\n\n## indexMUdct is one dimension, needs to be unraveled to 3 dimensions\nmut_rdc_idx_flt = SR['indexMUdct']\nmut_rdc_idx = np.unravel_index(mut_rdc_idx_flt,dim_StE,order='F')\n\nnb_dct = len(mut_StE.flatten()) \nmut_rdc_bool = np.zeros(nb_dct) # boolean array of 30 x 30 x 4 \nfor i in range(nb_dct):\n mut_rdc_bool[i]=i in list(SR['indexMUdct'])\nmut_rdc_bool_3d = (mut_rdc_bool==1).reshape(dim_StE)\nmut_rdc_mask_3d = (mut_rdc_bool).reshape(dim_StE)\n\n# Get the joint distribution calculated elsewhere\n\njoint_distr = EX3SS['joint_distr']\nmarginal_mk = EX3SS['joint_distr'].sum(axis=2)\n\n# Location at which to cut off the topmost part of the distributions\n\nmass_pct = 0.9\n\n## Again, for BL accuracy level, get dct compressed c functions at all grids \n\nc_n_approx = DCTApprox(cn_StE,mut_rdc_idx)\nc_a_approx = DCTApprox(ca_StE,mut_rdc_idx)\n\n# %% {\"code_folding\": []}\n# 3D surface plots of consumption function at full grids and approximated by DCT\n## at all grids and grids after dct first for non-adjusters and then for adjusters\n\n## for non-adjusters\n\n## full grids now \n## WangTao: \n## After plotting for the entire set of gridpoints, next plot only for the bottom mass_pct of the distributions\n\nmmgrid,kkgrid = np.meshgrid(mgrid,kgrid)\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of non-adjusters at grid points of m and k (for each h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.scatter(mmgrid,kkgrid,c_n_approx[:,:,hgrid_fix],marker='v',color='red',\n label='StE(after dct):non-adjuster')\n ax.plot_surface(mmgrid,kkgrid,cn_StE[:,:,hgrid_fix],cmap='Blues',\n label='StE(before dct): non-adjuster')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_n(m,k)$',fontsize=13)\n plt.gca().invert_yaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n ax.view_init(20, 100)\n\n# %% {\"code_folding\": []}\n## Same thing in a different way: image plots of c functions at full grids and c approximated by DCT\n\n\n## for non-adjusters\n\n## full grids \nmmgrid,kkgrid = np.meshgrid(mgrid,kgrid)\n\n### for adjusters \nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of non-adjusters at grid points of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id\n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1)\n ax.imshow(np.hstack((cn_StE[:,:,hgrid_fix],c_n_approx[:,:,hgrid_fix])))\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n\n# %% {\"code_folding\": []}\n## 3D scatter plots of the difference of full-grid c and approximated c\n\n## for non-adjusters\n\n## full grids \nmmgrid,kkgrid = np.meshgrid(mgrid,kgrid)\n\n### for adjusters \nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of non-adjusters at grid points of m and k (for each h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n cn_diff = c_n_approx-cn_StE\n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,cn_diff[:,:,hgrid_fix], rstride=1, \n cstride=1,cmap=cm.coolwarm, edgecolor='none',\n label='Difference of full-grid and 
approximated consumption function')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_a(m,k)$',fontsize=13)\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n ax.view_init(20, 40)\n\n# %% {\"code_folding\": []}\n# Difference of full-grid c and DCT compressed c for difference levels of accuracy\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Differences of c at full grids and c approximated by DCT in different accuracy levels(non-adjusters)',\n fontsize=(13))\n\nfor idx in range(len(acc_lst)):\n EX3SS_cp =cp.deepcopy(EX3SS)\n EX3SS_cp['par']['accuracy'] = acc_lst[idx]\n EX3SR_cp=StateReduc_Dct(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation\n SR_cp=EX3SR_cp.StateReduc()\n mut_rdc_idx_flt_cp = SR_cp['indexMUdct']\n mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')\n nb_bf_cp = len(mut_rdc_idx_cp[0])\n print(str(nb_bf_cp) +\" basis functions used.\")\n c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)\n c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)\n cn_diff_cp = c_n_approx_cp-cn_StE\n \n hgrid_fix=1 # fix level of h as an example \n \n ## plots \n ax = fig.add_subplot(2,2,idx+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,cn_diff_cp[:,:,hgrid_fix], rstride=1, \n cstride=1,cmap=cm.summer, edgecolor='none',\n label='Difference of full-grid and approximated consumption functions')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel('Difference of c functions',fontsize=13)\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_zlim([-8,2])\n ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))\n ax.view_init(10, 60)\n\n# %% {\"code_folding\": []}\n# for adjusters \n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of adjusters at grid points of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.scatter(mmgrid,kkgrid,c_a_approx[:,:,hgrid_fix],marker='v',color='red',\n label='StE(after dct):adjuster')\n ax.plot_surface(mmgrid,kkgrid,ca_StE[:,:,hgrid_fix],cmap='Blues',\n label='StE(before dct): adjuster')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_a(m,k)$',fontsize=13)\n plt.gca().invert_yaxis()\n #ax.set_xlim([0,mmax])\n #ax.set_ylim([0,kmax])\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n ax.view_init(20, 150)\n\n# %% {\"code_folding\": []}\n# Compare consumption functions of adjusters and non-adjusters approximated by DCT\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of adjusters (yellow)/non-adjusters (blue) at grid points of m and k (for each h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,c_n_approx[:,:,hgrid_fix],cmap=cm.winter,\n label='StE(after dct):non-adjuster')\n ax.plot_surface(mmgrid,kkgrid,c_a_approx[:,:,hgrid_fix],cmap=cm.autumn,\n label='StE(after dct):adjuster')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_a(m,k)$',fontsize=13)\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis() \n 
#ax.set_xlim(0,mmax)\n #ax.set_ylim(0,kmax)\n ax.view_init(20, 60)\n\n# %% {\"code_folding\": []}\n## the differences of c functions of adjusters and non-adjusters approximated by DCT.\n\nc_diff_approx=c_n_approx-c_a_approx\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Consumption of adjusters/non-adjusters at grid points of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## prepare the reduced grids \n hgrid_fix=hgrid_id \n \n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,c_diff_approx[:,:,hgrid_fix],cmap=cm.coolwarm,\n label='StE(after dct):difference of non-adjuster and adjusters')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_zlabel(r'$c_n(m,k)-c_a(m,k)$',fontsize=12)\n ax.set_title(r'$h({})$'.format(hgrid_fix))\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim(0,mmax)\n #ax.set_ylim(0,kmax)\n ax.view_init(20, 80)\n\n# %% [markdown]\n# ##### Observation\n#\n# - For a given grid value of productivity, the remaining grid points after DCT to represent the whole consumption function are concentrated in low values of $k$ and $m$. This is because the slopes of the surfaces of marginal utility are changing the most in these regions. For larger values of $k$ and $m$ the functions become smooth and only slightly concave, so they can be represented by many fewer points\n# - For different grid values of productivity (2 sub plots), the numbers of grid points in the DCT operation differ. From the lowest to highest values of productivity, there are 78, 33, 25 and 18 grid points, respectively. They add up to the total number of gridpoints of 154 after DCT operation, as we noted above for marginal utility function. \n\n# %% [markdown]\n# #### Distribution of states \n#\n# - We first plot the distribution of $k$ fixing $m$ and $h$. Next, we plot the joint distribution of $m$ and $k$ only fixing $h$ in 3-dimenstional space. \n# - The joint-distribution can be represented by marginal distributions of $m$, $k$ and $h$ and a copula that describes the correlation between the three states. The former is straightfoward. We plot the copula only. The copula is essentially a multivariate cummulative distribution function where each marginal is uniform. 
(Translation from the uniform to the appropriate nonuniform distribution is handled at a separate stage).\n#\n\n# %% {\"code_folding\": []}\n### Marginalize along h grids\n\njoint_distr = EX3SS['joint_distr']\njoint_distr_km = EX3SS['joint_distr'].sum(axis=2)\n\n### Plot distributions in 2 dimensional graph \n\nfig = plt.figure(figsize=(10,10))\nplt.suptitle('Marginal distribution of k at different m')\n\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ax = plt.subplot(2,2,hgrid_id+1)\n ax.set_title(r'$h({})$'.format(hgrid_id))\n ax.set_xlabel('k',size=12)\n for id in range(EX3SS['mpar']['nm']): \n ax.plot(kgrid,joint_distr[id,:,hgrid_id])\n\n# %% {\"code_folding\": []}\n## Plot joint distribution of k and m in 3d graph\n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Joint distribution of m and k(for different h)',\n fontsize=(13))\n\n\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,joint_distr[:,:,hgrid_fix], rstride=1, cstride=1,\n cmap='viridis', edgecolor='none')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n plt.gca().invert_yaxis()\n #ax.set_zlabel(r'$p(m,k)$',fontsize=10)\n ax.set_title(r'$h({})$'.format(hgrid_id))\n ax.set_xlim(0,400)\n ax.view_init(20, 40)\n \n\n\n# %% [markdown]\n# Notice the CDFs in StE copula have 4 modes, corresponding to the number of $h$ gridpoints. Each of the four parts of the cdf is a joint-distribution of $m$ and $k$. It can be presented in 3-dimensional graph as below. \n\n# %% {\"code_folding\": []}\n## Plot the copula \n\ncdf=EX3SS['Copula']['value'].reshape(4,30,30) # important: 4,30,30 not 30,30,4? \n\nfig = plt.figure(figsize=(14,14))\nfig.suptitle('Copula of m and k(for different h)',\n fontsize=(13))\nfor hgrid_id in range(EX3SS['mpar']['nh']):\n ## plots \n ax = fig.add_subplot(2,2,hgrid_id+1, projection='3d')\n ax.plot_surface(mmgrid,kkgrid,cdf[hgrid_id,:,:], rstride=1, cstride=1,\n cmap='viridis', edgecolor='None')\n ax.set_xlabel('m',fontsize=13)\n ax.set_ylabel('k',fontsize=13)\n ax.set_title(r'$h({})$'.format(hgrid_id))\n \n ## for each h grid, take the 95% mass of m and k as the maximum of the m and k axis \n \n marginal_mk = joint_distr[:,:,hgrid_id]\n marginal_m = marginal_mk.sum(axis=0)\n marginal_k = marginal_mk.sum(axis=1)\n mmax = mgrid[(np.abs(marginal_m.cumsum()-mass_pct*marginal_m.cumsum().max())).argmin()]\n kmax = kgrid[(np.abs(marginal_k.cumsum()-mass_pct*marginal_k.cumsum().max())).argmin()]\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n #ax.set_xlim(0,mmax)\n #ax.set_ylim(0,kmax)\n ax.view_init(30, 60)\n\n# %% [markdown]\n# # To Do:\n#\n# 1. Plot the _difference_ in the _approximation errors_ for adjusters and nonadjusters\n# 1. Make color or transparency be determined by the population density from the copula\n# 1. Make extra versions of the figures where the color is determined by the population density at that location (given by the copula)\n# 1. Differences _between_ adjusters and nonadjusters in consumption are not interesting and should be deleted\n# 1. Eliminate \"magic numbers\"\n# 1. Improve comments so a new reader can understand what is being done\n\n# %% [markdown]\n# Given the assumption that the copula remains the same after aggregate risk is introduced, we can use the same copula and the marginal distributions to recover the full joint-distribution of the states. 
\n\n# %% [markdown]\n# ### Summary: what do we achieve after the transformation?\n#\n# - Using the DCT, the dimension of the policy and value functions are reduced from 3600 to 154 and 94, respectively.\n# - By marginalizing the joint distribution with the fixed copula assumption, the marginal distribution is of dimension 64 compared to its joint distribution of a dimension of 3600.\n#\n#\n#\n"
] | [
[
"numpy.hstack",
"matplotlib.pyplot.gca",
"numpy.log",
"matplotlib.pyplot.suptitle",
"numpy.arange",
"numpy.squeeze",
"numpy.sort",
"numpy.int64",
"scipy.fftpack.dct",
"matplotlib.pyplot.subplot",
"numpy.shape",
"scipy.linalg.norm",
"numpy.array",
"numpy.meshgrid",
"numpy.unravel_index",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
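The `do_dct` method in the DCT-Copula notebook above selects basis functions by transforming the steady-state function, sorting the coefficients by magnitude, and keeping the smallest set whose norm reaches the requested accuracy level. The sketch below applies that same selection rule to a toy 2-D surface; the 30 x 30 grid and the log-shaped stand-in "policy function" are illustrative assumptions, not the model's actual objects:

    import numpy as np
    import scipy.fftpack as sf

    # Toy smooth, concave surface standing in for c(m, k) on a 30 x 30 grid.
    m = np.linspace(0.1, 10, 30)
    k = np.linspace(0.1, 10, 30)
    mm, kk = np.meshgrid(m, k, indexing='ij')
    c = np.log(1 + mm) + 0.5 * np.log(1 + kk)

    # Forward DCT along each axis (2-D analogue of the notebook's dct3d).
    coefs = sf.dct(sf.dct(c, norm='ortho', axis=0), norm='ortho', axis=1)

    # Keep the largest coefficients until they carry `accuracy` of the total norm,
    # mirroring the loop in StateReduc_Dct.do_dct.
    accuracy = 0.99999
    flat = coefs.flatten(order='F')
    order = np.argsort(np.abs(flat))[::-1]
    i = 1
    while np.linalg.norm(flat[order[:i]]) / np.linalg.norm(flat) < accuracy:
        i += 1

    # Zero out the discarded coefficients and invert the transform.
    reduced = np.zeros_like(flat)
    reduced[order[:i]] = flat[order[:i]]
    c_approx = sf.idct(sf.idct(reduced.reshape(c.shape, order='F'),
                               norm='ortho', axis=1), norm='ortho', axis=0)

    print(f"kept {i} of {flat.size} coefficients, "
          f"max abs error = {np.abs(c_approx - c).max():.2e}")

On a smooth, slightly concave surface like this only a few low-frequency coefficients are needed, which is the same effect the notebook reports when its 3600 gridpoints shrink to a much smaller set of retained basis functions (154 in the entry above).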
arceushui/Keyword-Spotting-Alibaba | [
"10e718491075dee8f875c7860385bc4eef22a790",
"10e718491075dee8f875c7860385bc4eef22a790"
] | [
"espnet2/bin/enh_inference.py",
"espnet2/layers/senet.py"
] | [
"#!/usr/bin/env python3\nimport argparse\nimport logging\nfrom pathlib import Path\nimport sys\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nimport humanfriendly\nimport numpy as np\nimport torch\nfrom tqdm import trange\nfrom typeguard import check_argument_types\n\nfrom espnet.utils.cli_utils import get_commandline_args\nfrom espnet2.fileio.sound_scp import SoundScpWriter\nfrom espnet2.tasks.enh import EnhancementTask\nfrom espnet2.torch_utils.device_funcs import to_device\nfrom espnet2.torch_utils.set_all_random_seed import set_all_random_seed\nfrom espnet2.utils import config_argparse\nfrom espnet2.utils.types import str2bool\nfrom espnet2.utils.types import str2triple_str\nfrom espnet2.utils.types import str_or_none\n\n\nEPS = torch.finfo(torch.get_default_dtype()).eps\n\n\nclass SeparateSpeech:\n \"\"\"SeparateSpeech class\n\n Examples:\n >>> import soundfile\n >>> separate_speech = SeparateSpeech(\"enh_config.yml\", \"enh.pth\")\n >>> audio, rate = soundfile.read(\"speech.wav\")\n >>> separate_speech(audio)\n [separated_audio1, separated_audio2, ...]\n\n \"\"\"\n\n def __init__(\n self,\n enh_train_config: Union[Path, str],\n enh_model_file: Union[Path, str] = None,\n segment_size: Optional[float] = None,\n hop_size: Optional[float] = None,\n normalize_segment_scale: bool = False,\n show_progressbar: bool = False,\n ref_channel: Optional[int] = None,\n normalize_output_wav: bool = False,\n device: str = \"cpu\",\n dtype: str = \"float32\",\n ):\n assert check_argument_types()\n\n # 1. Build Enh model\n enh_model, enh_train_args = EnhancementTask.build_model_from_file(\n enh_train_config, enh_model_file, device\n )\n enh_model.to(dtype=getattr(torch, dtype)).eval()\n\n self.device = device\n self.dtype = dtype\n self.enh_train_args = enh_train_args\n self.enh_model = enh_model\n\n # only used when processing long speech, i.e.\n # segment_size is not None and hop_size is not None\n self.segment_size = segment_size\n self.hop_size = hop_size\n self.normalize_segment_scale = normalize_segment_scale\n self.normalize_output_wav = normalize_output_wav\n self.show_progressbar = show_progressbar\n\n self.num_spk = enh_model.num_spk\n task = \"enhancement\" if self.num_spk == 1 else \"separation\"\n\n # reference channel for processing multi-channel speech\n if ref_channel is not None:\n logging.info(\n \"Overwrite enh_model.separator.ref_channel with {}\".format(ref_channel)\n )\n enh_model.separator.ref_channel = ref_channel\n self.ref_channel = ref_channel\n else:\n self.ref_channel = enh_model.ref_channel\n\n self.segmenting = segment_size is not None and hop_size is not None\n if self.segmenting:\n logging.info(\"Perform segment-wise speech %s\" % task)\n logging.info(\n \"Segment length = {} sec, hop length = {} sec\".format(\n segment_size, hop_size\n )\n )\n else:\n logging.info(\"Perform direct speech %s on the input\" % task)\n\n @torch.no_grad()\n def __call__(\n self, speech_mix: Union[torch.Tensor, np.ndarray], fs: int = 8000\n ) -> List[torch.Tensor]:\n \"\"\"Inference\n\n Args:\n speech_mix: Input speech data (Batch, Nsamples [, Channels])\n fs: sample rate\n Returns:\n [separated_audio1, separated_audio2, ...]\n\n \"\"\"\n assert check_argument_types()\n\n # Input as audio signal\n if isinstance(speech_mix, np.ndarray):\n speech_mix = torch.as_tensor(speech_mix)\n\n assert speech_mix.dim() > 1, speech_mix.size()\n batch_size = speech_mix.size(0)\n speech_mix = 
speech_mix.to(getattr(torch, self.dtype))\n # lenghts: (B,)\n lengths = speech_mix.new_full(\n [batch_size], dtype=torch.long, fill_value=speech_mix.size(1)\n )\n\n # a. To device\n speech_mix = to_device(speech_mix, device=self.device)\n lengths = to_device(lengths, device=self.device)\n\n if self.segmenting and lengths[0] > self.segment_size * fs:\n # Segment-wise speech enhancement/separation\n overlap_length = int(np.round(fs * (self.segment_size - self.hop_size)))\n num_segments = int(\n np.ceil((speech_mix.size(1) - overlap_length) / (self.hop_size * fs))\n )\n t = T = int(self.segment_size * fs)\n pad_shape = speech_mix[:, :T].shape\n enh_waves = []\n range_ = trange if self.show_progressbar else range\n for i in range_(num_segments):\n st = int(i * self.hop_size * fs)\n en = st + T\n if en >= lengths[0]:\n # en - st < T (last segment)\n en = lengths[0]\n speech_seg = speech_mix.new_zeros(pad_shape)\n t = en - st\n speech_seg[:, :t] = speech_mix[:, st:en]\n else:\n t = T\n speech_seg = speech_mix[:, st:en] # B x T [x C]\n\n lengths_seg = speech_mix.new_full(\n [batch_size], dtype=torch.long, fill_value=T\n )\n # b. Enhancement/Separation Forward\n feats, f_lens = self.enh_model.encoder(speech_seg, lengths_seg)\n feats, _, _ = self.enh_model.separator(feats, f_lens)\n processed_wav = [\n self.enh_model.decoder(f, lengths_seg)[0] for f in feats\n ]\n if speech_seg.dim() > 2:\n # multi-channel speech\n speech_seg_ = speech_seg[:, self.ref_channel]\n else:\n speech_seg_ = speech_seg\n\n if self.normalize_segment_scale:\n # normalize the energy of each separated stream\n # to match the input energy\n processed_wav = [\n self.normalize_scale(w, speech_seg_) for w in processed_wav\n ]\n # List[torch.Tensor(num_spk, B, T)]\n enh_waves.append(torch.stack(processed_wav, dim=0))\n\n # c. Stitch the enhanced segments together\n waves = enh_waves[0]\n for i in range(1, num_segments):\n # permutation between separated streams in last and current segments\n perm = self.cal_permumation(\n waves[:, :, -overlap_length:],\n enh_waves[i][:, :, :overlap_length],\n criterion=\"si_snr\",\n )\n # repermute separated streams in current segment\n for batch in range(batch_size):\n enh_waves[i][:, batch] = enh_waves[i][perm[batch], batch]\n\n if i == num_segments - 1:\n enh_waves[i][:, :, t:] = 0\n enh_waves_res_i = enh_waves[i][:, :, overlap_length:t]\n else:\n enh_waves_res_i = enh_waves[i][:, :, overlap_length:]\n\n # overlap-and-add (average over the overlapped part)\n waves[:, :, -overlap_length:] = (\n waves[:, :, -overlap_length:] + enh_waves[i][:, :, :overlap_length]\n ) / 2\n # concatenate the residual parts of the later segment\n waves = torch.cat([waves, enh_waves_res_i], dim=2)\n # ensure the stitched length is same as input\n assert waves.size(2) == speech_mix.size(1), (waves.shape, speech_mix.shape)\n waves = torch.unbind(waves, dim=0)\n else:\n # b. 
Enhancement/Separation Forward\n feats, f_lens = self.enh_model.encoder(speech_mix, lengths)\n feats, _, _ = self.enh_model.separator(feats, f_lens)\n waves = [self.enh_model.decoder(f, lengths)[0] for f in feats]\n\n assert len(waves) == self.num_spk, len(waves) == self.num_spk\n assert len(waves[0]) == batch_size, (len(waves[0]), batch_size)\n if self.normalize_output_wav:\n waves = [\n (w / abs(w).max(dim=1, keepdim=True)[0] * 0.9).cpu().numpy()\n for w in waves\n ] # list[(batch, sample)]\n else:\n waves = [w.cpu().numpy() for w in waves]\n\n return waves\n\n @staticmethod\n @torch.no_grad()\n def normalize_scale(enh_wav, ref_ch_wav):\n \"\"\"Normalize the energy of enh_wav to match that of ref_ch_wav.\n\n Args:\n enh_wav (torch.Tensor): (B, Nsamples)\n ref_ch_wav (torch.Tensor): (B, Nsamples)\n Returns:\n enh_wav (torch.Tensor): (B, Nsamples)\n \"\"\"\n ref_energy = torch.sqrt(torch.mean(ref_ch_wav.pow(2), dim=1))\n enh_energy = torch.sqrt(torch.mean(enh_wav.pow(2), dim=1))\n return enh_wav * (ref_energy / enh_energy)[:, None]\n\n @torch.no_grad()\n def cal_permumation(self, ref_wavs, enh_wavs, criterion=\"si_snr\"):\n \"\"\"Calculate the permutation between seaprated streams in two adjacent segments.\n\n Args:\n ref_wavs (List[torch.Tensor]): [(Batch, Nsamples)]\n enh_wavs (List[torch.Tensor]): [(Batch, Nsamples)]\n criterion (str): one of (\"si_snr\", \"mse\", \"corr)\n Returns:\n perm (torch.Tensor): permutation for enh_wavs (Batch, num_spk)\n \"\"\"\n loss_func = {\n \"si_snr\": self.enh_model.si_snr_loss,\n \"mse\": lambda enh, ref: torch.mean((enh - ref).pow(2), dim=1),\n \"corr\": lambda enh, ref: (\n (enh * ref).sum(dim=1)\n / (enh.pow(2).sum(dim=1) * ref.pow(2).sum(dim=1) + EPS)\n ).clamp(min=EPS, max=1 - EPS),\n }[criterion]\n\n _, perm = self.enh_model._permutation_loss(ref_wavs, enh_wavs, loss_func)\n return perm\n\n\ndef humanfriendly_or_none(value: str):\n if value in (\"none\", \"None\", \"NONE\"):\n return None\n return humanfriendly.parse_size(value)\n\n\ndef inference(\n output_dir: str,\n batch_size: int,\n dtype: str,\n fs: int,\n ngpu: int,\n seed: int,\n num_workers: int,\n log_level: Union[int, str],\n data_path_and_name_and_type: Sequence[Tuple[str, str, str]],\n key_file: Optional[str],\n enh_train_config: str,\n enh_model_file: str,\n allow_variable_data_keys: bool,\n segment_size: Optional[float],\n hop_size: Optional[float],\n normalize_segment_scale: bool,\n show_progressbar: bool,\n ref_channel: Optional[int],\n normalize_output_wav: bool,\n):\n assert check_argument_types()\n if batch_size > 1:\n raise NotImplementedError(\"batch decoding is not implemented\")\n if ngpu > 1:\n raise NotImplementedError(\"only single GPU decoding is supported\")\n\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n\n if ngpu >= 1:\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n # 1. Set random-seed\n set_all_random_seed(seed)\n\n # 2. Build separate_speech\n separate_speech = SeparateSpeech(\n enh_train_config=enh_train_config,\n enh_model_file=enh_model_file,\n segment_size=segment_size,\n hop_size=hop_size,\n normalize_segment_scale=normalize_segment_scale,\n show_progressbar=show_progressbar,\n ref_channel=ref_channel,\n normalize_output_wav=normalize_output_wav,\n device=device,\n dtype=dtype,\n )\n\n # 3. 
Build data-iterator\n loader = EnhancementTask.build_streaming_iterator(\n data_path_and_name_and_type,\n dtype=dtype,\n batch_size=batch_size,\n key_file=key_file,\n num_workers=num_workers,\n preprocess_fn=EnhancementTask.build_preprocess_fn(\n separate_speech.enh_train_args, False\n ),\n collate_fn=EnhancementTask.build_collate_fn(\n separate_speech.enh_train_args, False\n ),\n allow_variable_data_keys=allow_variable_data_keys,\n inference=True,\n )\n\n # 4. Start for-loop\n writers = []\n for i in range(separate_speech.num_spk):\n writers.append(\n SoundScpWriter(f\"{output_dir}/wavs/{i + 1}\", f\"{output_dir}/spk{i + 1}.scp\")\n )\n\n for keys, batch in loader:\n assert isinstance(batch, dict), type(batch)\n assert all(isinstance(s, str) for s in keys), keys\n _bs = len(next(iter(batch.values())))\n assert len(keys) == _bs, f\"{len(keys)} != {_bs}\"\n batch = {k: v for k, v in batch.items() if not k.endswith(\"_lengths\")}\n\n waves = separate_speech(**batch)\n for (spk, w) in enumerate(waves):\n for b in range(batch_size):\n writers[spk][keys[b]] = fs, w[b]\n print(w[b],file=sys.stderr)\n for writer in writers:\n writer.close()\n\n\ndef get_parser():\n parser = config_argparse.ArgumentParser(\n description=\"Frontend inference\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Note(kamo): Use '_' instead of '-' as separator.\n # '-' is confusing if written in yaml.\n parser.add_argument(\n \"--log_level\",\n type=lambda x: x.upper(),\n default=\"INFO\",\n choices=(\"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"NOTSET\"),\n help=\"The verbose level of logging\",\n )\n\n parser.add_argument(\"--output_dir\", type=str, required=True)\n parser.add_argument(\n \"--ngpu\",\n type=int,\n default=0,\n help=\"The number of gpus. 
0 indicates CPU mode\",\n )\n parser.add_argument(\"--seed\", type=int, default=0, help=\"Random seed\")\n parser.add_argument(\n \"--dtype\",\n default=\"float32\",\n choices=[\"float16\", \"float32\", \"float64\"],\n help=\"Data type\",\n )\n parser.add_argument(\n \"--fs\", type=humanfriendly_or_none, default=8000, help=\"Sampling rate\"\n )\n parser.add_argument(\n \"--num_workers\",\n type=int,\n default=1,\n help=\"The number of workers used for DataLoader\",\n )\n\n group = parser.add_argument_group(\"Input data related\")\n group.add_argument(\n \"--data_path_and_name_and_type\",\n type=str2triple_str,\n required=True,\n action=\"append\",\n )\n group.add_argument(\"--key_file\", type=str_or_none)\n group.add_argument(\"--allow_variable_data_keys\", type=str2bool, default=False)\n\n group = parser.add_argument_group(\"Output data related\")\n group.add_argument(\n \"--normalize_output_wav\",\n type=str2bool,\n default=False,\n help=\"Whether to normalize the predicted wav to [-1~1]\",\n )\n\n group = parser.add_argument_group(\"The model configuration related\")\n group.add_argument(\"--enh_train_config\", type=str, required=True)\n group.add_argument(\"--enh_model_file\", type=str, required=True)\n\n group = parser.add_argument_group(\"Data loading related\")\n group.add_argument(\n \"--batch_size\",\n type=int,\n default=1,\n help=\"The batch size for inference\",\n )\n group = parser.add_argument_group(\"SeparateSpeech related\")\n group.add_argument(\n \"--segment_size\",\n type=float,\n default=None,\n help=\"Segment length in seconds for segment-wise speech enhancement/separation\",\n )\n group.add_argument(\n \"--hop_size\",\n type=float,\n default=None,\n help=\"Hop length in seconds for segment-wise speech enhancement/separation\",\n )\n group.add_argument(\n \"--normalize_segment_scale\",\n type=str2bool,\n default=False,\n help=\"Whether to normalize the energy of the separated streams in each segment\",\n )\n group.add_argument(\n \"--show_progressbar\",\n type=str2bool,\n default=False,\n help=\"Whether to show a progress bar when performing segment-wise speech \"\n \"enhancement/separation\",\n )\n group.add_argument(\n \"--ref_channel\",\n type=int,\n default=None,\n help=\"If not None, this will overwrite the ref_channel defined in the \"\n \"separator module (for multi-channel speech processing)\",\n )\n\n return parser\n\n\ndef main(cmd=None):\n print(get_commandline_args(), file=sys.stderr)\n parser = get_parser()\n args = parser.parse_args(cmd)\n kwargs = vars(args)\n kwargs.pop(\"config\", None)\n inference(**kwargs)\n\n\nif __name__ == \"__main__\":\n main()\n",
"\nfrom torch import nn\n\"\"\"\nmodified from https://github.com/moskomule/senet.pytorch/blob/master/senet/se_module.py\n\"\"\"\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, t, c = x.size()\n x = x.transpose(1,2) # b,c,t\n y = self.avg_pool(x).view(b, c,)\n y = self.fc(y).view(b, c, 1, )\n result = x * y.expand_as(x) # b,c,t\n return result.transpose(1,2) # b,t,c\n"
] | [
[
"torch.cat",
"numpy.round",
"torch.no_grad",
"torch.unbind",
"torch.stack",
"torch.get_default_dtype",
"torch.as_tensor"
],
[
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.ReLU",
"torch.nn.Sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
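For long inputs, `SeparateSpeech.__call__` in the espnet2 entry above enhances fixed-length segments and then stitches them together: the overlapped region of adjacent segments is averaged, and only the non-overlapping remainder of the newer segment is appended (the stream re-alignment via `cal_permumation` is omitted here). A minimal sketch of just that overlap-and-add stitching, with made-up segment counts and sizes:

    import torch

    # Assumed toy setup: 2 separated streams, batch of 1, three 8000-sample
    # segments that overlap by 2000 samples.
    num_spk, batch, seg_len, overlap = 2, 1, 8000, 2000
    segments = [torch.randn(num_spk, batch, seg_len) for _ in range(3)]

    waves = segments[0].clone()
    for seg in segments[1:]:
        # Average the previous tail with the new head over the overlapped part...
        waves[:, :, -overlap:] = (waves[:, :, -overlap:] + seg[:, :, :overlap]) / 2
        # ...then append the part of the new segment that was not yet covered.
        waves = torch.cat([waves, seg[:, :, overlap:]], dim=2)

    # Length = seg_len + (num_segments - 1) * (seg_len - overlap) = 20000 samples.
    print(waves.shape)  # torch.Size([2, 1, 20000])

Averaging over the overlap keeps the stitched waveform the same length that direct processing of the whole input would give, while smoothing discontinuities at the segment joints.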
RosettaCommons/jade2 | [
"40affc7c4e0f1f6ee07030e72de284e3484946e7"
] | [
"jade2/basic/structure/PythonPDB2.py"
] | [
"\n## @author Jared Adolf-Bryfogle ([email protected])\n\n#Python Imports\nimport copy\nimport pandas\nimport re, logging\nfrom collections import defaultdict\nfrom typing import Union, DefaultDict, List, Any, Dict\nfrom pathlib import Path\n\nfrom jade2.basic.path import *\n\nclass PythonPDB2:\n def __init__(self, pdb_file_path: Union[str, Path] = \"\"):\n \"\"\"\n \n Lightweight PDB class specifically for manipulating pdbs in scripts and simple apps as well as obtaining subsets of data in the PDB.\n 2.0 Uses a vector of dictionaries as main pdb_map for easier manipulation of the pdb_map.\n Notes:\n Not Meant to be fast - written for ease of use!\n ALL elements of the pdb_map data are stored as strings!\n\n \"\"\"\n\n self.elements = (\"id\", \"atom_number\", \"atom_name\", \"alternate_location\", \\\n \"three_letter_code\", \"chain\", \"residue_number\", \"i_code\", \"x\", \"y\", \"z\", \\\n \"occupancy\", \"b_factor\", \"element\", \"charge\")\n self.pdb_file_path = str(pdb_file_path)\n\n self.pdb_map: List[DefaultDict[str, str]] = [] #[int line]:[string element]:[string value]\n\n self.header: List[str] = [] #Unparsed header, but the data is held here as a list of strings. - Everything NOT ATOM or HETATM is here\n self.remarks: List[str] = [] #Only REMARK lines as strings\n\n if pdb_file_path:\n self.read_pdb_into_map()\n else:\n logging.info(\"Loading blank PythonPDB\")\n\n\n def set_pdb_map(self, pdb_map: List[DefaultDict[str, str]]):\n self.pdb_map = pdb_map\n\n ####################################################################\n # Getters + PDB_Map Subsets\n #\n #\n\n def get_pdb_map(self) -> List[DefaultDict[str, str]]:\n return self.pdb_map\n\n def get_dataframe(self) -> pandas.DataFrame:\n \"\"\"\n Get the PDB Map as a dataframe dataframe\n \"\"\"\n return pandas.DataFrame(self.pdb_map)\n\n def get_header(self) -> List[str]:\n \"\"\"\n Get 'header' of PDB as list of strings\n \"\"\"\n return self.header\n\n def get_remarks(self) -> List[str]:\n \"\"\"\n Get 'REMARK' lines of PDB as a list of strings\n \"\"\"\n return self.remarks\n\n def add_remark(self, remark: str):\n remark = \"REMARK \"+remark\n self.remarks.append(remark)\n\n def get_chain(self, chain) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get Chain data as pdb_map subset\n \"\"\"\n chain_data = []\n for dat in self.pdb_map:\n if dat[\"chain\"] == chain:\n chain_data.append(dat)\n return chain_data\n\n def rename_chain(self, old_chain, new_chain):\n for i in range(0, len(self.pdb_map) ):\n #print(\"CHAIN :\",self.pdb_map[i][\"chain\"],\":\")\n if self.pdb_map[i][\"chain\"] == old_chain:\n #print(\"Chain found. 
Attempting to change\")\n self.pdb_map[i][\"chain\"] = new_chain\n\n def get_waters(self) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get water data as pdb_map subset\n \"\"\"\n water_data = []\n for dat in self.pdb_map:\n if dat[\"three_letter_code\"] in [\"HOH\",\"TP3\",\"TP5\",\"TIP3\",\"TIP5\"]:\n water_data.append(dat)\n return water_data\n\n def get_hetatms(self) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get hetatm data as pdb_map subset\n \"\"\"\n het_data = []\n for dat in self.pdb_map:\n if dat[\"id\"] == \"HETATM\":\n het_data.append(dat)\n return het_data\n\n def get_bb_data(self) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get pdb_map subset of only N, CA, and C atoms\n \"\"\"\n bb_data = []\n for dat in self.pdb_map:\n if dat[\"atom_name\"] in [\"N\", \"CA\", \"C\"]:\n bb_data.append(dat)\n return bb_data\n\n def get_all_residues_of_type(self, name3: str) -> List[DefaultDict[str, str]]:\n \"\"\"\n Get PDB_Map subset of all residues of specific type\n \"\"\"\n res_data = []\n for dat in self.pdb_map:\n if dat[\"three_letter_code\"] == name3:\n res_data.append(dat)\n return res_data\n\n def get_residue(self, resnum: int, chain: str, icode: str= \"\") -> List[DefaultDict[str, str]]:\n \"\"\"\n Get PDB_Map subset of a specific residue\n \"\"\"\n residue = []\n for dat in self.pdb_map:\n if dat[\"residue_number\"] == str(resnum) and dat[\"chain\"] == chain and dat[\"icode\"] == \"\":\n residue.append(dat)\n return residue\n\n\n\n ####################################################################\n # Main\n #\n #\n\n def read_pdb_into_map(self):\n \"\"\"\n Reads PDB file path into a basic PDB map. All data is held as strings.\n \"\"\"\n \n FILE = open_file(self.pdb_file_path, 'r')\n i = 0\n for line in FILE:\n line = line.strip()\n line = line.strip('\\n')\n\n if not line: continue\n\n if re.search(\"REMARK\", line[0:6]):\n self.remarks.append(line)\n\n elif (re.search(\"END\", line[0:6]) or re.search(\"TER\", line[0:6])):\n #We ignore END and TER for now.\n pass\n\n elif (re.search(\"ATOM\", line[0:6]) or re.search(\"HETATM\", line[0:6])):\n\n self.pdb_map.append(defaultdict())\n\n self.pdb_map[i][\"id\"]=line[0:6].strip()\n self.pdb_map[i][\"atom_number\"]=line[6:11].strip(); self.pdb_map[i][\"atom_name\"] = line[12:16]\n self.pdb_map[i][\"alternate_location\"]=line[16]; self.pdb_map[i][\"three_letter_code\"] = line[17:21].strip()\n self.pdb_map[i][\"chain\"] = line[21]; self.pdb_map[i][\"residue_number\"]= line[22:26].strip()\n self.pdb_map[i][\"i_code\"] = line[26]; self.pdb_map[i][\"x\"] = line[27:38].strip()\n self.pdb_map[i][\"y\"]= line[38:46].strip(); self.pdb_map[i][\"z\"]= line[46:54].strip()\n self.pdb_map[i][\"occupancy\"] = line[54:60].strip(); self.pdb_map[i][\"b_factor\"]=line[60:66].strip()\n self.pdb_map[i][\"element\"]=line[66:78].strip(); self.pdb_map[i][\"charge\"]=line[78:79].strip()\n\n i +=1\n\n elif (re.search(\"REMARK\", line[0:6])):\n self.remarks.append(line)\n\n else:\n self.header.append(line)\n\n FILE.close()\n\n def save_PDB(self, filename: Union[Path, str], output_remarks: bool = True, output_header: bool= True) -> Union[Path, str]:\n \"\"\"\n Uses a the pdb_map to save the data as a PDB file.\n Returns the filename\n \"\"\"\n\n #global_variables.current_directory = os.path.dirname(filename)\n\n FILE = open_file(filename, 'w')\n if output_remarks:\n for line in self.remarks:\n FILE.write(line+\"\\n\")\n\n if output_header:\n for line in self.header:\n FILE.write(line+\"\\n\")\n\n for entry in self.pdb_map:\n line = 
self.morph_line_in_pdb_map_to_pdb_line(entry)\n FILE.write(line+\"\\n\")\n FILE.close()\n print(\"PDB File Written...\")\n return filename\n\n def morph_line_in_pdb_map_to_pdb_line(self, entry: DefaultDict[str, str]) -> str:\n \"\"\"\n Oh What fun. ;)\n Magic Numbers?: (6,5,4,3,1,4,8,8,8,4,5);\n \"\"\"\n\n\n #Here we fix the formating of atom name. If we stripped the atom name.\n \"\"\"\n atom_name = self.pdb_map[line_num]['atom_name']\n if len(atom_name)==1:\n atom_name=' '+atom_name+' '\n elif len(atom_name)==2:\n #Note that 2 letter elements like CA (calcium) differ from CA (C-Alpha)\n #If calcium, would go @column 13. if C-Alpha, column 14.\n atom_name=' '+atom_name+' '\n elif len(atom_name)==3:\n atom_name=' '+atom_name\n elif len(atom_name)==4:\n atom_name=atom_name\n else:\n print \"Atom Name missing. Inserting spaces.\"\n atom_name = ' '\n \"\"\"\n\n #Create the PDB line.\n line = (entry['id']).ljust(6)+ (entry['atom_number']).rjust(5)+\" \"+ entry['atom_name']+ \\\n (entry['alternate_location'])+ ((entry['three_letter_code']).rjust(3)).ljust(4)+ (entry['chain'])+ \\\n (entry['residue_number']).rjust(4)+ (entry['i_code']) + \\\n (entry['x']).rjust(11)+ (entry['y']).rjust(8)+ (entry['z']).rjust(8) + \\\n (entry['occupancy']).rjust(6)+ (entry['b_factor']).rjust(6)\n\n #Note three letter code is wonky due to DA residues. ljust(4) was not working.\n return line\n\n\n ##################\n # Addition\n #\n #\n\n def add_ca_residue(self, x: str, y: str, z: str, restype: str = \"ALA\", b_fac: float = 0, chain=\"X\"):\n \"\"\"\n Add a residue to the map that is only CA\n :param x:\n :param y:\n :param z:\n :param restype:\n :param b_fac:\n :return: None\n \"\"\"\n pass\n\n ####################################################################\n # Removal\n #\n #\n\n def remove_antigen(self):\n \"\"\"\n Remove Antigen from an LH only PDB\n \"\"\"\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"chain\"] not in ['L', 'H']:\n self.pdb_map.remove(dat)\n\n def remove_chain(self, chain: str):\n \"\"\"\n Removes chain from pdb_map\n \"\"\"\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"chain\"]==chain:\n self.pdb_map.remove(dat)\n\n def remove_residue_type(self, name3: str):\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"three_letter_code\"]==name3:\n self.pdb_map.remove(dat)\n\n def remove_hetatm_atoms(self):\n temp_pdb_map = copy.deepcopy(self.pdb_map)\n for dat in temp_pdb_map:\n if dat[\"id\"]==\"HETATM\":\n self.pdb_map.remove(dat)\n \n \n def remove_element_column(self):\n \"\"\"\n Removes the extra stuff in the element column, but not the element itself.\n \"\"\"\n for i in range(0, len(self.pdb_map)):\n ele = self.pdb_map[i][\"element\"]\n e = ele[11]\n self.pdb_map[i][\"element\"]=\" \"+e\n print(\"Extra stuff in Element Columns Removed\")\n return self.pdb_map\n \n def remove_waters(self):\n \"\"\"\n Removes waters from pdb_map\n \"\"\"\n #codes = [\"HOH\",\"TP3\",\"TP5\",\"TIP3\",\"TIP5\"]\n temp_pdb_map = copy.deepcopy(self.pdb_map) #This is to pop elements\n for dat in temp_pdb_map:\n if dat[\"three_letter_code\"] in [\"HOH\",\"TP3\",\"TP5\",\"TIP3\",\"TIP5\"]:\n #self.pdb_map.pop(num)\n self.pdb_map.remove(dat)\n \n def remove_alternate_residues(self):\n \"\"\"\n Removes any alternate residue codes and renumbers by renumbering from 1 and integrating any inserts. 
\n \"\"\"\n \n def get_residue_num(num): return int(self.pdb_map_copy[num][\"residue_number\"])\n def set_residue_num(num, resnum): self.pdb_map[num][\"residue_number\"]=str(resnum)\n def get_chain(num):return self.pdb_map_copy[num][\"chain\"]\n def get_i_code(num):return self.pdb_map_copy[num][\"i_code\"]\n \n def check_id(num):\n if self.pdb_map_copy[num]['id']==\"ATOM\":\n return True\n else:\n return False\n \n def check_new_residue(old_num, num, insert_residue=False, pdb_map = False):\n if insert_residue:\n if get_i_code(old_num)==get_i_code(num):\n return False\n else:\n return True\n else:\n if get_residue_num(old_num)==get_residue_num(num):\n return False\n else:\n return True\n \n def check_new_chain(old_num, num):\n if get_chain(old_num)==get_chain(num):\n return False\n else:\n return True\n \n def check_insertion(num):\n if not get_i_code(num)==\" \":\n return True\n else:\n return False\n \n def renumber_from_one(chain_only, start_num):\n resnum = 1\n for num in sorted(chain_only):\n \n insert = check_insertion(num)\n \n #print repr(get_residue_num(num))+\":\"+repr(insert)\n \n #This is so we don't check if it's a new residue with num-1 - Which won't actually be part of the chain!\n if num==start_num:\n set_residue_num(num, resnum)\n\n \n \n #Iterate resnum if new residue\n elif check_new_residue(num-1, num, insert):\n resnum+=1\n set_residue_num(num, resnum)\n\n else:\n set_residue_num(num, resnum)\n \n #Set i code at the end, so we can tell if we have new residues or not.\n for num in sorted(chain_only):\n self.pdb_map[num][\"i_code\"]=\" \"\n \n def renumber_from_insert(chain_only, start_num):\n pass\n \n self.pdb_map_copy = copy.deepcopy(self.pdb_map)\n \n #Get chains with insertion codes - Now renumbers all chains. Will be an option later.\n chains_with_inserts = dict(); \n for num in range(0, len(self.pdb_map)):\n #if get_i_code(num)==\" \":\n chains_with_inserts[get_chain(num)]=True\n\n \n #Iterate through all lines/atoms\n #Initialize for scope\n start_residue=0;\n new_start=False\n for chain in chains_with_inserts:\n print(\"Renumbering chain \"+chain)\n chain_only=dict()\n for num in range(0, len(self.pdb_map)):\n if chain == get_chain(num) and check_id(num):\n chain_only[num]=self.pdb_map[num]\n lines = sorted(chain_only)\n res_start = get_residue_num(lines[0])\n \n renumber_from_one(chain_only, lines[0])\n \n #For now, we only renumber from one.\n #else:\n #chain_only = renumber_from_insert(chain_only, lines[0]) \n \n\n\n ####################################################################\n # General Manipulation\n #\n #\n\n def change_occupancy(self):\n \"\"\"\n Changes ALL occupancies in a PDB dictionary to 1.00\n Returns PDB Dictionary.\n \"\"\"\n \n check = 0\n for key in range(0, len(self.pdb_map)):\n if self.pdb_map[key][\"occupancy\"].rfind(\"0.00\")!=-1:\n print(\"Changing occupancy of residue \" + self.pdb_map[key][\"residue_number\"] + \"To 1.00\")\n check =1\n self.pdb_map[key][\"occupancy\"] = \" 1.00\"\n if check ==1:\n print(\"Occupancy Column OK for PyRosetta...\")\n\n\n def combine_pdb(self, py_pdb: 'PythonPDB2'):\n \"\"\"\n Combines pdb_map from instance of PyPDB to this one. Does not do any checks.\n \"\"\"\n m = py_pdb.get_pdb_map()\n for dat in m:\n self.pdb_map.append(dat)\n\n def copy_chain_into_pdb_map(self, py_pdb: 'PythonPDB2', chain: str):\n \"\"\"\n Copies all data from one pdb_map of a py_pdb of a chain into the one held in this class. 
Useful for reordering chains.\n \"\"\"\n m = py_pdb.get_pdb_map()\n for dat in m:\n if dat[\"chain\"] == chain:\n self.pdb_map.append(dat)\n\n def copy_all_but_chains_into_pdb_map(self, py_pdb:'PythonPDB2', chains):\n \"\"\"\n Copies all data from one pdb_map of a py_pdb of all data except the specified chains into this one. Useful for reordering chains.\n \"\"\"\n m = py_pdb.get_pdb_map()\n for dat in m:\n if not dat[\"chain\"] in chains:\n self.pdb_map.append(dat)\n\n def combine_pdb_map(self, pdb_map: List[DefaultDict[str, str]]):\n \"\"\"\n Combines pdb_map passed with the PythonPDBs map\n \"\"\"\n for dat in pdb_map:\n self.pdb_map.append(dat)\n\n def pdb_alias(self, pairs: Dict[Any, Any], element: str):\n \"\"\"\n Replaces ALL occurances of old element with new from pair.\n pair is a dictionary. In C++ it would be an array of pairs. [string old]:[string new]\n For Specific functions, please see below.\n \"\"\"\n for num in range(0, len(self.pdb_map)):\n for old in pairs:\n if self.pdb_map[num][element] == old:\n self.pdb_map[num][element] = pairs[old]\n\n def pdb_atom_alias(self, line_num: int, pair: Dict[Any, Any]):\n \"\"\"\n Replaces atom_names with ones Rosetta is happy with.\n pair is a dictionary. In C++ it would be an array of pairs. [string MD atom_name]:[string rosetta atom_name]\n \"\"\"\n for start in pair:\n if self.pdb_map[line_num][\"atom_name\"] == start:\n print(self.pdb_map[line_num][\"three_letter_code\"] + \":\" + self.pdb_map[line_num][\"atom_name\"] + \":\" +\n pair[start])\n self.pdb_map[line_num][\"atom_name\"] = pair[start]\n\n def pdb_residue_alias(self, pairs: Dict[Any, Any]):\n \"\"\"\n Replaces ALL occurances of old residue with new residue.\n pair is a dictionary. In C++ it would be an array of pairs. [string old residue_name]:[string new residue_name]\n \"\"\"\n for num in range(0, len(self.pdb_map)):\n for old in pairs:\n if self.pdb_map[num][\"residue_name\"] == old:\n self.pdb_map[num][\"residue_name\"] = pairs[old]\n\n def pdb_chain_alias(self, pairs: Dict[Any, Any]):\n \"\"\"\n Replaces ALL occurances of old chain with new chain.\n pair is a dictionary. In C++ it would be an array of pairs. 
[string old chain]:[string new chain]\n \"\"\"\n for num in range(0, len(self.pdb_map)):\n for old in pairs:\n if self.pdb_map[num][\"chain\"] == old:\n self.pdb_map[num][\"chain\"] = pairs[old]\n\n def clean_PDB(self):\n \"\"\"\n Removes HSD, Waters: Tries to fix atom and residue name inconsistencies.\n HAS worked for changing a single MD pdb (NAMD) frame to Rosetta file.\n PLEASE Expand if possible to alias all residues for Rosetta compatability.\n NOT gaurenteed, but SHOULD work ok.\n \"\"\"\n\n self.RESIDUES_aliased = False; self.WATER_aliased=False; self.IONS_aliased=False; self.DNA_aliased = False\n\n waters: List[DefaultDict[str, str]] = [] #List of data that have waters\n print(\"Attempting to change residue names, atom names, and water\")\n for n in range(0, len(self.pdb_map)):\n dat = self.pdb_map[n]\n\n #print self.pdb_map[key][\"three_letter_code\"]\n def alias_dna():\n if dat[\"three_letter_code\"]==\"DA\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"A\"\n\n elif dat[\"three_letter_code\"]==\"DT\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"T\"\n\n elif dat[\"three_letter_code\"]==\"DC\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"C\"\n\n elif dat[\"three_letter_code\"]==\"DG\":\n self.DNA_aliased=True\n dat[\"three_letter_code\"]=\"G\"\n\n else:\n return\n\n def alias_water():\n if dat[\"three_letter_code\"] in [\"HOH\", \"TIP3\", \"WAT\", \"TIP5\"]:\n self.WATER_aliased=True\n dat[\"three_letter_code\"]=\"TP3\" #IO_STRING for TP3 is WAT...Buy still reads TP#?\n dat[\"id\"]=\"HETATM\"\n waters.append(dat)\n\n #def alias_ions():\n #if self.pdb_map[key][\"chain\"]==\"I\":\n #IONS_aliased= True\n #self.pdb_map[key][\"id\"]=\"HETATM\"\n\n def alias_residues():\n if dat[\"three_letter_code\"] == \"HSD\":\n self.RESIDUES_aliased = True\n dat[\"three_letter_code\"]=\"HIS\"\n\n def alias_atoms():\n if dat[\"three_letter_code\"]== \"SER \":\n atom_pairs = {\" HG1\":\" HG \"}\n\n elif dat[\"three_letter_code\"]==\"ILE \":\n atom_pairs = {\" CD \":\" CD1\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"LEU \":\n atom_pairs = {\" OT1\":\" O \", \" OT2\":\" OXT\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"VAL \":\n atom_pairs = {\" OT1\":\" O \", \" OT2\":\" OXT\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"LYS \":\n atom_pairs = {\" HZ1\":\" 1HZ\", \" HZ2\":\" 2HZ\", \" HZ3\":\" 3HZ\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"ARG \":\n atom_pairs = {\" HH11\":\" 1HH1\", \" HH12\":\" 2HH1\", \" HH21\":\" 1HH2\", \" HH22\":\" 2HH2\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"ASN \":\n atom_pairs = {\"HD21\":\"1HD2\", \"HD22\":\"2HD2\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n elif dat[\"three_letter_code\"]==\"PRO \":\n atom_pairs = {\" OT1\":\" O \", \" OT2\":\" OXT\", \" HD1\":\" 1HD\", \" HD2\":\" 2HD\", \" HB1\":\" 1HB\", \" HG1\":\" 1HG\", \" HG2\":\" 2HG\"}\n self.pdb_map = self.pdb_atom_alias(n, atom_pairs)\n\n\n #Unnessessary, but organized.\n alias_water()\n #alias_ions()\n #alias_residues()\n alias_atoms()\n alias_dna()\n\n #Removes Waters. Keeps Ions.\n #for key in waters:\n #self.pdb_map.pop(key)\n\n #Outputs what was found:\n if self.RESIDUES_aliased:\n print(\"Residues Changed\")\n\n if self.WATER_aliased:\n print(\"Water found...changed to TP3. 
Remove to decrease calculation time.\")\n\n if self.IONS_aliased:\n print(\"Ions found. Most are able to be read into Rosetta\")\n\n if self.DNA_aliased:\n print(\"DNA found, changed to single letter code.\")\n\n\n\n\n ####################################################################\n # B Factor Replacements\n #\n #\n\n def read_file_and_replace_b_factors(self, deliminator: str, filename: str, resnum_column: int=1, chain_column:int=2, data_column: int=3, atomname_column=False):\n \"\"\"\n This function reads a deliminated file with data and inserts the data into the BFactor column. Used to visualize arbitrary data.\n Use function options to control which column the data is in as well as where your resnums and chains are located.\n If atomname column is given, will insert by atom instead of by residue\n \"\"\"\n \n INFILE = open_file(filename, 'r')\n for line in INFILE:\n if line[0] == \"#\":continue\n line = line.strip()\n lineSP = line.split(deliminator)\n if len(lineSP)<3:\n print(\"Could not read line. Must have resnum, chain, and data columns\")\n continue\n if not atomname_column:\n self.replace_residue_b_factor(lineSP[resnum_column-1], lineSP[chain_column-1], lineSP[data_column-1])\n else:\n if len(lineSP)<4:\n print(\"Could not read line. Must have resnum, chain, atomname, and data columns\")\n continue\n self.replace_atom_b_factor(lineSP[resnum_column-1], lineSP[chain_column-1], lineSP[atomname_column-1], lineSP[data_column-1])\n INFILE.close()\n \n def replace_residue_b_factor(self, resnum: int, chain: str, data: float):\n \"\"\"\n Replaces the b factor of each atom in the residue with data.\n Can be all string representations or not.\n \"\"\"\n \n if type(resnum)!=str:\n resnum = str(resnum)\n if type(data)!=float:\n data=float(data) #In case data is an integer.\n \n #Need to make sure Bfactor column is adjusted correctly.\n \n for line in range(0, len(self.pdb_map)):\n if ((self.pdb_map[line]['residue_number']==resnum) and (self.pdb_map[line]['chain']==chain)):\n self.pdb_map[line]['b_factor']=\"%.2f\"%data\n else:\n continue\n \n \n \n def replace_atom_b_factor(self, resnum: int, chain: str, atomname: str, data: float):\n \"\"\"\n Replaces the b factor of an atom.\n Can be all string representations or not.\n \"\"\"\n \n if type(resnum)!=str:\n resnum = str(resnum)\n if type(data)!=float:\n data=float(data)\n \n #Need to make sure Bfactor column is adjusted correctly.\n \n for line in range(0, len(self.pdb_map)):\n if ((self.pdb_map[line]['residue_number']==resnum) and (self.pdb_map[line]['chain']==chain) and (self.pdb_map[line][\"atom_name\"]==atomname)):\n self.pdb_map[line]['b_factor']=\"%.2f\"%data\n else:\n continue\n\n \n "
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SimiaCryptus/models | [
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940",
"c652a23a650070b71e286f1ded93726670161940"
] | [
"research/audioset/vggish/vggish_train_demo.py",
"official/mnist/mnist_eager_test.py",
"tutorials/rnn/quickdraw/train_model.py",
"research/syntaxnet/dragnn/tools/conll_checkpoint_converter.py",
"research/gan/progressive_gan/data_provider_test.py",
"research/autoencoder/VariationalAutoencoderRunner.py",
"research/vid2depth/ops/icp_train_demo.py",
"research/slim/nets/inception_v4_test.py",
"research/tcn/dataset/webcam.py",
"research/gan/mnist/conditional_eval.py",
"research/slim/nets/inception_v1_test.py",
"research/efficient-hrl/context/context.py",
"research/skip_thoughts/skip_thoughts/vocabulary_expansion.py",
"research/deeplab/datasets/build_voc2012_data.py",
"official/resnet/keras/keras_imagenet_test.py",
"official/mnist/dataset.py",
"research/syntaxnet/dragnn/tools/parser_trainer.py",
"research/gan/progressive_gan/train.py",
"research/slim/nets/inception_v3.py",
"research/lstm_object_detection/utils/config_util_test.py",
"research/fivo/fivo/models/base.py",
"research/efficient-hrl/environments/create_maze_env.py"
] | [
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"A simple demonstration of running VGGish in training mode.\n\nThis is intended as a toy example that demonstrates how to use the VGGish model\ndefinition within a larger model that adds more layers on top, and then train\nthe larger model. If you let VGGish train as well, then this allows you to\nfine-tune the VGGish model parameters for your application. If you don't let\nVGGish train, then you use VGGish as a feature extractor for the layers above\nit.\n\nFor this toy task, we are training a classifier to distinguish between three\nclasses: sine waves, constant signals, and white noise. We generate synthetic\nwaveforms from each of these classes, convert into shuffled batches of log mel\nspectrogram examples with associated labels, and feed the batches into a model\nthat includes VGGish at the bottom and a couple of additional layers on top. We\nalso plumb in labels that are associated with the examples, which feed a label\nloss used for training.\n\nUsage:\n # Run training for 100 steps using a model checkpoint in the default\n # location (vggish_model.ckpt in the current directory). Allow VGGish\n # to get fine-tuned.\n $ python vggish_train_demo.py --num_batches 100\n\n # Same as before but run for fewer steps and don't change VGGish parameters\n # and use a checkpoint in a different location\n $ python vggish_train_demo.py --num_batches 50 \\\n --train_vggish=False \\\n --checkpoint /path/to/model/checkpoint\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom random import shuffle\n\nimport numpy as np\nimport tensorflow as tf\nimport vggish_input\nimport vggish_params\nimport vggish_slim\n\nflags = tf.app.flags\nslim = tf.contrib.slim\n\nflags.DEFINE_integer(\n 'num_batches', 30,\n 'Number of batches of examples to feed into the model. Each batch is of '\n 'variable size and contains shuffled examples of each class of audio.')\n\nflags.DEFINE_boolean(\n 'train_vggish', True,\n 'If True, allow VGGish parameters to change during training, thus '\n 'fine-tuning VGGish. 
If False, VGGish parameters are fixed, thus using '\n 'VGGish as a fixed feature extractor.')\n\nflags.DEFINE_string(\n 'checkpoint', 'vggish_model.ckpt',\n 'Path to the VGGish checkpoint file.')\n\nFLAGS = flags.FLAGS\n\n_NUM_CLASSES = 3\n\n\ndef _get_examples_batch():\n \"\"\"Returns a shuffled batch of examples of all audio classes.\n\n Note that this is just a toy function because this is a simple demo intended\n to illustrate how the training code might work.\n\n Returns:\n a tuple (features, labels) where features is a NumPy array of shape\n [batch_size, num_frames, num_bands] where the batch_size is variable and\n each row is a log mel spectrogram patch of shape [num_frames, num_bands]\n suitable for feeding VGGish, while labels is a NumPy array of shape\n [batch_size, num_classes] where each row is a multi-hot label vector that\n provides the labels for corresponding rows in features.\n \"\"\"\n # Make a waveform for each class.\n num_seconds = 5\n sr = 44100 # Sampling rate.\n t = np.linspace(0, num_seconds, int(num_seconds * sr)) # Time axis.\n # Random sine wave.\n freq = np.random.uniform(100, 1000)\n sine = np.sin(2 * np.pi * freq * t)\n # Random constant signal.\n magnitude = np.random.uniform(-1, 1)\n const = magnitude * t\n # White noise.\n noise = np.random.normal(-1, 1, size=t.shape)\n\n # Make examples of each signal and corresponding labels.\n # Sine is class index 0, Const class index 1, Noise class index 2.\n sine_examples = vggish_input.waveform_to_examples(sine, sr)\n sine_labels = np.array([[1, 0, 0]] * sine_examples.shape[0])\n const_examples = vggish_input.waveform_to_examples(const, sr)\n const_labels = np.array([[0, 1, 0]] * const_examples.shape[0])\n noise_examples = vggish_input.waveform_to_examples(noise, sr)\n noise_labels = np.array([[0, 0, 1]] * noise_examples.shape[0])\n\n # Shuffle (example, label) pairs across all classes.\n all_examples = np.concatenate((sine_examples, const_examples, noise_examples))\n all_labels = np.concatenate((sine_labels, const_labels, noise_labels))\n labeled_examples = list(zip(all_examples, all_labels))\n shuffle(labeled_examples)\n\n # Separate and return the features and labels.\n features = [example for (example, _) in labeled_examples]\n labels = [label for (_, label) in labeled_examples]\n return (features, labels)\n\n\ndef main(_):\n with tf.Graph().as_default(), tf.Session() as sess:\n # Define VGGish.\n embeddings = vggish_slim.define_vggish_slim(FLAGS.train_vggish)\n\n # Define a shallow classification model and associated training ops on top\n # of VGGish.\n with tf.variable_scope('mymodel'):\n # Add a fully connected layer with 100 units.\n num_units = 100\n fc = slim.fully_connected(embeddings, num_units)\n\n # Add a classifier layer at the end, consisting of parallel logistic\n # classifiers, one per class. 
This allows for multi-class tasks.\n logits = slim.fully_connected(\n fc, _NUM_CLASSES, activation_fn=None, scope='logits')\n tf.sigmoid(logits, name='prediction')\n\n # Add training ops.\n with tf.variable_scope('train'):\n global_step = tf.Variable(\n 0, name='global_step', trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES,\n tf.GraphKeys.GLOBAL_STEP])\n\n # Labels are assumed to be fed as a batch multi-hot vectors, with\n # a 1 in the position of each positive class label, and 0 elsewhere.\n labels = tf.placeholder(\n tf.float32, shape=(None, _NUM_CLASSES), name='labels')\n\n # Cross-entropy label loss.\n xent = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=labels, name='xent')\n loss = tf.reduce_mean(xent, name='loss_op')\n tf.summary.scalar('loss', loss)\n\n # We use the same optimizer and hyperparameters as used to train VGGish.\n optimizer = tf.train.AdamOptimizer(\n learning_rate=vggish_params.LEARNING_RATE,\n epsilon=vggish_params.ADAM_EPSILON)\n optimizer.minimize(loss, global_step=global_step, name='train_op')\n\n # Initialize all variables in the model, and then load the pre-trained\n # VGGish checkpoint.\n sess.run(tf.global_variables_initializer())\n vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)\n\n # Locate all the tensors and ops we need for the training loop.\n features_tensor = sess.graph.get_tensor_by_name(\n vggish_params.INPUT_TENSOR_NAME)\n labels_tensor = sess.graph.get_tensor_by_name('mymodel/train/labels:0')\n global_step_tensor = sess.graph.get_tensor_by_name(\n 'mymodel/train/global_step:0')\n loss_tensor = sess.graph.get_tensor_by_name('mymodel/train/loss_op:0')\n train_op = sess.graph.get_operation_by_name('mymodel/train/train_op')\n\n # The training loop.\n for _ in range(FLAGS.num_batches):\n (features, labels) = _get_examples_batch()\n [num_steps, loss, _] = sess.run(\n [global_step_tensor, loss_tensor, train_op],\n feed_dict={features_tensor: features, labels_tensor: labels})\n print('Step %d: loss %g' % (num_steps, loss))\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\nimport tensorflow.contrib.eager as tfe # pylint: disable=g-bad-import-order\nfrom official.mnist import mnist\nfrom official.mnist import mnist_eager\nfrom official.utils.misc import keras_utils\n\n\ndef device():\n return \"/device:GPU:0\" if tfe.num_gpus() else \"/device:CPU:0\"\n\n\ndef data_format():\n return \"channels_first\" if tfe.num_gpus() else \"channels_last\"\n\n\ndef random_dataset():\n batch_size = 64\n images = tf.random_normal([batch_size, 784])\n labels = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32)\n return tf.data.Dataset.from_tensors((images, labels))\n\n\ndef train(defun=False):\n model = mnist.create_model(data_format())\n if defun:\n model.call = tfe.defun(model.call)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n dataset = random_dataset()\n with tf.device(device()):\n mnist_eager.train(model, optimizer, dataset,\n step_counter=tf.train.get_or_create_global_step())\n\n\ndef evaluate(defun=False):\n model = mnist.create_model(data_format())\n dataset = random_dataset()\n if defun:\n model.call = tfe.defun(model.call)\n with tf.device(device()):\n mnist_eager.test(model, dataset)\n\n\nclass MNISTTest(tf.test.TestCase):\n \"\"\"Run tests for MNIST eager loop.\"\"\"\n\n def setUp(self):\n if not keras_utils.is_v2_0():\n tf.compat.v1.enable_v2_behavior()\n super(MNISTTest, self).setUp()\n\n def test_train(self):\n train(defun=False)\n\n def test_evaluate(self):\n evaluate(defun=False)\n\n def test_train_with_defun(self):\n train(defun=True)\n\n def test_evaluate_with_defun(self):\n evaluate(defun=True)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"Binary for training a RNN-based classifier for the Quick, Draw! data.\n\npython train_model.py \\\n --training_data train_data \\\n --eval_data eval_data \\\n --model_dir /tmp/quickdraw_model/ \\\n --cell_type cudnn_lstm\n\nWhen running on GPUs using --cell_type cudnn_lstm is much faster.\n\nThe expected performance is ~75% in 1.5M steps with the default configuration.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport ast\nimport functools\nimport sys\n\nimport tensorflow as tf\n\n\ndef get_num_classes():\n classes = []\n with tf.gfile.GFile(FLAGS.classes_file, \"r\") as f:\n classes = [x for x in f]\n num_classes = len(classes)\n return num_classes\n\n\ndef get_input_fn(mode, tfrecord_pattern, batch_size):\n \"\"\"Creates an input_fn that stores all the data in memory.\n\n Args:\n mode: one of tf.contrib.learn.ModeKeys.{TRAIN, INFER, EVAL}\n tfrecord_pattern: path to a TF record file created using create_dataset.py.\n batch_size: the batch size to output.\n\n Returns:\n A valid input_fn for the model estimator.\n \"\"\"\n\n def _parse_tfexample_fn(example_proto, mode):\n \"\"\"Parse a single record which is expected to be a tensorflow.Example.\"\"\"\n feature_to_type = {\n \"ink\": tf.VarLenFeature(dtype=tf.float32),\n \"shape\": tf.FixedLenFeature([2], dtype=tf.int64)\n }\n if mode != tf.estimator.ModeKeys.PREDICT:\n # The labels won't be available at inference time, so don't add them\n # to the list of feature_columns to be read.\n feature_to_type[\"class_index\"] = tf.FixedLenFeature([1], dtype=tf.int64)\n\n parsed_features = tf.parse_single_example(example_proto, feature_to_type)\n labels = None\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = parsed_features[\"class_index\"]\n parsed_features[\"ink\"] = tf.sparse_tensor_to_dense(parsed_features[\"ink\"])\n return parsed_features, labels\n\n def _input_fn():\n \"\"\"Estimator `input_fn`.\n\n Returns:\n A tuple of:\n - Dictionary of string feature name to `Tensor`.\n - `Tensor` of target labels.\n \"\"\"\n dataset = tf.data.TFRecordDataset.list_files(tfrecord_pattern)\n if mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=10)\n dataset = dataset.repeat()\n # Preprocesses 10 files concurrently and interleaves records from each file.\n dataset = dataset.interleave(\n tf.data.TFRecordDataset,\n cycle_length=10,\n block_length=1)\n dataset = dataset.map(\n functools.partial(_parse_tfexample_fn, mode=mode),\n num_parallel_calls=10)\n dataset = dataset.prefetch(10000)\n if mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=1000000)\n # Our inputs are variable length, so pad them.\n dataset = dataset.padded_batch(\n batch_size, padded_shapes=dataset.output_shapes)\n features, labels = 
dataset.make_one_shot_iterator().get_next()\n return features, labels\n\n return _input_fn\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"Model function for RNN classifier.\n\n This function sets up a neural network which applies convolutional layers (as\n configured with params.num_conv and params.conv_len) to the input.\n The output of the convolutional layers is given to LSTM layers (as configured\n with params.num_layers and params.num_nodes).\n The final state of the all LSTM layers are concatenated and fed to a fully\n connected layer to obtain the final classification scores.\n\n Args:\n features: dictionary with keys: inks, lengths.\n labels: one hot encoded classes\n mode: one of tf.estimator.ModeKeys.{TRAIN, INFER, EVAL}\n params: a parameter dictionary with the following keys: num_layers,\n num_nodes, batch_size, num_conv, conv_len, num_classes, learning_rate.\n\n Returns:\n ModelFnOps for Estimator API.\n \"\"\"\n\n def _get_input_tensors(features, labels):\n \"\"\"Converts the input dict into inks, lengths, and labels tensors.\"\"\"\n # features[ink] is a sparse tensor that is [8, batch_maxlen, 3]\n # inks will be a dense tensor of [8, maxlen, 3]\n # shapes is [batchsize, 2]\n shapes = features[\"shape\"]\n # lengths will be [batch_size]\n lengths = tf.squeeze(\n tf.slice(shapes, begin=[0, 0], size=[params.batch_size, 1]))\n inks = tf.reshape(features[\"ink\"], [params.batch_size, -1, 3])\n if labels is not None:\n labels = tf.squeeze(labels)\n return inks, lengths, labels\n\n def _add_conv_layers(inks, lengths):\n \"\"\"Adds convolution layers.\"\"\"\n convolved = inks\n for i in range(len(params.num_conv)):\n convolved_input = convolved\n if params.batch_norm:\n convolved_input = tf.layers.batch_normalization(\n convolved_input,\n training=(mode == tf.estimator.ModeKeys.TRAIN))\n # Add dropout layer if enabled and not first convolution layer.\n if i > 0 and params.dropout:\n convolved_input = tf.layers.dropout(\n convolved_input,\n rate=params.dropout,\n training=(mode == tf.estimator.ModeKeys.TRAIN))\n convolved = tf.layers.conv1d(\n convolved_input,\n filters=params.num_conv[i],\n kernel_size=params.conv_len[i],\n activation=None,\n strides=1,\n padding=\"same\",\n name=\"conv1d_%d\" % i)\n return convolved, lengths\n\n def _add_regular_rnn_layers(convolved, lengths):\n \"\"\"Adds RNN layers.\"\"\"\n if params.cell_type == \"lstm\":\n cell = tf.nn.rnn_cell.BasicLSTMCell\n elif params.cell_type == \"block_lstm\":\n cell = tf.contrib.rnn.LSTMBlockCell\n cells_fw = [cell(params.num_nodes) for _ in range(params.num_layers)]\n cells_bw = [cell(params.num_nodes) for _ in range(params.num_layers)]\n if params.dropout > 0.0:\n cells_fw = [tf.contrib.rnn.DropoutWrapper(cell) for cell in cells_fw]\n cells_bw = [tf.contrib.rnn.DropoutWrapper(cell) for cell in cells_bw]\n outputs, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n cells_fw=cells_fw,\n cells_bw=cells_bw,\n inputs=convolved,\n sequence_length=lengths,\n dtype=tf.float32,\n scope=\"rnn_classification\")\n return outputs\n\n def _add_cudnn_rnn_layers(convolved):\n \"\"\"Adds CUDNN LSTM layers.\"\"\"\n # Convolutions output [B, L, Ch], while CudnnLSTM is time-major.\n convolved = tf.transpose(convolved, [1, 0, 2])\n lstm = tf.contrib.cudnn_rnn.CudnnLSTM(\n num_layers=params.num_layers,\n num_units=params.num_nodes,\n dropout=params.dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0,\n direction=\"bidirectional\")\n outputs, _ = lstm(convolved)\n # Convert back from time-major outputs to batch-major 
outputs.\n outputs = tf.transpose(outputs, [1, 0, 2])\n return outputs\n\n def _add_rnn_layers(convolved, lengths):\n \"\"\"Adds recurrent neural network layers depending on the cell type.\"\"\"\n if params.cell_type != \"cudnn_lstm\":\n outputs = _add_regular_rnn_layers(convolved, lengths)\n else:\n outputs = _add_cudnn_rnn_layers(convolved)\n # outputs is [batch_size, L, N] where L is the maximal sequence length and N\n # the number of nodes in the last layer.\n mask = tf.tile(\n tf.expand_dims(tf.sequence_mask(lengths, tf.shape(outputs)[1]), 2),\n [1, 1, tf.shape(outputs)[2]])\n zero_outside = tf.where(mask, outputs, tf.zeros_like(outputs))\n outputs = tf.reduce_sum(zero_outside, axis=1)\n return outputs\n\n def _add_fc_layers(final_state):\n \"\"\"Adds a fully connected layer.\"\"\"\n return tf.layers.dense(final_state, params.num_classes)\n\n # Build the model.\n inks, lengths, labels = _get_input_tensors(features, labels)\n convolved, lengths = _add_conv_layers(inks, lengths)\n final_state = _add_rnn_layers(convolved, lengths)\n logits = _add_fc_layers(final_state)\n # Add the loss.\n cross_entropy = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits))\n # Add the optimizer.\n train_op = tf.contrib.layers.optimize_loss(\n loss=cross_entropy,\n global_step=tf.train.get_global_step(),\n learning_rate=params.learning_rate,\n optimizer=\"Adam\",\n # some gradient clipping stabilizes training in the beginning.\n clip_gradients=params.gradient_clipping_norm,\n summaries=[\"learning_rate\", \"loss\", \"gradients\", \"gradient_norm\"])\n # Compute current predictions.\n predictions = tf.argmax(logits, axis=1)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"logits\": logits, \"predictions\": predictions},\n loss=cross_entropy,\n train_op=train_op,\n eval_metric_ops={\"accuracy\": tf.metrics.accuracy(labels, predictions)})\n\n\ndef create_estimator_and_specs(run_config):\n \"\"\"Creates an Experiment configuration based on the estimator and input fn.\"\"\"\n model_params = tf.contrib.training.HParams(\n num_layers=FLAGS.num_layers,\n num_nodes=FLAGS.num_nodes,\n batch_size=FLAGS.batch_size,\n num_conv=ast.literal_eval(FLAGS.num_conv),\n conv_len=ast.literal_eval(FLAGS.conv_len),\n num_classes=get_num_classes(),\n learning_rate=FLAGS.learning_rate,\n gradient_clipping_norm=FLAGS.gradient_clipping_norm,\n cell_type=FLAGS.cell_type,\n batch_norm=FLAGS.batch_norm,\n dropout=FLAGS.dropout)\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params=model_params)\n\n train_spec = tf.estimator.TrainSpec(input_fn=get_input_fn(\n mode=tf.estimator.ModeKeys.TRAIN,\n tfrecord_pattern=FLAGS.training_data,\n batch_size=FLAGS.batch_size), max_steps=FLAGS.steps)\n\n eval_spec = tf.estimator.EvalSpec(input_fn=get_input_fn(\n mode=tf.estimator.ModeKeys.EVAL,\n tfrecord_pattern=FLAGS.eval_data,\n batch_size=FLAGS.batch_size))\n\n return estimator, train_spec, eval_spec\n\n\ndef main(unused_args):\n estimator, train_spec, eval_spec = create_estimator_and_specs(\n run_config=tf.estimator.RunConfig(\n model_dir=FLAGS.model_dir,\n save_checkpoints_secs=300,\n save_summary_steps=100))\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n parser.add_argument(\n \"--training_data\",\n type=str,\n default=\"\",\n help=\"Path to training data (tf.Example in TFRecord 
format)\")\n parser.add_argument(\n \"--eval_data\",\n type=str,\n default=\"\",\n help=\"Path to evaluation data (tf.Example in TFRecord format)\")\n parser.add_argument(\n \"--classes_file\",\n type=str,\n default=\"\",\n help=\"Path to a file with the classes - one class per line\")\n parser.add_argument(\n \"--num_layers\",\n type=int,\n default=3,\n help=\"Number of recurrent neural network layers.\")\n parser.add_argument(\n \"--num_nodes\",\n type=int,\n default=128,\n help=\"Number of node per recurrent network layer.\")\n parser.add_argument(\n \"--num_conv\",\n type=str,\n default=\"[48, 64, 96]\",\n help=\"Number of conv layers along with number of filters per layer.\")\n parser.add_argument(\n \"--conv_len\",\n type=str,\n default=\"[5, 5, 3]\",\n help=\"Length of the convolution filters.\")\n parser.add_argument(\n \"--cell_type\",\n type=str,\n default=\"lstm\",\n help=\"Cell type used for rnn layers: cudnn_lstm, lstm or block_lstm.\")\n parser.add_argument(\n \"--batch_norm\",\n type=\"bool\",\n default=\"False\",\n help=\"Whether to enable batch normalization or not.\")\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=0.0001,\n help=\"Learning rate used for training.\")\n parser.add_argument(\n \"--gradient_clipping_norm\",\n type=float,\n default=9.0,\n help=\"Gradient clipping norm used during training.\")\n parser.add_argument(\n \"--dropout\",\n type=float,\n default=0.3,\n help=\"Dropout used for convolutions and bidi lstm layers.\")\n parser.add_argument(\n \"--steps\",\n type=int,\n default=100000,\n help=\"Number of training steps.\")\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=8,\n help=\"Batch size to use for training/evaluation.\")\n parser.add_argument(\n \"--model_dir\",\n type=str,\n default=\"\",\n help=\"Path for storing the model checkpoints.\")\n parser.add_argument(\n \"--self_test\",\n type=\"bool\",\n default=\"False\",\n help=\"Whether to enable batch normalization or not.\")\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Conversion script for CoNLL checkpoints to DRAGNN SavedModel format.\n\nThis script loads and finishes a CoNLL checkpoint, then exports it as a\nSavedModel. It expects that the CoNLL RNN cells have been updated using the\nRNN update script.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\nfrom absl import flags\nfrom dragnn.protos import spec_pb2\nfrom dragnn.python import dragnn_model_saver_lib as saver_lib\nfrom dragnn.python import spec_builder\nfrom google.protobuf import text_format\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('master_spec', None, 'Path to task context with '\n 'inputs and parameters for feature extractors.')\nflags.DEFINE_string('params_path', None, 'Path to trained model parameters.')\nflags.DEFINE_string('export_path', '', 'Output path for exported servo model.')\nflags.DEFINE_string('resource_path', '',\n 'Base directory for resources in the master spec.')\nflags.DEFINE_bool('export_moving_averages', True,\n 'Whether to export the moving average parameters.')\n\n\ndef export(master_spec_path, params_path, resource_path, export_path,\n export_moving_averages):\n \"\"\"Restores a model and exports it in SavedModel form.\n\n This method loads a graph specified by the spec at master_spec_path and the\n params in params_path. It then saves the model in SavedModel format to the\n location specified in export_path.\n\n Args:\n master_spec_path: Path to a proto-text master spec.\n params_path: Path to the parameters file to export.\n resource_path: Path to resources in the master spec.\n export_path: Path to export the SavedModel to.\n export_moving_averages: Whether to export the moving average parameters.\n \"\"\"\n # Old CoNLL checkpoints did not need a known-word-map. 
Create a temporary if\n # that file is missing.\n if not tf.gfile.Exists(os.path.join(resource_path, 'known-word-map')):\n with tf.gfile.FastGFile(os.path.join(resource_path, 'known-word-map'),\n 'w') as out_file:\n out_file.write('This file intentionally left blank.')\n\n graph = tf.Graph()\n master_spec = spec_pb2.MasterSpec()\n with tf.gfile.FastGFile(master_spec_path) as fin:\n text_format.Parse(fin.read(), master_spec)\n\n # This is a workaround for an issue where the segmenter master-spec had a\n # spurious resource in it; this resource was not respected in the spec-builder\n # and ended up crashing the saver (since it didn't really exist).\n for component in master_spec.component:\n del component.resource[:]\n\n spec_builder.complete_master_spec(master_spec, None, resource_path)\n\n # Remove '/' if it exists at the end of the export path, ensuring that\n # path utils work correctly.\n stripped_path = export_path.rstrip('/')\n saver_lib.clean_output_paths(stripped_path)\n\n short_to_original = saver_lib.shorten_resource_paths(master_spec)\n saver_lib.export_master_spec(master_spec, graph)\n saver_lib.export_to_graph(master_spec, params_path, stripped_path, graph,\n export_moving_averages)\n saver_lib.export_assets(master_spec, short_to_original, stripped_path)\n\n\ndef main(unused_argv):\n # Run the exporter.\n export(FLAGS.master_spec, FLAGS.params_path, FLAGS.resource_path,\n FLAGS.export_path, FLAGS.export_moving_averages)\n tf.logging.info('Export complete.')\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport data_provider\nimport numpy as np\nimport tensorflow as tf\nfrom absl import flags\n\n\nclass DataProviderTest(tf.test.TestCase):\n\n def setUp(self):\n super(DataProviderTest, self).setUp()\n self.testdata_dir = os.path.join(\n flags.FLAGS.test_srcdir,\n 'google3/third_party/tensorflow_models/gan/progressive_gan/testdata/')\n\n def test_normalize_image(self):\n image_np = np.asarray([0, 255, 210], dtype=np.uint8)\n normalized_image = data_provider.normalize_image(tf.constant(image_np))\n self.assertEqual(normalized_image.dtype, tf.float32)\n self.assertEqual(normalized_image.shape.as_list(), [3])\n with self.test_session(use_gpu=True) as sess:\n normalized_image_np = sess.run(normalized_image)\n self.assertNDArrayNear(normalized_image_np, [-1, 1, 0.6470588235], 1.0e-6)\n\n def test_sample_patch_large_patch_returns_upscaled_image(self):\n image_np = np.reshape(np.arange(2 * 2), [2, 2, 1])\n image = tf.constant(image_np, dtype=tf.float32)\n image_patch = data_provider.sample_patch(\n image, patch_height=3, patch_width=3, colors=1)\n with self.test_session(use_gpu=True) as sess:\n image_patch_np = sess.run(image_patch)\n expected_np = np.asarray([[[0.], [0.66666669], [1.]], [[1.33333337], [2.],\n [2.33333349]],\n [[2.], [2.66666675], [3.]]])\n self.assertNDArrayNear(image_patch_np, expected_np, 1.0e-6)\n\n def test_sample_patch_small_patch_returns_downscaled_image(self):\n image_np = np.reshape(np.arange(3 * 3), [3, 3, 1])\n image = tf.constant(image_np, dtype=tf.float32)\n image_patch = data_provider.sample_patch(\n image, patch_height=2, patch_width=2, colors=1)\n with self.test_session(use_gpu=True) as sess:\n image_patch_np = sess.run(image_patch)\n expected_np = np.asarray([[[0.], [1.5]], [[4.5], [6.]]])\n self.assertNDArrayNear(image_patch_np, expected_np, 1.0e-6)\n\n def test_batch_images(self):\n image_np = np.reshape(np.arange(3 * 3), [3, 3, 1])\n image = tf.constant(image_np, dtype=tf.float32)\n images = data_provider.batch_images(\n image,\n patch_height=2,\n patch_width=2,\n colors=1,\n batch_size=2,\n shuffle=False,\n num_threads=1)\n with self.test_session(use_gpu=True) as sess:\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n expected_np = np.asarray([[[[0.], [1.5]], [[4.5], [6.]]], [[[0.], [1.5]],\n [[4.5], [6.]]]])\n self.assertNDArrayNear(images_np, expected_np, 1.0e-6)\n\n def test_provide_data(self):\n images = data_provider.provide_data(\n 'mnist',\n 'train',\n dataset_dir=self.testdata_dir,\n batch_size=2,\n shuffle=False,\n patch_height=3,\n patch_width=3,\n colors=1)\n self.assertEqual(images.shape.as_list(), [2, 3, 3, 1])\n with self.test_session(use_gpu=True) as sess:\n with 
tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n self.assertEqual(images_np.shape, (2, 3, 3, 1))\n\n def test_provide_data_from_image_files_a_single_pattern(self):\n file_pattern = os.path.join(self.testdata_dir, '*.jpg')\n images = data_provider.provide_data_from_image_files(\n file_pattern,\n batch_size=2,\n shuffle=False,\n patch_height=3,\n patch_width=3,\n colors=1)\n self.assertEqual(images.shape.as_list(), [2, 3, 3, 1])\n with self.test_session(use_gpu=True) as sess:\n sess.run(tf.local_variables_initializer())\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n self.assertEqual(images_np.shape, (2, 3, 3, 1))\n\n def test_provide_data_from_image_files_a_list_of_patterns(self):\n file_pattern = [os.path.join(self.testdata_dir, '*.jpg')]\n images = data_provider.provide_data_from_image_files(\n file_pattern,\n batch_size=2,\n shuffle=False,\n patch_height=3,\n patch_width=3,\n colors=1)\n self.assertEqual(images.shape.as_list(), [2, 3, 3, 1])\n with self.test_session(use_gpu=True) as sess:\n sess.run(tf.local_variables_initializer())\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(images)\n self.assertEqual(images_np.shape, (2, 3, 3, 1))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom autoencoder_models.VariationalAutoencoder import VariationalAutoencoder\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n\ndef min_max_scale(X_train, X_test):\n preprocessor = prep.MinMaxScaler().fit(X_train)\n X_train = preprocessor.transform(X_train)\n X_test = preprocessor.transform(X_test)\n return X_train, X_test\n\n\ndef get_random_block_from_data(data, batch_size):\n start_index = np.random.randint(0, len(data) - batch_size)\n return data[start_index:(start_index + batch_size)]\n\n\nX_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)\n\nn_samples = int(mnist.train.num_examples)\ntraining_epochs = 20\nbatch_size = 128\ndisplay_step = 1\n\nautoencoder = VariationalAutoencoder(\n n_input=784,\n n_hidden=200,\n optimizer=tf.train.AdamOptimizer(learning_rate = 0.001))\n\nfor epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(n_samples / batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_xs = get_random_block_from_data(X_train, batch_size)\n\n # Fit training using batch data\n cost = autoencoder.partial_fit(batch_xs)\n # Compute average loss\n avg_cost += cost / n_samples * batch_size\n\n # Display logs per epoch step\n if epoch % display_step == 0:\n print(\"Epoch:\", '%d,' % (epoch + 1),\n \"Cost:\", \"{:.9f}\".format(avg_cost))\n\nprint(\"Total cost: \" + str(autoencoder.calc_total_cost(X_test)))\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Verify the op's ability to discover a hidden transformation and residual.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport time\n\nimport icp_util\nimport numpy as np\nimport os.path\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom icp_op import icp\n\nFLAGS = flags.FLAGS\nflags.DEFINE_integer('batch_size', 4, 'Batch size.')\nflags.DEFINE_float('learning_rate', 0.1, 'Learning rate.')\nflags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.')\nflags.DEFINE_string('train_dir', '/tmp/icp_train_demo',\n 'Directory to save event files for TensorBoard.')\n\n# Every training step feeds the model two points clouds A, B, such that\n# A = random_transform . sample_cloud\n# B = (SECRET_EGO_MOTION . A) + cone(CENTER, RADIUS, SECRET_RES_HEIGHT).\n# The ICP op gradients should help the program discover the values for\n# SECRET_EGO_MOTION and SECRET_RES_HEIGHT to get the best alignment for A, B.\nSECRET_EGO_MOTION = [0.0, 0.0, 0.1, 0.0, 0.0, 0.0]\nRES_CENTER = [0.103, 1.954, 0]\nRES_RADIUS = 10.0\nSECRET_RES_HEIGHT = 0.1\n\n\nclass DataProducer(object):\n \"\"\"Generates training data.\"\"\"\n\n def __init__(self):\n pass\n\n @classmethod\n def setup(cls):\n \"\"\"Open a KITTI video and read its point clouds.\"\"\"\n lidar_cloud_path = os.path.join(FLAGS.test_srcdir,\n icp_util.LIDAR_CLOUD_PATH)\n cls.sample_cloud = np.load(lidar_cloud_path)\n logging.info('sample_cloud: %s', cls.sample_cloud)\n x_min = np.min(cls.sample_cloud[:, 0])\n x_max = np.max(cls.sample_cloud[:, 0])\n y_min = np.min(cls.sample_cloud[:, 1])\n y_max = np.max(cls.sample_cloud[:, 1])\n z_min = np.min(cls.sample_cloud[:, 2])\n z_max = np.max(cls.sample_cloud[:, 2])\n logging.info('x: %s - %s', x_min, x_max)\n logging.info('y: %s - %s', y_min, y_max)\n logging.info('z: %s - %s', z_min, z_max)\n\n @classmethod\n def random_transform(cls):\n tx = random.uniform(-0.2, 0.2)\n ty = random.uniform(-0.2, 0.2)\n tz = random.uniform(-0.9, 0.9)\n rx = random.uniform(-0.2, 0.2) * np.pi\n ry = random.uniform(-0.2, 0.2) * np.pi\n rz = random.uniform(-0.2, 0.2) * np.pi\n transform = [tx, ty, tz, rx, ry, rz]\n return transform\n\n @classmethod\n def next_batch(cls, batch_size):\n \"\"\"Returns a training batch.\"\"\"\n source_items = []\n target_items = []\n for _ in range(batch_size):\n source_cloud = icp_util.np_transform_cloud_xyz(cls.sample_cloud,\n cls.random_transform())\n source_items.append(source_cloud)\n dist_to_center = np.linalg.norm((source_cloud - RES_CENTER)[:, :2],\n axis=1, keepdims=True)\n res = np.maximum(RES_RADIUS - dist_to_center, 0.0) / RES_RADIUS\n res *= SECRET_RES_HEIGHT\n # x = 0, y = 0, z = res.\n res = np.concatenate((np.zeros_like(res), np.zeros_like(res), 
res),\n axis=1)\n target_cloud = icp_util.np_transform_cloud_xyz(source_cloud + res,\n SECRET_EGO_MOTION)\n target_items.append(target_cloud)\n return np.stack(source_items), np.stack(target_items)\n\n\ndef placeholder_inputs(batch_size):\n cloud_shape = (batch_size, DataProducer.sample_cloud.shape[0], 3)\n source_placeholder = tf.placeholder(tf.float32, shape=cloud_shape)\n target_placeholder = tf.placeholder(tf.float32, shape=cloud_shape)\n return source_placeholder, target_placeholder\n\n\ndef fill_feed_dict(source_placeholder, target_placeholder):\n # Create the feed_dict for the placeholders filled with the next\n # `batch size` examples.\n source_feed, target_feed = DataProducer.next_batch(FLAGS.batch_size)\n feed_dict = {\n source_placeholder: source_feed,\n target_placeholder: target_feed,\n }\n return feed_dict\n\n\ndef run_training():\n \"\"\"Train model for a number of steps.\"\"\"\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n DataProducer.setup()\n source_placeholder, target_placeholder = placeholder_inputs(\n FLAGS.batch_size)\n transform, residual = inference(source_placeholder, target_placeholder)\n loss = loss_func(transform, residual)\n train_op = training(loss, FLAGS.learning_rate)\n summary_op = tf.summary.merge_all()\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)\n sess.run(init)\n # Start the training loop.\n for step in range(FLAGS.max_steps):\n start_time = time.time()\n feed_dict = fill_feed_dict(source_placeholder, target_placeholder)\n _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)\n duration = time.time() - start_time\n # Print status to stdout.\n print('Step %d: loss = %f (%.2f sec)' % (step, loss_value, duration))\n # Update the events file.\n summary_str = sess.run(summary_op, feed_dict=feed_dict)\n summary_writer.add_summary(summary_str, step)\n summary_writer.flush()\n\n\ndef inference(source, target):\n \"\"\"Builds model.\"\"\"\n ego_motion = tf.Variable(tf.zeros([6]), name='ego_motion')\n res_height = tf.Variable(tf.fill([1], 0.0), name='res_height')\n tf.summary.scalar('tx', ego_motion[0])\n tf.summary.scalar('ty', ego_motion[1])\n tf.summary.scalar('tz', ego_motion[2])\n tf.summary.scalar('rx', ego_motion[3])\n tf.summary.scalar('ry', ego_motion[4])\n tf.summary.scalar('rz', ego_motion[5])\n tf.summary.scalar('res_height', res_height[0])\n\n dist_to_center = tf.norm((source - RES_CENTER)[:, :, :2], axis=2,\n keep_dims=True)\n res = tf.maximum(RES_RADIUS - dist_to_center, 0.0) / RES_RADIUS\n res *= res_height\n res = tf.concat([tf.zeros_like(res), tf.zeros_like(res), res], axis=2)\n\n shifted_source = source + res\n ego_motion = tf.stack([ego_motion] * FLAGS.batch_size)\n transform, residual = icp(shifted_source, ego_motion, target)\n return transform, residual\n\n\ndef loss_func(transform, residual):\n return (tf.reduce_mean(tf.square(transform), name='transform_mean') +\n tf.reduce_mean(tf.square(residual), name='residual_mean'))\n\n\ndef training(loss, learning_rate):\n tf.summary.scalar('loss', loss)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # Create a variable to track the global step.\n global_step = tf.Variable(0, name='global_step', trainable=False)\n # Use the optimizer to apply the gradients that minimize the loss\n # (and also increment the global step counter) as a single training step.\n train_op = optimizer.minimize(loss, 
global_step=global_step)\n return train_op\n\n\ndef main(_):\n run_training()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.inception_v4.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom nets import inception\n\n\nclass InceptionTest(tf.test.TestCase):\n\n def testBuildLogits(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_v4(inputs, num_classes)\n auxlogits = end_points['AuxLogits']\n predictions = end_points['Predictions']\n self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))\n self.assertListEqual(auxlogits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue(predictions.op.name.startswith(\n 'InceptionV4/Logits/Predictions'))\n self.assertListEqual(predictions.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testBuildPreLogitsNetwork(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = None\n inputs = tf.random_uniform((batch_size, height, width, 3))\n net, end_points = inception.inception_v4(inputs, num_classes)\n self.assertTrue(net.op.name.startswith('InceptionV4/Logits/AvgPool'))\n self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])\n self.assertFalse('Logits' in end_points)\n self.assertFalse('Predictions' in end_points)\n\n def testBuildWithoutAuxLogits(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, endpoints = inception.inception_v4(inputs, num_classes,\n create_aux_logits=False)\n self.assertFalse('AuxLogits' in endpoints)\n self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testAllEndPointsShapes(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n inputs = tf.random_uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_v4(inputs, num_classes)\n endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],\n 'Conv2d_2a_3x3': [batch_size, 147, 147, 32],\n 'Conv2d_2b_3x3': [batch_size, 147, 147, 64],\n 'Mixed_3a': [batch_size, 73, 73, 160],\n 'Mixed_4a': [batch_size, 71, 71, 192],\n 'Mixed_5a': [batch_size, 35, 35, 384],\n # 4 x Inception-A blocks\n 'Mixed_5b': [batch_size, 35, 35, 384],\n 'Mixed_5c': [batch_size, 35, 35, 384],\n 'Mixed_5d': [batch_size, 35, 35, 384],\n 'Mixed_5e': [batch_size, 35, 35, 384],\n # Reduction-A block\n 'Mixed_6a': [batch_size, 17, 17, 1024],\n # 7 x Inception-B blocks\n 'Mixed_6b': [batch_size, 17, 17, 1024],\n 'Mixed_6c': [batch_size, 17, 17, 1024],\n 'Mixed_6d': 
[batch_size, 17, 17, 1024],\n 'Mixed_6e': [batch_size, 17, 17, 1024],\n 'Mixed_6f': [batch_size, 17, 17, 1024],\n 'Mixed_6g': [batch_size, 17, 17, 1024],\n 'Mixed_6h': [batch_size, 17, 17, 1024],\n # Reduction-A block\n 'Mixed_7a': [batch_size, 8, 8, 1536],\n # 3 x Inception-C blocks\n 'Mixed_7b': [batch_size, 8, 8, 1536],\n 'Mixed_7c': [batch_size, 8, 8, 1536],\n 'Mixed_7d': [batch_size, 8, 8, 1536],\n # Logits and predictions\n 'AuxLogits': [batch_size, num_classes],\n 'global_pool': [batch_size, 1, 1, 1536],\n 'PreLogitsFlatten': [batch_size, 1536],\n 'Logits': [batch_size, num_classes],\n 'Predictions': [batch_size, num_classes]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testBuildBaseNetwork(self):\n batch_size = 5\n height, width = 299, 299\n inputs = tf.random_uniform((batch_size, height, width, 3))\n net, end_points = inception.inception_v4_base(inputs)\n self.assertTrue(net.op.name.startswith(\n 'InceptionV4/Mixed_7d'))\n self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])\n expected_endpoints = [\n 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',\n 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',\n 'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',\n 'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',\n 'Mixed_7b', 'Mixed_7c', 'Mixed_7d']\n self.assertItemsEqual(end_points.keys(), expected_endpoints)\n for name, op in end_points.items():\n self.assertTrue(op.name.startswith('InceptionV4/' + name))\n\n def testBuildOnlyUpToFinalEndpoint(self):\n batch_size = 5\n height, width = 299, 299\n all_endpoints = [\n 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',\n 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',\n 'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',\n 'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',\n 'Mixed_7b', 'Mixed_7c', 'Mixed_7d']\n for index, endpoint in enumerate(all_endpoints):\n with tf.Graph().as_default():\n inputs = tf.random_uniform((batch_size, height, width, 3))\n out_tensor, end_points = inception.inception_v4_base(\n inputs, final_endpoint=endpoint)\n self.assertTrue(out_tensor.op.name.startswith(\n 'InceptionV4/' + endpoint))\n self.assertItemsEqual(all_endpoints[:index+1], end_points.keys())\n\n def testVariablesSetDevice(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n inputs = tf.random_uniform((batch_size, height, width, 3))\n # Force all Variables to reside on the device.\n with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):\n inception.inception_v4(inputs, num_classes)\n with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):\n inception.inception_v4(inputs, num_classes)\n for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):\n self.assertDeviceEqual(v.device, '/cpu:0')\n for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):\n self.assertDeviceEqual(v.device, '/gpu:0')\n\n def testHalfSizeImages(self):\n batch_size = 5\n height, width = 150, 150\n num_classes = 1000\n inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_v4(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n 
[batch_size, num_classes])\n pre_pool = end_points['Mixed_7d']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 3, 3, 1536])\n\n def testGlobalPool(self):\n batch_size = 1\n height, width = 350, 400\n num_classes = 1000\n inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_v4(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Mixed_7d']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 9, 11, 1536])\n\n def testGlobalPoolUnknownImageShape(self):\n batch_size = 1\n height, width = 350, 400\n num_classes = 1000\n with self.test_session() as sess:\n inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))\n logits, end_points = inception.inception_v4(\n inputs, num_classes, create_aux_logits=False)\n self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Mixed_7d']\n images = tf.random_uniform((batch_size, height, width, 3))\n sess.run(tf.global_variables_initializer())\n logits_out, pre_pool_out = sess.run([logits, pre_pool],\n {inputs: images.eval()})\n self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))\n self.assertTupleEqual(pre_pool_out.shape, (batch_size, 9, 11, 1536))\n\n def testUnknownBatchSize(self):\n batch_size = 1\n height, width = 299, 299\n num_classes = 1000\n with self.test_session() as sess:\n inputs = tf.placeholder(tf.float32, (None, height, width, 3))\n logits, _ = inception.inception_v4(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, num_classes])\n images = tf.random_uniform((batch_size, height, width, 3))\n sess.run(tf.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEquals(output.shape, (batch_size, num_classes))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 299, 299\n num_classes = 1000\n with self.test_session() as sess:\n eval_inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, _ = inception.inception_v4(eval_inputs,\n num_classes,\n is_training=False)\n predictions = tf.argmax(logits, 1)\n sess.run(tf.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (batch_size,))\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 5\n eval_batch_size = 2\n height, width = 150, 150\n num_classes = 1000\n with self.test_session() as sess:\n train_inputs = tf.random_uniform((train_batch_size, height, width, 3))\n inception.inception_v4(train_inputs, num_classes)\n eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))\n logits, _ = inception.inception_v4(eval_inputs,\n num_classes,\n is_training=False,\n reuse=True)\n predictions = tf.argmax(logits, 1)\n sess.run(tf.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (eval_batch_size,))\n\n def testNoBatchNormScaleByDefault(self):\n height, width = 299, 299\n num_classes = 1000\n inputs = tf.placeholder(tf.float32, (1, height, width, 3))\n with tf.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):\n inception.inception_v4(inputs, num_classes, is_training=False)\n\n self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])\n\n def 
testBatchNormScale(self):\n height, width = 299, 299\n num_classes = 1000\n inputs = tf.placeholder(tf.float32, (1, height, width, 3))\n with tf.contrib.slim.arg_scope(\n inception.inception_v4_arg_scope(batch_norm_scale=True)):\n inception.inception_v4(inputs, num_classes, is_training=False)\n\n gamma_names = set(\n v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))\n self.assertGreater(len(gamma_names), 0)\n for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):\n self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"Collect images from multiple simultaneous webcams.\n\nUsage:\n\n1. Define some environment variables that describe what you're collecting.\ndataset=your_dataset_name\nmode=train\nnum_views=2\nviddir=/tmp/tcn/videos\ntmp_imagedir=/tmp/tcn/tmp_images\ndebug_vids=1\n\n2. Run the script.\nexport DISPLAY=:0.0 && \\\nroot=learning/brain/research/tcn && \\\nbazel build -c opt --copt=-mavx tcn/webcam && \\\nbazel-bin/tcn/webcam \\\n--dataset $dataset \\\n--mode $mode \\\n--num_views $num_views \\\n--tmp_imagedir $tmp_imagedir \\\n--viddir $viddir \\\n--debug_vids 1 \\\n--logtostderr\n\n3. Hit Ctrl-C when done collecting, upon which the script will compile videos\nfor each view and optionally a debug video concatenating multiple\nsimultaneous views.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport time\nfrom multiprocessing import Process\n\nimport cv2\nimport matplotlib\n\nmatplotlib.use('TkAgg')\nfrom matplotlib import animation # pylint: disable=g-import-not-at-top\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom six.moves import input\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ntf.flags.DEFINE_string('dataset', '', 'Name of the dataset we`re collecting.')\ntf.flags.DEFINE_string('mode', '',\n 'What type of data we`re collecting. E.g.:'\n '`train`,`valid`,`test`, or `demo`')\ntf.flags.DEFINE_string('seqname', '',\n 'Name of this sequence. 
If empty, the script will use'\n 'the name seq_N+1 where seq_N is the latest'\n 'integer-named sequence in the videos directory.')\ntf.flags.DEFINE_integer('num_views', 2,\n 'Number of webcams.')\ntf.flags.DEFINE_string('tmp_imagedir', '/tmp/tcn/data',\n 'Temporary outdir to write images.')\ntf.flags.DEFINE_string('viddir', '/tmp/tcn/videos',\n 'Base directory to write debug videos.')\ntf.flags.DEFINE_boolean('debug_vids', True,\n 'Whether to generate debug vids with multiple'\n 'concatenated views.')\ntf.flags.DEFINE_string('debug_lhs_view', '0',\n 'Which viewpoint to use for the lhs video.')\ntf.flags.DEFINE_string('debug_rhs_view', '1',\n 'Which viewpoint to use for the rhs video.')\ntf.flags.DEFINE_integer('height', 1080, 'Raw input height.')\ntf.flags.DEFINE_integer('width', 1920, 'Raw input width.')\ntf.flags.DEFINE_string('webcam_ports', None,\n 'Comma-separated list of each webcam usb port.')\nFLAGS = tf.app.flags.FLAGS\n\n\nclass ImageQueue(object):\n \"\"\"An image queue holding each stream's most recent image.\n\n Basically implements a process-safe collections.deque(maxlen=1).\n \"\"\"\n\n def __init__(self):\n self.lock = multiprocessing.Lock()\n self._queue = multiprocessing.Queue(maxsize=1)\n\n def append(self, data):\n with self.lock:\n if self._queue.full():\n # Pop the first element.\n _ = self._queue.get()\n self._queue.put(data)\n\n def get(self):\n with self.lock:\n return self._queue.get()\n\n def empty(self):\n return self._queue.empty()\n\n def close(self):\n return self._queue.close()\n\n\nclass WebcamViewer(object):\n \"\"\"A class which displays a live stream from the webcams.\"\"\"\n\n def __init__(self, display_queues):\n \"\"\"Create a WebcamViewer instance.\"\"\"\n self.height = FLAGS.height\n self.width = FLAGS.width\n self.queues = display_queues\n\n def _get_next_images(self):\n \"\"\"Gets the next image to display.\"\"\"\n # Wait for one image per view.\n not_found = True\n while not_found:\n if True in [q.empty() for q in self.queues]:\n # At least one image queue is empty; wait.\n continue\n else:\n # Retrieve the images.\n latest = [q.get() for q in self.queues]\n combined = np.concatenate(latest, axis=1)\n not_found = False\n return combined\n\n def run(self):\n \"\"\"Displays the Kcam live stream in a window.\n\n This function blocks until the window is closed.\n \"\"\"\n fig, rgb_axis = plt.subplots()\n\n image_rows = self.height\n image_cols = self.width * FLAGS.num_views\n initial_image = np.zeros((image_rows, image_cols, 3))\n rgb_image = rgb_axis.imshow(initial_image, interpolation='nearest')\n\n def update_figure(frame_index):\n \"\"\"Animation function for matplotlib FuncAnimation. 
Updates the image.\n\n Args:\n frame_index: The frame number.\n Returns:\n An iterable of matplotlib drawables to clear.\n \"\"\"\n _ = frame_index\n images = self._get_next_images()\n images = images[..., [2, 1, 0]]\n rgb_image.set_array(images)\n return rgb_image,\n\n # We must keep a reference to this animation in order for it to work.\n unused_animation = animation.FuncAnimation(\n fig, update_figure, interval=50, blit=True)\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n plt.show()\n\n\ndef reconcile(queues, write_queue):\n \"\"\"Gets a list of concurrent images from each view queue.\n\n This waits for latest images to be available in all view queues,\n then continuously:\n - Creates a list of current images for each view.\n - Writes the list to a queue of image lists to write to disk.\n Args:\n queues: A list of `ImageQueues`, holding the latest image from each webcam.\n write_queue: A multiprocessing.Queue holding lists of concurrent images.\n \"\"\"\n # Loop forever.\n while True:\n # Wait till all queues have an image.\n if True in [q.empty() for q in queues]:\n continue\n else:\n # Retrieve all views' images.\n latest = [q.get() for q in queues]\n # Copy the list of all concurrent images to the write queue.\n write_queue.put(latest)\n\n\ndef persist(write_queue, view_dirs):\n \"\"\"Pulls lists of concurrent images off a write queue, writes them to disk.\n\n Args:\n write_queue: A multiprocessing.Queue holding lists of concurrent images;\n one image per view.\n view_dirs: A list of strings, holding the output image directories for each\n view.\n \"\"\"\n timestep = 0\n while True:\n # Wait till there is work in the queue.\n if write_queue.empty():\n continue\n # Get a list of concurrent images to write to disk.\n view_ims = write_queue.get()\n for view_idx, image in enumerate(view_ims):\n view_base = view_dirs[view_idx]\n # Assign all concurrent view images the same sequence timestep.\n fname = os.path.join(view_base, '%s.png' % str(timestep).zfill(10))\n cv2.imwrite(fname, image)\n # Move to the next timestep.\n timestep += 1\n\n\ndef get_image(camera):\n \"\"\"Captures a single image from the camera and returns it in PIL format.\"\"\"\n data = camera.read()\n _, im = data\n return im\n\n\ndef capture_webcam(camera, display_queue, reconcile_queue):\n \"\"\"Captures images from simultaneous webcams, writes them to queues.\n\n Args:\n camera: A cv2.VideoCapture object representing an open webcam stream.\n display_queue: An ImageQueue.\n reconcile_queue: An ImageQueue.\n \"\"\"\n # Take some ramp images to allow cams to adjust for brightness etc.\n for i in range(60):\n tf.logging.info('Taking ramp image %d.' % i)\n get_image(camera)\n\n cnt = 0\n start = time.time()\n while True:\n # Get images for all cameras.\n im = get_image(camera)\n # Replace the current image in the display and reconcile queues.\n display_queue.append(im)\n reconcile_queue.append(im)\n cnt += 1\n current = time.time()\n if cnt % 100 == 0:\n tf.logging.info('Collected %s of video, %d frames at ~%.2f fps.' 
% (\n timer(start, current), cnt, cnt/(current-start)))\n\n\ndef timer(start, end):\n \"\"\"Returns a formatted time elapsed.\"\"\"\n hours, rem = divmod(end-start, 3600)\n minutes, seconds = divmod(rem, 60)\n return '{:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds)\n\n\ndef display_webcams(display_queues):\n \"\"\"Builds an WebcamViewer to animate incoming images, runs it.\"\"\"\n viewer = WebcamViewer(display_queues)\n viewer.run()\n\n\ndef create_vids(view_dirs, seqname):\n \"\"\"Creates one video per view per sequence.\"\"\"\n vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)\n if not os.path.exists(vidbase):\n os.makedirs(vidbase)\n vidpaths = []\n for idx, view_dir in enumerate(view_dirs):\n vidname = os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))\n encode_vid_cmd = r'mencoder mf://%s/*.png \\\n -mf fps=29:type=png \\\n -ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell \\\n -oac copy -o %s' % (view_dir, vidname)\n os.system(encode_vid_cmd)\n vidpaths.append(vidname)\n\n debugpath = None\n if FLAGS.debug_vids:\n lhs = vidpaths[FLAGS.debug_lhs_view]\n rhs = vidpaths[FLAGS.debug_rhs_view]\n debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,\n FLAGS.mode)\n if not os.path.exists(debug_base):\n os.makedirs(debug_base)\n debugpath = '%s/%s.mp4' % (debug_base, seqname)\n os.system(r\"avconv \\\n -i %s \\\n -i %s \\\n -filter_complex '[0:v]pad=iw*2:ih[int];[int][1:v]overlay=W/2:0[vid]' \\\n -map [vid] \\\n -c:v libx264 \\\n -crf 23 \\\n -preset veryfast \\\n %s\" % (lhs, rhs, debugpath))\n\n return vidpaths, debugpath\n\n\ndef setup_paths():\n \"\"\"Sets up the necessary paths to collect videos.\"\"\"\n assert FLAGS.dataset\n assert FLAGS.mode\n assert FLAGS.num_views\n\n # Setup directory for final images used to create videos for this sequence.\n tmp_imagedir = os.path.join(FLAGS.tmp_imagedir, FLAGS.dataset, FLAGS.mode)\n if not os.path.exists(tmp_imagedir):\n os.makedirs(tmp_imagedir)\n\n # Create a base directory to hold all sequence videos if it doesn't exist.\n vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)\n if not os.path.exists(vidbase):\n os.makedirs(vidbase)\n\n # Get one directory per concurrent view and a sequence name.\n view_dirs, seqname = get_view_dirs(vidbase, tmp_imagedir)\n\n # Get an output path to each view's video.\n vid_paths = []\n for idx, _ in enumerate(view_dirs):\n vid_path = os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))\n vid_paths.append(vid_path)\n\n # Optionally build paths to debug_videos.\n debug_path = None\n if FLAGS.debug_vids:\n debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,\n FLAGS.mode)\n if not os.path.exists(debug_base):\n os.makedirs(debug_base)\n debug_path = '%s/%s.mp4' % (debug_base, seqname)\n\n return view_dirs, vid_paths, debug_path\n\n\ndef get_view_dirs(vidbase, tmp_imagedir):\n \"\"\"Creates and returns one view directory per webcam.\"\"\"\n # Create and append a sequence name.\n if FLAGS.seqname:\n seqname = FLAGS.seqname\n else:\n # If there's no video directory, this is the first sequence.\n if not os.listdir(vidbase):\n seqname = '0'\n else:\n # Otherwise, get the latest sequence name and increment it.\n seq_names = [i.split('_')[0] for i in os.listdir(vidbase)]\n latest_seq = sorted(map(int, seq_names), reverse=True)[0]\n seqname = str(latest_seq+1)\n tf.logging.info('No seqname specified, using: %s' % seqname)\n view_dirs = [os.path.join(\n tmp_imagedir, '%s_view%d' % (seqname, v)) for v in range(FLAGS.num_views)]\n for d in 
view_dirs:\n if not os.path.exists(d):\n os.makedirs(d)\n return view_dirs, seqname\n\n\ndef get_cameras():\n \"\"\"Opens cameras using cv2, ensures they can take images.\"\"\"\n # Try to get free webcam ports.\n if FLAGS.webcam_ports:\n ports = map(int, FLAGS.webcam_ports.split(','))\n else:\n ports = range(FLAGS.num_views)\n cameras = [cv2.VideoCapture(i) for i in ports]\n\n if not all([i.isOpened() for i in cameras]):\n try:\n # Try to find and kill hanging cv2 process_ids.\n output = subprocess.check_output(['lsof -t /dev/video*'], shell=True)\n tf.logging.info('Found hanging cv2 process_ids: \\n')\n tf.logging.info(output)\n tf.logging.info('Killing hanging processes...')\n for process_id in output.split('\\n')[:-1]:\n subprocess.call(['kill %s' % process_id], shell=True)\n time.sleep(3)\n # Recapture webcams.\n cameras = [cv2.VideoCapture(i) for i in ports]\n except subprocess.CalledProcessError:\n raise ValueError(\n 'Cannot connect to cameras. Try running: \\n'\n 'ls -ltrh /dev/video* \\n '\n 'to see which ports your webcams are connected to. Then hand those '\n 'ports as a comma-separated list to --webcam_ports, e.g. '\n '--webcam_ports 0,1')\n\n # Verify each camera is able to capture images.\n ims = map(get_image, cameras)\n assert False not in [i is not None for i in ims]\n return cameras\n\n\ndef launch_images_to_videos(view_dirs, vid_paths, debug_path):\n \"\"\"Launch job in separate process to convert images to videos.\"\"\"\n\n f = 'learning/brain/research/tcn/dataset/images_to_videos.py'\n cmd = ['python %s ' % f]\n cmd += ['--view_dirs %s ' % ','.join(i for i in view_dirs)]\n cmd += ['--vid_paths %s ' % ','.join(i for i in vid_paths)]\n cmd += ['--debug_path %s ' % debug_path]\n cmd += ['--debug_lhs_view %s ' % FLAGS.debug_lhs_view]\n cmd += ['--debug_rhs_view %s ' % FLAGS.debug_rhs_view]\n cmd += [' & ']\n cmd = ''.join(i for i in cmd)\n\n # Call images_to_videos asynchronously.\n fnull = open(os.devnull, 'w')\n subprocess.Popen([cmd], stdout=fnull, stderr=subprocess.STDOUT, shell=True)\n\n for p in vid_paths:\n tf.logging.info('Writing final video to: %s' % p)\n if debug_path:\n tf.logging.info('Writing debug video to: %s' % debug_path)\n\n\ndef main(_):\n # Initialize the camera capture objects.\n cameras = get_cameras()\n # Get one output directory per view.\n view_dirs, vid_paths, debug_path = setup_paths()\n try:\n # Wait for user input.\n try:\n tf.logging.info('About to write to:')\n for v in view_dirs:\n tf.logging.info(v)\n input('Press Enter to continue...')\n except SyntaxError:\n pass\n\n # Create a queue per view for displaying and saving images.\n display_queues = [ImageQueue() for _ in range(FLAGS.num_views)]\n reconcile_queues = [ImageQueue() for _ in range(FLAGS.num_views)]\n\n # Create a queue for collecting all tuples of multi-view images to write to\n # disk.\n write_queue = multiprocessing.Queue()\n\n processes = []\n # Create a process to display collected images in real time.\n processes.append(Process(target=display_webcams, args=(display_queues,)))\n # Create a process to collect the latest simultaneous images from each view.\n processes.append(Process(\n target=reconcile, args=(reconcile_queues, write_queue,)))\n # Create a process to collect the latest simultaneous images from each view.\n processes.append(Process(\n target=persist, args=(write_queue, view_dirs,)))\n\n for (cam, dq, rq) in zip(cameras, display_queues, reconcile_queues):\n processes.append(Process(\n target=capture_webcam, args=(cam, dq, rq,)))\n\n for p in processes:\n 
p.start()\n for p in processes:\n p.join()\n\n except KeyboardInterrupt:\n # Close the queues.\n for q in display_queues + reconcile_queues:\n q.close()\n # Release the cameras.\n for cam in cameras:\n cam.release()\n\n # Launch images_to_videos script asynchronously.\n launch_images_to_videos(view_dirs, vid_paths, debug_path)\n\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0) # pylint: disable=protected-access\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Evaluates a conditional TFGAN trained MNIST model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport data_provider\nimport networks\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\n\nimport util\n\ntfgan = tf.contrib.gan\n\n\nflags.DEFINE_string('checkpoint_dir', '/tmp/mnist/',\n 'Directory where the model was written to.')\n\nflags.DEFINE_string('eval_dir', '/tmp/mnist/',\n 'Directory where the results are saved to.')\n\nflags.DEFINE_integer('num_images_per_class', 10,\n 'Number of images to generate per class.')\n\nflags.DEFINE_integer('noise_dims', 64,\n 'Dimensions of the generator noise vector')\n\nflags.DEFINE_string('classifier_filename', None,\n 'Location of the pretrained classifier. If `None`, use '\n 'default.')\n\nflags.DEFINE_integer('max_number_of_evaluations', None,\n 'Number of times to run evaluation. If `None`, run '\n 'forever.')\n\nflags.DEFINE_boolean('write_to_disk', True, 'If `True`, run images to disk.')\n\nFLAGS = flags.FLAGS\nNUM_CLASSES = 10\n\n\ndef main(_, run_eval_loop=True):\n with tf.name_scope('inputs'):\n noise, one_hot_labels = _get_generator_inputs(\n FLAGS.num_images_per_class, NUM_CLASSES, FLAGS.noise_dims)\n\n # Generate images.\n with tf.variable_scope('Generator'): # Same scope as in train job.\n images = networks.conditional_generator(\n (noise, one_hot_labels), is_training=False)\n\n # Visualize images.\n reshaped_img = tfgan.eval.image_reshaper(\n images, num_cols=FLAGS.num_images_per_class)\n tf.summary.image('generated_images', reshaped_img, max_outputs=1)\n\n # Calculate evaluation metrics.\n tf.summary.scalar('MNIST_Classifier_score',\n util.mnist_score(images, FLAGS.classifier_filename))\n tf.summary.scalar('MNIST_Cross_entropy',\n util.mnist_cross_entropy(\n images, one_hot_labels, FLAGS.classifier_filename))\n\n # Write images to disk.\n image_write_ops = None\n if FLAGS.write_to_disk:\n image_write_ops = tf.write_file(\n '%s/%s'% (FLAGS.eval_dir, 'conditional_gan.png'),\n tf.image.encode_png(data_provider.float_image_to_uint8(\n reshaped_img[0])))\n\n # For unit testing, use `run_eval_loop=False`.\n if not run_eval_loop: return\n tf.contrib.training.evaluate_repeatedly(\n FLAGS.checkpoint_dir,\n hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),\n tf.contrib.training.StopAfterNEvalsHook(1)],\n eval_ops=image_write_ops,\n max_number_of_evaluations=FLAGS.max_number_of_evaluations)\n\n\ndef _get_generator_inputs(num_images_per_class, num_classes, noise_dims):\n # Since we want a grid of numbers for the conditional generator, manually\n # construct the desired class labels.\n num_images_generated = num_images_per_class * num_classes\n noise = tf.random_normal([num_images_generated, noise_dims])\n labels = [lbl for lbl in 
range(num_classes) for _\n in range(num_images_per_class)]\n one_hot_labels = tf.one_hot(tf.constant(labels), num_classes)\n return noise, one_hot_labels\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for nets.inception_v1.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom nets import inception\n\nslim = tf.contrib.slim\n\n\nclass InceptionV1Test(tf.test.TestCase):\n\n def testBuildClassificationNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith(\n 'InceptionV1/Logits/SpatialSqueeze'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue('Predictions' in end_points)\n self.assertListEqual(end_points['Predictions'].get_shape().as_list(),\n [batch_size, num_classes])\n\n def testBuildPreLogitsNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = None\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n net, end_points = inception.inception_v1(inputs, num_classes)\n self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))\n self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])\n self.assertFalse('Logits' in end_points)\n self.assertFalse('Predictions' in end_points)\n\n def testBuildBaseNetwork(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n mixed_6c, end_points = inception.inception_v1_base(inputs)\n self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))\n self.assertListEqual(mixed_6c.get_shape().as_list(),\n [batch_size, 7, 7, 1024])\n expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',\n 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',\n 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',\n 'Mixed_5b', 'Mixed_5c']\n self.assertItemsEqual(end_points.keys(), expected_endpoints)\n\n def testBuildOnlyUptoFinalEndpoint(self):\n batch_size = 5\n height, width = 224, 224\n endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',\n 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',\n 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',\n 'Mixed_5c']\n for index, endpoint in enumerate(endpoints):\n with tf.Graph().as_default():\n inputs = tf.random_uniform((batch_size, height, width, 3))\n out_tensor, end_points = inception.inception_v1_base(\n inputs, final_endpoint=endpoint)\n self.assertTrue(out_tensor.op.name.startswith(\n 'InceptionV1/' + endpoint))\n self.assertItemsEqual(endpoints[:index+1], end_points.keys())\n\n def testBuildAndCheckAllEndPointsUptoMixed5c(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = 
tf.random_uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_v1_base(inputs,\n final_endpoint='Mixed_5c')\n endpoints_shapes = {\n 'Conv2d_1a_7x7': [5, 112, 112, 64],\n 'MaxPool_2a_3x3': [5, 56, 56, 64],\n 'Conv2d_2b_1x1': [5, 56, 56, 64],\n 'Conv2d_2c_3x3': [5, 56, 56, 192],\n 'MaxPool_3a_3x3': [5, 28, 28, 192],\n 'Mixed_3b': [5, 28, 28, 256],\n 'Mixed_3c': [5, 28, 28, 480],\n 'MaxPool_4a_3x3': [5, 14, 14, 480],\n 'Mixed_4b': [5, 14, 14, 512],\n 'Mixed_4c': [5, 14, 14, 512],\n 'Mixed_4d': [5, 14, 14, 512],\n 'Mixed_4e': [5, 14, 14, 528],\n 'Mixed_4f': [5, 14, 14, 832],\n 'MaxPool_5a_2x2': [5, 7, 7, 832],\n 'Mixed_5b': [5, 7, 7, 832],\n 'Mixed_5c': [5, 7, 7, 1024]\n }\n\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testModelHasExpectedNumberOfParameters(self):\n batch_size = 5\n height, width = 224, 224\n inputs = tf.random_uniform((batch_size, height, width, 3))\n with slim.arg_scope(inception.inception_v1_arg_scope()):\n inception.inception_v1_base(inputs)\n total_params, _ = slim.model_analyzer.analyze_vars(\n slim.get_model_variables())\n self.assertAlmostEqual(5607184, total_params)\n\n def testHalfSizeImages(self):\n batch_size = 5\n height, width = 112, 112\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n mixed_5c, _ = inception.inception_v1_base(inputs)\n self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))\n self.assertListEqual(mixed_5c.get_shape().as_list(),\n [batch_size, 4, 4, 1024])\n\n def testBuildBaseNetworkWithoutRootBlock(self):\n batch_size = 5\n height, width = 28, 28\n channels = 192\n\n inputs = tf.random_uniform((batch_size, height, width, channels))\n _, end_points = inception.inception_v1_base(\n inputs, include_root_block=False)\n endpoints_shapes = {\n 'Mixed_3b': [5, 28, 28, 256],\n 'Mixed_3c': [5, 28, 28, 480],\n 'MaxPool_4a_3x3': [5, 14, 14, 480],\n 'Mixed_4b': [5, 14, 14, 512],\n 'Mixed_4c': [5, 14, 14, 512],\n 'Mixed_4d': [5, 14, 14, 512],\n 'Mixed_4e': [5, 14, 14, 528],\n 'Mixed_4f': [5, 14, 14, 832],\n 'MaxPool_5a_2x2': [5, 7, 7, 832],\n 'Mixed_5b': [5, 7, 7, 832],\n 'Mixed_5c': [5, 7, 7, 1024]\n }\n\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testUnknownImageShape(self):\n tf.reset_default_graph()\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n with self.test_session() as sess:\n inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))\n logits, end_points = inception.inception_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Mixed_5c']\n feed_dict = {inputs: input_np}\n tf.global_variables_initializer().run()\n pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])\n\n def testGlobalPoolUnknownImageShape(self):\n tf.reset_default_graph()\n batch_size = 1\n height, 
width = 250, 300\n num_classes = 1000\n input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n with self.test_session() as sess:\n inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))\n logits, end_points = inception.inception_v1(inputs, num_classes,\n global_pool=True)\n self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Mixed_5c']\n feed_dict = {inputs: input_np}\n tf.global_variables_initializer().run()\n pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])\n\n def testUnknowBatchSize(self):\n batch_size = 1\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.placeholder(tf.float32, (None, height, width, 3))\n logits, _ = inception.inception_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, num_classes])\n images = tf.random_uniform((batch_size, height, width, 3))\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEquals(output.shape, (batch_size, num_classes))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n\n eval_inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, _ = inception.inception_v1(eval_inputs, num_classes,\n is_training=False)\n predictions = tf.argmax(logits, 1)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (batch_size,))\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 5\n eval_batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n\n train_inputs = tf.random_uniform((train_batch_size, height, width, 3))\n inception.inception_v1(train_inputs, num_classes)\n eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))\n logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)\n predictions = tf.argmax(logits, 1)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (eval_batch_size,))\n\n def testLogitsNotSqueezed(self):\n num_classes = 25\n images = tf.random_uniform([1, 224, 224, 3])\n logits, _ = inception.inception_v1(images,\n num_classes=num_classes,\n spatial_squeeze=False)\n\n with self.test_session() as sess:\n tf.global_variables_initializer().run()\n logits_out = sess.run(logits)\n self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])\n\n def testNoBatchNormScaleByDefault(self):\n height, width = 224, 224\n num_classes = 1000\n inputs = tf.placeholder(tf.float32, (1, height, width, 3))\n with slim.arg_scope(inception.inception_v1_arg_scope()):\n inception.inception_v1(inputs, num_classes, is_training=False)\n\n self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])\n\n def testBatchNormScale(self):\n height, width = 224, 224\n num_classes = 1000\n inputs = tf.placeholder(tf.float32, (1, height, width, 3))\n with slim.arg_scope(\n inception.inception_v1_arg_scope(batch_norm_scale=True)):\n inception.inception_v1(inputs, num_classes, is_training=False)\n\n gamma_names = set(\n v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))\n self.assertGreater(len(gamma_names), 0)\n for v in 
tf.global_variables('.*/BatchNorm/moving_mean:0$'):\n self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Context for Universal Value Function agents.\n\nA context specifies a list of contextual variables, each with\n own sampling and reward computation methods.\n\nExamples of contextual variables include\n goal states, reward combination vectors, etc.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin.tf\nimport numpy as np\nimport tensorflow as tf\nfrom tf_agents import specs\nfrom utils import utils as uvf_utils\n\n\[email protected]\nclass Context(object):\n \"\"\"Base context.\"\"\"\n VAR_NAME = 'action'\n\n def __init__(self,\n tf_env,\n context_ranges=None,\n context_shapes=None,\n state_indices=None,\n variable_indices=None,\n gamma_index=None,\n settable_context=False,\n timers=None,\n samplers=None,\n reward_weights=None,\n reward_fn=None,\n random_sampler_mode='random',\n normalizers=None,\n context_transition_fn=None,\n context_multi_transition_fn=None,\n meta_action_every_n=None):\n self._tf_env = tf_env\n self.variable_indices = variable_indices\n self.gamma_index = gamma_index\n self._settable_context = settable_context\n self.timers = timers\n self._context_transition_fn = context_transition_fn\n self._context_multi_transition_fn = context_multi_transition_fn\n self._random_sampler_mode = random_sampler_mode\n\n # assign specs\n self._obs_spec = self._tf_env.observation_spec()\n self._context_shapes = tuple([\n shape if shape is not None else self._obs_spec.shape\n for shape in context_shapes\n ])\n self.context_specs = tuple([\n specs.TensorSpec(dtype=self._obs_spec.dtype, shape=shape)\n for shape in self._context_shapes\n ])\n if context_ranges is not None:\n self.context_ranges = context_ranges\n else:\n self.context_ranges = [None] * len(self._context_shapes)\n\n self.context_as_action_specs = tuple([\n specs.BoundedTensorSpec(\n shape=shape,\n dtype=(tf.float32 if self._obs_spec.dtype in\n [tf.float32, tf.float64] else self._obs_spec.dtype),\n minimum=context_range[0],\n maximum=context_range[-1])\n for shape, context_range in zip(self._context_shapes, self.context_ranges)\n ])\n\n if state_indices is not None:\n self.state_indices = state_indices\n else:\n self.state_indices = [None] * len(self._context_shapes)\n if self.variable_indices is not None and self.n != len(\n self.variable_indices):\n raise ValueError(\n 'variable_indices (%s) must have the same length as contexts (%s).' 
%\n (self.variable_indices, self.context_specs))\n assert self.n == len(self.context_ranges)\n assert self.n == len(self.state_indices)\n\n # assign reward/sampler fns\n self._sampler_fns = dict()\n self._samplers = dict()\n self._reward_fns = dict()\n\n # assign reward fns\n self._add_custom_reward_fns()\n reward_weights = reward_weights or None\n self._reward_fn = self._make_reward_fn(reward_fn, reward_weights)\n\n # assign samplers\n self._add_custom_sampler_fns()\n for mode, sampler_fns in samplers.items():\n self._make_sampler_fn(sampler_fns, mode)\n\n # create normalizers\n if normalizers is None:\n self._normalizers = [None] * len(self.context_specs)\n else:\n self._normalizers = [\n normalizer(tf.zeros(shape=spec.shape, dtype=spec.dtype))\n if normalizer is not None else None\n for normalizer, spec in zip(normalizers, self.context_specs)\n ]\n assert self.n == len(self._normalizers)\n\n self.meta_action_every_n = meta_action_every_n\n\n # create vars\n self.context_vars = {}\n self.timer_vars = {}\n self.create_vars(self.VAR_NAME)\n self.t = tf.Variable(\n tf.zeros(shape=(), dtype=tf.int32), name='num_timer_steps')\n\n def _add_custom_reward_fns(self):\n pass\n\n def _add_custom_sampler_fns(self):\n pass\n\n def sample_random_contexts(self, batch_size):\n \"\"\"Sample random batch contexts.\"\"\"\n assert self._random_sampler_mode is not None\n return self.sample_contexts(self._random_sampler_mode, batch_size)[0]\n\n def sample_contexts(self, mode, batch_size, state=None, next_state=None,\n **kwargs):\n \"\"\"Sample a batch of contexts.\n\n Args:\n mode: A string representing the mode [`train`, `explore`, `eval`].\n batch_size: Batch size.\n Returns:\n Two lists of [batch_size, num_context_dims] contexts.\n \"\"\"\n contexts, next_contexts = self._sampler_fns[mode](\n batch_size, state=state, next_state=next_state,\n **kwargs)\n self._validate_contexts(contexts)\n self._validate_contexts(next_contexts)\n return contexts, next_contexts\n\n def compute_rewards(self, mode, states, actions, rewards, next_states,\n contexts):\n \"\"\"Compute context-based rewards.\n\n Args:\n mode: A string representing the mode ['uvf', 'task'].\n states: A [batch_size, num_state_dims] tensor.\n actions: A [batch_size, num_action_dims] tensor.\n rewards: A [batch_size] tensor representing unmodified rewards.\n next_states: A [batch_size, num_state_dims] tensor.\n contexts: A list of [batch_size, num_context_dims] tensors.\n Returns:\n A [batch_size] tensor representing rewards.\n \"\"\"\n return self._reward_fn(states, actions, rewards, next_states,\n contexts)\n\n def _make_reward_fn(self, reward_fns_list, reward_weights):\n \"\"\"Returns a fn that computes rewards.\n\n Args:\n reward_fns_list: A fn or a list of reward fns.\n mode: A string representing the operating mode.\n reward_weights: A list of reward weights.\n \"\"\"\n if not isinstance(reward_fns_list, (list, tuple)):\n reward_fns_list = [reward_fns_list]\n if reward_weights is None:\n reward_weights = [1.0] * len(reward_fns_list)\n assert len(reward_fns_list) == len(reward_weights)\n\n reward_fns_list = [\n self._custom_reward_fns[fn] if isinstance(fn, (str,)) else fn\n for fn in reward_fns_list\n ]\n\n def reward_fn(*args, **kwargs):\n \"\"\"Returns rewards, discounts.\"\"\"\n reward_tuples = [\n reward_fn(*args, **kwargs) for reward_fn in reward_fns_list\n ]\n rewards_list = [reward_tuple[0] for reward_tuple in reward_tuples]\n discounts_list = [reward_tuple[1] for reward_tuple in reward_tuples]\n ndims = max([r.shape.ndims for r in 
rewards_list])\n if ndims > 1: # expand reward shapes to allow broadcasting\n for i in range(len(rewards_list)):\n for _ in range(rewards_list[i].shape.ndims - ndims):\n rewards_list[i] = tf.expand_dims(rewards_list[i], axis=-1)\n for _ in range(discounts_list[i].shape.ndims - ndims):\n discounts_list[i] = tf.expand_dims(discounts_list[i], axis=-1)\n rewards = tf.add_n(\n [r * tf.to_float(w) for r, w in zip(rewards_list, reward_weights)])\n discounts = discounts_list[0]\n for d in discounts_list[1:]:\n discounts *= d\n\n return rewards, discounts\n\n return reward_fn\n\n def _make_sampler_fn(self, sampler_cls_list, mode):\n \"\"\"Returns a fn that samples a list of context vars.\n\n Args:\n sampler_cls_list: A list of sampler classes.\n mode: A string representing the operating mode.\n \"\"\"\n if not isinstance(sampler_cls_list, (list, tuple)):\n sampler_cls_list = [sampler_cls_list]\n\n self._samplers[mode] = []\n sampler_fns = []\n for spec, sampler in zip(self.context_specs, sampler_cls_list):\n if isinstance(sampler, (str,)):\n sampler_fn = self._custom_sampler_fns[sampler]\n else:\n sampler_fn = sampler(context_spec=spec)\n self._samplers[mode].append(sampler_fn)\n sampler_fns.append(sampler_fn)\n\n def batch_sampler_fn(batch_size, state=None, next_state=None, **kwargs):\n \"\"\"Sampler fn.\"\"\"\n contexts_tuples = [\n sampler(batch_size, state=state, next_state=next_state, **kwargs)\n for sampler in sampler_fns]\n contexts = [c[0] for c in contexts_tuples]\n next_contexts = [c[1] for c in contexts_tuples]\n contexts = [\n normalizer.update_apply(c) if normalizer is not None else c\n for normalizer, c in zip(self._normalizers, contexts)\n ]\n next_contexts = [\n normalizer.apply(c) if normalizer is not None else c\n for normalizer, c in zip(self._normalizers, next_contexts)\n ]\n return contexts, next_contexts\n\n self._sampler_fns[mode] = batch_sampler_fn\n\n def set_env_context_op(self, context, disable_unnormalizer=False):\n \"\"\"Returns a TensorFlow op that sets the environment context.\n\n Args:\n context: A list of context Tensor variables.\n disable_unnormalizer: Disable unnormalization.\n Returns:\n A TensorFlow op that sets the environment context.\n \"\"\"\n ret_val = np.array(1.0, dtype=np.float32)\n if not self._settable_context:\n return tf.identity(ret_val)\n\n if not disable_unnormalizer:\n context = [\n normalizer.unapply(tf.expand_dims(c, 0))[0]\n if normalizer is not None else c\n for normalizer, c in zip(self._normalizers, context)\n ]\n\n def set_context_func(*env_context_values):\n tf.logging.info('[set_env_context_op] Setting gym environment context.')\n # pylint: disable=protected-access\n self.gym_env.set_context(*env_context_values)\n return ret_val\n # pylint: enable=protected-access\n\n with tf.name_scope('set_env_context'):\n set_op = tf.py_func(set_context_func, context, tf.float32,\n name='set_env_context_py_func')\n set_op.set_shape([])\n return set_op\n\n def set_replay(self, replay):\n \"\"\"Set replay buffer for samplers.\n\n Args:\n replay: A replay buffer.\n \"\"\"\n for _, samplers in self._samplers.items():\n for sampler in samplers:\n sampler.set_replay(replay)\n\n def get_clip_fns(self):\n \"\"\"Returns a list of clip fns for contexts.\n\n Returns:\n A list of fns that clip context tensors.\n \"\"\"\n clip_fns = []\n for context_range in self.context_ranges:\n def clip_fn(var_, range_=context_range):\n \"\"\"Clip a tensor.\"\"\"\n if range_ is None:\n clipped_var = tf.identity(var_)\n elif isinstance(range_[0], (int, long, float, list, 
np.ndarray)):\n clipped_var = tf.clip_by_value(\n var_,\n range_[0],\n range_[1],)\n else: raise NotImplementedError(range_)\n return clipped_var\n clip_fns.append(clip_fn)\n return clip_fns\n\n def _validate_contexts(self, contexts):\n \"\"\"Validate if contexts have right specs.\n\n Args:\n contexts: A list of [batch_size, num_context_dim] tensors.\n Raises:\n ValueError: If shape or dtype mismatches that of spec.\n \"\"\"\n for i, (context, spec) in enumerate(zip(contexts, self.context_specs)):\n if context[0].shape != spec.shape:\n raise ValueError('contexts[%d] has invalid shape %s wrt spec shape %s' %\n (i, context[0].shape, spec.shape))\n if context.dtype != spec.dtype:\n raise ValueError('contexts[%d] has invalid dtype %s wrt spec dtype %s' %\n (i, context.dtype, spec.dtype))\n\n def context_multi_transition_fn(self, contexts, **kwargs):\n \"\"\"Returns multiple future contexts starting from a batch.\"\"\"\n assert self._context_multi_transition_fn\n return self._context_multi_transition_fn(contexts, None, None, **kwargs)\n\n def step(self, mode, agent=None, action_fn=None, **kwargs):\n \"\"\"Returns [next_contexts..., next_timer] list of ops.\n\n Args:\n mode: a string representing the mode=[train, explore, eval].\n **kwargs: kwargs for context_transition_fn.\n Returns:\n a list of ops that set the context.\n \"\"\"\n if agent is None:\n ops = []\n if self._context_transition_fn is not None:\n def sampler_fn():\n samples = self.sample_contexts(mode, 1)[0]\n return [s[0] for s in samples]\n values = self._context_transition_fn(self.vars, self.t, sampler_fn, **kwargs)\n ops += [tf.assign(var, value) for var, value in zip(self.vars, values)]\n ops.append(tf.assign_add(self.t, 1)) # increment timer\n return ops\n else:\n ops = agent.tf_context.step(mode, **kwargs)\n state = kwargs['state']\n next_state = kwargs['next_state']\n state_repr = kwargs['state_repr']\n next_state_repr = kwargs['next_state_repr']\n with tf.control_dependencies(ops): # Step high level context before computing low level one.\n # Get the context transition function output.\n values = self._context_transition_fn(self.vars, self.t, None,\n state=state_repr,\n next_state=next_state_repr)\n # Select a new goal every C steps, otherwise use context transition.\n low_level_context = [\n tf.cond(tf.equal(self.t % self.meta_action_every_n, 0),\n lambda: tf.cast(action_fn(next_state, context=None), tf.float32),\n lambda: values)]\n ops = [tf.assign(var, value)\n for var, value in zip(self.vars, low_level_context)]\n with tf.control_dependencies(ops):\n return [tf.assign_add(self.t, 1)] # increment timer\n return ops\n\n def reset(self, mode, agent=None, action_fn=None, state=None):\n \"\"\"Returns ops that reset the context.\n\n Args:\n mode: a string representing the mode=[train, explore, eval].\n Returns:\n a list of ops that reset the context.\n \"\"\"\n if agent is None:\n values = self.sample_contexts(mode=mode, batch_size=1)[0]\n if values is None:\n return []\n values = [value[0] for value in values]\n values[0] = uvf_utils.tf_print(\n values[0],\n values,\n message='context:reset, mode=%s' % mode,\n first_n=10,\n name='context:reset:%s' % mode)\n all_ops = []\n for _, context_vars in sorted(self.context_vars.items()):\n ops = [tf.assign(var, value) for var, value in zip(context_vars, values)]\n all_ops += ops\n all_ops.append(self.set_env_context_op(values))\n all_ops.append(tf.assign(self.t, 0)) # reset timer\n return all_ops\n else:\n ops = agent.tf_context.reset(mode)\n # NOTE: The code is currently written in 
such a way that the higher level\n # policy does not provide a low-level context until the second\n # observation. Instead, we just zero-out low-level contexts.\n for key, context_vars in sorted(self.context_vars.items()):\n ops += [tf.assign(var, tf.zeros_like(var)) for var, meta_var in\n zip(context_vars, agent.tf_context.context_vars[key])]\n\n ops.append(tf.assign(self.t, 0)) # reset timer\n return ops\n\n def create_vars(self, name, agent=None):\n \"\"\"Create tf variables for contexts.\n\n Args:\n name: Name of the variables.\n Returns:\n A list of [num_context_dims] tensors.\n \"\"\"\n if agent is not None:\n meta_vars = agent.create_vars(name)\n else:\n meta_vars = {}\n assert name not in self.context_vars, ('Conflict! %s is already '\n 'initialized.') % name\n self.context_vars[name] = tuple([\n tf.Variable(\n tf.zeros(shape=spec.shape, dtype=spec.dtype),\n name='%s_context_%d' % (name, i))\n for i, spec in enumerate(self.context_specs)\n ])\n return self.context_vars[name], meta_vars\n\n @property\n def n(self):\n return len(self.context_specs)\n\n @property\n def vars(self):\n return self.context_vars[self.VAR_NAME]\n\n # pylint: disable=protected-access\n @property\n def gym_env(self):\n return self._tf_env.pyenv._gym_env\n\n @property\n def tf_env(self):\n return self._tf_env\n # pylint: enable=protected-access\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Compute an expanded vocabulary of embeddings using a word2vec model.\n\nThis script loads the word embeddings from a trained skip-thoughts model and\nfrom a trained word2vec model (typically with a larger vocabulary). It trains a\nlinear regression model without regularization to learn a linear mapping from\nthe word2vec embedding space to the skip-thoughts embedding space. The model is\nthen applied to all words in the word2vec vocabulary, yielding vectors in the\nskip-thoughts word embedding space for the union of the two vocabularies.\n\nThe linear regression task is to learn a parameter matrix W to minimize\n || X - Y * W ||^2,\nwhere X is a matrix of skip-thoughts embeddings of shape [num_words, dim1],\nY is a matrix of word2vec embeddings of shape [num_words, dim2], and W is a\nmatrix of shape [dim2, dim1].\n\nThis is based on the \"Translation Matrix\" method from the paper:\n\n \"Exploiting Similarities among Languages for Machine Translation\"\n Tomas Mikolov, Quoc V. Le, Ilya Sutskever\n https://arxiv.org/abs/1309.4168\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport gensim.models\nimport numpy as np\nimport os.path\nimport sklearn.linear_model\nimport tensorflow as tf\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string(\"skip_thoughts_model\", None,\n \"Checkpoint file or directory containing a checkpoint \"\n \"file.\")\n\ntf.flags.DEFINE_string(\"skip_thoughts_vocab\", None,\n \"Path to vocabulary file containing a list of newline-\"\n \"separated words where the word id is the \"\n \"corresponding 0-based index in the file.\")\n\ntf.flags.DEFINE_string(\"word2vec_model\", None,\n \"File containing a word2vec model in binary format.\")\n\ntf.flags.DEFINE_string(\"output_dir\", None, \"Output directory.\")\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef _load_skip_thoughts_embeddings(checkpoint_path):\n \"\"\"Loads the embedding matrix from a skip-thoughts model checkpoint.\n\n Args:\n checkpoint_path: Model checkpoint file or directory containing a checkpoint\n file.\n\n Returns:\n word_embedding: A numpy array of shape [vocab_size, embedding_dim].\n\n Raises:\n ValueError: If no checkpoint file matches checkpoint_path.\n \"\"\"\n if tf.gfile.IsDirectory(checkpoint_path):\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)\n if not checkpoint_file:\n raise ValueError(\"No checkpoint file found in %s\" % checkpoint_path)\n else:\n checkpoint_file = checkpoint_path\n\n tf.logging.info(\"Loading skip-thoughts embedding matrix from %s\",\n checkpoint_file)\n reader = tf.train.NewCheckpointReader(checkpoint_file)\n word_embedding = reader.get_tensor(\"word_embedding\")\n tf.logging.info(\"Loaded skip-thoughts embedding matrix of shape %s\",\n 
word_embedding.shape)\n\n return word_embedding\n\n\ndef _load_vocabulary(filename):\n \"\"\"Loads a vocabulary file.\n\n Args:\n filename: Path to text file containing newline-separated words.\n\n Returns:\n vocab: A dictionary mapping word to word id.\n \"\"\"\n tf.logging.info(\"Reading vocabulary from %s\", filename)\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(filename, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocabulary of size %d\", len(vocab))\n return vocab\n\n\ndef _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):\n \"\"\"Runs vocabulary expansion on a skip-thoughts model using a word2vec model.\n\n Args:\n skip_thoughts_emb: A numpy array of shape [skip_thoughts_vocab_size,\n skip_thoughts_embedding_dim].\n skip_thoughts_vocab: A dictionary of word to id.\n word2vec: An instance of gensim.models.Word2Vec.\n\n Returns:\n combined_emb: A dictionary mapping words to embedding vectors.\n \"\"\"\n # Find words shared between the two vocabularies.\n tf.logging.info(\"Finding shared words\")\n shared_words = [w for w in word2vec.vocab if w in skip_thoughts_vocab]\n\n # Select embedding vectors for shared words.\n tf.logging.info(\"Selecting embeddings for %d shared words\", len(shared_words))\n shared_st_emb = skip_thoughts_emb[[\n skip_thoughts_vocab[w] for w in shared_words\n ]]\n shared_w2v_emb = word2vec[shared_words]\n\n # Train a linear regression model on the shared embedding vectors.\n tf.logging.info(\"Training linear regression model\")\n model = sklearn.linear_model.LinearRegression()\n model.fit(shared_w2v_emb, shared_st_emb)\n\n # Create the expanded vocabulary.\n tf.logging.info(\"Creating embeddings for expanded vocabuary\")\n combined_emb = collections.OrderedDict()\n for w in word2vec.vocab:\n # Ignore words with underscores (spaces).\n if \"_\" not in w:\n w_emb = model.predict(word2vec[w].reshape(1, -1))\n combined_emb[w] = w_emb.reshape(-1)\n\n for w in skip_thoughts_vocab:\n combined_emb[w] = skip_thoughts_emb[skip_thoughts_vocab[w]]\n\n tf.logging.info(\"Created expanded vocabulary of %d words\", len(combined_emb))\n\n return combined_emb\n\n\ndef main(unused_argv):\n if not FLAGS.skip_thoughts_model:\n raise ValueError(\"--skip_thoughts_model is required.\")\n if not FLAGS.skip_thoughts_vocab:\n raise ValueError(\"--skip_thoughts_vocab is required.\")\n if not FLAGS.word2vec_model:\n raise ValueError(\"--word2vec_model is required.\")\n if not FLAGS.output_dir:\n raise ValueError(\"--output_dir is required.\")\n\n if not tf.gfile.IsDirectory(FLAGS.output_dir):\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n # Load the skip-thoughts embeddings and vocabulary.\n skip_thoughts_emb = _load_skip_thoughts_embeddings(FLAGS.skip_thoughts_model)\n skip_thoughts_vocab = _load_vocabulary(FLAGS.skip_thoughts_vocab)\n\n # Load the Word2Vec model.\n word2vec = gensim.models.Word2Vec.load_word2vec_format(\n FLAGS.word2vec_model, binary=True)\n\n # Run vocabulary expansion.\n embedding_map = _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab,\n word2vec)\n\n # Save the output.\n vocab = embedding_map.keys()\n vocab_file = os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.GFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab))\n tf.logging.info(\"Wrote vocabulary file to %s\", vocab_file)\n\n embeddings = np.array(embedding_map.values())\n embeddings_file = 
os.path.join(FLAGS.output_dir, \"embeddings.npy\")\n np.save(embeddings_file, embeddings)\n tf.logging.info(\"Wrote embeddings file to %s\", embeddings_file)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Converts PASCAL VOC 2012 data to TFRecord file format with Example protos.\n\nPASCAL VOC 2012 dataset is expected to have the following directory structure:\n\n + pascal_voc_seg\n - build_data.py\n - build_voc2012_data.py (current working directory).\n + VOCdevkit\n + VOC2012\n + JPEGImages\n + SegmentationClass\n + ImageSets\n + Segmentation\n + tfrecord\n\nImage folder:\n ./VOCdevkit/VOC2012/JPEGImages\n\nSemantic segmentation annotations:\n ./VOCdevkit/VOC2012/SegmentationClass\n\nlist folder:\n ./VOCdevkit/VOC2012/ImageSets/Segmentation\n\nThis script converts data into sharded data files and save at tfrecord folder.\n\nThe Example proto contains the following fields:\n\n image/encoded: encoded image content.\n image/filename: image filename.\n image/format: image file format.\n image/height: image height.\n image/width: image width.\n image/channels: image channels.\n image/segmentation/class/encoded: encoded semantic segmentation content.\n image/segmentation/class/format: semantic segmentation file format.\n\"\"\"\nimport math\nimport sys\n\nimport build_data\nimport os.path\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('image_folder',\n './VOCdevkit/VOC2012/JPEGImages',\n 'Folder containing images.')\n\ntf.app.flags.DEFINE_string(\n 'semantic_segmentation_folder',\n './VOCdevkit/VOC2012/SegmentationClassRaw',\n 'Folder containing semantic segmentation annotations.')\n\ntf.app.flags.DEFINE_string(\n 'list_folder',\n './VOCdevkit/VOC2012/ImageSets/Segmentation',\n 'Folder containing lists for training and validation')\n\ntf.app.flags.DEFINE_string(\n 'output_dir',\n './tfrecord',\n 'Path to save converted SSTable of TensorFlow examples.')\n\n\n_NUM_SHARDS = 4\n\n\ndef _convert_dataset(dataset_split):\n \"\"\"Converts the specified dataset split to TFRecord format.\n\n Args:\n dataset_split: The dataset split (e.g., train, test).\n\n Raises:\n RuntimeError: If loaded image and label have different shape.\n \"\"\"\n dataset = os.path.basename(dataset_split)[:-4]\n sys.stdout.write('Processing ' + dataset)\n filenames = [x.strip('\\n') for x in open(dataset_split, 'r')]\n num_images = len(filenames)\n num_per_shard = int(math.ceil(num_images / float(_NUM_SHARDS)))\n\n image_reader = build_data.ImageReader('jpeg', channels=3)\n label_reader = build_data.ImageReader('png', channels=1)\n\n for shard_id in range(_NUM_SHARDS):\n output_filename = os.path.join(\n FLAGS.output_dir,\n '%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS))\n with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:\n start_idx = shard_id * num_per_shard\n end_idx = min((shard_id + 1) * num_per_shard, num_images)\n for i in range(start_idx, end_idx):\n sys.stdout.write('\\r>> Converting image %d/%d shard %d' % (\n i + 1, len(filenames), shard_id))\n 
sys.stdout.flush()\n # Read the image.\n image_filename = os.path.join(\n FLAGS.image_folder, filenames[i] + '.' + FLAGS.image_format)\n image_data = tf.gfile.FastGFile(image_filename, 'rb').read()\n height, width = image_reader.read_image_dims(image_data)\n # Read the semantic segmentation annotation.\n seg_filename = os.path.join(\n FLAGS.semantic_segmentation_folder,\n filenames[i] + '.' + FLAGS.label_format)\n seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read()\n seg_height, seg_width = label_reader.read_image_dims(seg_data)\n if height != seg_height or width != seg_width:\n raise RuntimeError('Shape mismatched between image and label.')\n # Convert to tf example.\n example = build_data.image_seg_to_tfexample(\n image_data, filenames[i], height, width, seg_data)\n tfrecord_writer.write(example.SerializeToString())\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef main(unused_argv):\n dataset_splits = tf.gfile.Glob(os.path.join(FLAGS.list_folder, '*.txt'))\n for dataset_split in dataset_splits:\n _convert_dataset(dataset_split)\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test the keras ResNet model with ImageNet data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tempfile import mkdtemp\n\nimport tensorflow as tf\nfrom official.resnet import imagenet_main\nfrom official.resnet.keras import keras_imagenet_main\nfrom official.utils.misc import keras_utils\nfrom official.utils.testing import integration\n# pylint: disable=ungrouped-imports\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.platform import googletest\n\n\nclass KerasImagenetTest(googletest.TestCase):\n \"\"\"Unit tests for Keras ResNet with ImageNet.\"\"\"\n\n _extra_flags = [\n \"-batch_size\", \"4\",\n \"-train_steps\", \"1\",\n \"-use_synthetic_data\", \"true\"\n ]\n _tempdir = None\n\n def get_temp_dir(self):\n if not self._tempdir:\n self._tempdir = mkdtemp(dir=googletest.GetTempDir())\n return self._tempdir\n\n @classmethod\n def setUpClass(cls): # pylint: disable=invalid-name\n super(KerasImagenetTest, cls).setUpClass()\n keras_imagenet_main.define_imagenet_keras_flags()\n\n def setUp(self):\n super(KerasImagenetTest, self).setUp()\n imagenet_main.NUM_IMAGES[\"validation\"] = 4\n\n def tearDown(self):\n super(KerasImagenetTest, self).tearDown()\n tf.io.gfile.rmtree(self.get_temp_dir())\n\n def test_end_to_end_no_dist_strat(self):\n \"\"\"Test Keras model with 1 GPU, no distribution strategy.\"\"\"\n config = keras_utils.get_config_proto_v1()\n tf.compat.v1.enable_eager_execution(config=config)\n\n extra_flags = [\n \"-distribution_strategy\", \"off\",\n \"-model_dir\", \"keras_imagenet_no_dist_strat\",\n \"-data_format\", \"channels_last\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_graph_no_dist_strat(self):\n \"\"\"Test Keras model in legacy graph mode with 1 GPU, no dist strat.\"\"\"\n extra_flags = [\n \"-enable_eager\", \"false\",\n \"-distribution_strategy\", \"off\",\n \"-model_dir\", \"keras_imagenet_graph_no_dist_strat\",\n \"-data_format\", \"channels_last\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_1_gpu(self):\n \"\"\"Test Keras model with 1 GPU.\"\"\"\n config = keras_utils.get_config_proto_v1()\n tf.compat.v1.enable_eager_execution(config=config)\n\n if context.num_gpus() < 1:\n self.skipTest(\n \"{} GPUs are not available for this test. 
{} GPUs are available\".\n format(1, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"1\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_1_gpu\",\n \"-data_format\", \"channels_last\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_graph_1_gpu(self):\n \"\"\"Test Keras model in legacy graph mode with 1 GPU.\"\"\"\n if context.num_gpus() < 1:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(1, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"1\",\n \"-enable_eager\", \"false\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_graph_1_gpu\",\n \"-data_format\", \"channels_last\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_2_gpu(self):\n \"\"\"Test Keras model with 2 GPUs.\"\"\"\n config = keras_utils.get_config_proto_v1()\n tf.compat.v1.enable_eager_execution(config=config)\n\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(2, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"2\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_2_gpu\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_xla_2_gpu(self):\n \"\"\"Test Keras model with XLA and 2 GPUs.\"\"\"\n config = keras_utils.get_config_proto_v1()\n tf.compat.v1.enable_eager_execution(config=config)\n\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(2, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"2\",\n \"-enable_xla\", \"true\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_xla_2_gpu\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_2_gpu_fp16(self):\n \"\"\"Test Keras model with 2 GPUs and fp16.\"\"\"\n config = keras_utils.get_config_proto_v1()\n tf.compat.v1.enable_eager_execution(config=config)\n\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(2, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"2\",\n \"-dtype\", \"fp16\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_2_gpu_fp16\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_xla_2_gpu_fp16(self):\n \"\"\"Test Keras model with XLA, 2 GPUs and fp16.\"\"\"\n config = keras_utils.get_config_proto_v1()\n tf.compat.v1.enable_eager_execution(config=config)\n\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. 
{} GPUs are available\".\n format(2, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"2\",\n \"-dtype\", \"fp16\",\n \"-enable_xla\", \"true\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_xla_2_gpu_fp16\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_graph_2_gpu(self):\n \"\"\"Test Keras model in legacy graph mode with 2 GPUs.\"\"\"\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(2, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"2\",\n \"-enable_eager\", \"false\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_graph_2_gpu\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n def test_end_to_end_graph_xla_2_gpu(self):\n \"\"\"Test Keras model in legacy graph mode with XLA and 2 GPUs.\"\"\"\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(2, context.num_gpus()))\n\n extra_flags = [\n \"-num_gpus\", \"2\",\n \"-enable_eager\", \"false\",\n \"-enable_xla\", \"true\",\n \"-distribution_strategy\", \"default\",\n \"-model_dir\", \"keras_imagenet_graph_xla_2_gpu\",\n ]\n extra_flags = extra_flags + self._extra_flags\n\n integration.run_synthetic(\n main=keras_imagenet_main.run,\n tmp_root=self.get_temp_dir(),\n extra_flags=extra_flags\n )\n\n\nif __name__ == '__main__':\n googletest.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"tf.data.Dataset interface to the MNIST dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import urllib\n\n\ndef read32(bytestream):\n \"\"\"Read 4 bytes from bytestream as an unsigned 32-bit integer.\"\"\"\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)[0]\n\n\ndef check_image_file_header(filename):\n \"\"\"Validate that filename corresponds to images for the MNIST dataset.\"\"\"\n with tf.gfile.Open(filename, 'rb') as f:\n magic = read32(f)\n read32(f) # num_images, unused\n rows = read32(f)\n cols = read32(f)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,\n f.name))\n if rows != 28 or cols != 28:\n raise ValueError(\n 'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %\n (f.name, rows, cols))\n\n\ndef check_labels_file_header(filename):\n \"\"\"Validate that filename corresponds to labels for the MNIST dataset.\"\"\"\n with tf.gfile.Open(filename, 'rb') as f:\n magic = read32(f)\n read32(f) # num_items, unused\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,\n f.name))\n\n\ndef download(directory, filename):\n \"\"\"Download (and unzip) a file from the MNIST dataset if not already done.\"\"\"\n filepath = os.path.join(directory, filename)\n if tf.gfile.Exists(filepath):\n return filepath\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)\n # CVDF mirror of http://yann.lecun.com/exdb/mnist/\n url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n _, zipped_filepath = tempfile.mkstemp(suffix='.gz')\n print('Downloading %s to %s' % (url, zipped_filepath))\n urllib.request.urlretrieve(url, zipped_filepath)\n with gzip.open(zipped_filepath, 'rb') as f_in, \\\n tf.gfile.Open(filepath, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(zipped_filepath)\n return filepath\n\n\ndef dataset(directory, images_file, labels_file):\n \"\"\"Download and parse MNIST dataset.\"\"\"\n\n images_file = download(directory, images_file)\n labels_file = download(directory, labels_file)\n\n check_image_file_header(images_file)\n check_labels_file_header(labels_file)\n\n def decode_image(image):\n # Normalize from [0, 255] to [0.0, 1.0]\n image = tf.decode_raw(image, tf.uint8)\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image, [784])\n return image / 255.0\n\n def decode_label(label):\n label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]\n label = tf.reshape(label, []) # label is a scalar\n return tf.cast(label, tf.int32)\n\n images = tf.data.FixedLengthRecordDataset(\n images_file, 28 * 28, header_bytes=16).map(decode_image)\n labels = tf.data.FixedLengthRecordDataset(\n 
labels_file, 1, header_bytes=8).map(decode_label)\n return tf.data.Dataset.zip((images, labels))\n\n\ndef train(directory):\n \"\"\"tf.data.Dataset object for MNIST training data.\"\"\"\n return dataset(directory, 'train-images-idx3-ubyte',\n 'train-labels-idx1-ubyte')\n\n\ndef test(directory):\n \"\"\"tf.data.Dataset object for MNIST test data.\"\"\"\n return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')\n",
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A program to train a tensorflow neural net parser from a conll file.\"\"\"\n\n\n\n\nimport os\n\nimport os.path\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\nfrom dragnn.protos import spec_pb2\nfrom dragnn.python import evaluation\nfrom dragnn.python import graph_builder\nfrom dragnn.python import lexicon\nfrom dragnn.python import sentence_io\nfrom dragnn.python import spec_builder\nfrom dragnn.python import trainer_lib\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('tf_master', '',\n 'TensorFlow execution engine to connect to.')\nflags.DEFINE_string('resource_path', '', 'Path to constructed resources.')\nflags.DEFINE_string('tensorboard_dir', '',\n 'Directory for TensorBoard logs output.')\nflags.DEFINE_string('checkpoint_filename', '',\n 'Filename to save the best checkpoint to.')\n\nflags.DEFINE_string('training_corpus_path', '', 'Path to training data.')\nflags.DEFINE_string('dev_corpus_path', '', 'Path to development set data.')\n\nflags.DEFINE_bool('compute_lexicon', False, '')\nflags.DEFINE_bool('projectivize_training_set', True, '')\n\nflags.DEFINE_integer('batch_size', 4, 'Batch size.')\nflags.DEFINE_integer('report_every', 200,\n 'Report cost and training accuracy every this many steps.')\n\n\ndef main(unused_argv):\n logging.set_verbosity(logging.INFO)\n\n if not gfile.IsDirectory(FLAGS.resource_path):\n gfile.MakeDirs(FLAGS.resource_path)\n\n # Constructs lexical resources for SyntaxNet in the given resource path, from\n # the training data.\n if FLAGS.compute_lexicon:\n logging.info('Computing lexicon...')\n lexicon.build_lexicon(FLAGS.resource_path, FLAGS.training_corpus_path)\n\n # Construct the \"lookahead\" ComponentSpec. This is a simple right-to-left RNN\n # sequence model, which encodes the context to the right of each token. It has\n # no loss except for the downstream components.\n\n char2word = spec_builder.ComponentSpecBuilder('char_lstm')\n char2word.set_network_unit(\n name='wrapped_units.LayerNormBasicLSTMNetwork',\n hidden_layer_sizes='256')\n char2word.set_transition_system(name='char-shift-only', left_to_right='true')\n char2word.add_fixed_feature(name='chars', fml='char-input.text-char',\n embedding_dim=16)\n char2word.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\n lookahead = spec_builder.ComponentSpecBuilder('lookahead')\n lookahead.set_network_unit(\n name='wrapped_units.LayerNormBasicLSTMNetwork',\n hidden_layer_sizes='256')\n lookahead.set_transition_system(name='shift-only', left_to_right='false')\n lookahead.add_link(source=char2word, fml='input.last-char-focus',\n embedding_dim=32)\n lookahead.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\n # Construct the ComponentSpec for tagging. 
This is a simple left-to-right RNN\n # sequence tagger.\n tagger = spec_builder.ComponentSpecBuilder('tagger')\n tagger.set_network_unit(\n name='wrapped_units.LayerNormBasicLSTMNetwork',\n hidden_layer_sizes='256')\n tagger.set_transition_system(name='tagger')\n tagger.add_token_link(source=lookahead, fml='input.focus', embedding_dim=32)\n tagger.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\n # Construct the ComponentSpec for parsing.\n parser = spec_builder.ComponentSpecBuilder('parser')\n parser.set_network_unit(name='FeedForwardNetwork', hidden_layer_sizes='256',\n layer_norm_hidden='True')\n parser.set_transition_system(name='arc-standard')\n parser.add_token_link(source=lookahead, fml='input.focus', embedding_dim=32)\n parser.add_token_link(\n source=tagger,\n fml='input.focus stack.focus stack(1).focus',\n embedding_dim=32)\n\n # Recurrent connection for the arc-standard parser. For both tokens on the\n # stack, we connect to the last time step to either SHIFT or REDUCE that\n # token. This allows the parser to build up compositional representations of\n # phrases.\n parser.add_link(\n source=parser, # recurrent connection\n name='rnn-stack', # unique identifier\n fml='stack.focus stack(1).focus', # look for both stack tokens\n source_translator='shift-reduce-step', # maps token indices -> step\n embedding_dim=32) # project down to 32 dims\n\n parser.fill_from_resources(FLAGS.resource_path, FLAGS.tf_master)\n\n master_spec = spec_pb2.MasterSpec()\n master_spec.component.extend([char2word.spec, lookahead.spec,\n tagger.spec, parser.spec])\n logging.info('Constructed master spec: %s', str(master_spec))\n hyperparam_config = spec_pb2.GridPoint()\n hyperparam_config.decay_steps = 128000\n hyperparam_config.learning_rate = 0.001\n hyperparam_config.learning_method = 'adam'\n hyperparam_config.adam_beta1 = 0.9\n hyperparam_config.adam_beta2 = 0.9\n hyperparam_config.adam_eps = 0.0001\n hyperparam_config.gradient_clip_norm = 1\n hyperparam_config.self_norm_alpha = 1.0\n hyperparam_config.use_moving_average = True\n hyperparam_config.dropout_rate = 0.7\n hyperparam_config.seed = 1\n\n # Build the TensorFlow graph.\n graph = tf.Graph()\n with graph.as_default():\n builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)\n component_targets = spec_builder.default_targets_from_spec(master_spec)\n trainers = [\n builder.add_training_from_config(target) for target in component_targets\n ]\n assert len(trainers) == 2\n annotator = builder.add_annotation()\n builder.add_saver()\n\n # Read in serialized protos from training data.\n training_set = sentence_io.ConllSentenceReader(\n FLAGS.training_corpus_path,\n projectivize=FLAGS.projectivize_training_set).corpus()\n dev_set = sentence_io.ConllSentenceReader(\n FLAGS.dev_corpus_path, projectivize=False).corpus()\n\n # Ready to train!\n logging.info('Training on %d sentences.', len(training_set))\n logging.info('Tuning on %d sentences.', len(dev_set))\n\n pretrain_steps = [100, 0]\n tagger_steps = 1000\n train_steps = [tagger_steps, 8 * tagger_steps]\n\n tf.logging.info('Creating TensorFlow checkpoint dir...')\n gfile.MakeDirs(os.path.dirname(FLAGS.checkpoint_filename))\n summary_writer = trainer_lib.get_summary_writer(FLAGS.tensorboard_dir)\n\n with tf.Session(FLAGS.tf_master, graph=graph) as sess:\n # Make sure to re-initialize all underlying state.\n sess.run(tf.global_variables_initializer())\n trainer_lib.run_training(\n sess, trainers, annotator, evaluation.parser_summaries, pretrain_steps,\n train_steps, 
training_set, dev_set, dev_set, FLAGS.batch_size,\n summary_writer, FLAGS.report_every, builder.saver,\n FLAGS.checkpoint_filename)\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train a progressive GAN model.\n\nSee https://arxiv.org/abs/1710.10196 for details about the model.\n\nSee https://github.com/tkarras/progressive_growing_of_gans for the original\ntheano implementation.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport networks\nimport numpy as np\nimport tensorflow as tf\nfrom absl import logging\n\ntfgan = tf.contrib.gan\n\n\ndef make_train_sub_dir(stage_id, **kwargs):\n \"\"\"Returns the log directory for training stage `stage_id`.\"\"\"\n return os.path.join(kwargs['train_root_dir'], 'stage_{:05d}'.format(stage_id))\n\n\ndef make_resolution_schedule(**kwargs):\n \"\"\"Returns an object of `ResolutionSchedule`.\"\"\"\n return networks.ResolutionSchedule(\n start_resolutions=(kwargs['start_height'], kwargs['start_width']),\n scale_base=kwargs['scale_base'],\n num_resolutions=kwargs['num_resolutions'])\n\n\ndef get_stage_ids(**kwargs):\n \"\"\"Returns a list of stage ids.\n\n Args:\n **kwargs: A dictionary of\n 'train_root_dir': A string of root directory of training logs.\n 'num_resolutions': An integer of number of progressive resolutions.\n \"\"\"\n train_sub_dirs = [\n sub_dir for sub_dir in tf.gfile.ListDirectory(kwargs['train_root_dir'])\n if sub_dir.startswith('stage_')\n ]\n\n # If fresh start, start with start_stage_id = 0\n # If has been trained for n = len(train_sub_dirs) stages, start with the last\n # stage, i.e. start_stage_id = n - 1.\n start_stage_id = max(0, len(train_sub_dirs) - 1)\n\n return range(start_stage_id, get_total_num_stages(**kwargs))\n\n\ndef get_total_num_stages(**kwargs):\n \"\"\"Returns total number of training stages.\"\"\"\n return 2 * kwargs['num_resolutions'] - 1\n\n\ndef get_batch_size(stage_id, **kwargs):\n \"\"\"Returns batch size for each stage.\n\n It is expected that `len(batch_size_schedule) == num_resolutions`. Each stage\n corresponds to a resolution and hence a batch size. 
However if\n `len(batch_size_schedule) < num_resolutions`, pad `batch_size_schedule` in the\n beginning with the first batch size.\n\n Args:\n stage_id: An integer of training stage index.\n **kwargs: A dictionary of\n 'batch_size_schedule': A list of integer, each element is the batch size\n for the current training image resolution.\n 'num_resolutions': An integer of number of progressive resolutions.\n\n Returns:\n An integer batch size for the `stage_id`.\n \"\"\"\n batch_size_schedule = kwargs['batch_size_schedule']\n num_resolutions = kwargs['num_resolutions']\n if len(batch_size_schedule) < num_resolutions:\n batch_size_schedule = (\n [batch_size_schedule[0]] * (num_resolutions - len(batch_size_schedule))\n + batch_size_schedule)\n\n return int(batch_size_schedule[(stage_id + 1) // 2])\n\n\ndef get_stage_info(stage_id, **kwargs):\n \"\"\"Returns information for a training stage.\n\n Args:\n stage_id: An integer of training stage index.\n **kwargs: A dictionary of\n 'num_resolutions': An integer of number of progressive resolutions.\n 'stable_stage_num_images': An integer of number of training images in\n the stable stage.\n 'transition_stage_num_images': An integer of number of training images\n in the transition stage.\n 'total_num_images': An integer of total number of training images.\n\n Returns:\n A tuple of integers. The first entry is the number of blocks. The second\n entry is the accumulated total number of training images when stage\n `stage_id` is finished.\n\n Raises:\n ValueError: If `stage_id` is not in [0, total number of stages).\n \"\"\"\n total_num_stages = get_total_num_stages(**kwargs)\n if not (stage_id >= 0 and stage_id < total_num_stages):\n raise ValueError(\n '`stage_id` must be in [0, {0}), but instead was {1}'.format(\n total_num_stages, stage_id))\n\n # Even stage_id: stable training stage.\n # Odd stage_id: transition training stage.\n num_blocks = (stage_id + 1) // 2 + 1\n num_images = ((stage_id // 2 + 1) * kwargs['stable_stage_num_images'] + (\n (stage_id + 1) // 2) * kwargs['transition_stage_num_images'])\n\n total_num_images = kwargs['total_num_images']\n if stage_id >= total_num_stages - 1:\n num_images = total_num_images\n num_images = min(num_images, total_num_images)\n\n return num_blocks, num_images\n\n\ndef make_latent_vectors(num, **kwargs):\n \"\"\"Returns a batch of `num` random latent vectors.\"\"\"\n return tf.random_normal([num, kwargs['latent_vector_size']], dtype=tf.float32)\n\n\ndef make_interpolated_latent_vectors(num_rows, num_columns, **kwargs):\n \"\"\"Returns a batch of linearly interpolated latent vectors.\n\n Given two randomly generated latent vector za and zb, it can generate\n a row of `num_columns` interpolated latent vectors, i.e.\n [..., za + (zb - za) * i / (num_columns - 1), ...] where\n i = 0, 1, ..., `num_columns` - 1.\n\n This function produces `num_rows` such rows and returns a (flattened)\n batch of latent vectors with batch size `num_rows * num_columns`.\n\n Args:\n num_rows: An integer. Number of rows of interpolated latent vectors.\n num_columns: An integer. 
Number of interpolated latent vectors in each row.\n **kwargs: A dictionary of\n 'latent_vector_size': An integer of latent vector size.\n\n Returns:\n A `Tensor` of shape `[num_rows * num_columns, latent_vector_size]`.\n \"\"\"\n ans = []\n for _ in range(num_rows):\n z = tf.random_normal([2, kwargs['latent_vector_size']])\n r = tf.reshape(\n tf.to_float(tf.range(num_columns)) / (num_columns - 1), [-1, 1])\n dz = z[1] - z[0]\n ans.append(z[0] + tf.stack([dz] * num_columns) * r)\n return tf.concat(ans, axis=0)\n\n\ndef define_loss(gan_model, **kwargs):\n \"\"\"Defines progressive GAN losses.\n\n The generator and discriminator both use wasserstein loss. In addition,\n a small penalty term is added to the discriminator loss to prevent it getting\n too large.\n\n Args:\n gan_model: A `GANModel` namedtuple.\n **kwargs: A dictionary of\n 'gradient_penalty_weight': A float of gradient norm target for\n wasserstein loss.\n 'gradient_penalty_target': A float of gradient penalty weight for\n wasserstein loss.\n 'real_score_penalty_weight': A float of Additional penalty to keep\n the scores from drifting too far from zero.\n\n Returns:\n A `GANLoss` namedtuple.\n \"\"\"\n gan_loss = tfgan.gan_loss(\n gan_model,\n generator_loss_fn=tfgan.losses.wasserstein_generator_loss,\n discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,\n gradient_penalty_weight=kwargs['gradient_penalty_weight'],\n gradient_penalty_target=kwargs['gradient_penalty_target'],\n gradient_penalty_epsilon=0.0)\n\n real_score_penalty = tf.reduce_mean(\n tf.square(gan_model.discriminator_real_outputs))\n tf.summary.scalar('real_score_penalty', real_score_penalty)\n\n return gan_loss._replace(\n discriminator_loss=(\n gan_loss.discriminator_loss +\n kwargs['real_score_penalty_weight'] * real_score_penalty))\n\n\ndef define_train_ops(gan_model, gan_loss, **kwargs):\n \"\"\"Defines progressive GAN train ops.\n\n Args:\n gan_model: A `GANModel` namedtuple.\n gan_loss: A `GANLoss` namedtuple.\n **kwargs: A dictionary of\n 'adam_beta1': A float of Adam optimizer beta1.\n 'adam_beta2': A float of Adam optimizer beta2.\n 'generator_learning_rate': A float of generator learning rate.\n 'discriminator_learning_rate': A float of discriminator learning rate.\n\n Returns:\n A tuple of `GANTrainOps` namedtuple and a list variables tracking the state\n of optimizers.\n \"\"\"\n with tf.variable_scope('progressive_gan_train_ops') as var_scope:\n beta1, beta2 = kwargs['adam_beta1'], kwargs['adam_beta2']\n gen_opt = tf.train.AdamOptimizer(kwargs['generator_learning_rate'], beta1,\n beta2)\n dis_opt = tf.train.AdamOptimizer(kwargs['discriminator_learning_rate'],\n beta1, beta2)\n gan_train_ops = tfgan.gan_train_ops(gan_model, gan_loss, gen_opt, dis_opt)\n return gan_train_ops, tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope.name)\n\n\ndef add_generator_smoothing_ops(generator_ema, gan_model, gan_train_ops):\n \"\"\"Adds generator smoothing ops.\"\"\"\n with tf.control_dependencies([gan_train_ops.generator_train_op]):\n new_generator_train_op = generator_ema.apply(gan_model.generator_variables)\n\n gan_train_ops = gan_train_ops._replace(\n generator_train_op=new_generator_train_op)\n generator_vars_to_restore = generator_ema.variables_to_restore(\n gan_model.generator_variables)\n return gan_train_ops, generator_vars_to_restore\n\n\ndef build_model(stage_id, batch_size, real_images, **kwargs):\n \"\"\"Builds progressive GAN model.\n\n Args:\n stage_id: An integer of training stage index.\n batch_size: Number of training 
images in each minibatch.\n real_images: A 4D `Tensor` of NHWC format.\n **kwargs: A dictionary of\n 'start_height': An integer of start image height.\n 'start_width': An integer of start image width.\n 'scale_base': An integer of resolution multiplier.\n 'num_resolutions': An integer of number of progressive resolutions.\n 'stable_stage_num_images': An integer of number of training images in\n the stable stage.\n 'transition_stage_num_images': An integer of number of training images\n in the transition stage.\n 'total_num_images': An integer of total number of training images.\n 'kernel_size': Convolution kernel size.\n 'colors': Number of image channels.\n 'to_rgb_use_tanh_activation': Whether to apply tanh activation when\n output rgb.\n 'fmap_base': Base number of filters.\n 'fmap_decay': Decay of number of filters.\n 'fmap_max': Max number of filters.\n 'latent_vector_size': An integer of latent vector size.\n 'gradient_penalty_weight': A float of gradient norm target for\n wasserstein loss.\n 'gradient_penalty_target': A float of gradient penalty weight for\n wasserstein loss.\n 'real_score_penalty_weight': A float of Additional penalty to keep\n the scores from drifting too far from zero.\n 'adam_beta1': A float of Adam optimizer beta1.\n 'adam_beta2': A float of Adam optimizer beta2.\n 'generator_learning_rate': A float of generator learning rate.\n 'discriminator_learning_rate': A float of discriminator learning rate.\n\n Returns:\n An inernal object that wraps all information about the model.\n \"\"\"\n kernel_size = kwargs['kernel_size']\n colors = kwargs['colors']\n resolution_schedule = make_resolution_schedule(**kwargs)\n\n num_blocks, num_images = get_stage_info(stage_id, **kwargs)\n\n current_image_id = tf.train.get_or_create_global_step()\n current_image_id_inc_op = current_image_id.assign_add(batch_size)\n tf.summary.scalar('current_image_id', current_image_id)\n\n progress = networks.compute_progress(\n current_image_id, kwargs['stable_stage_num_images'],\n kwargs['transition_stage_num_images'], num_blocks)\n tf.summary.scalar('progress', progress)\n\n real_images = networks.blend_images(\n real_images, progress, resolution_schedule, num_blocks=num_blocks)\n\n def _num_filters_fn(block_id):\n \"\"\"Computes number of filters of block `block_id`.\"\"\"\n return networks.num_filters(block_id, kwargs['fmap_base'],\n kwargs['fmap_decay'], kwargs['fmap_max'])\n\n def _generator_fn(z):\n \"\"\"Builds generator network.\"\"\"\n return networks.generator(\n z,\n progress,\n _num_filters_fn,\n resolution_schedule,\n num_blocks=num_blocks,\n kernel_size=kernel_size,\n colors=colors,\n to_rgb_activation=(tf.tanh\n if kwargs['to_rgb_use_tanh_activation'] else None))\n\n def _discriminator_fn(x):\n \"\"\"Builds discriminator network.\"\"\"\n return networks.discriminator(\n x,\n progress,\n _num_filters_fn,\n resolution_schedule,\n num_blocks=num_blocks,\n kernel_size=kernel_size)\n\n ########## Define model.\n z = make_latent_vectors(batch_size, **kwargs)\n\n gan_model = tfgan.gan_model(\n generator_fn=lambda z: _generator_fn(z)[0],\n discriminator_fn=lambda x, unused_z: _discriminator_fn(x)[0],\n real_data=real_images,\n generator_inputs=z)\n\n ########## Define loss.\n gan_loss = define_loss(gan_model, **kwargs)\n\n ########## Define train ops.\n gan_train_ops, optimizer_var_list = define_train_ops(gan_model, gan_loss,\n **kwargs)\n gan_train_ops = gan_train_ops._replace(\n global_step_inc_op=current_image_id_inc_op)\n\n ########## Generator smoothing.\n generator_ema = 
tf.train.ExponentialMovingAverage(decay=0.999)\n gan_train_ops, generator_vars_to_restore = add_generator_smoothing_ops(\n generator_ema, gan_model, gan_train_ops)\n\n class Model(object):\n pass\n\n model = Model()\n model.stage_id = stage_id\n model.batch_size = batch_size\n model.resolution_schedule = resolution_schedule\n model.num_images = num_images\n model.num_blocks = num_blocks\n model.current_image_id = current_image_id\n model.progress = progress\n model.num_filters_fn = _num_filters_fn\n model.generator_fn = _generator_fn\n model.discriminator_fn = _discriminator_fn\n model.gan_model = gan_model\n model.gan_loss = gan_loss\n model.gan_train_ops = gan_train_ops\n model.optimizer_var_list = optimizer_var_list\n model.generator_ema = generator_ema\n model.generator_vars_to_restore = generator_vars_to_restore\n return model\n\n\ndef make_var_scope_custom_getter_for_ema(ema):\n \"\"\"Makes variable scope custom getter.\"\"\"\n def _custom_getter(getter, name, *args, **kwargs):\n var = getter(name, *args, **kwargs)\n ema_var = ema.average(var)\n return ema_var if ema_var else var\n return _custom_getter\n\n\ndef add_model_summaries(model, **kwargs):\n \"\"\"Adds model summaries.\n\n This function adds several useful summaries during training:\n - fake_images: A grid of fake images based on random latent vectors.\n - interp_images: A grid of fake images based on interpolated latent vectors.\n - real_images_blend: A grid of real images.\n - summaries for `gan_model` losses, variable distributions etc.\n\n Args:\n model: An model object having all information of progressive GAN model,\n e.g. the return of build_model().\n **kwargs: A dictionary of\n 'fake_grid_size': The fake image grid size for summaries.\n 'interp_grid_size': The latent space interpolated image grid size for\n summaries.\n 'colors': Number of image channels.\n 'latent_vector_size': An integer of latent vector size.\n \"\"\"\n fake_grid_size = kwargs['fake_grid_size']\n interp_grid_size = kwargs['interp_grid_size']\n colors = kwargs['colors']\n\n image_shape = list(model.resolution_schedule.final_resolutions)\n\n fake_batch_size = fake_grid_size**2\n fake_images_shape = [fake_batch_size] + image_shape + [colors]\n\n interp_batch_size = interp_grid_size**2\n interp_images_shape = [interp_batch_size] + image_shape + [colors]\n\n # When making prediction, use the ema smoothed generator vars.\n with tf.variable_scope(\n model.gan_model.generator_scope,\n reuse=True,\n custom_getter=make_var_scope_custom_getter_for_ema(model.generator_ema)):\n z_fake = make_latent_vectors(fake_batch_size, **kwargs)\n fake_images = model.gan_model.generator_fn(z_fake)\n fake_images.set_shape(fake_images_shape)\n\n z_interp = make_interpolated_latent_vectors(interp_grid_size,\n interp_grid_size, **kwargs)\n interp_images = model.gan_model.generator_fn(z_interp)\n interp_images.set_shape(interp_images_shape)\n\n tf.summary.image(\n 'fake_images',\n tfgan.eval.eval_utils.image_grid(\n fake_images,\n grid_shape=[fake_grid_size] * 2,\n image_shape=image_shape,\n num_channels=colors),\n max_outputs=1)\n\n tf.summary.image(\n 'interp_images',\n tfgan.eval.eval_utils.image_grid(\n interp_images,\n grid_shape=[interp_grid_size] * 2,\n image_shape=image_shape,\n num_channels=colors),\n max_outputs=1)\n\n real_grid_size = int(np.sqrt(model.batch_size))\n tf.summary.image(\n 'real_images_blend',\n tfgan.eval.eval_utils.image_grid(\n model.gan_model.real_data[:real_grid_size**2],\n grid_shape=(real_grid_size, real_grid_size),\n image_shape=image_shape,\n 
num_channels=colors),\n max_outputs=1)\n\n tfgan.eval.add_gan_model_summaries(model.gan_model)\n\n\ndef make_scaffold(stage_id, optimizer_var_list, **kwargs):\n \"\"\"Makes a custom scaffold.\n\n The scaffold\n - restores variables from the last training stage.\n - initializes new variables in the new block.\n\n Args:\n stage_id: An integer of stage id.\n optimizer_var_list: A list of optimizer variables.\n **kwargs: A dictionary of\n 'train_root_dir': A string of root directory of training logs.\n 'num_resolutions': An integer of number of progressive resolutions.\n 'stable_stage_num_images': An integer of number of training images in\n the stable stage.\n 'transition_stage_num_images': An integer of number of training images\n in the transition stage.\n 'total_num_images': An integer of total number of training images.\n\n Returns:\n A `Scaffold` object.\n \"\"\"\n # Holds variables that from the previous stage and need to be restored.\n restore_var_list = []\n prev_ckpt = None\n curr_ckpt = tf.train.latest_checkpoint(make_train_sub_dir(stage_id, **kwargs))\n if stage_id > 0 and curr_ckpt is None:\n prev_ckpt = tf.train.latest_checkpoint(\n make_train_sub_dir(stage_id - 1, **kwargs))\n\n num_blocks, _ = get_stage_info(stage_id, **kwargs)\n prev_num_blocks, _ = get_stage_info(stage_id - 1, **kwargs)\n\n # Holds variables created in the new block of the current stage. If the\n # current stage is a stable stage (except the initial stage), this list\n # will be empty.\n new_block_var_list = []\n for block_id in range(prev_num_blocks + 1, num_blocks + 1):\n new_block_var_list.extend(\n tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope='.*/{}/'.format(networks.block_name(block_id))))\n\n # Every variables that are 1) not for optimizers and 2) from the new block\n # need to be restored.\n restore_var_list = [\n var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n if var not in set(optimizer_var_list + new_block_var_list)\n ]\n\n # Add saver op to graph. This saver is used to restore variables from the\n # previous stage.\n saver_for_restore = tf.train.Saver(\n var_list=restore_var_list, allow_empty=True)\n # Add the op to graph that initializes all global variables.\n init_op = tf.global_variables_initializer()\n\n def _init_fn(unused_scaffold, sess):\n # First initialize every variables.\n sess.run(init_op)\n logging.info('\\n'.join([var.name for var in restore_var_list]))\n # Then overwrite variables saved in previous stage.\n if prev_ckpt is not None:\n saver_for_restore.restore(sess, prev_ckpt)\n\n # Use a dummy init_op here as all initialization is done in init_fn.\n return tf.train.Scaffold(init_op=tf.constant([]), init_fn=_init_fn)\n\n\ndef make_status_message(model):\n \"\"\"Makes a string `Tensor` of training status.\"\"\"\n return tf.string_join(\n [\n 'Starting train step: current_image_id: ',\n tf.as_string(model.current_image_id), ', progress: ',\n tf.as_string(model.progress), ', num_blocks: {}'.format(\n model.num_blocks), ', batch_size: {}'.format(model.batch_size)\n ],\n name='status_message')\n\n\ndef train(model, **kwargs):\n \"\"\"Trains progressive GAN for stage `stage_id`.\n\n Args:\n model: An model object having all information of progressive GAN model,\n e.g. the return of build_model().\n **kwargs: A dictionary of\n 'train_root_dir': A string of root directory of training logs.\n 'master': Name of the TensorFlow master to use.\n 'task': The Task ID. 
This value is used when training with multiple\n workers to identify each worker.\n 'save_summaries_num_images': Save summaries in this number of images.\n Returns:\n None.\n \"\"\"\n logging.info('stage_id=%d, num_blocks=%d, num_images=%d', model.stage_id,\n model.num_blocks, model.num_images)\n\n scaffold = make_scaffold(model.stage_id, model.optimizer_var_list, **kwargs)\n\n tfgan.gan_train(\n model.gan_train_ops,\n logdir=make_train_sub_dir(model.stage_id, **kwargs),\n get_hooks_fn=tfgan.get_sequential_train_hooks(tfgan.GANTrainSteps(1, 1)),\n hooks=[\n tf.train.StopAtStepHook(last_step=model.num_images),\n tf.train.LoggingTensorHook(\n [make_status_message(model)], every_n_iter=10)\n ],\n master=kwargs['master'],\n is_chief=(kwargs['task'] == 0),\n scaffold=scaffold,\n save_checkpoint_secs=600,\n save_summaries_steps=(kwargs['save_summaries_num_images']))\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the definition for inception v3 classification network.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom nets import inception_utils\n\nslim = tf.contrib.slim\ntrunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)\n\n\ndef inception_v3_base(inputs,\n final_endpoint='Mixed_7c',\n min_depth=16,\n depth_multiplier=1.0,\n scope=None):\n \"\"\"Inception model from http://arxiv.org/abs/1512.00567.\n\n Constructs an Inception v3 network from inputs to the given final endpoint.\n This method can construct the network up to the final inception block\n Mixed_7c.\n\n Note that the names of the layers in the paper do not correspond to the names\n of the endpoints registered by this function although they build the same\n network.\n\n Here is a mapping from the old_names to the new names:\n Old name | New name\n =======================================\n conv0 | Conv2d_1a_3x3\n conv1 | Conv2d_2a_3x3\n conv2 | Conv2d_2b_3x3\n pool1 | MaxPool_3a_3x3\n conv3 | Conv2d_3b_1x1\n conv4 | Conv2d_4a_3x3\n pool2 | MaxPool_5a_3x3\n mixed_35x35x256a | Mixed_5b\n mixed_35x35x288a | Mixed_5c\n mixed_35x35x288b | Mixed_5d\n mixed_17x17x768a | Mixed_6a\n mixed_17x17x768b | Mixed_6b\n mixed_17x17x768c | Mixed_6c\n mixed_17x17x768d | Mixed_6d\n mixed_17x17x768e | Mixed_6e\n mixed_8x8x1280a | Mixed_7a\n mixed_8x8x2048a | Mixed_7b\n mixed_8x8x2048b | Mixed_7c\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n final_endpoint: specifies the endpoint to construct the network up to. It\n can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',\n 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',\n 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',\n 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. 
Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n scope: Optional variable_scope.\n\n Returns:\n tensor_out: output tensor corresponding to the final_endpoint.\n end_points: a set of activations for external use, for example summaries or\n losses.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values,\n or depth_multiplier <= 0\n \"\"\"\n # end_points will collect relevant activations for external use, for example\n # summaries or losses.\n end_points = {}\n\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\n\n with tf.variable_scope(scope, 'InceptionV3', [inputs]):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='VALID'):\n # 299 x 299 x 3\n end_point = 'Conv2d_1a_3x3'\n net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # 149 x 149 x 32\n end_point = 'Conv2d_2a_3x3'\n net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # 147 x 147 x 32\n end_point = 'Conv2d_2b_3x3'\n net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # 147 x 147 x 64\n end_point = 'MaxPool_3a_3x3'\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # 73 x 73 x 64\n end_point = 'Conv2d_3b_1x1'\n net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # 73 x 73 x 80.\n end_point = 'Conv2d_4a_3x3'\n net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # 71 x 71 x 192.\n end_point = 'MaxPool_5a_3x3'\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # 35 x 35 x 192.\n\n # Inception blocks\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n # mixed: 35 x 35 x 256.\n end_point = 'Mixed_5b'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],\n scope='Conv2d_0b_5x5')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\n scope='Conv2d_0b_3x3')\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\n scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed_1: 35 x 35 x 288.\n end_point = 'Mixed_5c'\n with tf.variable_scope(end_point):\n with 
tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')\n branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],\n scope='Conv_1_0c_5x5')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(64), [1, 1],\n scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\n scope='Conv2d_0b_3x3')\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\n scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed_2: 35 x 35 x 288.\n end_point = 'Mixed_5d'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],\n scope='Conv2d_0b_5x5')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\n scope='Conv2d_0b_3x3')\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\n scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed_3: 17 x 17 x 768.\n end_point = 'Mixed_6a'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],\n scope='Conv2d_0b_3x3')\n branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_1x1')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed4: 17 x 17 x 768.\n end_point = 'Mixed_6b'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],\n scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\n scope='Conv2d_0c_7x1')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],\n scope='Conv2d_0b_7x1')\n branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],\n scope='Conv2d_0c_1x7')\n branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],\n scope='Conv2d_0d_7x1')\n 
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\n scope='Conv2d_0e_1x7')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed_5: 17 x 17 x 768.\n end_point = 'Mixed_6c'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],\n scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\n scope='Conv2d_0c_7x1')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\n scope='Conv2d_0b_7x1')\n branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],\n scope='Conv2d_0c_1x7')\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\n scope='Conv2d_0d_7x1')\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\n scope='Conv2d_0e_1x7')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # mixed_6: 17 x 17 x 768.\n end_point = 'Mixed_6d'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],\n scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\n scope='Conv2d_0c_7x1')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\n scope='Conv2d_0b_7x1')\n branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],\n scope='Conv2d_0c_1x7')\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\n scope='Conv2d_0d_7x1')\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\n scope='Conv2d_0e_1x7')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed_7: 17 x 17 x 768.\n end_point = 'Mixed_6e'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],\n scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\n scope='Conv2d_0c_7x1')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 
depth(192), [7, 1],\n scope='Conv2d_0b_7x1')\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\n scope='Conv2d_0c_1x7')\n branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],\n scope='Conv2d_0d_7x1')\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\n scope='Conv2d_0e_1x7')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed_8: 8 x 8 x 1280.\n end_point = 'Mixed_7a'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],\n scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\n scope='Conv2d_0c_7x1')\n branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n # mixed_9: 8 x 8 x 2048.\n end_point = 'Mixed_7b'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = tf.concat(axis=3, values=[\n slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),\n slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(\n branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')\n branch_2 = tf.concat(axis=3, values=[\n slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),\n slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(\n branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n\n # mixed_10: 8 x 8 x 2048.\n end_point = 'Mixed_7c'\n with tf.variable_scope(end_point):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = tf.concat(axis=3, values=[\n slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),\n slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(\n branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')\n branch_2 = tf.concat(axis=3, values=[\n slim.conv2d(branch_2, 
depth(384), [1, 3], scope='Conv2d_0c_1x3'),\n slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(\n branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n end_points[end_point] = net\n if end_point == final_endpoint: return net, end_points\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\n\n\ndef inception_v3(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.8,\n min_depth=16,\n depth_multiplier=1.0,\n prediction_fn=slim.softmax,\n spatial_squeeze=True,\n reuse=None,\n create_aux_logits=True,\n scope='InceptionV3',\n global_pool=False):\n \"\"\"Inception model from http://arxiv.org/abs/1512.00567.\n\n \"Rethinking the Inception Architecture for Computer Vision\"\n\n Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,\n Zbigniew Wojna.\n\n With the default arguments this method constructs the exact model defined in\n the paper. However, one can experiment with variations of the inception_v3\n network by changing arguments dropout_keep_prob, min_depth and\n depth_multiplier.\n\n The default image size used to train this network is 299x299.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes. If 0 or None, the logits layer\n is omitted and the input features to the logits layer (before dropout)\n are returned instead.\n is_training: whether is training or not.\n dropout_keep_prob: the percentage of activation values that are retained.\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n prediction_fn: a function to get predictions out of logits.\n spatial_squeeze: if True, logits is of shape [B, C], if false logits is of\n shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n create_aux_logits: Whether to create the auxiliary logits.\n scope: Optional variable_scope.\n global_pool: Optional boolean flag to control the avgpooling before the\n logits layer. If false or unset, pooling is done with a fixed window\n that reduces default-sized inputs to 1x1, while larger inputs lead to\n larger outputs. 
If true, any input size is pooled down to 1x1.\n\n Returns:\n net: a Tensor with the logits (pre-softmax activations) if num_classes\n is a non-zero integer, or the non-dropped-out input to the logits layer\n if num_classes is 0 or None.\n end_points: a dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: if 'depth_multiplier' is less than or equal to zero.\n \"\"\"\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\n\n with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n net, end_points = inception_v3_base(\n inputs, scope=scope, min_depth=min_depth,\n depth_multiplier=depth_multiplier)\n\n # Auxiliary Head logits\n if create_aux_logits and num_classes:\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n aux_logits = end_points['Mixed_6e']\n with tf.variable_scope('AuxLogits'):\n aux_logits = slim.avg_pool2d(\n aux_logits, [5, 5], stride=3, padding='VALID',\n scope='AvgPool_1a_5x5')\n aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],\n scope='Conv2d_1b_1x1')\n\n # Shape of feature map before the final layer.\n kernel_size = _reduced_kernel_size_for_small_input(\n aux_logits, [5, 5])\n aux_logits = slim.conv2d(\n aux_logits, depth(768), kernel_size,\n weights_initializer=trunc_normal(0.01),\n padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))\n aux_logits = slim.conv2d(\n aux_logits, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, weights_initializer=trunc_normal(0.001),\n scope='Conv2d_2b_1x1')\n if spatial_squeeze:\n aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')\n end_points['AuxLogits'] = aux_logits\n\n # Final pooling and prediction\n with tf.variable_scope('Logits'):\n if global_pool:\n # Global average pooling.\n net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')\n end_points['global_pool'] = net\n else:\n # Pooling with a fixed kernel size.\n kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])\n net = slim.avg_pool2d(net, kernel_size, padding='VALID',\n scope='AvgPool_1a_{}x{}'.format(*kernel_size))\n end_points['AvgPool_1a'] = net\n if not num_classes:\n return net, end_points\n # 1 x 1 x 2048\n net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')\n end_points['PreLogits'] = net\n # 2048\n logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='Conv2d_1c_1x1')\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n # 1000\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return logits, end_points\ninception_v3.default_image_size = 299\n\n\ndef _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n \"\"\"Define kernel size which is automatically reduced for small input.\n\n If the shape of the input images is unknown at graph construction time this\n function assumes that the input images are is large enough.\n\n Args:\n input_tensor: input tensor of size [batch_size, height, width, channels].\n kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]\n\n Returns:\n a tensor with the kernel size.\n\n TODO(jrru): Make this function work with unknown shapes. Theoretically, this\n can be done with the code below. 
Problems are two-fold: (1) If the shape was\n known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot\n handle tensors that define the kernel size.\n shape = tf.shape(input_tensor)\n return = tf.stack([tf.minimum(shape[1], kernel_size[0]),\n tf.minimum(shape[2], kernel_size[1])])\n\n \"\"\"\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size_out = kernel_size\n else:\n kernel_size_out = [min(shape[1], kernel_size[0]),\n min(shape[2], kernel_size[1])]\n return kernel_size_out\n\n\ninception_v3_arg_scope = inception_utils.inception_arg_scope\n",
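The inception_v3 source captured in this record clamps every convolution's channel count through a `depth` lambda and shrinks the final pooling kernel for small feature maps via `_reduced_kernel_size_for_small_input`. Below is a minimal pure-Python sketch of just that arithmetic; the helper names and sample values are illustrative assumptions, not part of the record, and no TensorFlow is needed to run it.

# Sketch of the channel-scaling and kernel-reduction arithmetic used above.
def depth(d, depth_multiplier=0.5, min_depth=16):
    # Scale a nominal channel count, but never drop below min_depth.
    return max(int(d * depth_multiplier), min_depth)

def reduced_kernel_size(feature_map_hw, kernel_size=(8, 8)):
    # Shrink the pooling kernel when the feature map is smaller than the
    # requested window, mirroring _reduced_kernel_size_for_small_input
    # for statically known shapes.
    h, w = feature_map_hw
    return [min(h, kernel_size[0]), min(w, kernel_size[1])]

if __name__ == "__main__":
    print(depth(32))                    # 16: clamped up to min_depth
    print(depth(192))                   # 96
    print(reduced_kernel_size((5, 5)))  # [5, 5]
    print(reduced_kernel_size((8, 8)))  # [8, 8]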
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.utils.config_util.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\nfrom google.protobuf import text_format\nfrom lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2\nfrom lstm_object_detection.utils import config_util\nfrom object_detection.protos import pipeline_pb2\n\n\ndef _write_config(config, config_path):\n \"\"\"Writes a config object to disk.\"\"\"\n config_text = text_format.MessageToString(config)\n with tf.gfile.Open(config_path, \"wb\") as f:\n f.write(config_text)\n\n\nclass ConfigUtilTest(tf.test.TestCase):\n\n def test_get_configs_from_pipeline_file(self):\n \"\"\"Test that proto configs can be read from pipeline config file.\"\"\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.model.ssd.num_classes = 10\n pipeline_config.train_config.batch_size = 32\n pipeline_config.train_input_reader.label_map_path = \"path/to/label_map\"\n pipeline_config.eval_config.num_examples = 20\n pipeline_config.eval_input_reader.add().queue_capacity = 100\n\n pipeline_config.Extensions[\n internal_pipeline_pb2.lstm_model].train_unroll_length = 5\n pipeline_config.Extensions[\n internal_pipeline_pb2.lstm_model].eval_unroll_length = 10\n\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n self.assertProtoEquals(pipeline_config.model, configs[\"model\"])\n self.assertProtoEquals(pipeline_config.train_config,\n configs[\"train_config\"])\n self.assertProtoEquals(pipeline_config.train_input_reader,\n configs[\"train_input_config\"])\n self.assertProtoEquals(pipeline_config.eval_config, configs[\"eval_config\"])\n self.assertProtoEquals(pipeline_config.eval_input_reader,\n configs[\"eval_input_configs\"])\n self.assertProtoEquals(\n pipeline_config.Extensions[internal_pipeline_pb2.lstm_model],\n configs[\"lstm_model\"])\n\n def test_create_pipeline_proto_from_configs(self):\n \"\"\"Tests that proto can be reconstructed from configs dictionary.\"\"\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.model.ssd.num_classes = 10\n pipeline_config.train_config.batch_size = 32\n pipeline_config.train_input_reader.label_map_path = \"path/to/label_map\"\n pipeline_config.eval_config.num_examples = 20\n pipeline_config.eval_input_reader.add().queue_capacity = 100\n\n pipeline_config.Extensions[\n internal_pipeline_pb2.lstm_model].train_unroll_length = 5\n pipeline_config.Extensions[\n internal_pipeline_pb2.lstm_model].eval_unroll_length = 10\n 
_write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n pipeline_config_reconstructed = (\n config_util.create_pipeline_proto_from_configs(configs))\n self.assertEqual(pipeline_config, pipeline_config_reconstructed)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Reusable model classes for FIVO.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sonnet as snt\nimport tensorflow as tf\nfrom fivo import nested_utils as nested\n\ntfd = tf.contrib.distributions\n\n\nclass ELBOTrainableSequenceModel(object):\n \"\"\"An abstract class for ELBO-trainable sequence models to extend.\n\n Because the ELBO, IWAE, and FIVO bounds all accept the same arguments,\n any model that is ELBO-trainable is also IWAE- and FIVO-trainable.\n \"\"\"\n\n def zero_state(self, batch_size, dtype):\n \"\"\"Returns the initial state of the model as a Tensor or tuple of Tensors.\n\n Args:\n batch_size: The batch size.\n dtype: The datatype to use for the state.\n \"\"\"\n raise NotImplementedError(\"zero_state not yet implemented.\")\n\n def set_observations(self, observations, seq_lengths):\n \"\"\"Sets the observations for the model.\n\n This method provides the model with all observed variables including both\n inputs and targets. It will be called before running any computations with\n the model that require the observations, e.g. training the model or\n computing bounds, and should be used to run any necessary preprocessing\n steps.\n\n Args:\n observations: A potentially nested set of Tensors containing\n all observations for the model, both inputs and targets. 
Typically\n a set of Tensors with shape [max_seq_len, batch_size, data_size].\n seq_lengths: A [batch_size] Tensor of ints encoding the length of each\n sequence in the batch (sequences can be padded to a common length).\n \"\"\"\n self.observations = observations\n self.max_seq_len = tf.reduce_max(seq_lengths)\n self.observations_ta = nested.tas_for_tensors(\n observations, self.max_seq_len, clear_after_read=False)\n self.seq_lengths = seq_lengths\n\n def propose_and_weight(self, state, t):\n \"\"\"Propogates model state one timestep and computes log weights.\n\n This method accepts the current state of the model and computes the state\n for the next timestep as well as the incremental log weight of each\n element in the batch.\n\n Args:\n state: The current state of the model.\n t: A scalar integer Tensor representing the current timestep.\n Returns:\n next_state: The state of the model after one timestep.\n log_weights: A [batch_size] Tensor containing the incremental log weights.\n \"\"\"\n raise NotImplementedError(\"propose_and_weight not yet implemented.\")\n\nDEFAULT_INITIALIZERS = {\"w\": tf.contrib.layers.xavier_initializer(),\n \"b\": tf.zeros_initializer()}\n\n\nclass ConditionalNormalDistribution(object):\n \"\"\"A Normal distribution conditioned on Tensor inputs via a fc network.\"\"\"\n\n def __init__(self, size, hidden_layer_sizes, sigma_min=0.0,\n raw_sigma_bias=0.25, hidden_activation_fn=tf.nn.relu,\n initializers=None, name=\"conditional_normal_distribution\"):\n \"\"\"Creates a conditional Normal distribution.\n\n Args:\n size: The dimension of the random variable.\n hidden_layer_sizes: The sizes of the hidden layers of the fully connected\n network used to condition the distribution on the inputs.\n sigma_min: The minimum standard deviation allowed, a scalar.\n raw_sigma_bias: A scalar that is added to the raw standard deviation\n output from the fully connected network. Set to 0.25 by default to\n prevent standard deviations close to 0.\n hidden_activation_fn: The activation function to use on the hidden layers\n of the fully connected network.\n initializers: The variable intitializers to use for the fully connected\n network. The network is implemented using snt.nets.MLP so it must\n be a dictionary mapping the keys 'w' and 'b' to the initializers for\n the weights and biases. 
Defaults to xavier for the weights and zeros\n for the biases when initializers is None.\n name: The name of this distribution, used for sonnet scoping.\n \"\"\"\n self.sigma_min = sigma_min\n self.raw_sigma_bias = raw_sigma_bias\n self.name = name\n self.size = size\n if initializers is None:\n initializers = DEFAULT_INITIALIZERS\n self.fcnet = snt.nets.MLP(\n output_sizes=hidden_layer_sizes + [2*size],\n activation=hidden_activation_fn,\n initializers=initializers,\n activate_final=False,\n use_bias=True,\n name=name + \"_fcnet\")\n\n def condition(self, tensor_list, **unused_kwargs):\n \"\"\"Computes the parameters of a normal distribution based on the inputs.\"\"\"\n inputs = tf.concat(tensor_list, axis=1)\n outs = self.fcnet(inputs)\n mu, sigma = tf.split(outs, 2, axis=1)\n sigma = tf.maximum(tf.nn.softplus(sigma + self.raw_sigma_bias),\n self.sigma_min)\n return mu, sigma\n\n def __call__(self, *args, **kwargs):\n \"\"\"Creates a normal distribution conditioned on the inputs.\"\"\"\n mu, sigma = self.condition(args, **kwargs)\n return tf.contrib.distributions.Normal(loc=mu, scale=sigma)\n\n\nclass ConditionalBernoulliDistribution(object):\n \"\"\"A Bernoulli distribution conditioned on Tensor inputs via a fc net.\"\"\"\n\n def __init__(self, size, hidden_layer_sizes, hidden_activation_fn=tf.nn.relu,\n initializers=None, bias_init=0.0,\n name=\"conditional_bernoulli_distribution\"):\n \"\"\"Creates a conditional Bernoulli distribution.\n\n Args:\n size: The dimension of the random variable.\n hidden_layer_sizes: The sizes of the hidden layers of the fully connected\n network used to condition the distribution on the inputs.\n hidden_activation_fn: The activation function to use on the hidden layers\n of the fully connected network.\n initializers: The variable intiializers to use for the fully connected\n network. The network is implemented using snt.nets.MLP so it must\n be a dictionary mapping the keys 'w' and 'b' to the initializers for\n the weights and biases. Defaults to xavier for the weights and zeros\n for the biases when initializers is None.\n bias_init: A scalar or vector Tensor that is added to the output of the\n fully-connected network that parameterizes the mean of this\n distribution.\n name: The name of this distribution, used for sonnet scoping.\n \"\"\"\n self.bias_init = bias_init\n self.size = size\n if initializers is None:\n initializers = DEFAULT_INITIALIZERS\n self.fcnet = snt.nets.MLP(\n output_sizes=hidden_layer_sizes + [size],\n activation=hidden_activation_fn,\n initializers=initializers,\n activate_final=False,\n use_bias=True,\n name=name + \"_fcnet\")\n\n def condition(self, tensor_list):\n \"\"\"Computes the p parameter of the Bernoulli distribution.\"\"\"\n inputs = tf.concat(tensor_list, axis=1)\n return self.fcnet(inputs) + self.bias_init\n\n def __call__(self, *args):\n p = self.condition(args)\n return tf.contrib.distributions.Bernoulli(logits=p)\n\n\nclass NormalApproximatePosterior(ConditionalNormalDistribution):\n \"\"\"A Normally-distributed approx. 
posterior with res_q parameterization.\"\"\"\n\n def __init__(self, size, hidden_layer_sizes, sigma_min=0.0,\n raw_sigma_bias=0.25, hidden_activation_fn=tf.nn.relu,\n initializers=None, smoothing=False,\n name=\"conditional_normal_distribution\"):\n super(NormalApproximatePosterior, self).__init__(\n size, hidden_layer_sizes, sigma_min=sigma_min,\n raw_sigma_bias=raw_sigma_bias,\n hidden_activation_fn=hidden_activation_fn, initializers=initializers,\n name=name)\n self.smoothing = smoothing\n\n def condition(self, tensor_list, prior_mu, smoothing_tensors=None):\n \"\"\"Generates the mean and variance of the normal distribution.\n\n Args:\n tensor_list: The list of Tensors to condition on. Will be concatenated and\n fed through a fully connected network.\n prior_mu: The mean of the prior distribution associated with this\n approximate posterior. Will be added to the mean produced by\n this approximate posterior, in res_q fashion.\n smoothing_tensors: A list of Tensors. If smoothing is True, these Tensors\n will be concatenated with the tensors in tensor_list.\n Returns:\n mu: The mean of the approximate posterior.\n sigma: The standard deviation of the approximate posterior.\n \"\"\"\n if self.smoothing:\n tensor_list.extend(smoothing_tensors)\n mu, sigma = super(NormalApproximatePosterior, self).condition(tensor_list)\n return mu + prior_mu, sigma\n\n\nclass NonstationaryLinearDistribution(object):\n \"\"\"A set of loc-scale distributions that are linear functions of inputs.\n\n This class defines a series of location-scale distributions such that\n the means are learnable linear functions of the inputs and the log variances\n are learnable constants. The functions and log variances are different across\n timesteps, allowing the distributions to be nonstationary.\n \"\"\"\n\n def __init__(self,\n num_timesteps,\n inputs_per_timestep=None,\n outputs_per_timestep=None,\n initializers=None,\n variance_min=0.0,\n output_distribution=tfd.Normal,\n dtype=tf.float32):\n \"\"\"Creates a NonstationaryLinearDistribution.\n\n Args:\n num_timesteps: The number of timesteps, i.e. the number of distributions.\n inputs_per_timestep: A list of python ints, the dimension of inputs to the\n linear function at each timestep. If not provided, the dimension at each\n timestep is assumed to be 1.\n outputs_per_timestep: A list of python ints, the dimension of the output\n distribution at each timestep. If not provided, the dimension at each\n timestep is assumed to be 1.\n initializers: A dictionary containing intializers for the variables. The\n initializer under the key 'w' is used for the weights in the linear\n function and the initializer under the key 'b' is used for the biases.\n Defaults to xavier initialization for the weights and zeros for the\n biases.\n variance_min: Python float, the minimum variance of each distribution.\n output_distribution: A locatin-scale subclass of tfd.Distribution that\n defines the output distribution, e.g. 
Normal.\n dtype: The dtype of the weights and biases.\n \"\"\"\n if not initializers:\n initializers = DEFAULT_INITIALIZERS\n if not inputs_per_timestep:\n inputs_per_timestep = [1] * num_timesteps\n if not outputs_per_timestep:\n outputs_per_timestep = [1] * num_timesteps\n self.num_timesteps = num_timesteps\n self.variance_min = variance_min\n self.initializers = initializers\n self.dtype = dtype\n self.output_distribution = output_distribution\n\n def _get_variables_ta(shapes, name, initializer, trainable=True):\n \"\"\"Creates a sequence of variables and stores them in a TensorArray.\"\"\"\n # Infer shape if all shapes are equal.\n first_shape = shapes[0]\n infer_shape = all(shape == first_shape for shape in shapes)\n ta = tf.TensorArray(\n dtype=dtype, size=len(shapes), dynamic_size=False,\n clear_after_read=False, infer_shape=infer_shape)\n for t, shape in enumerate(shapes):\n var = tf.get_variable(\n name % t, shape=shape, initializer=initializer, trainable=trainable)\n ta = ta.write(t, var)\n return ta\n\n bias_shapes = [[num_outputs] for num_outputs in outputs_per_timestep]\n self.log_variances = _get_variables_ta(\n bias_shapes, \"proposal_log_variance_%d\", initializers[\"b\"])\n self.mean_biases = _get_variables_ta(\n bias_shapes, \"proposal_b_%d\", initializers[\"b\"])\n weight_shapes = zip(inputs_per_timestep, outputs_per_timestep)\n self.mean_weights = _get_variables_ta(\n weight_shapes, \"proposal_w_%d\", initializers[\"w\"])\n self.shapes = tf.TensorArray(\n dtype=tf.int32, size=num_timesteps,\n dynamic_size=False, clear_after_read=False).unstack(weight_shapes)\n\n def __call__(self, t, inputs):\n \"\"\"Computes the distribution at timestep t.\n\n Args:\n t: Scalar integer Tensor, the current timestep. Must be in\n [0, num_timesteps).\n inputs: The inputs to the linear function parameterizing the mean of\n the current distribution. A Tensor of shape [batch_size, num_inputs_t].\n Returns:\n A tfd.Distribution subclass representing the distribution at timestep t.\n \"\"\"\n b = self.mean_biases.read(t)\n w = self.mean_weights.read(t)\n shape = self.shapes.read(t)\n w = tf.reshape(w, shape)\n b = tf.reshape(b, [shape[1], 1])\n log_variance = self.log_variances.read(t)\n scale = tf.sqrt(tf.maximum(tf.exp(log_variance), self.variance_min))\n loc = tf.matmul(w, inputs, transpose_a=True) + b\n return self.output_distribution(loc=loc, scale=scale)\n\n\ndef encode_all(inputs, encoder):\n \"\"\"Encodes a timeseries of inputs with a time independent encoder.\n\n Args:\n inputs: A [time, batch, feature_dimensions] tensor.\n encoder: A network that takes a [batch, features_dimensions] input and\n encodes the input.\n Returns:\n A [time, batch, encoded_feature_dimensions] output tensor.\n \"\"\"\n input_shape = tf.shape(inputs)\n num_timesteps, batch_size = input_shape[0], input_shape[1]\n reshaped_inputs = tf.reshape(inputs, [-1, inputs.shape[-1]])\n inputs_encoded = encoder(reshaped_inputs)\n inputs_encoded = tf.reshape(inputs_encoded,\n [num_timesteps, batch_size, encoder.output_size])\n return inputs_encoded\n\n\ndef ta_for_tensor(x, **kwargs):\n \"\"\"Creates a TensorArray for the input tensor.\"\"\"\n return tf.TensorArray(\n x.dtype, tf.shape(x)[0], dynamic_size=False, **kwargs).unstack(x)\n",
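The FIVO model source above parameterizes its conditional Normal by splitting the MLP output into a mean half and a raw-scale half, then mapping the scale through a softplus with a bias and a floor. The NumPy sketch below reproduces only that transform; the function name, the sample batch values, and the use of a numerically stable softplus are assumptions for illustration.

import numpy as np

def normal_params(fc_out, raw_sigma_bias=0.25, sigma_min=0.0):
    # Split the fully connected output into mean and raw-scale halves, then
    # force the scale positive, as in ConditionalNormalDistribution.condition.
    mu, raw_sigma = np.split(fc_out, 2, axis=-1)
    softplus = np.logaddexp(0.0, raw_sigma + raw_sigma_bias)  # log(1 + exp(x)), overflow-safe
    sigma = np.maximum(softplus, sigma_min)
    return mu, sigma

if __name__ == "__main__":
    fake_fc_out = np.array([[0.1, -0.3, 0.0, 2.0]])  # batch of 1, output size 2 * size with size=2
    mu, sigma = normal_params(fake_fc_out)
    print(mu)     # first half of the network output, unchanged
    print(sigma)  # strictly positive scales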
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport gin.tf\nimport tensorflow as tf\nfrom environments.ant_maze_env import AntMazeEnv\nfrom environments.point_maze_env import PointMazeEnv\nfrom tf_agents.environments import gym_wrapper\nfrom tf_agents.environments import tf_py_environment\n\n\[email protected]\ndef create_maze_env(env_name=None, top_down_view=False):\n n_bins = 0\n manual_collision = False\n if env_name.startswith('Ego'):\n n_bins = 8\n env_name = env_name[3:]\n if env_name.startswith('Ant'):\n cls = AntMazeEnv\n env_name = env_name[3:]\n maze_size_scaling = 8\n elif env_name.startswith('Point'):\n cls = PointMazeEnv\n manual_collision = True\n env_name = env_name[5:]\n maze_size_scaling = 4\n else:\n assert False, 'unknown env %s' % env_name\n\n maze_id = None\n observe_blocks = False\n put_spin_near_agent = False\n if env_name == 'Maze':\n maze_id = 'Maze'\n elif env_name == 'Push':\n maze_id = 'Push'\n elif env_name == 'Fall':\n maze_id = 'Fall'\n elif env_name == 'Block':\n maze_id = 'Block'\n put_spin_near_agent = True\n observe_blocks = True\n elif env_name == 'BlockMaze':\n maze_id = 'BlockMaze'\n put_spin_near_agent = True\n observe_blocks = True\n else:\n raise ValueError('Unknown maze environment %s' % env_name)\n\n gym_mujoco_kwargs = {\n 'maze_id': maze_id,\n 'n_bins': n_bins,\n 'observe_blocks': observe_blocks,\n 'put_spin_near_agent': put_spin_near_agent,\n 'top_down_view': top_down_view,\n 'manual_collision': manual_collision,\n 'maze_size_scaling': maze_size_scaling\n }\n gym_env = cls(**gym_mujoco_kwargs)\n gym_env.reset()\n wrapped_env = gym_wrapper.GymWrapper(gym_env)\n return wrapped_env\n\n\nclass TFPyEnvironment(tf_py_environment.TFPyEnvironment):\n\n def __init__(self, *args, **kwargs):\n super(TFPyEnvironment, self).__init__(*args, **kwargs)\n\n def start_collect(self):\n pass\n\n def current_obs(self):\n time_step = self.current_time_step()\n return time_step.observation[0] # For some reason, there is an extra dim.\n\n def step(self, actions):\n actions = tf.expand_dims(actions, 0)\n next_step = super(TFPyEnvironment, self).step(actions)\n return next_step.is_last()[0], next_step.reward[0], next_step.discount[0]\n\n def reset(self):\n return super(TFPyEnvironment, self).reset()\n"
] | [
[
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.reduce_mean",
"tensorflow.sigmoid",
"tensorflow.placeholder",
"numpy.sin",
"numpy.concatenate",
"tensorflow.global_variables_initializer",
"numpy.random.normal",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.variable_scope",
"numpy.random.uniform",
"numpy.array",
"tensorflow.summary.scalar",
"tensorflow.app.run"
],
[
"tensorflow.data.Dataset.from_tensors",
"tensorflow.compat.v1.enable_v2_behavior",
"tensorflow.test.main",
"tensorflow.train.get_or_create_global_step",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.contrib.eager.num_gpus",
"tensorflow.contrib.eager.defun",
"tensorflow.random_uniform",
"tensorflow.random_normal"
],
[
"tensorflow.layers.conv1d",
"tensorflow.metrics.accuracy",
"tensorflow.FixedLenFeature",
"tensorflow.gfile.GFile",
"tensorflow.reduce_sum",
"tensorflow.layers.dropout",
"tensorflow.estimator.RunConfig",
"tensorflow.estimator.train_and_evaluate",
"tensorflow.layers.batch_normalization",
"tensorflow.layers.dense",
"tensorflow.squeeze",
"tensorflow.train.get_global_step",
"tensorflow.parse_single_example",
"tensorflow.argmax",
"tensorflow.app.run",
"tensorflow.contrib.cudnn_rnn.CudnnLSTM",
"tensorflow.estimator.Estimator",
"tensorflow.shape",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.zeros_like",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.VarLenFeature",
"tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn",
"tensorflow.transpose",
"tensorflow.slice",
"tensorflow.contrib.rnn.DropoutWrapper",
"tensorflow.reshape",
"tensorflow.data.TFRecordDataset.list_files"
],
[
"tensorflow.Graph",
"tensorflow.logging.info",
"tensorflow.gfile.FastGFile",
"tensorflow.app.run"
],
[
"tensorflow.constant",
"tensorflow.local_variables_initializer",
"numpy.asarray",
"numpy.arange",
"tensorflow.test.main",
"tensorflow.contrib.slim.queues.QueueRunners"
],
[
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.train.AdamOptimizer",
"sklearn.preprocessing.MinMaxScaler"
],
[
"tensorflow.zeros",
"tensorflow.stack",
"numpy.max",
"numpy.zeros_like",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.Variable",
"numpy.stack",
"tensorflow.Session",
"tensorflow.square",
"numpy.load",
"tensorflow.norm",
"tensorflow.fill",
"numpy.min",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.zeros_like",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.summary.FileWriter",
"numpy.maximum",
"tensorflow.maximum",
"numpy.linalg.norm"
],
[
"tensorflow.device",
"tensorflow.Graph",
"tensorflow.get_collection",
"tensorflow.global_variables",
"tensorflow.test.main",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.argmax",
"tensorflow.random_uniform"
],
[
"tensorflow.flags.DEFINE_boolean",
"tensorflow.flags.DEFINE_string",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"matplotlib.pyplot.get_current_fig_manager",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.show",
"numpy.zeros",
"tensorflow.flags.DEFINE_integer",
"tensorflow.app.run"
],
[
"tensorflow.constant",
"tensorflow.summary.image",
"tensorflow.contrib.training.SummaryAtEndHook",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"tensorflow.contrib.training.StopAfterNEvalsHook",
"tensorflow.random_normal"
],
[
"tensorflow.Graph",
"tensorflow.global_variables",
"tensorflow.test.main",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"numpy.random.uniform",
"tensorflow.argmax",
"tensorflow.random_uniform"
],
[
"tensorflow.clip_by_value",
"tensorflow.assign_add",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.assign",
"tensorflow.identity",
"tensorflow.expand_dims",
"tensorflow.equal",
"tensorflow.zeros_like",
"tensorflow.logging.info",
"tensorflow.name_scope",
"tensorflow.to_float",
"numpy.array",
"tensorflow.py_func"
],
[
"tensorflow.train.latest_checkpoint",
"tensorflow.flags.DEFINE_string",
"tensorflow.gfile.GFile",
"numpy.save",
"tensorflow.train.NewCheckpointReader",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.gfile.MakeDirs",
"tensorflow.gfile.IsDirectory",
"tensorflow.app.run"
],
[
"tensorflow.app.flags.DEFINE_string",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.gfile.FastGFile",
"tensorflow.app.run"
],
[
"tensorflow.python.platform.googletest.GetTempDir",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.python.platform.googletest.main"
],
[
"tensorflow.gfile.Open",
"tensorflow.gfile.Exists",
"tensorflow.decode_raw",
"tensorflow.cast",
"tensorflow.reshape",
"numpy.dtype",
"tensorflow.data.FixedLengthRecordDataset",
"tensorflow.data.Dataset.zip",
"tensorflow.gfile.MakeDirs"
],
[
"tensorflow.python.platform.tf_logging.set_verbosity",
"tensorflow.Graph",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"tensorflow.Session",
"tensorflow.python.platform.gfile.IsDirectory"
],
[
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.stack",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.get_collection",
"tensorflow.train.get_or_create_global_step",
"tensorflow.square",
"tensorflow.train.Saver",
"tensorflow.as_string",
"tensorflow.gfile.ListDirectory",
"tensorflow.train.StopAtStepHook",
"tensorflow.global_variables_initializer",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.variable_scope",
"tensorflow.random_normal"
],
[
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.squeeze",
"tensorflow.truncated_normal_initializer",
"tensorflow.variable_scope"
],
[
"tensorflow.test.main",
"tensorflow.gfile.Open"
],
[
"tensorflow.reduce_max",
"tensorflow.matmul",
"tensorflow.concat",
"tensorflow.get_variable",
"tensorflow.shape",
"tensorflow.contrib.distributions.Bernoulli",
"tensorflow.TensorArray",
"tensorflow.zeros_initializer",
"tensorflow.reshape",
"tensorflow.exp",
"tensorflow.contrib.distributions.Normal",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.split",
"tensorflow.nn.softplus"
],
[
"tensorflow.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
samuelru/session-knn-ae | [
"c6232667dbe57f82391d487875b52f651ca08a21"
] | [
"ipython/3_Training_Predicting/prnn_cb12_train_predict.py"
] | [
"from keras.layers import Input, Dense, concatenate\nfrom keras.layers.recurrent import GRU\nfrom keras.utils import plot_model\nfrom keras.models import Model, load_model\nfrom keras.callbacks import ModelCheckpoint\nimport keras\nimport pandas as pd\nimport numpy as np\nimport keras.backend as K\nfrom keras.utils import to_categorical\nfrom keras.losses import categorical_crossentropy\nfrom multiprocessing import Pool, cpu_count\nimport pickle\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport os\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n\n\ndataset = \"cb12/\"\npath = \"../../data/\"\ninterim_path = path + dataset + \"interim/\"\nprocessed_path = path + dataset + \"processed/\"\nmodel_path = \"models/\"\nmodel_path_valid = \"models/valid/\"\n\n\n\ndef TOP1(y_true, y_pred):\n y1 = y_pred * y_true\n y2 = K.sum(y1, axis=1)[:, np.newaxis]\n y3 = y_true - y1\n return (K.sum(K.sigmoid(y_pred - y2)) + y3 * y3) / tf.cast(tf.shape(y_true)[0], tf.float32)\n\nloss = TOP1\n\ndef create_prnn_model(left_input_size, right_input_size, batch_size = 512, hidden_units = 100, o_activation='softmax', lr = 0.001): \n emb_size = 50\n size = emb_size\n\n # left input - item vector\n input_left = Input(batch_shape=(batch_size, 1, left_input_size), name='input_left')\n gru_left, gru_left_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_left')(input_left)\n\n # right input - feature vector\n input_right = Input(batch_shape=(batch_size, 1, right_input_size), name='input_right')\n gru_right, gru_right_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_right')(input_right)\n \n # merging both layers and creating the model\n merged = concatenate([gru_left, gru_right])\n #change softmax per another activation funciton?\n output = Dense(left_input_size, activation=o_activation, name='output')(merged)\n model = Model(inputs=[input_left, input_right], outputs=output, name='gru4rec')\n \n encoder = Model(inputs=[input_left, input_right], outputs=merged)\n\n # define model's optimizer\n #optimizer = optim.Optimizer(optimizer=self.optimizer, lr=self.lr)\n #opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n opt = keras.optimizers.Adagrad(lr=lr)\n \n # define model's loss function --> implement here the top1 loss function\n# loss_function = loss.LossFunction(loss_type=self.loss_function)\n #model.compile(loss=loss_function, optimizer=opt, metrics=['accuracy'])\n \n model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])\n filepath = model_path_valid + 'prnn_cb12_checkpoint.h5'\n checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=2, save_best_only=True, mode='min')\n callbacks_list = []\n model.summary()\n #plot_model(model, show_shapes=True, to_file='rnn-structure.png')\n return model, encoder\n\ndef get_states(model):\n #return the actual states of the layers\n return [K.get_value(s) for s,_ in model.state_updates]\n\n\ndef freeze_layer(model, layer_name, lr):\n if layer_name == 'gru_left':\n # gru left layer will not be trained this mini batch\n model.get_layer(layer_name).trainable = False\n # but gru right will\n model.get_layer('gru_right').trainable = True\n elif layer_name == 'gru_right':\n # gru right layer will not be trained this mini batch\n model.get_layer(layer_name).trainable = False\n # but gru left will\n model.get_layer('gru_left').trainable = True\n else:\n raise NotImplementedError\n \n # opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, 
decay=0.0, amsgrad=False)\n opt = keras.optimizers.Adagrad(lr=lr)\n model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])\n return model\n\n\nclass SessionDataset:\n \"\"\"Credit to yhs-968/pyGRU4REC.\"\"\" \n def __init__(self, data, sep='\\t', session_key='session_id', item_key='item_id', time_key='created_at', n_samples=-1, itemmap=None, time_sort=False):\n \"\"\"\n Args:\n path: path of the csv file\n sep: separator for the csv\n session_key, item_key, time_key: name of the fields corresponding to the sessions, items, time\n n_samples: the number of samples to use. If -1, use the whole dataset.\n itemmap: mapping between item IDs and item indices\n time_sort: whether to sort the sessions by time or not\n \"\"\"\n self.df = data\n self.session_key = session_key\n self.item_key = item_key\n self.time_key = time_key\n self.time_sort = time_sort\n self.add_item_indices(itemmap=itemmap)\n self.df.sort_values([session_key, time_key], inplace=True)\n\n # Sort the df by time, and then by session ID. That is, df is sorted by session ID and\n # clicks within a session are next to each other, where the clicks within a session are time-ordered.\n\n self.click_offsets = self.get_click_offsets() \n #array of the positions where there is a change of session. \n #len = len(session_idx_arr) + 1\n \n self.session_idx_arr = self.order_session_idx() \n #array of sessions [0 1 2 3 4 .... n-1]\n \n def get_click_offsets(self):\n \"\"\"\n Return the offsets of the beginning clicks of each session IDs,\n where the offset is calculated against the first click of the first session ID.\n \"\"\"\n offsets = np.zeros(self.df[self.session_key].nunique() + 1, dtype=np.int32)\n # group & sort the df by session_key and get the offset values\n offsets[1:] = self.df.groupby(self.session_key).size().cumsum()\n return offsets\n\n def order_session_idx(self):\n \"\"\" Order the session indices \"\"\"\n if self.time_sort:\n # starting time for each sessions, sorted by session IDs\n sessions_start_time = self.df.groupby(self.session_key)[self.time_key].min().values\n # order the session indices by session starting times\n session_idx_arr = np.argsort(sessions_start_time)\n else:\n session_idx_arr = np.arange(self.df[self.session_key].nunique())\n return session_idx_arr\n \n def add_item_indices(self, itemmap=None):\n \"\"\" \n Add item index column named \"item_idx\" to the df\n Args:\n itemmap (pd.DataFrame): mapping between the item Ids and indices\n \"\"\"\n if itemmap is None:\n item_ids = self.df[self.item_key].unique() # unique item ids\n item2idx = pd.Series(data=np.arange(len(item_ids)),\n index=item_ids)\n itemmap = pd.DataFrame({self.item_key:item_ids,\n 'item_idx':item2idx[item_ids].values})\n \n self.itemmap = itemmap\n self.df = pd.merge(self.df, self.itemmap, on=self.item_key, how='inner')\n \n @property \n def items(self):\n return self.itemmap.item_id.unique()\n\n\n\n\nclass SessionDataLoader:\n \"\"\"Credit to yhs-968/pyGRU4REC.\"\"\" \n def __init__(self, dataset, batch_size):\n \"\"\"\n A class for creating session-parallel mini-batches.\n Args:\n dataset (SessionDataset): the session dataset to generate the batches from\n batch_size (int): size of the batch\n \"\"\"\n self.dataset = dataset\n self.batch_size = batch_size\n self.done_sessions_counter = 0\n \n def __iter__(self):\n \"\"\" Returns the iterator for producing session-parallel training mini-batches.\n Yields:\n input (B,): Item indices that will be encoded as one-hot vectors later.\n target (B,): a Variable that stores the target item 
indices\n masks: Numpy array indicating the positions of the sessions to be terminated\n \"\"\"\n\n df = self.dataset.df\n \n session_key='session_id'\n item_key='item_id'\n time_key='created_at'\n self.n_items = df[item_key].nunique()\n click_offsets = self.dataset.click_offsets\n #print(click_offsets)\n session_idx_arr = self.dataset.session_idx_arr\n #print(session_idx_arr)\n \n iters = np.arange(self.batch_size)\n #iters = np.arange(1)\n\n maxiter = iters.max()\n \n start = click_offsets[session_idx_arr[iters]]\n end = click_offsets[session_idx_arr[iters] + 1]\n #print(start)\n #print(end)\n mask = [] # indicator for the sessions to be terminated\n finished = False \n\n while not finished:\n #minimum lenght of all the sessions\n minlen = (end - start).min()\n # Item indices (for embedding) for clicks where the first sessions start\n idx_target = df.item_idx.values[start]\n for i in range(minlen - 1):\n # Build inputs & targets\n idx_input = idx_target\n idx_target = df.item_idx.values[start + i + 1]\n inp = idx_input\n target = idx_target\n yield inp, target, mask\n \n # click indices where a particular session meets second-to-last element\n start = start + (minlen - 1)\n # see if how many sessions should terminate\n mask = np.arange(len(iters))[(end - start) <= 1]\n self.done_sessions_counter = len(mask)\n for idx in mask:\n maxiter += 1\n if maxiter >= len(click_offsets) - 1:\n finished = True\n break\n # update the next starting/ending point\n iters[idx] = maxiter\n start[idx] = click_offsets[session_idx_arr[maxiter]]\n end[idx] = click_offsets[session_idx_arr[maxiter] + 1]\n \n\n\n\ndef train_prnn(model, lr, loader, layer_freezing_enabled = False, num_epochs = 10):\n for epoch in range(0, num_epochs):\n print(\"Epoch: \" + str(epoch+1))\n epoch_loss = 0 \n\n i = 0\n for feat, target, mask in loader:\n #feat = np array size BATCH_SIZE with the item indexes of the first items of the first BATCH_SIZE sessions\n #comvert feat to an array size (BATCH_SIZE, 26723) of one hot encoding the indes with loader.n_items\n\n input_oh = to_categorical(feat, num_classes=loader.n_items)\n #convert from shape (BATCH_SIZE, 26723) to (BATCH_SIZE, 1, 26723)\n input_oh = np.expand_dims(input_oh, axis=1) \n\n # with the argmax function you get back again the feat/target np array (arg_input = feat)\n ### arg_input = np.argmax(to_categorical(feat, num_classes=loader.n_items), axis=1)\n ### arg_output = np.argmax(to_categorical(target, num_classes=loader.n_items), axis=1)\n input_feature = np.array([])\n\n for line in feat:\n #result = int(mapitem[(mapitem.item_idx == line)].item_id.values)\n result = str(mapitem[(mapitem.item_idx == line)].item_id.values[0])\n #print(result)\n \n # use empty feature vec if missing\n feature_vector = empty_feature_vec\n if result in item_encodings.keys():\n feature_vector = item_encodings[result]\n \n input_feature = np.append(input_feature, feature_vector)\n\n input_feature = input_feature.reshape(batch_size, 1, feature_size)\n\n #target = np array size BATCH_SIZE with the item indexes of the TARGET items of the feat array items\n target_oh = to_categorical(target, num_classes=loader.n_items)\n\n #calculate the loss between the input and the expected output\n\n if layer_freezing_enabled:\n if i % 2 is 0:\n model = freeze_layer(model, 'gru_left', lr = lr)\n else:\n model = freeze_layer(model, 'gru_right', lr = lr)\n\n tr_loss = model.train_on_batch([input_oh, input_feature], target_oh)\n epoch_loss += tr_loss[0]\n\n i = i + 1\n print(\"Epoch loss: \" + str(epoch_loss))\n 
return model\n\n\n\n# # Set data for final training\n\n# set data\n\ntrain_path = '../../data/' + dataset + 'processed/train_14d.csv'\ntrain = pd.read_csv(train_path, sep='\\t')[['session_id', 'item_id', 'created_at']]\n\ninteractions = pd.read_csv('../../data/' + dataset + 'interim/interactions.csv', header=0, sep='\\t')\nitems = pd.read_csv('../../data/' + dataset + 'interim/items.csv', header=0, sep='\\t')\nview_fields = [\"item_id\", \"state\", \"ReqTopic\", \"DescTopic\", \"TitTopic\"]\ncommon_items = items.merge(interactions, on=['item_id'])[view_fields].drop_duplicates()\n\nitem_count = len(train['item_id'].unique())\nprint(item_count)\nsession_count = len(train['created_at'].unique())\nprint(len(common_items))\n\n# CB12 items need to be converted to dummies\n\ncommon = common_items\n\n\ncommon[\"item_id\"] = common[\"item_id\"].astype('str')\ncommon[\"DescTopic\"] = common[\"DescTopic\"].astype('str')\ncommon[\"TitTopic\"] = common[\"TitTopic\"].astype('str')\ncommon[\"ReqTopic\"] = common[\"ReqTopic\"].astype('str')\n\ndf2 = pd.DataFrame(index=common.index)\ns1 = pd.get_dummies(common[\"state\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"state\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\ns1 = pd.get_dummies(common[\"ReqTopic\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"ReqTopic\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\ndf2 = df2.drop([\"state_\", \"ReqTopic_\"], axis=1, errors=\"ignore\")\n\ns1 = pd.get_dummies(common[\"DescTopic\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"DescTopic\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\n\ns1 = pd.get_dummies(common[\"TitTopic\"].fillna(\"\").str.split(\",\").apply(pd.Series).stack(), prefix=\"TitTopic\").sum(level=0)\ndf2 = pd.concat([df2, s1], axis=1)\n\ndf2 = df2.drop([\"DescTopic_\", \"TitTopic_\"], axis=1, errors=\"ignore\")\n\n\ncommon = common.drop([\"state\", \"ReqTopic\", \"DescTopic\", \"TitTopic\"], axis=1)\ndf2 = pd.concat([common, df2], axis=1)\n\none_hot = df2\nprint(one_hot.shape)\n# number of content features per item\nfeature_size = one_hot.shape[1] - 1\n\nitem_encodings = {}\nfor index, row in one_hot.iterrows():\n item_id = row[\"item_id\"]\n item_encodings[item_id] = row.values[1:]\n\nprint(len(item_encodings))\n\nempty_feature_vec = np.zeros(feature_size, dtype=int)\n\n# load data\n\nbatch_size = 512\n\ntrain_dataset = SessionDataset(train)\nloader = SessionDataLoader(train_dataset, batch_size=batch_size)\nmapitem = loader.dataset.itemmap\n\n\n# # Train final model\n\n\n# In[ ]:\n\n\n# use best params\nls = 1000\nact = \"softmax\"\nlr = 0.001\n# define model\nmodel, encoder = create_prnn_model(item_count, feature_size, batch_size=batch_size, hidden_units = ls, o_activation = act, lr = lr)\n\n# train model\nmodel_name = \"cb12_prnn_a_\" + act + \"_ls_\" + str(ls) + \"_lr_\" + str(lr) + \".model2\"\nprint(\"Starting to train: \" + model_name)\n\nmodel = train_prnn(model, lr, loader)\n\npickle.dump(model, open(model_path + model_name, 'wb'), protocol=4)\nprint(\"Stored model in: \" + model_path + model_name)\n\n\n# # Generate predictions\n\ndef predict_function(sid, test_session, pr, item_idx_map, idx_item_map, cut_off=20, \n session_key='session_id', item_key='item_id', time_key='created_at'):\n test_session.sort_values([time_key], inplace=True)\n # get first and only session_id (as we grouped it before calling this method)\n session_id = test_session[session_key].unique()[0]\n\n log_columns = [\"session_id\", \"input_items\", 
\"input_count\", \"position\", \"remaining_items\", \"remaining_count\", \"predictions\"]\n log_df = pd.DataFrame(columns = log_columns)\n\n session_length = len(test_session)\n il = a = np.zeros((batch_size, 1, len(item_idx_map)))\n ir = a = np.zeros((batch_size, 1, 115))\n \n for i in range(session_length -1):\n # use current item as reference point (rest is for testing)\n current_item_id = test_session[item_key].values[i]\n\n item_vec = np.zeros(len(item_idx_map), dtype=int)\n item_idx = item_idx_map[current_item_id]\n item_vec[item_idx] = 1\n # set vector in batch input\n il[i, 0] = item_vec\n \n #item_features = item_encodings[current_item_id]\n \n # use empty feature vec if missing\n item_features = empty_feature_vec\n if current_item_id in item_encodings.keys():\n item_features = item_encodings[result]\n \n #item_features = item_features.reshape(1,1, len(item_features))\n ir[i, 0] = item_features\n \n # do batch prediction\n pred = model.predict([il, ir], batch_size=batch_size)\n \n # for every subsession prediction\n for i in range(session_length-1):\n preds = pred[i]\n topn_idx_preds = preds.argsort()[-cut_off:][::-1]\n \n predictions = []\n # for every recommended item index\n for item_idx in topn_idx_preds:\n pred_item = idx_item_map[item_idx]\n predictions.append(pred_item)\n \n current_input_set = test_session[item_key].values[:i+1]\n remaining_test_set = test_session[item_key].values[i+1:]\n \n position = \"MID\"\n if i == 0:\n position = \"FIRST\"\n if len(remaining_test_set) == 1:\n position = \"LAST\"\n \n log_df = log_df.append({\n \"session_id\": sid,\n \"input_items\": ','.join(map(str, current_input_set)),\n \"input_count\": len(current_input_set),\n \"position\": position,\n \"remaining_items\": ','.join(map(str, remaining_test_set)),\n \"remaining_count\": len(remaining_test_set),\n \"predictions\": ','.join(map(str, predictions))\n }, ignore_index=True) \n \n \n log_df['input_count'] = log_df['input_count'].astype(int)\n log_df['remaining_count'] = log_df['remaining_count'].astype(int)\n \n return log_df\n\n# In[ ]:\n\n\nimport keras.losses\nkeras.losses.TOP1 = TOP1\n\nprint(\"Preparing train data...\")\ntrain_dataset = SessionDataset(train)\nloader = SessionDataLoader(train_dataset, batch_size=batch_size)\n \n\ntest_path = '../../data/' + dataset + 'processed/test_14d.csv'\ntest = pd.read_csv(test_path, sep='\\t')[['session_id', 'item_id', 'created_at']]\ntest_dataset = SessionDataset(test)\ntest_generator = SessionDataLoader(test_dataset, batch_size=batch_size)\n\nsession_groups = test.groupby(\"session_id\")\nmapitem = loader.dataset.itemmap\n\nitem_idx_map = {}\nidx_item_map = {}\nfor index, row in mapitem.iterrows():\n item_id = row[\"item_id\"]\n item_idx = row[\"item_idx\"]\n item_idx_map[item_id] = item_idx\n idx_item_map[item_idx] = item_id\n\n \npredict_path = \"../../data/cb12/interim/predict/base/\"\n\n\nmodel_name = \"cb12_prnn_a_\" + act + \"_ls_\" + str(ls) + \"_lr_\" + str(lr) + \".model2\"\nmodel = pickle.load(open(model_path + model_name, 'rb'))\nprint(\"Loaded: \" + model_name)\nres_list = []\n# predict\nreport_freq = len(session_groups) // 5 \ncount = 0\nfor sid, session in session_groups:\n pred_df = predict_function(sid, session, model, item_idx_map, idx_item_map)\n res_list.append(pred_df)\n # reset states\n model.get_layer('gru_left').reset_states()\n model.get_layer('gru_right').reset_states()\n # print progress\n count += 1\n if count % report_freq == 0:\n print(\"Predicted for \" + str(count) + \" sessions. 
\" + str(len(session_groups) - count) + \" sessions to go.\" )\n# concat results\nres = pd.concat(res_list)\nres = res.reindex(columns = [\"session_id\", \"input_items\", \"input_count\", \"position\", \"remaining_items\", \"remaining_count\", \"predictions\"])\n\nres.to_csv(predict_path + \"test_14d_prnn2.csv\", sep='\\t')\n \nprint(\"Stored predictions: \" + predict_path + \"test_14d_prnn2.csv\")\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.merge",
"numpy.expand_dims",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.arange",
"pandas.DataFrame",
"numpy.append",
"tensorflow.compat.v1.shape",
"numpy.argsort",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
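The record above builds a pRNN session-based recommender around session-parallel mini-batches: click offsets mark where each session begins, and every batch slot walks one session until it ends and is refilled from the next one. A minimal sketch of that offset bookkeeping on a toy click log, assuming the same `session_id`/`item_id` column names (not part of the original record):

import numpy as np
import pandas as pd

# Toy click log: three sessions of different lengths, already time-ordered.
df = pd.DataFrame({
    "session_id": [1, 1, 1, 2, 2, 3, 3, 3, 3],
    "item_id":    ["a", "b", "c", "a", "d", "b", "c", "d", "a"],
})

# Offsets of the first click of each session, as in SessionDataset.get_click_offsets().
offsets = np.zeros(df["session_id"].nunique() + 1, dtype=np.int64)
offsets[1:] = df.groupby("session_id").size().cumsum()
print(offsets)                          # [0 3 5 9]

# One session-parallel step: each session's current click predicts its next click.
starts = offsets[:-1]
inp = df["item_id"].values[starts]      # current item per batch slot
tgt = df["item_id"].values[starts + 1]  # its successor in the same session
print(list(zip(inp, tgt)))              # [('a', 'b'), ('a', 'd'), ('b', 'c')]

The record's `SessionDataLoader.__iter__` generalizes this to a fixed-size batch of such cursors and reports sessions that run out through `mask`.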
MarcosVs98/candlestick-indicators | [
"5423b56751eead43569b15917d29519b4dd6f0e3"
] | [
"CandlestickIndicators.py"
] | [
"import logging\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nclass ChartIndicatorException(Exception):\n\tpass\n\n\nclass PlottingExeception(ChartIndicatorException):\n\tpass\n\n\nclass TraceCandlesException(ChartIndicatorException):\n\tpass\n\n\nclass ErrorImplementingIndicator(ChartIndicatorException):\n\tpass\n\n\nlog = logging.getLogger(\"candlestick-chart-indicator\")\n\n\nclass CandlestickChartIndicator(ABC):\n\t\"\"\"\n\tBase class responsible for the implementation of candlestick graphics, and their data.\n\n\tdetail:\n\t\tThis class implements a \"Chain of Responsibility\" design pattern.\n\t\thttps://en.wikipedia.org/wiki/Chain-of-responsibility_pattern.\n\t\"\"\"\n\[email protected]\n\tdef inicate(self):\n\t\tpass\n\n\nclass MA(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing a simple Moving Average that stops\n\tfilter out price fluctuations helping to identify trends.\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tma = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).mean()\n\t\t\ttrace_avg = go.Scatter(x=ma.index, y=MA, name='MA', line=dict(color='#BEBECF'), opacity=0.8)\n\t\t\tdata.append(trace_avg)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'ma' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass EMA(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing an exponential moving average\n\tEMA = Price today * K + EMA yesterday x (1-k) where K = 2 /(N+1)\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tk = (2 / (kwargs.get(\"days\", 21) + 1))\n\t\t\tma = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).mean()\n\t\t\tema_data = pd.DataFrame(index=ma.index)\n\t\t\tema_data['PRICE'] = data_frame['close']\n\t\t\tema_data['MA'] = ma\n\t\t\tema_data['EMA'] = np.NaN\n\t\t\tema_data['EMA'][0] = ema_data['MA'][1]\n\t\t\tfor i in range(1, len(ema_data)):\n\t\t\t\tema_data['EMA'][i] = (ema_data['PRICE'][i] * k) + ((1-k) * ema_data['EMA'][i-1])\n\t\t\ttrace_ema = go.Scatter(\n\t\t\t\tx=ema_data.index, y=ema_data['MA'], name='EMA', line=dict(color='#17BECF'), opacity=0.8)\n\t\t\tdata.append(trace_ema)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'ema' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass CrossingMovingAvarege(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing the crossing of moving averages that consists of indicating\n\tbuying and selling an asset whenever the averages cross.\n\n\tdetail:\n\t\tThis indicator consists of 2 sets of simple moving averages. 
an acquaintance\n\t\tas short average or short and another known as long average or long whenever short crosses\n\t\tthe long down we make a sale, whenever the long crosses the short up we buy.\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tshort_rolling = data_frame['close'].rolling(window=kwargs.get(\"short_rolling\", 9)).mean()\n\t\t\tlong_rolling = data_frame['close'].rolling(window=kwargs.get(\"long_rolling\", 21)).mean()\n\t\t\ttrace_short_rolling = go.Scatter(\n\t\t\t\tx=short_rolling.index, y=short_rolling, name='SHORT', line=dict(color='#17BECF'), opacity=0.5)\n\t\t\ttrace_long_rolling = go.Scatter(\n\t\t\t\tx=long_rolling.index, y=long_rolling, name='LONG', line=dict(color='#17becf'), opacity=0.5)\n\t\t\tdata.append(trace_short_rolling)\n\t\t\tdata.append(trace_long_rolling)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'crossing moving avarege' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass MACD(CandlestickChartIndicator):\n\n\t\"\"\"\n\tClass responsible for implementing a MACD -> Convergence - Divergence\n\tof the moving average, which uses 3 exponential moving averages.\n\t\"\"\"\n\tdef indicator(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\thigh_average = data_frame['max'].rolling(window=kwargs.get(\"high\", 8)).mean()\n\t\t\tlow_average = data_frame['min'].rolling(window=kwargs.get(\"low\", 8)).mean()\n\t\t\thilo_high = pd.DataFrame(index=data_frame.index)\n\t\t\thilo_low = pd.DataFrame(index=data_frame.index)\n\t\t\thilo_high['max'] = np.where(data_frame['close'] > high_average, low_average, np.NaN)\n\t\t\thilo_low['min'] = np.where(data_frame['close'] < low_average, high_average, np.NaN)\n\t\t\ttrace_high = go.Scatter(x=hilo_high.index, y=hilo_high, line=dict(color='#17BECF'), opacity=1)\n\t\t\ttrace_low = go.Scatter(x=hilo_low.index, y=hilo_low, line=dict(color='#B22222'), opacity=1)\n\t\t\tdata.append(trace_high)\n\t\t\tdata.append(trace_low)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'macd' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\nclass BollingerBands(CandlestickChartIndicator):\n\t\"\"\"\n\tClass responsible for implementing boolinger bands based on variations\n\tprices at standard deviation levels.\n\n\tdetail:\n\tThis indicator is able to measure price volatility.\n\t\"\"\"\n\tdef indicate(self, data_frame, data=[], **kwargs):\n\t\ttry:\n\t\t\tdf_avg = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).mean().dropna()\n\t\t\tdf_std = data_frame['close'].rolling(window=kwargs.get(\"days\", 21)).std().dropna()\n\t\t\tdf_bollinger = pd.DataFrame(index=df_avg.index)\n\n\t\t\tdf_bollinger['mband'] = df_avg\n\t\t\tdf_bollinger['uband'] = df_avg + df_std.apply(lambda x: (x * 2))\n\t\t\tdf_bollinger['iband'] = df_avg - df_std.apply(lambda x: (x * 2))\n\t\t\tdf_price = data_frame[df_bollinger.index[0]:]\n\n\t\t\ttrace_prices = go.Candlestick(\n\t\t\t x = df_price.index,\n\t\t\t open = df_price['open'],\n\t\t\t high = df_price['max'],\n\t\t\t low = df_price['min'],\n\t\t\t close = df_price['close'],\n\t\t\t name='prices')\n\t\t\tuband = go.Scatter(\n\t\t\t\tx=df_bollinger.index, y=df_bollinger['uband'], name='Upper Band',\n\t\t\t\tline=dict(color='#17BECF'), opacity=0.8)\n\t\t\tmband = go.Scatter(\n\t\t\t\tx=df_bollinger.index, y=df_bollinger['mband'], name='Moving Band',\n\t\t\t\tline=dict(color='#B22222'), opacity=0.5)\n\t\t\tiband = go.Scatter(\n\t\t\t\tx=df_bollinger.index, 
y=df_bollinger['iband'], name='Lower Band',\n\t\t\t\tline=dict(color='#17BECF'), opacity=0.8)\n\t\t\tdata.append(uband)\n\t\t\tdata.append(mband)\n\t\t\tdata.append(iband)\n\t\t\tdata.append(trace_prices)\n\t\texcept (ErrorImplementingIndicator, TypeError) as e:\n\t\t\tlog.warning(f\"Error implementing 'bollinger bands' indicator: {e}\")\n\t\tfinally:\n\t\t\treturn data\n\n\n# end-of-file"
] | [
[
"numpy.where",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
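The candlestick-indicator record derives each overlay from a rolling window over the close price. A small pandas sketch of the same moving-average and Bollinger-band arithmetic on a synthetic close series (plotly tracing omitted; the series and window length are illustrative):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
close = pd.Series(np.linspace(100, 110, 60) + rng.normal(0, 1, 60))

days = 21
ma = close.rolling(window=days).mean()        # simple moving average (MA class)
std = close.rolling(window=days).std()

bollinger = pd.DataFrame({
    "mband": ma,                              # middle band
    "uband": ma + 2 * std,                    # upper band
    "iband": ma - 2 * std,                    # lower band
}).dropna()

# Exponential moving average with K = 2 / (N + 1), the recursion in the EMA docstring.
k = 2 / (days + 1)
ema = close.ewm(alpha=k, adjust=False).mean()

print(bollinger.tail(3))
print(ema.tail(3))

The record's `EMA` class implements the K-weighted recursion by hand; `ewm(alpha=k, adjust=False)` is pandas' built-in equivalent of that same update.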
nr-patel/NP-SDC-T3-P4-Capstone-Project | [
"d20b4cb009c72f9d1b6fd8f36aca2af4c7bffb08"
] | [
"ros/src/tl_detector/tl_detector.py"
] | [
"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32, Float32MultiArray\nfrom std_msgs.msg import MultiArrayDimension, MultiArrayDimension\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nimport math\nimport numpy as np\n\n# For now state is ground truth, so no need to have a cnt threshold\nSTATE_COUNT_THRESHOLD = 0\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = None\n self.has_image = False # we don't have image yet\n\n self.pose_wp_idx = None\n self.tl_wp_idx = [] # Waypoint indices of traffic lights\n self.tl_xy = [] # Stop line positions of traffic lights\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.bridge = CvBridge()\n self.use_simulator_classifier = rospy.get_param('~on_simulator')\n rospy.loginfo(\"Is on simulator? %s\" , self.use_simulator_classifier)\n self.light_classifier = TLClassifier(isSimulator = self.use_simulator_classifier)\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.state_count = 0\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Float32MultiArray, queue_size=15)\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n # Add closest waypoint subscriber to receive current closest waypoint from waypoint WaypointUpdater\n sub4 = rospy.Subscriber('/closest_waypoint', Int32, self.closest_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. 
You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints.waypoints\n N = len(self.waypoints)\n # Waypoints are only loaded once so at boot find closest waypoint idx of each traffic light stop line\n\n for x, y in self.config['stop_line_positions']:\n ds = []\n [ds.append(math.sqrt((x-self.waypoints[i].pose.pose.position.x)**2 + (y-self.waypoints[i].pose.pose.position.y)**2)) for i in range(N)]\n best_idx = np.argmin(ds)\n self.tl_wp_idx.append(best_idx)\n self.tl_xy.append([x, y])\n\n def closest_cb(self, msg):\n self.pose_wp_idx = msg.data\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n \n # Every time waypoint updater finds new closest waypoint, re-calculate location\n # of nearest stop line, waypoint closest to nearest stop line, and state of nearest light\n closest_tl_xy, light_wp, state = self.process_traffic_lights()\n\n if state == TrafficLight.GREEN:\n light_wp = -1\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. Otherwise the previous stable state is\n used.\n '''\n # Publish nearest waypoint and x-y coords of stop line so waypoint updater can slow if necessary\n red_light_pub = Float32MultiArray()\n red_light_pub.layout.dim.append(MultiArrayDimension())\n red_light_pub.layout.dim[0].label = \"length\"\n red_light_pub.layout.dim[0].size = 3\n red_light_pub.layout.dim[0].stride = 3\n red_light_pub.layout.data_offset = 0\n red_light_pub.data = [light_wp, closest_tl_xy[0], closest_tl_xy[1]]\n self.upcoming_red_light_pub.publish(red_light_pub)\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n image = np.asanyarray(cv_image)\n\n # Get classification\n return self.light_classifier.get_classification(image)\n\n def get_state_string(self, state):\n if (state == 0):\n state_s = \"RED\"\n elif (state == 1):\n state_s = \"YELLOW\"\n elif (state == 2):\n state_s = \"GREEN\"\n else:\n state_s = \"UNKNOWN\"\n\n return state_s\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n list (float): x,y coordinates of nearest traffic light stopline\n\n \"\"\"\n closest_tl_wp_idx = 0\n\n # This assumes ego always travels around loop in start direction. 
Should be fixed to use Yuri's calculation from waypoint_updater.py.\n closest_tl_wp_idx = min(self.tl_wp_idx)\n closest_tl_xy = self.tl_xy[np.argmin(self.tl_wp_idx)]\n if (self.pose_wp_idx):\n for i in range(len(self.tl_wp_idx)):\n if self.tl_wp_idx[i] > self.pose_wp_idx:\n closest_tl_wp_idx = self.tl_wp_idx[i]\n closest_tl_xy = self.tl_xy[i]\n break\n\n # We now have x,y position of stopline of closest traffic light.\n # Initially, rather than use camera img and classifier, we can get ground truth state of that light from the simulator.\n stop_x = closest_tl_xy[0]\n stop_y = closest_tl_xy[1]\n state = TrafficLight.UNKNOWN\n if (self.lights):\n n_lights = len(self.lights)\n ds = []\n [ds.append(math.sqrt((stop_x - self.lights[i].pose.pose.position.x)**2 + (stop_y - self.lights[i].pose.pose.position.y)**2)) for i in range(n_lights)]\n if (self.use_simulator_classifier):\n groundtruth = self.lights[np.argmin(ds)].state\n rospy.loginfo('groundtruth is {}'.format(self.get_state_string(groundtruth)))\n \n state = self.get_light_state(self.lights[np.argmin(ds)])\n rospy.loginfo('state is {}'.format(self.get_state_string(state)))\n\n return closest_tl_xy, closest_tl_wp_idx, state\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n"
] | [
[
"numpy.asanyarray",
"numpy.argmin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
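In `tl_detector.py`, `waypoints_cb` finds the waypoint nearest to each stop line by building a list of Euclidean distances and taking `np.argmin`. A standalone sketch of that nearest-index search, vectorized over hypothetical coordinates:

import numpy as np

# Hypothetical waypoint coordinates (n x 2) and one stop-line position.
waypoints = np.array([[0.0, 0.0], [10.0, 0.0], [20.0, 5.0], [30.0, 10.0]])
stop_line = np.array([18.0, 4.0])

# Euclidean distance to every waypoint, then the index of the closest one,
# mirroring the np.argmin(ds) call in waypoints_cb().
ds = np.linalg.norm(waypoints - stop_line, axis=1)
closest_idx = int(np.argmin(ds))
print(closest_idx, round(float(ds[closest_idx]), 3))   # 2 2.236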
Aeon1/XlsxWriter | [
"6871b6c3fe6c294632054ea91f23d9e27068bcc1"
] | [
"examples/pandas_chart_columns.py"
] | [
"##############################################################################\n#\n# An example of converting a Pandas dataframe to an xlsx file with a grouped\n# column chart using Pandas and XlsxWriter.\n#\n# Copyright 2013-2019, John McNamara, [email protected]\n#\n\nimport pandas as pd\n\n# Some sample data to plot.\nfarm_1 = {'Apples': 10, 'Berries': 32, 'Squash': 21, 'Melons': 13, 'Corn': 18}\nfarm_2 = {'Apples': 15, 'Berries': 43, 'Squash': 17, 'Melons': 10, 'Corn': 22}\nfarm_3 = {'Apples': 6, 'Berries': 24, 'Squash': 22, 'Melons': 16, 'Corn': 30}\nfarm_4 = {'Apples': 12, 'Berries': 30, 'Squash': 15, 'Melons': 9, 'Corn': 15}\n\ndata = [farm_1, farm_2, farm_3, farm_4]\nindex = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']\n\n# Create a Pandas dataframe from the data.\ndf = pd.DataFrame(data, index=index)\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nsheet_name = 'Sheet1'\nwriter = pd.ExcelWriter('pandas_chart_columns.xlsx', engine='xlsxwriter')\ndf.to_excel(writer, sheet_name=sheet_name)\n\n# Access the XlsxWriter workbook and worksheet objects from the dataframe.\nworkbook = writer.book\nworksheet = writer.sheets[sheet_name]\n\n# Create a chart object.\nchart = workbook.add_chart({'type': 'column'})\n\n# Some alternative colors for the chart.\ncolors = ['#E41A1C', '#377EB8', '#4DAF4A', '#984EA3', '#FF7F00']\n\n# Configure the series of the chart from the dataframe data.\nfor col_num in range(1, len(farm_1) + 1):\n chart.add_series({\n 'name': ['Sheet1', 0, col_num],\n 'categories': ['Sheet1', 1, 0, 4, 0],\n 'values': ['Sheet1', 1, col_num, 4, col_num],\n 'fill': {'color': colors[col_num - 1]},\n 'overlap': -10,\n })\n\n# Configure the chart axes.\nchart.set_x_axis({'name': 'Total Produce'})\nchart.set_y_axis({'name': 'Farms', 'major_gridlines': {'visible': False}})\n\n# Insert the chart into the worksheet.\nworksheet.insert_chart('H2', chart)\n\n# Close the Pandas Excel writer and output the Excel file.\nwriter.save()\n"
] | [
[
"pandas.DataFrame",
"pandas.ExcelWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
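The XlsxWriter example hard-codes the chart ranges for a 4-row frame (`['Sheet1', 1, 0, 4, 0]` and friends). A small sketch that derives the same range lists from the dataframe shape so the loop adapts to other sizes; the dicts it prints are what `chart.add_series()` receives in the record:

import pandas as pd

df = pd.DataFrame({"Apples": [10, 15, 6, 12], "Berries": [32, 43, 24, 30]},
                  index=["Farm 1", "Farm 2", "Farm 3", "Farm 4"])

n_rows, n_cols = df.shape
sheet = "Sheet1"

# After df.to_excel(), row 0 holds the header and column 0 holds the index,
# so the data occupy rows 1..n_rows and columns 1..n_cols.
for col_num in range(1, n_cols + 1):
    series = {
        "name":       [sheet, 0, col_num],
        "categories": [sheet, 1, 0, n_rows, 0],
        "values":     [sheet, 1, col_num, n_rows, col_num],
    }
    print(series)   # pass each dict to chart.add_series(series)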
lucamarx/pyAutoSpec | [
"d57efb6ff4c37ede1377351fd3dd3a6ce362b551"
] | [
"pyautospec/function_mps.py"
] | [
"\"\"\"\nMps based function compression algorithm\n\"\"\"\nimport numpy as np\nimport itertools\n\nfrom typing import List\n\nfrom .mps import Mps\nfrom .plots import function_wfa_comparison_chart\n\n\ndef word2real(s : List[int], x0 : float = 0.0, x1 : float = 1.0) -> float:\n \"\"\"\n Convert the binary representation s of xϵ[x0,x1) into the number itself\n \"\"\"\n s = [0] + s\n return x0 + sum([s[i] * 2**(-i) for i in range(len(s))]) * (x1-x0)\n\n\ndef real2word(r : float, l : int = 8, x0 : float = 0.0, x1 : float = 1.0) -> List[int]:\n \"\"\"\n Convert a real number xϵ[x0,x1) into its binary representation (with\n maximum length l)\n \"\"\"\n if r < x0 or r >= x1:\n raise Exception(\"out of bounds\")\n\n r = (r - x0) / (x1 - x0)\n w = []\n for _ in range(0,l+1):\n d = 1 if r >= 1 else 0\n w.append(d)\n r = (r-d)*2\n return w[1:]\n\n\nclass FunctionMps():\n \"\"\"\n Mps based real function model\n \"\"\"\n\n def __init__(self, sequence_length : int = 8, max_bond_dim : int = 20):\n \"\"\"\n Intialize a model of a real function f: [x0,x1) → R\n\n Parameters:\n -----------\n\n sequence_length : int\n the underlying MPS length\n\n max_bond_dim : int\n the underlying MPS maximum bond dimension\n \"\"\"\n self.f, self.x0, self.x1 = None, None, None\n\n self.model = Mps(sequence_length, 2, max_bond_dim)\n\n\n def __repr__(self) -> str:\n if self.f is None:\n return \" FunctionMps(N={}) <?>: [<?>,<?>] → R\\n{}\".format(len(self.model), self.model.__repr__())\n else:\n return \" FunctionMps(N={}) {}: [{:.2f},{:.2f}] → R\\n{}\".format(len(self.model), self.f.__repr__(), self.x0, self.x1, self.model.__repr__())\n\n\n def _one_hot(self, X : List[List[int]]) -> np.ndarray:\n \"\"\"\n Perform one-hot encoding\n \"\"\"\n idxs = np.array(X).reshape(-1)\n return np.eye(self.model.part_d)[idxs].reshape((-1, len(self.model), self.model.part_d))\n\n\n def __call__(self, x : float) -> float:\n \"\"\"\n Evaluate learned function at x\n\n Parameters:\n -----------\n\n x : float\n a point in [x0,x1)\n\n Returns:\n --------\n\n the value of the function at x\n \"\"\"\n return self.model(self._one_hot([real2word(x, l=len(self.model), x0=self.x0, x1=self.x1)]))[0]\n\n\n def comparison_chart(self, n_points : int = 50):\n \"\"\"\n Compare the two functions\n\n Parameters:\n -----------\n\n n_points : int\n the number of points in the plot\n \"\"\"\n function_wfa_comparison_chart(self, n_points, None, plot_derivative = False)\n\n\n def fit(self, f, x0 : float = 0.0, x1 : float = 1.0, learn_rate : float = 0.1, batch_size : int = 32, epochs : int = 10):\n \"\"\"\n Fit the model to the function f defined on the interval [x0,x1)\n\n Parameters:\n -----------\n\n f : function\n the function to be fitted\n\n x0 : float\n x1 : float\n the interval the function is defined on\n\n learn_rate : float\n the learning rate\n\n batch_size : int\n the batch size used at each step\n\n epochs : int\n the number of epochs\n\n Returns:\n --------\n\n The object itself\n \"\"\"\n self.f = f\n self.x0 = x0\n self.x1 = x1\n\n data = [(list(x), f(word2real(list(x), x0=x0, x1=x1))) for x in itertools.product(*([[0,1]] * len(self.model)))]\n\n self.model.fit(self._one_hot(np.array([t[0] for t in data])), np.array([t[1] for t in data]), learn_rate=learn_rate, batch_size=batch_size, epochs=epochs)\n\n return self\n"
] | [
[
"numpy.eye",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
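`function_mps.py` feeds the MPS a fixed-length binary word for each real x in [x0, x1). A self-contained restatement of that encode/decode pair, slightly simplified from the record's `real2word`/`word2real` but with the same semantics:

from typing import List

def real2word(r: float, l: int = 8, x0: float = 0.0, x1: float = 1.0) -> List[int]:
    # Binary expansion of (r - x0) / (x1 - x0), truncated to l digits.
    r = (r - x0) / (x1 - x0)
    w = []
    for _ in range(l):
        r *= 2
        d = 1 if r >= 1 else 0
        w.append(d)
        r -= d
    return w

def word2real(s: List[int], x0: float = 0.0, x1: float = 1.0) -> float:
    return x0 + sum(b * 2 ** (-(i + 1)) for i, b in enumerate(s)) * (x1 - x0)

w = real2word(0.3, l=8)
print(w)              # [0, 1, 0, 0, 1, 1, 0, 0]
print(word2real(w))   # 0.296875 -- exact only up to the 2**-8 resolution

Because the word length caps the resolution at 2**-l, the fitted function is effectively evaluated on 2**l bins of the interval.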
Cheese229/DataAssignmentCIS | [
"7e31892721aa2b3845df3e76296af500f29c9196"
] | [
"Simulation_virus_BB.py"
] | [
"\"\"\"\r\n Bigger scale simulation of a virus spread in a city.\r\n This would have been the better opt for the project, as it uses geospatial visualisation (which is not in this code)\r\n and data gathered from a a ride share, a specific city, their population, and their public transport data.\r\n\r\n I still don't understand how geospatial visualisation works (I think I will look more into it in the holidays)\r\n The simulation uses mathematical equations on how a virus would spread and includes its recovery rates.\r\n\r\n Technically this code works (I think)...\r\n It is just missing it's data and its visuals\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom collections import namedtuple\r\n\r\nParam = namedtuple('Param', 'R0 DE DI I0 HopitalisationRate HospitalIters')\r\n# I0 is the distribution of infected people at time t=0, if None then randomly choose inf number of people\r\n\r\n# flow is a 3D matrix of dimentions r x n x n (i.e., 84 x 549 x 549),\r\n# flow[t mod r] is the desired OD matrix at time t.\r\n\r\ndef seir(par, distr, flow, alpha, iterations, inf):\r\n\r\n r = flow.shape[0]\r\n n = flow.shape[1]\r\n N = distr[0].sum() #total population, we assume that N = sum(flow)\r\n Svec = distr[0].copy()\r\n Evec = np.zeros(n)\r\n Ivec = np.zeros(n)\r\n Rvec = np.zeros(n)\r\n\r\n if par.I0 is None:\r\n initial = np.zeros(n)\r\n # randomly choose inf infections\r\n for i in range(inf):\r\n loc = np.random.randint(n)\r\n if (Svec[loc] > initial[loc]):\r\n initial[loc] += 1.0\r\n\r\n else:\r\n initial = par.I0\r\n assert ((Svec < initial).sum() == 0)\r\n\r\n Svec -= initial\r\n Ivec += initial\r\n \r\n res = np.zeros((iterations, 5))\r\n res[0,:] = [Svec.sum(), Evec.sum(), Ivec.sum(), Rvec.sum(), 0]\r\n\r\n realflow = flow.copy()\r\n\r\n realflow = realflow / realflow.sum(axis=2)[:,:, np.newaxis]\r\n realflow = alpha * realflow\r\n\r\n history = np.zeros((iterations, 5, n))\r\n history[0,0,:] = Svec\r\n history[0,1,:] = Evec\r\n history[0,2,:] = Ivec\r\n history[0,3,:] = Rvec\r\n\r\n eachIter = np.zeros(iterations + 1)\r\n\r\n # run simulation\r\n for iter in range(0, iterations - 1):\r\n realOD = realflow[iter % r]\r\n\r\n d = distr[iter % r] + 1\r\n\r\n if ((d>N+1).any()):\r\n print(\"Houston, we have a problem!\")\r\n return res, history\r\n # N = S + E + I + R\r\n\r\n newE = Svec * Ivec / d * (par.R0 / par.DI)\r\n newI = Evec / par.DE\r\n newR = Ivec / par.DI\r\n\r\n Svec -= newE\r\n Svec = (Svec + np.matmul(Svec.reshape(1,n), realOD) - Svec * realOD.sum(axis=1))\r\n Evec = Evec + newE - newI\r\n Evec = (Evec + np.matmul(Evec.reshape(1,n), realOD) - Evec * realOD.sum(axis=1))\r\n Ivec = Ivec + newI - newR\r\n Ivec = (Ivec + np.matmul(Ivec.reshape(1,n), realOD) - Ivec * realOD.sum(axis=1))\r\n Rvec += newR\r\n Rvec = (Rvec + np.matmul(Rvec.reshape(1,n), realOD) - Rvec * realOD.sum(axis=1))\r\n\r\n res[iter + 1,:] = [Svec.sum(), Evec.sum(), Ivec.sum(), Rvec.sum(), 0]\r\n eachIter[iter + 1] = newI.sum()\r\n res[iter + 1, 4] = eachIter[max(0, iter - par.HospitalIters) : iter].sum() * par.HospitalisationRate\r\n\r\n history[iter + 1,0,:] = Svec\r\n history[iter + 1,1,:] = Evec\r\n history[iter + 1,2,:] = Ivec\r\n history[iter + 1,3,:] = Rvec\r\n\r\n\r\n return res, history"
] | [
[
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
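The simulation record advances an SEIR model per district and then mixes compartments through the OD flow matrix. Dropping the mobility term leaves the core update inside `seir()`; a single-population sketch with illustrative parameter values (R0, DE, DI are assumptions, not values from the record):

import numpy as np

R0, DE, DI = 2.5, 5.6, 5.2           # reproduction number, incubation and infectious periods (days)
N = 100_000.0
S, E, I, R = N - 10.0, 0.0, 10.0, 0.0

history = []
for _ in range(300):
    newE = S * I / N * (R0 / DI)     # S -> E, same expression as inside seir()
    newI = E / DE                    # E -> I
    newR = I / DI                    # I -> R
    S, E, I, R = S - newE, E + newE - newI, I + newI - newR, R + newR
    history.append(I)

peak_day = int(np.argmax(history))
print("infections peak on day", peak_day, "at about", round(history[peak_day]))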
soumitri2001/EvoCluster | [
"001dfb4c1f00db84ad1c2f2228eed6112d7e65b1",
"2f8e3f21c7045478394e7e02a22835f7c184c0c7"
] | [
"EvoCluster/_objectives.py",
"EvoCluster/optimizers/CFFA.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 9 18:12:29 2019\n\n@author: Raneem\n\"\"\"\n\nfrom sklearn import cluster, metrics\nfrom scipy.spatial.distance import pdist, cdist\nimport numpy\nimport sys\n\ndef getLabelsPred(startpts, points, k):\n labelsPred = [-1] * len(points)\n \n for i in range(len(points)):\n distances = numpy.linalg.norm(points[i]-startpts, axis = 1)\n labelsPred[i] = numpy.argmin(distances)\n \n return labelsPred\n \n\ndef SSE(startpts, points, k, metric):\n labelsPred = getLabelsPred(startpts, points, k)\n fitness = 0\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n centroidsForPoints = startpts[labelsPred]\n fitness = 0\n for i in range(k):\n indexes = [n for n,x in enumerate(labelsPred) if x==i]\n fit = cdist(points[indexes], centroidsForPoints[indexes], metric)**2\n fit = sum(fit)[0]\n fitness += fit\n return fitness, labelsPred\n\n\ndef TWCV(startpts, points, k):\n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n sumAllFeatures = sum(sum(numpy.power(points,2)))\n sumAllPairPointsCluster = 0\n for clusterId in range(k):\n indices = numpy.where(numpy.array(labelsPred) == clusterId)[0]\n pointsInCluster = points[numpy.array(indices)]\n sumPairPointsCluster = sum(pointsInCluster)\n sumPairPointsCluster = numpy.power(sumPairPointsCluster,2)\n sumPairPointsCluster = sum(sumPairPointsCluster)\n sumPairPointsCluster = sumPairPointsCluster/len(pointsInCluster)\n \n sumAllPairPointsCluster += sumPairPointsCluster\n fitness = (sumAllFeatures - sumAllPairPointsCluster)\n return fitness, labelsPred\n\n\ndef SC(startpts, points, k, metric): \n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n silhouette = metrics.silhouette_score(points, labelsPred, metric=metric)\n #silhouette = (silhouette - (-1)) / (1 - (-1))\n silhouette = (silhouette + 1) / 2\n fitness = 1 - silhouette\n return fitness, labelsPred\n\n\ndef DB(startpts, points, k):\n labelsPred = getLabelsPred(startpts, points, k)\n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n fitness = metrics.davies_bouldin_score(points, labelsPred)\n return fitness, labelsPred\n\ndef CH(startpts, points, k):\n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n ch = metrics.calinski_harabaz_score(points, labelsPred)\n fitness = 1 / ch\n return fitness, labelsPred\n\n\ndef delta_fast(ck, cl, distances):\n values = distances[numpy.where(ck)][:, numpy.where(cl)]\n values = values[numpy.nonzero(values)]\n\n return numpy.min(values)\n \ndef big_delta_fast(ci, distances):\n values = distances[numpy.where(ci)][:, numpy.where(ci)]\n #values = values[numpy.nonzero(values)]\n \n return numpy.max(values)\n\ndef dunn_fast(points, labels, metric):\n v = pdist(points, metric)\n size_X = len(points)\n X = numpy.zeros((size_X,size_X))\n X[numpy.triu_indices(X.shape[0], k = 1)] = v\n distances = X + X.T\n ks = numpy.sort(numpy.unique(labels))\n \n deltas = numpy.ones([len(ks), len(ks)])*1000000\n big_deltas = numpy.zeros([len(ks), 1])\n \n l_range = list(range(0, len(ks)))\n \n for k in l_range:\n for l in (l_range[0:k]+l_range[k+1:]):\n deltas[k, l] = delta_fast((labels == ks[k]), (labels == ks[l]), distances)\n \n big_deltas[k] = big_delta_fast((labels == ks[k]), distances)\n\n di = numpy.min(deltas)/numpy.max(big_deltas)\n return 
di\n \n\ndef DI(startpts, points, k, metric):\n labelsPred = getLabelsPred(startpts, points, k)\n \n if numpy.unique(labelsPred).size < k:\n fitness = sys.float_info.max\n else:\n dunn = dunn_fast(points, labelsPred, metric)\n if(dunn < 0):\n dunn = 0\n fitness = 1 - dunn\n return fitness, labelsPred\n\n\ndef getFunctionDetails(a): \n # [name, lb, ub]\n param = { 0: [\"SSE\",0,1],\n 1: [\"TWCV\",0,1],\n 2: [\"SC\",0,1],\n 3: [\"DB\",0,1],\n #4: [\"CH\",0,1],\n 4: [\"DI\",0,1]\n }\n return param.get(a, \"nothing\")",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 29 00:49:35 2016\n\n@author: hossam\n\"\"\"\n\n#% ======================================================== % \n#% Files of the Matlab programs included in the book: %\n#% Xin-She Yang, Nature-Inspired Metaheuristic Algorithms, %\n#% Second Edition, Luniver Press, (2010). www.luniver.com %\n#% ======================================================== % \n#\n#% -------------------------------------------------------- %\n#% Firefly Algorithm for constrained optimization using %\n#% for the design of a spring (benchmark) % \n#% by Xin-She Yang (Cambridge University) Copyright @2009 %\n#% -------------------------------------------------------- %\n\nimport numpy\nimport math\nimport time\nfrom .._solution import solution\n\n\n\n\n\ndef alpha_new(alpha,NGen):\n #% alpha_n=alpha_0(1-delta)^NGen=10^(-4);\n #% alpha_0=0.9\n delta=1-(10**(-4)/0.9)**(1/NGen);\n alpha=(1-delta)*alpha\n return alpha\n\n\n\ndef FFA(objf,lb,ub,dim,n,MaxGeneration, k, points, metric):\n\n #General parameters\n\n #n=50 #number of fireflies\n #dim=30 #dim \n #lb=-50\n #ub=50\n #MaxGeneration=500\n \n #FFA parameters\n alpha=0.5 # Randomness 0--1 (highly random)\n betamin=0.20 # minimum value of beta\n gamma=1 # Absorption coefficient\n \n \n zn=numpy.ones(n)\n zn.fill(float(\"inf\")) \n \n \n #ns(i,:)=Lb+(Ub-Lb).*rand(1,d);\n ns = numpy.zeros((n, dim))\n ns=numpy.random.uniform(0,1,(n,dim)) *(ub-lb)+lb\n \n Lightn=numpy.ones(n)\n Lightn.fill(float(\"inf\")) \n labelsPred=numpy.zeros((n,len(points)))\n \n #[ns,Lightn]=init_ffa(n,d,Lb,Ub,u0)\n \n convergence=[]\n s=solution()\n\n \n print(\"CS is optimizing \\\"\"+objf.__name__+\"\\\"\") \n \n timerStart=time.time() \n s.startTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n \n # Main loop\n for Iteration in range (0,MaxGeneration): # start iterations\n \n #% This line of reducing alpha is optional\n alpha=alpha_new(alpha,MaxGeneration);\n \n #% Evaluate new solutions (for all n fireflies)\n for i in range(0,n): \n startpts = numpy.reshape(ns[i,:], (k,(int)(dim/k)))\n if objf.__name__ == 'SSE' or objf.__name__ == 'SC' or objf.__name__ == 'DI':\n fitnessValue, labelsPredValues=objf(startpts, points, k, metric) \n else:\n fitnessValue, labelsPredValues=objf(startpts, points, k) \n zn[i] = fitnessValue\n Lightn[i]=zn[i]\n labelsPred[i,:] = labelsPredValues\n \n \n \n \n # Ranking fireflies by their light intensity/objectives\n \n \n Lightn=numpy.sort(zn)\n Index=numpy.argsort(zn)\n ns=ns[Index,:]\n \n \n #Find the current best\n nso=ns\n Lighto=Lightn\n nbest=ns[0,:] \n Lightbest=Lightn[0]\n labelsPredBest=labelsPred[0]\n \n #% For output only\n fbest=Lightbest\n \n #% Move all fireflies to the better locations\n # [ns]=ffa_move(n,d,ns,Lightn,nso,Lighto,nbest,...\n # Lightbest,alpha,betamin,gamma,Lb,Ub);\n \n scale=numpy.ones(dim)*abs(ub-lb)\n\n for i in range (0,n):\n # The attractiveness parameter beta=exp(-gamma*r)\n for j in range(0,n):\n r=numpy.sqrt(numpy.sum((ns[i,:]-ns[j,:])**2));\n #r=1\n # Update moves\n if Lightn[i]>Lighto[j]: # Brighter and more attractive\n beta0=1\n beta=(beta0-betamin)*math.exp(-gamma*r**2)+betamin\n tmpf=alpha*(numpy.random.rand(dim)-0.5)*scale\n ns[i,:]=ns[i,:]*(1-beta)+nso[j,:]*beta+tmpf\n \n \n #ns=numpy.clip(ns, lb, ub)\n \n convergence.append(fbest)\n \t\n if (Iteration%1==0):\n print(['At iteration '+ str(Iteration)+ ' the best fitness is '+ str(fbest)])\n # \n ####################### End main loop\n timerEnd=time.time() \n s.endTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n 
s.executionTime=timerEnd-timerStart\n s.convergence=convergence\n s.optimizer=\"FFA\"\n s.objfname=objf.__name__\n s.labelsPred = numpy.array(labelsPredBest, dtype=numpy.int64)\n s.bestIndividual = nbest\n \n return s\n \n \n \n \n \n"
] | [
[
"numpy.nonzero",
"numpy.min",
"sklearn.metrics.silhouette_score",
"numpy.unique",
"numpy.power",
"numpy.triu_indices",
"numpy.linalg.norm",
"sklearn.metrics.calinski_harabaz_score",
"scipy.spatial.distance.cdist",
"numpy.max",
"scipy.spatial.distance.pdist",
"numpy.argmin",
"sklearn.metrics.davies_bouldin_score",
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"numpy.sort",
"numpy.ones",
"numpy.random.uniform",
"numpy.random.rand",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
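EvoCluster's `SSE` objective assigns every point to its nearest candidate centroid and sums the squared distances; the firefly optimizer (`FFA`) then searches centroid positions that minimize that value. A compact numpy sketch of the objective itself on toy data:

import numpy as np

rng = np.random.default_rng(0)
points = np.vstack([rng.normal(0, 0.3, (50, 2)), rng.normal(3, 0.3, (50, 2))])
centroids = np.array([[0.0, 0.0], [3.0, 3.0]])   # one candidate solution, k = 2

# Nearest-centroid assignment, as in getLabelsPred().
dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
labels = np.argmin(dists, axis=1)

# Sum of squared distances to the assigned centroid, as in SSE().
sse = float(np.sum((points - centroids[labels]) ** 2))
print(labels[:5], round(sse, 3))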
shfshf/seq2annotation | [
"a824520d46f0b3d70268fae422976a5ce1b3f4ce",
"a824520d46f0b3d70268fae422976a5ce1b3f4ce"
] | [
"loadmodel.py",
"seq2annotation/algorithms/Stacked_BiLSTM_CRF_model.py"
] | [
"import tensorflow as tf\r\nfrom tensorflow.python.platform import gfile\r\n\r\n# only for bugfix\r\ntf.contrib.rnn\r\n\r\noutput_graph_path = './model.pb'\r\n\r\ngraph = tf.Graph()\r\n\r\nwith gfile.FastGFile(output_graph_path, 'rb') as f:\r\n output_graph_def = tf.GraphDef()\r\n output_graph_def.ParseFromString(f.read())\r\n\r\nwith graph.as_default():\r\n tf.import_graph_def(output_graph_def, name=\"\")\r\n\r\n with tf.Session(graph=graph) as sess:\r\n init_all_tables = graph.get_operation_by_name('init_all_tables')\r\n sess.run(init_all_tables)\r\n # sess.run(tf.global_variables_initializer())\r\n # sess.run(tf.local_variables_initializer())\r\n # 得到当前图有几个操作节点\r\n print(\"%d ops in the final graph.\" % len(output_graph_def.node))\r\n\r\n tensor_name = [tensor.name for tensor in output_graph_def.node]\r\n print(tensor_name)\r\n print('---------------------------')\r\n\r\n Placeholder = sess.graph.get_tensor_by_name('Placeholder:0')\r\n Placeholder_1 = sess.graph.get_tensor_by_name('Placeholder_1:0')\r\n # embedding层的输出\r\n embedding_out = sess.graph.get_tensor_by_name('embedding_lookup:0')\r\n enbedding_transpose = sess.graph.get_tensor_by_name('transpose:0')\r\n # BiLSTM层的输出\r\n BiLSTM_out = sess.graph.get_tensor_by_name('concat:0')\r\n BiLSTM_transpose_1 = sess.graph.get_tensor_by_name('transpose_1:0')\r\n\r\n a = sess.graph.get_tensor_by_name('Variable_1:0')\r\n a_array = a.eval(session=sess)\r\n for i in a_array[:1]:\r\n print(i)\r\n print('#####################')\r\n\r\n input_words = [['唱', '一', '首', '不', '消', '失', '的', '回', '忆']]\r\n input_words_len = [9]\r\n\r\n b = sess.graph.get_tensor_by_name('hash_table_Lookup/hash_table_Lookup/LookupTableFindV2:0')\r\n b = sess.run(b, {Placeholder: input_words, Placeholder_1: input_words_len})\r\n\r\n for i in b:\r\n print(i)",
"import tensorflow as tf\nfrom seq2annotation.algorithms.model import Model\n\n\nclass StackedBilstmCrfModel(Model):\n @classmethod\n def default_params(cls):\n default_params = {\n 'stacked_layers': 2\n }\n\n return default_params\n\n def bilstm_layer(self, embeddings, nwords):\n t = tf.transpose(embeddings, perm=[1, 0, 2])\n lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])\n lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])\n lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)\n output_fw, _ = lstm_cell_fw(t, dtype=tf.float32,\n sequence_length=nwords)\n output_bw, _ = lstm_cell_bw(t, dtype=tf.float32,\n sequence_length=nwords)\n output = tf.concat([output_fw, output_bw], axis=-1)\n # transpose it back\n output = tf.transpose(output, perm=[1, 0, 2])\n\n return output\n\n def call(self, embeddings, nwords):\n inner_layer_data = self.bilstm_layer(embeddings, nwords)\n for i in range(1, self.params['stacked_layers']):\n inner_layer_data = self.bilstm_layer(inner_layer_data, nwords)\n\n return inner_layer_data\n"
] | [
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.Session",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.GraphDef"
],
[
"tensorflow.concat",
"tensorflow.contrib.rnn.TimeReversedFusedRNN",
"tensorflow.contrib.rnn.LSTMBlockFusedCell",
"tensorflow.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
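`Stacked_BiLSTM_CRF_model.py` stacks BiLSTM layers by feeding each layer's output into the next inside `call()`. A conceptual restatement using `tf.keras` layers rather than the record's TF 1.x fused contrib cells (an assumption, chosen only so the sketch runs on current TensorFlow; hidden size and embedding dimension are illustrative):

import tensorflow as tf

def stacked_bilstm(inputs, lstm_size=100, stacked_layers=2):
    # Feed each BiLSTM layer's sequence output into the next, as in call().
    x = inputs
    for _ in range(stacked_layers):
        x = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(lstm_size, return_sequences=True))(x)
    return x

inp = tf.keras.Input(shape=(None, 300))   # (time steps, embedding dim)
out = stacked_bilstm(inp)
tf.keras.Model(inp, out).summary()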
GaoX2015/intro_ds | [
"886e678e5353e9b4c0d4f3da83a00d6b9a2f06a5",
"886e678e5353e9b4c0d4f3da83a00d6b9a2f06a5",
"886e678e5353e9b4c0d4f3da83a00d6b9a2f06a5",
"886e678e5353e9b4c0d4f3da83a00d6b9a2f06a5"
] | [
"ch06-sgd/utils.py",
"ch09-generative_models/yahmm/hmm/tests/test_multinomialHMM.py",
"ch08-supervised/svm/linear_separable_svm.py",
"ch05-logit/example/logit_example.py"
] | [
"# -*- coding: UTF-8 -*-\n\"\"\"\n此脚本用于随机生成线性模型数据、定义模型以及其他工具\n\"\"\"\n\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef generateLinearData(dimension, num):\n \"\"\"\n 随机产生线性模型数据\n\n 参数\n ----\n dimension :int,自变量个数\n\n num :int,数据个数\n\n 返回\n ----\n x :np.array,自变量\n\n y :np.array,因变量\n \"\"\"\n np.random.seed(1024)\n beta = np.array(range(dimension)) + 1\n x = np.random.random((num, dimension))\n epsilon = np.random.random((num, 1))\n # 将被预测值写成矩阵形式,会极大加快速度\n y = x.dot(beta).reshape((-1, 1)) + epsilon\n return x, y\n\n\ndef createLinearModel(dimension):\n \"\"\"\n 搭建模型,包括数据中的自变量,应变量和损失函数\n\n 参数\n ----\n dimension : int,自变量的个数\n\n 返回\n ----\n model :dict,里面包含模型的参数,损失函数,自变量,应变量\n \"\"\"\n np.random.seed(1024)\n # 定义自变量和应变量\n x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')\n ## 将被预测值写成矩阵形式,会极大加快速度\n y = tf.placeholder(tf.float64, shape=[None, 1], name=\"y\")\n # 定义参数估计值和预测值\n betaPred = tf.Variable(np.random.random([dimension, 1]))\n yPred = tf.matmul(x, betaPred, name=\"y_pred\")\n # 定义损失函数\n loss = tf.reduce_mean(tf.square(yPred - y))\n model = {\"loss_function\": loss, \"independent_variable\": x,\n \"dependent_variable\": y, \"prediction\": yPred, \"model_params\": betaPred}\n return model\n\n\ndef createSummaryWriter(logPath):\n \"\"\"\n 检查所给路径是否已存在,如果存在删除原有日志。并创建日志写入对象\n\n 参数\n ----\n logPath :string,日志存储路径\n\n 返回\n ----\n summaryWriter :FileWriter,日志写入器\n \"\"\"\n if tf.gfile.Exists(logPath):\n tf.gfile.DeleteRecursively(logPath)\n summaryWriter = tf.summary.FileWriter(logPath, graph=tf.get_default_graph())\n return summaryWriter\n",
"# -*- coding: UTF-8 -*-\nimport numpy as np\nfrom hmm.multinomialHMM import MultinomialHMM\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\n\ndef test_mutlnomialhmm():\n \"\"\"\n \"\"\"\n mh = MultinomialHMM(alpha=1)\n Y = [0, 1, 1, 1]\n X = [[1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 0, 1]]\n mh.fit(X, Y, [4])\n assert_array_almost_equal(np.exp(mh.initProb_), [2./3, 1./3])\n assert_array_almost_equal(np.exp(mh.transProb_),\n [[1./3, 2./3], [1./4, 3./4]])\n assert_array_almost_equal(np.exp(mh.emitProb_),\n [[2./4, 1./4, 1./4], [1./7, 3./7, 3./7]])\n mh = MultinomialHMM(alpha=0.1)\n mh.fit(X, Y, [2, 2])\n assert_array_almost_equal(np.exp(mh.initProb_), [1.1/2.2, 1.1/2.2])\n assert_array_almost_equal(np.exp(mh.transProb_),\n [[.1/1.2, 1.1/1.2], [.1/1.2, 1.1/1.2]])\n assert_array_almost_equal(np.exp(mh.emitProb_),\n [[1.1/1.3, .1/1.3, .1/1.3], [.1/4.3, 2.1/4.3, 2.1/4.3]])\n assert_array_almost_equal(mh.predict(X), mh.predict(X, lengths=[1, 3]))\n",
"# -*- coding: UTF-8 -*-\n\"\"\"\n此脚本用于展示线性可分情况下的支持向量学习机\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.svm import SVC\n\n\ndef generateSeparatableData(n):\n \"\"\"\n 产生线性可分的数据集\n \"\"\"\n np.random.seed(2046)\n X = np.r_[np.random.randn(n, 2) - [1, 1], np.random.randn(n, 2) + [3, 3]]\n Y = [[0]] * n + [[1]] * n\n data = np.concatenate((Y, X), axis=1)\n data = pd.DataFrame(data, columns=[\"y\", \"x1\", \"x2\"])\n return data\n\n\ndef generateInseparatableData(n):\n \"\"\"\n 产生线性不可分的数据集\n \"\"\"\n data = generateSeparatableData(n)\n inseparatable = [[1, -1, 1.5], [0, 3, 1]]\n inseparatable = pd.DataFrame(inseparatable, columns=[\"y\", \"x1\", \"x2\"])\n data = data.append(inseparatable)\n return data\n\n\ndef trainModel(data):\n \"\"\"\n 训练SVM模型\n \"\"\"\n # 将惩罚系数设为很大,使模型接近于线性可分时的模型\n model = SVC(C=1e4, kernel=\"linear\")\n model.fit(data[[\"x1\", \"x2\"]], data[\"y\"])\n return model\n\n\ndef visualize(data, model=None):\n \"\"\"\n 将模型结果可视化\n \"\"\"\n # 创建一个图形框\n fig = plt.figure(figsize=(6, 6), dpi=80)\n # 在图形框里画一幅图\n ax = fig.add_subplot(1, 1, 1)\n label1 = data[data[\"y\"]>0]\n ax.scatter(label1[[\"x1\"]], label1[[\"x2\"]], marker=\"o\")\n label0 = data[data[\"y\"]==0]\n ax.scatter(label0[[\"x1\"]], label0[[\"x2\"]], marker=\"^\", color=\"k\")\n if model is not None:\n w = model.coef_\n a = -w[0][0] / w[0][1]\n xx = np.linspace(-3, 5)\n yy = a * xx - (model.intercept_) / w[0][1]\n yy_down = yy - 1 / w[0][1]\n yy_up = yy + 1 / w[0][1]\n ax.plot(xx, yy, \"r\")\n ax.plot(xx, yy_down, \"r--\")\n ax.plot(xx, yy_up, \"r--\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n data = generateSeparatableData(20)\n data1 = generateInseparatableData(20)\n re = trainModel(data)\n visualize(data1, re)\n",
"# -*- coding: UTF-8 -*-\n\"\"\"\n此脚本用于展示逻辑回归模型的搭建过程以及统计性质\n\"\"\"\n\n\n# 保证脚本与Python3兼容\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport statsmodels.api as sm\nfrom statsmodels.graphics.mosaicplot import mosaic\n\n\ndef modelSummary(re):\n \"\"\"\n 分析逻辑回归模型的统计性质\n \"\"\"\n # 整体统计分析结果\n print(re.summary())\n # 用f test检验education_num的系数是否显著\n print(\"检验假设education_num的系数等于0:\")\n print(re.f_test(\"education_num=0\"))\n # 用f test检验两个假设是否同时成立\n print(\"检验假设education_num的系数等于0.32和hours_per_week的系数等于0.04同时成立:\")\n print(re.f_test(\"education_num=0.32, hours_per_week=0.04\"))\n\n\ndef transLabel(data):\n \"\"\"\n 将文字变量转化为数字变量\n \"\"\"\n data[\"label_code\"] = pd.Categorical(data.label).codes\n return data\n\n\ndef visualData(data):\n \"\"\"\n 画直方图,直观了解数据\n \"\"\"\n data[[\"age\", \"hours_per_week\", \"education_num\", \"label_code\"]].hist(\n rwidth=0.9, grid=False, figsize=(8, 8), alpha=0.6, color=\"grey\")\n plt.show()\n\n\ndef analyseData(data):\n \"\"\"\n 通过统计方法,了解数据性质\n \"\"\"\n # 在Windows下运行此脚本需确保Windows下的命令提示符(cmd)能显示中文\n print(\"显示基本统计信息:\")\n print(data.describe(include=\"all\"))\n # 计算education_num, label交叉报表\n cross1 = pd.crosstab(pd.qcut(data[\"education_num\"], [0, .25, .5, .75, 1]), data[\"label\"])\n print(\"显示education_num, label交叉报表:\")\n print(cross1)\n # 将交叉报表图形化\n props = lambda key: {\"color\": \"0.45\"} if ' >50K' in key else {\"color\": \"#C6E2FF\"}\n mosaic(cross1[[\" >50K\", \" <=50K\"]].stack(), properties=props)\n # 计算hours_per_week, label交叉报表\n cross2 = pd.crosstab(pd.cut(data[\"hours_per_week\"], 5), data[\"label\"])\n # 将交叉报表归一化,利于分析数据\n cross2_norm = cross2.div(cross2.sum(1).astype(float), axis=0)\n print(\"显示hours_per_week, label交叉报表:\")\n print(cross2_norm)\n # 图形化归一化后的交叉报表\n cross2_norm.plot(kind=\"bar\", color=[\"#C6E2FF\", \"0.45\"], rot=0)\n plt.show()\n \n\n\ndef trainModel(data):\n \"\"\"\n 搭建逻辑回归模型,并训练模型\n \"\"\"\n formula = \"label_code ~ age + education_num + capital_gain + capital_loss + hours_per_week\"\n model = sm.Logit.from_formula(formula, data=data)\n re = model.fit()\n return re\n\n\ndef readData(path):\n \"\"\"\n 使用pandas读取数据\n \"\"\"\n data = pd.read_csv(path)\n cols = [\"age\", \"education_num\", \"capital_gain\", \"capital_loss\", \"hours_per_week\", \"label\"]\n return data[cols]\n\n\ndef interpretModel(re):\n \"\"\"\n 理解模型结果\n\n 参数\n ----\n re :BinaryResults,训练好的逻辑回归模型\n \"\"\"\n conf = re.conf_int()\n conf['OR'] = re.params\n # 计算各个变量对事件发生比的影响\n # conf里面的三列,分别对应着估计值的下界、上界和估计值本身\n conf.columns = ['2.5%', '97.5%', 'OR']\n print(\"各个变量对事件发生比的影响:\")\n print(np.exp(conf))\n # 计算各个变量的边际效应\n print(\"各个变量的边际效应:\")\n print(re.get_margeff(at=\"overall\").summary())\n\n\ndef makePrediction(re, testSet, alpha=0.5):\n \"\"\"\n 使用训练好的模型对测试数据做预测\n \"\"\"\n # 关闭pandas有关chain_assignment的警告\n pd.options.mode.chained_assignment = None\n # 计算事件发生的概率\n testSet[\"prob\"] = re.predict(testSet)\n print(\"事件发生概率(预测概率)大于0.6的数据个数:\")\n print(testSet[testSet[\"prob\"] > 0.6].shape[0]) # 输出值为576\n print(\"事件发生概率(预测概率)大于0.5的数据个数:\")\n print(testSet[testSet[\"prob\"] > 0.5].shape[0]) # 输出值为834\n # 根据预测的概率,得出最终的预测\n testSet[\"pred\"] = testSet.apply(lambda x: 1 if x[\"prob\"] > alpha else 0, axis=1)\n return testSet\n\n\ndef evaluation(re):\n \"\"\"\n 计算预测结果的查准查全率以及f1\n\n 参数\n ----\n re :DataFrame,预测结果,里面包含两列:真实值‘lable_code’、预测值‘pred’\n \"\"\"\n bins = np.array([0, 0.5, 1])\n label = re[\"label_code\"]\n pred = re[\"pred\"]\n tn, 
fp, fn, tp = np.histogram2d(label, pred, bins=bins)[0].flatten()\n precision = tp / (tp + fp) # 0.707\n recall = tp / (tp + fn) # 0.374\n f1 = 2 * precision * recall / (precision + recall) # 0.490\n print(\"查准率: %.3f, 查全率: %.3f, f1: %.3f\" % (precision, recall, f1))\n\n\ndef logitRegression(data):\n \"\"\"\n 逻辑回归模型分析步骤展示\n\n 参数\n ----\n data :DataFrame,建模数据\n \"\"\"\n data = transLabel(data)\n visualData(data)\n analyseData(data)\n # 将数据分为训练集和测试集\n trainSet, testSet = train_test_split(data, test_size=0.2, random_state=2310)\n # 训练模型并分析模型效果\n re = trainModel(trainSet)\n modelSummary(re)\n interpretModel(re)\n re = makePrediction(re, testSet)\n evaluation(re)\n\n\nif __name__ == \"__main__\":\n # 设置显示格式\n pd.set_option('display.width', 1000)\n homePath = os.path.dirname(os.path.abspath(__file__))\n # Windows下的存储路径与Linux并不相同\n if os.name == \"nt\":\n dataPath = \"%s\\\\data\\\\adult.data\" % homePath\n else:\n dataPath = \"%s/data/adult.data\" % homePath\n data = readData(dataPath)\n logitRegression(data)\n"
] | [
[
"tensorflow.matmul",
"numpy.random.random",
"tensorflow.gfile.DeleteRecursively",
"numpy.random.seed",
"tensorflow.gfile.Exists",
"tensorflow.placeholder",
"tensorflow.square",
"tensorflow.get_default_graph"
],
[
"numpy.exp"
],
[
"numpy.random.seed",
"numpy.linspace",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.random.randn",
"sklearn.svm.SVC",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"pandas.read_csv",
"pandas.Categorical",
"sklearn.model_selection.train_test_split",
"pandas.cut",
"numpy.exp",
"pandas.set_option",
"numpy.array",
"pandas.qcut",
"matplotlib.pyplot.show",
"numpy.histogram2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
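`createLinearModel` in the intro_ds record minimizes `mean((x·β − y)²)` with TensorFlow, and `generateLinearData` draws uniform features and noise. A plain-numpy gradient-descent sketch of the same least-squares problem on that generator's data (learning rate and step count are illustrative):

import numpy as np

np.random.seed(1024)
dimension, num = 5, 1000
beta_true = np.arange(dimension) + 1.0
x = np.random.random((num, dimension))
y = x.dot(beta_true).reshape(-1, 1) + np.random.random((num, 1))

# Gradient descent on mean((x @ beta - y) ** 2), the loss createLinearModel() builds in TF.
beta = np.random.random((dimension, 1))
lr = 0.1
for _ in range(2000):
    grad = 2 * x.T @ (x @ beta - y) / num
    beta -= lr * grad

print(np.round(beta.ravel(), 2))   # roughly recovers [1..5], shifted slightly by the uncentred noise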
HarliWu/From-Deterioration-to-Acceleration-A-Calibration-Approach-to-Rehabilitating-Step-Asynchronism-in-Fe | [
"3a2f7196a2ca0446ce7ff7c8d15a0fa56a1d91d4",
"3a2f7196a2ca0446ce7ff7c8d15a0fa56a1d91d4"
] | [
"federated_learning/FedaGrac/param_server.py",
"data/emnist.py"
] | [
"import time, os, json, time\nimport numpy as np\n\nimport torch\nfrom torch._C import device\nimport torch.distributed as dist\nfrom torch.autograd import Variable\n\ndef test_model(model, test_data, dev):\n correct, total = 0, 0\n model.eval()\n\n with torch.no_grad():\n for data, target in test_data:\n data, target = Variable(data).cuda(dev), Variable(target).cuda(dev)\n output = model(data)\n # get the index of the max log-probability\n _, predictions = output.max(1)\n total += predictions.size(0)\n correct += torch.sum(predictions == target.data).float()\n\n acc = correct / total\n return acc.item()\n\ndef update_model(model, global_mu, size, cpu, gpu, args):\n # all_param = model.state_dict()\n\n # receive the parameter variance from workers \n for param in model.parameters():\n tensor = torch.zeros_like(param.data, device=cpu)\n gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]\n dist.gather(tensor=tensor, gather_list=gather_list, dst=0)\n param.data = torch.zeros_like(param.data, device=gpu)\n for w in range(size):\n # Suppose the model received from clients are well processed \n param.data = param.data + gather_list[w].clone().detach().to(gpu)\n\n # receive averaged K from workers\n avg_k_list = [torch.tensor(0.0) for _ in range(size)]\n dist.gather(tensor=torch.tensor(0.0), gather_list=avg_k_list, dst=0)\n avg_k = sum(avg_k_list)\n print('Averaged K:', avg_k)\n # send averaged K to workers \n avg_k_list = [avg_k if args.avg_k==-1 else torch.tensor(float(args.avg_k)) for _ in range(size)]\n dist.scatter(tensor=avg_k, scatter_list=avg_k_list)\n\n # receive the mu from clients\n for idx, param in enumerate(global_mu):\n tensor = torch.zeros_like(param.data, device=cpu)\n gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]\n dist.gather(tensor=tensor, gather_list=gather_list, dst=0)\n global_mu[idx] = torch.zeros_like(param.data, device=gpu)\n for w in range(size):\n # Suppose the model received from clients are well processed \n global_mu[idx] = global_mu[idx] + gather_list[w].clone().detach().to(gpu)\n\n # send the parameters to workers \n for param in model.parameters():\n tmp_p = param.clone().detach().to(cpu)\n scatter_p_list = [tmp_p for _ in range(size)]\n dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)\n if torch.sum(torch.isnan(tmp_p)) > 0:\n print(\"NaN occurs. Terminate. 
\")\n exit(-1)\n\n # send global_mu to workers\n for param in global_mu:\n tmp_p = param.clone().detach().to(cpu)\n scatter_p_list = [tmp_p for _ in range(size)]\n dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)\n\n # model.load_state_dict(all_param)\n\ndef run(size, model, args, test_data, f_result, cpu, gpu):\n # Receive the weights from all clients \n temp_w = torch.tensor([0.0 for _ in range(args.num_workers+1)])\n weights = [torch.tensor([0.0 for _ in range(args.num_workers+1)]) for _ in range(size)]\n dist.gather(tensor=temp_w, gather_list=weights, dst=0)\n weights = sum(weights)\n weights = weights / torch.sum(weights)\n print('weights:', weights)\n\n # send weights to clients\n weights_list = [weights.clone().detach().to(cpu) for _ in range(size)]\n dist.scatter(tensor=temp_w, scatter_list=weights_list)\n \n start = time.time()\n model = model.cuda(gpu)\n\n for p in model.parameters():\n tmp_p = p.clone().detach().to(cpu)\n scatter_p_list = [tmp_p for _ in range(size)]\n # dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list, group=group)\n dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)\n\n global_mu = [torch.zeros_like(param.data, device=gpu) for param in model.parameters()]\n\n print('Model has sent to all nodes! ')\n print('Begin!') \n\n np.random.seed(42)\n\n for t in range(args.T):\n model.train()\n # send participants to all clients \n participants = np.random.choice(np.arange(len(weights)), size=args.num_part, replace=True, p=weights.numpy()) if args.partial else np.arange(len(weights))\n print('Participants list:', list(participants))\n participants = torch.tensor(participants).to(cpu)\n part_list = [participants for _ in range(size)]\n dist.scatter(tensor=participants, scatter_list=part_list)\n\n # receive the list of train loss from workers\n info_list = [torch.tensor(0.0) for _ in range(size)]\n # dist.gather(tensor=torch.tensor([0.0]), gather_list=info_list, group=group)\n dist.gather(tensor=torch.tensor(0.0), gather_list=info_list, dst=0)\n # info_list = np.concatenate([list(a) for a in info_list])\n # train_loss = sum(info_list).item() / args.num_part if args.partial else sum(info_list * weights).item()\n train_loss = sum(info_list).item()\n\n # if args.partial:\n # update_model_partial(model, size, cpu, gpu, args.num_part)\n # else:\n # update_model_full(model, size, cpu, gpu, weights)\n update_model(model, global_mu, size, cpu, gpu, args)\n\n timestamp = time.time() - start\n test_acc = test_model(model, test_data, gpu)\n print(\"Epoch: {}\\t\\tLoss: {}\\t\\tAccuracy: {}\".format(t, train_loss, test_acc))\n f_result.write(str(t) + \"\\t\" + str(timestamp) + \"\\t\" + str(train_loss) + \"\\t\" + str(test_acc) + \"\\n\")\n f_result.flush()\n\ndef init_processes(rank, size, model, args, test_data, cpu, gpu, backend='mpi'):\n if backend == 'mpi':\n dist.init_process_group(backend)\n elif backend == 'gloo':\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29500'\n dist.init_process_group(backend, rank=rank, world_size=size)\n if not os.path.exists(args.result):\n os.makedirs(args.result)\n result_file = os.path.join(args.result, '{}.txt'.format(len(os.listdir(args.result))))\n f_result = open(result_file, 'w')\n f_result.write(json.dumps(vars(args)) + '\\n')\n run(size, model, args, test_data, f_result, cpu, gpu)",
"from torchvision.datasets import EMNIST\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nimport os\n\nfrom .utils import _get_partitioner, _use_partitioner, CustomImageFolder\n\ntransform = transforms.Compose([transforms.ToTensor()])\n\ndef get_dataset(ranks:list, workers:list, batch_size:int, data_aug:bool=True, dataset_root='./dataset', **kwargs):\n if data_aug:\n trainset = EMNIST(root=dataset_root + '/emnist_data', split='byclass', train=True, download=True, transform=transform)\n testset = EMNIST(root=dataset_root + '/emnist_data', split='byclass', train=False, download=True, transform=transform)\n else:\n trainset = EMNIST(root=dataset_root + '/emnist_data', split='byclass', train=True, download=True)\n testset = EMNIST(root=dataset_root + '/emnist_data', split='byclass', train=False, download=True)\n \n partitioner = _get_partitioner(trainset, workers, **kwargs)\n data_ratio_pairs = {}\n for rank in ranks:\n data, ratio = _use_partitioner(partitioner, rank, workers)\n data = DataLoader(dataset=data, batch_size=batch_size, shuffle=False)\n data_ratio_pairs[rank] = (data, ratio)\n testset = DataLoader(dataset=testset, batch_size=batch_size, shuffle=False)\n\n return data_ratio_pairs, testset\n\ndef get_dataset_with_precat(ranks:list, workers:list, batch_size:int, test_required:bool=False, dataset_root='./dataset'):\n if test_required:\n try:\n testset = get_testset_from_folder(batch_size, dataset_root)\n except:\n testset = get_testdataset(batch_size, dataset_root=dataset_root)\n finally:\n testset = DataLoader(dataset=testset, batch_size=batch_size, shuffle=False)\n else:\n testset = None\n\n data_ratio_pairs = {}\n for rank in ranks:\n idx = np.where(workers == rank)[0][0]\n current_path = dataset_root + '/emnist_data/{}_partitions/{}'.format(len(workers), idx)\n trainset = CustomImageFolder(root=current_path, transform=transform)\n trainset = DataLoader(dataset=trainset, batch_size=batch_size, shuffle=False)\n with open(current_path + '/weight.txt', 'r') as f:\n ratio = eval(f.read())\n data_ratio_pairs[rank] = (trainset, ratio)\n testset = DataLoader(dataset=testset, batch_size=batch_size, shuffle=False)\n \n return data_ratio_pairs, testset\n\ndef get_testdataset(batch_size:int, dataset_root='./dataset'):\n testset = EMNIST(root=dataset_root + '/emnist_data', split='byclass', train=False, download=True, transform=transform)\n testset = DataLoader(dataset=testset, batch_size=batch_size, shuffle=False)\n return testset\n\ndef get_testset_from_folder(batch_size:int, dataset_root='./dataset'):\n current_path = dataset_root + '/emnist_data/testset'\n testset = CustomImageFolder(root=current_path, transform=transform)\n testset = DataLoader(dataset=testset, batch_size=batch_size, shuffle=False)\n return testset\n\nif __name__ == \"__main__\":\n # store partitioned dataset \n num_workers, bsz = 10, 1\n workers = np.arange(num_workers) + 1\n path = 'D:/dataset'\n \n data_ratio_pairs, testset = get_dataset(workers, workers, bsz, isNonIID=False, dataset_root=path, data_aug=False)\n path = path + '/emnist_data/{}_partitions'.format(num_workers)\n if os.path.exists(path) is False:\n os.makedirs(path)\n\n data_ratio_pairs[\"testset\"] = (testset, 0)\n for idx, pair in data_ratio_pairs.items():\n data, ratio = pair\n data = data.dataset\n current_path = os.path.join(path, str(idx)) if idx != \"testset\" else os.path.join(path, \"../testset\")\n if os.path.exists(current_path):\n import shutil\n shutil.rmtree(current_path)\n 
os.makedirs(current_path)\n\n with open(current_path + '/weight.txt', 'w') as f:\n f.write('{}\\t{}\\n'.format(idx, ratio))\n \n for i in range(len(data)):\n sample, target = data[i]\n if os.path.exists(os.path.join(current_path, str(int(target)))) is False:\n os.makedirs(os.path.join(current_path, str(int(target))))\n sample.save(current_path + '/{}/{}.jpg'.format(target, i))\n"
] | [
[
"torch.distributed.scatter",
"torch.distributed.init_process_group",
"numpy.random.seed",
"torch.isnan",
"torch.zeros_like",
"torch.sum",
"torch.tensor",
"torch.no_grad",
"torch.distributed.gather",
"torch.autograd.Variable"
],
[
"numpy.arange",
"torch.utils.data.DataLoader",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
balintmaci/drone_intro_exercises | [
"1d8b839fecd6b0c5e33210b9a88fd741a71034cc"
] | [
"ex1/daniel/imu_exercise_kalman.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# IMU exercise\n# Copyright (c) 2015-2020 Kjeld Jensen [email protected] [email protected]\n\n# import libraries\nfrom math import pi, sqrt, atan2\nimport matplotlib.pyplot as plt\nfrom pylab import ion\n\n# name of the file to read ##\nfileName = 'imu_razor_data_pitch_55deg.txt'\n\n## IMU type\n#imuType = 'vectornav_vn100'\nimuType = 'sparkfun_razor'\n\n# other parameters\nshowPlot = True\nshow3DLiveView = False\nshow3DLiveViewInterval = 3\n\nif show3DLiveView == True:\n\tfrom imu_box3d import imu_visualize\n\n\n##### Insert initialize code below ###################\n\n# approx. bias values determined by averaging over static measurements\nbias_gyro_x = 3.95*3.14/180 # [rad/measurement]\nbias_gyro_y = 2.85*3.14/180 # [rad/measurement]\nbias_gyro_z = 0.41*3.14/180 # [rad/measurement]\n\n# variances\ngyroVar = 0.02\npitchVar = 0.01\n\n# Kalman filter start guess\nestAngle = 0\nestVar = 3.14\n\n# Kalman filter housekeeping variables\ngyroVarAcc = 0.0\n\n######################################################\n\n## Variables for plotting ##\nplotDataGyro = []\nplotDataAcc = []\nplotDataKalman = []\n\n## Initialize your variables here ##\ngyro_x_rel = 0.0\ngyro_y_rel = 0.0\ngyro_z_rel = 0.0\n\n# open the imu data file\nf = open (fileName, \"r\")\n\n# initialize variables\ncount = 0\n\n# initialize 3D liveview\nif show3DLiveView == True:\n\timuview = imu_visualize()\n\timuview.set_axis (0, 0, 0)\n\timuview.update()\n\n# looping through file\nfor line in f:\n\tcount += 1\n\n\t# split the line into CSV formatted data\n\tline = line.replace ('*',',') # make the checkum another csv value\n\tcsv = line.split(',')\n\n\t# keep track of the timestamps \n\tts_recv = float(csv[0])\n\tif count == 1: \n\t\tts_now = ts_recv # only the first time\n\tts_prev = ts_now\n\tts_now = ts_recv\n\n\tif imuType == 'sparkfun_razor': \n\t\t# import data from a SparkFun Razor IMU (SDU firmware)\n\t\t# outputs ENU reference system\n\t\tacc_x = int(csv[2]) / 1000.0 * 4 * 9.82;\n\t\tacc_y = int(csv[3]) / 1000.0 * 4 * 9.82;\n\t\tacc_z = int(csv[4]) / 1000.0 * 4 * 9.82;\n\t\tgyro_x = int(csv[5]) * 1/14.375 * pi/180.0;\n\t\tgyro_y = int(csv[6]) * 1/14.375 * pi/180.0;\n\t\tgyro_z = int(csv[7]) * 1/14.375 * pi/180.0;\n\n\telif imuType == 'vectornav_vn100': \n\t\t# import data from a VectorNav VN-100 configured to output $VNQMR\n\t\t# outputs NED reference system (therefore converted to ENU)\n\t\tacc_y = float(csv[9])\n\t\tacc_x = float(csv[10])\n\t\tacc_z = -float(csv[11])\n\t\tgyro_y = float(csv[12])\n\t\tgyro_x = float(csv[13])\n\t\tgyro_z = -float(csv[14])\n\n\t# subtract defined static bias for each gyro\t\t\n\tgyro_x -= bias_gyro_x\n\tgyro_y -= bias_gyro_y\n\tgyro_z -= bias_gyro_z\n\n\t##### Insert loop code below #########################\n\n\t# Variables available\n\t# ----------------------------------------------------\n\t# count\t\tCurrent number of updates\t\t\n\t# ts_prev\tTime stamp at the previous update\n\t# ts_now\tTime stamp at this update\n\t# acc_x\t\tAcceleration measured along the x axis\n\t# acc_y\t\tAcceleration measured along the y axis\n\t# acc_z\t\tAcceleration measured along the z axis\n\t# gyro_x\tAngular velocity measured about the x axis\n\t# gyro_y\tAngular velocity measured about the y axis\n\t# gyro_z\tAngular velocity measured about the z axis\n\n\t## Insert your code here ##\n\t# calculate pitch (x-axis) and roll (y-axis) angles\n\tpitch = atan2(acc_y,sqrt(acc_x*acc_x + acc_z*acc_z))\n\troll = atan2(-acc_x, acc_z)\n\n\t# integrate gyro velocities to 
releative angles\n\tgyro_x_rel += gyro_x*(ts_now-ts_prev)\n\tgyro_y_rel += gyro_y*(ts_now-ts_prev)\n\tgyro_z_rel += gyro_z*(ts_now-ts_prev)\n\n\t# Kalman prediction step (we have new data in each iteration)\n\tgyroVarAcc += gyroVar\n\testAngle += gyro_y*(ts_now-ts_prev)\n\testVar += gyroVarAcc*(ts_now-ts_prev)\n\n\t# Kalman correction step (we have new data in each iteration)\n\tK = estVar/(estVar+pitchVar)\n\testAngle += K*(roll-estAngle)\n\testVar *= (1-K)\n\tgyroVarAcc = 0\n\n\t# define which value to plot as the Kalman filter estimate\n\tkalman_estimate = estAngle\n\n\t# define which value to plot as the absolute value (pitch/roll)\n\tpitch_roll_plot = roll\n\n\t# define which value to plot as the relative gyro value\n\tgyro_rel_plot = gyro_y_rel\n\n\t######################################################\n\n\t# if 3D liveview is enabled\n\tif show3DLiveView == True and count % show3DLiveViewInterval == 0:\n\n\t\t# determine what variables to liveview\n\t\troll_view = 0.0\n\t\tyaw_view = 0.0\n\t\tpitch_view = kalman_estimate\n\n\t\timuview.set_axis (-pitch_view, -yaw_view, roll_view)\n\t\timuview.update()\n\n\t# if plotting is enabled\n\tif showPlot == True:\n\t\tplotDataGyro.append(gyro_rel_plot*180.0/pi)\n\t\tplotDataAcc.append(pitch_roll_plot*180.0/pi)\n\t\tplotDataKalman.append(kalman_estimate*180.0/pi)\n\n# closing the file\t\nf.close()\n\n# show the plot\nif showPlot == True:\n\tion()\n\tplt.figure(1)\n\tplt.title('Gyro integrated (relative) angle')\n\tplt.plot(plotDataGyro)\n\tplt.savefig('imu_exercise_gyro.png')\n\n\tplt.figure(2)\n\tplt.title('Accelerometer (blue) & Kalman estimation (red) angles')\n\tplt.plot(plotDataAcc,'blue')\n\tplt.plot(plotDataKalman,'red')\n\tplt.savefig('imu_exercise_acc_kalman.png')\n\tplt.draw()\n\tprint ('Press enter to quit')\n\treal_raw_input = vars(__builtins__).get('raw_input',input)\n\treal_raw_input()\n\n\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CanyellWang/MachineLearning_Python_wchy | [
"7eac77f7446a0c69bfb1a8be7da405895409d131"
] | [
"NeuralNetwok/NeuralNetwork.py"
] | [
"#-*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy import io as spio\nfrom matplotlib import pyplot as plt\nfrom scipy import optimize\nfrom matplotlib.font_manager import FontProperties\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=14) # 解决windows环境下画图汉字乱码问题\n\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nimport time\n\ndef neuralNetwork(input_layer_size,hidden_layer_size,out_put_layer):\n data_img = loadmat_data(\"data_digits.mat\")\n X = data_img['X']\n y = data_img['y']\n\n '''scaler = StandardScaler()\n scaler.fit(X)\n X = scaler.transform(X)''' \n \n m,n = X.shape\n \"\"\"digits = datasets.load_digits()\n X = digits.data\n y = digits.target\n m,n = X.shape\n \n scaler = StandardScaler()\n scaler.fit(X)\n X = scaler.transform(X)\"\"\"\n \n ## 随机显示几行数据\n rand_indices = [t for t in [np.random.randint(x-x, m) for x in range(100)]] # 生成100个0-m的随机数\n display_data(X[rand_indices,:]) # 显示100个数字 \n \n #nn_params = np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1)))\n \n Lambda = 1\n \n initial_Theta1 = randInitializeWeights(input_layer_size,hidden_layer_size); \n initial_Theta2 = randInitializeWeights(hidden_layer_size,out_put_layer)\n \n initial_nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) #展开theta \n #np.savetxt(\"testTheta.csv\",initial_nn_params,delimiter=\",\")\n start = time.time()\n result = optimize.fmin_cg(nnCostFunction, initial_nn_params, fprime=nnGradient, args=(input_layer_size,hidden_layer_size,out_put_layer,X,y,Lambda), maxiter=100)\n print (u'执行时间:',time.time()-start)\n print (result)\n '''可视化 Theta1'''\n length = result.shape[0]\n Theta1 = result[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)\n Theta2 = result[hidden_layer_size*(input_layer_size+1):length].reshape(out_put_layer,hidden_layer_size+1) \n display_data(Theta1[:,1:length])\n display_data(Theta2[:,1:length])\n '''预测'''\n p = predict(Theta1,Theta2,X)\n print (u\"预测准确度为:%f%%\"%np.mean(np.float64(p == y.reshape(-1,1))*100)) \n res = np.hstack((p,y.reshape(-1,1)))\n np.savetxt(\"predict.csv\", res, delimiter=',')\n \n\n# 加载mat文件\ndef loadmat_data(fileName):\n return spio.loadmat(fileName)\n\n# 显示100个数字\ndef display_data(imgData):\n sum = 0\n '''\n 显示100个数(若是一个一个绘制将会非常慢,可以将要画的数字整理好,放到一个矩阵中,显示这个矩阵即可)\n - 初始化一个二维数组\n - 将每行的数据调整成图像的矩阵,放进二维数组\n - 显示即可\n '''\n m,n = imgData.shape\n width = np.int32(np.round(np.sqrt(n)))\n height = np.int32(n/width);\n rows_count = np.int32(np.floor(np.sqrt(m)))\n cols_count = np.int32(np.ceil(m/rows_count))\n pad = 1\n display_array = -np.ones((pad+rows_count*(height+pad),pad+cols_count*(width+pad)))\n for i in range(rows_count):\n for j in range(cols_count):\n if sum >= m: #超过了行数,退出当前循环\n break;\n display_array[pad+i*(height+pad):pad+i*(height+pad)+height,pad+j*(width+pad):pad+j*(width+pad)+width] = imgData[sum,:].reshape(height,width,order=\"F\") # order=F指定以列优先,在matlab中是这样的,python中需要指定,默认以行\n sum += 1\n if sum >= m: #超过了行数,退出当前循环\n break;\n \n plt.imshow(display_array,cmap='gray') #显示灰度图像\n plt.axis('off')\n plt.show()\n\n# 代价函数\ndef nnCostFunction(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):\n length = nn_params.shape[0] # theta的中长度\n # 还原theta1和theta2\n Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)\n Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1)\n \n # np.savetxt(\"Theta1.csv\",Theta1,delimiter=',')\n 
\n m = X.shape[0]\n class_y = np.zeros((m,num_labels)) # 数据的y对应0-9,需要映射为0/1的关系\n # 映射y\n for i in range(num_labels):\n class_y[:,i] = np.int32(y==i).reshape(1,-1) # 注意reshape(1,-1)才可以赋值\n \n '''去掉theta1和theta2的第一列,因为正则化时从1开始''' \n Theta1_colCount = Theta1.shape[1] \n Theta1_x = Theta1[:,1:Theta1_colCount]\n Theta2_colCount = Theta2.shape[1] \n Theta2_x = Theta2[:,1:Theta2_colCount]\n # 正则化向theta^2\n term = np.dot(np.transpose(np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1)))),np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1))))\n \n '''正向传播,每次需要补上一列1的偏置bias'''\n a1 = np.hstack((np.ones((m,1)),X)) \n z2 = np.dot(a1,np.transpose(Theta1)) \n a2 = sigmoid(z2)\n a2 = np.hstack((np.ones((m,1)),a2))\n z3 = np.dot(a2,np.transpose(Theta2))\n h = sigmoid(z3) \n '''代价''' \n J = -(np.dot(np.transpose(class_y.reshape(-1,1)),np.log(h.reshape(-1,1)))+np.dot(np.transpose(1-class_y.reshape(-1,1)),np.log(1-h.reshape(-1,1)))-Lambda*term/2)/m \n #temp1 = (h.reshape(-1,1)-class_y.reshape(-1,1))\n #temp2 = (temp1**2).sum()\n #J = 1/(2*m)*temp2\n return np.ravel(J)\n\n# 梯度\ndef nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):\n length = nn_params.shape[0]\n Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1).copy() # 这里使用copy函数,否则下面修改Theta的值,nn_params也会一起修改\n Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1).copy()\n m = X.shape[0]\n class_y = np.zeros((m,num_labels)) # 数据的y对应0-9,需要映射为0/1的关系 \n # 映射y\n for i in range(num_labels):\n class_y[:,i] = np.int32(y==i).reshape(1,-1) # 注意reshape(1,-1)才可以赋值\n \n '''去掉theta1和theta2的第一列,因为正则化时从1开始'''\n Theta1_colCount = Theta1.shape[1] \n Theta1_x = Theta1[:,1:Theta1_colCount]\n Theta2_colCount = Theta2.shape[1] \n Theta2_x = Theta2[:,1:Theta2_colCount]\n \n Theta1_grad = np.zeros((Theta1.shape)) #第一层到第二层的权重\n Theta2_grad = np.zeros((Theta2.shape)) #第二层到第三层的权重\n \n \n '''正向传播,每次需要补上一列1的偏置bias'''\n a1 = np.hstack((np.ones((m,1)),X))\n z2 = np.dot(a1,np.transpose(Theta1))\n a2 = sigmoid(z2)\n a2 = np.hstack((np.ones((m,1)),a2))\n z3 = np.dot(a2,np.transpose(Theta2))\n h = sigmoid(z3)\n \n \n '''反向传播,delta为误差,'''\n delta3 = np.zeros((m,num_labels))\n delta2 = np.zeros((m,hidden_layer_size))\n for i in range(m):\n #delta3[i,:] = (h[i,:]-class_y[i,:])*sigmoidGradient(z3[i,:]) # 均方误差的误差率\n delta3[i,:] = h[i,:]-class_y[i,:] # 交叉熵误差率\n Theta2_grad = Theta2_grad+np.dot(np.transpose(delta3[i,:].reshape(1,-1)),a2[i,:].reshape(1,-1))\n delta2[i,:] = np.dot(delta3[i,:].reshape(1,-1),Theta2_x)*sigmoidGradient(z2[i,:])\n Theta1_grad = Theta1_grad+np.dot(np.transpose(delta2[i,:].reshape(1,-1)),a1[i,:].reshape(1,-1))\n \n Theta1[:,0] = 0\n Theta2[:,0] = 0 \n '''梯度'''\n grad = (np.vstack((Theta1_grad.reshape(-1,1),Theta2_grad.reshape(-1,1)))+Lambda*np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1))))/m\n return np.ravel(grad)\n\n# S型函数 \ndef sigmoid(z):\n h = np.zeros((len(z),1)) # 初始化,与z的长度一致\n \n h = 1.0/(1.0+np.exp(-z))\n return h\n\n# S型函数导数\ndef sigmoidGradient(z):\n g = sigmoid(z)*(1-sigmoid(z))\n return g\n\n# 随机初始化权重theta\ndef randInitializeWeights(L_in,L_out):\n W = np.zeros((L_out,1+L_in)) # 对应theta的权重\n epsilon_init = (6.0/(L_out+L_in))**0.5\n W = np.random.rand(L_out,1+L_in)*2*epsilon_init-epsilon_init # np.random.rand(L_out,1+L_in)产生L_out*(1+L_in)大小的随机矩阵\n return W\n\n\n# 检验梯度是否计算正确\ndef checkGradient(Lambda = 0):\n '''构造一个小型的神经网络验证,因为数值法计算梯度很浪费时间,而且验证正确后之后就不再需要验证了'''\n input_layer_size = 3\n hidden_layer_size = 5\n 
num_labels = 3\n m = 5\n initial_Theta1 = debugInitializeWeights(input_layer_size,hidden_layer_size); \n initial_Theta2 = debugInitializeWeights(hidden_layer_size,num_labels)\n X = debugInitializeWeights(input_layer_size-1,m)\n y = 1+np.transpose(np.mod(np.arange(1,m+1), num_labels))# 初始化y\n \n y = y.reshape(-1,1)\n nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) #展开theta \n '''BP求出梯度'''\n grad = nnGradient(nn_params, input_layer_size, hidden_layer_size, \n num_labels, X, y, Lambda) \n '''使用数值法计算梯度'''\n num_grad = np.zeros((nn_params.shape[0]))\n step = np.zeros((nn_params.shape[0]))\n e = 1e-4\n for i in range(nn_params.shape[0]):\n step[i] = e\n loss1 = nnCostFunction(nn_params-step.reshape(-1,1), input_layer_size, hidden_layer_size, \n num_labels, X, y, \n Lambda)\n loss2 = nnCostFunction(nn_params+step.reshape(-1,1), input_layer_size, hidden_layer_size, \n num_labels, X, y, \n Lambda)\n num_grad[i] = (loss2-loss1)/(2*e)\n step[i]=0\n # 显示两列比较\n res = np.hstack((num_grad.reshape(-1,1),grad.reshape(-1,1)))\n print(\"检查梯度的结果,第一列为数值法计算得到的,第二列为BP得到的:\")\n print (res)\n\n# 初始化调试的theta权重\ndef debugInitializeWeights(fan_in,fan_out):\n W = np.zeros((fan_out,fan_in+1))\n x = np.arange(1,fan_out*(fan_in+1)+1)\n W = np.sin(x).reshape(W.shape)/10\n return W\n\n# 预测\ndef predict(Theta1,Theta2,X):\n m = X.shape[0]\n num_labels = Theta2.shape[0]\n #p = np.zeros((m,1))\n '''正向传播,预测结果'''\n X = np.hstack((np.ones((m,1)),X))\n h1 = sigmoid(np.dot(X,np.transpose(Theta1)))\n h1 = np.hstack((np.ones((m,1)),h1))\n h2 = sigmoid(np.dot(h1,np.transpose(Theta2)))\n \n '''\n 返回h中每一行最大值所在的列号\n - np.max(h, axis=1)返回h中每一行的最大值(是某个数字的最大概率)\n - 最后where找到的最大概率所在的列号(列号即是对应的数字)\n '''\n #np.savetxt(\"h2.csv\",h2,delimiter=',')\n p = np.array(np.where(h2[0,:] == np.max(h2, axis=1)[0])) \n for i in np.arange(1, m):\n t = np.array(np.where(h2[i,:] == np.max(h2, axis=1)[i]))\n p = np.vstack((p,t))\n return p \n\nif __name__ == \"__main__\":\n checkGradient()\n neuralNetwork(400, 25, 10)"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"numpy.max",
"numpy.exp",
"numpy.random.randint",
"numpy.arange",
"scipy.io.loadmat",
"numpy.sin",
"numpy.ceil",
"matplotlib.pyplot.axis",
"numpy.ravel",
"numpy.zeros",
"scipy.optimize.fmin_cg",
"matplotlib.font_manager.FontProperties",
"numpy.random.rand",
"numpy.transpose",
"numpy.savetxt",
"matplotlib.pyplot.show",
"numpy.int32",
"numpy.ones",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
bayesianbrad/pyprob | [
"a426fc51c1d6da13052979c21af447f9c4023642",
"a426fc51c1d6da13052979c21af447f9c4023642"
] | [
"pyprob/nn/dataset.py",
"pyprob/distributions/multivariate_normal.py"
] | [
"import torch\nfrom torch.utils.data import Dataset, ConcatDataset, Sampler\nimport torch.distributed as dist\nimport math\nimport os\nimport sys\nimport shelve\nfrom glob import glob\nimport numpy as np\nimport uuid\nfrom termcolor import colored\nfrom collections import Counter, OrderedDict\nimport random\n\nfrom .. import util\nfrom ..util import TraceMode, PriorInflation\nfrom ..concurrency import ConcurrentShelf\n\n\nclass Batch():\n def __init__(self, traces):\n self.traces = traces\n self.size = len(traces)\n sub_batches = {}\n total_length_controlled = 0\n for trace in traces:\n tl = trace.length_controlled\n if tl == 0:\n raise ValueError('Trace of length zero.')\n total_length_controlled += tl\n trace_hash = ''.join([variable.address for variable in trace.variables_controlled])\n if trace_hash not in sub_batches:\n sub_batches[trace_hash] = []\n sub_batches[trace_hash].append(trace)\n self.sub_batches = list(sub_batches.values())\n self.mean_length_controlled = total_length_controlled / self.size\n\n def __len__(self):\n return len(self.traces)\n\n def __getitem__(self, key):\n return self.traces[key]\n\n def to(self, device):\n for trace in self.traces:\n trace.to(device=device)\n\n\nclass OnlineDataset(Dataset):\n def __init__(self, model, length=None, prior_inflation=PriorInflation.DISABLED):\n self._model = model\n if length is None:\n length = int(1e6)\n self._length = length\n self._prior_inflation = prior_inflation\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, idx):\n return next(self._model._trace_generator(trace_mode=TraceMode.PRIOR_FOR_INFERENCE_NETWORK, prior_inflation=self._prior_inflation))\n\n @staticmethod\n def _prune_trace(trace):\n del(trace.variables)\n # trace.variables_controlled = []\n del(trace.variables_uncontrolled)\n del(trace.variables_replaced)\n del(trace.variables_observed)\n del(trace.variables_observable)\n del(trace.variables_tagged)\n del(trace.variables_dict_address)\n del(trace.variables_dict_address_base)\n # trace.named_variables = {}\n del(trace.result)\n del(trace.log_prob)\n del(trace.log_prob_observed)\n # del(trace.log_importance_weight)\n # trace.length = 0\n # trace.length_controlled = 0\n del(trace.execution_time_sec)\n for variable in trace.variables_controlled:\n # variable.distribution = distribution\n # if value is None:\n # variable.value = None\n # else:\n # variable.value = util.to_tensor(value)\n del(variable.address_base)\n # variable.address = address\n del(variable.instance)\n del(variable.log_prob)\n del(variable.control)\n del(variable.replace)\n del(variable.name)\n del(variable.observable)\n del(variable.observed)\n del(variable.reused)\n del(variable.tagged)\n for _, variable in trace.named_variables.items():\n controlled = False\n for v in trace.variables_controlled:\n if variable is v: # Needs to be implemented this way to compare object references instead of object hashes (which change as a result of potentially deleted fields)\n controlled = True\n break\n if not controlled:\n del(variable.distribution)\n # if value is None:\n # variable.value = None\n # else:\n # variable.value = util.to_tensor(value)\n del(variable.address_base)\n del(variable.address)\n del(variable.instance)\n del(variable.log_prob)\n del(variable.control)\n del(variable.replace)\n del(variable.name)\n del(variable.observable)\n del(variable.observed)\n del(variable.reused)\n del(variable.tagged)\n\n def save_dataset(self, dataset_dir, num_traces, num_traces_per_file, *args, **kwargs):\n num_files = math.ceil(num_traces 
/ num_traces_per_file)\n util.progress_bar_init('Saving offline dataset, traces:{}, traces per file:{}, files:{}'.format(num_traces, num_traces_per_file, num_files), num_traces, 'Traces')\n i = 0\n while i < num_traces:\n i += num_traces_per_file\n file_name = os.path.join(dataset_dir, 'pyprob_traces_{}_{}'.format(num_traces_per_file, str(uuid.uuid4())))\n shelf = shelve.open(file_name, flag='c')\n for j in range(num_traces_per_file):\n trace = next(self._model._trace_generator(trace_mode=TraceMode.PRIOR, prior_inflation=self._prior_inflation, *args, **kwargs))\n self._prune_trace(trace)\n shelf[str(j)] = trace\n shelf['__length'] = j + 1\n shelf.close()\n util.progress_bar_update(i)\n util.progress_bar_end()\n\n\nclass OfflineDatasetFile(Dataset):\n cache = OrderedDict()\n cache_capacity = 8\n\n def __init__(self, file_name):\n self._file_name = file_name\n self._closed = False\n shelf = self._open()\n self._length = shelf['__length']\n\n def _open(self):\n # idea from https://www.kunxi.org/2014/05/lru-cache-in-python\n try:\n shelf = OfflineDatasetFile.cache.pop(self._file_name)\n # it was in the cache, put it back on the front\n OfflineDatasetFile.cache[self._file_name] = shelf\n return shelf\n except KeyError:\n # not in the cache\n if len(OfflineDatasetFile.cache) >= OfflineDatasetFile.cache_capacity:\n # cache is full, delete the last entry\n n, s = OfflineDatasetFile.cache.popitem(last=False)\n s.close()\n shelf = shelve.open(self._file_name, flag='r')\n OfflineDatasetFile.cache[self._file_name] = shelf\n return shelf\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, idx):\n shelf = self._open()\n return shelf[str(idx)]\n\n\nclass OfflineDataset(ConcatDataset):\n def __init__(self, dataset_dir):\n self._dataset_dir = dataset_dir\n # files = [name for name in os.listdir(self._dataset_dir)]\n files = sorted(glob(os.path.join(self._dataset_dir, 'pyprob_traces_sorted_*')))\n if len(files) > 0:\n self._sorted_on_disk = True\n else:\n self._sorted_on_disk = False\n files = sorted(glob(os.path.join(self._dataset_dir, 'pyprob_traces_*')))\n if len(files) == 0:\n raise RuntimeError('Cannot find any data set files at {}'.format(dataset_dir))\n datasets = []\n for file in files:\n try:\n dataset = OfflineDatasetFile(file)\n datasets.append(dataset)\n except Exception as e:\n print(e)\n print(colored('Warning: dataset file potentially corrupt, omitting: {}'.format(file), 'red', attrs=['bold']))\n super().__init__(datasets)\n print('OfflineDataset at: {}'.format(self._dataset_dir))\n print('Num. traces : {:,}'.format(len(self)))\n print('Sorted on disk : {}'.format(self._sorted_on_disk))\n if self._sorted_on_disk:\n self._sorted_indices = list(range(len(self)))\n else:\n file_name = os.path.join(self._dataset_dir, 'pyprob_hashes')\n try:\n hashes_file = shelve.open(file_name, 'r')\n hashes_exist = 'hashes' in hashes_file\n hashes_file.close()\n except:\n hashes_exist = False\n if hashes_exist:\n print('Using pre-computed hashes in: {}'.format(file_name))\n hashes_file = shelve.open(file_name, 'r')\n self._hashes = hashes_file['hashes']\n self._sorted_indices = hashes_file['sorted_indices']\n hashes_file.close()\n if torch.is_tensor(self._hashes):\n self._hashes = self._hashes.cpu().numpy()\n if len(self._sorted_indices) != len(self):\n raise RuntimeError('Length of pre-computed hashes ({}) and length of offline dataset ({}) do not match. Dataset files have been altered. 
Delete and re-generate pre-computed hash file: {}'.format(len(self._sorted_indices), len(self), file_name))\n else:\n print('No pre-computed hashes found, generating: {}'.format(file_name))\n hashes_file = shelve.open(file_name, 'c')\n hashes, sorted_indices = self._compute_hashes()\n hashes_file['hashes'] = hashes\n hashes_file['sorted_indices'] = sorted_indices\n hashes_file.close()\n self._sorted_indices = sorted_indices\n self._hashes = hashes\n print('Num. trace types : {:,}'.format(len(set(self._hashes))))\n hashes_and_counts = OrderedDict(sorted(Counter(self._hashes).items()))\n print('Trace hash\\tCount')\n for hash, count in hashes_and_counts.items():\n print('{:.8f}\\t{}'.format(hash, count))\n print()\n\n @staticmethod\n def _trace_hash(trace):\n h = hash(''.join([variable.address for variable in trace.variables_controlled])) + sys.maxsize + 1\n return float('{}.{}'.format(trace.length_controlled, h))\n\n def _compute_hashes(self):\n hashes = torch.zeros(len(self))\n util.progress_bar_init('Hashing offline dataset for sorting', len(self), 'Traces')\n for i in range(len(self)):\n hashes[i] = self._trace_hash(self[i])\n util.progress_bar_update(i)\n util.progress_bar_end()\n print('Sorting offline dataset')\n _, sorted_indices = torch.sort(hashes)\n print('Sorting done')\n return hashes.cpu().numpy(), sorted_indices.cpu().numpy()\n\n def save_sorted(self, sorted_dataset_dir, num_traces_per_file=None, num_files=None, begin_file_index=None, end_file_index=None):\n if num_traces_per_file is not None:\n if num_files is not None:\n raise ValueError('Expecting either num_traces_per_file or num_files')\n else:\n if num_files is None:\n raise ValueError('Expecting either num_traces_per_file or num_files')\n else:\n num_traces_per_file = math.ceil(len(self) / num_files)\n\n if os.path.exists(sorted_dataset_dir):\n if len(glob(os.path.join(sorted_dataset_dir, '*'))) > 0:\n print(colored('Warning: target directory is not empty: {})'.format(sorted_dataset_dir), 'red', attrs=['bold']))\n util.create_path(sorted_dataset_dir, directory=True)\n file_indices = list(util.chunks(list(self._sorted_indices), num_traces_per_file))\n num_traces = len(self)\n num_files = len(file_indices)\n num_files_digits = len(str(num_files))\n file_name_template = 'pyprob_traces_sorted_{{:d}}_{{:0{}d}}'.format(num_files_digits)\n file_names = list(map(lambda x: os.path.join(sorted_dataset_dir, file_name_template.format(num_traces_per_file, x)), range(num_files)))\n if begin_file_index is None:\n begin_file_index = 0\n if end_file_index is None:\n end_file_index = num_files\n if begin_file_index < 0 or begin_file_index > end_file_index or end_file_index > num_files or end_file_index < begin_file_index:\n raise ValueError('Invalid indexes begin_file_index:{} and end_file_index: {}'.format(begin_file_index, end_file_index))\n\n print('Sorted offline dataset, traces: {}, traces per file: {}, files: {} (overall)'.format(num_traces, num_traces_per_file, num_files))\n util.progress_bar_init('Saving sorted files with indices in range [{}, {}) ({} of {} files overall)'.format(begin_file_index, end_file_index, end_file_index - begin_file_index, num_files), end_file_index - begin_file_index + 1, 'Files')\n j = 0\n for i in range(begin_file_index, end_file_index):\n j += 1\n file_name = file_names[i]\n print(file_name)\n shelf = ConcurrentShelf(file_name)\n shelf.lock(write=True)\n for new_i, old_i in enumerate(file_indices[i]):\n shelf[str(new_i)] = self[old_i]\n shelf['__length'] = len(file_indices[i])\n shelf.unlock()\n 
util.progress_bar_update(j)\n util.progress_bar_end()\n\n\nclass TraceSampler(Sampler):\n def __init__(self, offline_dataset):\n if not isinstance(offline_dataset, OfflineDataset):\n raise TypeError('Expecting an OfflineDataset instance.')\n self._sorted_indices = offline_dataset._sorted_indices\n\n def __iter__(self):\n return iter(self._sorted_indices)\n\n def __len__(self):\n return len(self._offline_dataset)\n\n\nclass TraceBatchSampler(Sampler):\n def __init__(self, offline_dataset, batch_size, shuffle_batches=True):\n if not isinstance(offline_dataset, OfflineDataset):\n raise TypeError('Expecting an OfflineDataset instance.')\n self._batches = list(util.chunks(offline_dataset._sorted_indices, batch_size))\n self._shuffle_batches = shuffle_batches\n\n def __iter__(self):\n if self._shuffle_batches:\n np.random.shuffle(self._batches)\n return iter(self._batches)\n\n def __len__(self):\n return len(self._batches)\n\n\nclass DistributedTraceBatchSampler(Sampler):\n def __init__(self, offline_dataset, batch_size, shuffle_batches=True, num_buckets=None, shuffle_buckets=True):\n if not isinstance(offline_dataset, OfflineDataset):\n raise TypeError('Expecting an OfflineDataset instance.')\n if not dist.is_available():\n raise RuntimeError('Expecting distributed training.')\n self._world_size = dist.get_world_size()\n self._rank = dist.get_rank()\n # Randomly drop a number of traces so that the number of all minibatches in the whole dataset is an integer multiple of world size\n num_batches_to_drop = math.floor(len(offline_dataset._sorted_indices) / batch_size) % self._world_size\n num_traces_to_drop = num_batches_to_drop * batch_size\n # Ensure all ranks choose the same traces to drop\n st = random.getstate()\n random.seed(0)\n self._batches = list(util.chunks(util.drop_items(list(offline_dataset._sorted_indices), num_traces_to_drop), batch_size)) # List of all minibatches, where each minibatch is a list of trace indices\n random.setstate(st)\n # Discard last minibatch if it's smaller than batch_size\n if len(self._batches[-1]) < batch_size:\n del(self._batches[-1])\n if num_buckets is None:\n num_buckets = len(self._batches) / self._world_size\n self._num_buckets = num_buckets\n self._bucket_size = math.ceil(len(self._batches) / num_buckets)\n if self._bucket_size < self._world_size:\n raise RuntimeError('offline_dataset:{}, batch_size:{} and num_buckets:{} imply a bucket_size:{} smaller than world_size:{}'.format(len(offline_dataset), batch_size, num_buckets, self._bucket_size, self._world_size))\n # List of buckets, where each bucket is a list of minibatches\n self._buckets = list(util.chunks(self._batches, self._bucket_size))\n # Unify last two buckets if the last bucket is smaller than other buckets\n if len(self._buckets[-1]) < self._bucket_size:\n if len(self._buckets) < 2:\n raise RuntimeError('offline_dataset:{} too small for given batch_size:{} and num_buckets:{}'.format(len(offline_dataset), batch_size, num_buckets))\n self._buckets[-2].extend(self._buckets[-1])\n del(self._buckets[-1])\n self._shuffle_batches = shuffle_batches\n self._shuffle_buckets = shuffle_buckets\n self._epoch = 0\n self._current_bucket_id = 0\n\n print('DistributedTraceBatchSampler')\n print('OfflineDataset size : {:,}'.format(len(offline_dataset)))\n print('World size : {:,}'.format(self._world_size))\n print('Batch size : {:,}'.format(batch_size))\n print('Num. batches dropped: {:,}'.format(num_batches_to_drop))\n print('Num. 
batches : {:,}'.format(len(self._batches)))\n print('Bucket size : {:,}'.format(self._bucket_size))\n print('Num. buckets : {:,}'.format(self._num_buckets))\n\n def __iter__(self):\n self._epoch += 1\n bucket_ids = list(range(len(self._buckets)))\n if self._shuffle_buckets:\n # Shuffle the list of buckets (but not the order of minibatches inside each bucket) at the beginning of each epoch, deterministically based on the epoch number so that all nodes have the same bucket order\n # Idea from: https://github.com/pytorch/pytorch/blob/a3fb004b1829880547dd7b3e2cd9d16af657b869/torch/utils/data/distributed.py#L44\n st = np.random.get_state()\n np.random.seed(self._epoch)\n np.random.shuffle(bucket_ids)\n np.random.set_state(st)\n for bucket_id in bucket_ids:\n bucket = self._buckets[bucket_id]\n self._current_bucket_id = bucket_id\n # num_batches is needed to ensure that all nodes have the same number of minibatches (iterations) in each bucket, in cases where the bucket size is not divisible by world_size.\n num_batches = math.floor(len(bucket) / self._world_size)\n # Select a num_batches-sized subset of the current bucket for the current node\n # The part not selected by the current node will be selected by other nodes\n batches = bucket[self._rank:len(bucket):self._world_size][:num_batches]\n if self._shuffle_batches:\n # Shuffle the list of minibatches (but not the order trace indices inside each minibatch) selected for the current node\n np.random.shuffle(batches)\n for batch in batches:\n yield batch\n\n def __len__(self):\n return len(self._batches)\n",
"import torch\n\nfrom . import Distribution\nfrom .. import util\n\n\nclass MultivariateNormal(Distribution):\n def __init__(self,\n loc,\n covariance_matrix=None,\n precision_matrix=None,\n scale_tril=None,\n validate_args=None):\n loc = util.to_tensor(loc)\n scale = util.to_tensor(scale)\n covariance_matrix = util.to_tensor(covariance_matrix)\n precision_matrix = util.to_tensor(precision_matrix)\n scale_tril = util.to_tensor(scale_tril)\n super().__init__(name='Normal', address_suffix='Normal',\n torch_dist=torch.distributions.MultivariateNormal(loc,\n covariance_matrix,\n precision_matrix,\n scale_tril,\n validate_args))\n\n def __repr__(self):\n return 'MultivariateNormal(mean:{}, ' \\\n 'covariance_matrix:{},' \\\n 'precision_matrix:{},' \\\n 'scale_tril:{})'.format(self.locs,\n self.covariance_matrix,\n self.precision_matrix,\n self.scale_tril)\n\n def cdf(self, value):\n return self._torch_dist.cdf(value)\n\n def icdf(self, value):\n return self._torch_dist.icdf(value)\n"
] | [
[
"numpy.random.get_state",
"numpy.random.seed",
"torch.is_tensor",
"numpy.random.shuffle",
"torch.distributed.is_available",
"numpy.random.set_state",
"torch.sort",
"torch.distributed.get_rank",
"torch.distributed.get_world_size"
],
[
"torch.distributions.MultivariateNormal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hengwei-chan/graph_network_demo | [
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96",
"542f2a59b1b9708abdc718d77db7111f3ba2df96"
] | [
"reports/configs/only_logs_dmpnn4_1/other_config.py",
"reports/configs/only_logd_gin4_2/other_config.py",
"reports/configs/only_logp_dmpnn3_1/other_config.py",
"reports/configs/only_logp_dmpnn3_2/other_config.py",
"reports/configs/only_logd_dmpnn4_2/other_config.py",
"reports/configs/only_logdp_dmpnn4_2/other_config.py",
"reports/configs/only_logp_gin3_1/other_config.py",
"reports/configs/only_logsp_dmpnn4_2/other_config.py",
"reports/configs/only_logs_dmpnn8_1/other_config.py"
] | [
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN4_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN4_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logs_dmpnn4_1' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n combined_dataset: bool = False\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = False\n include_logS: bool = True\n include_logP: bool = False\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN4_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN4_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logd_gin4_2' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logd/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logd/'\n\n combined_dataset: bool = False\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'GIN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = True\n include_logS: bool = False\n include_logP: bool = False\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
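The config files stored in this row all close with the same aggregate `Config` dataclass composed of `BasicModelConfig`, `Model1Config`, `DGINConfig`, `FrACConfig`, and `MLConfig`. As a reading aid, here is a minimal sketch of how a run script could assemble that object; the run file itself is not part of this row, so the builder below takes the classes as arguments rather than guessing an import path.

```python
# Minimal sketch only: every sub-config dataclass above carries defaults, so
# bare constructors are enough. The classes are passed in explicitly because
# the config module's import path is not shown in this dataset row.
def build_config(Config, BasicModelConfig, Model1Config,
                 DGINConfig, FrACConfig, MLConfig):
    return Config(
        basic_model_config=BasicModelConfig(),
        model1_config=Model1Config(),
        d_gin_config=DGINConfig(),
        frag_acc_config=FrACConfig(),
        ml_config=MLConfig(),
    )

# e.g. (with the classes imported from the chosen config module):
# cfg = build_config(Config, BasicModelConfig, Model1Config,
#                    DGINConfig, FrACConfig, MLConfig)
# print(cfg.basic_model_config.model_name, cfg.model)  # -> 'only_logd_gin4_2' 'model11'
```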
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN3_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN3_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logp_dmpnn3_1' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logd/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logd/'\n\n combined_dataset: bool = True\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = False\n include_logS: bool = False\n include_logP: bool = True\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN3_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN3_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logp_dmpnn3_2' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logd/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logd/'\n\n combined_dataset: bool = True\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = False\n include_logS: bool = False\n include_logP: bool = True\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN4_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN4_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logd_dmpnn4_2' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logd/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logd/'\n\n combined_dataset: bool = False\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = True\n include_logS: bool = False\n include_logP: bool = False\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN4_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN4_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logdp_dmpnn4_2' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logd/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logd/'\n\n combined_dataset: bool = False\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = True\n include_logS: bool = False\n include_logP: bool = True\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN3_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN3_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logp_gin3_1' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logd/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logd/'\n\n combined_dataset: bool = True\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'GIN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = False\n include_logS: bool = False\n include_logP: bool = True\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN4_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN4_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logsp_dmpnn4_2' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n combined_dataset: bool = False\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin4_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin4_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = False\n include_logS: bool = True\n include_logP: bool = True\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'",
"from dataclasses import dataclass, field\nfrom typing import List\n\nimport tensorflow as tf\nfrom graph_networks.utilities import * \nimport logging\nimport os\n\nATOM_FEATURE_DIM = DGIN8_ATOM_FEATURE_DIM\nEDGE_FEATURE_DIM = DGIN8_EDGE_FEATURE_DIM\n\n@dataclass\nclass BasicModelConfig:\n \"\"\"\n Config for model1/2/3 run file.\n General model parameters\n \"\"\"\n model_name: str = 'only_logs_dmpnn8_1' # without h_w in DGIN gin part - added h_v_0 instead\n # whole train/eval split - no more double split within train data set\n # random train/test split in get_data_sd - only change overall_seed\n # CHANGES dgin3 10.02.2021:\n # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'\n # CHANGES dgin3 16.02.2021:\n # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM\n # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'\n # CHANGES dgin4 16.02.2021:\n # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin \n # encoding before logD prediction\n # test_frags_dgin4 was added for species inclusion in model2 call()\n batch_size: int =15\n override_if_exists: bool = True\n\n overall_seed: int = 2\n \n # path to the project folder \n project_path:str = \"./\" \n\n retrain_model: bool = False\n retrain_model_name: str = ''\n retrain_model_epoch: str = ''\n retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch\n\n train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'\n test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'\n\n combined_dataset: bool = False\n\n add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'\n add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'\n\n test_model: bool = False\n test_model_epoch: str = '887'\n\n # define the number or test runs for the CI. \n # the mean and std of the RMSE and r^2 of the combined runs are taken as the output. \n test_n_times: int = 1 \n # do you want to test the model with consensus mode? \n # if yes, a defined ML model will be included in the consensus predictions during the testing. \n consensus: bool = False \n # include dropout during testing?\n include_dropout: bool = False\n test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch\n\n # To save the prediction values for each property set to True \n # When this flag is True - the whole test dataset is taken an test_n_times is set to zero! \n save_predictions: bool = False \n # define the folder where you want to save the predictions. 
\n # For each property, a file is created under the property name (\"./logd.txt\",\"./logs.txt\",\"./logp.txt\",\"./others.txt\") \n test_prediction_output_folder: str = project_path+\"reports/predictions/\"+model_name+\"/\" \n encode_hidden: bool = False\n\n log_dir: str = project_path+'reports/logs/'+model_name+'.log' \n verbosity_level = logging.INFO\n\n model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'\n \n plot_dir: str = project_path+'reports/figures/'+model_name+'/'\n tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'\n config_log_dir: str = project_path+'reports/configs/'+model_name+'/'\n model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'\n stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'\n\n@dataclass\nclass DGINConfig:\n \"\"\"\n Config for direcpted-mpnn class.\n \"\"\"\n dropout_aggregate_dmpnn: bool = False\n layernorm_aggregate_dmpnn: bool = True\n dropout_passing_dmpnn: bool = False\n layernorm_passing_dmpnn: bool = True\n\n dropout_aggregate_gin: bool = False\n layernorm_aggregate_gin: bool = True\n dropout_passing_gin: bool = False\n layernorm_passing_gin: bool = True\n\n gin_aggregate_bias: bool = False\n dmpnn_passing_bias: bool = False\n init_bias: bool = False\n\n massge_iteration_dmpnn: int = 4\n message_iterations_gin: int = 4\n dropout_rate: float = 0.15\n input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)\n passing_hidden_size: int = 56 # this can be changed\n input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021\n\n return_hv: bool = True # model3 parameter\n\n@dataclass\nclass Model1Config:\n \"\"\"\n Config model1 class - no subclass configs are defined here.\n \"\"\"\n validation_split: float = 0.90\n learning_rate: float = 0.004\n clip_rate: float = 0.6\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n lipo_loss_mse = tf.keras.losses.mse\n lipo_loss_mae = tf.keras.losses.mae\n logP_loss_mse = tf.keras.losses.mse\n logS_loss_mse = tf.keras.losses.mse\n other_loss_mse = tf.keras.losses.mse \n mw_loss_mse = tf.keras.losses.mse\n metric = tf.keras.losses.mae\n epochs: int = 1600\n # define the number of epochs for each test run. \n save_after_epoch: int = 3 \n # dropout rate for the general model - mainly the MLP for the different log predictions \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions \n # the seed to shuffle the training/validation dataset; For the same dataset, even when \n # combined_dataset is True, it is the same training/valiation instances \n train_data_seed: int = 0 \n dropout_rate: float = 0.15 # the overall dropout rate of the readout functions\n train_data_seed: int = 0\n\n hidden_readout_1: int = 32\n hidden_readout_2: int = 14\n activation_func_readout = tf.nn.relu\n \n include_logD: bool = False\n include_logS: bool = True\n include_logP: bool = False\n\n include_other: bool = False \n include_mw: bool = False\n include_rot_bond: bool = False\n include_HBA: bool = False\n include_HBD: bool = False\n\n # define the starting threshold for the RMSE of the model. When the comnbined RMSE \n # is below this threshold, the model weights are being safed and a new threshold \n # is set. It only serves as a starting threshold so that not too many models \n # are being safed. Depends on how many log endpoints are being taken into \n # consideration - as three endpoints have a higher combined RMSE as only one \n # endpoint. 
\n best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/ \n\n # define the individual thresholds. If one model is better, the corresponding \n # model weights are being saved. \n best_evaluation_threshold_logd: float = 1.85 \n best_evaluation_threshold_logp: float = 1.65 \n best_evaluation_threshold_logs: float = 2.15 \n best_evaluation_threshold_other: float = 2.15 \n # 2.45 for all_logs\n # 0.70 logP\n # 0.75 logD\n # 1.00 logS\n # 1.75 logSD\n # 1.70 logSP\n # 1.45 logDP\n\n include_fragment_conv: bool = False # was introduced on the 4.12.2020\n\n use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss\n shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)\n\n add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction\n\n@dataclass\nclass FrACConfig:\n \"\"\"\n Config fragment aggregation class - no subclass configs are defined here.\n \"\"\"\n input_size_gin: int = 28\n layernorm_aggregate: bool = True\n reduce_mean: bool = True # when false -> reduce_sum\n\n@dataclass \nclass MLConfig: \n \"\"\" \n Configs for the ML algorithm \n \"\"\" \n # which algorithm do you want to use for the consensus? \n # possibilities are: \"SVM\", \"RF\", \"KNN\" or \"LR\" - all are regression models! \n # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;\n algorithm: str = \"SVM\" \n # which fingerprint to use - possibilities are: \"ECFP\" or \"MACCS\" \n fp_types: str = \"ECFP\" \n # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048! \n n_bits: int = 2048 \n # If \"ECFP\" fingerprint is used, define the radius \n radius: int = 4 \n # define if descriptors should be included into the non-GNN molecular representation \n include_descriptors: bool = True \n # define if the descriptors should be standardizedby scaling and centering (Sklearn) \n standardize: bool = True \n\n@dataclass\nclass Config():\n \"\"\"\n Overall config class for model2 and run file.\n Includes all submodels config\n \"\"\"\n basic_model_config: BasicModelConfig\n model1_config: Model1Config\n d_gin_config: DGINConfig\n frag_acc_config: FrACConfig\n\n ml_config: MLConfig \n model: str = 'model11'"
] | [
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.keras.optimizers.Adam"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
Artcs1/RotationDetection | [
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06",
"095be17345ee9984d8de8f24eb6b5a0b2d764a06"
] | [
"tools/r3det_gwd/train.py",
"libs/configs_old/ICDAR2015/kl/cfgs_res50_icdar2015_kl_v2.py",
"utils/kld.py",
"libs/configs_old/DOTA/r3det_gwd/cfgs_res152_dota_r3det_gwd_v3.py",
"dataloader/dataset/DOTA/data_crop.py",
"libs/configs/DOTA2.0/dcl/cfgs_res50_dota2.0_dcl_v5.py",
"libs/configs/DOTA/atss/cfgs_res50_dota_atss_v3.py",
"libs/configs/DOTA1.5/retinanet/cfgs_res50_dota1.5_v15.py",
"libs/configs_old/DOTA/kl/cfgs_res152_dota_kl_v2.py",
"libs/models/losses/losses_ridet.py",
"libs/configs/DOTA/probiou/cfgs_res50_dota_probiou_v10.py",
"libs/configs/DOTA2.0/retinanet/cfgs_res50_dota2.0_v5.py",
"libs/configs/DOTA/fcos/cfgs_res50_dota_fcos_v2.py"
] | [
"# -*- coding:utf-8 -*-\n# Author: Xue Yang <[email protected]>\n#\n# License: Apache-2.0 license\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nsys.path.append(\"../../\")\n\nfrom tools.train_base import Train\nfrom libs.configs import cfgs\nfrom libs.models.detectors.r3det_gwd import build_whole_network\nfrom libs.utils.coordinate_convert import backward_convert, get_horizen_minAreaRectangle\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = cfgs.GPU_GROUP\n\n\nclass TrainR3DetGWD(Train):\n\n def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):\n return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \\\n gtboxes_and_label_r[:int(num_objects), :].astype(np.float32)\n\n def main(self):\n with tf.Graph().as_default() as graph, tf.device('/cpu:0'):\n\n num_gpu = len(cfgs.GPU_GROUP.strip().split(','))\n global_step = slim.get_or_create_global_step()\n lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu)\n tf.summary.scalar('lr', lr)\n\n optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)\n r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs,\n is_training=True)\n\n with tf.name_scope('get_batch'):\n if cfgs.IMAGE_PYRAMID:\n shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)\n shortside_len = tf.random_shuffle(shortside_len_list)[0]\n\n else:\n shortside_len = cfgs.IMG_SHORT_SIDE_LEN\n\n img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \\\n self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,\n batch_size=cfgs.BATCH_SIZE * num_gpu,\n shortside_len=shortside_len,\n is_training=True)\n\n # data processing\n inputs_list = []\n for i in range(num_gpu):\n img = tf.expand_dims(img_batch[i], axis=0)\n pretrain_zoo = PretrainModelZoo()\n if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:\n img = img / tf.constant([cfgs.PIXEL_STD])\n\n gtboxes_and_label_r = tf.py_func(backward_convert,\n inp=[gtboxes_and_label_batch[i]],\n Tout=tf.float32)\n gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])\n\n gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])\n gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])\n\n num_objects = num_objects_batch[i]\n num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)\n\n img_h = img_h_batch[i]\n img_w = img_w_batch[i]\n\n inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])\n\n tower_grads = []\n biases_regularizer = tf.no_regularizer\n weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)\n\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpu):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i):\n with slim.arg_scope(\n [slim.model_variable, slim.variable],\n device='/device:CPU:0'):\n with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,\n slim.conv2d_transpose, slim.separable_conv2d,\n slim.fully_connected],\n weights_regularizer=weights_regularizer,\n biases_regularizer=biases_regularizer,\n biases_initializer=tf.constant_initializer(0.0)):\n\n gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label,\n inp=[inputs_list[i][1],\n inputs_list[i][2],\n 
inputs_list[i][3]],\n Tout=[tf.float32, tf.float32])\n gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])\n gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])\n\n img = inputs_list[i][0]\n img_shape = inputs_list[i][-2:]\n img = tf.image.crop_to_bounding_box(image=img,\n offset_height=0,\n offset_width=0,\n target_height=tf.cast(img_shape[0], tf.int32),\n target_width=tf.cast(img_shape[1], tf.int32))\n\n outputs = r3det_gwd.build_whole_detection_network(input_img_batch=img,\n gtboxes_batch_h=gtboxes_and_label_h,\n gtboxes_batch_r=gtboxes_and_label_r,\n gpu_id=i)\n gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(img_batch=img,\n boxes=gtboxes_and_label_h[\n :, :-1],\n labels=gtboxes_and_label_h[\n :, -1],\n method=0)\n gtboxes_in_img_r = self.drawer.draw_boxes_with_categories(img_batch=img,\n boxes=gtboxes_and_label_r[\n :, :-1],\n labels=gtboxes_and_label_r[\n :, -1],\n method=1)\n tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)\n tf.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r)\n\n if cfgs.ADD_BOX_IN_TENSORBOARD:\n detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(\n img_batch=img,\n boxes=outputs[0],\n scores=outputs[1],\n labels=outputs[2],\n method=1)\n tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)\n\n loss_dict = outputs[-1]\n total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)\n\n if i == num_gpu - 1:\n regularization_losses = tf.get_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES)\n # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())\n total_losses = total_losses + tf.add_n(regularization_losses)\n\n tf.get_variable_scope().reuse_variables()\n grads = optimizer.compute_gradients(total_losses)\n if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:\n grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)\n tower_grads.append(grads)\n self.log_printer(r3det_gwd, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph)\n\nif __name__ == '__main__':\n\n trainer = TrainR3DetGWD(cfgs)\n trainer.main()",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n\"\"\"\n2021-02-06 kl\t74.00%\t82.28%\t77.92% 0.8\n2021-02-06\tkl\t75.25%\t80.61%\t77.84% 0.75\n2021-02-06 kl\t71.98%\t83.89%\t77.48% 0.85\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_ICDAR2015_KL_2x_20210205'\nNET_NAME = 'resnet50_v1d' # 'MobilenetV2'\n\n# ---------------------------------------- System\nROOT_PATH = os.path.abspath('../../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"3\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 200\nSAVE_WEIGHTS_INTE = 10000 * 2\n\nSUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')\nTEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')\n\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')\n\n# ------------------------------------------ Train and test\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\nADD_BOX_IN_TENSORBOARD = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nANGLE_WEIGHT = 0.5\nREG_LOSS_MODE = 3\nALPHA = 1.0\nBETA = 1.0\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 1e-3\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Dataset\nDATASET_NAME = 'ICDAR2015' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = 800\nIMG_MAX_LENGTH = 1000\nCLASS_NUM = 1\n\nIMG_ROTATE = True\nRGB2GRAY = False\nVERTICAL_FLIP = False\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = False\n\n# --------------------------------------------- Network\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nFPN_CHANNEL = 256\nNUM_SUBNET_CONV = 4\nFPN_MODE = 'fpn'\n\n# --------------------------------------------- Anchor\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 90 # or 180\n\n# -------------------------------------------- Head\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.8\n\n# -------------------------------------------- KLD\nKL_TAU = 2.0\nKL_FUNC = 0\n",
"# -*- coding: utf-8 -*-\n\n# Author: Xue Yang <[email protected]>\n#\n# License: Apache-2.0 license\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport tensorflow as tf\n\nsys.path.append('../..')\n# from utils.gaussian_wasserstein_distance import get_element1, get_element4\nfrom libs.utils.coordinate_convert import *\n# from libs.utils.rbbox_overlaps import rbbx_overlaps\n# from libs.utils.iou_cpu import get_iou_matrix\n\n\ndef iou_rotate_calculate(boxes1, boxes2, use_gpu=True, gpu_id=0):\n '''\n\n :param boxes_list1:[N, 8] tensor\n :param boxes_list2: [M, 8] tensor\n :return:\n '''\n\n boxes1 = tf.cast(boxes1, tf.float32)\n boxes2 = tf.cast(boxes2, tf.float32)\n if use_gpu:\n\n iou_matrix = tf.py_func(rbbx_overlaps,\n inp=[boxes1, boxes2, gpu_id],\n Tout=tf.float32)\n else:\n iou_matrix = tf.py_func(get_iou_matrix, inp=[boxes1, boxes2],\n Tout=tf.float32)\n\n iou_matrix = tf.reshape(iou_matrix, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n\n return iou_matrix\n\n\ndef iou_rotate_calculate1(boxes1, boxes2, use_gpu=True, gpu_id=0):\n\n # start = time.time()\n if use_gpu:\n ious = rbbx_overlaps(boxes1, boxes2, gpu_id)\n else:\n area1 = boxes1[:, 2] * boxes1[:, 3]\n area2 = boxes2[:, 2] * boxes2[:, 3]\n ious = []\n for i, box1 in enumerate(boxes1):\n temp_ious = []\n r1 = ((box1[0], box1[1]), (box1[2], box1[3]), box1[4])\n for j, box2 in enumerate(boxes2):\n r2 = ((box2[0], box2[1]), (box2[2], box2[3]), box2[4])\n\n int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]\n if int_pts is not None:\n order_pts = cv2.convexHull(int_pts, returnPoints=True)\n\n int_area = cv2.contourArea(order_pts)\n\n inter = int_area * 1.0 / (area1[i] + area2[j] - int_area)\n temp_ious.append(inter)\n else:\n temp_ious.append(0.0)\n ious.append(temp_ious)\n\n # print('{}s'.format(time.time() - start))\n\n return np.array(ious, dtype=np.float32)\n\n\ndef iou_rotate_calculate2(boxes1, boxes2):\n ious = []\n if boxes1.shape[0] != 0:\n boxes1[:, 2] += 1.0\n boxes1[:, 3] += 1.0\n boxes2[:, 2] += 1.0\n boxes2[:, 3] += 1.0\n\n area1 = boxes1[:, 2] * boxes1[:, 3]\n area2 = boxes2[:, 2] * boxes2[:, 3]\n\n for i in range(boxes1.shape[0]):\n temp_ious = []\n r1 = ((boxes1[i][0], boxes1[i][1]), (boxes1[i][2], boxes1[i][3]), boxes1[i][4])\n r2 = ((boxes2[i][0], boxes2[i][1]), (boxes2[i][2], boxes2[i][3]), boxes2[i][4])\n\n int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]\n if int_pts is not None:\n order_pts = cv2.convexHull(int_pts, returnPoints=True)\n\n int_area = cv2.contourArea(order_pts)\n\n inter = int_area * 1.0 / (area1[i] + area2[i] - int_area + 1e-4)\n\n # if boxes1[i][2] < 0.1 or boxes1[i][3] < 0.1 or boxes2[i][2] < 0.1 or boxes2[i][3] < 0.1:\n # inter = 0\n\n inter = max(0.0, min(1.0, inter))\n\n temp_ious.append(inter)\n else:\n temp_ious.append(0.0)\n ious.append(temp_ious)\n\n return np.array(ious, dtype=np.float32)\n\n\ndef diou_rotate_calculate(boxes1, boxes2):\n\n if boxes1.shape[0] != 0:\n area1 = boxes1[:, 2] * boxes1[:, 3]\n area2 = boxes2[:, 2] * boxes2[:, 3]\n d = (boxes1[:, 0] - boxes2[:, 0]) ** 2 + (boxes1[:, 1] - boxes2[:, 1])\n\n boxes1_ = forward_convert(boxes1, with_label=False)\n boxes2_ = forward_convert(boxes2, with_label=False)\n\n xmin = np.minimum(np.min(boxes1_[:, 0::2]), np.min(boxes2_[:, 0::2]))\n xmax = np.maximum(np.max(boxes1_[:, 0::2]), np.max(boxes2_[:, 0::2]))\n ymin = np.minimum(np.min(boxes1_[:, 1::2]), np.min(boxes2_[:, 1::2]))\n ymax = np.maximum(np.max(boxes1_[:, 1::2]), np.max(boxes2_[:, 
1::2]))\n\n c = (xmax - xmin) ** 2 + (ymax - ymin) ** 2\n ious = []\n for i in range(boxes1.shape[0]):\n r1 = ((boxes1[i][0], boxes1[i][1]), (boxes1[i][2], boxes1[i][3]), boxes1[i][4])\n r2 = ((boxes2[i][0], boxes2[i][1]), (boxes2[i][2], boxes2[i][3]), boxes2[i][4])\n\n int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]\n if int_pts is not None:\n order_pts = cv2.convexHull(int_pts, returnPoints=True)\n\n int_area = cv2.contourArea(order_pts)\n\n iou = int_area * 1.0 / (area1[i] + area2[i] - int_area)\n else:\n iou = 0.0\n\n ious.append(iou)\n ious = np.array(ious)\n\n dious = ious - d / c\n else:\n dious = []\n\n return np.reshape(np.array(dious, dtype=np.float32), [-1, 1])\n\n\ndef adiou_rotate_calculate(boxes1, boxes2):\n\n if boxes1.shape[0] != 0:\n area1 = boxes1[:, 2] * boxes1[:, 3]\n area2 = boxes2[:, 2] * boxes2[:, 3]\n d = (boxes1[:, 0] - boxes2[:, 0]) ** 2 + (boxes1[:, 1] - boxes2[:, 1])\n\n boxes1_ = forward_convert(boxes1, with_label=False)\n boxes2_ = forward_convert(boxes2, with_label=False)\n\n xmin = np.minimum(np.min(boxes1_[:, 0::2]), np.min(boxes2_[:, 0::2]))\n xmax = np.maximum(np.max(boxes1_[:, 0::2]), np.max(boxes2_[:, 0::2]))\n ymin = np.minimum(np.min(boxes1_[:, 1::2]), np.min(boxes2_[:, 1::2]))\n ymax = np.maximum(np.max(boxes1_[:, 1::2]), np.max(boxes2_[:, 1::2]))\n\n c = (xmax - xmin) ** 2 + (ymax - ymin) ** 2\n\n # v = (4 / (math.pi ** 2)) * (np.arctan(boxes1[:, 2]/boxes1[:, 3]) - np.arctan(boxes2[:, 2]/boxes2[:, 3])) ** 2\n\n ious = []\n for i in range(boxes1.shape[0]):\n r1 = ((boxes1[i][0], boxes1[i][1]), (boxes1[i][2], boxes1[i][3]), boxes1[i][4])\n r2 = ((boxes2[i][0], boxes2[i][1]), (boxes2[i][2], boxes2[i][3]), boxes2[i][4])\n\n int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]\n if int_pts is not None:\n order_pts = cv2.convexHull(int_pts, returnPoints=True)\n\n int_area = cv2.contourArea(order_pts)\n\n iou = int_area * 1.0 / (area1[i] + area2[i] - int_area)\n else:\n iou = 0.0\n\n ious.append(iou)\n ious = np.array(ious)\n\n # S = 1 - ious\n # alpha = v / (S + v)\n # w_temp = 2 * boxes1[:, 2]\n # ar = (8 / (math.pi ** 2)) * (np.arctan(boxes1[:, 2]/boxes1[:, 3]) - np.arctan(boxes2[:, 2]/boxes2[:, 3])) \\\n # * ((boxes1[:, 2] - w_temp) * boxes1[:, 3])\n # cious = ious - d / c - alpha * ar\n cious = (ious - d / c) * np.abs(np.cos(boxes1[:, 4] - boxes2[:, 4]))\n else:\n cious = []\n\n return np.reshape(np.array(cious, dtype=np.float32), [-1, 1])\n\n\ndef gaussian_wasserstein_distance_(boxes1, boxes2):\n boxes1 = coordinate_present_convert(boxes1, -1)\n boxes1[:, 4] += 90\n boxes1[:, 4] *= (-np.pi / 180)\n\n boxes2 = coordinate_present_convert(boxes2, -1)\n boxes2[:, 4] += 90\n boxes2[:, 4] *= (-np.pi / 180)\n\n dis = (boxes1[:, 0] - boxes2[:, 0])**2 + (boxes1[:, 1] - boxes2[:, 1])**2 + \\\n ((boxes1[:, 2] / 2 * np.cos(boxes1[:, 4])**2 + boxes1[:, 3] / 2 * np.sin(boxes1[:, 4])**2) - (boxes2[:, 2] / 2 * np.cos(boxes2[:, 4])**2 + boxes2[:, 3] / 2 * np.sin(boxes2[:, 4])**2))**2 + \\\n 2*((boxes1[:, 2] / 2 * np.cos(boxes1[:, 4])* np.sin(boxes1[:, 4] - boxes1[:, 3] / 2 * np.cos(boxes1[:, 4]) * np.sin(boxes1[:, 4]))) - (boxes2[:, 2] / 2 * np.cos(boxes2[:, 4]) * np.sin(boxes2[:, 4] - boxes2[:, 3] / 2 * np.cos(boxes2[:, 4]) * np.sin(boxes2[:, 4]))))**2 + \\\n ((boxes1[:, 2] / 2 * np.sin(boxes1[:, 4]) ** 2 + boxes1[:, 3] / 2 * np.cos(boxes1[:, 4]) ** 2) - (\n boxes2[:, 2] / 2 * np.sin(boxes2[:, 4]) ** 2 + boxes2[:, 3] / 2 * np.cos(boxes2[:, 4]) ** 2)) ** 2\n return dis\n\n\ndef gaussian_wasserstein_distance(boxes1, boxes2):\n from 
utils.gaussian_wasserstein_distance import get_element1, get_element4\n\n boxes1 = coordinate_present_convert(boxes1, -1)\n boxes1[:, 4] += 90\n boxes1[:, 4] *= (-np.pi / 180)\n #\n boxes2 = coordinate_present_convert(boxes2, -1)\n boxes2[:, 4] += 90\n boxes2[:, 4] *= (-np.pi / 180)\n\n element1 = get_element1(boxes1[:, 2], boxes1[:, 3], boxes1[:, 4], boxes2[:, 2], boxes2[:, 3], boxes2[:, 4])\n element4 = get_element4(boxes1[:, 2], boxes1[:, 3], boxes1[:, 4], boxes2[:, 2], boxes2[:, 3], boxes2[:, 4])\n dis = (boxes1[:, 0] - boxes2[:, 0])**2 + (boxes1[:, 1] - boxes2[:, 1])**2 + (element1 + element4)\n return dis\n\n\ndef wasserstein_diss_sigma(sigma1, sigma2):\n wasserstein_diss_item2 = tf.linalg.matmul(sigma1, sigma1) + tf.linalg.matmul(sigma2, sigma2) - 2 * tf.linalg.sqrtm(\n tf.linalg.matmul(tf.linalg.matmul(sigma1, tf.linalg.matmul(sigma2, sigma2)), sigma1))\n wasserstein_diss_item2 = tf.linalg.trace(wasserstein_diss_item2)\n return wasserstein_diss_item2\n\n\ndef gwd(boxes1, boxes2):\n x1, y1, w1, h1, theta1 = tf.unstack(boxes1, axis=1)\n x2, y2, w2, h2, theta2 = tf.unstack(boxes2, axis=1)\n x1 = tf.reshape(x1, [-1, 1])\n y1 = tf.reshape(y1, [-1, 1])\n h1 = tf.reshape(h1, [-1, 1])\n w1 = tf.reshape(w1, [-1, 1])\n theta1 = tf.reshape(theta1, [-1, 1])\n x2 = tf.reshape(x2, [-1, 1])\n y2 = tf.reshape(y2, [-1, 1])\n h2 = tf.reshape(h2, [-1, 1])\n w2 = tf.reshape(w2, [-1, 1])\n theta2 = tf.reshape(theta2, [-1, 1])\n theta1 *= (np.pi / 180)\n theta2 *= (np.pi / 180)\n\n sigma1_1 = w1 / 2 * tf.cos(theta1) ** 2 + h1 / 2 * tf.sin(theta1) ** 2\n sigma1_2 = w1 / 2 * tf.sin(theta1) * tf.cos(theta1) - h1 / 2 * tf.sin(theta1) * tf.cos(theta1)\n sigma1_3 = w1 / 2 * tf.sin(theta1) * tf.cos(theta1) - h1 / 2 * tf.sin(theta1) * tf.cos(theta1)\n sigma1_4 = w1 / 2 * tf.sin(theta1) ** 2 + h1 / 2 * tf.cos(theta1) ** 2\n sigma1 = tf.reshape(tf.concat([sigma1_1, sigma1_2, sigma1_3, sigma1_4], axis=-1), [-1, 2, 2])\n\n sigma2_1 = w2 / 2 * tf.cos(theta2) ** 2 + h2 / 2 * tf.sin(theta2) ** 2\n sigma2_2 = w2 / 2 * tf.sin(theta2) * tf.cos(theta2) - h2 / 2 * tf.sin(theta2) * tf.cos(theta2)\n sigma2_3 = w2 / 2 * tf.sin(theta2) * tf.cos(theta2) - h2 / 2 * tf.sin(theta2) * tf.cos(theta2)\n sigma2_4 = w2 / 2 * tf.sin(theta2) ** 2 + h2 / 2 * tf.cos(theta2) ** 2\n sigma2 = tf.reshape(tf.concat([sigma2_1, sigma2_2, sigma2_3, sigma2_4], axis=-1), [-1, 2, 2])\n\n wasserstein_diss_item1 = (x1 - x2) ** 2 + (y1 - y2) ** 2\n wasserstein_diss_item2 = tf.reshape(wasserstein_diss_sigma(sigma1, sigma2), [-1, 1])\n wasserstein_diss = wasserstein_diss_item1 + wasserstein_diss_item2\n return sigma1, sigma2, wasserstein_diss\n\n\ndef KL_divergence(mu1, mu2, mu1_T, mu2_T, sigma1, sigma2):\n sigma1_square = tf.linalg.matmul(sigma1, sigma1)\n sigma2_square = tf.linalg.matmul(sigma2, sigma2)\n item1 = tf.linalg.trace(tf.linalg.matmul(tf.linalg.inv(sigma2_square), sigma1_square))\n item2 = tf.linalg.matmul(tf.linalg.matmul(mu2 - mu1, tf.linalg.inv(sigma2_square)), mu2_T - mu1_T)\n item3 = tf.log(tf.linalg.det(sigma2_square) / tf.linalg.det(sigma1_square))\n item1 = tf.reshape(item1, [-1, ])\n item2 = tf.reshape(item2, [-1, ])\n item3 = tf.reshape(item3, [-1, ])\n return (item1 + item2 + item3 - 2) / 2.\n\n\ndef kl(boxes1, boxes2):\n x1, y1, w1, h1, theta1 = tf.unstack(boxes1, axis=1)\n x2, y2, w2, h2, theta2 = tf.unstack(boxes2, axis=1)\n x1 = tf.reshape(x1, [-1, 1])\n y1 = tf.reshape(y1, [-1, 1])\n h1 = tf.reshape(h1, [-1, 1])\n w1 = tf.reshape(w1, [-1, 1])\n theta1 = tf.reshape(theta1, [-1, 1])\n x2 = tf.reshape(x2, [-1, 1])\n y2 = 
tf.reshape(y2, [-1, 1])\n h2 = tf.reshape(h2, [-1, 1])\n w2 = tf.reshape(w2, [-1, 1])\n theta2 = tf.reshape(theta2, [-1, 1])\n theta1 *= (np.pi / 180)\n theta2 *= (np.pi / 180)\n\n sigma1_1 = w1 / 2 * tf.cos(theta1) ** 2 + h1 / 2 * tf.sin(theta1) ** 2\n sigma1_2 = w1 / 2 * tf.sin(theta1) * tf.cos(theta1) - h1 / 2 * tf.sin(theta1) * tf.cos(theta1)\n sigma1_3 = w1 / 2 * tf.sin(theta1) * tf.cos(theta1) - h1 / 2 * tf.sin(theta1) * tf.cos(theta1)\n sigma1_4 = w1 / 2 * tf.sin(theta1) ** 2 + h1 / 2 * tf.cos(theta1) ** 2\n sigma1 = tf.reshape(tf.concat([sigma1_1, sigma1_2, sigma1_3, sigma1_4], axis=-1), [-1, 2, 2])\n\n sigma2_1 = w2 / 2 * tf.cos(theta2) ** 2 + h2 / 2 * tf.sin(theta2) ** 2\n sigma2_2 = w2 / 2 * tf.sin(theta2) * tf.cos(theta2) - h2 / 2 * tf.sin(theta2) * tf.cos(theta2)\n sigma2_3 = w2 / 2 * tf.sin(theta2) * tf.cos(theta2) - h2 / 2 * tf.sin(theta2) * tf.cos(theta2)\n sigma2_4 = w2 / 2 * tf.sin(theta2) ** 2 + h2 / 2 * tf.cos(theta2) ** 2\n sigma2 = tf.reshape(tf.concat([sigma2_1, sigma2_2, sigma2_3, sigma2_4], axis=-1), [-1, 2, 2])\n\n mu1 = tf.reshape(tf.concat([x1, y1], axis=-1), [-1, 1, 2])\n mu2 = tf.reshape(tf.concat([x2, y2], axis=-1), [-1, 1, 2])\n\n mu1_T = tf.reshape(tf.concat([x1, y1], axis=-1), [-1, 2, 1])\n mu2_T = tf.reshape(tf.concat([x2, y2], axis=-1), [-1, 2, 1])\n\n KL_distance = tf.reshape(KL_divergence(mu1, mu2, mu1_T, mu2_T, sigma1, sigma2), [-1, 1])\n return sigma1, sigma2, KL_distance\n\n\ndef sigma(a, w, h):\n R = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])\n sig = np.array([[w/2, 0], [0, h/2]])\n res = np.dot(R, sig)\n res = np.dot(res, R.T)\n return res\n\n\nif __name__ == '__main__':\n from utils.gaussian_wasserstein_distance import get_element1, get_element4\n\n boxes1 = np.array([[50, 50, 10, 70, -30],\n [50, 50, 100, 700, -30]], np.float32)\n\n boxes2 = np.array([[10, 40, 10, 70, -30],\n [10, 40, 100, 700, -40]], np.float32)\n\n # boxes1 = np.array([ # prediction box\n # [50, 50, 10, 70, -35], # 90 <--> 180\n # [50, 50, 70, 10, -90.5], # 90 PoA + EoE\n # [50, 50, 70, 10, -90.5], # 180 PoA\n # [50, 50, 40, 40, -35], # 180 w=h\n # ], np.float32)\n #\n # boxes2 = np.array([ # ground truth\n # [50, 50, 70, 10, 55],\n # [50, 50, 10, 70, -0.5],\n # [50, 50, 70, 10, 89.5],\n # [50, 50, 40, 40, 55],\n # ], np.float32)\n\n print('iou', iou_rotate_calculate2(boxes1, boxes2).reshape(-1,)) # [0.9999996 0.9999998 0.9999998 1. ]\n # print(diou_rotate_calculate(boxes1, boxes2).reshape(-1,)) # [0.9999997 0.99999994 0.99999994 1. 
]\n # print(gaussian_wasserstein_distance(boxes1, boxes2)) # [6.1035156e-05 3.1062821e-04 3.1062821e-04 0.0000000e+00]\n\n\n # tmp = np.maximum(np.log(gaussian_wasserstein_distance(boxes1, boxes2)+1e-3), 0)\n # print(np.log(gaussian_wasserstein_distance(boxes1, boxes2)))\n # print(tmp/(1+tmp))\n\n # print(np.argsort(iou_rotate_calculate2(boxes1, boxes2).reshape(-1, )*-1))\n # print(np.argsort(diou_rotate_calculate(boxes1, boxes2).reshape(-1, )*-1))\n # print(np.argsort(np.array(gaussian_wasserstein_distance(boxes1, boxes2))))\n #\n # # print(sigma(-np.pi*35/180, 10, 70))\n # # print(sigma(np.pi*(90-35)/180, 70, 10))\n #\n sigma1_tf1, sigma2_tf1, gwd_tf1 = gwd(coordinate_present_convert(boxes1, -1, False), coordinate_present_convert(boxes2, -1, False))\n sigma1_tf2, sigma2_tf2, gwd_tf2 = gwd(boxes1, boxes2)\n sigma1_tf3, sigma2_tf3, kl_tf3 = kl(boxes1, boxes2)\n\n with tf.Session() as sess:\n sigma1_tf_1, sigma2_tf_1, gwd_tf_1 = sess.run([sigma1_tf1, sigma2_tf1, gwd_tf1])\n sigma1_tf_2, sigma2_tf_2, gwd_tf_2 = sess.run([sigma1_tf2, sigma2_tf2, gwd_tf2])\n sigma1_tf_3, sigma2_tf_3, kl_tf_3 = sess.run([sigma1_tf3, sigma2_tf3, kl_tf3])\n # print(sigma1_tf_1)\n # print(sigma2_tf_1)\n # print('**'*10)\n # print(sigma1_tf_2)\n # print(sigma2_tf_2)\n # print('**' * 10)\n # print(sigma1_tf_3)\n # print(sigma2_tf_3)\n print('**' * 10)\n # print(np.reshape(gwd_tf_1, [-1, ]))\n # print(np.argsort(np.reshape(gwd_tf_1, [-1, ])))\n\n print('gwd', np.reshape(gwd_tf_2, [-1, ]))\n # print(np.argsort(np.reshape(gwd_tf_2, [-1, ])))\n\n print('kld', np.reshape(kl_tf_3, [-1, ]))\n # print(np.argsort(np.reshape(kl_tf_3, [-1, ])))\n\n # gwd_tf_2 = np.maximum(np.log(gwd_tf_2 + 1e-3), 0.0)\n # gwd_tf_2_ = np.maximum(np.log(gwd_tf_2 + 1e-3), 0.0)\n # print(gwd_tf_2_/(5+gwd_tf_2_))\n #\n # gwd_tf_2 = np.maximum(np.log(gwd_tf_2 + 1e-3), 0.0)\n # print(gwd_tf_2)\n # print(1-1 / (5 + gwd_tf_2))\n # print(gwd_tf_2_ / (2 + gwd_tf_2_))\n # print(gwd_tf_2_ / (3 + gwd_tf_2_))\n # print(gwd_tf_2_ / (5 + gwd_tf_2_))\n\n\n\n\n\n\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n\"\"\"\nr3det+gwd (only refine stage) + sqrt tau=2 + data aug. + ms + res152 + 6x + 5*256conv head\n\nmulti-scale+flip\nThis is your result for task 1:\n\nmAP: 0.7787093225719842\nap of each class:\nplane:0.89461770093155,\nbaseball-diamond:0.8236018667686434,\nbridge:0.5021414308765855,\nground-track-field:0.7435082801347662,\nsmall-vehicle:0.7857255649492857,\nlarge-vehicle:0.8382285774947096,\nship:0.8757862302153405,\ntennis-court:0.8987684849725746,\nbasketball-court:0.8508137596719582,\nstorage-tank:0.868271286694195,\nsoccer-ball-field:0.6838162701460044,\nroundabout:0.6370974555832092,\nharbor:0.7542997372314082,\nswimming-pool:0.7673376041149634,\nhelicopter:0.7566255887945696\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_GWD_6x_20210101_291.6w_ms_f\nUsername: SJTU-Det\nInstitute: SJTU\nEmailadress: [email protected]\nTeamMembers: yangxue\n\nmulti-scale\nThis is your result for task 1:\n\nmAP: 0.781053817845219\nap of each class:\nplane:0.8916637153713546,\nbaseball-diamond:0.8404992598575666,\nbridge:0.514273743753299,\nground-track-field:0.7440553692445901,\nsmall-vehicle:0.7861094590168505,\nlarge-vehicle:0.8431559832540744,\nship:0.8751741786815641,\ntennis-court:0.896638838767339,\nbasketball-court:0.8516147950056526,\nstorage-tank:0.8679971722386121,\nsoccer-ball-field:0.6862493149004335,\nroundabout:0.667533488588698,\nharbor:0.7666314410601642,\nswimming-pool:0.7725622344190313,\nhelicopter:0.7116482735190545\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_GWD_6x_20210101_356.4w_ms\nUsername: DetectionTeamCSU\nInstitute: CSU\nEmailadress: [email protected]\nTeamMembers: YangXue\n\nmulti-scale + swa5\nThis is your result for task 1:\n\nmAP: 0.7846967729427209\nap of each class: plane:0.8919752020840401, baseball-diamond:0.832536141609128, bridge:0.5465600172547991, ground-track-field:0.7542388549761907, small-vehicle:0.7872751842388821, large-vehicle:0.8453935228446343, ship:0.8750340527454509, tennis-court:0.8908861239826896, basketball-court:0.8525540826454344, storage-tank:0.8675278695981044, soccer-ball-field:0.6893308041623426, roundabout:0.6497655415208251, harbor:0.7654674663658851, swimming-pool:0.7825265696577711, helicopter:0.7393801604546356\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_GWD_6x_20210101_356.4w_ms_swa10\nUsername: SJTU-Det\nInstitute: SJTU\nEmailadress: [email protected]\nTeamMembers: yangxue\n\nmulti-scale + swa10\nThis is your result for task 1:\n\nmAP: 0.7851001761526747\nap of each class: plane:0.8926517173170925, baseball-diamond:0.8382546327120208, bridge:0.5351538209043163, ground-track-field:0.7587322787466556, small-vehicle:0.78632256233217, large-vehicle:0.843705577938495, ship:0.8739304239920611, tennis-court:0.8974093700017866, basketball-court:0.8445686866573267, storage-tank:0.8657673140300304, soccer-ball-field:0.6765772398377256, roundabout:0.6677921239841383, harbor:0.7619364299879183, swimming-pool:0.7871213290604916, helicopter:0.7465791347878928\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_GWD_6x_20210101_356.4w_ms_swa5\nUsername: liuqingiqng\nInstitute: Central South University\nEmailadress: [email protected]\nTeamMembers: liuqingqing\n\n\nmulti-scale + swa10 + mss\nThis is your result for task 1:\n\nmAP: 
0.7876536388287331\nap of each class:\nplane:0.8892676182348125,\nbaseball-diamond:0.8380399764265094,\nbridge:0.5417931344048306,\nground-track-field:0.8047689607468232,\nsmall-vehicle:0.7742709708367945,\nlarge-vehicle:0.8384061252976336,\nship:0.8667803304802113,\ntennis-court:0.8825490700386722,\nbasketball-court:0.849107009356962,\nstorage-tank:0.8635221779812626,\nsoccer-ball-field:0.718231766749811,\nroundabout:0.671254360392042,\nharbor:0.7670048672500374,\nswimming-pool:0.7766585596416251,\nhelicopter:0.7331496545929699\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_GWD_6x_20210101_356.4w_ms_swa10_mss\nUsername: SJTU-Det\nInstitute: SJTU\nEmailadress: [email protected]\nTeamMembers: yangxue\n\n\nsingle scale\nThis is your result for task 1:\n\nmAP: 0.7726868436788519\nap of each class:\nplane:0.890554603183652,\nbaseball-diamond:0.8328498635978271,\nbridge:0.5275470846341096,\nground-track-field:0.6972616812234654,\nsmall-vehicle:0.796455271567893,\nlarge-vehicle:0.8374070475478563,\nship:0.8791685072741147,\ntennis-court:0.891880103555982,\nbasketball-court:0.839987919843173,\nstorage-tank:0.8674737684273185,\nsoccer-ball-field:0.6625363589195475,\nroundabout:0.6819144040462316,\nharbor:0.7588559440508911,\nswimming-pool:0.7642584786452707,\nhelicopter:0.6621516186654459\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_GWD_6x_20210101_356.4w_ss\nUsername: liuqingiqng\nInstitute: Central South University\nEmailadress: [email protected]\nTeamMembers: liuqingqing\n\n\nsingle scale 800-200\nThis is your result for task 1:\n\nmAP: 0.7756815407317015\nap of each class:\nplane:0.8884095794629929,\nbaseball-diamond:0.8370358080077156,\nbridge:0.5295324501460803,\nground-track-field:0.6946597867417675,\nsmall-vehicle:0.7962974069553106,\nlarge-vehicle:0.8384071004202205,\nship:0.8783191492251149,\ntennis-court:0.8973098987336423,\nbasketball-court:0.8499055021679619,\nstorage-tank:0.8671241392841263,\nsoccer-ball-field:0.6872967384407853,\nroundabout:0.682377503221729,\nharbor:0.7577828422591375,\nswimming-pool:0.761615541499773,\nhelicopter:0.6691496644091638\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_GWD_6x_20210101_356.4w_s_800_200\nUsername: DetectionTeamCSU\nInstitute: CSU\nEmailadress: [email protected]\nTeamMembers: YangXue\n\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_R3Det_GWD_6x_20210101'\nNET_NAME = 'resnet152_v1d' # 'MobilenetV2'\n\n# ---------------------------------------- System\nROOT_PATH = os.path.abspath('../../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0,1,2,3,4,5,6,7\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 200\nSAVE_WEIGHTS_INTE = 27000 * 6\n\nSUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')\nTEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')\n\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')\n\n# ------------------------------------------ Train and test\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\nADD_BOX_IN_TENSORBOARD = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 
1.0\nREG_WEIGHT = 2.0\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 1e-3\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Dataset\nDATASET_NAME = 'DOTA' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = [800, 450, 500, 640, 700, 900, 1000, 1100, 1200]\nIMG_MAX_LENGTH = 1200\nCLASS_NUM = 15\n\nIMG_ROTATE = True\nRGB2GRAY = True\nVERTICAL_FLIP = True\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = True\n\n# --------------------------------------------- Network\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nNUM_SUBNET_CONV = 5\nNUM_REFINE_STAGE = 1\nUSE_RELU = False\nFPN_CHANNEL = 256\nFPN_MODE = 'fpn'\n\n# --------------------------------------------- Anchor\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nANGLE_RANGE = 90\n\n# -------------------------------------------- Head\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\nREFINE_IOU_POSITIVE_THRESHOLD = [0.6, 0.7]\nREFINE_IOU_NEGATIVE_THRESHOLD = [0.5, 0.6]\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n# -------------------------------------------- GWD\nGWD_TAU = 2.0\nGWD_FUNC = tf.sqrt\n",
"import os\nfrom xml.dom.minidom import Document\nimport numpy as np\nimport copy\nimport cv2\nimport sys\nsys.path.append('../../..')\n\nfrom utils.tools import makedirs\nfrom libs.utils.coordinate_convert import backward_convert\n\n\ndef save_to_xml(save_path, im_height, im_width, objects_axis, label_name):\n im_depth = 0\n object_num = len(objects_axis)\n doc = Document()\n\n annotation = doc.createElement('annotation')\n doc.appendChild(annotation)\n\n folder = doc.createElement('folder')\n folder_name = doc.createTextNode('VOC2007')\n folder.appendChild(folder_name)\n annotation.appendChild(folder)\n\n filename = doc.createElement('filename')\n filename_name = doc.createTextNode('000024.jpg')\n filename.appendChild(filename_name)\n annotation.appendChild(filename)\n\n source = doc.createElement('source')\n annotation.appendChild(source)\n\n database = doc.createElement('database')\n database.appendChild(doc.createTextNode('The VOC2007 Database'))\n source.appendChild(database)\n\n annotation_s = doc.createElement('annotation')\n annotation_s.appendChild(doc.createTextNode('PASCAL VOC2007'))\n source.appendChild(annotation_s)\n\n image = doc.createElement('image')\n image.appendChild(doc.createTextNode('flickr'))\n source.appendChild(image)\n\n flickrid = doc.createElement('flickrid')\n flickrid.appendChild(doc.createTextNode('322409915'))\n source.appendChild(flickrid)\n\n owner = doc.createElement('owner')\n annotation.appendChild(owner)\n\n flickrid_o = doc.createElement('flickrid')\n flickrid_o.appendChild(doc.createTextNode('knautia'))\n owner.appendChild(flickrid_o)\n\n name_o = doc.createElement('name')\n name_o.appendChild(doc.createTextNode('yang'))\n owner.appendChild(name_o)\n\n size = doc.createElement('size')\n annotation.appendChild(size)\n width = doc.createElement('width')\n width.appendChild(doc.createTextNode(str(im_width)))\n height = doc.createElement('height')\n height.appendChild(doc.createTextNode(str(im_height)))\n depth = doc.createElement('depth')\n depth.appendChild(doc.createTextNode(str(im_depth)))\n size.appendChild(width)\n size.appendChild(height)\n size.appendChild(depth)\n segmented = doc.createElement('segmented')\n segmented.appendChild(doc.createTextNode('0'))\n annotation.appendChild(segmented)\n for i in range(object_num):\n objects = doc.createElement('object')\n annotation.appendChild(objects)\n object_name = doc.createElement('name')\n object_name.appendChild(doc.createTextNode(label_name[int(objects_axis[i][-1])]))\n objects.appendChild(object_name)\n pose = doc.createElement('pose')\n pose.appendChild(doc.createTextNode('Unspecified'))\n objects.appendChild(pose)\n truncated = doc.createElement('truncated')\n truncated.appendChild(doc.createTextNode('1'))\n objects.appendChild(truncated)\n difficult = doc.createElement('difficult')\n difficult.appendChild(doc.createTextNode('0'))\n objects.appendChild(difficult)\n bndbox = doc.createElement('bndbox')\n objects.appendChild(bndbox)\n\n x0 = doc.createElement('x0')\n x0.appendChild(doc.createTextNode(str((objects_axis[i][0]))))\n bndbox.appendChild(x0)\n y0 = doc.createElement('y0')\n y0.appendChild(doc.createTextNode(str((objects_axis[i][1]))))\n bndbox.appendChild(y0)\n\n x1 = doc.createElement('x1')\n x1.appendChild(doc.createTextNode(str((objects_axis[i][2]))))\n bndbox.appendChild(x1)\n y1 = doc.createElement('y1')\n y1.appendChild(doc.createTextNode(str((objects_axis[i][3]))))\n bndbox.appendChild(y1)\n\n x2 = doc.createElement('x2')\n 
x2.appendChild(doc.createTextNode(str((objects_axis[i][4]))))\n bndbox.appendChild(x2)\n y2 = doc.createElement('y2')\n y2.appendChild(doc.createTextNode(str((objects_axis[i][5]))))\n bndbox.appendChild(y2)\n\n x3 = doc.createElement('x3')\n x3.appendChild(doc.createTextNode(str((objects_axis[i][6]))))\n bndbox.appendChild(x3)\n y3 = doc.createElement('y3')\n y3.appendChild(doc.createTextNode(str((objects_axis[i][7]))))\n bndbox.appendChild(y3)\n\n f = open(save_path, 'w')\n f.write(doc.toprettyxml(indent=''))\n f.close()\n\n\nclass_list = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field',\n 'small-vehicle', 'large-vehicle', 'ship',\n 'tennis-court', 'basketball-court',\n 'storage-tank', 'soccer-ball-field',\n 'roundabout', 'harbor', 'swimming-pool', 'helicopter',\n 'container-crane',\n 'airport', 'helipad']\n\n\ndef format_label(txt_list):\n format_data = []\n for i in txt_list:\n if len(i.split(' ')) < 9:\n continue\n if 'turntable' in i:\n i = i.replace('turntable', 'roundabout')\n # if i.split(' ')[-1].split('\\n')[0] == '1':\n # print(i)\n # continue\n format_data.append(\n [float(xy) for xy in i.split(' ')[:8]] + [class_list.index(i.split(' ')[8])]\n )\n\n if i.split(' ')[8] not in class_list:\n print('warning found a new label :', i.split(' ')[8])\n exit()\n return np.array(format_data)\n\n\ndef clip_image(file_idx, image, boxes_all, width, height, stride_w, stride_h):\n min_pixel = 2\n print(file_idx)\n boxes_all_5 = backward_convert(boxes_all[:, :8], False)\n print(boxes_all[np.logical_or(boxes_all_5[:, 2] <= min_pixel, boxes_all_5[:, 3] <= min_pixel), :])\n boxes_all = boxes_all[np.logical_and(boxes_all_5[:, 2] > min_pixel, boxes_all_5[:, 3] > min_pixel), :]\n\n if boxes_all.shape[0] > 0:\n shape = image.shape\n for start_h in range(0, shape[0], stride_h):\n for start_w in range(0, shape[1], stride_w):\n boxes = copy.deepcopy(boxes_all)\n box = np.zeros_like(boxes_all)\n start_h_new = start_h\n start_w_new = start_w\n if start_h + height > shape[0]:\n start_h_new = shape[0] - height\n if start_w + width > shape[1]:\n start_w_new = shape[1] - width\n top_left_row = max(start_h_new, 0)\n top_left_col = max(start_w_new, 0)\n bottom_right_row = min(start_h + height, shape[0])\n bottom_right_col = min(start_w + width, shape[1])\n\n subImage = image[top_left_row:bottom_right_row, top_left_col: bottom_right_col]\n\n box[:, 0] = boxes[:, 0] - top_left_col\n box[:, 2] = boxes[:, 2] - top_left_col\n box[:, 4] = boxes[:, 4] - top_left_col\n box[:, 6] = boxes[:, 6] - top_left_col\n\n box[:, 1] = boxes[:, 1] - top_left_row\n box[:, 3] = boxes[:, 3] - top_left_row\n box[:, 5] = boxes[:, 5] - top_left_row\n box[:, 7] = boxes[:, 7] - top_left_row\n box[:, 8] = boxes[:, 8]\n center_y = 0.25 * (box[:, 1] + box[:, 3] + box[:, 5] + box[:, 7])\n center_x = 0.25 * (box[:, 0] + box[:, 2] + box[:, 4] + box[:, 6])\n\n cond1 = np.intersect1d(np.where(center_y[:] >= 0)[0], np.where(center_x[:] >= 0)[0])\n cond2 = np.intersect1d(np.where(center_y[:] <= (bottom_right_row - top_left_row))[0],\n np.where(center_x[:] <= (bottom_right_col - top_left_col))[0])\n idx = np.intersect1d(cond1, cond2)\n if len(idx) > 0 and (subImage.shape[0] > 5 and subImage.shape[1] > 5):\n makedirs(os.path.join(save_dir, 'images'))\n img = os.path.join(save_dir, 'images',\n \"%s_%04d_%04d.png\" % (file_idx, top_left_row, top_left_col))\n cv2.imwrite(img, subImage)\n\n makedirs(os.path.join(save_dir, 'labeltxt'))\n xml = os.path.join(save_dir, 'labeltxt',\n \"%s_%04d_%04d.xml\" % (file_idx, top_left_row, top_left_col))\n 
save_to_xml(xml, subImage.shape[0], subImage.shape[1], box[idx, :], class_list)\n\n\nprint('class_list', len(class_list))\nraw_data = '/data/dataset/DOTA/val/'\nraw_images_dir = os.path.join(raw_data, 'images', 'images')\nraw_label_dir = os.path.join(raw_data, 'labelTxt', 'labelTxt')\n\nsave_dir = '/data/dataset/DOTA/DOTA1.0/trainval/'\n\nimages = [i for i in os.listdir(raw_images_dir) if 'png' in i]\nlabels = [i for i in os.listdir(raw_label_dir) if 'txt' in i]\n\nprint('find image', len(images))\nprint('find label', len(labels))\n\nmin_length = 1e10\nmax_length = 1\n\nimg_h, img_w, stride_h, stride_w = 600, 600, 450, 450\n\nfor idx, img in enumerate(images):\n print(idx, 'read image', img)\n img_data = cv2.imread(os.path.join(raw_images_dir, img))\n\n txt_data = open(os.path.join(raw_label_dir, img.replace('png', 'txt')), 'r').readlines()\n box = format_label(txt_data)\n\n if box.shape[0] > 0:\n clip_image(img.strip('.png'), img_data, box, img_w, img_h, stride_w, stride_h)\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom libs.configs._base_.models.retinanet_r50_fpn import *\nfrom libs.configs._base_.datasets.dota_detection import *\nfrom libs.configs._base_.schedules.schedule_1x import *\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n# schedule\nBATCH_SIZE = 1\nGPU_GROUP = \"0,1,2\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSAVE_WEIGHTS_INTE = 40000 * 2\nDECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE\nMAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH\nWARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)\n\n# dataset\nDATASET_NAME = 'DOTA2.0'\nCLASS_NUM = 18\n\n# model\n# backbone\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\n\n# bbox head\nANGLE_RANGE = 180\n\n# loss\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nANGLE_WEIGHT = 0.5\n\n# DCL\nOMEGA = 180 / 256. \nANGLE_MODE = 0 # {0: BCL, 1: GCL}\n\nVERSION = 'RetinaNet_DOTA2.0_DCL_B_2x_20210430'\n\n\"\"\"\nFLOPs: 877875705; Trainable params: 33486966\nThis is your evaluation result for task 1:\n\n mAP: 0.4545546886264481\n ap of each class:\n plane:0.7576838360163581,\n baseball-diamond:0.48071214438681525,\n bridge:0.367934070781644,\n ground-track-field:0.5815236628223699,\n small-vehicle:0.34513208572635395,\n large-vehicle:0.36598807625753177,\n ship:0.46939020389816366,\n tennis-court:0.7545404991770102,\n basketball-court:0.5731304185966594,\n storage-tank:0.5011136945493068,\n soccer-ball-field:0.4053102096300879,\n roundabout:0.49786355787385084,\n harbor:0.35938351489137554,\n swimming-pool:0.5031132619574917,\n helicopter:0.5421417282441151,\n container-crane:0.12772487037593397,\n airport:0.45636926716170634,\n helipad:0.09292929292929293\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA2.0_DCL_B_2x_20210430_104w\nUsername: sjtu-deter\nInstitute: SJTU\nEmailadress: [email protected]\nTeamMembers: yangxue\n\"\"\"\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom libs.configs._base_.models.retinanet_r50_fpn import *\nfrom libs.configs._base_.datasets.dota_detection import *\nfrom libs.configs._base_.schedules.schedule_1x import *\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n# schedule\nBATCH_SIZE = 1\nGPU_GROUP = \"0\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSAVE_WEIGHTS_INTE = 20673\nDECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE\nMAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH\nWARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)\n\n# dataset\nDATASET_NAME = 'DOTATrain'\n\n# model\n# backbone\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\n\n# bbox head\nANCHOR_SCALES = [2 ** 0]\nANCHOR_RATIOS = [1.]\n\n# loss\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0 / 5.0\nREG_LOSS_MODE = None\n\nVERSION = 'RetinaNet_DOTA_ATSS_1x_20210901'\n\n\"\"\"\nRetinaNet-H + atss\nFLOPs: 468318345; Trainable params: 32080916\n\n\"\"\"\n\n\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom libs.configs._base_.models.retinanet_r50_fpn import *\nfrom libs.configs._base_.datasets.dota_detection import *\nfrom libs.configs._base_.schedules.schedule_1x import *\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n# schedule\nBATCH_SIZE = 1\nGPU_GROUP = \"0,1,2\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSAVE_WEIGHTS_INTE = 32000 * 2\nDECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE\nMAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH\nWARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)\n\n# dataset\nDATASET_NAME = 'DOTA1.5'\nCLASS_NUM = 16\n\n# model\n# backbone\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\n\n# bbox head\nANGLE_RANGE = 180\n\n# loss\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0 / 5.0\nREG_LOSS_MODE = None\n\nVERSION = 'RetinaNet_DOTA1.5_2x_20210503'\n\n\"\"\"\nretinanet-180\nFLOPs: 862193566; Trainable params: 33051321\nThis is your evaluation result for task 1:\n\n mAP: 0.5610401956363628\n ap of each class:\n plane:0.7875460889719075,\n baseball-diamond:0.7387149439907101,\n bridge:0.37805634835169255,\n ground-track-field:0.6027540626851335,\n small-vehicle:0.39196873401154814,\n large-vehicle:0.44856173941511396,\n ship:0.6361687880707679,\n tennis-court:0.8540536580109865,\n basketball-court:0.7361866563787661,\n storage-tank:0.5859551738959604,\n soccer-ball-field:0.4861815416240239,\n roundabout:0.6460535463270851,\n harbor:0.4854161897067069,\n swimming-pool:0.6303611675387102,\n helicopter:0.4669898500543662,\n container-crane:0.10167464114832536\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA1.5_2x_20210503_83.2w\nUsername: AICyber\nInstitute: IECAS\nEmailadress: [email protected]\nTeamMembers: Yang Xue; Yang Jirui\n\"\"\"\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n\"\"\"\nRetinaNet-H + kl + log + tau=1 + data aug. + ms + res152 + 6x + 5*conv + mss\nFLOPs: 1731833915; Trainable params: 68720548\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_KL_6x_20210320'\nNET_NAME = 'resnet152_v1d' # 'MobilenetV2'\n\n# ---------------------------------------- System\nROOT_PATH = os.path.abspath('../../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0,1,2\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 200\nSAVE_WEIGHTS_INTE = 27000 * 6\n\nSUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')\nTEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')\n\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')\n\n# ------------------------------------------ Train and Test\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\nADD_BOX_IN_TENSORBOARD = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 2.0\nREG_LOSS_MODE = 3\nALPHA = 1.0\nBETA = 1.0\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 1e-3\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 8.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Dataset\nDATASET_NAME = 'DOTA' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = [800, 450, 500, 640, 700, 900, 1000, 1100, 1200]\nIMG_MAX_LENGTH = 1200\nCLASS_NUM = 15\n\nIMG_ROTATE = True\nRGB2GRAY = True\nVERTICAL_FLIP = True\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = True\n\n# --------------------------------------------- Network\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nFPN_CHANNEL = 256\nNUM_SUBNET_CONV = 5\nFPN_MODE = 'fpn'\n\n# --------------------------------------------- Anchor\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 90 # or 180\n\n# -------------------------------------------- Head\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n# -------------------------------------------- KLD\nKL_TAU = 1.0\nKL_FUNC = 1\n",
"# -*- coding: utf-8 -*-\n# Author: Qi Ming <[email protected]>\n# Xue Yang <[email protected]>\n#\n# License: Apache-2.0 license\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\n\nfrom libs.utils import bbox_transform\nfrom libs.models.losses.losses import Loss\nfrom utils.order_points import re_order\n\n\nclass LossRIDet(Loss):\n\n def smooth_l1_loss_quad(self, targets, preds, sigma=3.0):\n sigma_squared = sigma ** 2\n\n # compute smooth L1 loss\n # f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma\n # |x| - 0.5 / sigma / sigma otherwise\n regression_diff = preds - targets\n regression_diff = tf.abs(regression_diff)\n\n regression_loss = tf.where(\n tf.less(regression_diff, 1.0 / sigma_squared),\n 0.5 * sigma_squared * tf.pow(regression_diff, 2),\n regression_diff - 0.5 / sigma_squared\n )\n return regression_loss\n\n def linear_sum_assignment_np(self, losses):\n\n indices = []\n for cnt, loss in enumerate(losses):\n # loss [4, 4], row_ind [4, ], col_ind [4, ]\n row_ind, col_ind = linear_sum_assignment(loss)\n # [4, 3]\n indices.append(np.concatenate(\n [np.ones_like(row_ind.reshape([-1, 1])) * cnt, row_ind.reshape([-1, 1]), col_ind.reshape([-1, 1])],\n axis=1))\n # [-1, 4, 3]\n return np.array(indices, np.int32)\n\n def hungarian_loss_quad(self, targets, preds, anchor_state, anchors):\n targets = tf.reshape(targets[:, :-1], [-1, 8])\n\n indices = tf.reshape(tf.where(tf.equal(anchor_state, 1)), [-1, ])\n preds = tf.gather(preds, indices)\n targets = tf.gather(targets, indices)\n anchors = tf.gather(anchors, indices)\n\n # change from delta to abslote data\n if self.cfgs.METHOD == 'H':\n x_c = (anchors[:, 2] + anchors[:, 0]) / 2\n y_c = (anchors[:, 3] + anchors[:, 1]) / 2\n w = anchors[:, 2] - anchors[:, 0] + 1\n h = anchors[:, 3] - anchors[:, 1] + 1\n # theta = -90 * tf.ones_like(x_c)\n anchors = tf.transpose(tf.stack([x_c, y_c, w, h]))\n\n preds = bbox_transform.qbbox_transform_inv(boxes=anchors, deltas=preds)\n\n preds = tf.reshape(preds, [-1, 4, 2])\n targets = tf.reshape(targets, [-1, 4, 2])\n # [-1, 4, 4]\n cost_list = [tf.reduce_sum(self.smooth_l1_loss_quad(preds, tf.tile(tf.expand_dims(targets[:, i, :], axis=1), [1, 4, 1])), axis=2) for i in range(4)]\n cost = tf.concat(cost_list, axis=1)\n cost = tf.reshape(cost, [-1, 4, 4])\n\n indices = tf.py_func(self.linear_sum_assignment_np, inp=[cost], Tout=tf.int32)\n indices = tf.reshape(indices, [-1, 4, 3])\n loss = tf.reduce_sum(tf.gather_nd(cost, indices), axis=1)\n\n # prepare for normalization\n normalizer = tf.stop_gradient(tf.where(tf.equal(anchor_state, 1)))\n normalizer = tf.cast(tf.shape(normalizer)[0], tf.float32)\n normalizer = tf.maximum(1.0, normalizer)\n\n loss = tf.reduce_sum(loss) / normalizer\n\n return loss\n\n def hungarian_loss_arbitrary_shaped(self, targets, preds, anchor_state, anchors):\n targets = tf.reshape(targets[:, :-1], [-1, self.cfgs.POINT_SAMPLING_NUM * 2])\n\n indices = tf.reshape(tf.where(tf.equal(anchor_state, 1)), [-1, ])\n preds = tf.gather(preds, indices)\n targets = tf.gather(targets, indices)\n anchors = tf.gather(anchors, indices)\n\n # targets = tf.py_func(box_sample.rbox_border_sample,\n # inp=[targets],\n # Tout=tf.float32)\n\n # targets = tf.py_func(mask_sample.mask_sampling,\n # inp=[tf.reshape(targets, [-1, 4, 2]), self.cfgs.POINT_SAMPLING_NUM],\n # Tout=tf.float32)\n\n # change from delta to abslote data\n if self.cfgs.METHOD 
== 'H':\n x_c = (anchors[:, 2] + anchors[:, 0]) / 2\n y_c = (anchors[:, 3] + anchors[:, 1]) / 2\n w = anchors[:, 2] - anchors[:, 0] + 1\n h = anchors[:, 3] - anchors[:, 1] + 1\n # theta = -90 * tf.ones_like(x_c)\n anchors = tf.transpose(tf.stack([x_c, y_c, w, h]))\n\n preds = bbox_transform.poly_transform_inv(boxes=anchors, deltas=preds, point_num=self.cfgs.POINT_SAMPLING_NUM)\n\n preds = tf.reshape(preds, [-1, self.cfgs.POINT_SAMPLING_NUM, 2])\n targets = tf.reshape(targets, [-1, self.cfgs.POINT_SAMPLING_NUM, 2])\n # [-1, self.cfgs.POINT_SAMPLING_NUM, self.cfgs.POINT_SAMPLING_NUM]\n cost_list = [tf.reduce_sum(self.smooth_l1_loss_quad(preds, tf.tile(tf.expand_dims(targets[:, i, :], axis=1),\n [1, self.cfgs.POINT_SAMPLING_NUM, 1])),\n axis=2) for i in range(self.cfgs.POINT_SAMPLING_NUM)]\n\n cost = tf.concat(cost_list, axis=1)\n cost = tf.reshape(cost, [-1, self.cfgs.POINT_SAMPLING_NUM, self.cfgs.POINT_SAMPLING_NUM])\n\n indices = tf.py_func(self.linear_sum_assignment_np, inp=[cost], Tout=tf.int32)\n indices = tf.reshape(indices, [-1, self.cfgs.POINT_SAMPLING_NUM, 3])\n loss = tf.reduce_sum(tf.gather_nd(cost, indices), axis=1)\n\n # prepare for normalization\n normalizer = tf.stop_gradient(tf.where(tf.equal(anchor_state, 1)))\n normalizer = tf.cast(tf.shape(normalizer)[0], tf.float32)\n normalizer = tf.maximum(1.0, normalizer)\n\n loss = tf.reduce_sum(loss) / normalizer\n\n return loss\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom libs.configs._base_.models.retinanet_r50_fpn import *\nfrom libs.configs._base_.datasets.dota_detection import *\nfrom libs.configs._base_.schedules.schedule_1x import *\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n# schedule\nBATCH_SIZE = 1\nGPU_GROUP = '0'\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nLR = 1e-3\nSAVE_WEIGHTS_INTE = 27000\nDECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE\nMAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH\nWARM_EPOCH = 1. / 8.\nWARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)\n\n# dataset\n\n# model\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\n\n# loss\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 10.0\nREG_LOSS_MODE = 4 # ProbIoU\n\nVERSION = 'RetinaNet_DOTA_PROBIOU_1x_20210619'\n\n\"\"\"\nProbIoU\nFLOPs: 484911745; Trainable params: 33002916\nThis is your result for task 1:\n\nmAP: 0.6699477920098148\nap of each class: plane:0.8839667840781353, baseball-diamond:0.6914718502944607, bridge:0.41256920258313484, ground-track-field:0.6475098459337166, small-vehicle:0.7431085118997922, large-vehicle:0.6649908934311772, ship:0.8010632589243115, tennis-court:0.8983652865397685, basketball-court:0.7704836125997283, storage-tank:0.7808487163560258, soccer-ball-field:0.487485991493025, roundabout:0.59191194379331, harbor:0.5724836001531287, swimming-pool:0.6315901674107062, helicopter:0.4713672146567996\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_PROBIOU_1x_20210619_35.1w\nUsername: SJTU-Det\nInstitute: SJTU\nEmailadress: [email protected]\nTeamMembers: yangxue\n\"\"\"",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom libs.configs._base_.models.retinanet_r50_fpn import *\nfrom libs.configs._base_.datasets.dota_detection import *\nfrom libs.configs._base_.schedules.schedule_1x import *\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n# schedule\nBATCH_SIZE = 1\nGPU_GROUP = \"0,1\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSAVE_WEIGHTS_INTE = 40000 * 2\nDECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE\nMAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH\nWARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)\n\n# dataset\nDATASET_NAME = 'DOTA2.0'\nCLASS_NUM = 18\n\n# model\n# backbone\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\n\n# loss\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0 / 5.0\nREG_LOSS_MODE = 1 # IoU-Smooth L1\n\nVERSION = 'RetinaNet_DOTA2.0_2x_20210427'\n\n\"\"\"\nRetinaNet-H + IoU-Smooth L1\nFLOPs: 676601855; Trainable params: 33148131\nThis is your evaluation result for task 1:\n\n mAP: 0.46306689567660153\n ap of each class:\n plane:0.7754947865172579,\n baseball-diamond:0.46434700034028786,\n bridge:0.39599890161126844,\n ground-track-field:0.5841579611023893,\n small-vehicle:0.347077308109511,\n large-vehicle:0.36984795081268984,\n ship:0.47937500010188855,\n tennis-court:0.7713614052919219,\n basketball-court:0.5712051108571043,\n storage-tank:0.5361292205829321,\n soccer-ball-field:0.4011674247207251,\n roundabout:0.5077467772636136,\n harbor:0.4002952079323899,\n swimming-pool:0.5416185098631832,\n helicopter:0.5162148043881953,\n container-crane:0.13870523708490112,\n airport:0.4057909924349168,\n helipad:0.1286705231636533\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA2.0_2x_20210427_104w\nUsername: yangxue\nInstitute: UCAS\nEmailadress: [email protected]\nTeamMembers: yangxue\n\"\"\"\n\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom libs.configs._base_.models.retinanet_r50_fpn import *\nfrom libs.configs._base_.datasets.dota_detection import *\nfrom libs.configs._base_.schedules.schedule_1x import *\nfrom dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n\n# schedule\nBATCH_SIZE = 1\nGPU_GROUP = \"0\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nLR = 1e-3 * BATCH_SIZE * NUM_GPU\nSAVE_WEIGHTS_INTE = 20673\nDECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE\nMAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH\nWARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)\n\n# dataset\nDATASET_NAME = 'DOTATrain'\n\n# model\n# backbone\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\n\n# loss\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nCTR_WEIGHT = 1.0\nREG_LOSS_MODE = None\n\nVERSION = 'FCOS_DOTA_1x_20210616'\n\n\"\"\"\nFCOS\nFLOPs: 468484100; Trainable params: 32090136\nAP50:95: [0.657299425287604, 0.6241752209258004, 0.5771224126904312, 0.5226585606820188, 0.44830279891608976,\n 0.3569762828353407, 0.2545520110329005, 0.1552287494963553, 0.057532759028607015, 0.008582559012697001]\nmmAP: 0.3662430779907845\n++--++--++--++--++--++--++--++--++--++--++--++--++--++--++--++--++--++--++--++--\n{'0.5': {'plane': 0.8851570639970467, 'baseball-diamond': 0.6306127210807415, 'bridge': 0.4177832468389531, 'ground-track-field': 0.4931685224392381, 'small-vehicle': 0.6027799633246981, 'large-vehicle': 0.6661781662930775, 'ship': 0.7641297116105128, 'tennis-court': 0.8953644232693438, 'basketball-court': 0.6835566610500415, 'storage-tank': 0.8615740936231205, 'soccer-ball-field': 0.6519606425916767, 'roundabout': 0.669075001875759, 'harbor': 0.6050080530943827, 'swimming-pool': 0.5740066155848368, 'helicopter': 0.45913649264063144, 'mAP': 0.657299425287604}, '0.55': {'plane': 0.8773401921080234, 'baseball-diamond': 0.5882832150156297, 'bridge': 0.36601623649065135, 'ground-track-field': 0.4644889853998973, 'small-vehicle': 0.5821353796788801, 'large-vehicle': 0.6108299657875689, 'ship': 0.7533977349215739, 'tennis-court': 0.8935957870108021, 'basketball-court': 0.6167167532317788, 'storage-tank': 0.8499283753040496, 'soccer-ball-field': 0.6450199555282644, 'roundabout': 0.631729621797236, 'harbor': 0.5124942122847805, 'swimming-pool': 0.530493954219431, 'helicopter': 0.4401579451084402, 'mAP': 0.6241752209258004}, '0.6': {'plane': 0.8343469168872588, 'baseball-diamond': 0.47998571996205536, 'bridge': 0.2949871445251151, 'ground-track-field': 0.429248807073207, 'small-vehicle': 0.5451808201705569, 'large-vehicle': 0.5923391096465394, 'ship': 0.7323895108556205, 'tennis-court': 0.8918719339754393, 'basketball-court': 0.6167167532317788, 'storage-tank': 0.7896539464914193, 'soccer-ball-field': 0.6256437044190803, 'roundabout': 0.5822160910481705, 'harbor': 0.3911822309127578, 'swimming-pool': 0.4623056776438838, 'helicopter': 0.38876782351358624, 'mAP': 0.5771224126904312}, '0.65': {'plane': 0.7674602391012482, 'baseball-diamond': 0.39138267523398085, 'bridge': 0.22779722692766172, 'ground-track-field': 0.38227138540598427, 'small-vehicle': 0.48124259553054344, 'large-vehicle': 0.5606505203129073, 'ship': 0.6449503011199688, 'tennis-court': 0.8882547554383928, 'basketball-court': 0.5996495579090106, 'storage-tank': 0.7740037478302471, 'soccer-ball-field': 0.5890564227707867, 
'roundabout': 0.5266423982671122, 'harbor': 0.27965898269520834, 'swimming-pool': 0.3512809815252128, 'helicopter': 0.37557662016201626, 'mAP': 0.5226585606820188}, '0.7': {'plane': 0.7480741942299921, 'baseball-diamond': 0.28491417176356304, 'bridge': 0.15422371408276112, 'ground-track-field': 0.34193771346922236, 'small-vehicle': 0.3800016165394783, 'large-vehicle': 0.4873424696443632, 'ship': 0.5971974256616549, 'tennis-court': 0.8844407748906453, 'basketball-court': 0.57019059163727, 'storage-tank': 0.6811209806639307, 'soccer-ball-field': 0.5139624028187078, 'roundabout': 0.468581334238386, 'harbor': 0.1879641389757574, 'swimming-pool': 0.17323546761840766, 'helicopter': 0.25135498750720736, 'mAP': 0.44830279891608976}, '0.75': {'plane': 0.638480491268426, 'baseball-diamond': 0.16595623005889915, 'bridge': 0.10635310635310635, 'ground-track-field': 0.28253134684456893, 'small-vehicle': 0.2374290083207463, 'large-vehicle': 0.37961045921485476, 'ship': 0.41281075154430213, 'tennis-court': 0.8595147355887445, 'basketball-court': 0.4950299319632033, 'storage-tank': 0.5722552204278989, 'soccer-ball-field': 0.43429171291541124, 'roundabout': 0.3682496734973011, 'harbor': 0.11899620074744276, 'swimming-pool': 0.11274833508133607, 'helicopter': 0.1703870387038704, 'mAP': 0.3569762828353407}, '0.8': {'plane': 0.5051838168885886, 'baseball-diamond': 0.11419068736141907, 'bridge': 0.045454545454545456, 'ground-track-field': 0.18824315994591587, 'small-vehicle': 0.12037899418595661, 'large-vehicle': 0.21871403506702516, 'ship': 0.25618136638116157, 'tennis-court': 0.7677670667094157, 'basketball-court': 0.3549626701800615, 'storage-tank': 0.44570292801193634, 'soccer-ball-field': 0.35910600950204913, 'roundabout': 0.24770569260824132, 'harbor': 0.061633281972265024, 'swimming-pool': 0.07244985061886469, 'helicopter': 0.0606060606060606, 'mAP': 0.2545520110329005}, '0.85': {'plane': 0.286499373204074, 'baseball-diamond': 0.09090909090909091, 'bridge': 0.01674641148325359, 'ground-track-field': 0.11586452762923352, 'small-vehicle': 0.024932135636471557, 'large-vehicle': 0.1137692716640085, 'ship': 0.08762125759106315, 'tennis-court': 0.6380042107163609, 'basketball-court': 0.19159825681564813, 'storage-tank': 0.25665402937253856, 'soccer-ball-field': 0.2582726616817526, 'roundabout': 0.16942148760330578, 'harbor': 0.012987012987012986, 'swimming-pool': 0.004545454545454546, 'helicopter': 0.0606060606060606, 'mAP': 0.1552287494963553}, '0.9': {'plane': 0.04070705426127113, 'baseball-diamond': 0.022727272727272728, 'bridge': 0.00505050505050505, 'ground-track-field': 0.004914004914004914, 'small-vehicle': 0.005509641873278237, 'large-vehicle': 0.011363636363636364, 'ship': 0.019762845849802372, 'tennis-court': 0.38972621438374866, 'basketball-court': 0.08629776021080368, 'storage-tank': 0.09508026695526696, 'soccer-ball-field': 0.15641511990245227, 'roundabout': 0.02097902097902098, 'harbor': 0.0021853146853146855, 'swimming-pool': 0.002272727272727273, 'helicopter': 0.0, 'mAP': 0.057532759028607015}, '0.95': {'plane': 0.0009921865310678407, 'baseball-diamond': 0.0, 'bridge': 0.0, 'ground-track-field': 0.0, 'small-vehicle': 0.0008045052292839903, 'large-vehicle': 0.0001607114158675738, 'ship': 0.004784688995215311, 'tennis-court': 0.036045879795879796, 'basketball-court': 0.018181818181818184, 'storage-tank': 0.045454545454545456, 'soccer-ball-field': 0.004132231404958678, 'roundabout': 0.018181818181818184, 'harbor': 0.0, 'swimming-pool': 0.0, 'helicopter': 0.0, 'mAP': 0.008582559012697001}, 'mmAP': 
0.3662430779907845}\n\"\"\"\n\n\n\n"
] | [
[
"tensorflow.device",
"tensorflow.cast",
"tensorflow.random_shuffle",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"tensorflow.py_func",
"tensorflow.Graph",
"tensorflow.contrib.slim.get_or_create_global_step",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.train.MomentumOptimizer",
"tensorflow.name_scope",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.contrib.slim.learning.clip_gradient_norms",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.get_variable_scope"
],
[
"tensorflow.constant_initializer",
"tensorflow.random_normal_initializer"
],
[
"tensorflow.linalg.det",
"tensorflow.linalg.inv",
"tensorflow.sin",
"tensorflow.concat",
"tensorflow.cos",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.linalg.matmul",
"tensorflow.Session",
"tensorflow.linalg.trace",
"tensorflow.py_func"
],
[
"tensorflow.constant_initializer",
"tensorflow.random_normal_initializer"
],
[
"numpy.logical_and",
"numpy.logical_or",
"numpy.intersect1d",
"numpy.zeros_like",
"numpy.array",
"numpy.where"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"tensorflow.constant_initializer",
"tensorflow.random_normal_initializer"
],
[
"tensorflow.concat",
"tensorflow.gather_nd",
"tensorflow.less",
"tensorflow.pow",
"tensorflow.maximum",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.equal",
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.gather",
"tensorflow.py_func",
"scipy.optimize.linear_sum_assignment",
"numpy.array",
"tensorflow.abs"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
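The LossRIDet class in the row above matches predicted quadrilateral corners to ground-truth corners by minimising a 4x4 smooth-L1 cost matrix with a Hungarian assignment (the cost is built in TensorFlow and handed to scipy through tf.py_func). The following is a minimal NumPy/SciPy sketch of that matching step only; the helper names (smooth_l1, match_corners) and the toy corner values are illustrative, not part of the original code.

import numpy as np
from scipy.optimize import linear_sum_assignment


def smooth_l1(diff, sigma=3.0):
    # f(x) = 0.5*(sigma*x)^2 if |x| < 1/sigma^2, else |x| - 0.5/sigma^2
    sigma_squared = sigma ** 2
    diff = np.abs(diff)
    return np.where(diff < 1.0 / sigma_squared,
                    0.5 * sigma_squared * diff ** 2,
                    diff - 0.5 / sigma_squared)


def match_corners(pred, target, sigma=3.0):
    # pred, target: [4, 2] corner arrays of one quadrilateral
    # cost[i, j] = smooth-L1 distance between target corner i and predicted corner j
    cost = np.array([[smooth_l1(pred[j] - target[i], sigma).sum()
                      for j in range(4)] for i in range(4)])
    row_ind, col_ind = linear_sum_assignment(cost)   # optimal corner permutation
    return cost[row_ind, col_ind].sum()


pred = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
target = np.array([[1., 0.], [1., 1.], [0., 1.], [0., 0.]])   # same box, corners listed in a different order
print(match_corners(pred, target))   # ~0.0: the assignment removes the corner-ordering ambiguity

Because the assignment minimises the total corner cost, the resulting loss does not depend on the order in which the four corners of a box are listed.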
wright/OpenMDAO | [
"58f9ff47197531f4fb4ef632c6bcca11e799ccf0"
] | [
"openmdao/core/tests/test_connections.py"
] | [
"\"\"\" Tests related to connecing inputs to outputs.\"\"\"\n\nimport unittest\nimport numpy as np\n\nfrom io import StringIO\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_warning\nfrom openmdao.utils.mpi import MPI\n\ntry:\n from openmdao.vectors.petsc_vector import PETScVector\nexcept ImportError:\n PETScVector = None\n\n\nclass TestConnections(unittest.TestCase):\n\n def setUp(self):\n self.setup_model(None, None)\n\n def setup_model(self, c1meta=None, c3meta=None):\n self.p = om.Problem()\n root = self.p.model\n\n if c1meta is None:\n c1meta = {}\n\n if c3meta is None:\n c3meta = {}\n\n self.G1 = root.add_subsystem(\"G1\", om.Group())\n self.G2 = self.G1.add_subsystem(\"G2\", om.Group())\n self.C1 = self.G2.add_subsystem(\"C1\", om.ExecComp('y=x*2.0', **c1meta))\n self.C2 = self.G2.add_subsystem(\"C2\", om.IndepVarComp('x', 1.0))\n\n self.G3 = root.add_subsystem(\"G3\", om.Group())\n self.G4 = self.G3.add_subsystem(\"G4\", om.Group())\n self.C3 = self.G4.add_subsystem(\"C3\", om.ExecComp('y=x*2.0', **c3meta))\n self.C4 = self.G4.add_subsystem(\"C4\", om.ExecComp('y=x*2.0'))\n\n def test_no_conns(self):\n self.p.setup()\n\n self.p['G1.G2.C1.x'] = 111.\n self.p['G3.G4.C3.x'] = 222.\n self.p['G3.G4.C4.x'] = 333.\n\n self.p.run_model()\n\n self.assertEqual(self.C1._inputs['x'], 111.)\n self.assertEqual(self.C3._inputs['x'], 222.)\n self.assertEqual(self.C4._inputs['x'], 333.)\n\n def test_pull_size_from_source(self):\n raise unittest.SkipTest(\"setting input size based on src size not supported yet\")\n\n class Src(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x', 2.0)\n self.add_output('y1', np.zeros((3, )))\n self.add_output('y2', shape=((3, )))\n\n def solve_nonlinear(self, inputs, outputs, resids):\n x = inputs['x']\n\n outputs['y1'] = x * np.array([1.0, 2.0, 3.0])\n outputs['y2'] = x * np.array([1.0, 2.0, 3.0])\n\n class Tgt(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x1')\n self.add_input('x2')\n self.add_output('y1', 0.0)\n self.add_output('y2', 0.0)\n\n def solve_nonlinear(self, inputs, outputs, resids):\n x1 = inputs['x1']\n x2 = inputs['x2']\n\n outputs['y1'] = np.sum(x1)\n outputs['y2'] = np.sum(x2)\n\n p = om.Problem()\n p.model.add_subsystem('src', Src())\n p.model.add_subsystem('tgt', Tgt())\n\n p.model.connect('src.y1', 'tgt.x1')\n p.model.connect('src.y2', 'tgt.x2')\n\n p.setup()\n p.run_model()\n\n self.assertEqual(p['tgt.y1'], 12.0)\n self.assertEqual(p['tgt.y2'], 12.0)\n\n def test_pull_size_from_source_with_indices(self):\n raise unittest.SkipTest(\"setting input size based on src size not supported yet\")\n\n class Src(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x', 2.0)\n self.add_output('y1', np.zeros((3, )))\n self.add_output('y2', shape=((3, )))\n self.add_output('y3', 3.0)\n\n def solve_nonlinear(self, inputs, outputs, resids):\n \"\"\" counts up. \"\"\"\n\n x = inputs['x']\n\n outputs['y1'] = x * np.array([1.0, 2.0, 3.0])\n outputs['y2'] = x * np.array([1.0, 2.0, 3.0])\n outputs['y3'] = x * 4.0\n\n class Tgt(ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x1')\n self.add_input('x2')\n self.add_input('x3')\n self.add_output('y1', 0.0)\n self.add_output('y2', 0.0)\n self.add_output('y3', 0.0)\n\n def solve_nonlinear(self, inputs, outputs, resids):\n \"\"\" counts up. 
\"\"\"\n\n x1 = inputs['x1']\n x2 = inputs['x2']\n x3 = inputs['x3']\n\n outputs['y1'] = np.sum(x1)\n outputs['y2'] = np.sum(x2)\n outputs['y3'] = np.sum(x3)\n\n top = om.Problem()\n top.model.add_subsystem('src', Src())\n top.model.add_subsystem('tgt', Tgt())\n\n top.model.connect('src.y1', 'tgt.x1', src_indices=(0, 1))\n top.model.connect('src.y2', 'tgt.x2', src_indices=(0, 1))\n top.model.connect('src.y3', 'tgt.x3')\n\n top.setup()\n top.run_model()\n\n self.assertEqual(top['tgt.y1'], 6.0)\n self.assertEqual(top['tgt.y2'], 6.0)\n self.assertEqual(top['tgt.y3'], 8.0)\n\n def test_inp_inp_conn_no_src(self):\n raise unittest.SkipTest(\"no setup testing yet\")\n self.p.model.connect('G3.G4.C3.x', 'G3.G4.C4.x')\n\n stream = StringIO()\n self.p.setup(out_stream=stream)\n\n self.p['G3.G4.C3.x'] = 999.\n self.assertEqual(self.p.model.G3.G4.C3._inputs['x'], 999.)\n self.assertEqual(self.p.model.G3.G4.C4._inputs['x'], 999.)\n\n content = stream.getvalue()\n self.assertTrue(\"The following parameters have no associated unknowns:\\n\"\n \"G1.G2.C1.x\\nG3.G4.C3.x\\nG3.G4.C4.x\" in content)\n self.assertTrue(\"The following components have no connections:\\n\"\n \"G1.G2.C1\\nG1.G2.C2\\nG3.G4.C3\\nG3.G4.C4\\n\" in content)\n self.assertTrue(\"No recorders have been specified, so no data will be saved.\" in content)\n\n\nclass TestConnectionsPromoted(unittest.TestCase):\n\n def test_inp_inp_promoted_w_prom_src(self):\n p = om.Problem()\n root = p.model\n\n G1 = root.add_subsystem(\"G1\", om.Group(), promotes=['x'])\n G2 = G1.add_subsystem(\"G2\", om.Group(), promotes=['x'])\n G2.add_subsystem(\"C1\", om.ExecComp('y=x*2.0'))\n G2.add_subsystem(\"C2\", om.IndepVarComp('x', 1.0), promotes=['x'])\n\n G3 = root.add_subsystem(\"G3\", om.Group(), promotes=['x'])\n G4 = G3.add_subsystem(\"G4\", om.Group(), promotes=['x'])\n C3 = G4.add_subsystem(\"C3\", om.ExecComp('y=x*2.0'), promotes=['x'])\n C4 = G4.add_subsystem(\"C4\", om.ExecComp('y=x*2.0'), promotes=['x'])\n\n p.setup()\n p.set_solver_print(level=0)\n\n # setting promoted name will set the value into the outputs, but will\n # not propagate it to the inputs. That will happen during run_model().\n p['x'] = 999.\n\n p.run_model()\n self.assertEqual(C3._inputs['x'], 999.)\n self.assertEqual(C4._inputs['x'], 999.)\n\n def test_inp_inp_promoted_w_explicit_src(self):\n p = om.Problem()\n root = p.model\n\n G1 = root.add_subsystem(\"G1\", om.Group())\n G2 = G1.add_subsystem(\"G2\", om.Group(), promotes=['x'])\n G2.add_subsystem(\"C1\", om.ExecComp('y=x*2.0'))\n G2.add_subsystem(\"C2\", om.IndepVarComp('x', 1.0), promotes=['x'])\n\n G3 = root.add_subsystem(\"G3\", om.Group())\n G4 = G3.add_subsystem(\"G4\", om.Group(), promotes=['x'])\n C3 = G4.add_subsystem(\"C3\", om.ExecComp('y=x*2.0'), promotes=['x'])\n C4 = G4.add_subsystem(\"C4\", om.ExecComp('y=x*2.0'), promotes=['x'])\n\n p.model.connect('G1.x', 'G3.x')\n p.setup()\n p.set_solver_print(level=0)\n\n # setting promoted name will set the value into the outputs, but will\n # not propagate it to the inputs. 
That will happen during run_model().\n p['G1.x'] = 999.\n\n p.run_model()\n self.assertEqual(C3._inputs['x'], 999.)\n self.assertEqual(C4._inputs['x'], 999.)\n\n def test_overlapping_system_names(self):\n # This ensures that _setup_connections does not think g1 and g1a are the same system\n prob = om.Problem()\n model = prob.model\n\n g1 = model.add_subsystem('g1', om.Group())\n g1a = model.add_subsystem('g1a', om.Group())\n\n g1.add_subsystem('c', om.ExecComp('y=x'))\n g1a.add_subsystem('c', om.ExecComp('y=x'))\n\n model.connect('g1.c.y', 'g1a.c.x')\n model.connect('g1a.c.y', 'g1.c.x')\n\n prob.setup(check=True)\n\n\nclass TestConnectionsIndices(unittest.TestCase):\n\n def setUp(self):\n class ArrayComp(om.ExplicitComponent):\n def setup(self):\n self.add_input('inp', val=np.ones((2)))\n self.add_input('inp1', val=0)\n self.add_output('out', val=np.zeros((2)))\n\n def compute(self, inputs, outputs):\n outputs['out'] = inputs['inp'] * 2.\n\n indep_var_comp = om.IndepVarComp()\n indep_var_comp.add_output('blammo', val=3.)\n indep_var_comp.add_output('arrout', val=np.ones(5))\n\n prob = om.Problem()\n prob.model.add_subsystem('idvp', indep_var_comp)\n prob.model.add_subsystem('arraycomp', ArrayComp())\n\n self.prob = prob\n\n def test_bad_shapes(self):\n # Should not be allowed because the source and target shapes do not match\n self.prob.model.connect('idvp.blammo', 'arraycomp.inp')\n\n expected = \"<model> <class Group>: The source and target shapes do not match or are \" + \\\n \"ambiguous for the connection 'idvp.blammo' to 'arraycomp.inp'. \" + \\\n \"The source shape is (1,) but the target shape is (2,).\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n def test_bad_length(self):\n # Should not be allowed because the length of src_indices is greater than\n # the shape of arraycomp.inp\n self.prob.model.connect('idvp.blammo', 'arraycomp.inp', src_indices=[0, 1, 0])\n\n expected = \"<model> <class Group>: The source indices [0 1 0] do not specify a valid shape \" + \\\n \"for the connection 'idvp.blammo' to 'arraycomp.inp'. The target shape is \" + \\\n \"(2,) but indices are (3,).\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n def test_bad_value(self):\n # Should not be allowed because the index value within src_indices is outside\n # the valid range for the source\n self.prob.model.connect('idvp.arrout', 'arraycomp.inp1', src_indices=[100000])\n\n expected = \"<model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'idvp.arrout' to 'arraycomp.inp1'. \" + \\\n \"Index '100000' is out of range for source dimension of size 5.\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n def test_bad_value_bug(self):\n # Should not be allowed because the 2nd index value within src_indices is outside\n # the valid range for the source. 
A bug prevented this from being checked.\n self.prob.model.connect('idvp.arrout', 'arraycomp.inp', src_indices=[0, 100000])\n\n expected = \"<model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'idvp.arrout' to 'arraycomp.inp'. \" + \\\n \"Index '100000' is out of range for source dimension of size 5.\"\n\n try:\n self.prob.setup()\n except ValueError as err:\n self.assertEqual(str(err), expected)\n else:\n self.fail('Exception expected.')\n\n self.prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n self.prob.setup()\n\n\nclass TestShapes(unittest.TestCase):\n def test_connect_flat_array_to_row_vector(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))\n p.model.add_subsystem('C1',\n om.ExecComp('y=dot(x, A)',\n x={'value': np.zeros((1, 10))},\n A={'value': np.eye(10)},\n y={'value': np.zeros((1, 10))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], np.arange(10)[np.newaxis, :])\n\n def test_connect_flat_array_to_col_vector(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))\n p.model.add_subsystem('C1',\n om.ExecComp('y=dot(A, x)',\n x={'value': np.zeros((10, 1))},\n A={'value': np.eye(10)},\n y={'value': np.zeros((10, 1))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], np.arange(10)[:, np.newaxis])\n\n def test_connect_row_vector_to_flat_array(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[np.newaxis, :]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros(10)},\n y={'value': np.zeros(10)}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], 5 * np.arange(10))\n\n def test_connect_col_vector_to_flat_array(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[:, np.newaxis]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros(10)},\n y={'value': np.zeros(10)}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], 5 * np.arange(10))\n\n def test_connect_flat_to_3d_array(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros((1, 10, 1))},\n y={'value': np.zeros((1, 10, 1))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'], 5 * np.arange(10)[np.newaxis, :, np.newaxis])\n\n def test_connect_flat_nd_to_flat_nd(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x',\n val=np.arange(10)[np.newaxis, :, np.newaxis,\n np.newaxis]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros((1, 1, 1, 10))},\n y={'value': np.zeros((1, 1, 1, 10))}))\n p.model.connect('indep.x', 'C1.x')\n p.setup()\n p.run_model()\n assert_near_equal(p['C1.y'],\n 5 * np.arange(10)[np.newaxis, np.newaxis, np.newaxis, :])\n\n def test_connect_incompatible_shapes(self):\n p = om.Problem()\n p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[np.newaxis, :,\n np.newaxis, np.newaxis]))\n p.model.add_subsystem('C1', om.ExecComp('y=5*x',\n x={'value': np.zeros((5, 2))},\n y={'value': np.zeros((5, 2))}))\n p.model.connect('indep.x', 'C1.x')\n\n expected = \"<model> <class Group>: The source and 
target shapes do not match or are \" + \\\n \"ambiguous for the connection 'indep.x' to 'C1.x'. The source shape is \" + \\\n \"(1, 10, 1, 1) but the target shape is (5, 2).\"\n\n with self.assertRaises(Exception) as context:\n p.setup()\n\n self.assertEqual(str(context.exception), expected)\n\n p.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n p.setup()\n\n\nclass TestMultiConns(unittest.TestCase):\n\n def test_mult_conns(self):\n\n class SubGroup(om.Group):\n def setup(self):\n self.add_subsystem('c1', om.ExecComp('y = 2*x', x=np.ones(4), y=2*np.ones(4)),\n promotes=['y', 'x'])\n self.add_subsystem('c2', om.ExecComp('z = 2*y', y=np.ones(4), z=2*np.ones(4)),\n promotes=['z', 'y'])\n\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('x', 10*np.ones(4))\n indeps.add_output('y', np.ones(4))\n\n prob.model.add_subsystem('sub', SubGroup())\n\n prob.model.connect('x', 'sub.x')\n prob.model.connect('y', 'sub.y')\n\n expected = \"<model> <class Group>: The following inputs have multiple connections: \" + \\\n \"sub.c2.y from ['indeps.y', 'sub.c1.y']\"\n\n with self.assertRaises(Exception) as context:\n prob.setup()\n\n self.assertEqual(str(context.exception), expected)\n\n prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n prob.setup()\n\n def test_mixed_conns_same_level(self):\n\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', 10*np.ones(4))\n\n # c2.y is implicitly connected to c1.y\n prob.model.add_subsystem('c1', om.ExecComp('y = 2*x', x=np.ones(4), y=2*np.ones(4)),\n promotes=['y'])\n prob.model.add_subsystem('c2', om.ExecComp('z = 2*y', y=np.ones(4), z=2*np.ones(4)),\n promotes=['y'])\n\n # make a second, explicit, connection to y (which is c2.y promoted)\n prob.model.connect('indeps.x', 'y')\n\n expected = \"<model> <class Group>: Input 'c2.y' cannot be connected to 'indeps.x' \" + \\\n \"because it's already connected to 'c1.y'\"\n\n with self.assertRaises(Exception) as context:\n prob.setup()\n prob.final_setup()\n\n self.assertEqual(str(context.exception), expected)\n\n prob.model._raise_connection_errors = False\n\n with assert_warning(UserWarning, expected):\n prob.setup()\n\n def test_auto_ivc_ambiguous_with_src_indices_msg(self):\n\n class TComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options.declare('src_idx', [0, 1])\n\n def setup(self):\n src = self.options['src_idx']\n self.add_input('x', shape=2, src_indices=src, val=-2038.0)\n self.add_output('y', shape=2)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = 2.0 * inputs['x']\n\n\n prob = om.Problem()\n model = prob.model\n\n prob.model.add_subsystem('c1', TComp(src_idx=[0, 1]), promotes_inputs=['x'])\n prob.model.add_subsystem('c2', TComp(src_idx=[2, 3]), promotes_inputs=['x'])\n prob.model.add_subsystem('d1', TComp(src_idx=[0, 1]), promotes_inputs=[('x', 'zz')])\n prob.model.add_subsystem('d2', TComp(src_idx=[1, 2]), promotes_inputs=[('x', 'zz')])\n\n with self.assertRaises(RuntimeError) as context:\n prob.setup()\n\n msg = \"The following inputs ['c1.x', 'c2.x'] are defined using src_indices but the total source \"\n msg += \"size is undetermined. 
You can specify the src size by setting 'val' or 'src_shape' in a call to set_input_defaults, or by adding an IndepVarComp as the source.\"\n\n err_msg = str(context.exception).split(':')[-1]\n self.assertEqual(err_msg, msg)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestConnectionsDistrib(unittest.TestCase):\n N_PROCS = 2\n\n def test_serial_mpi_error(self):\n # Should still catch the bad index when we are running under mpi with no distributed comps.\n # A bug formerly prevented this.\n class TestComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options['distributed'] = False\n\n def setup(self):\n self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n def compute_partials(self, inputs, J):\n J['y', 'x'] = np.ones((2,))\n\n prob = om.Problem()\n model = prob.model\n model.add_subsystem('p1', om.IndepVarComp('x', np.array([1.0, 3.0])))\n model.add_subsystem('c3', TestComp())\n model.connect(\"p1.x\", \"c3.x\")\n\n rank = prob.comm.rank\n expected = f\"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'p1.x' to 'c3.x'. \" + \\\n \"Index '2' is out of range for source dimension of size 2.\"\n try:\n prob.setup()\n except Exception as err:\n self.assertEqual(str(err).splitlines()[-1], expected)\n else:\n self.fail('Exception expected.')\n\n def test_serial_mpi_error_flat(self):\n # Make sure the flat branch works too.\n class TestComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options['distributed'] = False\n\n def setup(self):\n self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0, flat_src_indices=True)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n def compute_partials(self, inputs, J):\n J['y', 'x'] = np.ones((2,))\n\n prob = om.Problem()\n model = prob.model\n model.add_subsystem('p1', om.IndepVarComp('x', np.array([1.0, 3.0])))\n model.add_subsystem('c3', TestComp())\n model.connect(\"p1.x\", \"c3.x\")\n\n rank = prob.comm.rank\n expected = f\"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index \" + \\\n \"for the connection 'p1.x' to 'c3.x'. 
\" + \\\n \"Index '2' is out of range for source dimension of size 2.\"\n\n try:\n prob.setup()\n except Exception as err:\n self.assertEqual(str(err).splitlines()[-1], expected)\n else:\n self.fail('Exception expected.')\n\[email protected](MPI, \"MPI is required.\")\nclass TestConnectionsError(unittest.TestCase):\n N_PROCS = 2\n\n def test_incompatible_src_indices(self):\n class TestCompDist(om.ExplicitComponent):\n # this comp is distributed and forces PETScTransfer\n def initialize(self):\n self.options['distributed'] = True\n\n def setup(self):\n self.add_input('x', shape=2)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x', val=1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n class TestComp(om.ExplicitComponent):\n def initialize(self):\n self.options['distributed'] = False\n\n def setup(self):\n # read SRC_INDICES on each proc\n self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0)\n self.add_output('y', shape=1)\n self.declare_partials('y', 'x')\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n def compute_partials(self, inputs, J):\n J['y', 'x'] = np.ones((2,))\n\n prob = om.Problem()\n model = prob.model\n\n rank = prob.comm.rank\n\n if rank == 0:\n setval = np.array([2.0, 3.0])\n else:\n setval = np.array([10.0, 20.0])\n\n # no parallel or distributed comps, so default_vector is used (local xfer only)\n model.add_subsystem('p1', om.IndepVarComp('x', setval))\n model.add_subsystem('c3', TestComp())\n model.add_subsystem('c4', TestCompDist())\n model.connect(\"p1.x\", \"c3.x\")\n model.connect(\"c3.y\", \"c4.x\")\n\n with self.assertRaises(ValueError) as context:\n prob.setup(check=False, mode='fwd')\n self.assertEqual(str(context.exception),\n f\"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index for \"\n \"the connection 'p1.x' to 'c3.x'. Index '2' is out of range for source \"\n \"dimension of size 2.\")\n\n\[email protected](MPI, \"MPI is required.\")\nclass TestConnectionsMPIBug(unittest.TestCase):\n N_PROCS = 2\n\n def test_bug_2d_src_indices(self):\n # This model gave an exception during setup.\n\n class Burn(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.arange(12))\n self.add_output('y', np.arange(12))\n\n def compute(self, inputs, outputs):\n outputs['y'] = inputs['x'] * 2.0\n\n class LinkageComp(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('in1', np.zeros((3, 2)))\n self.add_input('in2', np.zeros((3, 2)))\n self.add_output('out', np.zeros((3, 2)))\n\n def compute(self, inputs, outputs):\n outputs['out'] = 3 * inputs['in2'] - 2.5 * inputs['in1']\n\n class Phases(om.ParallelGroup):\n\n def setup(self):\n self.add_subsystem('burn1', Burn())\n self.add_subsystem('burn2', Burn())\n\n class Linkages(om.Group):\n\n def setup(self):\n self.add_subsystem('linkage', LinkageComp())\n\n class Traj(om.Group):\n\n def setup(self):\n self.add_subsystem('phases', Phases())\n self.add_subsystem('linkages', Linkages())\n\n def configure(self):\n self.connect('phases.burn1.y', 'linkages.linkage.in1', src_indices=np.array([[0, 3], [4, 6], [2, 1]]))\n self.connect('phases.burn2.y', 'linkages.linkage.in2', src_indices=np.array([[0, 3], [4, 6], [2, 1]]))\n\n prob = om.Problem(model=Traj())\n prob.setup()\n prob.run_model()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.arange",
"numpy.eye",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
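The OpenMDAO test file in the row above repeatedly exercises model.connect with src_indices and the shape/index error messages it can raise. Below is a minimal sketch of the happy path, assembled only from calls that appear in those tests (IndepVarComp, ExecComp, connect with src_indices); the component names are illustrative and the exact behaviour and error wording depend on the OpenMDAO version.

import numpy as np
import openmdao.api as om

p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(5, dtype=float)))
p.model.add_subsystem('C1', om.ExecComp('y = 2*x', x=np.zeros(2), y=np.zeros(2)))
# take entries 1 and 3 of the length-5 source as the length-2 input
p.model.connect('indep.x', 'C1.x', src_indices=[1, 3])
p.setup()
p.run_model()
print(p['C1.y'])   # expected [2., 6.]

Replacing src_indices=[1, 3] with an out-of-range index such as [1, 100000] is exactly what the "source indices do not specify a valid index" tests above provoke at setup time.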
sgascoin/extractViewAngle | [
"2ec54426714eac9628fa73b622519c88b8ab96b2"
] | [
"extractViewAngle.py"
] | [
"#!/usr/bin/python\n\n\"\"\"\nextractViewAngle.py\nScope: export points or raster of viewing incidences angles from a Theia L2A product (rasters are scaled by 100 as UInt16) \nAuthor: [email protected]\n\"\"\"\n\nimport csv\nimport gdal\nimport numpy as np\nimport ogr\nimport os\nimport osr\nimport sys\nimport xml.etree.ElementTree as ET\n\n\n# function to read points file as lon lat values delimited by tab without header line\ndef readPoints(f):\n with open(f,'r') as csvfile:\n reader = csv.reader(csvfile,delimiter=',')\n data = [r for r in reader]\n return data\n\n\n# function to write points values as csv\ndef writePoints(newPointsFn,outDictList):\n with open(newPointsFn, 'w') as csvfile:\n fieldnames = list(outDictList[0].keys())\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for ouDict in outDictList:\n writer.writerow(ouDict)\n\n\n# function to write an array to a (multiband) geotiff \ndef array2geotiff(newRasterFn,geoTransform,array,noData,outSpatialRef,dataType=gdal.GDT_Float64):\n cols = array.shape[1]\n rows = array.shape[0]\n bands = array.shape[2]\n driver = gdal.GetDriverByName('GTiff')\n outRaster = driver.Create(newRasterFn, cols, rows, bands, dataType, options=['COMPRESS=DEFLATE'])\n outRaster.SetGeoTransform(geoTransform)\n # write bands\n for i in range(bands):\n outband = outRaster.GetRasterBand(i+1) # 1-based index\n outband.WriteArray(array[:,:,i])\n outband.SetNoDataValue(noData)\n outRaster.SetProjection(outSpatialRef.ExportToWkt())\n outRaster.FlushCache()\n\n\n# function to get mask file name and bit number to test which detector was used\ndef getDetector(productFolder,root,bandId,detectorId):\n # find node containing detector metadata based on the presence of attribute \"detector_id\" in subnodes\n n = root.find(\".//Product_Organisation//*[@detector_id]/..\")\n if n is None:\n print('this product version does not provide detector mask')\n maskFn = bitNumber = None\n else:\n # get MASK_FILE element for target band and detector\n s = \"./MASK_FILE/[@band_id='{}'][@detector_id='{}']\".format(bandId,detectorId)\n element = n.find(s)\n # get detector mask file from element value \n maskFn = os.path.join(productFolder,element.text)\n # get detector bit number from element attribute\n bitNumber = int(element.attrib['bit_number'])\n return maskFn, bitNumber\n\n\n# function to test if detector was used at this point\ndef testDetector(point,maskFn,bitNumber):\n # open the raster file\n ds = gdal.Open(maskFn,gdal.GA_ReadOnly)\n if ds is None:\n print('Could not open the mask file')\n sys.exit(1)\n band = ds.GetRasterBand(1) # 1-based index\n data = band.ReadAsArray() # we could save memory and time by reading only the pixel using ReadRaster?\n geoTransform = ds.GetGeoTransform()\n # get position in array\n col,row = pix2map(point.GetX(),point.GetY(),geoTransform)\n # check if point is outside the mask\n if (col < 0 or row < 0 or col > band.XSize or row > band.YSize):\n print('Point is outside the product mask extent')\n test = False\n else:\n value = data[int(col)][int(row)]\n test = testBit(value, bitNumber)\n return test\n\n\n# function which returns True if the bit number n is 1 in an integer value of base 10.\ndef testBit(value, n):\n mask = 1 << (n - 1) # bitNumber is 1-based index\n return(value & mask > 0)\n\n\n# find position of x,y coordinates in georeferenced array with the same projection system\ndef pix2map(x,y,geoTransform):\n col = np.floor((x - geoTransform[0]) / geoTransform[1]) #x pixel\n row = np.floor((y - 
geoTransform[3]) / geoTransform[5]) #y pixel\n return col,row\n\n\n# main function\ndef main(productFolder,outputFolder,points=None):\n # scale factor to export angles\n scale = 100\n # set no data value for UInt16 export\n noDataRaster = np.iinfo(np.uint16).max\n # set no data value for csv export\n noDataCsv = -10000 \n\n # MTD angle grid always have a 5 km resolution\n colstep = 5000\n rowstep = -5000\n # MTD angle grid always have an size of 23x23\n nx = ny = 23\n\n # open metadata file\n MTDFile = os.path.join(productFolder,os.path.basename(os.path.abspath(productFolder)+'_MTD_ALL.xml'))\n tree = ET.parse(MTDFile)\n root = tree.getroot()\n \n # get product id\n productId = root.find(\".//PRODUCT_ID\").text\n # get EPSG code\n epsg = root.find(\".//HORIZONTAL_CS_CODE\").text\n # get grid corners coordinates (warning in array geometry the lower left corner is the upper left in raster geometry)\n ulx = float(root.find(\".//*[@name='upperLeft']/X\").text)\n uly = float(root.find(\".//*[@name='upperLeft']/Y\").text)\n lrx = float(root.find(\".//*[@name='lowerRight']/X\").text)\n lry = float(root.find(\".//*[@name='lowerRight']/Y\").text) \n\n # We assume that the above coordinates correspond to the *centers* of corner pixels\n # otherwise the 23x23 grid would have an extra row and column somewhere\n ulxMTD = ulx - colstep/2\n ulyMTD = uly - rowstep/2\n\n # define the affine transformation coefficients\n geoTransform = (ulxMTD, colstep, 0, ulyMTD, 0, rowstep)\n\n # create output spatial reference\n outSpatialRef = osr.SpatialReference()\n outSpatialRef.ImportFromEPSG(int(epsg))\n\n if points is not None:\n # create coordinate transformation\n inSpatialRef = osr.SpatialReference()\n inSpatialRef.ImportFromEPSG(4326)\n # keep the traditionnal GIS order even if GDAL > 3\n try:\n inSpatialRef.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n except:\n pass\n coordTransform = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)\n\n # loop through angle definition\n for angle in ('Azimuth','Zenith'):\n\n # initialize output list of dictionnaries for points\n if points is not None:\n outDictList = list()\n [outDictList.append(dict()) for i in points]\n\n # loop through bands\n for band in root.iter('Band_Viewing_Incidence_Angles_Grids_List'):\n # init stack of grids\n Zd = np.array([], dtype=float).reshape(nx,ny,0)\n # loop through detectors\n for detector in band.iter('Viewing_Incidence_Angles_Grids'):\n rows = detector.find(angle).findall('.//VALUES')\n grid = ''\n # loop through grid rows to read grid values as a string\n for row in iter(rows):\n grid = grid + row.text + '\\n'\n # array with grid values \n Z = np.fromstring(grid, dtype=float, sep=' ')\n # reshape to 2D array\n Z = Z.reshape((len(rows),-1))\n # add to the stack of detector grids\n Zd = np.dstack((Zd,Z))\n\n # display mean value for this angle and band\n bandId = band.attrib.get('band_id')\n print('{:s} {:s} mean value: {:g}'.format(bandId,angle,np.nanmean(Zd)))\n\n # export as multiband geotiff (we don't flatten the stack since the detector arrays overlap)\n if points is None:\n newRasterFn = os.path.join(\\\n outputFolder,'{:s}_{:s}_{:s}{:d}.tif'.format(productId,bandId,angle,scale))\n # scale \n Zd = scale * Zd\n # set no data\n Zd[np.isnan(Zd)] = noDataRaster\n # write to disk\n array2geotiff(newRasterFn,geoTransform,Zd,noDataRaster,outSpatialRef,gdal.GDT_UInt16)\n\n # find values at points\n else:\n for ipoint,pointCoord in enumerate(points):\n lon,lat = float(pointCoord[0]),float(pointCoord[1])\n # create a 
geometry from coordinates\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(lon, lat) # traditionnal GIS order\n # transform point\n point.Transform(coordTransform)\n # find position in array\n col,row = pix2map(point.GetX(),point.GetY(),geoTransform)\n # check if point is out of the grid\n if (col < 0 or row < 0 or col > nx or row > ny):\n v = noDataCsv\n\n # otherwise retrieve the values in all bands\n else:\n vd = Zd[int(row),int(col),:]\n # select the non-NaN value(s)\n v = vd[np.isfinite(vd)]\n\n # check if point is in no data area\n if len(v) == 0:\n v = noDataCsv\n\n # check if more than one value is found in the stack \n # this can occur because angle grids overlap due to their coarse resolution\n elif len(v) > 1:\n print('solving an ambiguity for band = ' + bandId + ' at point ' + str(pointCoord))\n detectorList = [d.attrib for d in band.iter('Viewing_Incidence_Angles_Grids')]\n # indices where are the finite values\n indexList = np.argwhere(np.isfinite(vd))\n # look into the detector mask files to find which detector has measured this point \n test = False\n for ix in indexList :\n detectorId = detectorList[int(ix)]['detector_id']\n print('testing detector = ' + detectorId)\n maskFn,bitNumber = getDetector(productFolder,root,bandId,detectorId)\n # if the detector mask file is provided then we assign the first value\n if maskFn is None :\n print('takes first detector value by default')\n test = True\n test = testDetector(point,maskFn,bitNumber)\n if test: \n print('found it!')\n v = vd[ix]\n break\n # if test always false (point outside the mask) returns no data\n if test is False: \n v = noDataCsv\n\n outDictList[ipoint]['lon'] = lon\n outDictList[ipoint]['lat'] = lat\n # add this value to the output dictionnary \n if bandId in outDictList[ipoint]:\n outDictList[ipoint][bandId].append(float(v))\n else:\n outDictList[ipoint][bandId] = float(v)\n\n # dump data to text file for this angle and band\n if points is not None:\n newPointsFn = os.path.join(\\\n outputFolder,'{:s}_{:s}.csv'.format(productId,angle))\n writePoints(newPointsFn,outDictList)\n\nif __name__ == \"__main__\":\n\n # check arguments\n if len(sys.argv) == 4:\n print(\"Point mode\")\n pointFile = sys.argv[3]\n # check if input file exists\n if not(os.path.exists(pointFile)):\n print(\"Error: input point file does not exists\")\n sys.exit(1)\n points = readPoints(pointFile) \n\n elif len(sys.argv) == 3:\n print(\"Raster mode\")\n points = None\n\n else:\n print(\"Error: missing arguments\\n\")\n print(\"usage in raster mode: extractViewAngle.py productFolder outputFolder\\n\")\n print(\"usage in point mode: extractViewAngle.py productFolder outputFolder point_table_as_lon_lat.csv\\n\")\n print(\"example: python extractViewAngle.py SENTINEL2A_20180224-103018-463_L2A_T31TGK_C_V2-2 angles\\n\")\n print(\"example: python extractViewAngle.py SENTINEL2A_20180224-103018-463_L2A_T31TGK_C_V2-2 angles points.csv\\n\")\n sys.exit(1)\n\n # check if input file exists\n productFolder = sys.argv[1]\n if not(os.path.exists(productFolder)):\n print (\"Error: input folder does not exists\")\n sys.exit(1)\n\n # check if folder can be created\n outputFolder = sys.argv[2]\n try:\n os.makedirs(outputFolder,exist_ok=True)\n except OSError:\n print (\"Error: cannot create output folder\")\n sys.exit(1)\n else:\n main(productFolder,outputFolder,points)\n \n"
] | [
[
"numpy.isfinite",
"numpy.isnan",
"numpy.dstack",
"numpy.fromstring",
"numpy.iinfo",
"numpy.floor",
"numpy.nanmean",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
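extractViewAngle.py in the row above leans on two small helpers: pix2map, which converts projected x/y coordinates into grid column/row through a GDAL-style geotransform, and testBit, which checks whether a 1-based bit number is set in a detector-mask pixel value. A self-contained sketch of both, with illustrative corner coordinates (not taken from a real product):

import numpy as np


def pix2map(x, y, geoTransform):
    # position of projected coordinates (x, y) in the angle grid
    col = np.floor((x - geoTransform[0]) / geoTransform[1])   # x pixel
    row = np.floor((y - geoTransform[3]) / geoTransform[5])   # y pixel
    return col, row


def testBit(value, n):
    # True if bit number n (1-based, as in the MTD mask metadata) is set
    mask = 1 << (n - 1)
    return (value & mask) > 0


# 23x23 grid with 5 km cells; the corner coordinates here are illustrative
colstep, rowstep = 5000, -5000
ulx, uly = 399960.0, 4900020.0
geoTransform = (ulx - colstep / 2, colstep, 0, uly - rowstep / 2, 0, rowstep)

print(pix2map(402000.0, 4895000.0, geoTransform))   # (col, row) inside the grid
print(testBit(0b00100, 3))                          # True: detector bit 3 is set

In the script itself the geotransform is derived from the MTD upperLeft/lowerRight corners and shifted by half a 5 km cell, on the assumption that those corners refer to the centres of the corner pixels of the 23x23 grid.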
yannikkellerde/TD3 | [
"6101baaa38a53bdaa34e33105f4e016eb84cf5a9"
] | [
"my_replay_buffer.py"
] | [
"import numpy as np\nimport torch\nimport pickle\nimport os\n\nclass ReplayBuffer_particles(object):\n def __init__(self, obs_space, action_space, max_size=int(1e6), load_folder=None):\n self.max_size = max_size\n self.store_np = [\"state_features\",\"state_particles\",\"action\",\n \"next_state_features\",\"next_state_particles\",\"reward\",\n \"not_done\"]\n self.store_pkl = [\"ptr\",\"size\"]\n if load_folder is None:\n self.ptr = 0\n self.size = 0\n self.state_features = np.zeros((max_size,obs_space[0].shape[0]))\n self.state_particles = np.zeros((max_size, *obs_space[1].shape))\n self.action = np.zeros((max_size, action_space.shape[0]))\n self.next_state_features = np.zeros((max_size,obs_space[0].shape[0]))\n self.next_state_particles = np.zeros((max_size, *obs_space[1].shape))\n self.reward = np.zeros((max_size, 1))\n self.not_done = np.zeros((max_size, 1))\n else:\n self.load(load_folder)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def save(self,folder):\n os.makedirs(folder,exist_ok=True)\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n pickle.dump(self.__dict__[attrib],f,protocol=4)\n\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n np.save(f,self.__dict__[attrib])\n \n def load(self,folder):\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = pickle.load(f)\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = np.load(f)\n\n def add(self, state, action, next_state, reward, done):\n self.state_features[self.ptr] = state[0]\n self.state_particles[self.ptr] = state[1]\n self.action[self.ptr] = action\n self.next_state_features[self.ptr] = next_state[0]\n self.next_state_particles[self.ptr] = next_state[1]\n self.reward[self.ptr] = reward\n self.not_done[self.ptr] = 1. 
- done\n\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample(self, batch_size):\n ind = np.random.randint(0, self.size, size=batch_size)\n\n return (\n torch.FloatTensor(self.state_features[ind]).to(self.device),\n torch.FloatTensor(self.state_particles[ind]).to(self.device),\n torch.FloatTensor(self.action[ind]).to(self.device),\n torch.FloatTensor(self.next_state_features[ind]).to(self.device),\n torch.FloatTensor(self.next_state_particles[ind]).to(self.device),\n torch.FloatTensor(self.reward[ind]).to(self.device),\n torch.FloatTensor(self.not_done[ind]).to(self.device)\n )\n\n\nclass ReplayBuffer_featured(object):\n def __init__(self, obs_space, action_space, max_size=int(1e6),load_folder=None):\n self.max_size = max_size\n self.ptr = 0\n self.size = 0\n self.store_np = [\"state\",\"action\",\"next_state\",\"reward\",\"not_done\"]\n self.store_pkl = [\"ptr\",\"size\"]\n\n if load_folder is None:\n self.state = np.zeros((max_size, obs_space.shape[0]))\n self.action = np.zeros((max_size, action_space.shape[0]))\n self.next_state = np.zeros((max_size, obs_space.shape[0]))\n self.reward = np.zeros((max_size, 1))\n self.not_done = np.zeros((max_size, 1))\n else:\n self.load(load_folder)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def save(self,folder):\n os.makedirs(folder,exist_ok=True)\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n pickle.dump(self.__dict__[attrib],f,protocol=4)\n\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"wb\") as f:\n np.save(f,self.__dict__[attrib])\n \n def load(self,folder):\n for attrib in self.store_pkl:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = pickle.load(f)\n for attrib in self.store_np:\n with open(os.path.join(folder,attrib+\".pkl\"), \"rb\") as f:\n self.__dict__[attrib] = np.load(f)\n\n def add(self, state, action, next_state, reward, done):\n self.state[self.ptr] = state\n self.action[self.ptr] = action\n self.next_state[self.ptr] = next_state\n self.reward[self.ptr] = reward\n self.not_done[self.ptr] = 1. - done\n\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample(self, batch_size):\n ind = np.random.randint(0, self.size, size=batch_size)\n\n return (\n torch.FloatTensor(self.state[ind]).to(self.device),\n torch.FloatTensor(self.action[ind]).to(self.device),\n torch.FloatTensor(self.next_state[ind]).to(self.device),\n torch.FloatTensor(self.reward[ind]).to(self.device),\n torch.FloatTensor(self.not_done[ind]).to(self.device)\n )\n\nif __name__ == \"__main__\":\n env = gym.make(\"water_pouring:Pouring-mdp-full-v0\")\n r = ReplayBuffer(env.observation_space, env.action_space)\n r.save(\"test.pkl\")"
] | [
[
"numpy.save",
"torch.FloatTensor",
"torch.cuda.is_available",
"numpy.load",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
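The replay-buffer entry above stores transitions in pre-allocated NumPy arrays, overwrites the oldest slot once `ptr` wraps past `max_size`, records `not_done = 1 - done`, and samples uniformly random indices into torch tensors on the available device. Below is a minimal, self-contained sketch of that same ring-buffer add/sample pattern; the class and helper names are hypothetical illustrations, not part of the dataset row, and note that the row's own `__main__` block would additionally need an `import gym` and a concrete buffer class name (only `ReplayBuffer_particles` / `ReplayBuffer_featured` are defined) to run as written.

import numpy as np
import torch

class MinimalReplayBuffer:
    """Fixed-size ring buffer for flat-feature transitions (illustrative sketch only)."""

    def __init__(self, obs_dim, act_dim, max_size=100_000):
        self.max_size = max_size
        self.ptr = 0    # next write position
        self.size = 0   # number of valid entries currently stored
        self.state = np.zeros((max_size, obs_dim), dtype=np.float32)
        self.action = np.zeros((max_size, act_dim), dtype=np.float32)
        self.next_state = np.zeros((max_size, obs_dim), dtype=np.float32)
        self.reward = np.zeros((max_size, 1), dtype=np.float32)
        self.not_done = np.zeros((max_size, 1), dtype=np.float32)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def add(self, state, action, next_state, reward, done):
        # Overwrite the oldest slot once the buffer wraps around.
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1.0 - float(done)
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self, batch_size):
        # Uniform sampling over the filled prefix of the buffer.
        idx = np.random.randint(0, self.size, size=batch_size)
        as_t = lambda a: torch.as_tensor(a[idx], device=self.device)
        return (as_t(self.state), as_t(self.action), as_t(self.next_state),
                as_t(self.reward), as_t(self.not_done))

if __name__ == "__main__":
    buf = MinimalReplayBuffer(obs_dim=4, act_dim=2, max_size=1000)
    for _ in range(32):
        buf.add(np.random.randn(4), np.random.randn(2), np.random.randn(4), 0.0, False)
    print([t.shape for t in buf.sample(16)])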
savan77/nni | [
"510213393d9cae58c5a8cccd21f322f7bba4e0cf",
"510213393d9cae58c5a8cccd21f322f7bba4e0cf"
] | [
"examples/trials/cifar10_grad_match/cords/selectionstrategies/supervisedlearning/submodularselectionstrategy.py",
"test/ut/retiarii/test_serializer.py"
] | [
"import apricot\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom scipy.sparse import csr_matrix\nfrom .dataselectionstrategy import DataSelectionStrategy\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\nclass SubmodularSelectionStrategy(DataSelectionStrategy):\n \"\"\"\n This class extends :class:`selectionstrategies.supervisedlearning.dataselectionstrategy.DataSelectionStrategy`\n to include submodular optmization functions using apricot for data selection.\n\n Parameters\n ----------\n trainloader: class\n Loading the training data using pytorch DataLoader\n valloader: class\n Loading the validation data using pytorch DataLoader\n model: class\n Model architecture used for training\n loss_type: class\n The type of loss criterion\n device: str\n The device being utilized - cpu | cuda\n num_classes: int\n The number of target classes in the dataset\n linear_layer: bool\n Apply linear transformation to the data\n if_convex: bool\n If convex or not\n selection_type: str\n PerClass or Supervised\n submod_func_type: str\n The type of submodular optimization function. Must be one of\n 'facility-location', 'graph-cut', 'sum-redundancy', 'saturated-coverage' \n \"\"\"\n\n def __init__(self, trainloader, valloader, model, loss_type,\n device, num_classes, linear_layer, if_convex, selection_type, submod_func_type):\n \"\"\"\n Constructer method\n \"\"\"\n\n super().__init__(trainloader, valloader, model, num_classes, linear_layer)\n\n self.loss_type = loss_type # Make sure it has reduction='none' instead of default\n self.device = device\n self.if_convex = if_convex\n self.selection_type = selection_type\n self.submod_func_type = submod_func_type\n\n\n def distance(self, x, y, exp=2):\n \"\"\"\n Compute the distance.\n \n Parameters\n ----------\n x: Tensor\n First input tensor\n y: Tensor\n Second input tensor\n exp: float, optional\n The exponent value (default: 2)\n \n Returns\n ----------\n dist: Tensor\n Output tensor \n \"\"\"\n\n n = x.size(0)\n m = y.size(0)\n d = x.size(1)\n x = x.unsqueeze(1).expand(n, m, d)\n y = y.unsqueeze(0).expand(n, m, d)\n dist = torch.pow(x - y, exp).sum(2)\n #dist = torch.exp(-1 * torch.pow(x - y, 2).sum(2))\n return dist\n\n\n def compute_score(self, model_params, idxs):\n \"\"\"\n Compute the score of the indices.\n\n Parameters\n ----------\n model_params: OrderedDict\n Python dictionary object containing models parameters\n idxs: list\n The indices\n \"\"\"\n\n trainset = self.trainloader.sampler.data_source\n subset_loader = torch.utils.data.DataLoader(trainset, batch_size=self.trainloader.batch_size, shuffle=False,\n sampler=SubsetRandomSampler(idxs),\n pin_memory=True)\n self.model.load_state_dict(model_params)\n self.N = 0\n g_is = []\n\n with torch.no_grad():\n if self.if_convex:\n for batch_idx, (inputs, targets) in enumerate(subset_loader):\n inputs, targets = inputs, targets\n self.N += inputs.size()[0]\n g_is.append(inputs.view(inputs.size()[0], -1))\n else:\n embDim = self.model.get_embedding_dim()\n for batch_idx, (inputs, targets) in enumerate(subset_loader):\n inputs, targets = inputs.to(self.device), targets.to(self.device, non_blocking=True)\n self.N += inputs.size()[0]\n with torch.no_grad():\n out, l1 = self.model(inputs, last=True)\n data = F.softmax(out, dim=1)\n outputs = torch.zeros(len(inputs), self.num_classes).to(self.device)\n outputs.scatter_(1, targets.view(-1, 1), 1)\n l0_grads = data - outputs\n if self.linear_layer:\n l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)\n l1_grads = 
l0_expand * l1.repeat(1, self.num_classes)\n g_is.append(torch.cat((l0_grads, l1_grads), dim=1))\n else:\n g_is.append(l0_grads)\n \n self.dist_mat = torch.zeros([self.N, self.N], dtype=torch.float32)\n first_i = True\n for i, g_i in enumerate(g_is, 0):\n if first_i:\n size_b = g_i.size(0)\n first_i = False\n for j, g_j in enumerate(g_is, 0):\n self.dist_mat[i * size_b: i * size_b + g_i.size(0),\n j * size_b: j * size_b + g_j.size(0)] = self.distance(g_i, g_j)\n self.const = torch.max(self.dist_mat).item()\n self.dist_mat = (self.const - self.dist_mat).numpy()\n\n\n def compute_gamma(self, idxs):\n \"\"\"\n Compute the gamma values for the indices.\n\n Parameters\n ----------\n idxs: list\n The indices\n \n Returns\n ----------\n gamma: list\n Gradient values of the input indices \n \"\"\"\n\n if self.selection_type == 'PerClass':\n gamma = [0 for i in range(len(idxs))]\n best = self.dist_mat[idxs] # .to(self.device)\n rep = np.argmax(best, axis=0)\n for i in rep:\n gamma[i] += 1\n elif self.selection_type == 'Supervised':\n gamma = [0 for i in range(len(idxs))]\n best = self.dist_mat[idxs] # .to(self.device)\n rep = np.argmax(best, axis=0)\n for i in range(rep.shape[1]):\n gamma[rep[0, i]] += 1\n return gamma\n\n\n def get_similarity_kernel(self):\n \"\"\"\n Obtain the similarity kernel.\n\n Returns\n ----------\n kernel: ndarray\n Array of kernel values\n \"\"\"\n\n for batch_idx, (inputs, targets) in enumerate(self.trainloader):\n if batch_idx == 0:\n labels = targets\n else:\n tmp_target_i = targets\n labels = torch.cat((labels, tmp_target_i), dim=0)\n kernel = np.zeros((labels.shape[0], labels.shape[0]))\n for target in np.unique(labels):\n x = np.where(labels == target)[0]\n # prod = np.transpose([np.tile(x, len(x)), np.repeat(x, len(x))])\n for i in x:\n kernel[i, x] = 1\n return kernel\n\n\n def select(self, budget, model_params, optimizer):\n \"\"\"\n Data selection method using different submodular optimization\n functions.\n \n Parameters\n ----------\n budget: int\n The number of data points to be selected\n model_params: OrderedDict\n Python dictionary object containing models parameters\n optimizer: str\n The optimization approach for data selection. 
Must be one of\n 'random', 'modular', 'naive', 'lazy', 'approximate-lazy', 'two-stage',\n 'stochastic', 'sample', 'greedi', 'bidirectional'\n \n Returns\n ----------\n total_greedy_list: list\n List containing indices of the best datapoints \n gammas: list\n List containing gradients of datapoints present in greedySet\n \"\"\"\n\n for batch_idx, (inputs, targets) in enumerate(self.trainloader):\n if batch_idx == 0:\n x_trn, labels = inputs, targets\n else:\n tmp_inputs, tmp_target_i = inputs, targets\n labels = torch.cat((labels, tmp_target_i), dim=0)\n per_class_bud = int(budget / self.num_classes)\n total_greedy_list = []\n gammas = []\n if self.selection_type == 'PerClass':\n for i in range(self.num_classes):\n idxs = torch.where(labels == i)[0]\n self.compute_score(model_params, idxs)\n if self.submod_func_type == 'facility-location': \n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'graph-cut':\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'sum-redundancy':\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'saturated-coverage':\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n \n sim_sub = fl.fit_transform(self.dist_mat)\n greedyList = list(np.argmax(sim_sub, axis=1))\n gamma = self.compute_gamma(greedyList) \n total_greedy_list.extend(idxs[greedyList])\n gammas.extend(gamma)\n\n elif self.selection_type == 'Supervised':\n for i in range(self.num_classes):\n if i == 0:\n idxs = torch.where(labels == i)[0]\n N = len(idxs)\n self.compute_score(model_params, idxs)\n row = idxs.repeat_interleave(N)\n col = idxs.repeat(N)\n data = self.dist_mat.flatten()\n else:\n idxs = torch.where(labels == i)[0]\n N = len(idxs)\n self.compute_score(model_params, idxs)\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\n col = torch.cat((col, idxs.repeat(N)), dim=0)\n data = np.concatenate([data, self.dist_mat.flatten()], axis=0)\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\n self.dist_mat = sparse_simmat \n if self.submod_func_type == 'facility-location':\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'graph-cut':\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'sum-redundancy':\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n elif self.submod_func_type == 'saturated-coverage':\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\n n_samples=per_class_bud, optimizer=optimizer)\n\n sim_sub = fl.fit_transform(sparse_simmat)\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\n gammas = self.compute_gamma(total_greedy_list)\n return total_greedy_list, gammas\n",
"import json\nfrom pathlib import Path\nimport re\nimport sys\n\nimport torch\nfrom nni.retiarii import json_dumps, json_loads, blackbox\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\n\nsys.path.insert(0, Path(__file__).parent.as_posix())\n\nfrom imported.model import ImportTest\n\n\nclass Foo:\n def __init__(self, a, b=1):\n self.aa = a\n self.bb = [b + 1 for _ in range(1000)]\n\n def __eq__(self, other):\n return self.aa == other.aa and self.bb == other.bb\n\n\ndef test_blackbox():\n module = blackbox(Foo, 3)\n assert json_loads(json_dumps(module)) == module\n module = blackbox(Foo, b=2, a=1)\n assert json_loads(json_dumps(module)) == module\n\n module = blackbox(Foo, Foo(1), 5)\n dumped_module = json_dumps(module)\n assert len(dumped_module) > 200 # should not be too longer if the serialization is correct\n\n module = blackbox(Foo, blackbox(Foo, 1), 5)\n dumped_module = json_dumps(module)\n assert len(dumped_module) < 200 # should not be too longer if the serialization is correct\n assert json_loads(dumped_module) == module\n\n\ndef test_blackbox_module():\n module = ImportTest(3, 0.5)\n assert json_loads(json_dumps(module)) == module\n\n\ndef test_dataset():\n dataset = blackbox(MNIST, root='data/mnist', train=False, download=True)\n dataloader = blackbox(DataLoader, dataset, batch_size=10)\n\n dumped_ans = {\n \"__type__\": \"torch.utils.data.dataloader.DataLoader\",\n \"arguments\": {\n \"batch_size\": 10,\n \"dataset\": {\n \"__type__\": \"torchvision.datasets.mnist.MNIST\",\n \"arguments\": {\"root\": \"data/mnist\", \"train\": False, \"download\": True}\n }\n }\n }\n assert json_dumps(dataloader) == json_dumps(dumped_ans)\n dataloader = json_loads(json_dumps(dumped_ans))\n assert isinstance(dataloader, DataLoader)\n\n dataset = blackbox(MNIST, root='data/mnist', train=False, download=True,\n transform=blackbox(\n transforms.Compose,\n [blackbox(transforms.ToTensor), blackbox(transforms.Normalize, (0.1307,), (0.3081,))]\n ))\n dataloader = blackbox(DataLoader, dataset, batch_size=10)\n x, y = next(iter(json_loads(json_dumps(dataloader))))\n assert x.size() == torch.Size([10, 1, 28, 28])\n assert y.size() == torch.Size([10])\n\n dataset = blackbox(MNIST, root='data/mnist', train=False, download=True,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))\n dataloader = blackbox(DataLoader, dataset, batch_size=10)\n x, y = next(iter(json_loads(json_dumps(dataloader))))\n assert x.size() == torch.Size([10, 1, 28, 28])\n assert y.size() == torch.Size([10])\n\n\ndef test_type():\n assert json_dumps(torch.optim.Adam) == '{\"__typename__\": \"torch.optim.adam.Adam\"}'\n assert json_loads('{\"__typename__\": \"torch.optim.adam.Adam\"}') == torch.optim.Adam\n assert re.match(r'{\"__typename__\": \"(.*)test_serializer.Foo\"}', json_dumps(Foo))\n\n\nif __name__ == '__main__':\n test_blackbox()\n test_blackbox_module()\n test_dataset()\n test_type()\n"
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"torch.zeros",
"numpy.unique",
"torch.cat",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.repeat_interleave",
"numpy.argmax",
"torch.no_grad",
"numpy.where",
"torch.where",
"numpy.zeros",
"torch.pow"
],
[
"torch.Size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
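The submodular-selection entry above builds a dense pairwise distance matrix over per-sample gradient embeddings, reflects it around its maximum to obtain a similarity kernel, and only then passes it to apricot's facility-location / graph-cut selectors with metric='precomputed'. The sketch below isolates just that kernel construction, assuming generic per-sample feature vectors; the function names are hypothetical and the apricot call itself is deliberately omitted.

import torch

def pairwise_sq_distance(x, y):
    # All-pairs squared Euclidean distance via broadcast-expand, mirroring the
    # expand / pow / sum pattern used by the strategy's distance() method.
    n, d = x.shape
    m = y.shape[0]
    xe = x.unsqueeze(1).expand(n, m, d)       # (n, m, d)
    ye = y.unsqueeze(0).expand(n, m, d)       # (n, m, d)
    return torch.pow(xe - ye, 2).sum(dim=2)   # (n, m)

def distance_to_similarity(dist):
    # Facility location maximizes similarity, so distances are flipped
    # around their maximum to yield a non-negative similarity kernel.
    return (torch.max(dist) - dist).numpy()

if __name__ == "__main__":
    grads = torch.randn(8, 16)                # stand-in for per-sample last-layer gradients
    dist = pairwise_sq_distance(grads, grads)
    sim = distance_to_similarity(dist)
    print(dist.shape, sim.shape)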
cloudhan/jax | [
"9781f365a1c5dbdf57bf78b98831c4390eb9ca5f"
] | [
"jax/interpreters/pxla.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementation of pmap and related functionality.\"\"\"\n\n# A ShardingSpec describes at a high level how a logical array is sharded across\n# devices (each ShardedDeviceArray has a ShardingSpec, and ShardingSpecs also\n# describe how to shard inputs to a parallel computation). spec_to_indices()\n# encodes exactly how a given ShardingSpec is translated to device buffers, i.e.\n# how the sharded array is \"laid out\" across devices. Given a sequence of\n# devices, we shard the data across the devices in row-major order, with\n# replication treated as an extra inner dimension.\n#\n# For example, given the logical data array [1, 2, 3, 4], if we were to\n# partition this array 4 ways with a replication factor of 2, for a total of 8\n# devices, the data on each device would be: [1, 1], [2, 2], [3, 3], [4, 4].\n#\n# This encoding is assumed by various parts of the system, e.g. generating\n# replica groups for collective operations.\n\nfrom contextlib import contextmanager\nfrom collections import defaultdict, OrderedDict\nimport dataclasses\nfrom functools import partial\nimport itertools as it\nimport operator as op\nimport threading\nfrom typing import (Any, Callable, Dict, List, NamedTuple, Optional,\n Sequence, Set, Tuple, Type, Union, Iterable)\nimport sys\n\nfrom absl import logging\nimport numpy as np\n\nfrom .._src.config import config\nfrom .. import core\nfrom .. import linear_util as lu\nfrom jax._src.abstract_arrays import array_types\nfrom ..core import ConcreteArray, ShapedArray\nfrom jax._src import device_array\nfrom .._src import source_info_util\nfrom .._src.util import (unzip3, prod, safe_map, safe_zip,\n extend_name_stack, wrap_name, assert_unreachable,\n tuple_insert, tuple_delete, distributed_debug_log)\nfrom ..errors import JAXTypeError\nfrom jax._src import dispatch\nfrom jax._src.lib import xla_bridge as xb\nfrom jax._src.lib import xla_client as xc\nfrom jax._src.lib import pmap_lib\nfrom ..tree_util import tree_flatten, tree_map\nfrom . import batching\nfrom . import partial_eval as pe\nfrom . import xla\nfrom . 
import ad\n\n# Built in Python lists don't support weak refs but subclasses of lists do.\nclass WeakRefList(list):\n pass\n\nif sys.version_info >= (3, 8):\n from functools import cached_property as maybe_cached_property\nelse:\n maybe_cached_property = property\n\nif sys.version_info >= (3, 9):\n OrderedDictType = OrderedDict\nelse:\n OrderedDictType = Dict\n\nxops = xc.ops\n\nunsafe_map, map = map, safe_map # type: ignore\n\nIndex = Union[int, slice, Tuple[Union[int, slice], ...]]\n\nNoSharding = pmap_lib.NoSharding\nChunked = pmap_lib.Chunked\nUnstacked = pmap_lib.Unstacked\n\nShardedAxis = pmap_lib.ShardedAxis\nReplicated = pmap_lib.Replicated\n\n_UNSHARDED_INSTANCE = NoSharding()\nAvalDimSharding = Union[Unstacked, Chunked, NoSharding]\nMeshDimAssignment = Union[ShardedAxis, Replicated]\nShardingSpec = pmap_lib.ShardingSpec\n\n\ndef sharding_spec_mesh_shape(self):\n sharded_axis_sizes = []\n for sharding in self.sharding:\n if isinstance(sharding, NoSharding):\n continue\n elif isinstance(sharding, Unstacked):\n sharded_axis_sizes.append(sharding.size)\n elif isinstance(sharding, Chunked):\n sharded_axis_sizes.extend(sharding.chunks)\n else:\n assert_unreachable(sharding)\n return tuple(sharded_axis_sizes[a.axis] if isinstance(a, ShardedAxis) else a.replicas\n for a in self.mesh_mapping)\n\ndef sharding_spec_sharding_proto(self):\n \"\"\"Converts a ShardingSpec to an OpSharding proto.\n\n See\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/xla_data.proto#L601\n for details on the OpSharding proto.\n Unfortunately the semantics are not very well described in the proto spec, but the code here might help:\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py\n \"\"\"\n mesh_shape = self.mesh_shape\n mesh = np.arange(np.prod(mesh_shape)).reshape(mesh_shape)\n\n sharded_axes = {} # maps sharded axis identifiers to mesh axis indices to which they're mapped\n replicated_maxes = [] # lists mesh axis identifiers to replicate over\n for maxis, assignment in enumerate(self.mesh_mapping):\n if isinstance(assignment, Replicated):\n replicated_maxes.append(maxis)\n elif isinstance(assignment, ShardedAxis):\n sharded_axes[assignment.axis] = maxis\n else:\n assert_unreachable(assignment)\n\n proto = xc.OpSharding()\n if len(replicated_maxes) == len(self.mesh_mapping):\n proto.type = xc.OpSharding.Type.REPLICATED\n return proto\n else:\n proto.type = xc.OpSharding.Type.OTHER\n\n mesh_permutation = []\n new_mesh_shape = []\n next_sharded_axis = 0\n for axis, sharding in enumerate(self.sharding):\n if isinstance(sharding, NoSharding):\n new_mesh_shape.append(1) # Add a dummy mesh axis we won't be sharding over\n elif isinstance(sharding, Chunked):\n for nchunks in sharding.chunks:\n maxis = sharded_axes[next_sharded_axis]\n assert mesh_shape[maxis] == nchunks\n mesh_permutation.append(maxis)\n next_sharded_axis += 1\n new_mesh_shape.append(int(np.prod(sharding.chunks)))\n elif isinstance(sharding, Unstacked):\n raise RuntimeError(\"Cannot convert unstacked sharding specs to XLA OpSharding\")\n else:\n assert_unreachable(sharding)\n\n # Create the partial sharding proto if tensor is replicated over some mesh axes\n if replicated_maxes:\n new_mesh_shape.append(-1)\n mesh_permutation.extend(replicated_maxes)\n proto.replicate_on_last_tile_dim = True\n\n proto_mesh = mesh.transpose(mesh_permutation).reshape(new_mesh_shape)\n proto.tile_assignment_dimensions = list(proto_mesh.shape)\n 
proto.tile_assignment_devices = list(proto_mesh.flat)\n return proto\n\ndef sharding_spec_indices(self, shape: Tuple[int, ...]) -> np.ndarray:\n \"\"\"Returns NumPy-style indices corresponding to a sharding spec.\n\n Args:\n shape: The shape of the logical array being sharded.\n\n Returns:\n An ndarray with the same shape as the logical mesh (as derived form\n `mesh_mapping`). Each entry is a NumPy-style index selecting the subset of\n the data array to be placed on a corresponding device. The indices can be\n ints, slice objects with step=1, or tuples of those.\n \"\"\"\n assert len(shape) == len(self.sharding), (shape, self.sharding)\n\n axis_indices: List[Sequence[Index]] = []\n shard_indices_shape = []\n for dim, sharding in enumerate(self.sharding):\n axis_size = shape[dim]\n if isinstance(sharding, NoSharding):\n axis_indices.append([slice(None)])\n # NOTE: We don't append unsharded dimensions to shard_indices_shape here,\n # because they do not appear in the mesh mapping.\n elif isinstance(sharding, Unstacked):\n assert axis_size == sharding.size, f'{axis_size} != {sharding.size}'\n axis_indices.append(range(axis_size))\n shard_indices_shape.append(axis_size)\n elif isinstance(sharding, Chunked):\n total_chunks = int(np.prod(sharding.chunks))\n shard_size, ragged = divmod(axis_size, total_chunks)\n assert not ragged, (axis_size, total_chunks, dim)\n axis_indices.append([slice(i * shard_size, (i + 1) * shard_size)\n for i in range(total_chunks)])\n shard_indices_shape.extend(sharding.chunks)\n else:\n assert_unreachable(sharding)\n\n # shard_indices is an ndarray representing the sharded axes of the logical array,\n # with each dimension having size equal to the number of shards across the corresponding\n # logical array dimension, and each element containing the multi-dimensional index that\n # is used to extract the corresponding shard of the logical array.\n shard_indices = np.empty([prod(shard_indices_shape)], dtype=np.object_)\n for i, idxs in enumerate(it.product(*axis_indices)):\n shard_indices[i] = idxs\n shard_indices = shard_indices.reshape(shard_indices_shape)\n\n # Ensure that each sharded axis is used exactly once in the mesh mapping\n num_sharded_dim = len(shard_indices_shape)\n sharded_dim_perm = [a.axis for a in self.mesh_mapping if isinstance(a, ShardedAxis)]\n assert (set(sharded_dim_perm) == set(range(num_sharded_dim)) and\n len(sharded_dim_perm) == num_sharded_dim)\n # Replicate/reorder the indices according to the mesh mapping\n replica_sizes = tuple(a.replicas for a in self.mesh_mapping if isinstance(a, Replicated))\n replica_dim, sharded_dim = it.count(0), iter(sharded_dim_perm)\n perm = [next(replica_dim) if isinstance(a, Replicated) else\n len(replica_sizes) + next(sharded_dim)\n for a in self.mesh_mapping]\n return (np.broadcast_to(shard_indices, replica_sizes + shard_indices.shape)\n .transpose(perm))\n\ndef sharding_spec_repr(self):\n return f'ShardingSpec({self.sharding}, {self.mesh_mapping})'\n\n\nShardingSpec.mesh_shape = property(sharding_spec_mesh_shape)\nShardingSpec.sharding_proto = sharding_spec_sharding_proto\nShardingSpec.indices = sharding_spec_indices\n# mypy raises: error: Cannot assign to a method [assignment]\nShardingSpec.__repr__ = sharding_spec_repr # type: ignore\n# Do not pollute the namespace\ndel sharding_spec_mesh_shape, sharding_spec_indices, sharding_spec_repr\n\ndef spec_to_indices(shape: Tuple[int, ...],\n spec: ShardingSpec) -> Tuple[Index, ...]:\n \"\"\"Returns numpy-style indices corresponding to a sharding spec.\n\n Each 
index describes a shard of the array. The order of the indices is the\n same as the device_buffers of a ShardedDeviceArray (i.e. the data is laid out\n row-major).\n\n Args:\n shape: The shape of the logical array being sharded.\n spec: Describes how the array is sharded and how the shards are assigned to\n the logical mesh.\n\n Returns:\n A tuple of length equal to the size of the mesh (inferred as the product of\n sharded dimension sizes and all replication factors). Each element is an\n int, a slice object with step=1, or a tuple thereof, to be treated as an\n index into the full logical array.\n \"\"\"\n return tuple(spec.indices(shape).flat) # type: ignore\n\n\n### util\n\ndef identity(x): return x\n\ndef _shard_arg(arg, devices, arg_indices):\n \"\"\"Returns a list of size len(devices) containing per-device buffers.\n\n For the C++ pmap path, we fallback to Python (this function) to shard\n arguments that are not supported by the C++ `ShardArg`.\n\n Arrgs:\n arg: The Python argument.\n devices: The list of devices to shard over.\n arg_indices: A list of `len(devices)` indices to use to shard the argument.\n \"\"\"\n if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:\n # The shard_arg_handlers allow an extensible set of types to be sharded, but\n # inline handling for ShardedDeviceArray as a special case for performance\n # NOTE: we compare indices instead of sharding_spec because\n # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.\n return [\n buf if buf.device() == d else buf.copy_to_device(d)\n for d, buf in zip(devices, arg.device_buffers)\n ]\n else:\n arg = xla.canonicalize_dtype(arg)\n return shard_arg_handlers[type(arg)](arg, devices, arg_indices)\n\n\n\ndef shard_args(devices: Sequence[xb.xla_client.Device],\n indices: Sequence[Sequence[Index]],\n args) -> Sequence[Sequence[xb.xla_client.Buffer]]:\n \"\"\"Shard each argument data array along its leading axis.\n\n Args:\n devices: sequence of Devices mapping replica index to a physical device.\n indices: sequence of the same length as `args` describing how each arg\n should be sharded/replicated across `devices`. Each element in `indices`\n is the same length as `devices`.\n args: a sequence of JaxTypes representing arguments to be sharded according\n to `indices` and placed on `devices`.\n\n Returns:\n A list of length matching args, containing lists of per-device buffers\n for each argument.\n \"\"\"\n return [_shard_arg(arg, devices, indices[a]) for a, arg in enumerate(args)]\n\n\nshard_arg_handlers: Dict[Any, Callable[[Any, Any, Any], Sequence[Any]]] = {}\nshard_arg_handlers[core.Unit] = \\\n lambda x, devices, _: device_put(core.unit, devices, replicate=True)\ndef _shard_array(x, devices, indices):\n return device_put([x[i] for i in indices], devices)\nfor _t in array_types:\n shard_arg_handlers[_t] = _shard_array\n\ndef _shard_device_array(x, devices, indices):\n start_indices, limit_indices, removed_dims = unzip3(\n _as_slice_indices(x, idx) for idx in indices)\n shards = x._multi_slice(start_indices, limit_indices, removed_dims)\n return device_put(shards, devices)\nfor t in device_array.device_array_types:\n shard_arg_handlers[t] = _shard_device_array\n\n\n# NOTE(skye): we could refactor to generate _multi_slice parameters directly\n# from the input ShardingSpec, rather than the indices. 
However, this would\n# require duplicating the ordering logic of spec_to_indices, which is more\n# subtle and more likely to change than the index logic we have to support here.\ndef _as_slice_indices(arr: device_array.DeviceArrayProtocol, idx: Index) -> Tuple[\n Tuple[int, ...], Tuple[int, ...], Tuple[int, ...]]:\n \"\"\"Returns start_indices, limit_indices, removed_dims\"\"\"\n start_indices = [0] * arr.ndim\n limit_indices = list(arr.shape)\n removed_dims = []\n\n tuple_idx = idx if isinstance(idx, tuple) else (idx,)\n for dim, sub_idx in enumerate(tuple_idx):\n if isinstance(sub_idx, int):\n start_indices[dim] = sub_idx\n limit_indices[dim] = sub_idx + 1\n removed_dims.append(dim)\n elif sub_idx == slice(None):\n continue\n else:\n assert isinstance(sub_idx, slice), sub_idx\n assert isinstance(sub_idx.start, int), sub_idx\n assert isinstance(sub_idx.stop, int), sub_idx\n start_indices[dim] = sub_idx.start\n limit_indices[dim] = sub_idx.stop\n\n return tuple(start_indices), tuple(limit_indices), tuple(removed_dims) # type: ignore\n\n\ndef shard_aval(size, axis: int, aval):\n try:\n return shard_aval_handlers[type(aval)](size, axis, aval)\n except KeyError as err:\n raise TypeError(f\"No shard_aval handler for type: {type(aval)}\") from err\nshard_aval_handlers: Dict[Type[core.AbstractValue], Callable[[int, int, Any], Any]] = {}\nshard_aval_handlers[core.AbstractUnit] = lambda size, axis, x: x\ndef _shard_abstract_array(size, axis: int, x):\n try:\n if x.shape[axis] != size:\n raise ValueError(f\"Axis size {size} does not match dimension {axis} of \"\n f\"shape {x.shape}\")\n except IndexError:\n raise ValueError(\"Cannot split a {x.dim}D value along axis {axis}\") from None\n return x.update(shape=tuple_delete(x.shape, axis))\nshard_aval_handlers[ShapedArray] = _shard_abstract_array\n\nMeshAxisName = Any\n\"\"\"\nArrayMapping specifies how an ndarray should map to mesh axes.\n\nNote that the ordering is crucial for the cases when this mapping is non-injective\n(i.e. when multiple mesh axes map to the same positional axis). Then, the\norder of entries of the mapping determines a major-to-minor order on mesh axes,\naccording to which chunks of the value along the repeated dimension will be assigned.\n\nFor example, consider a mapping {'x': 1, 'y': 1} and a mesh with shape {'x': 2, 'y': 3}.\nThe second dimension of the value would get chunked into 6 pieces, and assigned to the\nmesh in a way that treats 'y' as the fastest changing (minor) dimension. In this case,\nthat would mean that a flat list of chunks would get assigned to a flattened list of\nmesh devices without any modifications. 
If the mapping was {'y': 1, 'x': 1}, then the\nmesh devices ndarray would have to be transposed before flattening and assignment.\n\"\"\"\nArrayMapping = OrderedDictType[MeshAxisName, int]\n\nAxisResource = Tuple[Optional[Tuple[Any, ...]], ...]\n\ndef array_mapping_to_axis_resources(array_mapping: ArrayMapping) -> AxisResource:\n if not array_mapping:\n return tuple()\n max_index = array_mapping[max(array_mapping, key=array_mapping.get)] # type: ignore\n reverse_map = defaultdict(list)\n for axis, index in array_mapping.items():\n reverse_map[index].append(axis)\n return tuple(\n tuple(reverse_map[i]) if reverse_map[i] else None for i in range(max_index + 1)\n )\n\ndef aval_to_result_handler(\n sharding_spec: Optional[ShardingSpec],\n indices: Optional[Tuple[Index]],\n aval: core.AbstractValue,\n global_aval: Optional[ShapedArray] = None,\n out_axis_resources: Optional[AxisResource] = None,\n global_mesh = None,\n) -> Callable[[List[xb.xla_client.Buffer]], Any]:\n \"\"\"Returns a function for handling the raw buffers of a single output aval.\n\n Args:\n sharding_spec: Indicates how the output is sharded across devices, or None\n for non-array avals.\n indices: The pre-computed result of spec_to_indices, or None for non-array\n avals.\n aval: The output AbstractValue.\n global_aval: Global output AbstractValue. Used for creating GSDAs.\n out_axis_resources: A tuple specifying the sharding of outputs.\n Used for creating GSDAs.\n global_mesh: The global device mesh that generated this output. Used\n for creating GSDAs.\n\n Returns:\n A function for handling the Buffers that will eventually be produced\n for this output. The function will return an object suitable for returning\n to the user, e.g. a ShardedDeviceArray.\n \"\"\"\n try:\n return pxla_result_handlers[type(aval)](sharding_spec, indices, aval,\n global_aval, out_axis_resources, global_mesh)\n except KeyError as err:\n raise TypeError(\"No pxla_result_handler for type: {}\".format(type(aval))\n ) from err\n\nPxlaResultHandler = Callable[..., Callable[[List[xb.xla_client.Buffer]], Any]]\npxla_result_handlers: Dict[Type[core.AbstractValue], PxlaResultHandler] = {}\npxla_result_handlers[core.AbstractUnit] = lambda *_: lambda _: core.unit\n\ndef array_result_handler(sharding_spec, indices, aval: ShapedArray, global_aval,\n out_axis_resources, global_mesh):\n if config.jax_gsda_out:\n return gsda_array_result_handler(global_aval, global_mesh, out_axis_resources)\n else:\n return sda_array_result_handler(sharding_spec, indices, aval)\n\npxla_result_handlers[ShapedArray] = array_result_handler\npxla_result_handlers[ConcreteArray] = array_result_handler\n\ndef sda_array_result_handler(sharding_spec, indices, aval: ShapedArray):\n return lambda bufs: make_sharded_device_array(aval, sharding_spec, bufs,\n indices)\n\ndef gsda_array_result_handler(global_aval, global_mesh, out_axis_resources):\n from ..experimental.gsda import GlobalShardedDeviceArray\n\n return lambda bufs: GlobalShardedDeviceArray(\n global_aval.shape, global_mesh, out_axis_resources, bufs)\n\n### lazy device-memory persistence and result handling\n\n# TODO(jblespiau): Consider removing this option.\n_USE_CPP_SDA = True\n\n\ndef make_sharded_device_array(\n aval: ShapedArray,\n sharding_spec: Optional[ShardingSpec],\n # Any is for JAX extensions implementing their own buffer.\n device_buffers: List[Union[Any, xb.xla_client.Buffer]],\n indices: Optional[Tuple[Index, ...]] = None,\n):\n \"\"\"Returns a ShardedDeviceArray implementation based on arguments.\n\n Returns either a 
C++ SDA or a Python DeviceArray when the buffers are not\n JAX buffers.\n\n Args:\n aval: The `ShapedArray` for this array.\n sharding_spec: If `None`, assumes a pmap-style ShardedDeviceArrays over the\n first dimension.\n device_buffers: If a list of Jax `Buffer` objects, a C++ SDA will be\n returned (if the version is high enough). Otherwise, a Python object will\n be returned, for JAX extensions not implementing the C++ API.\n indices: For caching purposes, will be computed if `None`.\n \"\"\"\n if sharding_spec is None:\n sharded_aval = aval.update(shape=aval.shape[1:])\n sharding_spec = _pmap_sharding_spec(aval.shape[0], aval.shape[0], 1, None,\n sharded_aval, 0)\n\n if indices is None:\n indices = spec_to_indices(aval.shape, sharding_spec)\n\n if (_USE_CPP_SDA and\n (not device_buffers or\n isinstance(device_buffers[0], xb.xla_client.Buffer))):\n return pmap_lib.ShardedDeviceArray.make(\n aval, sharding_spec, device_buffers,\n indices, aval.weak_type)\n\n return _ShardedDeviceArray(aval, sharding_spec, device_buffers, indices)\n\n\nif _USE_CPP_SDA:\n ShardedDeviceArrayBase = pmap_lib.ShardedDeviceArrayBase # type: ignore\n # We want the C++ SDA to extend the DeviceArrayBase. We want this both to\n # benefit from its methods, and to have isinstance(x, DeviceArray) return true\n ShardedDeviceArrayBase.__bases__ = ((device_array.DeviceArray,) + # type: ignore\n ShardedDeviceArrayBase.__bases__)\n _SDA_BASE_CLASS = pmap_lib.ShardedDeviceArrayBase # type: ignore\nelse:\n _SDA_BASE_CLASS: Type[device_array.DeviceArray] = device_array.DeviceArray # type: ignore\n\n\nclass _ShardedDeviceArray(_SDA_BASE_CLASS): # type: ignore\n \"\"\"A ShardedDeviceArray is an ndarray sharded across devices.\n\n The purpose of a ShardedDeviceArray is to reduce the number of transfers when\n executing replicated computations, by allowing results to persist on the\n devices that produced them. That way dispatching a similarly replicated\n computation that consumes the same sharded memory layout does not incur any\n transfers.\n\n A ShardedDeviceArray represents one logical ndarray value, and simulates the\n behavior of an ndarray so that it can be treated by user code as an ndarray;\n that is, it is only an optimization to reduce transfers.\n\n Attributes:\n aval: A ShapedArray indicating the shape and dtype of this array.\n sharding_spec: describes how this array is sharded across `device_buffers`.\n device_buffers: the buffers containing the data for this array. Each buffer\n is the same shape and on a different device. Buffers are in row-major\n order, with replication treated as an extra innermost dimension.\n indices: the result of spec_to_indices(sharding_spec). Can optionally be\n precomputed for efficiency. A list the same length as\n `device_buffers`. Each index indicates what portion of the full array is\n stored in the corresponding device buffer, i.e. `array[indices[i]] ==\n device_buffers[i].to_py()`.\n \"\"\"\n __slots__ = [\n \"aval\", \"device_buffers\", \"sharding_spec\", \"indices\",\n \"_one_replica_buffer_indices\", \"_npy_value\"\n ]\n\n def __init__(self,\n aval: ShapedArray,\n sharding_spec: ShardingSpec,\n device_buffers: List[xb.xla_client.Buffer],\n indices: Optional[Tuple[Index, ...]] = None):\n super().__init__()\n\n # TODO(skye): assert invariants. 
Keep performance in mind though.\n if indices is None:\n indices = spec_to_indices(aval.shape, sharding_spec)\n\n self.aval = aval\n self.device_buffers = device_buffers\n self.sharding_spec = sharding_spec\n self.indices = indices\n self._npy_value = None\n self._one_replica_buffer_indices = None\n if config.jax_enable_checks:\n assert type(aval) is ShapedArray\n\n @property\n def shape(self):\n return self.aval.shape\n\n @property\n def dtype(self):\n return self.aval.dtype\n\n @property\n def size(self):\n return prod(self.aval.shape)\n\n @property\n def ndim(self):\n return len(self.aval.shape)\n\n def delete(self):\n if self.device_buffers is None:\n return\n for buf in self.device_buffers:\n buf.delete()\n self.device_buffers = None\n self._npy_value = None\n\n\ndef _sda_one_replica_buffer_indices(self):\n \"\"\"Indices of buffers containing one complete copy of the array data.\"\"\"\n if self._one_replica_buffer_indices is None:\n one_replica_indices = []\n seen_index_hashes = set()\n for i, index in enumerate(self.indices):\n hashed_index = _hashable_index(index)\n if hashed_index not in seen_index_hashes:\n one_replica_indices.append(i)\n seen_index_hashes.add(hashed_index)\n self._one_replica_buffer_indices = one_replica_indices\n return self._one_replica_buffer_indices\n\n\ndef _sda_copy_to_host_async(self):\n for buffer_index in self.one_replica_buffer_indices:\n self.device_buffers[buffer_index].copy_to_host_async()\n\n\ndef _sda_check_if_deleted(self):\n if self.device_buffers is None:\n raise ValueError(\"ShardedDeviceArray has been deleted.\")\n\n\ndef _sda_block_until_ready(self):\n self._check_if_deleted()\n for buf in self.device_buffers:\n buf.block_host_until_ready()\n return self\n\n\ndef _sda_value(self):\n if self._npy_value is None:\n self.copy_to_host_async()\n npy_value = np.empty(self.aval.shape, self.aval.dtype)\n for i in self.one_replica_buffer_indices:\n npy_value[self.indices[i]] = self.device_buffers[i].to_py()\n self._npy_value = npy_value\n return self._npy_value\n\n\ndef _sda__getitem__(self, idx):\n self._check_if_deleted()\n if not isinstance(idx, tuple):\n cidx = (idx,) + (slice(None),) * (len(self.aval.shape) - 1)\n else:\n cidx = idx + (slice(None),) * (len(self.aval.shape) - len(idx))\n if self._npy_value is None:\n try:\n buf_idx = self.indices.index(cidx)\n except ValueError:\n buf_idx = None\n if buf_idx is not None:\n buf = self.device_buffers[buf_idx]\n aval = ShapedArray(buf.xla_shape().dimensions(), self.aval.dtype)\n return device_array.make_device_array(aval, None, buf)\n return super(self.__class__, self).__getitem__(idx)\n\n\ndef _sda__iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (self[i] for i in range(self.shape[0]))\n\ndef _sda__reversed__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (self[i] for i in range(self.shape[0] - 1, -1, -1))\n\n\nfor sda in [_ShardedDeviceArray, pmap_lib.ShardedDeviceArray]:\n setattr(sda, \"one_replica_buffer_indices\",\n property(_sda_one_replica_buffer_indices))\n setattr(sda, \"copy_to_host_async\", _sda_copy_to_host_async)\n setattr(sda, \"_check_if_deleted\", _sda_check_if_deleted)\n setattr(sda, \"block_until_ready\", _sda_block_until_ready)\n setattr(sda, \"_value\", property(_sda_value))\n setattr(sda, \"__getitem__\", _sda__getitem__)\n setattr(sda, \"__iter__\", _sda__iter__)\n setattr(sda, \"__reversed__\", _sda__reversed__)\n\ndel 
(_sda_one_replica_buffer_indices, _sda_copy_to_host_async,\n _sda_check_if_deleted, _sda_block_until_ready, _sda_value, _sda__getitem__)\n\n\nShardedDeviceArray: Type[object]\nif _USE_CPP_SDA:\n ShardedDeviceArray = pmap_lib.ShardedDeviceArrayBase\nelse:\n ShardedDeviceArray = _ShardedDeviceArray\n\n\n\ndef _hashable_index(idx):\n return tree_map(lambda x: (x.start, x.stop) if type(x) == slice else x,\n idx)\n\n# The fast path is handled directly in shard_args().\n# TODO(skye): is there a simpler way to rewrite this using sharding_spec?\ndef _shard_sharded_device_array_slow_path(x, devices, indices):\n candidates = defaultdict(list)\n for buf, idx in safe_zip(x.device_buffers, x.indices):\n candidates[_hashable_index(idx)].append(buf)\n\n bufs = []\n for idx, device in safe_zip(indices, devices):\n # Look up all buffers that contain the correct slice of the logical array.\n candidates_list = candidates[_hashable_index(idx)]\n if not candidates_list:\n # This array isn't sharded correctly. Reshard it via host roundtrip.\n # TODO(skye): more efficient reshard?\n return shard_arg_handlers[type(x._value)](x._value, devices, indices)\n # Try to find a candidate buffer already on the correct device,\n # otherwise copy one of them.\n for buf in candidates_list:\n if buf.device() == device:\n bufs.append(buf)\n break\n else:\n bufs.append(buf.copy_to_device(device))\n return bufs\n\n\ndef _sharded_device_array_constant_handler(c, val, canonicalize_types=True):\n return xla.pyval_to_ir_constants(c, np.asarray(val),\n canonicalize_types=canonicalize_types)\n\n\ndef _register_handlers_for_sharded_device_array(sda):\n shard_arg_handlers[sda] = _shard_sharded_device_array_slow_path\n xla.register_constant_handler(sda, _sharded_device_array_constant_handler)\n\n core.pytype_aval_mappings[sda] = ConcreteArray\n dispatch.device_put_handlers[sda] = dispatch._device_put_array\n xla.pytype_aval_mappings[sda] = op.attrgetter(\"aval\")\n xla.canonicalize_dtype_handlers[sda] = identity\n\n_register_handlers_for_sharded_device_array(_ShardedDeviceArray)\n_register_handlers_for_sharded_device_array(pmap_lib.ShardedDeviceArray)\n\n### the xla_pmap primitive and its rules are comparable to xla_call in xla.py\n\ndef xla_pmap_impl(fun: lu.WrappedFun, *args,\n backend: Optional[str],\n axis_name: core.AxisName,\n axis_size: int,\n global_axis_size: Optional[int],\n devices: Optional[Sequence[Any]],\n name: str,\n in_axes: Sequence[Optional[int]],\n out_axes_thunk: Callable[[], Sequence[Optional[int]]],\n donated_invars: Sequence[bool],\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]]):\n abstract_args = unsafe_map(xla.abstractify, args)\n compiled_fun, fingerprint = parallel_callable(\n fun, backend, axis_name, axis_size, global_axis_size, devices, name,\n in_axes, out_axes_thunk, donated_invars, global_arg_shapes,\n *abstract_args)\n\n # Don't re-abstractify args unless logging is enabled for performance.\n if config.jax_distributed_debug:\n distributed_debug_log((\"Running pmapped function\", name),\n (\"python function\", fun.f),\n (\"devices\", devices),\n (\"abstract args\", map(xla.abstractify, args)),\n (\"fingerprint\", fingerprint))\n return compiled_fun(*args)\n\n\[email protected]\ndef parallel_callable(fun: lu.WrappedFun,\n backend_name: Optional[str],\n axis_name: core.AxisName,\n axis_size: int,\n global_axis_size: Optional[int],\n devices: Optional[Sequence[Any]],\n name: str,\n in_axes: Sequence[Optional[int]],\n out_axes_thunk: Callable[[], Sequence[Optional[int]]],\n donated_invars: 
Sequence[bool],\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]],\n *avals):\n pmap_computation = lower_parallel_callable(\n fun, backend_name, axis_name, axis_size, global_axis_size, devices, name,\n in_axes, out_axes_thunk, donated_invars, global_arg_shapes, avals)\n pmap_executable = pmap_computation.compile()\n return WeakRefList([pmap_executable.unsafe_call, pmap_executable.fingerprint])\n\n\[email protected](frozen=True)\nclass ParallelCallableInfo:\n backend: Any # TODO(frostig): really xla.Backend, fix xla_bridge annotations\n axis_name: core.AxisName\n axis_size: int\n global_axis_size: Optional[int]\n devices: Optional[Sequence[xla.Device]]\n in_axes: Iterable[Optional[int]]\n out_axes_thunk: Callable[[], Sequence[Optional[int]]]\n avals: Sequence[core.AbstractValue]\n\n @maybe_cached_property\n def local_devices(self):\n if self.devices:\n out = [d for d in self.devices\n if d.process_index == xb.process_index(self.backend)]\n assert len(out) > 0\n else:\n out = None # type: ignore\n return out\n\n @maybe_cached_property\n def out_axes(self):\n return self.out_axes_thunk()\n\n\nclass ShardInfo(NamedTuple):\n sharded_avals: Sequence[core.AbstractValue]\n out_sharded_avals: Sequence[core.AbstractValue]\n global_sharded_avals: Sequence[core.AbstractValue]\n num_local_shards: int\n num_global_shards: int\n\n\nclass ReplicaInfo(NamedTuple):\n jaxpr_replicas: int\n num_local_replicas: int\n num_global_replicas: int\n\n\ndef find_replicas(jaxpr, axis_size, global_axis_size):\n # TODO(skyewm): replace this with a chain of pmaps and/or sharded_jits\n jaxpr_replicas = dispatch.jaxpr_replicas(jaxpr)\n num_local_replicas = axis_size * jaxpr_replicas\n num_global_replicas = global_axis_size * jaxpr_replicas\n return ReplicaInfo(jaxpr_replicas, num_local_replicas, num_global_replicas)\n\n\ndef tuple_args(shards: ShardInfo):\n # tuplify long arg lists for TPU\n return len(shards.global_sharded_avals) > 100\n\n\ndef stage_parallel_callable(\n pci: ParallelCallableInfo,\n fun: lu.WrappedFun,\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]]):\n sharded_avals = tuple(\n shard_aval(pci.axis_size, axis, aval) if axis is not None else aval\n for axis, aval in safe_zip(pci.in_axes, pci.avals))\n if any(s is not None for s in global_arg_shapes):\n # TODO(skye): we could take this branch unconditionally if we handled\n # grad of global_arg_shapes correctly.\n global_sharded_avals = [\n aval.update(shape=shape) if shape is not None else aval\n for shape, aval in safe_zip(global_arg_shapes, sharded_avals)]\n else:\n global_sharded_avals = sharded_avals # type: ignore\n\n with core.extend_axis_env(pci.axis_name, pci.global_axis_size, None): # type: ignore\n jaxpr, out_sharded_avals, consts = pe.trace_to_jaxpr_final(\n fun, global_sharded_avals, pe.debug_info_final(fun, \"pmap\"))\n jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)\n\n assert len(out_sharded_avals) == len(pci.out_axes), (\n len(out_sharded_avals), len(pci.out_axes))\n\n # TODO(skye,mattjj): allow more collectives on multi-host as we test them, but\n # for now raise an error\n if pci.devices is not None:\n is_multi_host_pmap = len(pci.local_devices) != len(pci.devices)\n else:\n is_multi_host_pmap = xb.process_count(pci.backend) > 1\n if is_multi_host_pmap:\n check_multihost_collective_allowlist(jaxpr)\n\n replicas = find_replicas(jaxpr, pci.axis_size, pci.global_axis_size)\n parts = find_partitions(jaxpr)\n\n num_local_shards = replicas.num_local_replicas * parts.local_num_partitions\n num_global_shards = 
replicas.num_global_replicas * parts.num_partitions\n\n shards = ShardInfo(\n sharded_avals, out_sharded_avals, global_sharded_avals,\n num_local_shards, num_global_shards)\n\n return jaxpr, consts, replicas, parts, shards\n\n\ndef lower_parallel_callable(\n fun: lu.WrappedFun,\n backend_name: Optional[str],\n axis_name: core.AxisName,\n axis_size: int,\n global_axis_size: Optional[int],\n devices: Optional[Sequence[xla.Device]],\n name: str,\n in_axes: Iterable[Optional[int]],\n out_axes_thunk: Callable[[], Sequence[Optional[int]]],\n donated_invars: Sequence[bool],\n global_arg_shapes: Sequence[Optional[Tuple[int, ...]]],\n avals: Sequence[core.AbstractValue]):\n if devices is not None and len(devices) == 0:\n raise ValueError(\"'devices' argument to pmap must be non-empty, or None.\")\n\n # Determine global_axis_size for use in AxisEnv.\n # TODO(mattjj,skyewm): revive this check (inner_pmap always False now)\n # if xb.process_count() > 1 and global_axis_size is None and inner_pmap:\n # raise ValueError(\"'axis_size' must be specified for nested multi-host pmaps\")\n if (xb.process_count() == 1 and global_axis_size is not None and\n global_axis_size != axis_size):\n raise ValueError(\n f\"Specified axis_size {global_axis_size} doesn't match received \"\n f\"axis_size {axis_size}.\")\n\n if devices is not None and backend_name is None:\n backend = xb.get_device_backend(devices[0])\n else:\n backend = xb.get_backend(backend_name)\n\n must_run_on_all_devices = False\n no_nested_sharding = False\n if global_axis_size is None:\n if xb.process_count(backend) == 1:\n global_axis_size = axis_size\n elif devices:\n # This allows each host in a multi-host pmap to run on a different number\n # of devices, but precludes nested sharding (i.e. inner pmaps or\n # sharded_jits).\n global_axis_size = len(devices)\n no_nested_sharding = True\n else:\n # This assumes all hosts run on the same number of devices. We make sure\n # this assumption is true by requiring that the pmap is run on all devices\n # (and making the further assumption that each host has the same number of\n # devices). 
Nested sharding is ok in this case.\n global_axis_size = axis_size * xb.process_count(backend)\n assert all(\n len(xb.local_devices(process_index, backend)) == xb.local_device_count(backend)\n for process_index in range(xb.process_count(backend)))\n must_run_on_all_devices = True\n\n pci = ParallelCallableInfo(\n backend, axis_name, axis_size, global_axis_size, devices, in_axes,\n out_axes_thunk, avals)\n jaxpr, consts, replicas, parts, shards = stage_parallel_callable(\n pci, fun, global_arg_shapes)\n\n if logging.vlog_is_on(2):\n logging.vlog(2, \"sharded_avals: %s\", shards.sharded_avals)\n logging.vlog(2, \"global_sharded_avals: %s\", shards.global_sharded_avals)\n logging.vlog(2, \"num_replicas: %d num_local_replicas: %d\",\n replicas.num_global_replicas, replicas.num_local_replicas)\n logging.vlog(2, \"num_partitions: %d local_num_partitions: %d\",\n parts.num_partitions, parts.local_num_partitions)\n logging.vlog(2, \"arg_parts: %s\", parts.arg_parts)\n logging.vlog(2, \"local_arg_parts: %s\", parts.local_arg_parts)\n logging.vlog(2, \"out_parts: %s\", parts.out_parts)\n logging.vlog(2, \"local_out_parts: %s\", parts.local_out_parts)\n logging.vlog(2, \"devices: %s\", devices)\n logging.vlog(2, \"local_devices: %s\", pci.local_devices)\n\n if (xb.process_count(backend) > 1 and must_run_on_all_devices and\n shards.num_local_shards != xb.local_device_count(backend)):\n if shards.num_local_shards == axis_size:\n raise ValueError(\n f\"On multi-host platforms, the input to pmapped functions must have \"\n f\"leading axis size equal to the number of local devices if no \"\n f\"`devices` argument is specified. Got axis_size={axis_size}, \"\n f\"num_local_devices={xb.local_device_count(backend)}\")\n else:\n raise ValueError(\n f\"On multi-host platforms, pmapped functions must run across all \"\n f\"devices, i.e. num_replicas * num_partitions should equal the \"\n f\"number of local devices. Got \"\n f\"num_replicas={replicas.num_local_replicas}, \"\n f\"num_partitions={parts.num_partitions}, and \"\n f\"num_local_devices={xb.local_device_count(backend)}\")\n\n if no_nested_sharding and (\n replicas.jaxpr_replicas > 1 or parts.num_partitions > 1):\n raise ValueError(\n f\"On multi-host platforms, pmapped functions that both have `devices` \"\n f\"specified and contain an inner_pmap or sharded_jit must specify an \"\n f\"`axis_size` (or remove the `devices` argument). Got nested_replicas=\"\n f\"{replicas.jaxpr_replicas} and nested_partitions={parts.num_partitions}\")\n\n log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG\n logging.log(log_priority,\n \"Compiling %s (%d) for %d devices with args %s. 
(num_replicas=%d\"\n \" num_partitions=%d)\", fun.__name__, id(fun),\n shards.num_global_shards, avals, replicas.num_global_replicas,\n parts.num_partitions)\n\n axis_env = xla.AxisEnv(\n replicas.num_global_replicas, (axis_name,), (global_axis_size,))\n\n c = xc.XlaBuilder(\"pmap_{}\".format(fun.__name__))\n xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)\n replicated_args = [axis is None for axis in in_axes]\n xla_args, donated_invars = xla._xla_callable_args(\n c, shards.global_sharded_avals, tuple_args(shards),\n replicated=replicated_args,\n partitions=parts.arg_parts,\n donated_invars=donated_invars)\n with maybe_extend_axis_env(axis_name, global_axis_size, None): # type: ignore\n ctx = xla.TranslationContext(c, backend.platform, axis_env,\n extend_name_stack(wrap_name(name, 'pmap')))\n out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)\n build_out_tuple = partial(xops.Tuple, c, out_nodes)\n if parts.out_parts is not None:\n out_tuple = xb.with_sharding(c, parts.out_parts, build_out_tuple)\n else:\n out_tuple = build_out_tuple()\n\n if backend.platform in (\"gpu\", \"tpu\"):\n donated_invars = xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple),\n donated_invars, tuple_args(shards))\n built = c.Build(out_tuple)\n\n return PmapComputation(built, pci, replicas, parts, shards)\n\n\nclass PmapComputation:\n def __init__(self, hlo, *compile_args):\n self._executable = None\n self.hlo = hlo\n self.compile_args = compile_args\n\n def compile(self):\n if self._executable is None:\n self._executable = PmapExecutable.from_hlo(self.hlo, *self.compile_args)\n return self._executable\n\n\nclass PmapExecutable:\n __slots__ = ['xla_executable', 'unsafe_call', 'fingerprint', 'in_avals']\n\n def __init__(self, xla_executable, unsafe_call, fingerprint, in_avals):\n self.xla_executable = xla_executable\n self.unsafe_call = unsafe_call\n self.fingerprint = fingerprint\n self.in_avals = in_avals\n\n @staticmethod\n def from_hlo(xla_computation,\n pci: ParallelCallableInfo,\n replicas: ReplicaInfo,\n parts: 'PartitionInfo',\n shards: ShardInfo):\n devices = pci.devices\n if devices is None:\n if shards.num_global_shards > xb.device_count(pci.backend):\n msg = (\"compiling computation that requires {} logical devices, but only {} XLA \"\n \"devices are available (num_replicas={}, num_partitions={})\")\n raise ValueError(msg.format(shards.num_global_shards,\n xb.device_count(pci.backend),\n replicas.num_global_replicas,\n parts.num_partitions))\n # On a single host, we use the platform's default device assignment to\n # potentially take advantage of device locality. On multiple hosts, the\n # default device assignment may interleave different hosts' replicas,\n # violating pmap's semantics where data is sharded across replicas in\n # row-major order. 
Instead, manually create a device assignment that ensures\n # each host is responsible for a continguous set of replicas.\n if shards.num_global_shards > shards.num_local_shards:\n # TODO(skye): use a locality-aware assignment that satisfies the above\n # constraint.\n devices = [d for process_index in range(xb.process_count(pci.backend))\n for d in xb.local_devices(process_index, pci.backend)]\n else:\n devices = xb.get_backend(pci.backend).get_default_device_assignment(\n replicas.num_global_replicas, parts.num_partitions)\n else:\n if shards.num_local_shards != len(pci.local_devices):\n local_devices_str = \", \".join(map(str, pci.local_devices))\n if shards.num_local_shards == pci.axis_size:\n raise ValueError(\n f\"Leading axis size of input to pmapped function must equal the \"\n f\"number of local devices passed to pmap. Got axis_size=\"\n f\"{pci.axis_size}, num_local_devices={len(pci.local_devices)}.\\n\"\n f\"(Local devices available to pmap: {local_devices_str})\")\n else:\n raise ValueError(\n f\"pmapped function requires {shards.num_local_shards} local \"\n f\"devices to run due to nested pmapped or other parallel \"\n f\"functions, but only {len(pci.local_devices)} are available.\\n\"\n f\"(outer axis size: {pci.axis_size}, local devices available to \"\n f\"pmap: {local_devices_str})\")\n if shards.num_global_shards != len(devices):\n raise ValueError(\"compiling computation that creates %s shards, \"\n \"but %s devices were specified\" %\n (shards.num_global_shards, len(devices)))\n\n # 'devices' may be 1D or 2D at this point (e.g.\n # get_default_device_assignment() returns 2D assignment, caller may have\n # provided 1D list of devices).\n device_assignment = tree_map(lambda d: d.id, devices)\n # Convert to 2D in case it's 1D and we have > 1 partitions.\n device_assignment = np.array(device_assignment).reshape(\n (replicas.num_global_replicas, parts.num_partitions))\n # TODO(b/162356737): Enabling SPMD partitioning causes issues with some\n # non-partitioned workloads, so disable unless needed.\n use_spmd_partitioning = parts.num_partitions > 1\n compile_options = xb.get_compile_options(\n num_replicas=replicas.num_global_replicas,\n num_partitions=parts.num_partitions,\n device_assignment=device_assignment,\n use_spmd_partitioning=use_spmd_partitioning,\n )\n compile_options.parameter_is_tupled_arguments = tuple_args(shards)\n\n local_arg_parts_ = parts.local_arg_parts or [None] * len(pci.avals)\n input_sharding_specs = [\n _pmap_sharding_spec(replicas.num_local_replicas, pci.axis_size,\n parts.local_num_partitions, arg_parts, aval, in_axis)\n if aval is not core.abstract_unit else None\n for aval, arg_parts, in_axis in safe_zip(\n shards.sharded_avals, local_arg_parts_, pci.in_axes)]\n input_indices = [spec_to_indices(aval.shape, spec)\n if spec is not None else None\n for aval, spec in safe_zip(pci.avals, input_sharding_specs)]\n nouts = len(shards.out_sharded_avals)\n\n out_parts, local_out_parts = parts.out_parts, parts.local_out_parts\n if parts.out_parts is None:\n out_parts = (None,) * nouts\n if parts.local_out_parts is None:\n local_out_parts = (None,) * nouts\n\n local_out_avals = [\n get_local_aval(aval, parts, lparts)\n for aval, parts, lparts\n in safe_zip(shards.out_sharded_avals, out_parts, local_out_parts)]\n local_unmapped_avals = [\n core.unmapped_aval(pci.axis_size, pci.axis_name, out_axis, aval)\n if out_axis is not None else aval\n for aval, out_axis in safe_zip(local_out_avals, pci.out_axes)]\n\n out_specs = [\n 
_pmap_sharding_spec(replicas.num_local_replicas, pci.axis_size,\n parts.local_num_partitions, out_parts, aval, out_axis)\n if aval is not core.abstract_unit else None\n for out_parts, aval, out_axis in safe_zip(\n local_out_parts, local_out_avals, pci.out_axes)]\n handle_outs = avals_to_results_handler(\n replicas.num_local_replicas, parts.local_num_partitions, out_specs,\n local_unmapped_avals)\n\n if hasattr(pci.backend, \"compile_replicated\"):\n execute_fun = pci.backend.compile_replicated(\n xla_computation, compile_options, input_indices, input_sharding_specs,\n handle_outs)\n # TODO(frostig): need `compile_replicated` to give us the XLA executable\n return PmapExecutable(None, execute_fun, None, pci.avals)\n\n compiled = dispatch.compile_or_get_cached(\n pci.backend, xla_computation, compile_options)\n handle_args = InputsHandler(\n compiled.local_devices(), input_sharding_specs, input_indices)\n execute_fun = partial(\n execute_replicated, compiled, pci.backend, handle_args, handle_outs)\n fingerprint = getattr(compiled, \"fingerprint\", None)\n\n return PmapExecutable(compiled, execute_fun, fingerprint, pci.avals)\n\n def call(self, *args):\n # TODO(frostig): do we need to check sharding and sharded avals?\n arg_avals = map(xla.abstractify, args)\n dispatch.check_arg_avals_for_call(self.in_avals, arg_avals)\n return self.unsafe_call(*args)\n\n\nmulti_host_supported_collectives: Set[core.Primitive] = set()\n\n\ndef check_multihost_collective_allowlist(jaxpr):\n used_collectives = set(xla.jaxpr_collectives(jaxpr))\n if not used_collectives.issubset(multi_host_supported_collectives):\n bad_collectives = used_collectives - multi_host_supported_collectives\n msg = \"using collectives that aren't supported for multi-host: {}\"\n raise TypeError(msg.format(\", \".join(map(str, bad_collectives))))\n\n\nPartitionsOrReplicated = Optional[Tuple[int, ...]]\n\nclass PartitionInfo(NamedTuple):\n arg_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n out_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n num_partitions: int\n local_arg_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n local_out_parts: Optional[Tuple[PartitionsOrReplicated, ...]]\n local_num_partitions: Optional[int]\n\ndef _find_partitions(jaxpr):\n \"\"\"Returns (in_partitions, out_partitions, num_partitions, local_in_parts,\n local_out_parts, local_num_partitions).\n \"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive.name == \"sharded_call\":\n if len(jaxpr.eqns) > 1:\n raise NotImplementedError(\n \"pmap of sharded_jit + non-sharded operations not yet implemented.\")\n num_partitions = reconcile_num_partitions(eqn.params[\"call_jaxpr\"],\n eqn.params[\"nparts\"])\n return (eqn.params[\"in_parts\"],\n eqn.params[\"out_parts_thunk\"](),\n num_partitions,\n eqn.params[\"local_in_parts\"],\n eqn.params[\"local_out_parts_thunk\"](),\n eqn.params[\"local_nparts\"])\n return None, None, 1, None, None, None\n\ndef find_partitions(jaxpr) -> PartitionInfo:\n (arg_parts, out_parts, num_partitions, local_arg_parts, local_out_parts,\n local_num_partitions) = _find_partitions(jaxpr)\n\n if local_num_partitions is None:\n local_num_partitions = num_partitions\n if local_arg_parts is None:\n local_arg_parts = arg_parts\n if local_out_parts is None:\n local_out_parts = out_parts\n\n return PartitionInfo(arg_parts, out_parts, num_partitions,\n local_arg_parts, local_out_parts, local_num_partitions)\n\n\ndef reconcile_num_partitions(jaxpr, outer_num_parts: Optional[int]):\n \"\"\"Returns the total number of partitions to use.\n\n 
Validates that any inner partitioning matches outer_num_parts if provided, and\n returns the number of partitions to use based on outer_num_parts and any inner\n partitioning.\n \"\"\"\n inner_num_parts = _inner_partitions(jaxpr, outer_num_parts)\n if outer_num_parts is None and inner_num_parts is None:\n # No partitions specified anywhere, everything is replicated.\n return 1\n if outer_num_parts is None:\n return inner_num_parts\n return outer_num_parts\n\n\ndef _inner_partitions(jaxpr, expected_num_parts: Optional[int]):\n \"\"\"Returns the total number of partitions from PartitionSpecs inside `jaxpr`.\n\n Also validates that this number matches `expected_num_parts` if provided.\n \"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive.name in [\"sharding_constraint\", \"infeed\"]:\n parts = eqn.params[\"partitions\"]\n nparts = get_num_partitions(parts)\n if expected_num_parts is None:\n expected_num_parts = nparts\n elif nparts is not None and nparts != expected_num_parts:\n # TODO(skye): raise this error as we trace the jaxpr\n raise ValueError(\n f\"with_sharding_constraint with partitions={parts} \"\n f\"(total partitions: {nparts}) doesn't match expected number of \"\n f\"partitions: {expected_num_parts}. If these partitions look \"\n f\"right, check outer sharded_jit and/or other \"\n f\"with_sharding_constraint calls.\")\n else:\n for subjaxpr in core.jaxprs_in_params(eqn.params):\n expected_num_parts = _inner_partitions(subjaxpr, expected_num_parts)\n return expected_num_parts\n\n\ndef get_num_partitions(*partitions):\n partition_specs = tree_flatten(partitions)[0]\n if len(partition_specs) == 0:\n # Everything is specified as replicated (all Nones).\n return None\n num_partitions_set = {np.prod(spec) for spec in partition_specs}\n if len(num_partitions_set) > 1:\n raise ValueError(\n f\"All partition specs must use the same number of total partitions, \"\n f\"got {partitions}, with distinct number of partitions \"\n f\"{num_partitions_set} (the total number of partitions is the product \"\n f\"of a partition spec)\")\n assert len(num_partitions_set) == 1\n return num_partitions_set.pop()\n\n\ndef get_global_aval(local_aval, global_parts: PartitionsOrReplicated,\n local_parts: PartitionsOrReplicated):\n if local_aval is core.abstract_unit:\n return local_aval\n if global_parts is None:\n return local_aval\n assert local_parts is not None\n global_shape = [dim * _safe_div(ngparts, nlparts)\n for dim, ngparts, nlparts\n in safe_zip(local_aval.shape, global_parts, local_parts)]\n return local_aval.update(shape=global_shape)\n\n\ndef get_local_aval(global_aval, global_parts: PartitionsOrReplicated,\n local_parts: PartitionsOrReplicated):\n if global_aval is core.abstract_unit:\n return global_aval\n if global_parts is None:\n return global_aval\n assert local_parts is not None\n local_shape = [_safe_div(dim, _safe_div(ngparts, nlparts))\n for dim, ngparts, nlparts\n in safe_zip(global_aval.shape, global_parts, local_parts)]\n return global_aval.update(shape=local_shape)\n\n\ndef _safe_div(x, y):\n result, ragged = divmod(x, y)\n assert not ragged, f\"{x} % {y} != 0\"\n return result\n\n\nclass InputsHandler:\n __slots__ = (\"handler\", \"local_devices\", \"sharding_specs\", \"input_indices\")\n\n def __init__(self, local_devices, sharding_specs, input_indices):\n self.handler = partial(shard_args, local_devices, input_indices)\n self.local_devices = local_devices\n self.sharding_specs = sharding_specs\n self.input_indices = input_indices\n\n def __call__(self, input_buffers):\n return 
self.handler(input_buffers)\n\n\nclass ResultsHandler:\n __slots__ = (\"handlers\", \"out_specs\", \"out_indices\", \"unmapped_local_out_avals\")\n\n def __init__(self, handlers, out_specs, out_indices, unmapped_local_out_avals):\n self.out_specs = out_specs\n self.out_indices = out_indices\n self.handlers = handlers\n self.unmapped_local_out_avals = unmapped_local_out_avals\n\n def __call__(self, out_bufs):\n return [h(bufs) for h, bufs in safe_zip(self.handlers, out_bufs)]\n\n\ndef avals_to_results_handler(\n nrep,\n npart,\n out_specs,\n unmapped_local_out_avals,\n global_out_avals: Optional[Sequence[ShapedArray]] = None,\n out_axis_resources: Optional[Sequence[AxisResource]] = None,\n global_mesh=None):\n out_indices = [spec_to_indices(aval.shape, spec)\n if aval is not core.abstract_unit else None\n for aval, spec in safe_zip(unmapped_local_out_avals, out_specs)] # pytype: disable=attribute-error\n if global_out_avals and out_axis_resources and global_mesh:\n handlers = [\n aval_to_result_handler(spec, idcs, aval, global_aval, out_axis, global_mesh)\n for spec, idcs, aval, global_aval, out_axis in safe_zip(\n out_specs, out_indices, unmapped_local_out_avals,\n global_out_avals, out_axis_resources)\n ]\n else:\n handlers = [\n aval_to_result_handler(spec, idcs, aval)\n for spec, idcs, aval, in safe_zip(out_specs, out_indices,\n unmapped_local_out_avals)\n ]\n\n return ResultsHandler(handlers, out_specs, out_indices, unmapped_local_out_avals)\n\ndef replicate(val, axis_size, nrep, devices=None, backend=None, in_axis=0):\n \"\"\"Replicates ``val`` across multiple devices.\n\n Args:\n val: the value to be replicated.\n axis_size: the length of the output, i.e. the logical number of replicas to\n create. Usually equal to `nrep`, but in the case of nested pmaps, `nrep` may\n be a multiple of `axis_size`.\n nrep: the number of replicas to create. If ``devices`` is set, must be equal\n to ``len(devices)``.\n devices: the devices to replicate across. 
If None, ``nrep`` will be used to\n generate a default device assignment.\n backend: string specifying which backend to use.\n in_axis: axis along which the value is to be replciated.\n\n Returns:\n A ShardedDeviceArray of length `axis_size` where each shard is equal to\n ``val``.\n \"\"\"\n device_count = (len(devices) if devices else xb.local_device_count(backend))\n if nrep > device_count:\n msg = (\"Cannot replicate across %d replicas because only %d local devices \"\n \"are available.\" % (nrep, device_count))\n if devices:\n msg += (\" (local devices = %s)\"\n % \", \".join(map(str, devices)) if devices else str(None))\n raise ValueError(msg)\n\n if devices is None:\n assert nrep is not None\n # TODO(skye): use different device assignment on multihost\n devices = xb.get_backend(backend).get_default_device_assignment(nrep)\n assert nrep == len(devices)\n\n aval = xla.abstractify(val) # type: ShapedArray\n if in_axis is not None:\n replicated_aval = aval.update(shape=(axis_size,) + aval.shape)\n else:\n replicated_aval = aval\n # TODO(skye): figure out how partitioning should work here\n sharding_spec = _pmap_sharding_spec(nrep, axis_size, 1, None, aval, in_axis)\n device_buffers = device_put(val, devices, replicate=True)\n return make_sharded_device_array(replicated_aval, sharding_spec,\n device_buffers)\n\n\ndef _pmap_sharding_spec(nrep, axis_size, npart, parts, sharded_aval,\n map_axis: Optional[int]) -> ShardingSpec:\n \"\"\"Sharding spec for arguments or results of a pmap.\n Args:\n nrep: number of local XLA replicas (product of local axis sizes)\n axis_size: local axis size for outer pmap\n npart: total number of XLA partitions (required by sharded_jit calls)\n parts: the partitioning of the value or None\n sharded_aval: the aval of the value inside the outer pmap, an instance of\n a ShapedArray.\n map_axis: the axis along which the value is mapped in the outer pmap\n Returns:\n A ShardingSpec.\n \"\"\"\n assert isinstance(sharded_aval, ShapedArray), sharded_aval\n replication_factor, ragged = divmod(nrep, axis_size)\n assert not ragged\n # get the sharding spec from inner sharded_jits as if we weren't in a pmap\n pspec = partitioned_sharding_spec(npart, parts, sharded_aval)\n maybe_replicate = () if replication_factor == 1 else (Replicated(replication_factor),)\n if map_axis is not None:\n sharded_in_axis = sum(not isinstance(s, NoSharding) for s in pspec.sharding[:map_axis])\n def shift_sharded_axis(a: MeshDimAssignment):\n if isinstance(a, ShardedAxis) and a.axis >= sharded_in_axis:\n return ShardedAxis(a.axis + 1)\n return a\n # replication_factor represents the product of inner pmaps, so it goes\n # after the outer pmapped axis at index 0\n return ShardingSpec(\n sharding=tuple_insert(pspec.sharding, map_axis, Unstacked(axis_size)),\n mesh_mapping=it.chain([ShardedAxis(sharded_in_axis)],\n maybe_replicate,\n map(shift_sharded_axis, pspec.mesh_mapping)))\n else:\n return ShardingSpec(\n sharding=pspec.sharding,\n mesh_mapping=(Replicated(axis_size),) + maybe_replicate + pspec.mesh_mapping)\n\ndef partitioned_sharding_spec(num_partitions: int,\n partitions: Optional[Sequence[int]],\n aval) -> ShardingSpec:\n if partitions is None:\n maybe_replicate = () if num_partitions == 1 else (Replicated(num_partitions),)\n return ShardingSpec(\n sharding=[_UNSHARDED_INSTANCE] * len(aval.shape),\n mesh_mapping=maybe_replicate)\n else:\n assert len(partitions) == len(aval.shape)\n return ShardingSpec(\n # Chunked expects a list of integers\n sharding=map(Chunked, [[x] for x in 
partitions]),\n mesh_mapping=map(ShardedAxis, range(len(partitions))))\n\n\ndef execute_replicated(compiled, backend, in_handler, out_handler, *args):\n input_bufs = in_handler(args)\n out_bufs = compiled.execute_sharded_on_local_devices(input_bufs)\n if dispatch.needs_check_special():\n for bufs in out_bufs:\n dispatch.check_special(\"parallel computation\", bufs)\n return out_handler(out_bufs)\n\n\nxla_pmap_p = core.MapPrimitive('xla_pmap')\nxla_pmap = xla_pmap_p.bind\nxla_pmap_p.def_impl(xla_pmap_impl)\n\n# Set param update handlers to update `donated_invars` just like xla_call_p\npe.call_param_updaters[xla_pmap_p] = pe.call_param_updaters[xla.xla_call_p]\nad.call_param_updaters[xla_pmap_p] = ad.call_param_updaters[xla.xla_call_p]\nad.call_transpose_param_updaters[xla_pmap_p] = \\\n ad.call_transpose_param_updaters[xla.xla_call_p]\n\ndef _pmap_translation_rule(c, axis_env,\n in_nodes, name_stack, axis_name, axis_size,\n global_axis_size, devices, name,\n call_jaxpr, *, backend=None, in_axes, out_axes,\n donated_invars, global_arg_shapes):\n del donated_invars # Unused.\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n if axis_env.names and devices is not None:\n raise ValueError(\"Nested pmap with explicit devices argument.\")\n if global_axis_size is None:\n global_axis_size = axis_size\n new_env = xla.extend_axis_env(axis_env, axis_name, global_axis_size)\n # Shard the in_nodes that are mapped\n in_avals = [v.aval for v in call_jaxpr.invars]\n in_nodes_sharded = (\n _xla_shard(c, aval, new_env, in_node, in_axis) if in_axis is not None else in_node\n for aval, in_node, in_axis in safe_zip(in_avals, in_nodes, in_axes))\n\n with maybe_extend_axis_env(axis_name, global_axis_size, None): # type: ignore\n ctx = xla.TranslationContext(\n c, backend, new_env,\n extend_name_stack(name_stack, wrap_name(name, 'pmap')))\n sharded_outs = xla.jaxpr_subcomp(ctx, call_jaxpr, (), *in_nodes_sharded)\n out_avals = [v.aval for v in call_jaxpr.outvars]\n outs = [_xla_unshard(c, aval, new_env, out_axis, shard, backend=backend)\n for aval, out_axis, shard in safe_zip(out_avals, out_axes, sharded_outs)]\n return xops.Tuple(c, outs)\n\nxla.call_translations[xla_pmap_p] = _pmap_translation_rule\nad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)\n\ndef _xla_shard(c, aval, axis_env, x, in_axis):\n if aval is core.abstract_unit:\n return x\n elif aval is core.abstract_token:\n return x\n elif isinstance(aval, ShapedArray):\n dims = list(c.get_shape(x).dimensions())\n zero = xops.Constant(c, np.zeros((), dtype=np.uint32))\n idxs = [zero] * (len(dims) - 1)\n idxs.insert(in_axis, _unravel_index(c, axis_env))\n dims_unsqueezed = dims.copy()\n dims_unsqueezed[in_axis] = 1\n dims_squeezed = dims.copy()\n dims_squeezed.pop(in_axis)\n return xops.Reshape(xops.DynamicSlice(x, idxs, dims_unsqueezed), dims_squeezed)\n else:\n raise TypeError((aval, c.get_shape(x)))\n\n# TODO(b/110096942): more efficient gather\ndef _xla_unshard(c, aval, axis_env, out_axis, x, backend):\n if aval is core.abstract_unit:\n return x\n elif aval is core.abstract_token:\n return x\n elif isinstance(aval, ShapedArray):\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n convert_bool = (np.issubdtype(aval.dtype, np.bool_)\n and xb.get_backend(backend).platform in ('cpu', 'gpu'))\n if convert_bool:\n x = xops.ConvertElementType(\n x, xla.dtype_to_primitive_type(np.dtype(np.float32)))\n\n xla_shape = 
c.get_shape(x)\n dims = list(xla_shape.dimensions())\n padded = xops.Broadcast(\n xops.Constant(c, np.array(0, xla_shape.numpy_dtype())),\n [axis_env.sizes[-1]] + dims)\n zero = xops.Constant(c, np.zeros((), dtype=np.uint32))\n idxs = [_unravel_index(c, axis_env)] + [zero] * len(dims)\n padded = xops.DynamicUpdateSlice(padded, xops.Reshape(x, [1] + dims), idxs)\n replica_groups_protos = xc.make_replica_groups(\n xla.axis_groups(axis_env, axis_env.names[-1]))\n out = xops.CrossReplicaSum(padded, replica_groups_protos)\n if out_axis != 0:\n # TODO(apaszke,mattjj): Change the indices to DynamicUpdateSlice instead\n perm = list(range(1, len(dims)))\n perm.insert(out_axis, 0)\n out = xops.Transpose(out, perm)\n\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n if convert_bool:\n nonzero = xops.Ne(out, xops.Constant(c, np.array(0, dtype=np.float32)))\n out = xops.ConvertElementType(\n nonzero, xla.dtype_to_primitive_type(np.dtype(np.bool_)))\n return out\n else:\n raise TypeError((aval, c.get_shape(x)))\n\ndef _unravel_index(c, axis_env):\n div = xops.Constant(c, np.array(axis_env.nreps // prod(axis_env.sizes),\n np.uint32))\n mod = xops.Constant(c, np.array(axis_env.sizes[-1], np.uint32))\n return xops.Rem(xops.Div(xops.ReplicaId(c), div), mod)\n\n# ------------------- xmap -------------------\n\nclass Mesh:\n\n def __init__(self, devices: np.ndarray, axis_names: Sequence[MeshAxisName]):\n assert devices.ndim == len(axis_names)\n # TODO: Make sure that devices are unique? At least with the quick and\n # dirty check that the array size is not larger than the number of\n # available devices?\n self.devices = devices.copy()\n self.devices.flags.writeable = False\n self.axis_names = tuple(axis_names)\n\n def __eq__(self, other):\n if not isinstance(other, Mesh):\n return False\n return (self.axis_names == other.axis_names and\n np.array_equal(self.devices, other.devices))\n\n def __hash__(self):\n if not hasattr(self, '_hash'):\n self._hash = hash((self.axis_names, tuple(self.devices.flat)))\n return self._hash\n\n def __setattr__(self, name, value):\n if hasattr(self, name):\n raise RuntimeError(\"Cannot reassign attributes of immutable mesh objects\")\n super().__setattr__(name, value)\n\n @property\n def shape(self):\n return OrderedDict((name, size) for name, size in safe_zip(self.axis_names, self.devices.shape))\n\n @property\n def size(self):\n return np.prod(list(self.shape.values()))\n\n @property\n def empty(self):\n return self.devices.ndim == 0\n\n @property\n def is_multi_process(self):\n return self.shape != self.local_mesh.shape\n\n @maybe_cached_property\n def local_mesh(self):\n if self.empty:\n return self\n process_index = xb.process_index()\n is_local_device = np.vectorize(\n lambda d: d.process_index == process_index, otypes=[bool])(self.devices)\n subcube_indices = []\n # We take the smallest slice of each dimension that doesn't skip any local device.\n for axis in range(self.devices.ndim):\n other_axes = tuple_delete(tuple(range(self.devices.ndim)), axis)\n # NOTE: This re-reduces over many axes multiple times, so we could definitely\n # optimize it, but I hope it won't be a bottleneck anytime soon.\n local_slices = is_local_device.any(other_axes, keepdims=False)\n nonzero_indices = np.flatnonzero(local_slices)\n start, end = int(np.min(nonzero_indices)), int(np.max(nonzero_indices))\n subcube_indices.append(slice(start, end + 1))\n subcube_indices = tuple(subcube_indices)\n # We only end up with all conditions being true if the local devices 
formed a\n # subcube of the full array. This is because we were biased towards taking a\n # \"hull\" spanned by the devices, and in case the local devices don't form a\n # subcube that hull will contain non-local devices.\n if not is_local_device[subcube_indices].all():\n raise ValueError(\"Devices connected to a single host must form a contiguous \"\n \"subcube of the global device mesh\")\n return Mesh(self.devices[subcube_indices], self.axis_names)\n\n @property\n def device_ids(self):\n assert not self.empty\n return np.vectorize(lambda d: d.id, otypes=[int])(self.devices)\n\n def __repr__(self):\n if self.empty:\n return \"Mesh([], ())\"\n return f\"Mesh({self.device_ids!r}, {self.axis_names!r})\"\n\n @maybe_cached_property\n def local_devices(self):\n process_index = xb.process_index()\n return [d for d in self.devices.flat if d.process_index == process_index]\n\n def local_to_global(self, axes: ArrayMapping, aval):\n return untile_aval_nd(self.shape, axes,\n tile_aval_nd(self.local_mesh.shape, axes, aval))\n\n def global_to_local(self, axes: ArrayMapping, aval):\n return untile_aval_nd(self.local_mesh.shape, axes,\n tile_aval_nd(self.shape, axes, aval))\n\n\ndef tile_aval_nd(axis_sizes, in_axes: ArrayMapping, aval, tiling_sizes=None):\n if tiling_sizes is None:\n tiling_sizes = axis_sizes\n if aval is core.abstract_unit:\n return aval\n assert isinstance(aval, ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, axis in in_axes.items():\n assert shape[axis] % tiling_sizes[name] == 0\n assert name not in named_shape\n named_shape[name] = axis_sizes[name]\n shape[axis] //= tiling_sizes[name]\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\ndef untile_aval_nd(axis_sizes, out_axes: ArrayMapping, aval):\n if aval is core.abstract_unit:\n return aval\n assert isinstance(aval, ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, axis in out_axes.items():\n shape[axis] *= axis_sizes[name]\n named_shape.pop(name, None) # The name might be missing --- it's a broadcast.\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\n\nclass SPMDBatchTrace(batching.BatchTrace):\n def get_axis_primitive_batcher(self, primitive, frame):\n if primitive in spmd_primitive_batchers:\n return partial(spmd_primitive_batchers[primitive],\n frame.size, frame.name, frame.main_trace.trace_type)\n return super().get_axis_primitive_batcher(primitive, frame)\n\n\nspmd_primitive_batchers: Dict[core.Primitive, Callable] = {}\n\n\ndef vtile_by_mesh(fun: lu.WrappedFun,\n mesh: Mesh,\n in_axes: Sequence[ArrayMapping],\n out_axes: Sequence[ArrayMapping]):\n # We vectorize in reversed order, because vmap is often biased towards\n # moving the batch axis to the front, and this way of stacking transforms\n # will order the batch axes according to the mesh axis order.\n # Not strictly necessary, but seems nicer than reversing it?\n for name, size in reversed(mesh.shape.items()):\n fun = batching.vtile(fun,\n tuple(a.get(name, None) for a in in_axes),\n tuple(a.get(name, None) for a in out_axes),\n tile_size=size,\n axis_name=name,\n main_type=SPMDBatchTrace)\n return fun\n\ndef lower_mesh_computation(\n fun: lu.WrappedFun,\n transformed_name: str,\n mesh: Mesh,\n in_axes: Sequence[ArrayMapping],\n out_axes: Union[Sequence[ArrayMapping], Callable[[], Sequence[ArrayMapping]]],\n donated_invars: Sequence[bool],\n spmd_lowering: bool,\n local_in_untiled_avals: Sequence[core.ShapedArray],\n tile_by_mesh_axes: bool):\n assert 
not mesh.empty\n backend = xb.get_device_backend(mesh.devices.flat[0])\n\n local_mesh = mesh.local_mesh\n global_axis_sizes = mesh.shape\n local_axis_sizes = local_mesh.shape\n\n log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG\n logging.log(log_priority,\n \"Compiling %s (%d) for %s mesh with args %s. Argument mapping: \"\n \"%s.\",\n getattr(fun, '__name__', '<unnamed function>'), id(fun),\n tuple(global_axis_sizes.items()), local_in_untiled_avals,\n in_axes)\n\n # 1. Trace to jaxpr and preprocess/verify it\n # Note that we tile by the local axis sizes, but use global axis sizes for named_shape\n in_tiled_avals = [tile_aval_nd(global_axis_sizes, aval_in_axes, aval,\n tiling_sizes=local_axis_sizes)\n for aval, aval_in_axes in safe_zip(local_in_untiled_avals, in_axes)]\n if spmd_lowering:\n # TODO: Consider handling xmap's 'vectorize' in here. We can vmap once instead of vtile twice!\n if tile_by_mesh_axes:\n assert not callable(out_axes)\n fun = vtile_by_mesh(fun, mesh, in_axes, out_axes)\n global_in_untiled_avals = [untile_aval_nd(global_axis_sizes, aval_in_axes, aval)\n for aval, aval_in_axes in safe_zip(in_tiled_avals, in_axes)]\n in_jaxpr_avals = global_in_untiled_avals\n else:\n assert tile_by_mesh_axes\n in_jaxpr_avals = in_tiled_avals\n with core.extend_axis_env_nd(mesh.shape.items()):\n jaxpr, out_jaxpr_avals, consts = pe.trace_to_jaxpr_final(fun, in_jaxpr_avals)\n if callable(out_axes):\n out_axes = out_axes()\n assert len(out_axes) == len(out_jaxpr_avals)\n if spmd_lowering:\n global_out_untiled_avals = out_jaxpr_avals\n out_tiled_avals = [tile_aval_nd(global_axis_sizes, aval_out_axes, aval)\n for aval, aval_out_axes in safe_zip(global_out_untiled_avals, out_axes)]\n else:\n out_tiled_avals = out_jaxpr_avals\n local_out_untiled_avals = [untile_aval_nd(local_axis_sizes, aval_out_axes, aval)\n for aval, aval_out_axes in safe_zip(out_tiled_avals, out_axes)]\n _sanitize_mesh_jaxpr(jaxpr)\n if local_mesh.shape != mesh.shape:\n check_multihost_collective_allowlist(jaxpr)\n jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)\n\n # 3. 
Build up the HLO\n c = xc.XlaBuilder(f\"xmap_{fun.__name__}\")\n xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)\n tuple_args = len(in_jaxpr_avals) > 100 # pass long arg lists as tuple for TPU\n in_partitions: Optional[List]\n if spmd_lowering:\n replicated_args = [False] * len(in_jaxpr_avals)\n global_sharding_spec = mesh_sharding_specs(global_axis_sizes, mesh.axis_names)\n in_partitions = [global_sharding_spec(aval, aval_in_axes).sharding_proto()\n if aval is not core.abstract_unit else None\n for aval, aval_in_axes in safe_zip(global_in_untiled_avals, in_axes)]\n out_partitions = [global_sharding_spec(aval, aval_out_axes).sharding_proto()\n for aval, aval_out_axes in safe_zip(global_out_untiled_avals, out_axes)]\n partitions_proto = True\n axis_env = xla.AxisEnv(nreps=1, names=(), sizes=()) # All named axes have been vmapped\n else:\n replicated_args = [not axis for axis in in_axes]\n in_partitions = None\n partitions_proto = False\n axis_env = xla.AxisEnv(nreps=mesh.size,\n names=tuple(global_axis_sizes.keys()),\n sizes=tuple(global_axis_sizes.values()))\n xla_args, donated_invars = xla._xla_callable_args(\n c, in_jaxpr_avals, tuple_args,\n replicated=replicated_args,\n partitions=in_partitions,\n partitions_proto=partitions_proto,\n donated_invars=donated_invars)\n with core.extend_axis_env_nd(mesh.shape.items()):\n ctx = xla.TranslationContext(\n c, backend.platform, axis_env,\n extend_name_stack(wrap_name(transformed_name, 'xmap')))\n out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)\n if spmd_lowering:\n out_partitions_t = xb.tuple_sharding_proto(out_partitions)\n out_tuple = xb.with_sharding_proto(c, out_partitions_t, xops.Tuple, c, out_nodes)\n else:\n out_tuple = xops.Tuple(c, out_nodes)\n\n if backend.platform in (\"gpu\", \"tpu\"):\n xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple), donated_invars,\n tuple_args)\n # TODO: Warn about unused donations?\n\n built = c.Build(out_tuple)\n return MeshComputation(\n built, donated_invars, mesh, local_in_untiled_avals,\n local_out_untiled_avals, (out_jaxpr_avals if spmd_lowering else None),\n in_axes, out_axes, spmd_lowering, tuple_args)\n\n\nclass MeshComputation:\n def __init__(self, hlo, donated_invars, *compile_args):\n self._executable = None\n self._hlo = hlo\n self._donated_invars = donated_invars\n self.compile_args = compile_args\n\n def hlo(self):\n # this is a method for api consistency with xla.XlaComputation\n return self._hlo\n\n def compile(self,\n _allow_propagation_to_outputs : bool = False,\n _allow_compile_replicated : bool = True) -> 'MeshExecutable':\n if self._executable is None:\n self._executable = MeshExecutable.from_hlo(\n self._hlo, *self.compile_args,\n _allow_propagation_to_outputs=_allow_propagation_to_outputs,\n _allow_compile_replicated=_allow_compile_replicated) # type: ignore\n return self._executable\n\n\nclass MeshExecutable:\n __slots__ = ['xla_executable', 'unsafe_call', '_local_in_untiled_avals']\n\n def __init__(self, xla_executable, unsafe_call, local_in_untiled_avals):\n self.xla_executable = xla_executable\n self.unsafe_call = unsafe_call\n self._local_in_untiled_avals = local_in_untiled_avals\n\n @staticmethod\n def from_hlo(computation: xc.XlaComputation,\n mesh: Mesh,\n local_in_untiled_avals: Sequence[ShapedArray],\n local_out_untiled_avals: Sequence[ShapedArray],\n global_out_avals: Optional[Sequence[ShapedArray]],\n in_axes: Sequence[ArrayMapping],\n out_axes: Sequence[ArrayMapping],\n spmd_lowering: bool, tuple_args: bool,\n 
_allow_propagation_to_outputs: bool,\n _allow_compile_replicated: bool):\n assert not mesh.empty\n backend = xb.get_device_backend(mesh.devices.flat[0])\n\n local_mesh = mesh.local_mesh\n local_axis_sizes = local_mesh.shape\n if spmd_lowering:\n num_replicas, num_partitions = 1, mesh.size\n num_local_replicas, num_local_partitions = 1, local_mesh.size\n else:\n num_replicas, num_partitions = mesh.size, 1\n num_local_replicas, num_local_partitions = local_mesh.size, 1\n device_assignment = mesh.device_ids.reshape((num_replicas, num_partitions))\n compile_options = xb.get_compile_options(\n num_replicas=num_replicas,\n num_partitions=num_partitions,\n device_assignment=device_assignment,\n use_spmd_partitioning=spmd_lowering,\n )\n compile_options.parameter_is_tupled_arguments = tuple_args\n compile_options.executable_build_options.allow_spmd_sharding_propagation_to_output = \\\n _allow_propagation_to_outputs\n\n local_sharding_spec = mesh_sharding_specs(local_axis_sizes, mesh.axis_names)\n local_input_specs = [local_sharding_spec(aval, aval_in_axes)\n if aval is not core.abstract_unit else None\n for aval, aval_in_axes in safe_zip(local_in_untiled_avals, in_axes)]\n input_indices = [spec_to_indices(aval.shape, spec)\n if spec is not None else None\n for aval, spec in safe_zip(local_in_untiled_avals, local_input_specs)]\n\n local_output_specs = [local_sharding_spec(aval, aval_out_axes)\n for aval, aval_out_axes in safe_zip(local_out_untiled_avals, out_axes)]\n out_axis_resources = [array_mapping_to_axis_resources(o) for o in out_axes]\n handle_outs = avals_to_results_handler(num_local_replicas, num_local_partitions,\n local_output_specs, local_out_untiled_avals,\n global_out_avals, out_axis_resources, mesh)\n\n if _allow_compile_replicated and hasattr(backend, \"compile_replicated\"):\n unsafe_call = backend.compile_replicated(\n computation, compile_options,\n input_indices, local_input_specs,\n handle_outs)\n xla_executable = None\n else:\n compiled = dispatch.compile_or_get_cached(backend, computation, compile_options)\n handle_args = InputsHandler(compiled.local_devices(), local_input_specs,\n input_indices)\n unsafe_call = partial(execute_replicated, compiled, backend, handle_args, handle_outs)\n xla_executable = compiled\n\n return MeshExecutable(xla_executable, unsafe_call, local_in_untiled_avals)\n\n def call(self, *args):\n arg_avals = map(xla.abstractify, args)\n ref_avals = self._local_in_untiled_avals\n dispatch.check_arg_avals_for_call(ref_avals, arg_avals)\n return self.unsafe_call(*args)\n\n\n_forbidden_primitives = {\n 'xla_pmap': 'pmap',\n 'sharded_call': 'sharded_jit',\n}\ndef _sanitize_mesh_jaxpr(jaxpr):\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n for eqn in jaxpr.eqns:\n if eqn.primitive.name in _forbidden_primitives:\n raise RuntimeError(f\"Nesting {_forbidden_primitives[eqn.primitive.name]} \"\n f\"inside xmaps not supported!\")\n core.traverse_jaxpr_params(_sanitize_mesh_jaxpr, eqn.params)\n\n\ncustom_resource_typing_rules: Dict[core.Primitive, Callable] = {}\n\ndef resource_typecheck(jaxpr, resource_env, axis_resources, what_jaxpr_thunk):\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n def _check_aval(aval, what_thunk):\n if not hasattr(aval, 'named_shape'):\n return\n resource_to_axis = {}\n for axis in aval.named_shape:\n for resource in axis_resources[axis]:\n if resource in resource_to_axis:\n other_axis = resource_to_axis[resource]\n axis, other_axis = sorted([str(axis), str(other_axis)])\n raise JAXTypeError(\n f\"Axes 
`{axis}` and `{other_axis}` are both mapped to the \"\n f\"resource `{resource}`, but they coincide in the named_shape \"\n f\"of {what_thunk()}\")\n resource_to_axis[resource] = axis\n\n what_thunk = lambda: (f\"an input to {what_jaxpr_thunk()}\")\n for v in jaxpr.constvars:\n _check_aval(v.aval, what_thunk)\n for v in jaxpr.invars:\n _check_aval(v.aval, what_thunk)\n what_thunk = lambda: (f\"a value returned from a primitive {eqn.primitive} created \"\n f\"at {source_info_util.summarize(eqn.source_info)}\")\n rec_what_jaxpr_thunk = lambda: (f\"a primitive {eqn.primitive} created at\"\n f\"{source_info_util.summarize(eqn.source_info)}\")\n for eqn in jaxpr.eqns:\n typing_rule = custom_resource_typing_rules.get(eqn.primitive, None)\n if typing_rule:\n typing_rule([v.aval for v in eqn.invars], eqn.params, eqn.source_info,\n resource_env, axis_resources)\n else:\n core.traverse_jaxpr_params(partial(resource_typecheck,\n resource_env=resource_env,\n axis_resources=axis_resources,\n what_jaxpr_thunk=rec_what_jaxpr_thunk),\n eqn.params)\n for v in eqn.outvars:\n _check_aval(v.aval, what_thunk)\n\n\ndef mesh_sharding_specs(axis_sizes, axis_names):\n mesh_axis_pos = {name: i for i, name in enumerate(axis_names)}\n # NOTE: This takes in the non-sharded avals!\n def mk_sharding_spec(aval, aval_axes):\n mesh_mapping = [Replicated(axis_size) for axis_size in axis_sizes.values()]\n if aval is core.abstract_token:\n assert not aval_axes\n return ShardingSpec([], mesh_mapping)\n sharding = [_UNSHARDED_INSTANCE] * len(aval.shape)\n next_sharded_axis = 0\n aval_shape = list(aval.shape)\n # NOTE: sorted is stable, which is important when multiple resources\n # map to the same axis.\n for name, axis in sorted(aval_axes.items(), key=lambda x: x[1]):\n assert aval_shape[axis] % axis_sizes[name] == 0, (axis_sizes[name], aval.shape[axis])\n aval_shape[axis] //= axis_sizes[name]\n if isinstance(sharding[axis], NoSharding):\n sharding[axis] = Chunked([])\n sharding[axis] = Chunked(sharding[axis].chunks + [axis_sizes[name]])\n assert isinstance(mesh_mapping[mesh_axis_pos[name]], Replicated), \\\n \"Value mapped to the same mesh axis twice\"\n mesh_mapping[mesh_axis_pos[name]] = ShardedAxis(next_sharded_axis)\n next_sharded_axis += 1\n return ShardingSpec(sharding, mesh_mapping)\n return mk_sharding_spec\n\n\n@contextmanager\ndef maybe_extend_axis_env(*args, **kwargs):\n with core.extend_axis_env(*args, **kwargs):\n yield\n\nclass DynamicAxisEnvFrame(object):\n __slots__ = [\"name\", \"pmap_trace\", \"hard_size\"]\n def __init__(self, name, pmap_trace, hard_size):\n self.name = name\n self.pmap_trace = pmap_trace\n self.hard_size = hard_size\n\nclass DynamicAxisEnv(list):\n def __contains__(self, axis_name):\n return axis_name in (frame.name for frame in self)\n\n def __getitem__(self, axis_name):\n if axis_name not in self:\n raise NameError(\"unbound axis name: {}\".format(axis_name))\n for frame in reversed(self):\n if frame.name == axis_name:\n return frame\n\n raise AssertionError\n\n @property\n def sizes(self):\n return tuple(frame.hard_size for frame in self)\n\n @property\n def nreps(self):\n return prod(frame.hard_size for frame in self)\n\nclass _ThreadLocalState(threading.local):\n def __init__(self):\n self.dynamic_axis_env = DynamicAxisEnv()\n\n_thread_local_state = _ThreadLocalState()\n\ndef device_put(x, devices: Sequence[xb.xla_client.Device], replicate: bool=False) -> List[xb.xla_client.Buffer]:\n \"\"\"Call device_put on a sequence of devices and return a flat sequence of buffers.\"\"\"\n if 
replicate:\n return list(it.chain.from_iterable(dispatch.device_put(x, device) for device in devices))\n else:\n return list(it.chain.from_iterable(dispatch.device_put(val, device) for val, device in safe_zip(x, devices)))\n"
] | [
[
"numpy.array_equal",
"numpy.min",
"numpy.asarray",
"numpy.issubdtype",
"numpy.dtype",
"numpy.flatnonzero",
"numpy.max",
"numpy.vectorize",
"numpy.broadcast_to",
"numpy.prod",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
enricovara/mne-python | [
"f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc",
"f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc",
"b8f5e5ce0da8acfeb7298c8eb1d26a75d5526eac"
] | [
"mne/time_frequency/_stft.py",
"tutorials/simulation/plot_creating_data_structures.py",
"tutorials/machine-learning/plot_sensors_decoding.py"
] | [
"from math import ceil\nimport numpy as np\n\nfrom ..fixes import rfft, irfft, rfftfreq\nfrom ..utils import logger, verbose\n\n\n@verbose\ndef stft(x, wsize, tstep=None, verbose=None):\n \"\"\"STFT Short-Term Fourier Transform using a sine window.\n\n The transformation is designed to be a tight frame that can be\n perfectly inverted. It only returns the positive frequencies.\n\n Parameters\n ----------\n x : array, shape (n_signals, n_times)\n Containing multi-channels signal.\n wsize : int\n Length of the STFT window in samples (must be a multiple of 4).\n tstep : int\n Step between successive windows in samples (must be a multiple of 2,\n a divider of wsize and smaller than wsize/2) (default: wsize/2).\n %(verbose)s\n\n Returns\n -------\n X : array, shape (n_signals, wsize // 2 + 1, n_step)\n STFT coefficients for positive frequencies with\n ``n_step = ceil(T / tstep)``.\n\n See Also\n --------\n istft\n stftfreq\n \"\"\"\n if not np.isrealobj(x):\n raise ValueError(\"x is not a real valued array\")\n\n if x.ndim == 1:\n x = x[None, :]\n\n n_signals, T = x.shape\n wsize = int(wsize)\n\n # Errors and warnings\n if wsize % 4:\n raise ValueError('The window length must be a multiple of 4.')\n\n if tstep is None:\n tstep = wsize / 2\n\n tstep = int(tstep)\n\n if (wsize % tstep) or (tstep % 2):\n raise ValueError('The step size must be a multiple of 2 and a '\n 'divider of the window length.')\n\n if tstep > wsize / 2:\n raise ValueError('The step size must be smaller than half the '\n 'window length.')\n\n n_step = int(ceil(T / float(tstep)))\n n_freq = wsize // 2 + 1\n logger.info(\"Number of frequencies: %d\" % n_freq)\n logger.info(\"Number of time steps: %d\" % n_step)\n\n X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex128)\n\n if n_signals == 0:\n return X\n\n # Defining sine window\n win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)\n win2 = win ** 2\n\n swin = np.zeros((n_step - 1) * tstep + wsize)\n for t in range(n_step):\n swin[t * tstep:t * tstep + wsize] += win2\n swin = np.sqrt(wsize * swin)\n\n # Zero-padding and Pre-processing for edges\n xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep),\n dtype=x.dtype)\n xp[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T] = x\n x = xp\n\n for t in range(n_step):\n # Framing\n wwin = win / swin[t * tstep: t * tstep + wsize]\n frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :]\n # FFT\n X[:, :, t] = rfft(frame)\n\n return X\n\n\ndef istft(X, tstep=None, Tx=None):\n \"\"\"ISTFT Inverse Short-Term Fourier Transform using a sine window.\n\n Parameters\n ----------\n X : array, shape (..., wsize / 2 + 1, n_step)\n The STFT coefficients for positive frequencies.\n tstep : int\n Step between successive windows in samples (must be a multiple of 2,\n a divider of wsize and smaller than wsize/2) (default: wsize/2).\n Tx : int\n Length of returned signal. 
If None Tx = n_step * tstep.\n\n Returns\n -------\n x : array, shape (Tx,)\n Array containing the inverse STFT signal.\n\n See Also\n --------\n stft\n \"\"\"\n # Errors and warnings\n X = np.asarray(X)\n if X.ndim < 2:\n raise ValueError(f'X must have ndim >= 2, got {X.ndim}')\n n_win, n_step = X.shape[-2:]\n signal_shape = X.shape[:-2]\n if n_win % 2 == 0:\n raise ValueError('The number of rows of the STFT matrix must be odd.')\n\n wsize = 2 * (n_win - 1)\n if tstep is None:\n tstep = wsize / 2\n\n if wsize % tstep:\n raise ValueError('The step size must be a divider of two times the '\n 'number of rows of the STFT matrix minus two.')\n\n if wsize % 2:\n raise ValueError('The step size must be a multiple of 2.')\n\n if tstep > wsize / 2:\n raise ValueError('The step size must be smaller than the number of '\n 'rows of the STFT matrix minus one.')\n\n if Tx is None:\n Tx = n_step * tstep\n\n T = n_step * tstep\n\n x = np.zeros(signal_shape + (T + wsize - tstep,), dtype=np.float64)\n\n if np.prod(signal_shape) == 0:\n return x[..., :Tx]\n\n # Defining sine window\n win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)\n # win = win / norm(win);\n\n # Pre-processing for edges\n swin = np.zeros(T + wsize - tstep, dtype=np.float64)\n for t in range(n_step):\n swin[t * tstep:t * tstep + wsize] += win ** 2\n swin = np.sqrt(swin / wsize)\n\n for t in range(n_step):\n # IFFT\n frame = irfft(X[..., t], wsize)\n # Overlap-add\n frame *= win / swin[t * tstep:t * tstep + wsize]\n x[..., t * tstep: t * tstep + wsize] += frame\n\n # Truncation\n x = x[..., (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1]\n x = x[..., :Tx].copy()\n return x\n\n\ndef stftfreq(wsize, sfreq=None): # noqa: D401\n \"\"\"Compute frequencies of stft transformation.\n\n Parameters\n ----------\n wsize : int\n Size of stft window.\n sfreq : float\n Sampling frequency. If None the frequencies are given between 0 and pi\n otherwise it's given in Hz.\n\n Returns\n -------\n freqs : array\n The positive frequencies returned by stft.\n\n See Also\n --------\n stft\n istft\n \"\"\"\n freqs = rfftfreq(wsize)\n if sfreq is not None:\n freqs *= float(sfreq)\n return freqs\n\n\ndef stft_norm2(X):\n \"\"\"Compute L2 norm of STFT transform.\n\n It takes into account that stft only return positive frequencies.\n As we use tight frame this quantity is conserved by the stft.\n\n Parameters\n ----------\n X : 3D complex array\n The STFT transforms\n\n Returns\n -------\n norms2 : array\n The squared L2 norm of every row of X.\n \"\"\"\n X2 = (X * X.conj()).real\n # compute all L2 coefs and remove first and last frequency once.\n norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1) -\n np.sum(X2[:, -1, :], axis=1))\n return norms2\n\n\ndef stft_norm1(X):\n \"\"\"Compute L1 norm of STFT transform.\n\n It takes into account that stft only return positive frequencies.\n\n Parameters\n ----------\n X : 3D complex array\n The STFT transforms\n\n Returns\n -------\n norms : array\n The L1 norm of every row of X.\n \"\"\"\n X_abs = np.abs(X)\n # compute all L1 coefs and remove first and last frequency once.\n norms = (2. * X_abs.sum(axis=(1, 2)) -\n np.sum(X_abs[:, 0, :], axis=1) - np.sum(X_abs[:, -1, :], axis=1))\n return norms\n",
"\"\"\"\n.. _tut_creating_data_structures:\n\nCreating MNE-Python data structures from scratch\n================================================\n\nThis tutorial shows how to create MNE-Python's core data structures using an\nexisting :class:`NumPy array <numpy.ndarray>` of (real or synthetic) data.\n\n.. contents:: Page contents\n :local:\n :depth: 1\n\nWe begin by importing the necessary Python modules:\n\"\"\"\n\nimport mne\nimport numpy as np\n\n\n###############################################################################\n# Creating `~mne.Info` objects\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# .. sidebar:: Info objects\n#\n# For full documentation on the `~mne.Info` object, see\n# :ref:`tut-info-class`.\n#\n# The core data structures for continuous (`~mne.io.Raw`), discontinuous\n# (`~mne.Epochs`), and averaged (`~mne.Evoked`) data all have an ``info``\n# attribute comprising an `mne.Info` object. When reading recorded data using\n# one of the functions in the ``mne.io`` submodule, `~mne.Info` objects are\n# created and populated automatically. But if we want to create a\n# `~mne.io.Raw`, `~mne.Epochs`, or `~mne.Evoked` object from scratch, we need\n# to create an appropriate `~mne.Info` object as well. The easiest way to do\n# this is with the `mne.create_info` function to initialize the required info\n# fields. Additional fields can be assigned later as one would with a regular\n# :class:`dictionary <dict>`.\n#\n# To initialize a minimal `~mne.Info` object requires a list of channel names,\n# and the sampling frequency. As a convenience for simulated data, channel\n# names can be provided as a single integer, and the names will be\n# automatically created as sequential integers (starting with ``0``):\n\n# Create some dummy metadata\nn_channels = 32\nsampling_freq = 200 # in Hertz\ninfo = mne.create_info(n_channels, sfreq=sampling_freq)\nprint(info)\n\n###############################################################################\n# You can see in the output above that, by default, the channels are assigned\n# as type \"misc\" (where it says ``chs: 32 MISC``). You can assign the channel\n# type when initializing the `~mne.Info` object if you want:\n\nch_names = [f'MEG{n:03}' for n in range(1, 10)] + ['EOG001']\nch_types = ['mag', 'grad', 'grad'] * 3 + ['eog']\ninfo = mne.create_info(ch_names, ch_types=ch_types, sfreq=sampling_freq)\nprint(info)\n\n###############################################################################\n# If the channel names follow one of the standard montage naming schemes, their\n# spatial locations can be automatically added using the\n# `~mne.Info.set_montage` method:\n\nch_names = ['Fp1', 'Fp2', 'Fz', 'Cz', 'Pz', 'O1', 'O2']\nch_types = ['eeg'] * 7\ninfo = mne.create_info(ch_names, ch_types=ch_types, sfreq=sampling_freq)\ninfo.set_montage('standard_1020')\n\n###############################################################################\n# .. sidebar:: Info consistency\n#\n# When assigning new values to the fields of an `~mne.Info` object, it is\n# important that the fields stay consistent. 
if there are ``N`` channels:\n#\n# - The length of the channel information field ``chs`` must be ``N``.\n# - The length of the ``ch_names`` field must be ``N``.\n# - The ``ch_names`` field should be consistent with the ``name``\n# field of the channel information contained in ``chs``.\n#\n# Note the new field ``dig`` that includes our seven channel locations as well\n# as theoretical values for the three\n# :term:`cardinal scalp landmarks <fiducial point>`.\n#\n# Additional fields can be added in the same way that Python dictionaries are\n# modified, using square-bracket key assignment:\n\ninfo['description'] = 'My custom dataset'\ninfo['bads'] = ['O1'] # Names of bad channels\nprint(info)\n\n###############################################################################\n# Creating `~mne.io.Raw` objects\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# .. sidebar:: Units\n#\n# The expected units for the different channel types are:\n#\n# - Volts: eeg, eog, seeg, dbs, emg, ecg, bio, ecog\n# - Teslas: mag\n# - Teslas/meter: grad\n# - Molar: hbo, hbr\n# - Amperes: dipole\n# - Arbitrary units: misc\n#\n# To create a `~mne.io.Raw` object from scratch, you can use the\n# `mne.io.RawArray` class constructor, which takes an `~mne.Info` object and a\n# :class:`NumPy array <numpy.ndarray>` of shape ``(n_channels, n_samples)``.\n# Here, we'll create some sinusoidal data and plot it:\n\ntimes = np.linspace(0, 1, sampling_freq, endpoint=False)\nsine = np.sin(20 * np.pi * times)\ncosine = np.cos(10 * np.pi * times)\ndata = np.array([sine, cosine])\n\ninfo = mne.create_info(ch_names=['10 Hz sine', '5 Hz cosine'],\n ch_types=['misc'] * 2,\n sfreq=sampling_freq)\n\nsimulated_raw = mne.io.RawArray(data, info)\nsimulated_raw.plot(show_scrollbars=False, show_scalebars=False)\n\n\n###############################################################################\n# Creating `~mne.Epochs` objects\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# To create an `~mne.Epochs` object from scratch, you can use the\n# `mne.EpochsArray` class constructor, which takes an `~mne.Info` object and a\n# :class:`NumPy array <numpy.ndarray>` of shape ``(n_epochs, n_channels,\n# n_samples)``. Here we'll create 5 epochs of our 2-channel data, and plot it.\n# Notice that we have to pass ``picks='misc'`` to the `~mne.Epochs.plot`\n# method, because by default it only plots :term:`data channels`.\n\ndata = np.array([[0.2 * sine, 1.0 * cosine],\n [0.4 * sine, 0.8 * cosine],\n [0.6 * sine, 0.6 * cosine],\n [0.8 * sine, 0.4 * cosine],\n [1.0 * sine, 0.2 * cosine]])\n\nsimulated_epochs = mne.EpochsArray(data, info)\nsimulated_epochs.plot(picks='misc', show_scrollbars=False)\n\n###############################################################################\n# Since we did not supply an events array, the `~mne.EpochsArray` constructor\n# automatically created one for us, with all epochs having the same event\n# number:\n\nprint(simulated_epochs.events[:, -1])\n\n###############################################################################\n# If we want to simulate having different experimental conditions, we can pass\n# an event array (and an event ID dictionary) to the constructor. 
Since our\n# epochs are 1 second long and have 200 samples/second, we'll put our events\n# spaced 200 samples apart, and pass ``tmin=-0.5``, so that the events\n# land in the middle of each epoch (the events are always placed at time=0 in\n# each epoch).\n\nevents = np.column_stack((np.arange(0, 1000, sampling_freq),\n np.zeros(5, dtype=int),\n np.array([1, 2, 1, 2, 1])))\nevent_dict = dict(condition_A=1, condition_B=2)\nsimulated_epochs = mne.EpochsArray(data, info, tmin=-0.5, events=events,\n event_id=event_dict)\nsimulated_epochs.plot(picks='misc', show_scrollbars=False, events=events,\n event_id=event_dict)\n\n###############################################################################\n# You could also create simulated epochs by using the normal `~mne.Epochs`\n# (not `~mne.EpochsArray`) constructor on the simulated `~mne.io.RawArray`\n# object, by creating an events array (e.g., using\n# `mne.make_fixed_length_events`) and extracting epochs around those events.\n#\n#\n# Creating `~mne.Evoked` Objects\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# If you already have data that was averaged across trials, you can use it to\n# create an `~mne.Evoked` object using the `~mne.EvokedArray` class\n# constructor. It requires an `~mne.Info` object and a data array of shape\n# ``(n_channels, n_times)``, and has an optional ``tmin`` parameter like\n# `~mne.EpochsArray` does. It also has a parameter ``nave`` indicating how many\n# trials were averaged together, and a ``comment`` parameter useful for keeping\n# track of experimental conditions, etc. Here we'll do the averaging on our\n# NumPy array and use the resulting averaged data to make our `~mne.Evoked`.\n\n# Create the Evoked object\nevoked_array = mne.EvokedArray(data.mean(axis=0), info, tmin=-0.5,\n nave=data.shape[0], comment='simulated')\nprint(evoked_array)\nevoked_array.plot()\n",
"r\"\"\"\n===============\nDecoding (MVPA)\n===============\n\n.. contents:: Contents\n :local:\n :depth: 3\n\n.. include:: ../../links.inc\n\nDesign philosophy\n=================\nDecoding (a.k.a. MVPA) in MNE largely follows the machine\nlearning API of the scikit-learn package.\nEach estimator implements ``fit``, ``transform``, ``fit_transform``, and\n(optionally) ``inverse_transform`` methods. For more details on this design,\nvisit scikit-learn_. For additional theoretical insights into the decoding\nframework in MNE :footcite:`KingEtAl2018`.\n\nFor ease of comprehension, we will denote instantiations of the class using\nthe same name as the class but in small caps instead of camel cases.\n\nLet's start by loading data for a simple two-class problem:\n\"\"\"\n# sphinx_gallery_thumbnail_number = 6\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,\n cross_val_multiscore, LinearModel, get_coef,\n Vectorizer, CSP)\n\ndata_path = sample.data_path()\n\nsubjects_dir = data_path + '/subjects'\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\ntmin, tmax = -0.200, 0.500\nevent_id = {'Auditory/Left': 1, 'Visual/Left': 3} # just use two\nraw = mne.io.read_raw_fif(raw_fname, preload=True)\n\n# The subsequent decoding analyses only capture evoked responses, so we can\n# low-pass the MEG data. Usually a value more like 40 Hz would be used,\n# but here low-pass at 20 so we can more heavily decimate, and allow\n# the examlpe to run faster. The 2 Hz high-pass helps improve CSP.\nraw.filter(2, 20)\nevents = mne.find_events(raw, 'STI 014')\n\n# Set up pick list: EEG + MEG - bad channels (modify to your needs)\nraw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more\n\n# Read epochs\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n picks=('grad', 'eog'), baseline=(None, 0.), preload=True,\n reject=dict(grad=4000e-13, eog=150e-6), decim=10)\nepochs.pick_types(meg=True, exclude='bads') # remove stim and EOG\ndel raw\n\nX = epochs.get_data() # MEG signals: n_epochs, n_meg_channels, n_times\ny = epochs.events[:, 2] # target: auditory left vs visual left\n\n###############################################################################\n# Transformation classes\n# ======================\n#\n# Scaler\n# ^^^^^^\n# The :class:`mne.decoding.Scaler` will standardize the data based on channel\n# scales. In the simplest modes ``scalings=None`` or ``scalings=dict(...)``,\n# each data channel type (e.g., mag, grad, eeg) is treated separately and\n# scaled by a constant. This is the approach used by e.g.,\n# :func:`mne.compute_covariance` to standardize channel scales.\n#\n# If ``scalings='mean'`` or ``scalings='median'``, each channel is scaled using\n# empirical measures. Each channel is scaled independently by the mean and\n# standand deviation, or median and interquartile range, respectively, across\n# all epochs and time points during :class:`~mne.decoding.Scaler.fit`\n# (during training). The :meth:`~mne.decoding.Scaler.transform` method is\n# called to transform data (training or test set) by scaling all time points\n# and epochs on a channel-by-channel basis. To perform both the ``fit`` and\n# ``transform`` operations in a single call, the\n# :meth:`~mne.decoding.Scaler.fit_transform` method may be used. 
To invert the\n# transform, :meth:`~mne.decoding.Scaler.inverse_transform` can be used. For\n# ``scalings='median'``, scikit-learn_ version 0.17+ is required.\n#\n# .. note:: Using this class is different from directly applying\n# :class:`sklearn.preprocessing.StandardScaler` or\n# :class:`sklearn.preprocessing.RobustScaler` offered by\n# scikit-learn_. These scale each *classification feature*, e.g.\n# each time point for each channel, with mean and standard\n# deviation computed across epochs, whereas\n# :class:`mne.decoding.Scaler` scales each *channel* using mean and\n# standard deviation computed across all of its time points\n# and epochs.\n#\n# Vectorizer\n# ^^^^^^^^^^\n# Scikit-learn API provides functionality to chain transformers and estimators\n# by using :class:`sklearn.pipeline.Pipeline`. We can construct decoding\n# pipelines and perform cross-validation and grid-search. However scikit-learn\n# transformers and estimators generally expect 2D data\n# (n_samples * n_features), whereas MNE transformers typically output data\n# with a higher dimensionality\n# (e.g. n_samples * n_channels * n_frequencies * n_times). A Vectorizer\n# therefore needs to be applied between the MNE and the scikit-learn steps\n# like:\n\n# Uses all MEG sensors and time points as separate classification\n# features, so the resulting filters used are spatio-temporal\nclf = make_pipeline(Scaler(epochs.info),\n Vectorizer(),\n LogisticRegression(solver='lbfgs'))\n\nscores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)\n\n# Mean scores across cross-validation splits\nscore = np.mean(scores, axis=0)\nprint('Spatio-temporal: %0.1f%%' % (100 * score,))\n\n###############################################################################\n# PSDEstimator\n# ^^^^^^^^^^^^\n# The :class:`mne.decoding.PSDEstimator`\n# computes the power spectral density (PSD) using the multitaper\n# method. It takes a 3D array as input, converts it into 2D and computes the\n# PSD.\n#\n# FilterEstimator\n# ^^^^^^^^^^^^^^^\n# The :class:`mne.decoding.FilterEstimator` filters the 3D epochs data.\n#\n# Spatial filters\n# ===============\n#\n# Just like temporal filters, spatial filters provide weights to modify the\n# data along the sensor dimension. They are popular in the BCI community\n# because of their simplicity and ability to distinguish spatially-separated\n# neural activity.\n#\n# Common spatial pattern\n# ^^^^^^^^^^^^^^^^^^^^^^\n#\n# :class:`mne.decoding.CSP` is a technique to analyze multichannel data based\n# on recordings from two classes :footcite:`Koles1991` (see also\n# https://en.wikipedia.org/wiki/Common_spatial_pattern).\n#\n# Let :math:`X \\in R^{C\\times T}` be a segment of data with\n# :math:`C` channels and :math:`T` time points. The data at a single time point\n# is denoted by :math:`x(t)` such that :math:`X=[x(t), x(t+1), ..., x(t+T-1)]`.\n# Common spatial pattern (CSP) finds a decomposition that projects the signal\n# in the original sensor space to CSP space using the following transformation:\n#\n# .. math:: x_{CSP}(t) = W^{T}x(t)\n# :label: csp\n#\n# where each column of :math:`W \\in R^{C\\times C}` is a spatial filter and each\n# row of :math:`x_{CSP}` is a CSP component. The matrix :math:`W` is also\n# called the de-mixing matrix in other contexts. 
Let\n# :math:`\\Sigma^{+} \\in R^{C\\times C}` and :math:`\\Sigma^{-} \\in R^{C\\times C}`\n# be the estimates of the covariance matrices of the two conditions.\n# CSP analysis is given by the simultaneous diagonalization of the two\n# covariance matrices\n#\n# .. math:: W^{T}\\Sigma^{+}W = \\lambda^{+}\n# :label: diagonalize_p\n# .. math:: W^{T}\\Sigma^{-}W = \\lambda^{-}\n# :label: diagonalize_n\n#\n# where :math:`\\lambda^{C}` is a diagonal matrix whose entries are the\n# eigenvalues of the following generalized eigenvalue problem\n#\n# .. math:: \\Sigma^{+}w = \\lambda \\Sigma^{-}w\n# :label: eigen_problem\n#\n# Large entries in the diagonal matrix corresponds to a spatial filter which\n# gives high variance in one class but low variance in the other. Thus, the\n# filter facilitates discrimination between the two classes.\n#\n# .. topic:: Examples\n#\n# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_eeg.py`\n# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_timefreq.py`\n#\n# .. note::\n#\n# The winning entry of the Grasp-and-lift EEG competition in Kaggle used\n# the :class:`~mne.decoding.CSP` implementation in MNE and was featured as\n# a `script of the week <sotw_>`_.\n#\n# .. _sotw: http://blog.kaggle.com/2015/08/12/july-2015-scripts-of-the-week/\n#\n# We can use CSP with these data with:\n\ncsp = CSP(n_components=3, norm_trace=False)\nclf_csp = make_pipeline(csp, LinearModel(LogisticRegression(solver='lbfgs')))\nscores = cross_val_multiscore(clf_csp, X, y, cv=5, n_jobs=1)\nprint('CSP: %0.1f%%' % (100 * scores.mean(),))\n\n###############################################################################\n# Source power comodulation (SPoC)\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# Source Power Comodulation (:class:`mne.decoding.SPoC`)\n# :footcite:`DahneEtAl2014` identifies the composition of\n# orthogonal spatial filters that maximally correlate with a continuous target.\n#\n# SPoC can be seen as an extension of the CSP where the target is driven by a\n# continuous variable rather than a discrete variable. Typical applications\n# include extraction of motor patterns using EMG power or audio patterns using\n# sound envelope.\n#\n# .. topic:: Examples\n#\n# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_spoc_CMC.py`\n#\n# xDAWN\n# ^^^^^\n# :class:`mne.preprocessing.Xdawn` is a spatial filtering method designed to\n# improve the signal to signal + noise ratio (SSNR) of the ERP responses\n# :footcite:`RivetEtAl2009`. Xdawn was originally\n# designed for P300 evoked potential by enhancing the target response with\n# respect to the non-target response. The implementation in MNE-Python is a\n# generalization to any type of ERP.\n#\n# .. topic:: Examples\n#\n# * :ref:`sphx_glr_auto_examples_preprocessing_plot_xdawn_denoising.py`\n# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_xdawn_eeg.py`\n#\n# Effect-matched spatial filtering\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# The result of :class:`mne.decoding.EMS` is a spatial filter at each time\n# point and a corresponding time course :footcite:`SchurgerEtAl2013`.\n# Intuitively, the result gives the similarity between the filter at\n# each time point and the data vector (sensors) at that time point.\n#\n# .. topic:: Examples\n#\n# * :ref:`sphx_glr_auto_examples_decoding_plot_ems_filtering.py`\n#\n# Patterns vs. 
filters\n# ^^^^^^^^^^^^^^^^^^^^\n#\n# When interpreting the components of the CSP (or spatial filters in general),\n# it is often more intuitive to think about how :math:`x(t)` is composed of\n# the different CSP components :math:`x_{CSP}(t)`. In other words, we can\n# rewrite Equation :eq:`csp` as follows:\n#\n# .. math:: x(t) = (W^{-1})^{T}x_{CSP}(t)\n# :label: patterns\n#\n# The columns of the matrix :math:`(W^{-1})^T` are called spatial patterns.\n# This is also called the mixing matrix. The example\n# :ref:`sphx_glr_auto_examples_decoding_plot_linear_model_patterns.py`\n# discusses the difference between patterns and filters.\n#\n# These can be plotted with:\n\n# Fit CSP on full data and plot\ncsp.fit(X, y)\ncsp.plot_patterns(epochs.info)\ncsp.plot_filters(epochs.info, scalings=1e-9)\n\n###############################################################################\n# Decoding over time\n# ==================\n#\n# This strategy consists in fitting a multivariate predictive model on each\n# time instant and evaluating its performance at the same instant on new\n# epochs. The :class:`mne.decoding.SlidingEstimator` will take as input a\n# pair of features :math:`X` and targets :math:`y`, where :math:`X` has\n# more than 2 dimensions. For decoding over time the data :math:`X`\n# is the epochs data of shape n_epochs x n_channels x n_times. As the\n# last dimension of :math:`X` is the time, an estimator will be fit\n# on every time instant.\n#\n# This approach is analogous to SlidingEstimator-based approaches in fMRI,\n# where here we are interested in when one can discriminate experimental\n# conditions and therefore figure out when the effect of interest happens.\n#\n# When working with linear models as estimators, this approach boils\n# down to estimating a discriminative spatial filter for each time instant.\n#\n# Temporal decoding\n# ^^^^^^^^^^^^^^^^^\n#\n# We'll use a Logistic Regression for a binary classification as machine\n# learning model.\n\n# We will train the classifier on all left visual vs auditory trials on MEG\n\nclf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))\n\ntime_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)\nscores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)\n\n# Mean scores across cross-validation splits\nscores = np.mean(scores, axis=0)\n\n# Plot\nfig, ax = plt.subplots()\nax.plot(epochs.times, scores, label='score')\nax.axhline(.5, color='k', linestyle='--', label='chance')\nax.set_xlabel('Times')\nax.set_ylabel('AUC') # Area Under the Curve\nax.legend()\nax.axvline(.0, color='k', linestyle='-')\nax.set_title('Sensor space decoding')\n\n###############################################################################\n# You can retrieve the spatial filters and spatial patterns if you explicitly\n# use a LinearModel\nclf = make_pipeline(StandardScaler(),\n LinearModel(LogisticRegression(solver='lbfgs')))\ntime_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)\ntime_decod.fit(X, y)\n\ncoef = get_coef(time_decod, 'patterns_', inverse_transform=True)\nevoked_time_gen = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])\njoint_kwargs = dict(ts_args=dict(time_unit='s'),\n topomap_args=dict(time_unit='s'))\nevoked_time_gen.plot_joint(times=np.arange(0., .500, .100), title='patterns',\n **joint_kwargs)\n\n###############################################################################\n# Temporal generalization\n# ^^^^^^^^^^^^^^^^^^^^^^^\n#\n# Temporal generalization is an 
extension of the decoding over time approach.\n# It consists in evaluating whether the model estimated at a particular\n# time instant accurately predicts any other time instant. It is analogous to\n# transferring a trained model to a distinct learning problem, where the\n# problems correspond to decoding the patterns of brain activity recorded at\n# distinct time instants.\n#\n# The object to for Temporal generalization is\n# :class:`mne.decoding.GeneralizingEstimator`. It expects as input :math:`X`\n# and :math:`y` (similarly to :class:`~mne.decoding.SlidingEstimator`) but\n# generates predictions from each model for all time instants. The class\n# :class:`~mne.decoding.GeneralizingEstimator` is generic and will treat the\n# last dimension as the one to be used for generalization testing. For\n# convenience, here, we refer to it as different tasks. If :math:`X`\n# corresponds to epochs data then the last dimension is time.\n#\n# This runs the analysis used in :footcite:`KingEtAl2014` and further detailed\n# in :footcite:`KingDehaene2014`:\n\n# define the Temporal generalization object\ntime_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc',\n verbose=True)\n\nscores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)\n\n# Mean scores across cross-validation splits\nscores = np.mean(scores, axis=0)\n\n# Plot the diagonal (it's exactly the same as the time-by-time decoding above)\nfig, ax = plt.subplots()\nax.plot(epochs.times, np.diag(scores), label='score')\nax.axhline(.5, color='k', linestyle='--', label='chance')\nax.set_xlabel('Times')\nax.set_ylabel('AUC')\nax.legend()\nax.axvline(.0, color='k', linestyle='-')\nax.set_title('Decoding MEG sensors over time')\n\n###############################################################################\n# Plot the full (generalization) matrix:\n\nfig, ax = plt.subplots(1, 1)\nim = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',\n extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)\nax.set_xlabel('Testing Time (s)')\nax.set_ylabel('Training Time (s)')\nax.set_title('Temporal generalization')\nax.axvline(0, color='k')\nax.axhline(0, color='k')\nplt.colorbar(im, ax=ax)\n\n###############################################################################\n# Projecting sensor-space patterns to source space\n# ================================================\n# If you use a linear classifier (or regressor) for your data, you can also\n# project these to source space. For example, using our ``evoked_time_gen``\n# from before:\n\ncov = mne.compute_covariance(epochs, tmax=0.)\ndel epochs\nfwd = mne.read_forward_solution(\n data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif')\ninv = mne.minimum_norm.make_inverse_operator(\n evoked_time_gen.info, fwd, cov, loose=0.)\nstc = mne.minimum_norm.apply_inverse(evoked_time_gen, inv, 1. / 9., 'dSPM')\ndel fwd, inv\n\n###############################################################################\n# And this can be visualized using :meth:`stc.plot <mne.SourceEstimate.plot>`:\nbrain = stc.plot(hemi='split', views=('lat', 'med'), initial_time=0.1,\n subjects_dir=subjects_dir)\n\n###############################################################################\n# Source-space decoding\n# =====================\n#\n# Source space decoding is also possible, but because the number of features\n# can be much larger than in the sensor space, univariate feature selection\n# using ANOVA f-test (or some other metric) can be done to reduce the feature\n# dimension. 
Interpreting decoding results might be easier in source space as\n# compared to sensor space.\n#\n# .. topic:: Examples\n#\n# * :ref:`tut_dec_st_source`\n#\n# Exercise\n# ========\n#\n# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict\n# Face vs. Scrambled)\n#\n# References\n# ==========\n# .. footbibliography::\n"
] | [
[
"numpy.abs",
"numpy.sqrt",
"numpy.asarray",
"numpy.arange",
"numpy.isrealobj",
"numpy.prod",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.linspace",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.array",
"numpy.zeros"
],
[
"numpy.diag",
"sklearn.linear_model.LogisticRegression",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.mean",
"sklearn.preprocessing.StandardScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
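# Editor's illustration (not part of the dumped row above). The MNE decoding tutorial embedded
# in that row states the CSP relations x_CSP(t) = W^T x(t) and x(t) = (W^{-1})^T x_CSP(t),
# i.e. spatial filters are the columns of W and spatial patterns are the columns of (W^{-1})^T.
# This is a NumPy-only sketch of that algebra: W below is a hypothetical random demixing matrix
# standing in for a fitted CSP solution, not anything produced by mne.decoding.CSP itself.
import numpy as np

rng = np.random.default_rng(0)
C, T = 4, 100                      # channels and time points (arbitrary toy sizes)
X = rng.standard_normal((C, T))    # toy multichannel segment, shape (C, T)
W = rng.standard_normal((C, C))    # stand-in demixing matrix (invertible with prob. 1)

X_csp = W.T @ X                    # apply the spatial filters: x_CSP(t) = W^T x(t)
patterns = np.linalg.inv(W).T      # spatial patterns = columns of (W^{-1})^T
X_rec = patterns @ X_csp           # mix the CSP components back: x(t) = (W^{-1})^T x_CSP(t)

assert np.allclose(X, X_rec)       # reconstruction recovers the original segment exactly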
scottyhq/xarray-sentinel | [
"3899a86e5bf5d56454e7467d9231bc97ebab8fe1"
] | [
"xarray_sentinel/sentinel1.py"
] | [
"\"\"\"Map Sentinel-1 data products to xarray.\n\nReferences:\n - Sentinel-1 document library: https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-1-sar/document-library\n - Sentinel-1 Product Specification v3.9 07 May 2021 S1-RS-MDA-52-7441-3-9 documenting IPF 3.40\n https://sentinel.esa.int/documents/247904/1877131/S1-RS-MDA-52-7441-3-9-2_Sentinel-1ProductSpecification.pdf\n - Sentinel-1 Product Specification v3.7 27 February 2020 S1-RS-MDA-52-7441 documenting IPF 3.30\n https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Specification\n\"\"\"\n\nimport contextlib\nimport os\nimport typing as T\nimport warnings\n\nimport fsspec\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\nfrom . import conventions, esa_safe\n\nSPEED_OF_LIGHT = 299_792_458 # m / s\nONE_SECOND = np.timedelta64(1, \"s\")\n\n\ndef get_fs_path(\n urlpath_or_path: esa_safe.PathType,\n fs: T.Optional[fsspec.AbstractFileSystem] = None,\n storage_options: T.Optional[T.Dict[str, T.Any]] = None,\n) -> T.Tuple[fsspec.AbstractFileSystem, str]:\n if fs is not None and storage_options is not None:\n raise TypeError(\"only one of 'fs' and 'storage_options' can be not None\")\n if fs is None:\n fs, _, paths = fsspec.get_fs_token_paths(\n urlpath_or_path, storage_options=storage_options\n )\n if len(paths) == 0:\n raise ValueError(f\"file or object not found {urlpath_or_path!r}\")\n elif len(paths) > 1:\n raise ValueError(f\"multiple files or objects found {urlpath_or_path!r}\")\n path = paths[0]\n else:\n path = str(urlpath_or_path)\n return fs, path\n\n\ndef normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:\n if group is None:\n group = \"\"\n if group.startswith(\"/\"):\n group = group[1:]\n burst_index = None\n parent_group, _, last_name = group.rpartition(\"/\")\n if parent_group.count(\"/\") == 1 and last_name.isnumeric():\n burst_index = int(last_name)\n group = parent_group\n return group, burst_index\n\n\ndef open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:\n calibration_vectors = esa_safe.parse_tag_as_list(\n calibration, \".//calibrationVector\", \"calibration\"\n )\n\n azimuth_time_list = []\n pixel_list = []\n line_list = []\n sigmaNought_list = []\n betaNought_list = []\n gamma_list = []\n dn_list = []\n for vector in calibration_vectors:\n azimuth_time_list.append(vector[\"azimuthTime\"])\n line_list.append(vector[\"line\"])\n pixel = np.fromstring(vector[\"pixel\"][\"$\"], dtype=int, sep=\" \") # type: ignore\n pixel_list.append(pixel)\n sigmaNought = np.fromstring(vector[\"sigmaNought\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n sigmaNought_list.append(sigmaNought)\n betaNought = np.fromstring(vector[\"betaNought\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n betaNought_list.append(betaNought)\n gamma = np.fromstring(vector[\"gamma\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n gamma_list.append(gamma)\n dn = np.fromstring(vector[\"dn\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n dn_list.append(dn)\n\n pixel = np.array(pixel_list)\n if not np.allclose(pixel, pixel[0]):\n raise ValueError(\n \"Unable to organise calibration vectors in a regular line-pixel grid\"\n )\n data_vars = {\n \"azimuth_time\": (\"line\", [np.datetime64(dt) for dt in azimuth_time_list]),\n \"sigmaNought\": ((\"line\", \"pixel\"), sigmaNought_list),\n \"betaNought\": ((\"line\", \"pixel\"), betaNought_list),\n \"gamma\": ((\"line\", \"pixel\"), gamma_list),\n \"dn\": ((\"line\", \"pixel\"), dn_list),\n }\n 
coords = {\"line\": line_list, \"pixel\": pixel_list[0]}\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef open_noise_range_dataset(noise: esa_safe.PathType) -> xr.Dataset:\n noise_vectors = esa_safe.parse_tag_as_list(noise, \".//noiseRangeVector\", \"noise\")\n\n azimuth_time_list = []\n pixel_list = []\n line_list = []\n noiseRangeLut_list = []\n for vector in noise_vectors:\n azimuth_time_list.append(vector[\"azimuthTime\"])\n line_list.append(vector[\"line\"])\n pixel = np.fromstring(vector[\"pixel\"][\"$\"], dtype=int, sep=\" \") # type: ignore\n pixel_list.append(pixel)\n noiseRangeLut = np.fromstring(vector[\"noiseRangeLut\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n noiseRangeLut_list.append(noiseRangeLut)\n\n pixel = np.array(pixel_list)\n if not np.allclose(pixel, pixel[0]):\n raise ValueError(\n \"Unable to organise noise vectors in a regular line-pixel grid\"\n )\n data_vars = {\n \"azimuth_time\": (\"line\", [np.datetime64(dt) for dt in azimuth_time_list]),\n \"noiseRangeLut\": ((\"line\", \"pixel\"), noiseRangeLut_list),\n }\n coords = {\"line\": line_list, \"pixel\": pixel_list[0]}\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef open_noise_azimuth_dataset(noise: esa_safe.PathType) -> xr.Dataset:\n noise_vectors = esa_safe.parse_tag_as_list(noise, \".//noiseAzimuthVector\", \"noise\")\n\n first_range_sample = []\n line_list = []\n noiseAzimuthLut_list = []\n for vector in noise_vectors:\n first_range_sample.append(vector[\"firstRangeSample\"])\n line = np.fromstring(vector[\"line\"][\"$\"], dtype=int, sep=\" \") # type: ignore\n line_list.append(line)\n noiseAzimuthLut = np.fromstring(vector[\"noiseAzimuthLut\"][\"$\"], dtype=np.float32, sep=\" \") # type: ignore\n noiseAzimuthLut_list.append(noiseAzimuthLut)\n\n # BROKEN: GRDs have line and noiseAzimuthLut of different size, we take the first one\n data_vars = {}\n coords = {}\n if first_range_sample:\n data_vars[\"noiseAzimuthLut\"] = (\"line\", noiseAzimuthLut_list[0])\n coords[\"line\"] = line_list[0]\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef open_coordinate_conversion_dataset(\n annotation_path: esa_safe.PathType,\n) -> xr.Dataset:\n coordinate_conversion = esa_safe.parse_tag_as_list(\n annotation_path, \".//coordinateConversionList/coordinateConversion\"\n )\n\n gr0 = []\n sr0 = []\n azimuth_time = []\n slant_range_time = []\n srgrCoefficients: T.List[T.List[float]] = []\n grsrCoefficients: T.List[T.List[float]] = []\n for values in coordinate_conversion:\n sr0.append(values[\"sr0\"])\n gr0.append(values[\"gr0\"])\n azimuth_time.append(values[\"azimuthTime\"])\n slant_range_time.append(values[\"slantRangeTime\"])\n srgrCoefficients.append(\n [float(v) for v in values[\"srgrCoefficients\"][\"$\"].split()]\n )\n grsrCoefficients.append(\n [float(v) for v in values[\"grsrCoefficients\"][\"$\"].split()]\n )\n\n coords: T.Dict[str, T.Any] = {}\n data_vars: T.Dict[str, T.Any] = {}\n if srgrCoefficients:\n coords[\"azimuth_time\"] = [np.datetime64(dt) for dt in azimuth_time]\n coords[\"degree\"] = list(range(len(srgrCoefficients[0])))\n\n data_vars[\"gr0\"] = (\"azimuth_time\", gr0)\n data_vars[\"sr0\"] = (\"azimuth_time\", sr0)\n data_vars[\"slant_range_time\"] = (\"azimuth_time\", slant_range_time)\n data_vars[\"srgrCoefficients\"] = ((\"azimuth_time\", \"degree\"), srgrCoefficients)\n data_vars[\"grsrCoefficients\"] = ((\"azimuth_time\", \"degree\"), grsrCoefficients)\n\n return xr.Dataset(data_vars=data_vars, coords=coords)\n\n\ndef 
open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n geolocation_grid_points = esa_safe.parse_tag_as_list(\n annotation, \".//geolocationGridPoint\"\n )\n\n azimuth_time = []\n slant_range_time = []\n line_set = set()\n pixel_set = set()\n for ggp in geolocation_grid_points:\n if ggp[\"line\"] not in line_set:\n azimuth_time.append(np.datetime64(ggp[\"azimuthTime\"]))\n line_set.add(ggp[\"line\"])\n if ggp[\"pixel\"] not in pixel_set:\n slant_range_time.append(ggp[\"slantRangeTime\"])\n pixel_set.add(ggp[\"pixel\"])\n shape = (len(azimuth_time), len(slant_range_time))\n dims = (\"azimuth_time\", \"slant_range_time\")\n data_vars = {\n \"latitude\": (dims, np.full(shape, np.nan)),\n \"longitude\": (dims, np.full(shape, np.nan)),\n \"height\": (dims, np.full(shape, np.nan)),\n \"incidenceAngle\": (dims, np.full(shape, np.nan)),\n \"elevationAngle\": (dims, np.full(shape, np.nan)),\n }\n line = sorted(line_set)\n pixel = sorted(pixel_set)\n for ggp in geolocation_grid_points:\n for var in data_vars:\n j = line.index(ggp[\"line\"])\n i = pixel.index(ggp[\"pixel\"])\n data_vars[var][1][j, i] = ggp[var]\n\n ds = xr.Dataset(\n data_vars=data_vars,\n coords={\n \"azimuth_time\": [np.datetime64(dt) for dt in azimuth_time],\n \"slant_range_time\": slant_range_time,\n \"line\": (\"azimuth_time\", line),\n \"pixel\": (\"slant_range_time\", pixel),\n },\n )\n return ds\n\n\ndef open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n attitudes = esa_safe.parse_tag_as_list(annotation, \".//attitude\")\n\n variables = [\"q0\", \"q1\", \"q2\", \"q3\", \"wx\", \"wy\", \"wz\", \"pitch\", \"roll\", \"yaw\"]\n azimuth_time: T.List[T.Any] = []\n data_vars: T.Dict[str, T.Any] = {var: (\"azimuth_time\", []) for var in variables}\n for attitude in attitudes:\n azimuth_time.append(attitude[\"time\"])\n for var in variables:\n data_vars[var][1].append(attitude[var])\n\n ds = xr.Dataset(\n data_vars=data_vars,\n coords={\"azimuth_time\": [np.datetime64(dt) for dt in azimuth_time]},\n )\n\n return ds\n\n\ndef open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n orbits = esa_safe.parse_tag_as_list(annotation, \".//orbit\")\n\n reference_system = orbits[0][\"frame\"]\n variables = [\"position\", \"velocity\"]\n data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}\n azimuth_time: T.List[T.Any] = []\n for orbit in orbits:\n azimuth_time.append(orbit[\"time\"])\n data[\"position\"][0].append(orbit[\"position\"][\"x\"])\n data[\"position\"][1].append(orbit[\"position\"][\"y\"])\n data[\"position\"][2].append(orbit[\"position\"][\"z\"])\n data[\"velocity\"][0].append(orbit[\"velocity\"][\"x\"])\n data[\"velocity\"][1].append(orbit[\"velocity\"][\"y\"])\n data[\"velocity\"][2].append(orbit[\"velocity\"][\"z\"])\n if orbit[\"frame\"] != reference_system:\n warnings.warn(\n \"reference_system is not consistent in all the state vectors. 
\"\n )\n reference_system = None\n\n position = xr.Variable(data=data[\"position\"], dims=(\"axis\", \"azimuth_time\")) # type: ignore\n velocity = xr.Variable(data=data[\"velocity\"], dims=(\"axis\", \"azimuth_time\")) # type: ignore\n\n attrs = {}\n if reference_system is not None:\n attrs.update({\"reference_system\": reference_system})\n\n ds = xr.Dataset(\n data_vars={\"position\": position, \"velocity\": velocity},\n attrs=attrs,\n coords={\n \"azimuth_time\": [np.datetime64(dt) for dt in azimuth_time],\n \"axis\": [0, 1, 2],\n },\n )\n\n return ds\n\n\ndef open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n dc_estimates = esa_safe.parse_tag_as_list(annotation, \".//dcEstimate\")\n\n azimuth_time = []\n t0 = []\n data_dc_poly = []\n for dc_estimate in dc_estimates:\n azimuth_time.append(dc_estimate[\"azimuthTime\"])\n t0.append(dc_estimate[\"t0\"])\n data_dc_poly.append(\n [float(c) for c in dc_estimate[\"dataDcPolynomial\"][\"$\"].split()]\n )\n\n ds = xr.Dataset(\n data_vars={\n \"t0\": (\"azimuth_time\", t0),\n \"data_dc_polynomial\": ((\"azimuth_time\", \"degree\"), data_dc_poly),\n },\n coords={\n \"azimuth_time\": [np.datetime64(at) for at in azimuth_time],\n \"degree\": list(range(len(data_dc_poly[0]))),\n },\n )\n return ds\n\n\ndef open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:\n azimuth_fm_rates = esa_safe.parse_tag_as_list(annotation, \".//azimuthFmRate\")\n\n azimuth_time = []\n t0 = []\n azimuth_fm_rate_poly = []\n for azimuth_fm_rate in azimuth_fm_rates:\n azimuth_time.append(azimuth_fm_rate[\"azimuthTime\"])\n t0.append(azimuth_fm_rate[\"t0\"])\n azimuth_fm_rate_poly.append(\n [float(c) for c in azimuth_fm_rate[\"azimuthFmRatePolynomial\"][\"$\"].split()]\n )\n\n ds = xr.Dataset(\n data_vars={\n \"t0\": (\"azimuth_time\", t0),\n \"azimuth_fm_rate_polynomial\": (\n (\"azimuth_time\", \"degree\"),\n azimuth_fm_rate_poly,\n ),\n },\n coords={\n \"azimuth_time\": [np.datetime64(at) for at in azimuth_time],\n \"degree\": list(range(len(azimuth_fm_rate_poly[0]))),\n },\n )\n return ds\n\n\ndef find_available_groups(\n product_files: T.Dict[str, T.Tuple[str, str, str, str, str]],\n product_path: str,\n check_files_exist: bool = False,\n fs: fsspec.AbstractFileSystem = fsspec.filesystem(\"file\"),\n) -> T.Dict[str, T.List[str]]:\n groups: T.Dict[str, T.List[str]] = {}\n for path, (type, _, swath, polarization, _) in product_files.items():\n swath_pol_group = f\"{swath}/{polarization}\".upper()\n abspath = os.path.join(product_path, os.path.normpath(path))\n if check_files_exist:\n if not fs.exists(abspath):\n continue\n if type == \"s1Level1ProductSchema\":\n groups[swath.upper()] = [\"\"]\n groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])\n for metadata_group in [\n \"orbit\",\n \"attitude\",\n \"azimuth_fm_rate\",\n \"dc_estimate\",\n \"gcp\",\n \"coordinate_conversion\",\n ]:\n groups[f\"{swath_pol_group}/{metadata_group}\"] = [abspath]\n elif type == \"s1Level1CalibrationSchema\":\n groups[f\"{swath_pol_group}/calibration\"] = [abspath]\n elif type == \"s1Level1NoiseSchema\":\n groups[f\"{swath_pol_group}/noise_range\"] = [abspath]\n groups[f\"{swath_pol_group}/noise_azimuth\"] = [abspath]\n elif type == \"s1Level1MeasurementSchema\":\n groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])\n\n return groups\n\n\ndef open_pol_dataset(\n measurement: esa_safe.PathOrFileType,\n annotation: esa_safe.PathOrFileType,\n fs: T.Optional[fsspec.AbstractFileSystem] = None,\n) -> 
xr.Dataset:\n\n product_information = esa_safe.parse_tag(annotation, \".//productInformation\")\n image_information = esa_safe.parse_tag(annotation, \".//imageInformation\")\n swath_timing = esa_safe.parse_tag(annotation, \".//swathTiming\")\n incidence_angle_mid_swath = image_information[\"incidenceAngleMidSwath\"]\n\n number_of_samples = image_information[\"numberOfSamples\"]\n first_slant_range_time = image_information[\"slantRangeTime\"]\n slant_range_time_interval = 1 / product_information[\"rangeSamplingRate\"]\n\n number_of_lines = image_information[\"numberOfLines\"]\n first_azimuth_time = image_information[\"productFirstLineUtcTime\"]\n azimuth_time_interval = image_information[\"azimuthTimeInterval\"]\n number_of_bursts = swath_timing[\"burstList\"][\"@count\"]\n range_pixel_spaxing = image_information[\"rangePixelSpacing\"]\n anx_datetime = image_information[\"ascendingNodeTime\"]\n\n attrs = {\n \"sar:center_frequency\": product_information[\"radarFrequency\"] / 10 ** 9,\n \"sar:pixel_spacing_azimuth\": image_information[\"azimuthPixelSpacing\"],\n \"sar:pixel_spacing_range\": range_pixel_spaxing,\n \"azimuth_time_interval\": azimuth_time_interval,\n \"slant_range_time_interval\": slant_range_time_interval,\n \"incidence_angle_mid_swath\": incidence_angle_mid_swath,\n \"sat:anx_datetime\": anx_datetime + \"Z\",\n }\n encoding = {}\n swap_dims = {}\n chunks: T.Union[None, T.Dict[str, int]] = None\n\n azimuth_time = pd.date_range(\n start=first_azimuth_time,\n periods=number_of_lines,\n freq=pd.Timedelta(azimuth_time_interval, \"s\"),\n ).values\n if number_of_bursts == 0:\n swap_dims = {\"line\": \"azimuth_time\", \"pixel\": \"slant_range_time\"}\n else:\n lines_per_burst = swath_timing[\"linesPerBurst\"]\n attrs.update(\n {\n \"azimuth_steering_rate\": product_information[\"azimuthSteeringRate\"],\n \"number_of_bursts\": number_of_bursts,\n \"lines_per_burst\": lines_per_burst,\n }\n )\n for burst_index, burst in enumerate(swath_timing[\"burstList\"][\"burst\"]):\n first_azimuth_time_burst = burst[\"azimuthTime\"]\n azimuth_time_burst = pd.date_range(\n start=first_azimuth_time_burst,\n periods=lines_per_burst,\n freq=pd.Timedelta(azimuth_time_interval, \"s\"),\n )\n azimuth_time[\n lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)\n ] = azimuth_time_burst\n\n # chunk at burst boundaries if dask is present\n try:\n import dask # noqa\n\n encoding[\"preferred_chunks\"] = {\"line\": lines_per_burst}\n chunks = {}\n except ModuleNotFoundError:\n pass\n\n coords = {\n \"pixel\": np.arange(0, number_of_samples, dtype=int),\n \"line\": np.arange(0, number_of_lines, dtype=int),\n \"azimuth_time\": (\"line\", azimuth_time),\n }\n\n if product_information[\"projection\"] == \"Slant Range\":\n slant_range_time = np.linspace(\n first_slant_range_time,\n first_slant_range_time\n + slant_range_time_interval * (number_of_samples - 1),\n number_of_samples,\n )\n coords[\"slant_range_time\"] = (\"pixel\", slant_range_time)\n elif product_information[\"projection\"] == \"Ground Range\":\n ground_range = np.linspace(\n 0,\n range_pixel_spaxing * (number_of_samples - 1),\n number_of_samples,\n )\n coords[\"ground_range\"] = (\"pixel\", ground_range)\n swap_dims = {\"line\": \"azimuth_time\", \"pixel\": \"ground_range\"}\n else:\n raise ValueError(f\"unknown projection {product_information['projection']}\")\n\n # temporary ugly work-around to get fsspec support with rasterio >= 1.3a3\n # the try block uses fsspec if rasterio >= 1.3a3 is installed\n # the except block falls back 
to standard file based rasterio\n # the with is needed to avoid polluting stderr when the try block fails\n with contextlib.redirect_stderr(open(\"/dev/null\", \"w\")):\n try:\n arr = xr.open_dataarray(fs.open(measurement), engine=\"rasterio\", chunks=chunks) # type: ignore\n except AttributeError:\n arr = xr.open_dataarray(measurement, engine=\"rasterio\") # type: ignore\n\n arr = arr.squeeze(\"band\").drop_vars([\"band\", \"spatial_ref\"])\n arr = arr.rename({\"y\": \"line\", \"x\": \"pixel\"})\n arr = arr.assign_coords(coords)\n arr = arr.swap_dims(swap_dims)\n\n arr.attrs.update(attrs)\n arr.encoding.update(encoding)\n\n return xr.Dataset(attrs=attrs, data_vars={\"measurement\": arr})\n\n\ndef find_bursts_index(\n pol_dataset: xr.Dataset,\n azimuth_anx_time: float,\n use_center: bool = False,\n) -> int:\n lines_per_burst = pol_dataset.attrs[\"lines_per_burst\"]\n anx_datetime = np.datetime64(pol_dataset.attrs[\"sat:anx_datetime\"].replace(\"Z\", \"\"))\n azimuth_anx_time = pd.Timedelta(azimuth_anx_time, unit=\"s\")\n if use_center:\n azimuth_anx_time_center = (\n pol_dataset.azimuth_time[lines_per_burst // 2 :: lines_per_burst]\n - anx_datetime\n )\n distance = abs(azimuth_anx_time_center - azimuth_anx_time)\n else:\n azimuth_anx_time_first_line = (\n pol_dataset.azimuth_time[::lines_per_burst] - anx_datetime\n )\n distance = abs(azimuth_anx_time_first_line - azimuth_anx_time)\n return distance.argmin().item() # type: ignore\n\n\ndef crop_burst_dataset(\n pol_dataset: xr.Dataset,\n burst_index: T.Optional[int] = None,\n azimuth_anx_time: T.Optional[float] = None,\n use_center: bool = False,\n) -> xr.Dataset:\n if (burst_index is not None) and (azimuth_anx_time is not None):\n raise TypeError(\n \"only one keyword between 'burst_index' and 'azimuth_anx_time' must be defined\"\n )\n\n if burst_index is None:\n if azimuth_anx_time is not None:\n burst_index = find_bursts_index(\n pol_dataset, azimuth_anx_time, use_center=use_center\n )\n else:\n raise TypeError(\n \"one keyword between 'burst_index' and 'azimuth_anx_time' must be defined\"\n )\n\n if burst_index < 0 or burst_index >= pol_dataset.attrs[\"number_of_bursts\"]:\n raise IndexError(f\"burst_index={burst_index} out of bounds\")\n\n lines_per_burst = pol_dataset.attrs[\"lines_per_burst\"]\n ds = pol_dataset.sel(\n line=slice(\n lines_per_burst * burst_index, lines_per_burst * (burst_index + 1) - 1\n )\n )\n\n anx_datetime = np.datetime64(pol_dataset.attrs[\"sat:anx_datetime\"].replace(\"Z\", \"\"))\n burst_azimuth_anx_times = ds.azimuth_time - anx_datetime\n ds.attrs[\"azimuth_anx_time\"] = burst_azimuth_anx_times.values[0] / ONE_SECOND\n ds = ds.swap_dims({\"line\": \"azimuth_time\", \"pixel\": \"slant_range_time\"})\n ds.attrs[\"burst_index\"] = burst_index\n\n return ds\n\n\ndef mosaic_slc_iw(slc_iw_image: xr.Dataset, crop: int = 90) -> xr.Dataset:\n bursts = []\n for i in range(slc_iw_image.attrs[\"number_of_bursts\"]):\n burst = crop_burst_dataset(slc_iw_image, burst_index=i)\n bursts.append(burst.isel(azimuth_time=slice(crop, -crop)))\n return xr.concat(bursts, dim=\"azimuth_time\")\n\n\ndef calibrate_amplitude(\n digital_number: xr.DataArray, calibration_lut: xr.DataArray\n) -> xr.DataArray:\n calibration = calibration_lut.interp(\n line=digital_number.line,\n pixel=digital_number.pixel,\n ).astype(np.float32)\n amplitude = digital_number / calibration\n amplitude.attrs.update(digital_number.attrs)\n try:\n lut_name = calibration_lut.attrs[\"long_name\"].partition(\"calibration LUT\")[0]\n amplitude.attrs[\"long_name\"] = 
f\"amplitude for {lut_name}\"\n amplitude.attrs[\"units\"] = calibration.attrs[\"units\"]\n except KeyError:\n pass\n return amplitude\n\n\ndef calibrate_intensity(\n digital_number: xr.DataArray,\n calibration_lut: xr.DataArray,\n as_db: bool = False,\n min_db: T.Optional[float] = -40.0,\n) -> xr.DataArray:\n amplitude = calibrate_amplitude(digital_number, calibration_lut)\n intensity = abs(amplitude) ** 2\n if as_db:\n intensity = 10.0 * np.log10(intensity)\n if min_db is not None:\n intensity = np.maximum(intensity, min_db)\n intensity.attrs.update(amplitude.attrs)\n intensity.attrs[\"units\"] = \"dB\"\n else:\n intensity.attrs.update(amplitude.attrs)\n intensity.attrs[\"units\"] = \"m2 m-2\"\n try:\n lut_name = amplitude.attrs[\"long_name\"].partition(\"amplitude for \")[2]\n intensity.attrs[\"long_name\"] = lut_name\n except KeyError:\n pass\n return intensity\n\n\ndef slant_range_time_to_ground_range(\n azimuth_time: xr.DataArray,\n slant_range_time: xr.DataArray,\n coordinate_conversion: xr.DataArray,\n) -> xr.DataArray:\n slant_range = SPEED_OF_LIGHT / 2.0 * slant_range_time\n cc = coordinate_conversion.interp(azimuth_time=azimuth_time)\n x = slant_range - cc.sr0\n ground_range = (cc.srgrCoefficients * x ** cc.degree).sum(\"degree\")\n return ground_range # type: ignore\n\n\ndef assign_slant_range_time_coord(\n measurement: xr.Dataset, coordinate_conversion: xr.Dataset\n) -> xr.Dataset:\n x = measurement.ground_range - coordinate_conversion.gr0\n slant_range = (\n coordinate_conversion.grsrCoefficients * x ** coordinate_conversion.degree\n ).sum(dim=\"degree\")\n slant_range_coord = slant_range.interp(\n azimuth_time=measurement.azimuth_time, ground_range=measurement.ground_range\n ).data\n slant_range_time = 2 / SPEED_OF_LIGHT * slant_range_coord\n measurement = measurement.assign_coords(\n slant_range_time=((\"azimuth_time\", \"ground_range\"), slant_range_time)\n ) # type: ignore\n return measurement\n\n\ndef build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:\n lat = int(round(lat * 10))\n lon = int(round(lon * 10))\n\n n_or_s = \"N\" if lat >= 0 else \"S\"\n e_or_w = \"E\" if lon >= 0 else \"W\"\n burst_id = f\"R{relative_orbit:03}\" f\"-{n_or_s}{lat:03}\" f\"-{e_or_w}{lon:04}\"\n return burst_id\n\n\ndef compute_burst_centres(\n gcp: xr.Dataset,\n) -> T.Tuple[T.List[float], T.List[float]]:\n gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)\n gc_az_win = gcp_rolling.construct(azimuth_time=\"az_win\")\n centre = gc_az_win.mean([\"az_win\", \"slant_range_time\"])\n centre = centre.isel(azimuth_time=slice(1, None))\n return centre.latitude.values.tolist(), centre.longitude.values.tolist()\n\n\nMETADATA_OPENERS = {\n \"orbit\": open_orbit_dataset,\n \"attitude\": open_attitude_dataset,\n \"azimuth_fm_rate\": open_azimuth_fm_rate_dataset,\n \"dc_estimate\": open_dc_estimate_dataset,\n \"gcp\": open_gcp_dataset,\n \"coordinate_conversion\": open_coordinate_conversion_dataset,\n \"calibration\": open_calibration_dataset,\n \"noise_range\": open_noise_range_dataset,\n \"noise_azimuth\": open_noise_azimuth_dataset,\n}\n\n\ndef do_override_product_files(\n template: str, product_files: T.Dict[str, T.Tuple[str, str, str, str, str]]\n) -> T.Dict[str, T.Tuple[str, str, str, str, str]]:\n overridden_product_files = {}\n for path, description in product_files.items():\n type, prefix, swath, polarization, date = description\n ext = os.path.splitext(path)[1]\n dirname = os.path.dirname(path)\n overridden_path = template.format(**locals())\n 
overridden_product_files[overridden_path] = description\n return overridden_product_files\n\n\ndef open_sentinel1_dataset(\n product_urlpath: esa_safe.PathType,\n *,\n drop_variables: T.Optional[T.Tuple[str]] = None,\n group: T.Optional[str] = None,\n fs: T.Optional[fsspec.AbstractFileSystem] = None,\n storage_options: T.Optional[T.Dict[str, T.Any]] = None,\n check_files_exist: bool = False,\n override_product_files: T.Optional[str] = None,\n) -> xr.Dataset:\n if drop_variables is not None:\n warnings.warn(\"'drop_variables' is currently ignored\")\n\n fs, manifest_path = get_fs_path(product_urlpath, fs, storage_options)\n if fs.isdir(manifest_path):\n manifest_path = os.path.join(manifest_path, \"manifest.safe\")\n product_path = os.path.dirname(manifest_path)\n\n with fs.open(manifest_path) as file:\n product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)\n\n if override_product_files:\n product_files = do_override_product_files(override_product_files, product_files)\n\n groups = find_available_groups(\n product_files, product_path, check_files_exist=check_files_exist, fs=fs\n )\n\n group, burst_index = normalise_group(group)\n absgroup = f\"/{group}\"\n if group != \"\" and group not in groups:\n raise ValueError(\n f\"Invalid group {group!r}, please select one of the following groups:\"\n f\"\\n{list(groups.keys())}\"\n )\n\n metadata = \"\"\n\n ds = xr.Dataset()\n if group == \"\":\n subgroups = list(groups)\n else:\n subgroups = [\n g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group\n ]\n\n if group.count(\"/\") == 1:\n with fs.open(groups[group][1]) as annotation:\n ds = open_pol_dataset(groups[group][0], annotation, fs=fs)\n elif group.count(\"/\") == 2:\n _, _, metadata = group.split(\"/\", 2)\n with fs.open(groups[group][0]) as file:\n ds = METADATA_OPENERS[metadata](file)\n\n for data_var in ds.data_vars:\n ds.data_vars[data_var].attrs.update(product_attrs)\n\n product_attrs[\"group\"] = absgroup\n if len(subgroups):\n product_attrs[\"subgroups\"] = subgroups\n ds.attrs.update(product_attrs) # type: ignore\n\n if group.count(\"/\") == 1 and burst_index is not None:\n ds = crop_burst_dataset(ds, burst_index=burst_index)\n\n conventions.update_attributes(ds, group=metadata)\n\n return ds\n"
] | [
[
"numpy.maximum",
"numpy.allclose",
"numpy.linspace",
"numpy.arange",
"pandas.Timedelta",
"numpy.datetime64",
"numpy.timedelta64",
"numpy.full",
"numpy.fromstring",
"numpy.log10",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
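# Editor's illustration (not part of the dumped row above). The sentinel1.py source in that row
# converts calibrated amplitude to intensity and, when as_db=True, applies 10*log10 with a floor
# via np.maximum (see calibrate_intensity). This is a minimal NumPy sketch of that dB branch,
# using made-up amplitude values rather than real Sentinel-1 measurements or the package's API.
import numpy as np

amplitude = np.array([0.001, 0.1, 1.0, 3.0])   # hypothetical calibrated amplitudes
intensity = np.abs(amplitude) ** 2             # linear intensity, units m2 m-2
min_db = -40.0                                 # same default floor as calibrate_intensity
intensity_db = np.maximum(10.0 * np.log10(intensity), min_db)  # clip very low returns
print(intensity_db)                            # [-40. -20. 0. ~9.54] dB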
Christensen-Lab-Dartmouth/MethylCapsNet | [
"17b6b19809c5e1984de804eb34cc7494210f91a6",
"17b6b19809c5e1984de804eb34cc7494210f91a6"
] | [
"methylcapsnet/.ipynb_checkpoints/methylcaps_model_-checkpoint.py",
"methylcapsnet/.ipynb_checkpoints/build_capsules-checkpoint.py"
] | [
"import pandas as pd\nfrom pymethylprocess.MethylationDataTypes import MethylationArray\nfrom sklearn.metrics import mean_absolute_error, r2_score\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom pybedtools import BedTool\nimport numpy as np\nfrom functools import reduce\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport os\nimport pysnooper\nimport argparse\nimport pickle\nfrom sklearn.metrics import classification_report\nimport click\nimport methylcapsnet\nfrom methylcapsnet.build_capsules import *\nfrom methylcapsnet.methylcaps_data_models import *\nimport sqlite3\nimport os\nimport glob\nimport dask\nfrom dask.diagnostics import ProgressBar\nfrom pathos.multiprocessing import Pool\nimport multiprocessing\nimport dask.bag as db\nfrom distributed import Client, LocalCluster, get_task_stream\nRANDOM_SEED=42\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\[email protected]('train.log')\ndef model_capsnet_(train_methyl_array='train_val_test_sets/train_methyl_array.pkl',\n\t\t\t\t\tval_methyl_array='train_val_test_sets/val_methyl_array.pkl',\n\t\t\t\t\tinterest_col='disease',\n\t\t\t\t\tn_epochs=10,\n\t\t\t\t\tn_bins=0,\n\t\t\t\t\tbin_len=1000000,\n\t\t\t\t\tmin_capsule_len=300,\n\t\t\t\t\tprimary_caps_out_len=45,\n\t\t\t\t\tcaps_out_len=45,\n\t\t\t\t\thidden_topology='30,80,50',\n\t\t\t\t\tgamma=1e-2,\n\t\t\t\t\tdecoder_topology='100,300',\n\t\t\t\t\tlearning_rate=1e-2,\n\t\t\t\t\trouting_iterations=3,\n\t\t\t\t\toverlap=0.,\n\t\t\t\t\tcustom_loss='none',\n\t\t\t\t\tgamma2=1e-2,\n\t\t\t\t\tjob=0,\n\t\t\t\t\tcapsule_choice=['genomic_binned'],\n\t\t\t\t\tcustom_capsule_file='',\n\t\t\t\t\ttest_methyl_array='',\n\t\t\t\t\tpredict=False,\n\t\t\t\t\tbatch_size=16,\n\t\t\t\t\tlimited_capsule_names_file='',\n\t\t\t\t\tgsea_superset='',\n\t\t\t\t\ttissue='',\n\t\t\t\t\tnumber_sets=25,\n\t\t\t\t\tuse_set=False,\n\t\t\t\t\tgene_context=False,\n\t\t\t\t\tselect_subtypes=[],\n\t\t\t\t\tfit_spw=False,\n\t\t\t\t\tl1_l2='',\n\t\t\t\t\tcustom_capsule_file2='',\n\t\t\t\t\tmin_capsules=5):\n\n\tcapsule_choice=list(capsule_choice)\n\t#custom_capsule_file=list(custom_capsule_file)\n\thlt_list=filter(None,hidden_topology.split(','))\n\tif hlt_list:\n\t\thidden_topology=list(map(int,hlt_list))\n\telse:\n\t\thidden_topology=[]\n\thlt_list=filter(None,decoder_topology.split(','))\n\tif hlt_list:\n\t\tdecoder_topology=list(map(int,hlt_list))\n\telse:\n\t\tdecoder_topology=[]\n\n\thidden_caps_layers=[]\n\tinclude_last=False\n\n\tma=MethylationArray.from_pickle(train_methyl_array)\n\tma_v=MethylationArray.from_pickle(val_methyl_array)\n\tif test_methyl_array and predict:\n\t\tma_t=MethylationArray.from_pickle(test_methyl_array)\n\n\ttry:\n\t\tma.remove_na_samples(interest_col)\n\t\tma_v.remove_na_samples(interest_col)\n\t\tif test_methyl_array and predict:\n\t\t\tma_t.remove_na_samples(interest_col)\n\texcept:\n\t\tpass\n\n\tif select_subtypes:\n\t\tprint(ma.pheno[interest_col].unique())\n\t\tma.pheno=ma.pheno.loc[ma.pheno[interest_col].isin(select_subtypes)]\n\t\tma.beta=ma.beta.loc[ma.pheno.index]\n\t\tma_v.pheno=ma_v.pheno.loc[ma_v.pheno[interest_col].isin(select_subtypes)]\n\t\tma_v.beta=ma_v.beta.loc[ma_v.pheno.index]\n\t\tprint(ma.pheno[interest_col].unique())\n\n\t\tif test_methyl_array and 
predict:\n\t\t\tma_t.pheno=ma_t.pheno.loc[ma_t.pheno[interest_col].isin(select_subtypes)]\n\t\t\tma_t.beta=ma_t.beta.loc[ma_t.pheno.index]\n\n\tif custom_capsule_file2 and os.path.exists(custom_capsule_file2):\n\t\tcapsules_dict=torch.load(custom_capsule_file2)\n\t\tfinal_modules, modulecpgs, module_names=capsules_dict['final_modules'], capsules_dict['modulecpgs'], capsules_dict['module_names']\n\t\tif min_capsule_len>1:\n\t\t\tinclude_capsules=[len(x)>min_capsule_len for x in final_modules]\n\t\t\tfinal_modules=[final_modules[i] for i in range(len(final_modules)) if include_capsules[i]]\n\t\t\tmodule_names=[module_names[i] for i in range(len(module_names)) if include_capsules[i]]\n\t\t\tmodulecpgs=(reduce(np.union1d,final_modules)).tolist()\n\n\telse:\n\t\tfinal_modules, modulecpgs, module_names=build_capsules(capsule_choice,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toverlap,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbin_len,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tma,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tinclude_last,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmin_capsule_len,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcustom_capsule_file,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgsea_superset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttissue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgene_context,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tuse_set,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnumber_sets,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlimited_capsule_names_file)\n\t\tif custom_capsule_file2:\n\t\t\ttorch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names),custom_capsule_file2)\n\n\tassert len(final_modules) >= min_capsules , \"Below the number of allowed capsules.\"\n\n\tif fit_spw:\n\t\tmodulecpgs=list(reduce(lambda x,y:np.hstack((x,y)),final_modules))\n\n\tif not include_last: # ERROR HAPPENS HERE!\n\t\tma.beta=ma.beta.loc[:,modulecpgs]\n\t\tma_v.beta=ma_v.beta.loc[:,modulecpgs]\n\t\tif test_methyl_array and predict:\n\t\t\tma_t.beta=ma_t.beta.loc[:,modulecpgs]\n\t# https://github.com/higgsfield/Capsule-Network-Tutorial/blob/master/Capsule%20Network.ipynb\n\toriginal_interest_col=interest_col\n\tif n_bins:\n\t\tnew_interest_col=interest_col+'_binned'\n\t\tma.pheno.loc[:,new_interest_col],bins=pd.cut(ma.pheno[interest_col],bins=n_bins,retbins=True)\n\t\tma_v.pheno.loc[:,new_interest_col],_=pd.cut(ma_v.pheno[interest_col],bins=bins,retbins=True)\n\t\tif test_methyl_array and predict:\n\t\t\tma_t.pheno.loc[:,new_interest_col],_=pd.cut(ma_t.pheno[interest_col],bins=bins,retbins=True)\n\t\tinterest_col=new_interest_col\n\n\tdatasets=dict()\n\n\tdatasets['train']=MethylationDataset(ma,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)\n\tprint(datasets['train'].X.isnull().sum().sum())\n\tdatasets['val']=MethylationDataset(ma_v,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)\n\tif test_methyl_array and predict:\n\t\tdatasets['test']=MethylationDataset(ma_t,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)\n\n\tdataloaders=dict()\n\n\tdataloaders['train']=DataLoader(datasets['train'],batch_size=batch_size,shuffle=True,num_workers=8, pin_memory=True, drop_last=True)\n\tdataloaders['val']=DataLoader(datasets['val'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)\n\tn_primary=len(final_modules)\n\tif test_methyl_array and 
predict:\n\t\tdataloaders['test']=DataLoader(datasets['test'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)\n\n\tn_inputs=list(map(len,final_modules))\n\n\tn_out_caps=len(datasets['train'].y_unique)\n\n\tif not fit_spw:\n\t\tprint(\"Not fitting MethylSPWNet\")\n\t\tprimary_caps = PrimaryCaps(modules=final_modules,hidden_topology=hidden_topology,n_output=primary_caps_out_len)\n\t\thidden_caps = []\n\t\toutput_caps = CapsLayer(n_out_caps,n_primary,primary_caps_out_len,caps_out_len,routing_iterations=routing_iterations)\n\t\tdecoder = Decoder(n_out_caps*caps_out_len,len(list(ma.beta)),decoder_topology)\n\t\tmodel = CapsNet(primary_caps, hidden_caps, output_caps, decoder, gamma=gamma)\n\n\t\tif test_methyl_array and predict:\n\t\t\tmodel.load_state_dict(torch.load('capsnet_model.pkl'))\n\n\n\telse:\n\t\tprint(\"Fitting MethylSPWNet\")\n\t\tmodule_lens=[len(x) for x in final_modules]\n\t\tmodel=MethylSPWNet(module_lens, hidden_topology, dropout_p=0.2, n_output=n_out_caps)\n\t\tif test_methyl_array and predict:\n\t\t\tmodel.load_state_dict(torch.load('spwnet_model.pkl'))\n\n\tif torch.cuda.is_available():\n\t\tmodel=model.cuda()\n\n\n\t# extract all c_ij for all layers across all batches, or just last batch\n\n\tif l1_l2 and fit_spw:\n\t\tl1,l2=list(map(float,l1_l2.split(',')))\n\telif fit_spw:\n\t\tl1,l2=0.,0.\n\n\ttrainer=Trainer(model=model,\n\t\t\t\t\tvalidation_dataloader=dataloaders['val'],\n\t\t\t\t\tn_epochs=n_epochs,\n\t\t\t\t\tlr=learning_rate,\n\t\t\t\t\tn_primary=n_primary,\n\t\t\t\t\tcustom_loss=custom_loss,\n\t\t\t\t\tgamma2=gamma2,\n\t\t\t\t\tspw_mode=fit_spw,\n\t\t\t\t\tl1=l1 if fit_spw else 0.,\n\t\t\t\t\tl2=l2 if fit_spw else 0.)\n\n\tif not predict:\n\t\ttry:\n\t\t\t#assert 1==2\n\t\t\ttrainer.fit(dataloader=dataloaders['train'])\n\t\t\tval_loss=min(trainer.val_losses)\n\t\t\ttorch.save(trainer.model.state_dict(),'capsnet_model.pkl' if not fit_spw else 'spwnet_model.pkl')\n\t\t\tif fit_spw:\n\t\t\t\ttorch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names), 'spwnet_capsules.pkl')\n\t\t\t\ttorch.save(dict(module_names=module_names,module_lens=module_lens,dropout_p=0.2,hidden_topology=hidden_topology,n_output=n_out_caps),'spwnet_config.pkl')\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tval_loss=-2\n\n\t\twith sqlite3.connect('jobs.db', check_same_thread=False) as conn:\n\t\t\tpd.DataFrame([job,val_loss],index=['job','val_loss'],columns=[0]).T.to_sql('val_loss',conn,if_exists='append')\n\telse:\n\t\tif test_methyl_array:\n\t\t\ttrainer.weights=1.\n\t\t\tY=trainer.predict(dataloaders['test'])\n\t\t\tpickle.dump(Y,open('predictions.pkl','wb'))\n\t\t\tval_loss=-1\n\t#print(val_loss)\n\t# print([min(trainer.val_losses),n_epochs,\n\t# \t\tn_bins,\n\t# \t\tbin_len,\n\t# \t\tmin_capsule_len,\n\t# \t\tprimary_caps_out_len,\n\t# \t\tcaps_out_len,\n\t# \t\thidden_topology,\n\t# \t\tgamma,\n\t# \t\tdecoder_topology,\n\t# \t\tlearning_rate,\n\t# \t\trouting_iterations])\n\n\n\n\treturn val_loss\n",
"import pandas as pd\nfrom pymethylprocess.MethylationDataTypes import MethylationArray\nfrom sklearn.metrics import mean_absolute_error, r2_score\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom pybedtools import BedTool\nimport numpy as np\nfrom functools import reduce\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport os\nimport pysnooper\nimport argparse\nimport pickle\nfrom sklearn.metrics import classification_report\nimport click\nimport methylcapsnet\nfrom methylcapsnet.methylcaps_data_models import *\nimport sqlite3\nimport os\nimport glob\nimport dask\nimport methyl_capsules\nfrom dask.diagnostics import ProgressBar\nfrom pathos.multiprocessing import Pool\nimport multiprocessing\nimport dask.bag as db\nfrom distributed import Client, LocalCluster, get_task_stream\nRANDOM_SEED=42\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\ndef print_if_exists(f):\n\tif os.path.exists(f):\n\t\tprint('{} does exist'.format(f))\n\telse:\n\t\tprint('{} does not exist'.format(f))\n\nmethylcaps_dir=os.path.dirname(methyl_capsules.__file__)\nannotations450 = os.path.abspath(os.path.join(methylcaps_dir, 'data/450kannotations.bed'))\nhg19 = os.path.abspath(os.path.join(methylcaps_dir, 'data/hg19.genome'))\nselected_caps_file = os.path.abspath(os.path.join(methylcaps_dir, 'data/selected_capsules.p'))\n\ngsea_collections = os.path.abspath(os.path.join(methylcaps_dir, 'data/gsea_collections.symbols.p'))\ngene_set_weights = {os.path.basename(f).split('_')[1]: f for f in glob.glob(os.path.abspath(os.path.join(methylcaps_dir, 'data/SetTestWeights_*.txt')))}\ngene2cpg = os.path.abspath(os.path.join(methylcaps_dir, 'data/gene2cpg.p'))\n\nCAPSULES=['gene',\n\t\t\t'gene_context',\n\t\t\t'GSEA_C5.BP',\n\t\t\t'GSEA_C6',\n\t\t\t'GSEA_C1',\n\t\t\t'GSEA_H',\n\t\t\t'GSEA_C3.MIR',\n\t\t\t'GSEA_C2.CGP',\n\t\t\t'GSEA_C4.CM',\n\t\t\t'GSEA_C5.CC',\n\t\t\t'GSEA_C3.TFT',\n\t\t\t'GSEA_C5.MF',\n\t\t\t'GSEA_C7',\n\t\t\t'GSEA_C2.CP',\n\t\t\t'GSEA_C4.CGN',\n\t\t\t'UCSC_RefGene_Name',\n\t\t\t'UCSC_RefGene_Accession',\n\t\t\t'UCSC_RefGene_Group',\n\t\t\t'UCSC_CpG_Islands_Name',\n\t\t\t'Relation_to_UCSC_CpG_Island',\n\t\t\t'Phantom',\n\t\t\t'DMR',\n\t\t\t'Enhancer',\n\t\t\t'HMM_Island',\n\t\t\t'Regulatory_Feature_Name',\n\t\t\t'Regulatory_Feature_Group',\n\t\t\t'DHS']\n\nfinal_caps_files = {k: os.path.abspath(os.path.join(methylcaps_dir, 'data/final_capsules__{}.p'.format(k))) for k in CAPSULES }\n\n\nif 0:\n\tprint_if_exists(annotations450)\n\tprint_if_exists(hg19)\n\tprint_if_exists(selected_caps_file)\n\n# @pysnooper.snoop('get_mod.log')\ndef get_binned_modules(ma=None,a=annotations450,b='lola_vignette_data/activeDHS_universe.bed', include_last=False, min_capsule_len=2000):\n\tallcpgs=ma.beta.columns.values\n\ta=BedTool(a)\n\tb=BedTool(b)\n\t# a.saveas('a.bed')\n\t# b.saveas('b.bed')\n\ta_orig=a\n\tdf=BedTool(a).to_dataframe()\n\tdf.iloc[:,0]=df.iloc[:,0].astype(str)#.map(lambda x: 'chr'+x.split('.')[0])\n\tdf=df.set_index('name').loc[list(ma.beta)].reset_index().iloc[:,[1,2,3,0]]\n\ta=BedTool.from_dataframe(df)\n\t# df_bed=pd.read_table(b,header=None)\n\t# df_bed['features']=np.arange(df_bed.shape[0])\n\t# df_bed=df_bed.iloc[:,[0,1,2,-1]]\n\t# b=BedTool.from_dataframe(df)\n\t# a=BedTool.from_dataframe(df_bed)#('lola_vignette_data/activeDHS_universe.bed')\n\tdf_bed=BedTool(b).to_dataframe()\n\tif 
df_bed.shape[1]<4:\n\t\tdf_bed['features']=np.arange(df_bed.shape[0])\n\tb=BedTool.from_dataframe(df_bed)\n\ttry:\n\t\tc=b.intersect(a,wa=True,wb=True).sort()\n\t\t# c.saveas('c.bed')\n\t\td=c.groupby(g=[1,2,3,4],c=(8,8),o=('count','distinct'))\n\texcept:\n\t\tdf=BedTool(a_orig).to_dataframe()\n\t\tdf.iloc[:,0]=df.iloc[:,0].astype(str).map(lambda x: 'chr'+x.split('.')[0])\n\t\tdf=df.set_index('name').loc[list(ma.beta)].reset_index().iloc[:,[1,2,3,0]]\n\t\ta=BedTool.from_dataframe(df)\n\t\tc=b.intersect(a,wa=True,wb=True).sort()\n\t\t# c.saveas('c.bed')\n\t\td=c.groupby(g=[1,2,3,4],c=(8,8),o=('count','distinct'))\n\t#d.saveas('d.bed')\n\tdf2=d.to_dataframe()\n\tdf3=df2.loc[df2.iloc[:,-2]>min_capsule_len]\n\tmodules = [cpgs.split(',') for cpgs in df3.iloc[:,-1].values]\n\tmodulecpgs=np.array(list(set(list(reduce(lambda x,y:x+y,modules)))))\n\tif include_last:\n\t\tmissing_cpgs=np.setdiff1d(allcpgs,modulecpgs).tolist()\n\tfinal_modules = modules+([missing_cpgs] if include_last else [])\n\tmodule_names=(df3.iloc[:,0]+'_'+df3.iloc[:,1].astype(str)+'_'+df3.iloc[:,2].astype(str)).tolist()\n\treturn final_modules,modulecpgs.tolist(),module_names\n\ndef return_caps(capsule,allcpgs,min_capsule_len):\n\tcapsule=np.intersect1d(capsule,allcpgs).tolist()\n\tif len(capsule)>=min_capsule_len:\n\t\treturn capsule\n\telse:\n\t\treturn []\n\n#@pysnooper.snoop('reduce_caps.log')\ndef reduce_caps(capsules,allcpgs,min_capsule_len):\n\tcluster = LocalCluster(n_workers=multiprocessing.cpu_count()*2, threads_per_worker=20)\n\tclient = Client(cluster)\n\tcapsule_names=list(capsules.keys())\n\n\tcapsules_bag=db.from_sequence(list(capsules.values()))\n\tcapsules_intersect=capsules_bag.map(lambda x: np.intersect1d(x,allcpgs))\n\tcapsules_len=capsules_intersect.map(lambda x: x if len(x) >= min_capsule_len else [])\n\t# with get_task_stream(plot='save', filename=\"task-stream.html\") as ts:\n\tcapsules=capsules_len.compute()\n\tcapsules=dict([(capsule_names[i],capsules[i].tolist()) for i in range(len(capsule_names)) if len(capsules[i])])\n\t#print(list(capsules.keys()))\n\tclient.close()\n\treturn capsules\n\[email protected]('get_caps.log')\ndef return_custom_capsules(ma=None,capsule_file=selected_caps_file, capsule_sets=['all'], min_capsule_len=2000, include_last=False, limited_capsule_names_file=''):\n\tallcpgs=ma.beta.columns.values\n\tif limited_capsule_names_file:\n\t\twith open(limited_capsule_names_file) as f:\n\t\t\tlimited_capsule_names=f.read().replace('\\n',' ').split()\n\telse:\n\t\tlimited_capsule_names=[]\n\tcaps_dict=pickle.load(open(capsule_file,'rb'))\n\tcapsules={}\n\tif 'all' in capsule_sets:\n\t\tcapsule_sets=list(caps_dict.keys())\n\n\tfor caps_set in capsule_sets:\n\t\tif limited_capsule_names_file:\n\t\t\tcapsule_list=np.intersect1d(list(caps_dict[caps_set].keys()),limited_capsule_names).tolist()\n\t\telse:\n\t\t\tcapsule_list=list(caps_dict[caps_set].keys())\n\t\tfor capsule in capsule_list:\n\t\t\tcapsules[capsule]=caps_dict[caps_set][capsule]#dask.delayed(lambda x:return_caps(x,allcpgs,min_capsule_len))(caps_dict[caps_set][capsule])\n\n\n\tcapsules=reduce_caps(capsules,allcpgs,min_capsule_len)\n\t#capsules=dask.compute(capsules,scheduler='threading')[0]\n\t#capsules={capsule:capsules[capsule] for capsule in capsules if capsules[capsule]}\n\tmodules = [capsules[capsule] for capsule in capsules if capsules[capsule]]\n\tmodulecpgs=reduce(np.union1d,modules)#np.array(list(set(list(reduce(lambda 
x,y:x+y,modules)))))\n\tmodule_names=list(capsules.keys())#(df3.iloc[:,0]+'_'+df3.iloc[:,1].astype(str)+'_'+df3.iloc[:,2].astype(str)).tolist()\n\treturn modules,modulecpgs,module_names\n\ndef divide_chunks(l, n):\n\tfor i in range(0, len(l), len(l)//n):\n\t\tyield l[i:i + n]\n\[email protected]('gsea_build.log')\ndef return_gsea_capsules(ma=None,tissue='',context_on=False,use_set=False,gsea_superset='H',n_top_sets=25,min_capsule_len=2000, all_genes=False, union_cpgs=True, limited_capsule_names_file=''):\n\tglobal gene2cpg, gsea_collections, gene_set_weights\n\tif limited_capsule_names_file:\n\t\twith open(limited_capsule_names_file) as f:\n\t\t\tlimited_capsule_names=f.read().replace('\\n',' ').split()\n\telse:\n\t\tlimited_capsule_names=[]\n\tallcpgs=ma.beta.columns.values\n\tentire_sets=use_set\n\tcollection=gsea_superset\n\tgene2cpg=pickle.load(open(gene2cpg,'rb'))\n\tif all_genes:\n\t\tgene_sets=list(gene2cpg.keys())\n\telse:\n\t\tgsea=pickle.load(open(gsea_collections,'rb'))\n\t\tif tissue:\n\t\t\tgene_sets=pd.read_csv(gene_set_weights[collection],sep='\\t',index_col=0)\n\t\t\tif tissue!='ubiquitous':\n\t\t\t\tgene_sets=(gene_sets.quantile(1.,axis=1)-gene_sets.quantile(0.,axis=1)).sort_values().index.tolist()\n\t\t\telse:\n\t\t\t\tgene_sets=gene_sets[tissue].sort_values(ascending=False).index.tolist()\n\t\telse:\n\t\t\tgene_sets=list(gsea[collection].keys())\n\tintersect_context=False\n\tif limited_capsule_names_file:\n\t\tgene_sets_tmp=np.intersect1d(gene_sets,limited_capsule_names).tolist()\n\t\tprint('LIMITED GENE CAPS',gene_sets_tmp)\n\t\tif gene_sets_tmp:\n\t\t\tgene_sets=gene_sets_tmp\n\t\t\tintersect_context=True\n\tif not tissue:\n\t\tn_top_sets=0\n\tif n_top_sets and not all_genes:\n\t\tgene_sets=gene_sets[:n_top_sets]\n\n\tcapsules=dict()\n\tif all_genes:\n\t\tentire_sets=False\n\tif entire_sets:\n\t\tcontext_on=False\n\n\tdef process_gene_set(gene_set):\n\t\tcapsules=[]\n\t\tgene_set_cpgs=[]\n\t\tfor genename in (gsea[collection][gene_set] if not all_genes else [gene_set]):\n\t\t\tgene=gene2cpg.get(genename,{'Gene':[],'Upstream':[]})\n\t\t\tif context_on:\n\t\t\t\tfor k in ['Gene','Upstream']:\n\t\t\t\t\tcontext=gene.get(k,[])\n\t\t\t\t\tif len(context):\n\t\t\t\t\t\tcapsules.append(('{}_{}'.format(genename,k),list(context)))\n\t\t\t\t\t\t#capsules['{}_{}'.format(genename,k)]=context.tolist()\n\t\t\telse:\n\t\t\t\tif not entire_sets:\n\t\t\t\t\tcapsules.append((genename,np.union1d(gene.get('Gene',[]),gene.get('Upstream',[])).tolist()))\n\t\t\t\t\t#capsules[genename]=np.union1d(gene.get('Gene',[]),gene.get('Upstream',[])).tolist()\n\t\t\t\telse:\n\t\t\t\t\tupstream=gene.get('Upstream',[])\n\t\t\t\t\tgene=gene.get('Gene',[])\n\t\t\t\t\tcpg_set=np.union1d(gene,upstream)\n\t\t\t\t\tif cpg_set.tolist():\n\t\t\t\t\t\tgene_set_cpgs.append(cpg_set)\n\t\tif entire_sets and not all_genes:\n\t\t\tcapsules.append((gene_set,reduce(np.union1d,gene_set_cpgs).tolist()))\n\t\t\t#capsules[gene_set]=reduce(np.union1d,gene_set_cpgs).tolist()\n\t\treturn capsules\n\n\tdef process_chunk(chunk):\n\t\twith ProgressBar():\n\t\t\tchunk=dask.compute(*chunk,scheduler='threading')\n\t\treturn chunk\n\n\twith ProgressBar():\n\t\tcapsules=dict(list(reduce(lambda x,y: x+y,dask.compute(*[dask.delayed(process_gene_set)(gene_set) for gene_set in gene_sets],scheduler='threading'))))\n\n\n\tcapsules2=[]\n\t#caps_lens=np.array([len(capsules[capsule]) for capsule in capsules])\n\n\t# cluster = LocalCluster(n_workers=multiprocessing.cpu_count()*2, threads_per_worker=20)\n\t# client = 
Client(cluster)\n\tcapsule_names=list(capsules.keys())\n\n\tif intersect_context:\n\t\tcapsules_tmp_names=np.intersect1d(capsule_names,limited_capsule_names).tolist()\n\t\tif capsules_tmp_names:\n\t\t\tcapsules={k:capsules[k] for k in capsules_tmp_names}\n\t\t\tcapsule_names=capsules_tmp_names\n\n\tcapsules=reduce_caps(capsules,allcpgs,min_capsule_len)\n\n\t# print(capsule_names)\n\t# capsules_bag=db.from_sequence(list(capsules.values()))\n\t# capsules_intersect=capsules_bag.map(lambda x: np.intersect1d(x,allcpgs))\n\t# capsules_len=capsules_intersect.map(lambda x: x if len(x) >= min_capsule_len else [])\n\t# # with get_task_stream(plot='save', filename=\"task-stream.html\") as ts:\n\t# capsules=capsules_len.compute()\n\t# #print(capsules)\n\t# capsules=dict([(capsule_names[i],capsules[i].tolist()) for i in range(len(capsule_names)) if len(capsules[i])])\n\n\t# for capsule in capsules:\n\t# \tcapsules2.append([capsule,dask.delayed(return_caps)(capsules[capsule],allcpgs,min_capsule_len)])\n\t# cpus=multiprocessing.cpu_count()\n\t# caps_chunks=list(divide_chunks(capsules2,cpus))\n\t# p=Pool(cpus)\n\t# capsules=dict(list(reduce(lambda x,y: x+y,p.map(process_chunk,caps_chunks))))\n\n\t# with ProgressBar():\n\t# \tcapsules=dask.compute(capsules2,scheduler='threading')[0]\n\t#print(capsules)\n\tmodules = list(capsules.values())#[capsules[capsule] for capsule in capsules if capsules[capsule]]\n\tmodulecpgs=reduce((np.union1d if union_cpgs else (lambda x,y:x+y)),modules).tolist()\n\tmodule_names=list(capsules.keys())\n\n\treturn modules,modulecpgs,module_names\n\ndef get_gene_sets(cpgs,final_capsules,collection,tissue,n_top_sets):\n\tglobal gsea_collections, gene_set_weights\n\tgsea=pickle.load(open(gsea_collections,'rb'))\n\tif tissue:\n\t\tgene_sets=pd.read_csv(gene_set_weights[collection],sep='\\t',index_col=0)\n\t\tif tissue!='ubiquitous':\n\t\t\tgene_sets=(gene_sets.quantile(1.,axis=1)-gene_sets.quantile(0.,axis=1)).sort_values().index.tolist()\n\t\telse:\n\t\t\tgene_sets=gene_sets[tissue].sort_values(ascending=False).index.tolist()\n\telse:\n\t\tgene_sets=list(gsea[collection].keys())\n\tif n_top_sets:\n\t\tgene_sets=gene_sets[:n_top_sets]\n\tfinal_capsules=final_capsules['GSEA_{}'.format(collection)]\n\tfinal_capsules=final_capsules[final_capsules['cpg'].isin(cpgs)]\n\treturn final_capsules[final_capsules['feature'].isin(gene_sets)]['cpg'].values\n\n#@pysnooper.snoop('final_caps.log')\ndef return_final_capsules(methyl_array, capsule_choice, min_capsule_len, collection,tissue, n_top_sets, limited_capsule_names_file, gsea_superset, return_original_capsule_assignments=False):\n\tfrom sklearn.preprocessing import LabelEncoder\n\tglobal final_caps_files\n\tif limited_capsule_names_file:\n\t\twith open(limited_capsule_names_file) as f:\n\t\t\tlimited_capsule_names=f.read().replace('\\n',' ').split()\n\telse:\n\t\tlimited_capsule_names=[]\n\t#final_capsules=pickle.load(open(final_caps_files[capsule_choice],'rb'))\n\tif len(capsule_choice)>1:\n\t\tcpg_arr=pd.concat([pd.read_pickle(final_caps_files[caps_choice]) for caps_choice in capsule_choice])\n\telse:\n\t\tcpg_arr=pd.read_pickle(final_caps_files[capsule_choice[0]])\n\tcpgs=np.intersect1d(methyl_array.beta.columns.values,cpg_arr['cpg'].values)\n\tif gsea_superset:\n\t\tcpgs=get_gene_sets(cpgs,cpg_arr,gsea_superset,tissue,n_top_sets)\n\tif 
limited_capsule_names:\n\t\tprint(limited_capsule_names)\n\t\tcpg_arr=cpg_arr[cpg_arr['feature'].isin(limited_capsule_names)]#cpgs=np.intersect1d(cpgs,cpg_arr[cpg_arr['feature'].isin(limited_capsule_names)]['cpg'].values)\n\tcpg_arr=cpg_arr[cpg_arr['cpg'].isin(cpgs)]\n\tcapsules=[]\n\tcpgs=[]\n\tfeatures=[]\n\tcpg_arr=pd.DataFrame(cpg_arr.groupby('feature').filter(lambda x: len(x['cpg'])>=min_capsule_len))\n\t# for name, dff in .groupby('feature'):\n\t# \tcpg=dff['cpg'].values\n\t# \tcapsules.append(cpg)\n\t# \tcpgs.extend(cpg.tolist())\n\t# \tfeatures.append(name)\n\tcpgs,features=cpg_arr['cpg'].values,cpg_arr['feature'].unique()\n\tsplit_idx=np.cumsum(np.bincount(LabelEncoder().fit_transform(cpg_arr['feature'].values).flatten().astype(int)).flatten().astype(int)).flatten().astype(int)[:-1]\n\tcapsules=np.split(cpgs,split_idx)\n\t# print(capsules)\n\tif return_original_capsule_assignments:\n\t\treturn capsules,cpgs,features,cpg_arr[['feature','cpg']]\n\telse:\n\t\treturn capsules,cpgs,features#cpg_arr['feature'].unique()#cpg_arr['cpg'].values\n\[email protected]('build_caps.log')\ndef build_capsules(capsule_choice,\n\t\t\t\t\toverlap,\n\t\t\t\t\tbin_len,\n\t\t\t\t\tma,\n\t\t\t\t\tinclude_last,\n\t\t\t\t\tmin_capsule_len,\n\t\t\t\t\tcustom_capsule_file,\n\t\t\t\t\tgsea_superset,\n\t\t\t\t\ttissue,\n\t\t\t\t\tgene_context,\n\t\t\t\t\tuse_set,\n\t\t\t\t\tnumber_sets,\n\t\t\t\t\tlimited_capsule_names_file):\n\tcapsules,finalcpgs,capsule_names=[],[],[]\n\tannotation_file=annotations450\n\tif 'genomic_binned' in capsule_choice:\n\t\toverlap=int(overlap*bin_len)\n\t\tgenome_file=hg19\n\t\tgname=os.path.basename(genome_file).split('.')[0]\n\t\toverlap_file='{}.{}.overlap.{}.bed'.format(gname,bin_len,overlap)\n\t\tif not os.path.exists(overlap_file):\n\t\t\tBedTool(genome_file).makewindows(g=genome_file,w=bin_len,s=bin_len-overlap).saveas('{}.{}.overlap.{}.bed'.format(gname,bin_len,overlap))#.to_dataframe().shape\n\t\tprint(annotation_file,overlap_file)\n\t\tfinal_modules,modulecpgs,module_names=get_binned_modules(ma=ma,a=annotation_file,b=overlap_file,include_last=include_last, min_capsule_len=min_capsule_len)\n\t\tprint('LEN_MODULES',len(final_modules))\n\t\tcapsules.extend(final_modules)\n\t\tfinalcpgs.extend(modulecpgs)\n\t\tcapsule_names.extend(module_names)\n\n\tif 'custom_bed' in capsule_choice:\n\t\tfinal_modules,modulecpgs,module_names=get_binned_modules(ma=ma,a=annotation_file,b=custom_capsule_file,include_last=include_last, min_capsule_len=min_capsule_len)\n\t\tcapsules.extend(final_modules)\n\t\tfinalcpgs.extend(modulecpgs)\n\t\tcapsule_names.extend(module_names)\n\n\tif 'custom_set' in capsule_choice:\n\t\tfinal_modules,modulecpgs,module_names=return_custom_capsules(ma=ma,capsule_file=custom_capsule_file, capsule_sets=['all'], min_capsule_len=min_capsule_len, include_last=include_last)\n\t\tcapsules.extend(final_modules)\n\t\tfinalcpgs.extend(modulecpgs)\n\t\tcapsule_names.extend(module_names)\n\n\tif np.intersect1d(CAPSULES,capsule_choice).tolist():\n\t\tfinal_modules,modulecpgs,module_names=return_final_capsules(methyl_array=ma, capsule_choice=capsule_choice, min_capsule_len=min_capsule_len, collection=gsea_superset,tissue=tissue, n_top_sets=number_sets, limited_capsule_names_file=limited_capsule_names_file, gsea_superset=gsea_superset)\n\t\tcapsules.extend(final_modules)\n\t\tfinalcpgs.extend(modulecpgs)\n\t\tcapsule_names.extend(module_names)\n\n\t# if 0:\n\t#\n\t# \tselected_sets=np.intersect1d(['UCSC_RefGene_Name','UCSC_RefGene_Accession', 'UCSC_RefGene_Group', 
'UCSC_CpG_Islands_Name', 'Relation_to_UCSC_CpG_Island', 'Phantom', 'DMR', 'Enhancer', 'HMM_Island', 'Regulatory_Feature_Name', 'Regulatory_Feature_Group', 'DHS'],capsule_choice).tolist()\n\t# \tif selected_sets:\n\t# \t\tfinal_modules,modulecpgs,module_names=return_custom_capsules(ma=ma,capsule_file=selected_caps_file, capsule_sets=selected_sets, min_capsule_len=min_capsule_len, include_last=include_last, limited_capsule_names_file=limited_capsule_names_file)\n\t# \t\tcapsules.extend(final_modules)\n\t# \t\tfinalcpgs.extend(modulecpgs)\n\t# \t\tcapsule_names.extend(module_names)\n\t#\n\t# \tgsea_bool=((\"GSEA\" in capsule_choice and gsea_superset) or 'all_gene_sets' in capsule_choice)\n\t#\n\t# \tif gsea_bool:\n\t# \t\tfinal_modules,modulecpgs,module_names=return_gsea_capsules(ma=ma,tissue=tissue,context_on=gene_context,use_set=use_set,gsea_superset=gsea_superset,n_top_sets=number_sets,min_capsule_len=min_capsule_len, all_genes=('all_gene_sets' in capsule_choice), limited_capsule_names_file=limited_capsule_names_file)\n\t# \t\tcapsules.extend(final_modules)\n\t# \t\tfinalcpgs.extend(modulecpgs)\n\t# \t\tcapsule_names.extend(module_names)\n\n\tfinal_modules=capsules\n\tmodulecpgs=list(set(finalcpgs))\n\tmodule_names=capsule_names\n\n\t# if limited_capsule_names_file and not (selected_sets or gsea_bool):\n\t# \twith open(limited_capsule_names_file) as f:\n\t# \t\tlimited_capsule_names=f.read().replace('\\n',' ').split()\n\t# \tcapsules=[]\n\t# \tcapsule_names=[]\n\t# \tfor i in range(len(module_names)):\n\t# \t\tif module_names[i] in limited_capsule_names:\n\t# \t\t\tcapsule_names.append(module_names[i])\n\t# \t\t\tcapsules.append(final_modules[i])\n\t#\n\t# \tmodulecpgs=list(set(list(reduce(lambda x,y: x+y,capsules))))\n\t# \tfinal_modules=capsules\n\t# \tmodule_names=capsule_names\n\n\tprint(\"{} modules, {} cpgs, {} module names, {} missing\".format(len(final_modules),len(modulecpgs),len(module_names),ma.beta.isnull().sum().sum()))\n\n\treturn final_modules, modulecpgs, module_names\n"
] | [
[
"numpy.hstack",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"pandas.cut",
"torch.cuda.is_available"
],
[
"numpy.split",
"pandas.read_csv",
"numpy.random.seed",
"numpy.arange",
"torch.manual_seed",
"numpy.union1d",
"numpy.setdiff1d",
"sklearn.preprocessing.LabelEncoder",
"numpy.intersect1d",
"pandas.read_pickle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
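The capsule-building code in the row above repeatedly applies one core step: intersect each candidate capsule's CpG probes with the probes actually present on the methylation array, then keep only capsules with at least `min_capsule_len` surviving probes (see `return_caps` / `reduce_caps`). The following is a minimal, hedged sketch of that step only, not code from the repository; the names `filter_capsules`, `toy_capsules`, and `array_cpgs` are illustrative assumptions, and it uses a plain `dask.bag` compute rather than the `LocalCluster`/`Client` setup in the original.

import numpy as np
import dask.bag as db

def filter_capsules(capsules, array_cpgs, min_capsule_len=2):
    """Intersect each capsule with the array's CpGs and drop capsules that end up too short."""
    names = list(capsules.keys())
    bag = db.from_sequence(list(capsules.values()))
    # Map the intersection over capsules in parallel, mirroring reduce_caps above.
    kept = bag.map(lambda cpgs: np.intersect1d(cpgs, array_cpgs)).compute()
    return {name: cpgs.tolist()
            for name, cpgs in zip(names, kept)
            if len(cpgs) >= min_capsule_len}

if __name__ == "__main__":
    toy_capsules = {"geneA": ["cg01", "cg02", "cg03"], "geneB": ["cg04"]}
    array_cpgs = np.array(["cg01", "cg02", "cg04"])
    # geneB falls below min_capsule_len and is dropped.
    print(filter_capsules(toy_capsules, array_cpgs))  # {'geneA': ['cg01', 'cg02']}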
Jeremy-Tian/Data-Lake | [
"62d2aad31e924ffc536cca98001da7671a7a9fde"
] | [
"Sprint3 Creating Redshift Cluster.py"
] | [
"\nimport pandas as pd\nimport boto3\nimport json\n\n\n\nimport configparser\nconfig = configparser.ConfigParser()\nconfig.read_file(open('dwh.cfg'))\n\nKEY = config.get('AWS','KEY')\nSECRET = config.get('AWS','SECRET')\n\nDWH_CLUSTER_TYPE = config.get(\"DWH\",\"DWH_CLUSTER_TYPE\")\nDWH_NUM_NODES = config.get(\"DWH\",\"DWH_NUM_NODES\")\nDWH_NODE_TYPE = config.get(\"DWH\",\"DWH_NODE_TYPE\")\n\nDWH_CLUSTER_IDENTIFIER = config.get(\"DWH\",\"DWH_CLUSTER_IDENTIFIER\")\nDWH_DB = config.get(\"DWH\",\"DWH_DB\")\nDWH_DB_USER = config.get(\"DWH\",\"DWH_DB_USER\")\nDWH_DB_PASSWORD = config.get(\"DWH\",\"DWH_DB_PASSWORD\")\nDWH_PORT = config.get(\"DWH\",\"DWH_PORT\")\n\nDWH_IAM_ROLE_NAME = config.get(\"DWH\", \"DWH_IAM_ROLE_NAME\")\n\n(DWH_DB_USER, DWH_DB_PASSWORD, DWH_DB)\n\npd.DataFrame({\"Param\":\n [\"DWH_CLUSTER_TYPE\", \"DWH_NUM_NODES\", \"DWH_NODE_TYPE\", \"DWH_CLUSTER_IDENTIFIER\", \"DWH_DB\", \"DWH_DB_USER\", \"DWH_DB_PASSWORD\", \"DWH_PORT\", \"DWH_IAM_ROLE_NAME\"],\n \"Value\":\n [DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE, DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER, DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME]\n })\n\n\n# # Create clients for IAM, EC2, S3 and Redshift\n\n# In[69]:\n\n\nimport boto3\n\nec2 = boto3.resource('ec2',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\ns3 = boto3.resource('s3',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\niam = boto3.client('iam',aws_access_key_id=KEY,\n aws_secret_access_key=SECRET,\n region_name='us-west-2'\n )\n\nredshift = boto3.client('redshift',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n\n\nsampleDbBucket = s3.Bucket(\"awssampledbuswest2\")\nfor obj in sampleDbBucket.objects.filter(Prefix=\"ssbgz\"):\n print(obj)\n\n\n\nfrom botocore.exceptions import ClientError\n\n#1.1 Create the role, \ntry:\n print(\"1.1 Creating a new IAM Role\") \n dwhRole = iam.create_role(\n Path='/',\n RoleName=DWH_IAM_ROLE_NAME,\n Description = \"Allows Redshift clusters to call AWS services on your behalf.\",\n AssumeRolePolicyDocument=json.dumps(\n {'Statement': [{'Action': 'sts:AssumeRole',\n 'Effect': 'Allow',\n 'Principal': {'Service': 'redshift.amazonaws.com'}}],\n 'Version': '2012-10-17'})\n ) \nexcept Exception as e:\n print(e)\n \n \nprint(\"1.2 Attaching Policy\")\n\niam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )['ResponseMetadata']['HTTPStatusCode']\n\nprint(\"1.3 Get the IAM role ARN\")\nroleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']\n\nprint(roleArn)\n\n\n# # STEP 2: Redshift Cluster\n# \n# - Create a RedShift Cluster\n\n\n# In[83]:\n\n\ntry:\n response = redshift.create_cluster( \n #HW\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n #Identifiers & Credentials\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n \n #Roles (for s3 access)\n IamRoles=[roleArn] \n )\nexcept Exception as e:\n print(e)\n\n\n# ## 2.1 *Describe* the cluster to see its status\n\n\n\n\ndef prettyRedshiftProps(props):\n pd.set_option('display.max_colwidth', -1)\n keysToShow = [\"ClusterIdentifier\", \"NodeType\", \"ClusterStatus\", \"MasterUsername\", \"DBName\", \"Endpoint\", \"NumberOfNodes\", 'VpcId']\n x = [(k, v) for k,v in props.items() if k in keysToShow]\n return pd.DataFrame(data=x, columns=[\"Key\", 
\"Value\"])\n\nmyClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]\nprettyRedshiftProps(myClusterProps)\n\n\n# 2.2 Take note of the cluster <font color='red'> endpoint and role ARN </font> </h2>\n\n\nDWH_ENDPOINT = myClusterProps['Endpoint']['Address']\nDWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']\nprint(\"DWH_ENDPOINT :: \", endpoint)\nprint(\"DWH_ROLE_ARN :: \", roleArn)\n\n\n# ## STEP 3: Open an incoming TCP port to access the cluster ednpoint\n\n# In[84]:\n\n\ntry:\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\nexcept Exception as e:\n print(e)\n\n\n# # STEP 4: Make sure you can connect to the cluster\n\n\n\nget_ipython().run_line_magic('load_ext', 'sql')\n\n\n\n\nconn_string=\"postgresql://{}:{}@{}:{}/{}\".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB)\nprint(conn_string)\nget_ipython().run_line_magic('sql', '$conn_string')\n\n\n"
] | [
[
"pandas.set_option",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
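The Redshift script above calls describe_clusters immediately after create_cluster, but the endpoint is only populated once the cluster reaches the "available" state. The sketch below is a hedged illustration (not part of the original script) of blocking on that state with a standard boto3 Redshift waiter before reading the endpoint and role ARN; `wait_for_cluster` is a hypothetical helper name, and the `redshift` client and `DWH_CLUSTER_IDENTIFIER` are assumed to be configured as earlier in the script.

import boto3

def wait_for_cluster(redshift, cluster_identifier):
    """Block until the cluster is available, then return (endpoint address, IAM role ARN)."""
    waiter = redshift.get_waiter('cluster_available')
    waiter.wait(ClusterIdentifier=cluster_identifier)
    props = redshift.describe_clusters(
        ClusterIdentifier=cluster_identifier)['Clusters'][0]
    return props['Endpoint']['Address'], props['IamRoles'][0]['IamRoleArn']

# Example usage (region/credentials as configured in the script):
# redshift = boto3.client('redshift', region_name='us-west-2')
# DWH_ENDPOINT, DWH_ROLE_ARN = wait_for_cluster(redshift, DWH_CLUSTER_IDENTIFIER)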
parthpatwa/autokeras | [
"2b23d870e91afdd2bc12663ff6e00e9df9ef855c"
] | [
"autokeras/task.py"
] | [
"import pandas as pd\n\nfrom autokeras import auto_model\nfrom autokeras.hypermodel import head\nfrom autokeras.hypermodel import node\n\n\nclass SupervisedImagePipeline(auto_model.AutoModel):\n\n def __init__(self, outputs, **kwargs):\n super().__init__(inputs=node.ImageInput(),\n outputs=outputs,\n **kwargs)\n\n\nclass ImageClassifier(SupervisedImagePipeline):\n \"\"\"AutoKeras image classification class.\n\n # Arguments\n num_classes: Int. Defaults to None. If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'binary_crossentropy' or\n 'categorical_crossentropy' based on the number of classes.\n metrics: A list of Keras metrics. Defaults to use 'accuracy'.\n name: String. The name of the AutoModel. Defaults to 'image_classifier'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n num_classes=None,\n multi_label=False,\n loss=None,\n metrics=None,\n name='image_classifier',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.ClassificationHead(num_classes=num_classes,\n multi_label=multi_label,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass ImageRegressor(SupervisedImagePipeline):\n \"\"\"AutoKeras image regression class.\n\n # Arguments\n output_dim: Int. The number of output dimensions. Defaults to None.\n If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n name: String. The name of the AutoModel. Defaults to 'image_regressor'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n output_dim=None,\n loss=None,\n metrics=None,\n name='image_regressor',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.RegressionHead(output_dim=output_dim,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass SupervisedTextPipeline(auto_model.AutoModel):\n\n def __init__(self, outputs, **kwargs):\n super().__init__(inputs=node.TextInput(),\n outputs=outputs,\n **kwargs)\n\n\nclass TextClassifier(SupervisedTextPipeline):\n \"\"\"AutoKeras text classification class.\n\n # Arguments\n num_classes: Int. Defaults to None. If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. 
Defaults to use 'binary_crossentropy' or\n 'categorical_crossentropy' based on the number of classes.\n metrics: A list of Keras metrics. Defaults to use 'accuracy'.\n name: String. The name of the AutoModel. Defaults to 'text_classifier'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n num_classes=None,\n multi_label=False,\n loss=None,\n metrics=None,\n name='text_classifier',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.ClassificationHead(num_classes=num_classes,\n multi_label=multi_label,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass TextRegressor(SupervisedTextPipeline):\n \"\"\"AutoKeras text regression class.\n\n # Arguments\n output_dim: Int. The number of output dimensions. Defaults to None.\n If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n name: String. The name of the AutoModel. Defaults to 'text_regressor'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n output_dim=None,\n loss=None,\n metrics=None,\n name='text_regressor',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.RegressionHead(output_dim=output_dim,\n loss=loss,\n metrics=metrics),\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n\nclass SupervisedStructuredDataPipeline(auto_model.AutoModel):\n\n def __init__(self, outputs, column_names, column_types, **kwargs):\n inputs = node.StructuredDataInput()\n inputs.column_types = column_types\n inputs.column_names = column_names\n if column_types:\n for column_type in column_types.values():\n if column_type not in ['categorical', 'numerical']:\n raise ValueError(\n 'Column_types should be either \"categorical\" '\n 'or \"numerical\", but got {name}'.format(name=column_type))\n if column_names and column_types:\n for column_name in column_types:\n if column_name not in column_names:\n raise ValueError('Column_names and column_types are '\n 'mismatched. 
Cannot find column name '\n '{name} in the data.'.format(name=column_name))\n super().__init__(inputs=inputs,\n outputs=outputs,\n **kwargs)\n self._target_col_name = None\n\n def _read_from_csv(self, x, y):\n df = pd.read_csv(x)\n target = df.pop(y).to_numpy()\n return df, target\n\n def fit(self,\n x=None,\n y=None,\n epochs=None,\n callbacks=None,\n validation_split=0,\n validation_data=None,\n **kwargs):\n \"\"\"Search for the best model and hyperparameters for the task.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the training data.\n y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a string corresponding\n to the label column.\n epochs: Int. The number of epochs to train each model during the search.\n If unspecified, we would use epochs equal to 1000 and early stopping\n with patience equal to 30.\n callbacks: List of Keras callbacks to apply during training and\n validation.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset.\n The best model found would be fit on the entire dataset including the\n validation data.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n The best model found would be fit on the training dataset without the\n validation data.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n # x is file path of training data\n if isinstance(x, str):\n self._target_column_name = y\n x, y = self._read_from_csv(x, y)\n if validation_data:\n x_val, y_val = validation_data\n if isinstance(x_val, str):\n validation_data = self._read_from_csv(x_val, y_val)\n\n super().fit(x=x,\n y=y,\n epochs=epochs,\n callbacks=callbacks,\n validation_split=validation_split,\n validation_data=validation_data,\n **kwargs)\n\n def predict(self, x, batch_size=32, **kwargs):\n \"\"\"Predict the output for a given testing data.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Testing data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the testing data.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.predict.\n\n # Returns\n A list of numpy.ndarray objects or a single numpy.ndarray.\n The predicted results.\n \"\"\"\n if isinstance(x, str):\n x = pd.read_csv(x)\n if self._target_col_name in x:\n x.pop(self._target_col_name)\n\n return super().predict(x=x,\n batch_size=batch_size,\n **kwargs)\n\n def evaluate(self, x, y=None, batch_size=32, **kwargs):\n \"\"\"Evaluate the best model for the given data.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Testing data x. 
If the data is from a csv file, it should be a\n string specifying the path of the csv file of the testing data.\n y: String, numpy.ndarray, or tensorflow.Dataset. Testing data y.\n If the data is from a csv file, it should be a string corresponding\n to the label column.\n batch_size: Int. Defaults to 32.\n **kwargs: Any arguments supported by keras.Model.evaluate.\n\n # Returns\n Scalar test loss (if the model has a single output and no metrics) or\n list of scalars (if the model has multiple outputs and/or metrics).\n The attribute model.metrics_names will give you the display labels for\n the scalar outputs.\n \"\"\"\n if isinstance(x, str):\n x, y = self._read_from_csv(x, y)\n return super().evaluate(x=x,\n y=y,\n batch_size=batch_size,\n **kwargs)\n\n\nclass StructuredDataClassifier(SupervisedStructuredDataPipeline):\n \"\"\"AutoKeras structured data classification class.\n\n # Arguments\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. If not None, the column_names need to be specified.\n If None, it will be inferred from the data.\n num_classes: Int. Defaults to None. If None, it will infer from the data.\n multi_label: Boolean. Defaults to False.\n loss: A Keras loss function. Defaults to use 'binary_crossentropy' or\n 'categorical_crossentropy' based on the number of classes.\n metrics: A list of Keras metrics. Defaults to use 'accuracy'.\n name: String. The name of the AutoModel. Defaults to\n 'structured_data_classifier'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize. Defaults to 'val_accuracy'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n column_names=None,\n column_types=None,\n num_classes=None,\n multi_label=False,\n loss=None,\n metrics=None,\n name='structured_data_classifier',\n max_trials=100,\n directory=None,\n objective='val_accuracy',\n seed=None):\n super().__init__(\n outputs=head.ClassificationHead(num_classes=num_classes,\n multi_label=multi_label,\n loss=loss,\n metrics=metrics),\n column_names=column_names,\n column_types=column_types,\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n\n def fit(self,\n x=None,\n y=None,\n epochs=None,\n callbacks=None,\n validation_split=0,\n validation_data=None,\n **kwargs):\n \"\"\"Search for the best model and hyperparameters for the task.\n\n # Arguments\n x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.\n Training data x. If the data is from a csv file, it should be a\n string specifying the path of the csv file of the training data.\n y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.\n If the data is from a csv file, it should be a string corresponding\n to the label column.\n epochs: Int. 
The number of epochs to train each model during the search.\n If unspecified, we would use epochs equal to 1000 and early stopping\n with patience equal to 30.\n callbacks: List of Keras callbacks to apply during training and\n validation.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset.\n validation_data: Data on which to evaluate the loss and any model metrics\n at the end of each epoch. The model will not be trained on this data.\n `validation_data` will override `validation_split`. The type of the\n validation data should be the same as the training data.\n **kwargs: Any arguments supported by keras.Model.fit.\n \"\"\"\n super().fit(x=x,\n y=y,\n epochs=epochs,\n callbacks=callbacks,\n validation_split=validation_split,\n validation_data=validation_data,\n **kwargs)\n\n\nclass StructuredDataRegressor(SupervisedStructuredDataPipeline):\n \"\"\"AutoKeras structured data regression class.\n\n # Arguments\n column_names: A list of strings specifying the names of the columns. The\n length of the list should be equal to the number of columns of the data.\n Defaults to None. If None, it will obtained from the header of the csv\n file or the pandas.DataFrame.\n column_types: Dict. The keys are the column names. The values should either\n be 'numerical' or 'categorical', indicating the type of that column.\n Defaults to None. If not None, the column_names need to be specified.\n If None, it will be inferred from the data.\n loss: A Keras loss function. Defaults to use 'mean_squared_error'.\n metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.\n max_trials: Int. The maximum number of different Keras Models to try.\n The search may finish before reaching the max_trials. Defaults to 100.\n directory: String. The path to a directory for storing the search outputs.\n Defaults to None, which would create a folder with the name of the\n AutoModel in the current directory.\n objective: String. Name of model metric to minimize\n or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.\n seed: Int. Random seed.\n \"\"\"\n\n def __init__(self,\n column_names=None,\n column_types=None,\n output_dim=None,\n loss=None,\n metrics=None,\n name='structured_data_regressor',\n max_trials=100,\n directory=None,\n objective='val_loss',\n seed=None):\n super().__init__(\n outputs=head.RegressionHead(output_dim=output_dim,\n loss=loss,\n metrics=metrics),\n column_names=column_names,\n column_types=column_types,\n max_trials=max_trials,\n directory=directory,\n name=name,\n objective=objective,\n seed=seed)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
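In the AutoKeras task.py above, the structured-data pipeline accepts a CSV path for `x` and a label column name for `y`, reading the frame with pandas and popping the label column (`_read_from_csv`); note that this snapshot stores the label name as `_target_column_name` in fit() while predict() checks `_target_col_name`, so the label column is not stripped when predicting from a CSV. The block below is a hedged, self-contained sketch of that CSV-splitting pattern only; `read_features_and_target`, the file path, and the column name are assumptions for illustration, not AutoKeras API.

import pandas as pd

def read_features_and_target(csv_path, target_column):
    """Return (features DataFrame, target ndarray) from a labelled CSV."""
    df = pd.read_csv(csv_path)
    target = df.pop(target_column).to_numpy()
    return df, target

# Example usage with a hypothetical file and label column:
# x_train, y_train = read_features_and_target('train.csv', 'survived')
# clf = StructuredDataClassifier(max_trials=3)
# clf.fit(x=x_train, y=y_train)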
bhbai/tensorflow | [
"d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3",
"d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3",
"d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3",
"d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3",
"d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3",
"d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3"
] | [
"tensorflow/python/debug/wrappers/framework.py",
"tensorflow/contrib/distributions/python/ops/inverse_gamma.py",
"tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py",
"tensorflow/contrib/distributions/python/ops/transformed_distribution.py",
"tensorflow/python/tools/print_selective_registration_header.py",
"tensorflow/contrib/distributions/python/kernel_tests/bijector_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Framework of debug wrapper sessions.\n\nA debug wrapper session is a wrapper around a TensorFlow Python Session.\nThe wrapper preserves the Session interface, most importantly the run() method,\nwhile providing abilities to:\na) Intercept a run() call to a wrapped session and insert debug tensor watches\n according to externally-specified debug URLs.\n\nb) Release control to an external (i.e., non-Session) object before and after\n the run() call, so that the external object can perform actions such as\n launching a UI to let users inspect the intermediate tensors and partition\n graphs from the run() call.\n\nc) (To be implemented) Intercept a run() call and give control to DebugStepper\n to let it perform stepping / continuing-to actions on the graph.\n\nb) (To be implemented in a future CL) Enter an instruction loop to let an\n external object (e.g., remote client) launch run() and cont() calls\n remotely.\n\n*** The lifetime of a debug wrapper session: ***\n\n1) The wrapper session is created by calling the constructor with a\n wrapped (normal) session as the argument:\n wrapper = FooDebugWrapperSession(sess)\n wherein FooDebugWrapperSession is a concrete subclass implementing the\n abstract BaseDebugWrapperSession class below.\n\n2) Near the end of the constructor call, the on_session_init() callback is\n invoked, with a OnSessionInitRequest object as the argument. The object\n carries the wrapped (normal) session object.\n\n3) The callback handles the request and returns a OnSessionInitResponse\n object with an action field, directing the wrapper session what to do next.\n\nIf the action field in the OnSessionInitResponse is PROCEED, the constuctor\nreturns. Control is released back to the caller of the constructor, which can\ninvoke run() method of wrapper session with the same syntax as a non-wrapped\nsession, e.g.,:\n wrapper.run(fetches, feed_dict=feeds, options=run_options)\n\nBelow, A1 - A2 is the lifetime of a wrapper run() call if the action is\nPROCEED:\n\nA1) Right at the start of each run() call, the on_run_start() callback is\n invoked, with an OnRunStartRequest object carrying information such as\n the fetches, the feed dict, the run options and run metadata used in\n this run call, along with a count of how many run calls has occurred\n on this wrapper session. The callback then returns an OnRunStartResponse\n object, of which the action field directs what the wrapper session\n actually will do of the run() call.\n\n If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,\n with the debug URLs supplied in the debug_urls field of the response.\n These can be file:// or grpc:// URLs, for example.\n\n If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.\n\n If the action is INVOKE_STEPPER, no run() call will be issued to the\n wrapped session. 
But instead, a DebugStepper (i.e., \"continuation\n debugger\") will be used to perform stepping / continue-to actions on\n the graph.\n\nTODO(cais): The event loop for the DebugStepper will request additional\n callbacks including on_cont_start() and on_cont_end(). Add those.\n\nA2) Right before the run() returns, the on_run_end() callback is invoked,\n with an OnRunEndRequest object as the argument, which carries information\n including the actual action performed in the warpper run() call and the\n run_metadata from the run() call.\n\nHowever, if the action field in OnSessionInitResponse is\nREMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop\nthat gives the control to a remote caller.\n\nIn the remote instruction loop, the following steps will happen:\n\nB1) Callback on_instr_start() is invoked. The callback will return an\n OnInstrStartResponse object with an action field which can order one of\n the following actions:\n i) a run() call with fetches, feeds and debug_urls specified.\n ii) a DebugStepper cont() call with target specified.\n iii) value overrides in the cached tensors from the DebugStepper.\n iv) exit the instruction loop.\n\nB2) The wrapper session carries out the action specified above.\n\nB3) If still in the instruction loop, the wrapper session invokes the\n on_instr_end() callback. After the on_instr_end() callback returns, jump\n back to B1.\n\nTODO(cais): Implemented the instruction loop in B1 - B3.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug import debug_utils\nfrom tensorflow.python.debug import stepper\nfrom tensorflow.python.framework import errors\n\n\n# Helper function.\ndef _check_type(obj, expected_type):\n \"\"\"Check if an object is of the expected type.\n\n Args:\n obj: The object being checked.\n expected_type: (type) The expected type of obj.\n\n Raises:\n TypeError: If obj is not an instance of expected_type.\n \"\"\"\n if not isinstance(obj, expected_type):\n raise TypeError(\"Expected type %s; got type %s\" %\n (expected_type, type(obj)))\n\n\nclass OnSessionInitRequest(object):\n \"\"\"Request to an on-session-init callback.\n\n This callback is invoked during the __init__ call to a debug-wrapper session.\n \"\"\"\n\n def __init__(self, sess):\n \"\"\"Constructor.\n\n Args:\n sess: A tensorflow Session object.\n \"\"\"\n\n _check_type(sess, session.BaseSession)\n self.session = sess\n\n\nclass OnSessionInitAction(object):\n \"\"\"Enum-like values for possible action to take on session init.\"\"\"\n\n # Proceed, without special actions, in the wrapper session initialization.\n # What action the wrapper session performs next is determined by the caller\n # of the wrapper session. 
E.g., it can call run().\n PROCEED = \"proceed\"\n\n # Instead of letting the caller of the wrapper session determine what actions\n # the wrapper session will perform next, enter a loop to receive instructions\n # from a remote client.\n # For example, TensorBoard visual debugger can use this action so that it can\n # launch session.run() calls remotely.\n REMOTE_INSTR_LOOP = \"remote_instr_loop\"\n\n\nclass OnSessionInitResponse(object):\n \"\"\"Response from an on-session-init callback.\"\"\"\n\n def __init__(self, action):\n \"\"\"Constructor.\n\n Args:\n action: (`OnSessionInitAction`) Debugger action to take on session init.\n \"\"\"\n _check_type(action, str)\n self.action = action\n\n\nclass OnRunStartRequest(object):\n \"\"\"Request to an on-run-start callback.\n\n This callback is invoked during a run() call of the debug-wrapper\n session, immediately after the run() call counter is incremented.\n \"\"\"\n\n def __init__(self, fetches, feed_dict, run_options, run_metadata,\n run_call_count):\n \"\"\"Constructor of `OnRunStartRequest`.\n\n Args:\n fetches: Fetch targets of the run() call.\n feed_dict: The feed dictionary to the run() call.\n run_options: RunOptions input to the run() call.\n run_metadata: RunMetadata input to the run() call.\n The above four arguments are identical to the input arguments to the\n run() method of a non-wrapped TensorFlow session.\n run_call_count: 1-based count of how many run calls (including this one)\n has been invoked.\n \"\"\"\n self.fetches = fetches\n self.feed_dict = feed_dict\n self.run_options = run_options\n self.run_metadata = run_metadata\n self.run_call_count = run_call_count\n\n\nclass OnRunStartAction(object):\n \"\"\"Enum-like values for possible action to take on start of a run() call.\"\"\"\n\n # Run once with debug tensor-watching.\n DEBUG_RUN = \"debug_run\"\n\n # Run without debug tensor-watching.\n NON_DEBUG_RUN = \"non_debug_run\"\n\n # Instead of running the fetches as a whole, as would normally happen, invoke\n # the (to-be-implemented) debug stepper.\n # TODO(cais): Remove \"to-be-implemented\".\n INVOKE_STEPPER = \"invoke_stepper\"\n\n\nclass OnRunStartResponse(object):\n \"\"\"Request from an on-run-start callback.\n\n The caller of the callback can use this response object to specify what\n action the debug-wrapper session actually takes on the run() call.\n \"\"\"\n\n def __init__(self, action, debug_urls):\n \"\"\"Constructor of `OnRunStartResponse`.\n\n Args:\n action: (`OnRunStartAction`) the action actually taken by the wrapped\n session for the run() call.\n debug_urls: (list of str) debug_urls used in watching the tensors during\n the run() call.\n \"\"\"\n\n _check_type(action, str)\n self.action = action\n\n _check_type(debug_urls, list)\n self.debug_urls = debug_urls\n\n\nclass OnRunEndRequest(object):\n \"\"\"Request to an on-run-end callback.\n\n The callback is invoked immediately before the wrapped run() call ends.\n \"\"\"\n\n def __init__(self,\n performed_action,\n run_metadata=None,\n client_graph_def=None,\n tf_error=None):\n \"\"\"Constructor for `OnRunEndRequest`.\n\n Args:\n performed_action: (`OnRunStartAction`) Actually-performed action by the\n debug-wrapper session.\n run_metadata: run_metadata output from the run() call (if any).\n client_graph_def: (GraphDef) GraphDef from the client side, i.e., from\n the python front end of TensorFlow. 
Can be obtained with\n session.graph.as_graph_def().\n tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred\n during the run (if any).\n \"\"\"\n\n _check_type(performed_action, str)\n self.performed_action = performed_action\n\n if run_metadata is not None:\n _check_type(run_metadata, config_pb2.RunMetadata)\n self.run_metadata = run_metadata\n self.client_graph_def = client_graph_def\n self.tf_error = tf_error\n\n\nclass OnRunEndResponse(object):\n \"\"\"Response from an on-run-end callback.\"\"\"\n\n def __init__(self):\n\n # Currently only a placeholder.\n pass\n\n\nclass BaseDebugWrapperSession(session.SessionInterface):\n \"\"\"Base class of debug-wrapper session classes.\n\n Concrete classes that inherit from this class need to implement the abstract\n methods such as on_session_init, on_run_start and on_run_end.\n \"\"\"\n\n # TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper is\n # is available.\n\n def __init__(self, sess):\n \"\"\"Constructor of `BaseDebugWrapperSession`.\n\n Args:\n sess: An (unwrapped) TensorFlow session instance.\n\n Raises:\n ValueError: On invalid `OnSessionInitAction` value.\n \"\"\"\n\n _check_type(sess, session.BaseSession)\n\n # The session being wrapped.\n self._sess = sess\n\n # Keeps track of number of run calls that have been performed on this\n # debug-wrapper session.\n self._run_call_count = 0\n\n # Invoke on-session-init callback.\n response = self.on_session_init(OnSessionInitRequest(self._sess))\n _check_type(response, OnSessionInitResponse)\n\n if response.action == OnSessionInitAction.PROCEED:\n pass\n elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:\n # TODO(cais): Implement REMOTE_INSTR_LOOP\n raise NotImplementedError(\n \"OnSessionInitAction REMOTE_INSTR_LOOP has not been \"\n \"implemented.\")\n else:\n raise ValueError(\n \"Invalid OnSessionInitAction value: %s\" % response.action)\n\n @property\n def graph(self):\n return self._sess.graph\n\n @property\n def sess_str(self):\n return self._sess.sess_str\n\n @property\n def session(self):\n return self._sess\n\n def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n \"\"\"Wrapper around Session.run() that inserts tensor watch options.\n\n Args:\n fetches: Same as the `fetches` arg to regular `Session.run()`.\n feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.\n options: Same as the `options` arg to regular `Session.run()`.\n run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.\n\n Returns:\n Simply forwards the output of the wrapped `Session.run()` call.\n\n Raises:\n ValueError: On invalid `OnRunStartAction` value.\n \"\"\"\n\n self._run_call_count += 1\n\n # Invoke on-run-start callback and obtain response.\n run_start_resp = self.on_run_start(\n OnRunStartRequest(fetches, feed_dict, options, run_metadata,\n self._run_call_count))\n _check_type(run_start_resp, OnRunStartResponse)\n\n if run_start_resp.action == OnRunStartAction.DEBUG_RUN:\n # Decorate RunOption to fill in debugger tensor watch specifications.\n decorated_run_options = options or config_pb2.RunOptions()\n run_metadata = run_metadata or config_pb2.RunMetadata()\n\n self._decorate_run_options(decorated_run_options,\n run_start_resp.debug_urls)\n\n # Invoke the run() method of the wrapped Session. 
Catch any TensorFlow\n # runtime errors.\n tf_error = None\n try:\n retvals = self._sess.run(fetches,\n feed_dict=feed_dict,\n options=decorated_run_options,\n run_metadata=run_metadata)\n except errors.OpError as op_error:\n tf_error = op_error\n retvals = op_error\n\n run_end_req = OnRunEndRequest(\n run_start_resp.action,\n run_metadata=run_metadata,\n client_graph_def=self._sess.graph.as_graph_def(),\n tf_error=tf_error)\n\n elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or\n run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):\n if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:\n retvals = self.invoke_node_stepper(\n stepper.NodeStepper(self._sess, fetches, feed_dict),\n restore_variable_values_on_exit=True)\n\n # Invoke run() method of the wrapped session.\n retvals = self._sess.run(\n fetches,\n feed_dict=feed_dict,\n options=options,\n run_metadata=run_metadata)\n\n # Prepare arg for the on-run-end callback.\n run_end_req = OnRunEndRequest(run_start_resp.action)\n else:\n raise ValueError(\n \"Invalid OnRunStartAction value: %s\" % run_start_resp.action)\n\n # Invoke on-run-end callback and obtain response.\n run_end_resp = self.on_run_end(run_end_req)\n _check_type(run_end_resp, OnRunEndResponse)\n # Currently run_end_resp is only a placeholder. No action is taken on it.\n\n return retvals\n\n def partial_run_setup(self, fetches, feeds=None):\n \"\"\"Sets up the feeds and fetches for partial runs in the session.\"\"\"\n raise NotImplementedError(\n \"partial_run_setup is not implemented for debug-wrapper sessions.\")\n\n def partial_run(self, handle, fetches, feed_dict=None):\n raise NotImplementedError(\n \"partial_run is not implemented for debug-wrapper sessions.\")\n\n def _decorate_run_options(self, run_options, debug_urls):\n \"\"\"Modify a RunOptions object for debug tensor watching.\n\n Specifies request for outputting partition graphs. 
Adds\n debug_tensor_watch_opts with proper debug URLs.\n\n Args:\n run_options: (RunOptions) the modified RunOptions object.\n debug_urls: (list of str) debug URLs to be entered in run_options.\n debug_tensor_watch_opts.\n \"\"\"\n\n run_options.output_partition_graphs = True\n debug_utils.watch_graph(\n run_options, self._sess.graph, debug_urls=debug_urls)\n\n @abc.abstractmethod\n def on_session_init(self, request):\n \"\"\"Callback invoked during construction of the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens right before the constructor ends.\n\n Args:\n request: (`OnSessionInitRequest`) callback request carrying information\n such as the session being wrapped.\n\n Returns:\n An instance of `OnSessionInitResponse`.\n \"\"\"\n\n @abc.abstractmethod\n def on_run_start(self, request):\n \"\"\"Callback invoked on run() calls to the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens after the wrapper's run() call is entered,\n after an increment of run call counter.\n\n Args:\n request: (`OnRunStartRequest`) callback request object carrying\n information about the run call such as the fetches, feed dict, run\n options, run metadata, and how many `run()` calls to this wrapper\n session have occurred.\n\n Returns:\n An instance of `OnRunStartResponse`, carrying information to\n 1) direct the wrapper session to perform a specified action (e.g., run\n with or without debug tensor watching, invoking the stepper.)\n 2) debug URLs used to watch the tensors.\n \"\"\"\n\n @abc.abstractmethod\n def on_run_end(self, request):\n \"\"\"Callback invoked on run() calls to the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens right before the wrapper exits its run() call.\n\n Args:\n request: (`OnRunEndRequest`) callback request object carrying information\n such as the actual action performed by the session wrapper for the\n run() call.\n\n Returns:\n An instance of `OnRunStartResponse`.\n \"\"\"\n\n def __enter__(self):\n return self._sess.__enter__()\n\n def __exit__(self, exec_type, exec_value, exec_tb):\n self._sess.__exit__(exec_type, exec_value, exec_tb)\n\n def close(self):\n self._sess.close()\n\n # TODO(cais): Add _node_name_regex_whitelist and\n # _node_op_type_regex_whitelist.\n\n @abc.abstractmethod\n def invoke_node_stepper(self,\n node_stepper,\n restore_variable_values_on_exit=True):\n \"\"\"Callback invoked when the client intends to step through graph nodes.\n\n Args:\n node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used\n in this stepping session.\n restore_variable_values_on_exit: (bool) Whether any variables whose values\n have been altered during this node-stepper invocation should be restored\n to their old values when this invocation ends.\n\n Returns:\n The same return values as the `Session.run()` call on the same fetches as\n the NodeStepper.\n \"\"\"\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The InverseGamma distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops import distribution\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import random_ops\n\n\nclass InverseGamma(distribution.Distribution):\n \"\"\"The `InverseGamma` distribution with parameter alpha and beta.\n\n The parameters are the shape and inverse scale parameters alpha, beta.\n\n The PDF of this distribution is:\n\n ```pdf(x) = (beta^alpha)/Gamma(alpha)(x^(-alpha-1))e^(-beta/x), x > 0```\n\n and the CDF of this distribution is:\n\n ```cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0```\n\n where GammaInc is the upper incomplete Gamma function.\n\n Examples:\n\n ```python\n dist = InverseGamma(alpha=3.0, beta=2.0)\n dist2 = InverseGamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])\n ```\n\n \"\"\"\n\n def __init__(self,\n alpha,\n beta,\n validate_args=False,\n allow_nan_stats=True,\n name=\"InverseGamma\"):\n \"\"\"Construct InverseGamma distributions with parameters `alpha` and `beta`.\n\n The parameters `alpha` and `beta` must be shaped in a way that supports\n broadcasting (e.g. `alpha + beta` is a valid operation).\n\n Args:\n alpha: Floating point tensor, the shape params of the\n distribution(s).\n alpha must contain only positive values.\n beta: Floating point tensor, the scale params of the distribution(s).\n beta must contain only positive values.\n validate_args: `Boolean`, default `False`. Whether to assert that\n `a > 0`, `b > 0`, and that `x > 0` in the methods `prob(x)` and\n `log_prob(x)`. If `validate_args` is `False` and the inputs are\n invalid, correct behavior is not guaranteed.\n allow_nan_stats: `Boolean`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. 
If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: The name to prepend to all ops created by this distribution.\n\n Raises:\n TypeError: if `alpha` and `beta` are different dtypes.\n \"\"\"\n parameters = locals()\n parameters.pop(\"self\")\n with ops.name_scope(name, values=[alpha, beta]) as ns:\n with ops.control_dependencies([\n check_ops.assert_positive(alpha),\n check_ops.assert_positive(beta),\n ] if validate_args else []):\n self._alpha = array_ops.identity(alpha, name=\"alpha\")\n self._beta = array_ops.identity(beta, name=\"beta\")\n super(InverseGamma, self).__init__(\n dtype=self._alpha.dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n is_continuous=True,\n is_reparameterized=False,\n parameters=parameters,\n graph_parents=[self._alpha, self._beta],\n name=ns)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return dict(\n zip((\"alpha\", \"beta\"), ([ops.convert_to_tensor(\n sample_shape, dtype=dtypes.int32)] * 2)))\n\n @property\n def alpha(self):\n \"\"\"Shape parameter.\"\"\"\n return self._alpha\n\n @property\n def beta(self):\n \"\"\"Scale parameter.\"\"\"\n return self._beta\n\n def _batch_shape(self):\n return array_ops.broadcast_dynamic_shape(\n array_ops.shape(self.alpha), array_ops.shape(self.beta))\n\n def _get_batch_shape(self):\n return array_ops.broadcast_static_shape(\n self.alpha.get_shape(), self.beta.get_shape())\n\n def _event_shape(self):\n return constant_op.constant([], dtype=dtypes.int32)\n\n def _get_event_shape(self):\n return tensor_shape.scalar()\n\n def _sample_n(self, n, seed=None):\n \"\"\"See the documentation for tf.random_gamma for more details.\"\"\"\n return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,\n dtype=self.dtype, seed=seed)\n\n def _log_prob(self, x):\n x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if\n self.validate_args else [], x)\n return (self.alpha * math_ops.log(self.beta) -\n math_ops.lgamma(self.alpha) -\n (self.alpha + 1.) * math_ops.log(x) - self.beta / x)\n\n def _prob(self, x):\n return math_ops.exp(self._log_prob(x))\n\n def _log_cdf(self, x):\n return math_ops.log(self._cdf(x))\n\n def _cdf(self, x):\n x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if\n self.validate_args else [], x)\n # Note that igammac returns the upper regularized incomplete gamma\n # function Q(a, x), which is what we want for the CDF.\n return math_ops.igammac(self.alpha, self.beta / x)\n\n @distribution_util.AppendDocstring(\n \"\"\"This is defined to be\n\n ```\n entropy = alpha - log(beta) + log(Gamma(alpha))\n + (1-alpha)digamma(alpha)\n ```\n\n where digamma(alpha) is the digamma function.\"\"\")\n def _entropy(self):\n return (self.alpha +\n math_ops.log(self.beta) +\n math_ops.lgamma(self.alpha) -\n (1. + self.alpha) * math_ops.digamma(self.alpha))\n\n @distribution_util.AppendDocstring(\n \"\"\"The mean of an inverse gamma distribution is `beta / (alpha - 1)`,\n when `alpha > 1`, and `NaN` otherwise. 
If `self.allow_nan_stats` is\n `False`, an exception will be raised rather than returning `NaN`\"\"\")\n def _mean(self):\n mean = self.beta / (self.alpha - 1.)\n if self.allow_nan_stats:\n nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())\n return array_ops.where(\n self.alpha > 1., mean,\n array_ops.fill(self.batch_shape(), nan, name=\"nan\"))\n else:\n return control_flow_ops.with_dependencies([\n check_ops.assert_less(\n array_ops.ones((), self.dtype), self.alpha,\n message=\"mean not defined for components of self.alpha <= 1\"),\n ], mean)\n\n @distribution_util.AppendDocstring(\n \"\"\"Variance for inverse gamma is defined only for `alpha > 2`. If\n `self.allow_nan_stats` is `False`, an exception will be raised rather\n than returning `NaN`.\"\"\")\n def _variance(self):\n var = (math_ops.square(self.beta) /\n (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))\n if self.allow_nan_stats:\n nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())\n return array_ops.where(\n self.alpha > 2., var,\n array_ops.fill(self.batch_shape(), nan, name=\"nan\"))\n else:\n return control_flow_ops.with_dependencies([\n check_ops.assert_less(\n constant_op.constant(2., dtype=self.dtype), self.alpha,\n message=\"variance not defined for components of alpha <= 2\"),\n ], var)\n\n def _mode(self):\n \"\"\"The mode of an inverse gamma distribution is `beta / (alpha + 1)`.\"\"\"\n return self.beta / (self.alpha + 1.)\n\n\nclass InverseGammaWithSoftplusAlphaBeta(InverseGamma):\n \"\"\"Inverse Gamma with softplus applied to `alpha` and `beta`.\"\"\"\n\n def __init__(self,\n alpha,\n beta,\n validate_args=False,\n allow_nan_stats=True,\n name=\"InverseGammaWithSoftplusAlphaBeta\"):\n parameters = locals()\n parameters.pop(\"self\")\n with ops.name_scope(name, values=[alpha, beta]) as ns:\n super(InverseGammaWithSoftplusAlphaBeta, self).__init__(\n alpha=nn.softplus(alpha, name=\"softplus_alpha\"),\n beta=nn.softplus(beta, name=\"softplus_gamma\"),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=ns)\n self._parameters = parameters\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for RNN cells.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport sys\n\n# TODO: #6568 Remove this hack that makes dlopen() not crash.\nif hasattr(sys, \"getdlopenflags\") and hasattr(sys, \"setdlopenflags\"):\n import ctypes\n sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)\n\nimport numpy as np\n\n# TODO(ebrevdo): Remove once _linear is fully deprecated.\n# pylint: disable=protected-access\n\nfrom tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl\nfrom tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear as linear\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import test\n\n# pylint: enable=protected-access\n\n\nclass RNNCellTest(test.TestCase):\n\n def testLinear(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(1.0)):\n x = array_ops.zeros([1, 2])\n l = linear([x], 2, False)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([l], {x.name: np.array([[1., 2.]])})\n self.assertAllClose(res[0], [[3.0, 3.0]])\n\n # Checks prevent you from accidentally creating a shared function.\n with self.assertRaises(ValueError):\n l1 = linear([x], 2, False)\n\n # But you can create a new one in a new scope and share the variables.\n with variable_scope.variable_scope(\"l1\") as new_scope:\n l1 = linear([x], 2, False)\n with variable_scope.variable_scope(new_scope, reuse=True):\n linear([l1], 2, False)\n self.assertEqual(len(variables_lib.trainable_variables()), 2)\n\n def testBasicRNNCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 2])\n g, _ = core_rnn_cell_impl.BasicRNNCell(2)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g], {x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1]])})\n self.assertEqual(res[0].shape, (1, 2))\n\n def testGRUCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 2])\n g, _ = core_rnn_cell_impl.GRUCell(2)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = 
sess.run(\n [g], {x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1]])})\n # Smoke test\n self.assertAllClose(res[0], [[0.175991, 0.175991]])\n with variable_scope.variable_scope(\n \"other\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros(\n [1, 3]) # Test GRUCell with input_size != num_units.\n m = array_ops.zeros([1, 2])\n g, _ = core_rnn_cell_impl.GRUCell(2)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g],\n {x.name: np.array([[1., 1., 1.]]),\n m.name: np.array([[0.1, 0.1]])})\n # Smoke test\n self.assertAllClose(res[0], [[0.156736, 0.156736]])\n\n def testBasicLSTMCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 8])\n g, out_m = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.BasicLSTMCell(\n 2, state_is_tuple=False)] * 2,\n state_is_tuple=False)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, out_m],\n {x.name: np.array([[1., 1.]]),\n m.name: 0.1 * np.ones([1, 8])})\n self.assertEqual(len(res), 2)\n variables = variables_lib.global_variables()\n self.assertEqual(4, len(variables))\n self.assertEquals(variables[0].op.name,\n \"root/multi_rnn_cell/cell_0/basic_lstm_cell/weights\")\n self.assertEquals(variables[1].op.name,\n \"root/multi_rnn_cell/cell_0/basic_lstm_cell/biases\")\n self.assertEquals(variables[2].op.name,\n \"root/multi_rnn_cell/cell_1/basic_lstm_cell/weights\")\n self.assertEquals(variables[3].op.name,\n \"root/multi_rnn_cell/cell_1/basic_lstm_cell/biases\")\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.24024698, 0.24024698]])\n expected_mem = np.array([[\n 0.68967271, 0.68967271, 0.44848421, 0.44848421, 0.39897051,\n 0.39897051, 0.24024698, 0.24024698\n ]])\n self.assertAllClose(res[1], expected_mem)\n with variable_scope.variable_scope(\n \"other\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros(\n [1, 3]) # Test BasicLSTMCell with input_size != num_units.\n m = array_ops.zeros([1, 4])\n g, out_m = core_rnn_cell_impl.BasicLSTMCell(\n 2, state_is_tuple=False)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, out_m],\n {x.name: np.array([[1., 1., 1.]]),\n m.name: 0.1 * np.ones([1, 4])})\n self.assertEqual(len(res), 2)\n\n def testBasicLSTMCellStateTupleType(self):\n with self.test_session():\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m0 = (array_ops.zeros([1, 2]),) * 2\n m1 = (array_ops.zeros([1, 2]),) * 2\n cell = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.BasicLSTMCell(2)] * 2, state_is_tuple=True)\n self.assertTrue(isinstance(cell.state_size, tuple))\n self.assertTrue(\n isinstance(cell.state_size[0], core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(\n isinstance(cell.state_size[1], core_rnn_cell_impl.LSTMStateTuple))\n\n # Pass in regular tuples\n _, (out_m0, out_m1) = cell(x, (m0, m1))\n self.assertTrue(isinstance(out_m0, core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(isinstance(out_m1, core_rnn_cell_impl.LSTMStateTuple))\n\n # Pass in LSTMStateTuples\n variable_scope.get_variable_scope().reuse_variables()\n zero_state = cell.zero_state(1, dtypes.float32)\n self.assertTrue(isinstance(zero_state, tuple))\n self.assertTrue(\n isinstance(zero_state[0], 
core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(\n isinstance(zero_state[1], core_rnn_cell_impl.LSTMStateTuple))\n _, (out_m0, out_m1) = cell(x, zero_state)\n self.assertTrue(isinstance(out_m0, core_rnn_cell_impl.LSTMStateTuple))\n self.assertTrue(isinstance(out_m1, core_rnn_cell_impl.LSTMStateTuple))\n\n def testBasicLSTMCellWithStateTuple(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m0 = array_ops.zeros([1, 4])\n m1 = array_ops.zeros([1, 4])\n cell = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.BasicLSTMCell(\n 2, state_is_tuple=False)] * 2,\n state_is_tuple=True)\n g, (out_m0, out_m1) = cell(x, (m0, m1))\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([g, out_m0, out_m1], {\n x.name: np.array([[1., 1.]]),\n m0.name: 0.1 * np.ones([1, 4]),\n m1.name: 0.1 * np.ones([1, 4])\n })\n self.assertEqual(len(res), 3)\n # The numbers in results were not calculated, this is just a smoke test.\n # Note, however, these values should match the original\n # version having state_is_tuple=False.\n self.assertAllClose(res[0], [[0.24024698, 0.24024698]])\n expected_mem0 = np.array(\n [[0.68967271, 0.68967271, 0.44848421, 0.44848421]])\n expected_mem1 = np.array(\n [[0.39897051, 0.39897051, 0.24024698, 0.24024698]])\n self.assertAllClose(res[1], expected_mem0)\n self.assertAllClose(res[2], expected_mem1)\n\n def testLSTMCell(self):\n with self.test_session() as sess:\n num_units = 8\n num_proj = 6\n state_size = num_units + num_proj\n batch_size = 3\n input_size = 2\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([batch_size, input_size])\n m = array_ops.zeros([batch_size, state_size])\n cell = core_rnn_cell_impl.LSTMCell(\n num_units=num_units,\n num_proj=num_proj,\n forget_bias=1.0,\n state_is_tuple=False)\n output, state = cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([output, state], {\n x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),\n m.name: 0.1 * np.ones((batch_size, state_size))\n })\n self.assertEqual(len(res), 2)\n # The numbers in results were not calculated, this is mostly just a\n # smoke test.\n self.assertEqual(res[0].shape, (batch_size, num_proj))\n self.assertEqual(res[1].shape, (batch_size, state_size))\n # Different inputs so different outputs and states\n for i in range(1, batch_size):\n self.assertTrue(\n float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)\n self.assertTrue(\n float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)\n\n def testLSTMCellVariables(self):\n with self.test_session():\n num_units = 8\n num_proj = 6\n state_size = num_units + num_proj\n batch_size = 3\n input_size = 2\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([batch_size, input_size])\n m = array_ops.zeros([batch_size, state_size])\n cell = core_rnn_cell_impl.LSTMCell(\n num_units=num_units,\n num_proj=num_proj,\n forget_bias=1.0,\n state_is_tuple=False)\n cell(x, m) # Execute to create variables\n variables = variables_lib.global_variables()\n self.assertEquals(variables[0].op.name, \"root/lstm_cell/weights\")\n self.assertEquals(variables[1].op.name, \"root/lstm_cell/biases\")\n self.assertEquals(variables[2].op.name,\n \"root/lstm_cell/projection/weights\")\n\n def testOutputProjectionWrapper(self):\n with self.test_session() as 
sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 3])\n m = array_ops.zeros([1, 3])\n cell = core_rnn_cell_impl.OutputProjectionWrapper(\n core_rnn_cell_impl.GRUCell(3), 2)\n g, new_m = cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([g, new_m], {\n x.name: np.array([[1., 1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1]])\n })\n self.assertEqual(res[1].shape, (1, 3))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.231907, 0.231907]])\n\n def testInputProjectionWrapper(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 3])\n cell = core_rnn_cell_impl.InputProjectionWrapper(\n core_rnn_cell_impl.GRUCell(3), num_proj=3)\n g, new_m = cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, new_m],\n {x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1]])})\n self.assertEqual(res[1].shape, (1, 3))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])\n\n def testDropoutWrapper(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 3])\n m = array_ops.zeros([1, 3])\n keep = array_ops.zeros([]) + 1\n g, new_m = core_rnn_cell_impl.DropoutWrapper(\n core_rnn_cell_impl.GRUCell(3), keep, keep)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([g, new_m], {\n x.name: np.array([[1., 1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1]])\n })\n self.assertEqual(res[1].shape, (1, 3))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])\n\n def testEmbeddingWrapper(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 1], dtype=dtypes.int32)\n m = array_ops.zeros([1, 2])\n embedding_cell = core_rnn_cell_impl.EmbeddingWrapper(\n core_rnn_cell_impl.GRUCell(2),\n embedding_classes=3,\n embedding_size=2)\n self.assertEqual(embedding_cell.output_size, 2)\n g, new_m = embedding_cell(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g, new_m],\n {x.name: np.array([[1]]),\n m.name: np.array([[0.1, 0.1]])})\n self.assertEqual(res[1].shape, (1, 2))\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res[0], [[0.17139, 0.17139]])\n\n def testEmbeddingWrapperWithDynamicRnn(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\"root\"):\n inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)\n input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)\n embedding_cell = core_rnn_cell_impl.EmbeddingWrapper(\n core_rnn_cell_impl.BasicLSTMCell(\n 1, state_is_tuple=True),\n embedding_classes=1,\n embedding_size=2)\n outputs, _ = rnn.dynamic_rnn(\n cell=embedding_cell,\n inputs=inputs,\n sequence_length=input_lengths,\n dtype=dtypes.float32)\n sess.run([variables_lib.global_variables_initializer()])\n # This will fail if output's dtype is inferred from input's.\n sess.run(outputs)\n\n def testMultiRNNCell(self):\n 
with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 4])\n _, ml = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.GRUCell(2)] * 2, state_is_tuple=False)(x, m)\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(ml, {\n x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1, 0.1, 0.1]])\n })\n # The numbers in results were not calculated, this is just a smoke test.\n self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])\n\n def testMultiRNNCellWithStateTuple(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m_bad = array_ops.zeros([1, 4])\n m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))\n\n # Test incorrectness of state\n with self.assertRaisesRegexp(ValueError, \"Expected state .* a tuple\"):\n core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.GRUCell(2)] * 2,\n state_is_tuple=True)(x, m_bad)\n\n _, ml = core_rnn_cell_impl.MultiRNNCell(\n [core_rnn_cell_impl.GRUCell(2)] * 2, state_is_tuple=True)(x, m_good)\n\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(ml, {\n x.name: np.array([[1., 1.]]),\n m_good[0].name: np.array([[0.1, 0.1]]),\n m_good[1].name: np.array([[0.1, 0.1]])\n })\n\n # The numbers in results were not calculated, this is just a\n # smoke test. However, these numbers should match those of\n # the test testMultiRNNCell.\n self.assertAllClose(res[0], [[0.175991, 0.175991]])\n self.assertAllClose(res[1], [[0.13248, 0.13248]])\n\n\nclass SlimRNNCellTest(test.TestCase):\n\n def testBasicRNNCell(self):\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n x = array_ops.zeros([1, 2])\n m = array_ops.zeros([1, 2])\n my_cell = functools.partial(basic_rnn_cell, num_units=2)\n # pylint: disable=protected-access\n g, _ = core_rnn_cell_impl._SlimRNNCell(my_cell)(x, m)\n # pylint: enable=protected-access\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run(\n [g], {x.name: np.array([[1., 1.]]),\n m.name: np.array([[0.1, 0.1]])})\n self.assertEqual(res[0].shape, (1, 2))\n\n def testBasicRNNCellMatch(self):\n batch_size = 32\n input_size = 100\n num_units = 10\n with self.test_session() as sess:\n with variable_scope.variable_scope(\n \"root\", initializer=init_ops.constant_initializer(0.5)):\n inputs = random_ops.random_uniform((batch_size, input_size))\n _, initial_state = basic_rnn_cell(inputs, None, num_units)\n my_cell = functools.partial(basic_rnn_cell, num_units=num_units)\n # pylint: disable=protected-access\n slim_cell = core_rnn_cell_impl._SlimRNNCell(my_cell)\n # pylint: enable=protected-access\n slim_outputs, slim_state = slim_cell(inputs, initial_state)\n rnn_cell = core_rnn_cell_impl.BasicRNNCell(num_units)\n variable_scope.get_variable_scope().reuse_variables()\n outputs, state = rnn_cell(inputs, initial_state)\n self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())\n self.assertEqual(slim_state.get_shape(), state.get_shape())\n sess.run([variables_lib.global_variables_initializer()])\n res = sess.run([slim_outputs, slim_state, outputs, state])\n self.assertAllClose(res[0], res[2])\n self.assertAllClose(res[1], res[3])\n\n\ndef basic_rnn_cell(inputs, state, num_units, scope=None):\n if state is None:\n if inputs 
is not None:\n batch_size = inputs.get_shape()[0]\n dtype = inputs.dtype\n else:\n batch_size = 0\n dtype = dtypes.float32\n init_output = array_ops.zeros(\n array_ops.stack([batch_size, num_units]), dtype=dtype)\n init_state = array_ops.zeros(\n array_ops.stack([batch_size, num_units]), dtype=dtype)\n init_output.set_shape([batch_size, num_units])\n init_state.set_shape([batch_size, num_units])\n return init_output, init_state\n else:\n with variable_scope.variable_scope(scope, \"basic_rnn_cell\",\n [inputs, state]):\n output = math_ops.tanh(linear([inputs, state], num_units, True))\n return output, output\n\n\nif __name__ == \"__main__\":\n test.main()\n",
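For reference, the cell exercised by `testBasicRNNCell` above reduces to one affine map over the concatenated input and state followed by a `tanh`. A small NumPy sketch of that single step follows; `basic_rnn_step` is my own name and this is not the TensorFlow implementation, just the recurrence it computes.

```python
# Illustrative sketch (assumption: my own helper) of a vanilla RNN cell step:
#   output = new_state = tanh([x, h] @ W + b)
import numpy as np

def basic_rnn_step(x, h, W, b):
    """One step of a basic RNN cell: concat input and state, affine map, tanh."""
    return np.tanh(np.concatenate([x, h], axis=1) @ W + b)

# With all weights at 0.5 and zero bias, x = [[1., 1.]] and h = [[0.1, 0.1]]
# give tanh(0.5 * (1 + 1 + 0.1 + 0.1)) = tanh(1.1) in both output units.
W = np.full((4, 2), 0.5)
b = np.zeros(2)
out = basic_rnn_step(np.array([[1., 1.]]), np.array([[0.1, 0.1]]), W, b)
assert out.shape == (1, 2)
assert np.allclose(out, np.tanh(1.1))
```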
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A Transformed Distribution class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops import bijector as bijectors\nfrom tensorflow.contrib.distributions.python.ops import distribution as distributions\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\n\n__all__ = [\n \"TransformedDistribution\",\n]\n\n_condition_kwargs_dict = {\n \"bijector_kwargs\": (\"Python dictionary of arg names/values \"\n \"forwarded to the bijector.\"),\n \"distribution_kwargs\": (\"Python dictionary of arg names/values \"\n \"forwarded to the distribution.\"),\n}\n\n\n# The following helper functions attempt to statically perform a TF operation.\n# These functions make debugging easier since we can do more validation during\n# graph construction.\n\n\ndef _static_value(x):\n \"\"\"Returns the static value of a `Tensor` or `None`.\"\"\"\n return tensor_util.constant_value(ops.convert_to_tensor(x))\n\n\ndef _logical_and(*args):\n \"\"\"Convenience function which attempts to statically `reduce_all`.\"\"\"\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n return constant_op.constant(False)\n if all(x is not None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)\n\n\ndef _logical_equal(x, y):\n \"\"\"Convenience function which attempts to statically compute `x == y`.\"\"\"\n x_ = _static_value(x)\n y_ = _static_value(y)\n if x_ is None or y_ is None:\n return math_ops.equal(x, y)\n return constant_op.constant(np.array_equal(x_, y_))\n\n\ndef _logical_not(x):\n \"\"\"Convenience function which attempts to statically apply `logical_not`.\"\"\"\n x_ = _static_value(x)\n if x_ is None:\n return math_ops.logical_not(x)\n return constant_op.constant(np.logical_not(x_))\n\n\ndef _concat_vectors(*args):\n \"\"\"Convenience function which concatenates input vectors.\"\"\"\n args_ = [_static_value(x) for x in args]\n if any(x_ is None for x_ in args_):\n return array_ops.concat(args, 0)\n return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])\n\n\ndef _pick_scalar_condition(pred, cond_true, cond_false):\n \"\"\"Convenience function which chooses the condition based on the 
predicate.\"\"\"\n # Note: This function is only valid if all of pred, cond_true, and cond_false\n # are scalars. This means its semantics are arguably more like tf.cond than\n # tf.select even though we use tf.select to implement it.\n pred_ = _static_value(pred)\n if pred_ is None:\n return array_ops.where(pred, cond_true, cond_false)\n return cond_true if pred_ else cond_false\n\n\ndef _ones_like(x):\n \"\"\"Convenience function attempts to statically construct `ones_like`.\"\"\"\n # Should only be used for small vectors.\n if x.get_shape().is_fully_defined():\n return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)\n return array_ops.ones_like(x)\n\n\ndef _ndims_from_shape(shape):\n \"\"\"Returns `Tensor`'s `rank` implied by a `Tensor` shape.\"\"\"\n if shape.get_shape().ndims not in (None, 1):\n raise ValueError(\"input is not a valid shape: not 1D\")\n if not shape.dtype.is_integer:\n raise TypeError(\"input is not a valid shape: wrong dtype\")\n if shape.get_shape().is_fully_defined():\n return constant_op.constant(shape.get_shape().as_list()[0])\n return array_ops.shape(shape)[0]\n\n\ndef _is_scalar_from_shape(shape):\n \"\"\"Returns `True` `Tensor` if `Tensor` shape implies a scalar.\"\"\"\n return _logical_equal(_ndims_from_shape(shape), 0)\n\n\nclass TransformedDistribution(distributions.Distribution):\n \"\"\"A Transformed Distribution.\n\n A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,\n and a deterministic, invertible, differentiable transform, `Y = g(X)`. The\n transform is typically an instance of the `Bijector` class and the base\n distribution is typically an instance of the `Distribution` class.\n\n A `Bijector` is expected to implement the following functions:\n - `forward`,\n - `inverse`,\n - `inverse_log_det_jacobian`.\n The semantics of these functions are outlined in the `Bijector` documentation.\n\n We now describe how a `TransformedDistribution` alters the input/outputs of a\n `Distribution` associated with a random variable (rv) `X`.\n\n Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function\n of random variable `Y`; write the probability density function `pdf(Y=y) :=\n d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative wrt to `Y` evaluated at\n `y`. 
Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism,\n i.e., a non-random, continuous, differentiable, and invertible function.\n Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian\n of `g` evaluated at `x`.\n\n A `TransformedDistribution` implements the following operations:\n\n * `sample`:\n\n Mathematically:\n\n ```none\n Y = g(X)\n ```\n\n Programmatically:\n\n ```python\n return bijector.forward(distribution.sample(...))\n ```\n\n * `log_prob`:\n\n Mathematically:\n\n ```none\n (log o pdf)(Y=y) = (log o pdf o g^{-1})(y) +\n (log o abs o det o J o g^{-1})(y)\n ```\n\n Programmatically:\n\n ```python\n return (distribution.log_prob(bijector.inverse(x)) +\n bijector.inverse_log_det_jacobian(x))\n ```\n\n * `log_cdf`:\n\n Mathematically:\n\n ```none\n (log o cdf)(Y=y) = (log o cdf o g^{-1})(y)\n ```\n\n Programmatically:\n\n ```python\n return distribution.log_cdf(bijector.inverse(x))\n ```\n\n * and similarly for: `cdf`, `prob`, `log_survival_function`,\n `survival_function`.\n\n A simple example constructing a Log-Normal distribution from a Normal\n distribution:\n\n ```python\n ds = tf.contrib.distributions\n log_normal = ds.TransformedDistribution(\n distribution=ds.Normal(mu=mu, sigma=sigma),\n bijector=ds.bijector.Exp(),\n name=\"LogNormalTransformedDistribution\")\n ```\n\n A `LogNormal` made from callables:\n\n ```python\n ds = tf.contrib.distributions\n log_normal = ds.TransformedDistribution(\n distribution=ds.Normal(mu=mu, sigma=sigma),\n bijector=ds.bijector.Inline(\n forward_fn=tf.exp,\n inverse_fn=tf.log,\n inverse_log_det_jacobian_fn=(\n lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1)),\n name=\"LogNormalTransformedDistribution\")\n ```\n\n Another example constructing a Normal from a StandardNormal:\n\n ```python\n ds = tf.contrib.distributions\n normal = ds.TransformedDistribution(\n distribution=ds.Normal(mu=0, sigma=1),\n bijector=ds.bijector.ScaleAndShift(loc=mu, scale=sigma, event_ndims=0),\n name=\"NormalTransformedDistribution\")\n ```\n\n A `TransformedDistribution`'s batch- and event-shape are implied by the base\n distribution unless explicitly overridden by `batch_shape` or `event_shape`\n arguments. Specifying an overriding `batch_shape` (`event_shape`) is\n permitted only if the base distribution has scalar batch-shape (event-shape).\n The bijector is applied to the distribution as if the distribution possessed\n the overridden shape(s). The following example demonstrates how to construct a\n multivariate Normal as a `TransformedDistribution`.\n\n ```python\n bs = tf.contrib.distributions.bijector\n ds = tf.contrib.distributions\n # We will create two MVNs with batch_shape = event_shape = 2.\n mean = [[-1., 0], # batch:0\n [0., 1]] # batch:1\n chol_cov = [[[1., 0],\n [0, 1]], # batch:0\n [[1, 0],\n [2, 2]]] # batch:1\n mvn1 = ds.TransformedDistribution(\n distribution=ds.Normal(mu=0., sigma=1.),\n bijector=bs.Affine(shift=mean, tril=chol_cov),\n batch_shape=[2], # Valid because base_distribution.batch_shape == [].\n event_shape=[2]) # Valid because base_distribution.event_shape == [].\n mvn2 = ds.MultivariateNormalCholesky(mu=mean, chol=chol_cov)\n # mvn1.log_prob(x) == mvn2.log_prob(x)\n ```\n\n \"\"\"\n\n def __init__(self,\n distribution,\n bijector=None,\n batch_shape=None,\n event_shape=None,\n validate_args=False,\n name=None):\n \"\"\"Construct a Transformed Distribution.\n\n Args:\n distribution: The base distribution instance to transform. 
Typically an\n instance of `Distribution`.\n bijector: The object responsible for calculating the transformation.\n Typically an instance of `Bijector`. `None` means `Identity()`.\n batch_shape: `integer` vector `Tensor` which overrides `distribution`\n `batch_shape`; valid only if `distribution.is_scalar_batch()`.\n event_shape: `integer` vector `Tensor` which overrides `distribution`\n `event_shape`; valid only if `distribution.is_scalar_event()`.\n validate_args: Python Boolean. Whether to validate input with asserts.\n If `validate_args` is `False`, and the inputs are invalid,\n correct behavior is not guaranteed.\n name: The name for the distribution. Default:\n `bijector.name + distribution.name`.\n \"\"\"\n parameters = locals()\n parameters.pop(\"self\")\n name = name or ((\"\" if bijector is None else bijector.name) +\n distribution.name)\n with ops.name_scope(name, values=[event_shape, batch_shape]):\n # For convenience we define some handy constants.\n self._zero = constant_op.constant(0, dtype=dtypes.int32, name=\"zero\")\n self._empty = constant_op.constant([], dtype=dtypes.int32, name=\"empty\")\n\n if bijector is None:\n bijector = bijectors.Identity(validate_args=validate_args)\n\n # We will keep track of a static and dynamic version of\n # self._is_{batch,event}_override. This way we can do more prior to graph\n # execution, including possibly raising Python exceptions.\n\n self._override_batch_shape = self._maybe_validate_shape_override(\n batch_shape, distribution.is_scalar_batch(), validate_args,\n \"batch_shape\")\n self._is_batch_override = _logical_not(_logical_equal(\n _ndims_from_shape(self._override_batch_shape), self._zero))\n self._is_maybe_batch_override = bool(\n tensor_util.constant_value(self._override_batch_shape) is None or\n tensor_util.constant_value(self._override_batch_shape))\n\n self._override_event_shape = self._maybe_validate_shape_override(\n event_shape, distribution.is_scalar_event(), validate_args,\n \"event_shape\")\n self._is_event_override = _logical_not(_logical_equal(\n _ndims_from_shape(self._override_event_shape), self._zero))\n self._is_maybe_event_override = bool(\n tensor_util.constant_value(self._override_event_shape) is None or\n tensor_util.constant_value(self._override_event_shape))\n\n # To convert a scalar distribution into a multivariate distribution we\n # will draw dims from the sample dims, which are otherwise iid. This is\n # easy to do except in the case that the base distribution has batch dims\n # and we're overriding event shape. When that case happens the event dims\n # will incorrectly be to the left of the batch dims. 
In this case we'll\n # cyclically permute left the new dims.\n self._needs_rotation = _logical_and(\n self._is_event_override,\n _logical_not(self._is_batch_override),\n _logical_not(distribution.is_scalar_batch()))\n override_event_ndims = _ndims_from_shape(self._override_event_shape)\n self._rotate_ndims = _pick_scalar_condition(\n self._needs_rotation, override_event_ndims, 0)\n # We'll be reducing the head dims (if at all), i.e., this will be []\n # if we don't need to reduce.\n self._reduce_event_indices = math_ops.range(\n self._rotate_ndims - override_event_ndims, self._rotate_ndims)\n\n self._distribution = distribution\n self._bijector = bijector\n super(TransformedDistribution, self).__init__(\n dtype=self._distribution.dtype,\n is_continuous=self._distribution.is_continuous,\n is_reparameterized=self._distribution.is_reparameterized,\n validate_args=validate_args,\n allow_nan_stats=self._distribution.allow_nan_stats,\n parameters=parameters,\n # We let TransformedDistribution access _graph_parents since this class\n # is more like a baseclass than derived.\n graph_parents=(distribution._graph_parents + # pylint: disable=protected-access\n bijector.graph_parents),\n name=name)\n\n @property\n def distribution(self):\n \"\"\"Base distribution, p(x).\"\"\"\n return self._distribution\n\n @property\n def bijector(self):\n \"\"\"Function transforming x => y.\"\"\"\n return self._bijector\n\n def _event_shape(self):\n return self.bijector.forward_event_shape(distribution_util.pick_vector(\n self._is_event_override,\n self._override_event_shape,\n self.distribution.event_shape()))\n\n def _get_event_shape(self):\n static_override = tensor_util.constant_value(self._override_event_shape)\n return self.bijector.get_forward_event_shape(\n self.distribution.get_event_shape()\n if static_override is not None and not static_override\n else tensor_shape.TensorShape(static_override))\n\n def _batch_shape(self):\n return distribution_util.pick_vector(\n self._is_batch_override,\n self._override_batch_shape,\n self.distribution.batch_shape())\n\n def _get_batch_shape(self):\n static_override = tensor_util.constant_value(self._override_batch_shape)\n if static_override is not None and not static_override:\n return self.distribution.get_batch_shape()\n return tensor_shape.TensorShape(static_override)\n\n @distribution_util.AppendDocstring(\n \"\"\"Samples from the base distribution and then passes through\n the bijector's forward transform.\"\"\",\n condition_kwargs_dict=_condition_kwargs_dict)\n def _sample_n(self, n, seed=None,\n bijector_kwargs=None, distribution_kwargs=None):\n bijector_kwargs = bijector_kwargs or {}\n distribution_kwargs = distribution_kwargs or {}\n sample_shape = _concat_vectors(\n distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),\n self._override_batch_shape,\n self._override_event_shape,\n distribution_util.pick_vector(self._needs_rotation, [n], self._empty))\n x = self.distribution.sample(sample_shape=sample_shape, seed=seed,\n **distribution_kwargs)\n x = self._maybe_rotate_dims(x)\n return self.bijector.forward(x, **bijector_kwargs)\n\n @distribution_util.AppendDocstring(\n \"\"\"Implements `(log o p o g^{-1})(y) + (log o abs o det o J o g^{-1})(y)`,\n where `g^{-1}` is the inverse of `transform`.\n\n Also raises a `ValueError` if `inverse` was not provided to the\n distribution and `y` was not returned from `sample`.\"\"\",\n condition_kwargs_dict=_condition_kwargs_dict)\n def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):\n 
bijector_kwargs = bijector_kwargs or {}\n distribution_kwargs = distribution_kwargs or {}\n x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(\n y, **bijector_kwargs)\n x = self._maybe_rotate_dims(x, rotate_right=True)\n log_prob = self.distribution.log_prob(x, **distribution_kwargs)\n if self._is_maybe_event_override:\n log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)\n return ildj + log_prob\n\n @distribution_util.AppendDocstring(\n \"\"\"Implements `p(g^{-1}(y)) det|J(g^{-1}(y))|`, where `g^{-1}` is the\n inverse of `transform`.\n\n Also raises a `ValueError` if `inverse` was not provided to the\n distribution and `y` was not returned from `sample`.\"\"\",\n condition_kwargs_dict=_condition_kwargs_dict)\n def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):\n bijector_kwargs = bijector_kwargs or {}\n distribution_kwargs = distribution_kwargs or {}\n x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(\n y, **bijector_kwargs)\n x = self._maybe_rotate_dims(x, rotate_right=True)\n prob = self.distribution.prob(x, **distribution_kwargs)\n if self._is_maybe_event_override:\n prob = math_ops.reduce_prod(prob, self._reduce_event_indices)\n return math_ops.exp(ildj) * prob\n\n @distribution_util.AppendDocstring(\n condition_kwargs_dict=_condition_kwargs_dict)\n def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):\n if self._is_maybe_event_override:\n raise NotImplementedError(\"log_cdf is not implemented when overriding \"\n \"event_shape\")\n bijector_kwargs = bijector_kwargs or {}\n distribution_kwargs = distribution_kwargs or {}\n x = self.bijector.inverse(y, **bijector_kwargs)\n return self.distribution.log_cdf(x, **distribution_kwargs)\n\n @distribution_util.AppendDocstring(\n condition_kwargs_dict=_condition_kwargs_dict)\n def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):\n if self._is_maybe_event_override:\n raise NotImplementedError(\"cdf is not implemented when overriding \"\n \"event_shape\")\n bijector_kwargs = bijector_kwargs or {}\n distribution_kwargs = distribution_kwargs or {}\n x = self.bijector.inverse(y, **bijector_kwargs)\n return self.distribution.cdf(x, **distribution_kwargs)\n\n @distribution_util.AppendDocstring(\n condition_kwargs_dict=_condition_kwargs_dict)\n def _log_survival_function(self, y,\n bijector_kwargs=None, distribution_kwargs=None):\n if self._is_maybe_event_override:\n raise NotImplementedError(\"log_survival_function is not implemented when \"\n \"overriding event_shape\")\n bijector_kwargs = bijector_kwargs or {}\n distribution_kwargs = distribution_kwargs or {}\n x = self.bijector.inverse(y, **bijector_kwargs)\n return self.distribution.log_survival_function(x, **distribution_kwargs)\n\n @distribution_util.AppendDocstring(\n condition_kwargs_dict=_condition_kwargs_dict)\n def _survival_function(self, y,\n bijector_kwargs=None, distribution_kwargs=None):\n if self._is_maybe_event_override:\n raise NotImplementedError(\"survival_function is not implemented when \"\n \"overriding event_shape\")\n bijector_kwargs = bijector_kwargs or {}\n distribution_kwargs = distribution_kwargs or {}\n x = self.bijector.inverse(y, **bijector_kwargs)\n return self.distribution.survival_function(x, **distribution_kwargs)\n\n def _entropy(self):\n if (not self.distribution.is_continuous or\n not self.bijector.is_constant_jacobian):\n raise NotImplementedError(\"entropy is not implemented\")\n # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. 
It\n # can be shown that:\n # H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].\n # If is_constant_jacobian then:\n # E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)\n # where c can by anything.\n entropy = self.distribution.entropy()\n if self._is_maybe_event_override:\n # H[X] = sum_i H[X_i] if X_i are mutually independent.\n # This means that a reduce_sum is a simple rescaling.\n entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),\n dtype=entropy.dtype.base_dtype)\n if self._is_maybe_batch_override:\n new_shape = array_ops.concat([\n _ones_like(self._override_batch_shape),\n self.distribution.batch_shape()\n ], 0)\n entropy = array_ops.reshape(entropy, new_shape)\n multiples = array_ops.concat([\n self._override_batch_shape,\n _ones_like(self.distribution.batch_shape())\n ], 0)\n entropy = array_ops.tile(entropy, multiples)\n dummy = 0.\n return entropy - self.bijector.inverse_log_det_jacobian(dummy)\n\n def _maybe_validate_shape_override(self, override_shape, base_is_scalar,\n validate_args, name):\n \"\"\"Helper to __init__ which ensures override batch/event_shape are valid.\"\"\"\n if override_shape is None:\n override_shape = []\n\n override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32,\n name=name)\n\n if not override_shape.dtype.is_integer:\n raise TypeError(\"shape override must be an integer\")\n\n override_is_scalar = _is_scalar_from_shape(override_shape)\n if tensor_util.constant_value(override_is_scalar):\n return self._empty\n\n dynamic_assertions = []\n\n if override_shape.get_shape().ndims is not None:\n if override_shape.get_shape().ndims != 1:\n raise ValueError(\"shape override must be a vector\")\n elif validate_args:\n dynamic_assertions += [check_ops.assert_rank(\n override_shape, 1,\n message=\"shape override must be a vector\")]\n\n if tensor_util.constant_value(override_shape) is not None:\n if any(s <= 0 for s in tensor_util.constant_value(override_shape)):\n raise ValueError(\"shape override must have positive elements\")\n elif validate_args:\n dynamic_assertions += [check_ops.assert_positive(\n override_shape,\n message=\"shape override must have positive elements\")]\n\n is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),\n _logical_not(override_is_scalar))\n if tensor_util.constant_value(is_both_nonscalar) is not None:\n if tensor_util.constant_value(is_both_nonscalar):\n raise ValueError(\"base distribution not scalar\")\n elif validate_args:\n dynamic_assertions += [check_ops.assert_equal(\n is_both_nonscalar, False,\n message=\"base distribution not scalar\")]\n\n if not dynamic_assertions:\n return override_shape\n return control_flow_ops.with_dependencies(\n dynamic_assertions, override_shape)\n\n def _maybe_rotate_dims(self, x, rotate_right=False):\n \"\"\"Helper which rolls left event_dims left or right event_dims right.\"\"\"\n if tensor_util.constant_value(self._needs_rotation) is False:\n return x\n ndims = array_ops.rank(x)\n n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims\n return array_ops.transpose(\n x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))\n",
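The `_log_prob` method above applies the change-of-variables rule stated in the class docstring: `log p_Y(y) = log p_X(g^{-1}(y)) + (log o abs o det o J o g^{-1})(y)`. Here is a small NumPy sketch of that identity for the `Exp` bijector case (Normal base distribution becomes LogNormal); the helper names are mine and purely illustrative.

```python
# Illustrative sketch (assumption: my own helpers) of the change-of-variables
# rule used by TransformedDistribution._log_prob, with g = exp so that
# g^{-1} = log and the inverse log-det-Jacobian is -log(y).
import numpy as np

def normal_log_prob(x, mu=0.0, sigma=1.0):
    return -0.5 * ((x - mu) / sigma) ** 2 - np.log(sigma * np.sqrt(2.0 * np.pi))

def lognormal_log_prob_via_transform(y):
    x = np.log(y)       # bijector.inverse(y)
    ildj = -np.log(y)   # bijector.inverse_log_det_jacobian(y)
    return normal_log_prob(x) + ildj

# Compare against the closed-form standard log-normal log-density.
y = np.array([0.5, 1.0, 2.0, 5.0])
expected = -np.log(y) - 0.5 * np.log(y) ** 2 - np.log(np.sqrt(2.0 * np.pi))
assert np.allclose(lognormal_log_prob_via_transform(y), expected)
```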
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Prints a header file to be used with SELECTIVE_REGISTRATION.\n\nExample usage:\n print_selective_registration_header \\\n --graphs=path/to/graph.pb > ops_to_register.h\n\n Then when compiling tensorflow, include ops_to_register.h in the include\n search path and pass -DSELECTIVE_REGISTRATION - see\n core/framework/selective_registration.h for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('proto_fileformat', 'rawproto',\n 'Format of proto file, either textproto or rawproto')\n\nflags.DEFINE_string(\n 'graphs', '',\n 'Comma-separated list of paths to model files to be analyzed.')\n\nflags.DEFINE_string(\n 'default_ops', 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp',\n 'Default operator:kernel pairs to always include implementation for. 
'\n 'Pass \"all\" to have all operators and kernels included; note that this '\n 'should be used only when it is useful compared with simply not using '\n 'selective registration, as it can in some cases limit the effect of '\n 'compilation caches')\n\n\ndef get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):\n \"\"\"Gets the ops and kernels needed from the model files.\"\"\"\n ops = set()\n\n for proto_file in proto_files:\n tf_logging.info('Loading proto file %s', proto_file)\n # Load GraphDef.\n file_data = gfile.GFile(proto_file).read()\n if proto_fileformat == 'rawproto':\n graph_def = graph_pb2.GraphDef.FromString(file_data)\n else:\n assert proto_fileformat == 'textproto'\n graph_def = text_format.Parse(file_data, graph_pb2.GraphDef())\n\n # Find all ops and kernels used by the graph.\n for node_def in graph_def.node:\n if not node_def.device:\n node_def.device = '/cpu:0'\n kernel_class = pywrap_tensorflow.TryFindKernelClass(\n node_def.SerializeToString())\n if kernel_class:\n op_and_kernel = (str(node_def.op), kernel_class.decode('utf-8'))\n if op_and_kernel not in ops:\n ops.add(op_and_kernel)\n else:\n print(\n 'Warning: no kernel found for op %s' % node_def.op, file=sys.stderr)\n\n # Add default ops.\n if default_ops_str != 'all':\n for s in default_ops_str.split(','):\n op, kernel = s.split(':')\n op_and_kernel = (op, kernel)\n if op_and_kernel not in ops:\n ops.add(op_and_kernel)\n\n return list(sorted(ops))\n\n\ndef get_header(ops_and_kernels, include_all_ops_and_kernels):\n \"\"\"Returns a header for use with tensorflow SELECTIVE_REGISTRATION.\n\n Args:\n ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include.\n include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op\n kernels are included.\n\n Returns:\n the string of the header that should be written as ops_to_register.h.\n \"\"\"\n ops = set([op for op, _ in ops_and_kernels])\n result_list = []\n\n def append(s):\n result_list.append(s)\n\n append('#ifndef OPS_TO_REGISTER')\n append('#define OPS_TO_REGISTER')\n\n if include_all_ops_and_kernels:\n append('#define SHOULD_REGISTER_OP(op) true')\n append('#define SHOULD_REGISTER_OP_KERNEL(clz) true')\n append('#define SHOULD_REGISTER_OP_GRADIENT true')\n else:\n append('constexpr inline bool ShouldRegisterOp(const char op[]) {')\n append(' return false')\n for op in sorted(ops):\n append(' || (strcmp(op, \"%s\") == 0)' % op)\n append(' ;')\n append('}')\n append('#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)')\n append('')\n\n line = 'const char kNecessaryOpKernelClasses[] = \",\"\\n'\n for _, kernel_class in ops_and_kernels:\n line += '\"%s,\"\\n' % kernel_class\n line += ';'\n append(line)\n append('#define SHOULD_REGISTER_OP_KERNEL(clz) '\n '(strstr(kNecessaryOpKernelClasses, \",\" clz \",\") != nullptr)')\n append('')\n\n append('#define SHOULD_REGISTER_OP_GRADIENT ' + (\n 'true' if 'SymbolicGradient' in ops else 'false'))\n\n append('#endif')\n return '\\n'.join(result_list)\n\n\ndef main(unused_argv):\n if not FLAGS.graphs:\n print('--graphs is required')\n return 1\n graphs = FLAGS.graphs.split(',')\n ops_and_kernels = get_ops_and_kernels(FLAGS.proto_fileformat, graphs,\n FLAGS.default_ops)\n if not ops_and_kernels:\n print('Error reading graph!')\n return 1\n\n print(get_header(ops_and_kernels, FLAGS.default_ops == 'all'))\n\n\nif __name__ == '__main__':\n app.run()\n",
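To make the string handling in `get_ops_and_kernels`/`get_header` above concrete, here is a tiny self-contained sketch of the two pieces they rely on: splitting the `'op:kernel,op:kernel'` default list and emitting the `strcmp` chain behind `SHOULD_REGISTER_OP`. `parse_default_ops` and `should_register_op_body` are my own simplified names, not functions from the script.

```python
# Illustrative sketch (assumption: my own simplified helpers, not the script
# above) of the default-ops parsing and the generated ShouldRegisterOp body.
def parse_default_ops(default_ops_str):
    """'NoOp:NoOp,_Recv:RecvOp' -> [('NoOp', 'NoOp'), ('_Recv', 'RecvOp')]."""
    return [tuple(pair.split(':')) for pair in default_ops_str.split(',')]

def should_register_op_body(ops):
    """Emit the strcmp chain placed inside ShouldRegisterOp()."""
    lines = ['  return false']
    for op in sorted(ops):
        lines.append('      || (strcmp(op, "%s") == 0)' % op)
    lines.append('  ;')
    return '\n'.join(lines)

pairs = parse_default_ops('NoOp:NoOp,_Recv:RecvOp,_Send:SendOp')
assert pairs[1] == ('_Recv', 'RecvOp')
print(should_register_op_body(op for op, _ in pairs))
```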
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport itertools\nimport math\n\nimport numpy as np\nimport six\nfrom tensorflow.contrib import distributions as distributions_lib\nfrom tensorflow.contrib import linalg as linalg_lib\nfrom tensorflow.contrib.distributions.python.ops import bijector as bijector_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\nbijectors = bijector_lib\ndistributions = distributions_lib\nlinalg = linalg_lib\nrng = np.random.RandomState(42)\n\n\ndef assert_finite(array):\n if not np.isfinite(array).all():\n raise AssertionError(\"array was not all finite. %s\" % array[:15])\n\n\ndef assert_strictly_increasing(array):\n np.testing.assert_array_less(0.0, np.diff(array))\n\n\ndef assert_strictly_decreasing(array):\n np.testing.assert_array_less(np.diff(array), 0.0)\n\n\ndef assert_strictly_monotonic(array):\n if array[0] < array[-1]:\n assert_strictly_increasing(array)\n else:\n assert_strictly_decreasing(array)\n\n\ndef assert_scalar_congruency(bijector,\n lower_x,\n upper_x,\n n=10000,\n rtol=0.01,\n sess=None):\n \"\"\"Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.\n\n We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the\n `bijector` in order to check that:\n\n 1. the forward is strictly monotonic.\n 2. the forward/inverse methods are inverses of each other.\n 3. the jacobian is the correct change of measure.\n\n This can only be used for a Bijector mapping open subsets of the real line\n to themselves. This is due to the fact that this test compares the pdf\n before/after transformation with the Lebesgue measure on the line.\n\n Args:\n bijector: Instance of Bijector\n lower_x: Python scalar.\n upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in\n the domain of the `bijector`. The `bijector` should probably not produce\n huge variation in values in the interval `(lower_x, upper_x)`, or else\n the variance based check of the Jacobian will require small `rtol` or\n huge `n`.\n n: Number of samples to draw for the checks.\n rtol: Positive number. Used for the Jacobian check.\n sess: `tf.Session`. 
Defaults to the default session.\n\n Raises:\n AssertionError: If tests fail.\n \"\"\"\n\n # Checks and defaults.\n assert bijector.shaper is None or bijector.shaper.event_ndims.eval() == 0\n if sess is None:\n sess = ops.get_default_session()\n\n # Should be monotonic over this interval\n ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)\n if bijector.dtype is not None:\n ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype)\n forward_on_10_pts = bijector.forward(ten_x_pts)\n\n # Set the lower/upper limits in the range of the bijector.\n lower_y, upper_y = sess.run(\n [bijector.forward(lower_x), bijector.forward(upper_x)])\n if upper_y < lower_y: # If bijector.forward is a decreasing function.\n lower_y, upper_y = upper_y, lower_y\n\n # Uniform samples from the domain, range.\n uniform_x_samps = distributions.Uniform(a=lower_x, b=upper_x).sample(n)\n uniform_y_samps = distributions.Uniform(a=lower_y, b=upper_y).sample(n)\n\n # These compositions should be the identity.\n inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))\n forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))\n\n # For a < b, and transformation y = y(x),\n # (b - a) = \\int_a^b dx = \\int_{y(a)}^{y(b)} |dx/dy| dy\n # \"change_measure_dy_dx\" below is a Monte Carlo approximation to the right\n # hand side, which should then be close to the left, which is (b - a).\n dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian(uniform_y_samps))\n # E[|dx/dy|] under Uniform[lower_y, upper_y]\n # = \\int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure\n expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx)\n # dy = dP(u) * (upper_y - lower_y)\n change_measure_dy_dx = (\n (upper_y - lower_y) * expectation_of_dy_dx_under_uniform)\n\n # We'll also check that dy_dx = 1 / dx_dy.\n dx_dy = math_ops.exp(\n bijector.forward_log_det_jacobian(bijector.inverse(uniform_y_samps)))\n\n (\n forward_on_10_pts_v,\n dy_dx_v,\n dx_dy_v,\n change_measure_dy_dx_v,\n uniform_x_samps_v,\n uniform_y_samps_v,\n inverse_forward_x_v,\n forward_inverse_y_v,) = sess.run([\n forward_on_10_pts,\n dy_dx,\n dx_dy,\n change_measure_dy_dx,\n uniform_x_samps,\n uniform_y_samps,\n inverse_forward_x,\n forward_inverse_y,\n ])\n\n assert_strictly_monotonic(forward_on_10_pts_v)\n # Composition of forward/inverse should be the identity.\n np.testing.assert_allclose(\n inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3)\n np.testing.assert_allclose(\n forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3)\n # Change of measure should be correct.\n np.testing.assert_allclose(\n upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol)\n # Inverse Jacobian should be equivalent to the reciprocal of the forward\n # Jacobian.\n np.testing.assert_allclose(\n dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3)\n\n\ndef assert_bijective_and_finite(bijector, x, y, atol=0, rtol=1e-5, sess=None):\n \"\"\"Assert that forward/inverse (along with jacobians) are inverses and finite.\n\n It is recommended to use x and y values that are very very close to the edge\n of the Bijector's domain.\n\n Args:\n bijector: A Bijector instance.\n x: np.array of values in the domain of bijector.forward.\n y: np.array of values in the domain of bijector.inverse.\n atol: Absolute tolerance.\n rtol: Relative tolerance.\n sess: TensorFlow session. 
Defaults to the default session.\n\n Raises:\n AssertionError: If tests fail.\n \"\"\"\n sess = sess or ops.get_default_session()\n\n # These are the incoming points, but people often create a crazy range of\n # values for which these end up being bad, especially in 16bit.\n assert_finite(x)\n assert_finite(y)\n np.testing.assert_array_less(0, y)\n\n f_x = bijector.forward(x)\n g_y = bijector.inverse(y)\n\n (\n x_from_x,\n y_from_y,\n ildj_f_x,\n fldj_x,\n ildj_y,\n fldj_g_y,\n f_x_v,\n g_y_v,) = sess.run([\n bijector.inverse(f_x),\n bijector.forward(g_y),\n bijector.inverse_log_det_jacobian(f_x),\n bijector.forward_log_det_jacobian(x),\n bijector.inverse_log_det_jacobian(y),\n bijector.forward_log_det_jacobian(g_y),\n f_x,\n g_y,\n ])\n\n assert_finite(x_from_x)\n assert_finite(y_from_y)\n assert_finite(ildj_f_x)\n assert_finite(fldj_x)\n assert_finite(ildj_y)\n assert_finite(fldj_g_y)\n assert_finite(f_x_v)\n assert_finite(g_y_v)\n\n np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)\n np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)\n np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)\n np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)\n\n\nclass BaseBijectorTest(test.TestCase):\n \"\"\"Tests properties of the Bijector base-class.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n with self.assertRaisesRegexp(TypeError,\n (\"Can't instantiate abstract class Bijector \"\n \"with abstract methods __init__\")):\n bijectors.Bijector()\n\n\nclass IntentionallyMissingError(Exception):\n pass\n\n\nclass BrokenBijectorWithInverseAndInverseLogDetJacobian(bijectors.Bijector):\n \"\"\"Bijector with broken directions.\n\n This BrokenBijector implements _inverse_and_inverse_log_det_jacobian.\n \"\"\"\n\n def __init__(self, forward_missing=False, inverse_missing=False):\n super(BrokenBijectorWithInverseAndInverseLogDetJacobian, self).__init__(\n batch_ndims=0,\n event_ndims=0,\n validate_args=False,\n name=\"BrokenBijectorDual\")\n self._forward_missing = forward_missing\n self._inverse_missing = inverse_missing\n\n def _forward(self, x):\n if self._forward_missing:\n raise IntentionallyMissingError\n return 2. 
* x\n\n def _inverse_and_inverse_log_det_jacobian(self, y):\n if self._inverse_missing:\n raise IntentionallyMissingError\n return y / 2., -math_ops.log(2.)\n\n def _forward_log_det_jacobian(self, x): # pylint:disable=unused-argument\n if self._forward_missing:\n raise IntentionallyMissingError\n return math_ops.log(2.)\n\n\nclass BrokenBijectorSeparateInverseAndInverseLogDetJacobian(bijectors.Bijector):\n \"\"\"Forward and inverse are not inverses of each other.\n\n This BrokenBijector implements _inverse and _inverse_log_det_jacobian as\n separate functions.\n \"\"\"\n\n def __init__(self, forward_missing=False, inverse_missing=False):\n super(BrokenBijectorSeparateInverseAndInverseLogDetJacobian, self).__init__(\n batch_ndims=0, event_ndims=0, validate_args=False, name=\"broken\")\n self._forward_missing = forward_missing\n self._inverse_missing = inverse_missing\n\n def _forward(self, x):\n if self._forward_missing:\n raise IntentionallyMissingError\n return 2 * x\n\n def _inverse(self, y):\n if self._inverse_missing:\n raise IntentionallyMissingError\n return y / 2.\n\n def _inverse_log_det_jacobian(self, y): # pylint:disable=unused-argument\n if self._inverse_missing:\n raise IntentionallyMissingError\n return -math_ops.log(2.)\n\n def _forward_log_det_jacobian(self, x): # pylint:disable=unused-argument\n if self._forward_missing:\n raise IntentionallyMissingError\n return math_ops.log(2.)\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass BijectorCachingTest(object):\n\n @abc.abstractproperty\n def broken_bijector_cls(self):\n # return a BrokenBijector type Bijector, since this will test the caching.\n raise IntentionallyMissingError(\"Not implemented\")\n\n def testCachingOfForwardResultsWhenCalledOneByOne(self):\n broken_bijector = self.broken_bijector_cls(inverse_missing=True)\n with self.test_session():\n x = constant_op.constant(1.1)\n\n # Call forward and forward_log_det_jacobian one-by-one (not together).\n y = broken_bijector.forward(x)\n _ = broken_bijector.forward_log_det_jacobian(x)\n\n # Now, everything should be cached if the argument is y.\n try:\n broken_bijector.inverse(y)\n broken_bijector.inverse_log_det_jacobian(y)\n broken_bijector.inverse_and_inverse_log_det_jacobian(y)\n except IntentionallyMissingError:\n raise AssertionError(\"Tests failed! Cached values not used.\")\n\n def testCachingOfInverseResultsWhenCalledOneByOne(self):\n broken_bijector = self.broken_bijector_cls(forward_missing=True)\n with self.test_session():\n y = constant_op.constant(1.1)\n\n # Call inverse and inverse_log_det_jacobian one-by-one (not together).\n x = broken_bijector.inverse(y)\n _ = broken_bijector.inverse_log_det_jacobian(y)\n\n # Now, everything should be cached if the argument is x.\n try:\n broken_bijector.forward(x)\n broken_bijector.forward_log_det_jacobian(x)\n except IntentionallyMissingError:\n raise AssertionError(\"Tests failed! Cached values not used.\")\n\n def testCachingOfInverseResultsWhenCalledTogether(self):\n broken_bijector = self.broken_bijector_cls(forward_missing=True)\n with self.test_session():\n y = constant_op.constant(1.1)\n\n # Call inverse and inverse_log_det_jacobian one-by-one (not together).\n x, _ = broken_bijector.inverse_and_inverse_log_det_jacobian(y)\n\n # Now, everything should be cached if the argument is x.\n try:\n broken_bijector.forward(x)\n broken_bijector.forward_log_det_jacobian(x)\n except IntentionallyMissingError:\n raise AssertionError(\"Tests failed! 
Cached values not used.\")\n\n\nclass SeparateCallsBijectorCachingTest(BijectorCachingTest, test.TestCase):\n \"\"\"Test caching with BrokenBijectorSeparateInverseAndInverseLogDetJacobian.\n\n These bijectors implement forward, inverse,... all as separate functions.\n \"\"\"\n\n @property\n def broken_bijector_cls(self):\n return BrokenBijectorSeparateInverseAndInverseLogDetJacobian\n\n\nclass JointCallsBijectorCachingTest(BijectorCachingTest, test.TestCase):\n \"\"\"Test caching with BrokenBijectorWithInverseAndInverseLogDetJacobian.\n\n These bijectors implement _inverse_and_inverse_log_det_jacobian, which is two\n functionalities together.\n \"\"\"\n\n @property\n def broken_bijector_cls(self):\n return BrokenBijectorWithInverseAndInverseLogDetJacobian\n\n\nclass IdentityBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the Y = g(X) = X transformation.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n bijector = bijectors.Identity()\n self.assertEqual(\"identity\", bijector.name)\n x = [[[0.], [1.]]]\n self.assertAllEqual(x, bijector.forward(x).eval())\n self.assertAllEqual(x, bijector.inverse(x).eval())\n self.assertAllEqual(0., bijector.inverse_log_det_jacobian(x).eval())\n self.assertAllEqual(0., bijector.forward_log_det_jacobian(x).eval())\n rev, jac = bijector.inverse_and_inverse_log_det_jacobian(x)\n self.assertAllEqual(x, rev.eval())\n self.assertAllEqual(0., jac.eval())\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.Identity()\n assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)\n\n\nclass ExpBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the Y = g(X) = exp(X) transformation.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n bijector = bijectors.Exp(event_ndims=1)\n self.assertEqual(\"exp\", bijector.name)\n x = [[[1.], [2.]]]\n y = np.exp(x)\n self.assertAllClose(y, bijector.forward(x).eval())\n self.assertAllClose(x, bijector.inverse(y).eval())\n self.assertAllClose(\n -np.sum(np.log(y), axis=-1),\n bijector.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(-bijector.inverse_log_det_jacobian(np.exp(x)).eval(),\n bijector.forward_log_det_jacobian(x).eval())\n rev, jac = bijector.inverse_and_inverse_log_det_jacobian(y)\n self.assertAllClose(x, rev.eval())\n self.assertAllClose(-np.sum(np.log(y), axis=-1), jac.eval())\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.Exp()\n assert_scalar_congruency(bijector, lower_x=-2., upper_x=1.5, rtol=0.05)\n\n def testBijectiveAndFinite(self):\n with self.test_session():\n bijector = bijectors.Exp(event_ndims=0)\n x = np.linspace(-10, 10, num=10).astype(np.float32)\n y = np.logspace(-10, 10, num=10).astype(np.float32)\n assert_bijective_and_finite(bijector, x, y)\n\n\nclass PowerTransformBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the power transformation.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n c = 0.2\n bijector = bijectors.PowerTransform(\n power=c, event_ndims=1, validate_args=True)\n self.assertEqual(\"power_transform\", bijector.name)\n x = np.array([[[-1.], [2.], [-5. + 1e-4]]])\n y = (1. + x * c)**(1. / c)\n self.assertAllClose(y, bijector.forward(x).eval())\n self.assertAllClose(x, bijector.inverse(y).eval())\n self.assertAllClose(\n (c - 1.) 
* np.sum(np.log(y), axis=-1),\n bijector.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(\n -bijector.inverse_log_det_jacobian(y).eval(),\n bijector.forward_log_det_jacobian(x).eval(),\n rtol=1e-4,\n atol=0.)\n rev, jac = bijector.inverse_and_inverse_log_det_jacobian(y)\n self.assertAllClose(x, rev.eval())\n self.assertAllClose((c - 1.) * np.sum(np.log(y), axis=-1), jac.eval())\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.PowerTransform(power=0.2, validate_args=True)\n assert_scalar_congruency(bijector, lower_x=-2., upper_x=1.5, rtol=0.05)\n\n def testBijectiveAndFinite(self):\n with self.test_session():\n bijector = bijectors.PowerTransform(\n power=0.2, event_ndims=0, validate_args=True)\n x = np.linspace(-4.999, 10, num=10).astype(np.float32)\n y = np.logspace(0.001, 10, num=10).astype(np.float32)\n assert_bijective_and_finite(bijector, x, y, rtol=1e-3)\n\n\nclass InlineBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the inline constructed bijector.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n exp = bijectors.Exp(event_ndims=1)\n inline = bijectors.Inline(\n forward_fn=math_ops.exp,\n inverse_fn=math_ops.log,\n inverse_log_det_jacobian_fn=(\n lambda y: -math_ops.reduce_sum(math_ops.log(y), reduction_indices=-1)),\n forward_log_det_jacobian_fn=(\n lambda x: math_ops.reduce_sum(x, reduction_indices=-1)),\n name=\"exp\")\n\n self.assertEqual(exp.name, inline.name)\n x = [[[1., 2.], [3., 4.], [5., 6.]]]\n y = np.exp(x)\n self.assertAllClose(y, inline.forward(x).eval())\n self.assertAllClose(x, inline.inverse(y).eval())\n self.assertAllClose(\n -np.sum(np.log(y), axis=-1),\n inline.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(-inline.inverse_log_det_jacobian(y).eval(),\n inline.forward_log_det_jacobian(x).eval())\n rev, jac = inline.inverse_and_inverse_log_det_jacobian(y)\n self.assertAllClose(x, rev.eval())\n self.assertAllClose(-np.sum(np.log(y), axis=-1), jac.eval())\n\n def testShapeGetters(self):\n with self.test_session():\n bijector = bijectors.Inline(\n forward_event_shape_fn=lambda x: array_ops.concat((x, [1]), 0),\n get_forward_event_shape_fn=lambda x: x.as_list() + [1],\n inverse_event_shape_fn=lambda x: x[:-1],\n get_inverse_event_shape_fn=lambda x: x[:-1],\n name=\"shape_only\")\n x = tensor_shape.TensorShape([1, 2, 3])\n y = tensor_shape.TensorShape([1, 2, 3, 1])\n self.assertAllEqual(y, bijector.get_forward_event_shape(x))\n self.assertAllEqual(y.as_list(),\n bijector.forward_event_shape(x.as_list()).eval())\n self.assertAllEqual(x, bijector.get_inverse_event_shape(y))\n self.assertAllEqual(x.as_list(),\n bijector.inverse_event_shape(y.as_list()).eval())\n\n\nclass AffineLinearOperatorTest(test.TestCase):\n\n def testIdentity(self):\n with self.test_session():\n affine = bijectors.AffineLinearOperator(validate_args=True)\n x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)\n y = x\n ildj = 0.\n\n self.assertEqual(affine.name, \"affine_linear_operator\")\n self.assertAllClose(y, affine.forward(x).eval())\n self.assertAllClose(x, affine.inverse(y).eval())\n self.assertAllClose(ildj, affine.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(-affine.inverse_log_det_jacobian(y).eval(),\n affine.forward_log_det_jacobian(x).eval())\n rev, actual_ildj = affine.inverse_and_inverse_log_det_jacobian(y)\n self.assertAllClose(x, rev.eval())\n self.assertAllClose(ildj, actual_ildj.eval())\n\n def testDiag(self):\n with self.test_session():\n shift = np.array([-1, 0, 1], 
dtype=np.float32)\n diag = np.array([[1, 2, 3],\n [2, 5, 6]], dtype=np.float32)\n scale = linalg.LinearOperatorDiag(diag, is_non_singular=True)\n affine = bijectors.AffineLinearOperator(\n shift=shift, scale=scale, validate_args=True)\n\n x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)\n y = diag * x + shift\n ildj = -np.sum(np.log(np.abs(diag)), axis=-1)\n\n self.assertEqual(affine.name, \"affine_linear_operator\")\n self.assertAllClose(y, affine.forward(x).eval())\n self.assertAllClose(x, affine.inverse(y).eval())\n self.assertAllClose(ildj, affine.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(-affine.inverse_log_det_jacobian(y).eval(),\n affine.forward_log_det_jacobian(x).eval())\n rev, actual_ildj = affine.inverse_and_inverse_log_det_jacobian(y)\n self.assertAllClose(x, rev.eval())\n self.assertAllClose(ildj, actual_ildj.eval())\n\n def testTriL(self):\n with self.test_session():\n shift = np.array([-1, 0, 1], dtype=np.float32)\n tril = np.array([[[1, 0, 0],\n [2, -1, 0],\n [3, 2, 1]],\n [[2, 0, 0],\n [3, -2, 0],\n [4, 3, 2]]],\n dtype=np.float32)\n scale = linalg.LinearOperatorTriL(tril, is_non_singular=True)\n affine = bijectors.AffineLinearOperator(\n shift=shift, scale=scale, validate_args=True)\n\n x = np.array([[[1, 0, -1],\n [2, 3, 4]],\n [[4, 1, -7],\n [6, 9, 8]]],\n dtype=np.float32)\n # If we made the bijector do x*A+b then this would be simplified to:\n # y = np.matmul(x, tril) + shift.\n y = np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift\n ildj = -np.sum(np.log(np.abs(np.diagonal(\n tril, axis1=-2, axis2=-1))),\n axis=-1)\n\n self.assertEqual(affine.name, \"affine_linear_operator\")\n self.assertAllClose(y, affine.forward(x).eval())\n self.assertAllClose(x, affine.inverse(y).eval())\n self.assertAllClose(ildj, affine.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(-affine.inverse_log_det_jacobian(y).eval(),\n affine.forward_log_det_jacobian(x).eval())\n rev, actual_ildj = affine.inverse_and_inverse_log_det_jacobian(y)\n self.assertAllClose(x, rev.eval())\n self.assertAllClose(ildj, actual_ildj.eval())\n\n\nclass AffineBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the Y = scale @ x + shift transformation.\"\"\"\n\n def testProperties(self):\n with self.test_session():\n mu = -1.\n # scale corresponds to 1.\n bijector = bijectors.Affine(shift=mu, event_ndims=0)\n self.assertEqual(\"affine\", bijector.name)\n\n def testNoBatchScalarViaIdentity(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # Corresponds to scale = 2\n bijector = bijectors.Affine(\n shift=mu, scale_identity_multiplier=2., event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is scalar\"\n x = [1., 2, 3] # Three scalar samples (no batches).\n self.assertAllClose([1., 3, 5], run(bijector.forward, x))\n self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))\n self.assertAllClose(-math.log(2.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testNoBatchScalarViaDiag(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, 
dynamic_run):\n mu = -1.\n # Corresponds to scale = 2\n bijector = bijectors.Affine(shift=mu, scale_diag=[2.], event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is scalar\"\n x = [1., 2, 3] # Three scalar samples (no batches).\n self.assertAllClose([1., 3, 5], run(bijector.forward, x))\n self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))\n self.assertAllClose(-math.log(2.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testWeirdSampleNoBatchScalarViaIdentity(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # Corresponds to scale = 2.\n bijector = bijectors.Affine(\n shift=mu, scale_identity_multiplier=2., event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is scalar\"\n x = [[1., 2, 3], [4, 5, 6]] # Weird sample shape.\n self.assertAllClose([[1., 3, 5],\n [7, 9, 11]],\n run(bijector.forward, x))\n self.assertAllClose([[1., 1.5, 2.],\n [2.5, 3, 3.5]],\n run(bijector.inverse, x))\n self.assertAllClose(-math.log(2.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testOneBatchScalarViaIdentity(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [1.]\n # One batch, scalar.\n # Corresponds to scale = 1.\n bijector = bijectors.Affine(shift=mu, event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is scalar\"\n x = [1.] # One sample from one batches.\n self.assertAllClose([2.], run(bijector.forward, x))\n self.assertAllClose([0.], run(bijector.inverse, x))\n self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))\n\n def testOneBatchScalarViaDiag(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [1.]\n # One batch, scalar.\n # Corresponds to scale = 1.\n bijector = bijectors.Affine(shift=mu, scale_diag=[1.], event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is scalar\"\n x = [1.] 
# One sample from one batches.\n self.assertAllClose([2.], run(bijector.forward, x))\n self.assertAllClose([0.], run(bijector.inverse, x))\n self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))\n\n def testTwoBatchScalarIdentityViaIdentity(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [1., -1]\n # Univariate, two batches.\n # Corresponds to scale = 1.\n bijector = bijectors.Affine(shift=mu, event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is scalar\"\n x = [1., 1] # One sample from each of two batches.\n self.assertAllClose([2., 0], run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))\n\n def testTwoBatchScalarIdentityViaDiag(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [1., -1]\n # Univariate, two batches.\n # Corresponds to scale = 1.\n bijector = bijectors.Affine(shift=mu, scale_diag=[1.], event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is scalar\"\n x = [1., 1] # One sample from each of two batches.\n self.assertAllClose([2., 0], run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))\n\n def testNoBatchMultivariateIdentity(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [1., -1]\n # Multivariate\n # Corresponds to scale = [[1., 0], [0, 1.]]\n bijector = bijectors.Affine(shift=mu)\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [1., 1]\n # matmul(sigma, x) + shift\n # = [-1, -1] + [1, -1]\n self.assertAllClose([2., 0], run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n\n # x is a 2-batch of 2-vectors.\n # The first vector is [1, 1], the second is [-1, -1].\n # Each undergoes matmul(sigma, x) + shift.\n x = [[1., 1], [-1., -1]]\n self.assertAllClose([[2., 0], [0., -2]], run(bijector.forward, x))\n self.assertAllClose([[0., 2], [-2., 0]], run(bijector.inverse, x))\n self.assertAllClose(0., run(bijector.inverse_log_det_jacobian, x))\n\n def testNoBatchMultivariateDiag(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [1., -1]\n # Multivariate\n # Corresponds to scale = [[2., 0], [0, 1.]]\n bijector = bijectors.Affine(shift=mu, scale_diag=[2., 1])\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [1., 1]\n # matmul(sigma, x) + shift\n # = [-1, -1] + [1, -1]\n self.assertAllClose([3., 0], 
run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n self.assertAllClose(-math.log(2.),\n run(bijector.inverse_log_det_jacobian, x))\n\n # x is a 2-batch of 2-vectors.\n # The first vector is [1, 1], the second is [-1, -1].\n # Each undergoes matmul(sigma, x) + shift.\n x = [[1., 1],\n [-1., -1]]\n self.assertAllClose([[3., 0],\n [-1., -2]],\n run(bijector.forward, x))\n self.assertAllClose([[0., 2],\n [-1., 0]],\n run(bijector.inverse, x))\n self.assertAllClose(-math.log(2.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testNoBatchMultivariateFullDynamic(self):\n with self.test_session() as sess:\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n mu = array_ops.placeholder(dtypes.float32, name=\"mu\")\n scale_diag = array_ops.placeholder(dtypes.float32, name=\"scale_diag\")\n event_ndims = array_ops.placeholder(dtypes.int32, name=\"event_ndims\")\n\n x_value = np.array([[1., 1]], dtype=np.float32)\n mu_value = np.array([1., -1], dtype=np.float32)\n scale_diag_value = np.array([2., 2], dtype=np.float32)\n event_ndims_value = np.array(1, dtype=np.int32)\n feed_dict = {\n x: x_value,\n mu: mu_value,\n scale_diag: scale_diag_value,\n event_ndims: event_ndims_value\n }\n\n bijector = bijectors.Affine(\n shift=mu, scale_diag=scale_diag, event_ndims=event_ndims)\n self.assertEqual(1, sess.run(bijector.shaper.event_ndims, feed_dict))\n self.assertAllClose([[3., 1]], sess.run(bijector.forward(x), feed_dict))\n self.assertAllClose([[0., 1]], sess.run(bijector.inverse(x), feed_dict))\n self.assertAllClose(\n -math.log(4),\n sess.run(bijector.inverse_log_det_jacobian(x), feed_dict))\n\n def testBatchMultivariateIdentity(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value, dtype=np.float32)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [[1., -1]]\n # Corresponds to 1 2x2 matrix, with twos on the diagonal.\n scale = 2.\n bijector = bijectors.Affine(shift=mu, scale_identity_multiplier=scale)\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [[[1., 1]]]\n self.assertAllClose([[[3., 1]]], run(bijector.forward, x))\n self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))\n self.assertAllClose(-math.log(4),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testBatchMultivariateDiag(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value, dtype=np.float32)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = [[1., -1]]\n # Corresponds to 1 2x2 matrix, with twos on the diagonal.\n scale_diag = [[2., 2]]\n bijector = bijectors.Affine(shift=mu, scale_diag=scale_diag)\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [[[1., 1]]]\n self.assertAllClose([[[3., 1]]], run(bijector.forward, x))\n self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))\n self.assertAllClose([-math.log(4)],\n run(bijector.inverse_log_det_jacobian, x))\n\n def testBatchMultivariateFullDynamic(self):\n with self.test_session() as sess:\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n mu = array_ops.placeholder(dtypes.float32, name=\"mu\")\n scale_diag = array_ops.placeholder(dtypes.float32, 
name=\"scale_diag\")\n event_ndims = array_ops.placeholder(dtypes.int32, name=\"event_ndims\")\n\n x_value = np.array([[[1., 1]]], dtype=np.float32)\n mu_value = np.array([[1., -1]], dtype=np.float32)\n scale_diag_value = np.array([[2., 2]], dtype=np.float32)\n event_ndims_value = 1\n\n feed_dict = {\n x: x_value,\n mu: mu_value,\n scale_diag: scale_diag_value,\n event_ndims: event_ndims_value\n }\n\n bijector = bijectors.Affine(\n shift=mu, scale_diag=scale_diag, event_ndims=event_ndims)\n self.assertEqual(1, sess.run(bijector.shaper.event_ndims, feed_dict))\n self.assertAllClose([[[3., 1]]], sess.run(bijector.forward(x), feed_dict))\n self.assertAllClose([[[0., 1]]], sess.run(bijector.inverse(x), feed_dict))\n self.assertAllClose([-math.log(4)],\n sess.run(\n bijector.inverse_log_det_jacobian(x), feed_dict))\n\n def testIdentityWithDiagUpdate(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # Corresponds to scale = 2\n bijector = bijectors.Affine(\n shift=mu,\n scale_identity_multiplier=1.,\n scale_diag=[1.],\n event_ndims=0)\n self.assertEqual(0, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [1., 2, 3] # Three scalar samples (no batches).\n self.assertAllClose([1., 3, 5], run(bijector.forward, x))\n self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))\n self.assertAllClose(-math.log(2.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testIdentityWithTriL(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # scale = [[2., 0], [2, 2]]\n bijector = bijectors.Affine(\n shift=mu,\n scale_identity_multiplier=1.,\n scale_tril=[[1., 0], [2., 1]])\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [[1., 2]] # One multivariate sample.\n self.assertAllClose([[1., 5]], run(bijector.forward, x))\n self.assertAllClose([[1., 0.5]], run(bijector.inverse, x))\n self.assertAllClose(-math.log(4.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testDiagWithTriL(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # scale = [[2., 0], [2, 3]]\n bijector = bijectors.Affine(\n shift=mu, scale_diag=[1., 2.], scale_tril=[[1., 0], [2., 1]])\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [[1., 2]] # One multivariate sample.\n self.assertAllClose([[1., 7]], run(bijector.forward, x))\n self.assertAllClose([[1., 1 / 3.]], run(bijector.inverse, x))\n self.assertAllClose(-math.log(6.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testIdentityAndDiagWithTriL(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run 
in (static_run, dynamic_run):\n mu = -1.\n # scale = [[3., 0], [2, 4]]\n bijector = bijectors.Affine(\n shift=mu,\n scale_identity_multiplier=1.0,\n scale_diag=[1., 2.],\n scale_tril=[[1., 0], [2., 1]])\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [[1., 2]] # One multivariate sample.\n self.assertAllClose([[2., 9]], run(bijector.forward, x))\n self.assertAllClose([[2 / 3., 5 / 12.]], run(bijector.inverse, x))\n self.assertAllClose(-math.log(12.),\n run(bijector.inverse_log_det_jacobian, x))\n\n def testIdentityWithVDVTUpdate(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # Corresponds to scale = [[10, 0, 0], [0, 2, 0], [0, 0, 3]]\n bijector = bijectors.Affine(\n shift=mu,\n scale_identity_multiplier=2.,\n scale_perturb_diag=[2., 1],\n scale_perturb_factor=[[2., 0],\n [0., 0],\n [0, 1]])\n bijector_ref = bijectors.Affine(shift=mu, scale_diag=[10., 2, 3])\n\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [1., 2, 3] # Vector.\n self.assertAllClose([9., 3, 8], run(bijector.forward, x))\n self.assertAllClose(\n run(bijector_ref.forward, x), run(bijector.forward, x))\n\n self.assertAllClose([0.2, 1.5, 4 / 3.], run(bijector.inverse, x))\n self.assertAllClose(\n run(bijector_ref.inverse, x), run(bijector.inverse, x))\n self.assertAllClose(-math.log(60.),\n run(bijector.inverse_log_det_jacobian, x))\n self.assertAllClose(\n run(bijector.inverse_log_det_jacobian, x),\n run(bijector_ref.inverse_log_det_jacobian, x))\n\n def testDiagWithVDVTUpdate(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # Corresponds to scale = [[10, 0, 0], [0, 3, 0], [0, 0, 5]]\n bijector = bijectors.Affine(\n shift=mu,\n scale_diag=[2., 3, 4],\n scale_perturb_diag=[2., 1],\n scale_perturb_factor=[[2., 0],\n [0., 0],\n [0, 1]])\n bijector_ref = bijectors.Affine(shift=mu, scale_diag=[10., 3, 5])\n\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [1., 2, 3] # Vector.\n self.assertAllClose([9., 5, 14], run(bijector.forward, x))\n self.assertAllClose(\n run(bijector_ref.forward, x), run(bijector.forward, x))\n self.assertAllClose([0.2, 1., 0.8], run(bijector.inverse, x))\n self.assertAllClose(\n run(bijector_ref.inverse, x), run(bijector.inverse, x))\n self.assertAllClose(-math.log(150.),\n run(bijector.inverse_log_det_jacobian, x))\n self.assertAllClose(\n run(bijector.inverse_log_det_jacobian, x),\n run(bijector_ref.inverse_log_det_jacobian, x))\n\n def testTriLWithVDVTUpdate(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # Corresponds to scale = [[10, 0, 0], [1, 3, 0], [2, 3, 5]]\n bijector = bijectors.Affine(\n shift=mu,\n scale_tril=[[2., 0, 0],\n [1, 3, 0],\n [2, 3, 4]],\n scale_perturb_diag=[2., 1],\n 
scale_perturb_factor=[[2., 0],\n [0., 0],\n [0, 1]])\n bijector_ref = bijectors.Affine(\n shift=mu, scale_tril=[[10., 0, 0],\n [1, 3, 0],\n [2, 3, 5]])\n\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [1., 2, 3] # Vector.\n self.assertAllClose([9., 6, 22], run(bijector.forward, x))\n self.assertAllClose(\n run(bijector_ref.forward, x), run(bijector.forward, x))\n self.assertAllClose([0.2, 14 / 15., 4 / 25.], run(bijector.inverse, x))\n self.assertAllClose(\n run(bijector_ref.inverse, x), run(bijector.inverse, x))\n self.assertAllClose(-math.log(150.),\n run(bijector.inverse_log_det_jacobian, x))\n self.assertAllClose(\n run(bijector.inverse_log_det_jacobian, x),\n run(bijector_ref.inverse_log_det_jacobian, x))\n\n def testTriLWithVDVTUpdateNoDiagonal(self):\n with self.test_session() as sess:\n\n def static_run(fun, x):\n return fun(x).eval()\n\n def dynamic_run(fun, x_value):\n x_value = np.array(x_value)\n x = array_ops.placeholder(dtypes.float32, name=\"x\")\n return sess.run(fun(x), feed_dict={x: x_value})\n\n for run in (static_run, dynamic_run):\n mu = -1.\n # Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]\n bijector = bijectors.Affine(\n shift=mu,\n scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],\n scale_perturb_diag=None,\n scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])\n bijector_ref = bijectors.Affine(\n shift=mu, scale_tril=[[6., 0, 0], [1, 3, 0], [2, 3, 5]])\n\n self.assertEqual(1, bijector.shaper.event_ndims.eval()) # \"is vector\"\n x = [1., 2, 3] # Vector.\n self.assertAllClose([5., 6, 22], run(bijector.forward, x))\n self.assertAllClose(\n run(bijector_ref.forward, x), run(bijector.forward, x))\n self.assertAllClose([1 / 3., 8 / 9., 4 / 30.], run(bijector.inverse, x))\n self.assertAllClose(\n run(bijector_ref.inverse, x), run(bijector.inverse, x))\n self.assertAllClose(-math.log(90.),\n run(bijector.inverse_log_det_jacobian, x))\n self.assertAllClose(\n run(bijector.inverse_log_det_jacobian, x),\n run(bijector_ref.inverse_log_det_jacobian, x))\n\n def testNoBatchMultivariateRaisesWhenSingular(self):\n with self.test_session():\n mu = [1., -1]\n bijector = bijectors.Affine(\n shift=mu,\n # Has zero on the diagonal.\n scale_diag=[0., 1],\n validate_args=True)\n with self.assertRaisesOpError(\"Condition x > 0\"):\n bijector.forward([1., 1.]).eval()\n\n def testEventNdimsLargerThanOneRaises(self):\n with self.test_session():\n mu = [1., -1]\n # Scale corresponds to 2x2 identity matrix.\n bijector = bijectors.Affine(shift=mu, event_ndims=2, validate_args=True)\n bijector.forward([1., 1.]).eval()\n\n def testScaleZeroScalarRaises(self):\n with self.test_session():\n mu = -1.\n # Check Identity matrix with zero scaling.\n bijector = bijectors.Affine(\n shift=mu,\n scale_identity_multiplier=0.0,\n event_ndims=0,\n validate_args=True)\n with self.assertRaisesOpError(\"Condition x > 0\"):\n bijector.forward(1.).eval()\n\n # Check Diag matrix with zero scaling.\n bijector = bijectors.Affine(\n shift=mu, scale_diag=[0.0], event_ndims=0, validate_args=True)\n with self.assertRaisesOpError(\"Condition x > 0\"):\n bijector.forward(1.).eval()\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.Affine(\n shift=3.6, scale_identity_multiplier=0.42, event_ndims=0)\n assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)\n\n def _makeScale(self,\n x,\n scale_identity_multiplier=None,\n scale_diag=None,\n scale_tril=None,\n scale_perturb_factor=None,\n scale_perturb_diag=None):\n \"\"\"Create a scale matrix. 
Return None if it can not be created.\"\"\"\n c = scale_identity_multiplier\n d1 = scale_diag\n tril = scale_tril\n v = scale_perturb_factor\n d2 = scale_perturb_diag\n\n # Ambiguous low rank update.\n if v is None and d2 is not None:\n return None\n\n if c is None and d1 is None and tril is None:\n # Special case when no scale args are passed in. This means use an\n # identity matrix.\n if v is None and d2 is None:\n c = 1.\n # No scale.\n else:\n return None\n\n matrix = np.float32(0.)\n if c is not None:\n # Infer the dimension from x.\n matrix += c * self._matrix_diag(np.ones_like(x))\n if d1 is not None:\n matrix += self._matrix_diag(np.array(d1, dtype=np.float32))\n if tril is not None:\n matrix += np.array(tril, dtype=np.float32)\n if v is not None:\n v = np.array(v, dtype=np.float32)\n if v.ndim < 2:\n vt = v.T\n else:\n vt = np.swapaxes(v, axis1=v.ndim - 2, axis2=v.ndim - 1)\n if d2 is not None:\n d2 = self._matrix_diag(np.array(d2, dtype=np.float32))\n right = np.matmul(d2, vt)\n else:\n right = vt\n matrix += np.matmul(v, right)\n return matrix\n\n def _matrix_diag(self, d):\n \"\"\"Batch version of np.diag.\"\"\"\n orig_shape = d.shape\n d = np.reshape(d, (np.prod(d.shape[:-1]), d.shape[-1]))\n diag_list = []\n for i in range(d.shape[0]):\n diag_list.append(np.diag(d[i, ...]))\n return np.reshape(diag_list, orig_shape + (d.shape[-1],))\n\n def _testLegalInputs(self, shift=None, scale_params=None, x=None):\n\n def _powerset(x):\n s = list(x)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(len(s) + 1))\n\n with self.test_session():\n for args in _powerset(scale_params.items()):\n args = dict(args)\n\n scale_args = dict({\"x\": x}, **args)\n scale = self._makeScale(**scale_args)\n\n bijector_args = dict({\"event_ndims\": 1}, **args)\n\n # We haven't specified enough information for the scale.\n if scale is None:\n with self.assertRaisesRegexp(ValueError, (\"must be specified.\")):\n bijector = bijectors.Affine(shift=shift, **bijector_args)\n else:\n bijector = bijectors.Affine(shift=shift, **bijector_args)\n np_x = x\n # For the case a vector is passed in, we need to make the shape\n # match the matrix for matmul to work.\n if x.ndim == scale.ndim - 1:\n np_x = np.expand_dims(x, axis=-1)\n\n forward = np.matmul(scale, np_x) + shift\n if x.ndim == scale.ndim - 1:\n forward = np.squeeze(forward, axis=-1)\n self.assertAllClose(forward, bijector.forward(x).eval())\n\n backward = np.linalg.solve(scale, np_x - shift)\n if x.ndim == scale.ndim - 1:\n backward = np.squeeze(backward, axis=-1)\n self.assertAllClose(backward, bijector.inverse(x).eval())\n\n ildj = -np.log(np.abs(np.linalg.det(scale)))\n # TODO(jvdillon): We need to make it so the scale_identity_multiplier\n # case does not deviate in expected shape. 
Fixing this will get rid of\n # these special cases.\n if (ildj.ndim > 0 and (len(scale_args) == 1 or (\n len(scale_args) == 2 and\n scale_args.get(\"scale_identity_multiplier\", None) is not None))):\n ildj = np.squeeze(ildj[0])\n elif ildj.ndim < scale.ndim - 2:\n ildj = np.reshape(ildj, scale.shape[0:-2])\n self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(x).eval())\n\n def testLegalInputs(self):\n self._testLegalInputs(\n shift=np.float32(-1),\n scale_params={\n \"scale_identity_multiplier\": 2.,\n \"scale_diag\": [2., 3.],\n \"scale_tril\": [[1., 0.],\n [-3., 3.]],\n \"scale_perturb_factor\": [[1., 0],\n [1.5, 3.]],\n \"scale_perturb_diag\": [3., 1.]\n },\n x=np.array(\n [1., 2], dtype=np.float32))\n\n def testLegalInputsWithBatch(self):\n # Shape of scale is [2, 1, 2, 2]\n self._testLegalInputs(\n shift=np.float32(-1),\n scale_params={\n \"scale_identity_multiplier\": 2.,\n \"scale_diag\": [[[2., 3.]], [[1., 2]]],\n \"scale_tril\": [[[[1., 0.], [-3., 3.]]], [[[0.5, 0.], [1., 1.]]]],\n \"scale_perturb_factor\": [[[[1., 0], [1.5, 3.]]],\n [[[1., 0], [1., 1.]]]],\n \"scale_perturb_diag\": [[[3., 1.]], [[0.5, 1.]]]\n },\n x=np.array(\n [[[1., 2]], [[3., 4]]], dtype=np.float32))\n\n def testNegativeDetTrilPlusVDVT(self):\n # scale = [[3.7, 2.7],\n # [-0.3, -1.3]]\n # inv(scale) = [[0.325, 0.675],\n # [-0.075, -0.925]]\n # eig(scale) = [3.5324, -1.1324]\n self._testLegalInputs(\n shift=np.float32(-1),\n scale_params={\n \"scale_tril\": [[1., 0], [-3, -4]],\n \"scale_perturb_factor\": [[0.1, 0], [0.5, 0.3]],\n \"scale_perturb_diag\": [3., 1]\n },\n x=np.array(\n [1., 2], dtype=np.float32))\n\n def testScalePropertyAssertsCorrectly(self):\n with self.test_session():\n with self.assertRaises(NotImplementedError):\n scale = bijectors.Affine( # pylint:disable=unused-variable\n scale_tril=[[1., 0], [2, 1]],\n scale_perturb_factor=[2., 1.]).scale\n\n\nclass SoftplusBijectorTest(test.TestCase):\n \"\"\"Tests the correctness of the Y = g(X) = Log[1 + exp(X)] transformation.\"\"\"\n\n def _softplus(self, x):\n return np.log(1 + np.exp(x))\n\n def _softplus_inverse(self, y):\n return np.log(np.exp(y) - 1)\n\n def _softplus_ildj_before_reduction(self, y):\n \"\"\"Inverse log det jacobian, before being reduced.\"\"\"\n return -np.log(1 - np.exp(-y))\n\n def testBijectorForwardInverseEventDimsZero(self):\n with self.test_session():\n bijector = bijectors.Softplus(event_ndims=0)\n self.assertEqual(\"softplus\", bijector.name)\n x = 2 * rng.randn(2, 10)\n y = self._softplus(x)\n\n self.assertAllClose(y, bijector.forward(x).eval())\n self.assertAllClose(x, bijector.inverse(y).eval())\n self.assertAllClose(\n x, bijector.inverse_and_inverse_log_det_jacobian(y)[0].eval())\n\n def testBijectorLogDetJacobianEventDimsZero(self):\n with self.test_session():\n bijector = bijectors.Softplus(event_ndims=0)\n y = 2 * rng.rand(2, 10)\n # No reduction needed if event_dims = 0.\n ildj = self._softplus_ildj_before_reduction(y)\n\n self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(\n ildj, bijector.inverse_and_inverse_log_det_jacobian(y)[1].eval())\n\n def testBijectorForwardInverseEventDimsOne(self):\n with self.test_session():\n bijector = bijectors.Softplus(event_ndims=1)\n self.assertEqual(\"softplus\", bijector.name)\n x = 2 * rng.randn(2, 10)\n y = self._softplus(x)\n\n self.assertAllClose(y, bijector.forward(x).eval())\n self.assertAllClose(x, bijector.inverse(y).eval())\n self.assertAllClose(\n x, bijector.inverse_and_inverse_log_det_jacobian(y)[0].eval())\n\n 
def testBijectorLogDetJacobianEventDimsOne(self):\n with self.test_session():\n bijector = bijectors.Softplus(event_ndims=1)\n y = 2 * rng.rand(2, 10)\n ildj_before = self._softplus_ildj_before_reduction(y)\n ildj = np.sum(ildj_before, axis=1)\n\n self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(y).eval())\n self.assertAllClose(\n ildj, bijector.inverse_and_inverse_log_det_jacobian(y)[1].eval())\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.Softplus(event_ndims=0)\n assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)\n\n def testBijectiveAndFinite32bit(self):\n with self.test_session():\n bijector = bijectors.Softplus(event_ndims=0)\n x = np.linspace(-20., 20., 100).astype(np.float32)\n y = np.logspace(-10, 10, 100).astype(np.float32)\n assert_bijective_and_finite(bijector, x, y, rtol=1e-2, atol=1e-2)\n\n def testBijectiveAndFinite16bit(self):\n with self.test_session():\n bijector = bijectors.Softplus(event_ndims=0)\n # softplus(-20) is zero, so we can't use such a large range as in 32bit.\n x = np.linspace(-10., 20., 100).astype(np.float16)\n # Note that float16 is only in the open set (0, inf) for a smaller\n # logspace range. The actual range was (-7, 4), so use something smaller\n # for the test.\n y = np.logspace(-6, 3, 100).astype(np.float16)\n assert_bijective_and_finite(bijector, x, y, rtol=1e-1, atol=1e-3)\n\n\nclass SoftmaxCenteredBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the Y = g(X) = exp(X) / sum(exp(X)) transformation.\"\"\"\n\n def testBijectorScalar(self):\n with self.test_session():\n softmax = bijectors.SoftmaxCentered() # scalar by default\n self.assertEqual(\"softmax_centered\", softmax.name)\n x = np.log([[2., 3, 4],\n [4., 8, 12]])\n y = [[[2. / 3, 1. / 3],\n [3. / 4, 1. / 4],\n [4. / 5, 1. / 5]],\n [[4. / 5, 1. / 5],\n [8. / 9, 1. / 9],\n [12. / 13, 1. 
/ 13]]]\n self.assertAllClose(y, softmax.forward(x).eval())\n self.assertAllClose(x, softmax.inverse(y).eval())\n self.assertAllClose(\n -np.sum(np.log(y), axis=2),\n softmax.inverse_log_det_jacobian(y).eval(),\n atol=0.,\n rtol=1e-7)\n self.assertAllClose(\n -softmax.inverse_log_det_jacobian(y).eval(),\n softmax.forward_log_det_jacobian(x).eval(),\n atol=0.,\n rtol=1e-7)\n\n def testBijectorVector(self):\n with self.test_session():\n softmax = bijectors.SoftmaxCentered(event_ndims=1)\n self.assertEqual(\"softmax_centered\", softmax.name)\n x = np.log([[2., 3, 4], [4., 8, 12]])\n y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]\n self.assertAllClose(y, softmax.forward(x).eval())\n self.assertAllClose(x, softmax.inverse(y).eval())\n self.assertAllClose(\n -np.sum(np.log(y), axis=1),\n softmax.inverse_log_det_jacobian(y).eval(),\n atol=0.,\n rtol=1e-7)\n self.assertAllClose(\n -softmax.inverse_log_det_jacobian(y).eval(),\n softmax.forward_log_det_jacobian(x).eval(),\n atol=0.,\n rtol=1e-7)\n\n def testShapeGetters(self):\n with self.test_session():\n for x, y, b in ((tensor_shape.TensorShape([]),\n tensor_shape.TensorShape([2]), bijectors.SoftmaxCentered(\n event_ndims=0, validate_args=True)),\n (tensor_shape.TensorShape([4]),\n tensor_shape.TensorShape([5]), bijectors.SoftmaxCentered(\n event_ndims=1, validate_args=True))):\n self.assertAllEqual(y, b.get_forward_event_shape(x))\n self.assertAllEqual(y.as_list(),\n b.forward_event_shape(x.as_list()).eval())\n self.assertAllEqual(x, b.get_inverse_event_shape(y))\n self.assertAllEqual(x.as_list(),\n b.inverse_event_shape(y.as_list()).eval())\n\n def testBijectiveAndFinite(self):\n with self.test_session():\n softmax = bijectors.SoftmaxCentered(event_ndims=1)\n x = np.linspace(-50, 50, num=10).reshape(5, 2).astype(np.float32)\n # Make y values on the simplex with a wide range.\n y_0 = np.ones(5).astype(np.float32)\n y_1 = (1e-5 * rng.rand(5)).astype(np.float32)\n y_2 = (1e1 * rng.rand(5)).astype(np.float32)\n y = np.array([y_0, y_1, y_2])\n y /= y.sum(axis=0)\n y = y.T # y.shape = [5, 3]\n assert_bijective_and_finite(softmax, x, y)\n\n\nclass SigmoidCenteredBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n sigmoid = bijectors.SigmoidCentered()\n self.assertEqual(\"sigmoid_centered\", sigmoid.name)\n x = np.log([[2., 3, 4],\n [4., 8, 12]])\n y = [[[2. / 3, 1. / 3],\n [3. / 4, 1. / 4],\n [4. / 5, 1. / 5]],\n [[4. / 5, 1. / 5],\n [8. / 9, 1. / 9],\n [12. / 13, 1. / 13]]]\n self.assertAllClose(y, sigmoid.forward(x).eval())\n self.assertAllClose(x, sigmoid.inverse(y).eval())\n self.assertAllClose(\n -np.sum(np.log(y), axis=2),\n sigmoid.inverse_log_det_jacobian(y).eval(),\n atol=0.,\n rtol=1e-7)\n self.assertAllClose(\n -sigmoid.inverse_log_det_jacobian(y).eval(),\n sigmoid.forward_log_det_jacobian(x).eval(),\n atol=0.,\n rtol=1e-7)\n\n\nclass CholeskyOuterProductBijectorTest(test.TestCase):\n \"\"\"Tests the correctness of the Y = X * X^T transformation.\"\"\"\n\n def testBijectorMatrix(self):\n with self.test_session():\n bijector = bijectors.CholeskyOuterProduct(\n event_ndims=2, validate_args=True)\n self.assertEqual(\"cholesky_outer_product\", bijector.name)\n x = [[[1., 0], [2, 1]], [[math.sqrt(2.), 0], [math.sqrt(8.), 1]]]\n y = np.matmul(x, np.transpose(x, axes=(0, 2, 1)))\n # Fairly easy to compute differentials since we have 2x2.\n dx_dy = [[[2. 
* 1, 0, 0],\n [2, 1, 0],\n [0, 2 * 2, 2 * 1]],\n [[2 * math.sqrt(2.), 0, 0],\n [math.sqrt(8.), math.sqrt(2.), 0],\n [0, 2 * math.sqrt(8.), 2 * 1]]]\n ildj = -np.sum(\n np.log(np.asarray(dx_dy).diagonal(\n offset=0, axis1=1, axis2=2)),\n axis=1)\n self.assertAllEqual((2, 2, 2), bijector.forward(x).get_shape())\n self.assertAllEqual((2, 2, 2), bijector.inverse(y).get_shape())\n self.assertAllClose(y, bijector.forward(x).eval())\n self.assertAllClose(x, bijector.inverse(y).eval())\n self.assertAllClose(\n ildj, bijector.inverse_log_det_jacobian(y).eval(), atol=0., rtol=1e-7)\n self.assertAllClose(\n -bijector.inverse_log_det_jacobian(y).eval(),\n bijector.forward_log_det_jacobian(x).eval(),\n atol=0.,\n rtol=1e-7)\n\n def testBijectorScalar(self):\n with self.test_session():\n bijector = bijectors.CholeskyOuterProduct(\n event_ndims=0, validate_args=True)\n self.assertEqual(\"cholesky_outer_product\", bijector.name)\n x = [[[1., 5],\n [2, 1]],\n [[math.sqrt(2.), 3],\n [math.sqrt(8.), 1]]]\n y = np.square(x)\n ildj = -math.log(2.) - np.log(x)\n self.assertAllClose(y, bijector.forward(x).eval())\n self.assertAllClose(x, bijector.inverse(y).eval())\n self.assertAllClose(\n ildj, bijector.inverse_log_det_jacobian(y).eval(), atol=0., rtol=1e-7)\n self.assertAllClose(\n -bijector.inverse_log_det_jacobian(y).eval(),\n bijector.forward_log_det_jacobian(x).eval(),\n atol=0.,\n rtol=1e-7)\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.CholeskyOuterProduct(\n event_ndims=0, validate_args=True)\n assert_scalar_congruency(bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)\n\n\nclass ChainBijectorTest(test.TestCase):\n \"\"\"Tests the correctness of the Y = Chain(bij1, bij2, bij3) transformation.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n chain = bijectors.Chain((bijectors.Exp(event_ndims=1),\n bijectors.Softplus(event_ndims=1)))\n self.assertEqual(\"chain_of_exp_of_softplus\", chain.name)\n x = np.asarray([[[1., 2.],\n [2., 3.]]])\n self.assertAllClose(1. 
+ np.exp(x), chain.forward(x).eval())\n self.assertAllClose(np.log(x - 1.), chain.inverse(x).eval())\n self.assertAllClose(\n -np.sum(np.log(x - 1.), axis=2),\n chain.inverse_log_det_jacobian(x).eval())\n self.assertAllClose(\n np.sum(x, axis=2), chain.forward_log_det_jacobian(x).eval())\n\n def testBijectorIdentity(self):\n with self.test_session():\n chain = bijectors.Chain()\n self.assertEqual(\"identity\", chain.name)\n x = np.asarray([[[1., 2.],\n [2., 3.]]])\n self.assertAllClose(x, chain.forward(x).eval())\n self.assertAllClose(x, chain.inverse(x).eval())\n self.assertAllClose(0., chain.inverse_log_det_jacobian(x).eval())\n self.assertAllClose(0., chain.forward_log_det_jacobian(x).eval())\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.Chain((bijectors.Exp(), bijectors.Softplus()))\n assert_scalar_congruency(bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)\n\n def testShapeGetters(self):\n with self.test_session():\n bijector = bijectors.Chain((bijectors.SoftmaxCentered(\n event_ndims=1, validate_args=True), bijectors.SoftmaxCentered(\n event_ndims=0, validate_args=True)))\n x = tensor_shape.TensorShape([])\n y = tensor_shape.TensorShape([2 + 1])\n self.assertAllEqual(y, bijector.get_forward_event_shape(x))\n self.assertAllEqual(y.as_list(),\n bijector.forward_event_shape(x.as_list()).eval())\n self.assertAllEqual(x, bijector.get_inverse_event_shape(y))\n self.assertAllEqual(x.as_list(),\n bijector.inverse_event_shape(y.as_list()).eval())\n\n\nclass InvertBijectorTest(test.TestCase):\n \"\"\"Tests the correctness of the Y = Invert(bij) transformation.\"\"\"\n\n def testBijector(self):\n with self.test_session():\n for fwd in [\n bijectors.Identity(),\n bijectors.Exp(event_ndims=1),\n bijectors.Affine(\n shift=[0., 1.], scale_diag=[2., 3.], event_ndims=1),\n bijectors.Softplus(event_ndims=1),\n bijectors.SoftmaxCentered(event_ndims=1),\n bijectors.SigmoidCentered(),\n ]:\n rev = bijectors.Invert(fwd)\n self.assertEqual(\"_\".join([\"invert\", fwd.name]), rev.name)\n x = [[[1., 2.],\n [2., 3.]]]\n self.assertAllClose(fwd.inverse(x).eval(), rev.forward(x).eval())\n self.assertAllClose(fwd.forward(x).eval(), rev.inverse(x).eval())\n self.assertAllClose(\n fwd.forward_log_det_jacobian(x).eval(),\n rev.inverse_log_det_jacobian(x).eval())\n self.assertAllClose(\n fwd.inverse_log_det_jacobian(x).eval(),\n rev.forward_log_det_jacobian(x).eval())\n inv, jac = rev.inverse_and_inverse_log_det_jacobian(x)\n self.assertAllClose(fwd.forward(x).eval(), inv.eval())\n self.assertAllClose(fwd.forward_log_det_jacobian(x).eval(), jac.eval())\n\n def testScalarCongruency(self):\n with self.test_session():\n bijector = bijectors.Invert(bijectors.Exp())\n assert_scalar_congruency(bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)\n\n def testShapeGetters(self):\n with self.test_session():\n bijector = bijectors.Invert(bijectors.SigmoidCentered(validate_args=True))\n x = tensor_shape.TensorShape([2])\n y = tensor_shape.TensorShape([])\n self.assertAllEqual(y, bijector.get_forward_event_shape(x))\n self.assertAllEqual(y.as_list(),\n bijector.forward_event_shape(x.as_list()).eval())\n self.assertAllEqual(x, bijector.get_inverse_event_shape(y))\n self.assertAllEqual(x.as_list(),\n bijector.inverse_event_shape(y.as_list()).eval())\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.debug.debug_utils.watch_graph",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.debug.stepper.NodeStepper"
],
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.check_ops.assert_positive",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.math_ops.digamma",
"tensorflow.python.ops.random_ops.random_gamma",
"tensorflow.python.ops.nn.softplus",
"tensorflow.contrib.distributions.python.ops.distribution_util.AppendDocstring",
"tensorflow.python.ops.math_ops.igammac",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.lgamma",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.BasicLSTMCell",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.variables.global_variables",
"tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.LSTMCell",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl._SlimRNNCell",
"tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.BasicRNNCell",
"tensorflow.python.ops.rnn.dynamic_rnn",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.GRUCell",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl._linear",
"numpy.linalg.norm",
"numpy.ones",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.random_ops.random_uniform"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.ops.math_ops.logical_and",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.contrib.distributions.python.ops.distribution_util.pick_vector",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.ops.array_ops.where",
"tensorflow.contrib.distributions.python.ops.distribution_util.AppendDocstring",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.contrib.distributions.python.ops.bijector.Identity",
"numpy.logical_not",
"tensorflow.python.ops.check_ops.assert_rank",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.concat",
"numpy.array_equal",
"tensorflow.python.ops.check_ops.assert_positive",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.platform.app.run",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.core.framework.graph_pb2.GraphDef.FromString",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.core.framework.graph_pb2.GraphDef"
],
[
"tensorflow.python.ops.math_ops.log",
"numpy.diag",
"tensorflow.python.framework.tensor_shape.TensorShape",
"numpy.expand_dims",
"numpy.linspace",
"numpy.asarray",
"numpy.squeeze",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.exp",
"numpy.divide",
"numpy.square",
"numpy.swapaxes",
"numpy.ones_like",
"numpy.reshape",
"numpy.matmul",
"tensorflow.python.framework.ops.get_default_session",
"numpy.linalg.det",
"tensorflow.python.platform.test.main",
"numpy.diff",
"numpy.float32",
"numpy.log",
"numpy.logspace",
"tensorflow.python.ops.math_ops.reduce_mean",
"numpy.testing.assert_allclose",
"numpy.transpose",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"numpy.diagonal",
"numpy.linalg.solve",
"tensorflow.python.ops.array_ops.concat",
"numpy.isfinite",
"numpy.abs",
"numpy.ones",
"numpy.testing.assert_array_less",
"numpy.prod",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12",
"1.0"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12",
"1.0"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
student-work-agu-gis2021/lesson7-matplotlib-AbeRyusei | [
"2adc657c1c1c02014a5a113b25f28756df377619"
] | [
"Exercise_7_problem_1.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Problem 1: Simple scatter plot using random \n# \n# We can generate random numbers using using a method `random.rand()` from the [NumPy package](https://numpy.org/). This example generates 10 random values:\n# \n# ```\n# import numpy as np\n# random_numbers = np.random.rand(10)\n# \n# ```\n# \n# ### Part 1\n# \n# Create an new data frame called `data` and add 1000 random numbers (`float`) into a new column `x` and another 1000 random numbers (`float`) into a new column `y`.\n\nimport numpy as np\nimport pandas as pd\n\n# YOUR CODE HERE 1 to set data\nx = np.random.rand(1000)\ny = np.random.rand(1000)\ndata = pd.DataFrame()\ndata[\"x\"] = x\ndata[\"y\"] = y\n# Check your random values\nprint(data.head())\n\n# Check that you have the correct number of rows\nassert len(data) == 1000, \"There should be 1000 rows of data.\"\n\n\n# ### Part 2\n# \n\n# YOUR CODE HERE 2 to set colors\ncolors = np.random.rand(1000)\n# This test print should print out 10 first numbers in the variable colors\nprint(colors[0:10])\n\n# Check that the length matches\nassert len(colors) == 1000, \"There should be 1000 random numbers for colors\"\n\n\n# ### Part 3 \n# \n# #### Part 3.1\n# \n# Create a scatter plot of points with random colors\n# \n# #### Part 3.2\n# \n# #### Part 3.3\n# \n\n# Plot a scatter plot\n# YOUR CODE HERE 3\nimport matplotlib.pyplot as plt\nplt.scatter(x, y, s = 50, c = colors, cmap = 'rainbow', edgecolor = 'black')\n# Add labels and title\n# YOUR CODE HERE 4\nplt.title(\"My random candy points\")\nplt.xlabel(\"X-label\")\nplt.ylabel(\"Y-label\")\nplt.show()\n# Save the plot as a png file:\noutputfp = \"my_first_plot.png\"\n\n# YOUR CODE HERE 5\n\n# This test print statement should print the output filename of your figure\nprint(\"Saved my first plot as:\", outputfp)\n\n#Check that the file exists (also go and open the file to check that everything is ok!)\nimport os\n\nassert os.path.exists(outputfp), \"Can't find the output image.\"\n\n\n# Remember to commit your changes (including the image file) to your GitHub repo!\n# \n# ### Done!\n# \n# Now you can move to [problem 2](Exercise-7-problem-2.ipynb).\n"
] | [
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"numpy.random.rand",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MingAtUWA/SimpleMPM2 | [
"7a1d7c257c621123d85a0630e93d42ae25c70fb4",
"7a1d7c257c621123d85a0630e93d42ae25c70fb4"
] | [
"PyUtilities/OneDConsolidation.py",
"PyUtilities/hdf5_stress_range.py"
] | [
"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass OneDConsolidation:\n \"\"\"\n z = 0, free flow boundary condition\n z = H, impermeable boundary condition\n Parameters:\n 1. Cv, coefficient of consolidation;\n 2. Es, one dimensional compressive modulus\n 3. u0, initial pore pressure;\n 4. H, depth of soil;\n 5. error_ratio, used to control the calculation precision.\n \"\"\"\n def __init__(self, Cv, Es, u0, H, error_ratio = 1.0e-3):\n self.Cv = Cv\n self.Es = Es\n self.u0 = u0\n self.H = H\n # Final settlement\n self.dH_final = -H * u0 / Es\n self.error_ratio = error_ratio\n \n def calPorePressure(self, t, z):\n Tv = self.Cv * t / (self.H * self.H)\n p = 0.0\n z = z / self.H\n i = 0\n while True:\n M = (2*i+1) * math.pi / 2.0\n inc = 2.0/M * math.sin(M*z) * math.exp(-M*M*Tv)\n p += inc\n i += 1\n if abs(inc) < self.error_ratio:\n break\n if (p > 1.0): p = 1.0\n p *= self.u0\n return p\n \n def calSettlement(self, t):\n Tv = self.Cv * t / (self.H * self.H)\n dH = 0.0\n i = 0\n while True:\n M = (2*i+1) * math.pi / 2.0\n inc = 2.0/(M*M) * math.exp(-M*M*Tv)\n dH += inc\n i += 1\n if abs(inc) < self.error_ratio:\n break\n dH = self.dH_final * (1.0 - dH)\n return dH\n \nif __name__ == \"__main__\":\n Es = 40.0e6\n kv = 1.0e-5\n miu = 1.0 # dynamic viscosity\n Cv = kv * Es / miu\n u0 = 40.0e3\n H = 10.0\n con_res = OneDConsolidation(Cv, Es, u0, H)\n \n fig = plt.figure()\n plot1 = fig.subplots(1, 1)\n plot1.set_title('Settlement - Time relation')\n plot1.set_xlabel('Time')\n plot1.set_ylabel('Settlement')\n \n data_num = 100\n t_list = np.zeros(data_num)\n p_list = np.zeros(data_num)\n u_list = np.zeros(data_num)\n for i in range(data_num):\n t_list[i] = 0.01 * float(i)\n p_list[i] = con_res.calPorePressure(t_list[i], 10.0)\n u_list[i] = con_res.calSettlement(t_list[i])\n \n plot1.set_xlim([t_list[0], t_list[data_num-1]])\n \n plot1.plot(t_list, p_list, 'k--')\n #plot1.plot(t_list, u_list, 'k--')\n \n plt.show()\n",
"import numpy as np\nimport h5py as py\nimport matplotlib.pyplot as plt\nimport sys\n\nhdf5_file = py.File(\"..\\\\Build\\\\TestsWithGL\\\\t2d_mpm_chm_t_bar_conference_restart.hdf5\", \"r\")\nframe_id = 0\n\nth_grp = hdf5_file['TimeHistory']['penetration']\npcl_dset = th_grp['frame_%d' % frame_id]['ParticleData']\npcl_num = pcl_dset.attrs['pcl_num']\nprint(pcl_num)\n\npcl_stress = np.zeros([pcl_num, 4])\np_min_id = 0\np_min = sys.float_info.min\np_max_id = 0\np_max = -sys.float_info.max\nfor pcl_id in range(pcl_num):\n pcl_data = pcl_dset[pcl_id]\n pcl_stress[pcl_id][0] = pcl_data['s11']\n pcl_stress[pcl_id][1] = pcl_data['s22']\n pcl_stress[pcl_id][2] = pcl_data['s12']\n pcl_stress[pcl_id][3] = pcl_data['p']\n #p = pcl_stress[pcl_id][3]\n p = (pcl_stress[pcl_id][0] + pcl_stress[pcl_id][1] + pcl_stress[pcl_id][2]) / 3.0\n if (p < p_min):\n p_min = p\n p_min_id = pcl_id\n if (p > p_max):\n p_max = p\n p_max_id = pcl_id\n\nprint(\"p min: %f pcl %d\\np max: %f pcl %d\" % (p_min, p_min_id, p_max, p_max_id))\nhdf5_file.close()\n"
] | [
[
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
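The OneDConsolidation class in the entry above exposes calPorePressure(t, z) and calSettlement(t) for one-dimensional consolidation with a free-draining top and an impermeable base (per its docstring). A minimal usage sketch, assuming the class is importable from OneDConsolidation.py and reusing the parameter values from its __main__ block:

from OneDConsolidation import OneDConsolidation

# Material and loading parameters taken from the module's __main__ block.
Es = 40.0e6    # one-dimensional compressive modulus
kv = 1.0e-5    # permeability
miu = 1.0      # dynamic viscosity
Cv = kv * Es / miu
u0 = 40.0e3    # initial pore pressure
H = 10.0       # depth of the soil layer

con = OneDConsolidation(Cv, Es, u0, H)

t = 0.5  # time at which to evaluate the series solutions
print("pore pressure at z = H:", con.calPorePressure(t, H))
print("settlement:", con.calSettlement(t))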
YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | [
"23b9191f150d0edb981cf22a47a618feb55578b9",
"23b9191f150d0edb981cf22a47a618feb55578b9"
] | [
"AR/exp/common/ntu_tools.py",
"VA/datasets/UCF-11/extract-resize-videos.py"
] | [
"import os\n\nimport numpy as np\nimport json\nimport time\n\nfrom keras.callbacks import Callback\n\nfrom deephar.data import BatchLoader\nfrom deephar.utils import *\n\n\ndef eval_singleclip_gt_bbox_generator(model, datagen, verbose=1):\n\n num_blocks = len(model.outputs)\n num_samples = len(datagen)\n\n start = time.time()\n for i in range(num_samples):\n if verbose > 1:\n printcn('', 'pred %05d/%05d' % (i+1, num_samples))\n\n [x], [y] = datagen[i]\n if 'y_true' not in locals():\n y_true = np.zeros((num_samples,) + y.shape[1:])\n y_pred = np.zeros((num_samples, num_blocks) + y.shape[1:])\n\n y_true[i, :] = y\n pred = model.predict(x)\n for b in range(num_blocks):\n y_pred[i, b, :] = pred[b]\n\n dt = time.time() - start\n\n if verbose:\n printc(WARNING, 'NTU, single-clip, GT bbox, action acc.%:')\n\n scores = []\n for b in range(num_blocks):\n correct = np.equal(np.argmax(y_true, axis=-1),\n np.argmax(y_pred[:, b, :], axis=-1), dtype=np.float)\n scores.append(sum(correct) / len(correct))\n if verbose:\n printc(WARNING, ' %.1f ' % (100*scores[-1]))\n\n if verbose:\n printcn('', '\\n%d samples in %.1f sec: %.1f clips per sec' \\\n % (num_samples, dt, num_samples / dt))\n\n return scores\n\n\ndef eval_multiclip_dataset(model, ntu, subsampling, bboxes_file=None,\n logdir=None, verbose=1):\n \"\"\"If bboxes_file if not given, use ground truth bounding boxes.\"\"\"\n\n num_samples = ntu.get_length(TEST_MODE)\n num_blocks = len(model.outputs)\n\n \"\"\"Save and reset some original configs from the dataset.\"\"\"\n org_hflip = ntu.dataconf.fixed_hflip\n org_use_gt_bbox = ntu.use_gt_bbox\n\n cnt_corr = 0\n cnt_total = 0\n\n action_shape = (num_samples,) + ntu.get_shape('ntuaction')\n a_true = np.zeros(action_shape)\n a_pred = np.ones((num_blocks,) + action_shape)\n missing_clips = {}\n\n if bboxes_file is not None:\n with open(bboxes_file, 'r') as fid:\n bboxes_data = json.load(fid)\n ntu.use_gt_bbox = False\n bboxes_info = 'Using bounding boxes from file \"{}\"'.format(bboxes_file)\n else:\n bboxes_data = None\n ntu.use_gt_bbox = True\n bboxes_info = 'Using ground truth bounding boxes.'\n\n for i in range(num_samples):\n if verbose:\n printc(OKBLUE, '%04d/%04d\\t' % (i, num_samples))\n\n frame_list = ntu.get_clip_index(i, TEST_MODE, subsamples=[subsampling])\n\n \"\"\"Variable to hold all preditions for this sequence.\n 2x frame_list due to hflip.\n \"\"\"\n allpred = np.ones((num_blocks, 2*len(frame_list)) + action_shape[1:])\n\n for f in range(len(frame_list)):\n for hflip in range(2):\n preds_clip = []\n try:\n ntu.dataconf.fixed_hflip = hflip # Force horizontal flip\n\n bbox = None\n if bboxes_data is not None:\n key = '%04d.%d.%03d.%d' % (i, subsampling, f, hflip)\n try:\n bbox = np.array(bboxes_data[key])\n except:\n warning('Missing bounding box key ' + str(key))\n\n \"\"\"Load clip and predict action.\"\"\"\n data = ntu.get_data(i, TEST_MODE, frame_list=frame_list[f],\n bbox=bbox)\n a_true[i, :] = data['ntuaction']\n\n pred = model.predict(np.expand_dims(data['frame'], axis=0))\n for b in range(num_blocks):\n allpred[b, 2*f+hflip, :] = pred[b][0]\n a_pred[b, i, :] *= pred[b][0]\n\n if np.argmax(a_true[i]) != np.argmax(a_pred[-1, i]):\n missing_clips['%04d.%03d.%d' % (i, f, hflip)] = [\n int(np.argmax(a_true[i])),\n int(np.argmax(a_pred[-1, i]))]\n\n except Exception as e:\n warning('eval_multiclip, exception on sample ' \\\n + str(i) + ' frame ' + str(f) + ': ' + str(e))\n\n if verbose:\n cor = int(np.argmax(a_true[i]) == np.argmax(a_pred[-1, i]))\n\n cnt_total += 1\n cnt_corr += cor\n 
printnl('%d : %.1f' % (cor, 100 * cnt_corr / cnt_total))\n\n if logdir is not None:\n np.save('%s/a_pred.npy' % logdir, a_pred)\n np.save('%s/a_true.npy' % logdir, a_true)\n with open(os.path.join(logdir, 'missing-clips.json'), 'w') as fid:\n json.dump(missing_clips, fid)\n\n a_true = np.expand_dims(a_true, axis=0)\n a_true = np.tile(a_true, (num_blocks, 1, 1))\n correct = np.argmax(a_true, axis=-1) == np.argmax(a_pred, axis=-1)\n scores = 100*np.sum(correct, axis=-1) / num_samples\n if verbose:\n printcn(WARNING, 'NTU, multi-clip. ' + bboxes_info + '\\n')\n printcn(WARNING, np.array2string(np.array(scores), precision=2))\n printcn(WARNING, 'NTU best: %.2f' % max(scores))\n\n ntu.dataconf.fixed_hflip = org_hflip\n ntu.use_gt_bbox = org_use_gt_bbox\n\n return scores\n\n\nclass NtuEvalCallback(Callback):\n\n def __init__(self, data, eval_model=None, logdir=None):\n\n assert type(data) == BatchLoader, \\\n 'data must be a BatchLoader instance, ' \\\n + 'got {} instead'.format(data)\n\n self.data = data\n self.eval_model = eval_model\n self.scores = {}\n self.logdir = logdir\n\n def on_epoch_end(self, epoch, logs={}):\n if self.eval_model is not None:\n model = self.eval_model\n else:\n model = self.model\n\n scores = eval_singleclip_gt_bbox_generator(model, self.data)\n\n epoch += 1\n if self.logdir is not None:\n if not hasattr(self, 'logarray'):\n self.logarray = {}\n self.logarray[epoch] = scores\n with open(os.path.join(self.logdir, 'ntu_val.json'), 'w') as f:\n json.dump(self.logarray, f)\n\n cur_best = max(scores)\n self.scores[epoch] = cur_best\n\n printcn(OKBLUE, 'Best score is %.1f at epoch %d' % \\\n (100*self.best_score, self.best_epoch))\n\n @property\n def best_epoch(self):\n if len(self.scores) > 0:\n # Get the key of the maximum value from a dict\n return max(self.scores, key=self.scores.get)\n else:\n return np.inf\n\n @property\n def best_score(self):\n if len(self.scores) > 0:\n # Get the maximum value from a dict\n return self.scores[self.best_epoch]\n else:\n return 0\n\n# Aliases.\neval_singleclip_generator = eval_singleclip_gt_bbox_generator\n",
"#!/usr/bin/env python2\n\nimport os\nimport sys\nimport glob\nimport threading\n\nimport numpy as np\nfrom PIL import Image\n\nimport cv2\n\nresize_ratio = 1/2.\n\n\ndef mkdir(path):\n if os.path.isdir(path) is False:\n os.mkdir(path)\n\n\ndef extract_video(video_file, output_dir, frames, jpeg_quality=95):\n\n try:\n vidcap = cv2.VideoCapture(video_file)\n except Exception as e:\n sys.stderr.write(str(e) + '\\n')\n sys.stderr.write('Error loading file \"{}\"\\n'.format(video_file))\n return\n\n mkdir(output_dir)\n sys.stdout.write('Extracting video \"{}\"\\n'.format(video_file))\n sys.stdout.flush()\n\n f = 0\n while True:\n success, image = vidcap.read()\n f += 1\n if not success:\n break\n if f not in frames:\n continue # Skip if not in the frame list\n\n ncols, nrows, rgb = image.shape\n dsize = (int(resize_ratio*nrows), int(resize_ratio*ncols))\n image = cv2.resize(image, dsize, interpolation=cv2.INTER_CUBIC)\n\n frame_file = os.path.join(output_dir, '%05d.jpg' % f)\n cv2.imwrite(frame_file, image,\n [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality])\n\n vidcap.release()\n\n\ndef extract_resize_videos(input_path, output_path, annot_path, subjects,\n num_C=3, num_P=40, num_R=2, num_A=60):\n\n if os.path.isdir(input_path) is False:\n raise Exception('Could not find the \"{}\" folder!'.format(input_path))\n\n mkdir(output_path)\n\n for s in subjects:\n for c in range(1,num_C+1):\n for p in range(1,num_P+1):\n for r in range(1,num_R+1):\n for a in range(1,num_A+1):\n\n sequence_id = 'S%03dC%03dP%03dR%03dA%03d' \\\n % (s, c, p, r, a)\n\n video_file = os.path.join(input_path,\n sequence_id + '_rgb.avi')\n\n annot_file = os.path.join(annot_path,\n sequence_id + '.npy')\n if not os.path.isfile(annot_file):\n continue # Ignore missing annotation files\n\n annot = np.load(annot_file)\n # frames = annot[1::2,0] # Divide video fps by 2\n frames = annot[:,0]\n\n output_dir = os.path.join(output_path, sequence_id)\n extract_video(video_file, output_dir, frames)\n\n\nif __name__ == \"__main__\":\n try:\n assert len(sys.argv) == 2, \\\n 'Expected 1 parameter, got {}'.format(sys.argv)\n setlist = [int(sys.argv[-1])]\n extract_resize_videos(\n 'UCF11', 'images-small', 'UCF11_numpy', setlist)\n except Exception as e:\n print (e)\n\n"
] | [
[
"numpy.expand_dims",
"numpy.tile",
"numpy.save",
"numpy.ones",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
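The eval_singleclip_gt_bbox_generator routine in the entry above scores each network output block by comparing the argmax of the predictions against the argmax of the one-hot ground truth. A minimal NumPy sketch of that scoring step, with toy arrays standing in for the model outputs (the .astype(float) conversion stands in for the dtype argument used in the original):

import numpy as np

num_samples, num_blocks, num_classes = 4, 2, 3

# Toy stand-ins for the arrays accumulated inside the evaluation loop.
y_true = np.eye(num_classes)[[0, 2, 1, 2]]                     # one-hot action labels
y_pred = np.random.rand(num_samples, num_blocks, num_classes)  # per-block class scores

scores = []
for b in range(num_blocks):
    correct = np.equal(np.argmax(y_true, axis=-1),
                       np.argmax(y_pred[:, b, :], axis=-1)).astype(float)
    scores.append(correct.sum() / len(correct))

print(scores)  # one accuracy value per output block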
deepguider/RoadGPS | [
"7db4669a54da98a854886b89b6922fb8c7a60f33",
"7db4669a54da98a854886b89b6922fb8c7a60f33",
"c9689abedd7f6de0efd4effffb204aa32a8e4ef3"
] | [
"src/ocr_recog/ocr_recognizer.py",
"src/logo_recog/utils.py",
"src/door_detect/utils/datasets.py"
] | [
"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" #CUDA_VISIBLE_DEVICES=0 (always use the first GPU only)\n\nimport time\nimport string\nimport argparse\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\n\nfrom utils import AttnLabelConverter\nfrom model import Model\n\nfrom demo import detect_ocr\nfrom craft.craft import CRAFT\nfrom collections import OrderedDict\n\n#####################################\n# 21.06.04 Astrid\n# https://github.com/googleapis/oauth2client/issues/642#issuecomment-279643203\n'''\nSolving this error \nFile \"./../src/ocr_recog/ocr_recognizer.py\", line 41, in __init__\n self.opt_craft, self.opt_recog = self.setup_parser()\n File \"./../src/ocr_recog/ocr_recognizer.py\", line 120, in setup_parser\n parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')\n File \"/usr/lib/python3.6/argparse.py\", line 1635, in __init__\n prog = _os.path.basename(_sys.argv[0])\nAttributeError: module 'sys' has no attribute 'argv'\n'''\nimport sys\n\nif not hasattr(sys, 'argv'):\n sys.argv = ['']\n#####################################\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\")\n\ndef copyStateDict(state_dict):\n if list(state_dict.keys())[0].startswith(\"module\"):\n start_idx = 1\n else:\n start_idx = 0\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = \".\".join(k.split(\".\")[start_idx:])\n new_state_dict[name] = v\n return new_state_dict\n\nclass OCRRecognizer:\n def __init__(self):\n self.net = None #detect\n self.model = None #recog\n self.converter = None\n #self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n self.res_imagefileName = None\n\n self.opt_craft, self.opt_recog = self.setup_parser()\n\n self.args_craft= vars(self.opt_craft)\n self.args = vars(self.opt_recog)\n\n self.detect_time = 0.0\n self.recog_time = 0.0\n self.total_time =0.0\n # print(\"~~~~~~~~ Hyperparameters used: ~~~~~~~\")\n # for x, y in self.args.items():\n # print(\"{} : {}\".format(x, y))\n self.__dict__.update(self.args_craft)\n self.__dict__.update(self.args)\n\n\n def initialize(self):\n\n start = time.time()\n\n\n\n # self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_addKorean_synth/best_accuracy.pth'\n # self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train/craft_mlt_25k.pth'\n # self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy.pth'\n # self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy_craft.pth'\n #\n # official\n\n self.saved_model = './data_ocr/best_accuracy.pth'\n self.craft_trained_model = './data_ocr/best_craft.pth'\n self.logfilepath = './data_ocr/log_ocr_result.txt'\n \n if torch.cuda.is_available():\n self.device = torch.device('cuda')\n self.cuda = True\n cudnn.benchmark = False\n else:\n self.device = torch.device('cpu')\n self.cuda = False\n cudnn.benchmark = True\n\n\n \"\"\" vocab / character number configuration \"\"\"\n # if self.sensitive:\n # self.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n cudnn.deterministic = True\n\n #self.num_gpu = torch.cuda.device_count()\n\n \"\"\" model configuration \"\"\"\n # detetion\n self.net = CRAFT(self).to(self.device) # initialize\n print('Loading detection weights from checkpoint ' + self.craft_trained_model)\n self.net.load_state_dict(copyStateDict(torch.load(self.craft_trained_model, map_location=self.device)))\n #self.net = 
torch.nn.DataParallel(self.net).to(self.device)\n self.net.to(self.device)\n\n self.converter = AttnLabelConverter(self.character)\n self.num_class = len(self.converter.character)\n\n if self.rgb:\n self.input_channel = 3\n self.model = Model(self, self.num_class).to(self.device)\n # load model\n #self.model = torch.nn.DataParallel(self.model).to(self.device)\n print('Loading recognition weights from checkpoint %s' % self.saved_model)\n #ckpt = torch.load(self.saved_model, map_location=self.device)\n self.model.load_state_dict(torch.load(self.saved_model, map_location=self.device))\n self.model.to(self.device)\n \n print('Initialization Done! It tooks {:.2f} sec.\\n'.format(time.time() - start))\n return True\n\n def setup_parser(self):\n \"\"\"\n Sets up an argument parser\n \"\"\"\n\n parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')\n\n parser_craft.add_argument('--craft_trained_model', default='weights/craft_mlt_25k.pth', type=str,\n help='pretrained model')\n parser_craft.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')\n parser_craft.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')\n parser_craft.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')\n parser_craft.add_argument('--cuda', default=False, type=str2bool, help='Use cuda for inference')\n parser_craft.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')\n parser_craft.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')\n parser_craft.add_argument('--poly', default=False, action='store_true', help='enable polygon type')\n parser_craft.add_argument('--show_time', default=False, action='store_true', help='show processing time')\n parser_craft.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')\n parser_craft.add_argument('--result_folder', default='./results/', type=str, help='result folder path')\n parser_craft.add_argument('--refine', default=False, action='store_true', help='enable link refiner')\n parser_craft.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str,\n help='pretrained refiner model')\n\n args_craft = parser_craft.parse_args()\n\n parser_recog = argparse.ArgumentParser(description='ocr recognition')\n parser_recog.add_argument('--image_path', help='path to image_folder or image_file which contains text images')\n parser_recog.add_argument('--workers', type=int, help='number of data loading workers', default=4)\n parser_recog.add_argument('--batch_size', type=int, default=1, help='input batch size')\n parser_recog.add_argument('--saved_model', help=\"path to saved_model to evaluation\")\n parser_recog.add_argument('--logfilepath', help=\"path to log to demo\")\n\n \"\"\" Data processing \"\"\"\n parser_recog.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\n parser_recog.add_argument('--imgH', type=int, default=32, help='the height of the input image')\n parser_recog.add_argument('--imgW', type=int, default=100, help='the width of the input image')\n parser_recog.add_argument('--rgb', action='store_true', help='use rgb input')\n # parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\n\n parser_recog.add_argument('--character', type=str,\n 
default='0123456789abcdefghijklmnopqrstuvwxyz가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘',\n help='character label')\n\n parser_recog.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\n parser_recog.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n \"\"\" Model Architecture \"\"\"\n parser_recog.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\n parser_recog.add_argument('--input_channel', type=int, default=1,\n help='the number of input channel of Feature extractor')\n parser_recog.add_argument('--output_channel', type=int, default=512,\n help='the number of output channel of Feature extractor')\n parser_recog.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n\n args_recog= parser_recog.parse_args()\n\n return args_craft , args_recog\n\n\n\n def apply(self, image, timestamp, save_img=False):\n #coordinate : list\n save_log = False\n pred, timestamp = detect_ocr(self, image, timestamp, save_img, save_log)\n return pred, timestamp",
"'''\nModified from Logohunter, https://github.com/ilmonteux/logohunter\n'''\n\nimport cv2\nimport os\nimport h5py\nimport time\nimport colorsys\nimport numpy as np\n\nfrom keras import Model\nfrom PIL import Image, ImageDraw, ImageFont\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport tensorflow as tf\n\n\ndef draw_matches(image, label_list, prediction, matches):\n '''Draw bounding boxes on image with matching results.'''\n \n if len(prediction) == 0:\n return image\n\n image = Image.fromarray(image)\n colors = bbox_colors(len(label_list))\n # for internal consistency, colors in BGR notation\n colors = np.array(colors)[:, ::-1]\n \n match_bbox = []\n for i in range(len(label_list)):\n match_bbox.append([])\n for i_cand, (i_match, cdf) in matches.items():\n if i==i_match:\n match_bbox[i].append(prediction[i_cand])\n new_image = draw_annotated_box(image, match_bbox, label_list, colors)\n return np.array(new_image)\n\n\ndef bbox_colors(num_colors):\n '''Select n distinct bounding box colors.'''\n\n hsv_tuples = [(x / num_colors, 1., 1.) for x in range(num_colors)]\n colors = 255 * np.array([colorsys.hsv_to_rgb(*x) for x in hsv_tuples])\n\n np.random.seed(1234)\n np.random.shuffle(colors)\n np.random.seed(None)\n\n return colors.astype(int)\n\n\ndef draw_annotated_box(image, bbox_list, label_list, color_list):\n '''Draw box and overhead label on image.'''\n\n font_path = os.path.join(os.path.dirname(__file__), 'model/keras_yolo3/font/FiraMono-Medium.otf')\n font = ImageFont.truetype(font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n draw = ImageDraw.Draw(image)\n for bbox, label, color in zip(bbox_list, label_list, color_list):\n if not isinstance(color, tuple):\n color = tuple(color)\n \n for b in bbox:\n if len(b) < 4:\n continue\n \n logo_label = str(label)\n if len(b) > 4:\n logo_label += ' {:.2f}'.format(b[-1]) # adding confidence\n label_size = draw.textsize(logo_label, font)\n\n xmin, ymin, xmax, ymax = b[:4]\n xmin = max(0, np.floor(xmin + 0.5).astype('int32'))\n ymin = max(0, np.floor(ymin + 0.5).astype('int32'))\n xmax = min(image.size[0], np.floor(xmax + 0.5).astype('int32'))\n ymax = min(image.size[1], np.floor(ymax + 0.5).astype('int32'))\n\n if ymin - label_size[1] >= 0:\n text_origin = np.array([xmin, ymin - label_size[1]])\n else:\n text_origin = np.array([xmin, ymax])\n\n for i in range(thickness):\n draw.rectangle([xmin + i, ymin + i, xmax - i, ymax - i], outline=color)\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=color)\n draw.text(text_origin, logo_label, fill=(0, 0, 0), font=font)\n del draw\n return image\n\n\ndef pad_image(img, shape, mode = 'constant_mean'):\n '''Resize and pad image to given size.'''\n \n if mode == 'constant_mean':\n mode_args = {'mode': 'constant', 'constant_values': np.mean(img)}\n else:\n mode_args = {'mode': mode}\n\n ih, iw = img.shape[:2]\n h, w = shape[:2]\n\n # first rescale image so that largest dimension matches target\n scale = min(w/iw, h/ih)\n nw, nh = int(iw * scale), int(ih * scale)\n img = cv2.resize(img, (nw, nh))\n\n # center-pad rest of image: compute padding and split in two\n xpad, ypad = shape[1]-nw, shape[0]-nh\n xpad = (xpad//2, xpad//2+xpad%2)\n ypad = (ypad//2, ypad//2+ypad%2)\n\n new_im = np.pad(img, pad_width=(ypad, xpad, (0,0)), **mode_args)\n\n return new_im\n\n\ndef extract_features(img, model, preprocess, batch_size=100):\n '''Extract 
features from image array.'''\n \n if len(img) == 0:\n return np.array([])\n\n steps = len(img) // batch_size + 1\n img_gen = chunks(img, batch_size, preprocessing_function = preprocess)\n\n with graph_logo_extractor_model.as_default(): # jylee, July19, 2020 (to resolve keras error when threaded run)\n features = model.predict_generator(img_gen, steps = steps)\n\n # if the generator has looped past end of array, cut it down\n features = features[:len(img)]\n\n # flatten last three dimension to one\n features = features.reshape(features.shape[0], np.prod(features.shape[1:]))\n return features\n\n\ndef chunks(l, n, preprocessing_function = None):\n '''Yield successive n-sized chunks from l.'''\n\n func = (lambda x: x) if (preprocessing_function is None) else preprocessing_function\n\n # in predict_generator, steps argument sets how many times looped through 'while True'\n while True:\n for i in range(0, len(l), n):\n yield np.array([func(d) for d in l[i:i+n]])\n\n\ndef load_features(model_name):\n '''Load features.'''\n start = time.time()\n \n if model_name == 'InceptionV3':\n filename = './model/inception_logo_features_200_trunc_248.hdf5'\n elif model_name == 'VGG16':\n filename = './model/vgg16_logo_features_128.hdf5'\n \n # get database features\n with h5py.File(filename, 'r') as hf:\n #brand_map = list(hf.get('brand_map'))\n #input_shape = list(hf.get('input_shape'))\n features = hf.get('features')\n features = np.array(features)\n \n print('Loaded {} features from {} in {:.2f}sec'.format(features.shape, filename, time.time()-start))\n\n return features#, brand_map, input_shape\n\n\ndef save_features(filename, features, brand_map, input_shape):\n '''Save features to compressed HDF5 file.'''\n # reduce file size by saving as float16\n features = features.astype(np.float16)\n \n start = time.time()\n with h5py.File(filename, 'w') as hf:\n hf.create_dataset('features', data = features, compression='lzf')\n hf.create_dataset('brand_map', data = brand_map)\n hf.create_dataset('input_shape', data = input_shape)\n\n print('Saving {} features into {} in {:.2f} secs'.format(features.shape, filename, time.time() - start))\n \n\ndef load_extractor_model(model_name):\n '''Load variant of specified model.'''\n \n start = time.time()\n if model_name == 'InceptionV3':\n from keras.applications.inception_v3 import InceptionV3\n from keras.applications.inception_v3 import preprocess_input\n model = InceptionV3(weights='imagenet', include_top=False)\n\n trunc_layer = [-1, 279, 248, 228, -1]\n i_layer = 2\n model_out = Model(inputs=model.inputs, \n outputs=model.layers[trunc_layer[i_layer]].output)\n input_shape = (200, 200, 3) #(299,299,3) if flavor==0 else (200,200,3)\n\n global graph_logo_extractor_model # jylee, July19, 2020 (to resolve keras error when threaded run)\n graph_logo_extractor_model = tf.get_default_graph() # jylee, July19, 2020 (to resolve keras error when threaded run)\n\n elif model_name == 'VGG16':\n from keras.applications.vgg16 import VGG16\n from keras.applications.vgg16 import preprocess_input\n model_out = VGG16(weights='imagenet', include_top=False)\n input_length = 128 #[224,128,64][flavor]\n input_shape = (input_length,input_length,3)\n\n print('Loaded {} feature extractor in {:.2f}sec'.format(model_name, time.time()-start))\n \n return model_out, preprocess_input, input_shape\n \n\ndef construct_DB(DB_list, model_name, DB_path):\n '''Consturct the database of features from img_path.'''\n \n start = time.time()\n # load pre-trained recognition model\n model, preprocessed, 
input_shape = load_extractor_model(model_name)\n new_preprocess = lambda x: preprocessed(pad_image(x, input_shape))\n \n # extract the litw features\n all_logos, brand_map = extract_litw_logos(DB_list)\n features = extract_features(all_logos, model, new_preprocess)\n \n if model_name == 'InceptionV3':\n save_features('./model/inception_logo_features_200_trunc_248.hdf5',\n features, brand_map, input_shape)\n elif model_name == 'VGG16':\n save_features('./modelvgg16_logo_features_128.hdf5',\n features, brand_map, input_shape)\n print('Elapsed Time: {:.2f}'.format((time.time() - start) / 60))\n \n \ndef extract_litw_logos(filename):\n '''Extract the litw features.'''\n \n with open(filename, 'r') as file:\n img_list = []\n bbox_list = []\n for line in file.read().splitlines():\n img, bbox = line.split(' ')[0], line.split(' ')[1:]\n img_list.append(img)\n\n bbox = [ bb for bb in bbox if bb != '' ]\n\n # skip if no predictions made\n if len(bbox)==0:\n bbox_list.append([])\n continue\n\n if len(bbox[0].split(','))==5:\n bbox = [[int(x) for x in bb.split(',')] for bb in bbox]\n elif len(bbox[0].split(','))==6:\n bbox = [[int(x) for x in bb.split(',')[:-1]] + [float(bb.split(',')[-1])] for bb in bbox]\n else:\n print(bbox[0])\n\n # sort objects by prediction confidence\n bbox = sorted(bbox, key = lambda x: x[-1], reverse=True)\n bbox_list.append(bbox)\n \n all_logos = []\n brand_map = []\n for idx in range(len(bbox_list)):\n img = cv2.imread(img_list[idx])[:,:,::-1]\n \n for bb in bbox_list[idx]:\n if bb[3]-bb[1] < 10 or bb[2]-bb[1] < 10 or bb[3]>img.shape[0] or bb[2]> img.shape[0]:\n continue\n all_logos.append(img[bb[1]:bb[3], bb[0]:bb[2]])\n brand_map.append(bb[-1])\n\n return all_logos, brand_map\n\ndef similarity_cutoff(feat_input, features, threshold):\n \"\"\"\n Given list of input feature and feature database, compute distribution of\n cosine similarityof the database with respect to each input. Find similarity\n cutoff below which threshold fraction of database features lay.\n \"\"\"\n\n start = time.time()\n cs = cosine_similarity(X = feat_input, Y = features)\n\n cutoff_list = []\n cdf_list = []\n for i, cs1 in enumerate(cs):\n hist, bins = np.histogram(cs1, bins=np.arange(0,1,0.001))\n cdf = np.cumsum(hist)/len(cs1)\n cutoff = bins[np.where(cdf < threshold)][-1]\n cutoff_list.append(cutoff)\n cdf_list.append(cdf)\n end = time.time()\n print('Computed similarity cutoffs given inputs in {:.2f}sec'.format(end - start))\n\n return cutoff_list, (bins, cdf_list)",
"# YOLOv5 dataset utils and dataloaders\n\nimport glob\nimport hashlib\nimport json\nimport logging\nimport os\nimport random\nimport shutil\nimport time\nfrom itertools import repeat\nfrom multiprocessing.pool import ThreadPool, Pool\nfrom pathlib import Path\nfrom threading import Thread\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport yaml\nfrom PIL import Image, ExifTags\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\nfrom utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective\nfrom utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \\\n xyn2xy, segments2boxes, clean_str\nfrom utils.torch_utils import torch_distributed_zero_first\n\n# Parameters\nHELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'\nIMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes\nVID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes\nNUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads\n\n# Get orientation exif tag\nfor orientation in ExifTags.TAGS.keys():\n if ExifTags.TAGS[orientation] == 'Orientation':\n break\n\n\ndef get_hash(paths):\n # Returns a single hash value of a list of paths (files or dirs)\n size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes\n h = hashlib.md5(str(size).encode()) # hash sizes\n h.update(''.join(paths).encode()) # hash paths\n return h.hexdigest() # return hash\n\n\ndef exif_size(img):\n # Returns exif-corrected PIL size\n s = img.size # (width, height)\n try:\n rotation = dict(img._getexif().items())[orientation]\n if rotation == 6: # rotation 270\n s = (s[1], s[0])\n elif rotation == 8: # rotation 90\n s = (s[1], s[0])\n except:\n pass\n\n return s\n\n\ndef exif_transpose(image):\n \"\"\"\n Transpose a PIL image accordingly if it has an EXIF Orientation tag.\n From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py\n\n :param image: The image to transpose.\n :return: An image.\n \"\"\"\n exif = image.getexif()\n orientation = exif.get(0x0112, 1) # default 1\n if orientation > 1:\n method = {2: Image.FLIP_LEFT_RIGHT,\n 3: Image.ROTATE_180,\n 4: Image.FLIP_TOP_BOTTOM,\n 5: Image.TRANSPOSE,\n 6: Image.ROTATE_270,\n 7: Image.TRANSVERSE,\n 8: Image.ROTATE_90,\n }.get(orientation)\n if method is not None:\n image = image.transpose(method)\n del exif[0x0112]\n image.info[\"exif\"] = exif.tobytes()\n return image\n\n\ndef create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,\n rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else 
InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset\n\n\nclass InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\" Dataloader that reuses workers\n\n Uses same syntax as vanilla DataLoader\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))\n self.iterator = super().__iter__()\n\n def __len__(self):\n return len(self.batch_sampler.sampler)\n\n def __iter__(self):\n for i in range(len(self)):\n yield next(self.iterator)\n\n\nclass _RepeatSampler(object):\n \"\"\" Sampler that repeats forever\n\n Args:\n sampler (Sampler)\n \"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n yield from iter(self.sampler)\n\n\nclass LoadImages: # for inference\n def __init__(self, path, img_size=640, stride=32):\n p = str(Path(path).absolute()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n if not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, 'Image Not Found ' + path\n print(f'image {self.count}/{self.nf} {path}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files\n\n\nclass LoadWebcam: # for inference\n def __init__(self, pipe='0', img_size=640, stride=32):\n self.img_size = img_size\n self.stride = stride\n self.pipe = eval(pipe) if pipe.isnumeric() else pipe\n self.cap = cv2.VideoCapture(self.pipe) # video capture object\n self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if cv2.waitKey(1) == ord('q'): # q to quit\n self.cap.release()\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Read frame\n ret_val, img0 = self.cap.read()\n img0 = cv2.flip(img0, 1) # flip left-right\n\n # Print\n assert ret_val, f'Camera Error {self.pipe}'\n img_path = 'webcam.jpg'\n print(f'webcam {self.count}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return img_path, img, img0, None\n\n def __len__(self):\n return 0\n\n\nclass LoadStreams: # multiple IP or RTSP cameras\n def __init__(self, sources='streams.txt', img_size=640, stride=32):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources, 'r') as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n print(f'{i + 1}/{n}: {s}... ', end='')\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. 
s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)\n print(f\" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n print('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n self.imgs[i] = im if success else self.imgs[i] * 0\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years\n\n\ndef img2label_paths(img_paths):\n # Define label paths as a function of image paths\n sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings\n return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]\n\n\nclass LoadImagesAndLabels(Dataset): # for training/testing\n def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,\n cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):\n self.img_size = img_size\n self.augment = augment\n self.hyp = hyp\n self.image_weights = image_weights\n self.rect = False if image_weights else rect\n self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)\n self.mosaic_border = [-img_size // 2, -img_size // 2]\n self.stride = stride\n self.path = path\n self.albumentations = Albumentations() if augment else None\n\n try:\n f = [] # image files\n for p in path if isinstance(path, list) else [path]:\n p = Path(p) # os-agnostic\n if p.is_dir(): # dir\n f += glob.glob(str(p / '**' / '*.*'), recursive=True)\n # f = list(p.rglob('**/*.*')) # pathlib\n elif p.is_file(): # file\n with open(p, 'r') as t:\n t = t.read().strip().splitlines()\n parent = str(p.parent) + os.sep\n f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path\n # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)\n 
else:\n raise Exception(f'{prefix}{p} does not exist')\n self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])\n # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib\n assert self.img_files, f'{prefix}No images found'\n except Exception as e:\n raise Exception(f'{prefix}Error loading data from {path}: {e}\\nSee {HELP_URL}')\n\n # Check cache\n self.label_files = img2label_paths(self.img_files) # labels\n cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')\n try:\n cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict\n assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files)\n except:\n cache, exists = self.cache_labels(cache_path, prefix), False # cache\n\n # Display cache\n nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total\n if exists:\n d = f\"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted\"\n tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results\n if cache['msgs']:\n logging.info('\\n'.join(cache['msgs'])) # display warnings\n assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'\n\n # Read cache\n [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items\n labels, shapes, self.segments = zip(*cache.values())\n self.labels = list(labels)\n self.shapes = np.array(shapes, dtype=np.float64)\n self.img_files = list(cache.keys()) # update\n self.label_files = img2label_paths(cache.keys()) # update\n if single_cls:\n for x in self.labels:\n x[:, 0] = 0\n\n n = len(shapes) # number of images\n bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index\n nb = bi[-1] + 1 # number of batches\n self.batch = bi # batch index of image\n self.n = n\n self.indices = range(n)\n\n # Rectangular Training\n if self.rect:\n # Sort by aspect ratio\n s = self.shapes # wh\n ar = s[:, 1] / s[:, 0] # aspect ratio\n irect = ar.argsort()\n self.img_files = [self.img_files[i] for i in irect]\n self.label_files = [self.label_files[i] for i in irect]\n self.labels = [self.labels[i] for i in irect]\n self.shapes = s[irect] # wh\n ar = ar[irect]\n\n # Set training image shapes\n shapes = [[1, 1]] * nb\n for i in range(nb):\n ari = ar[bi == i]\n mini, maxi = ari.min(), ari.max()\n if maxi < 1:\n shapes[i] = [maxi, 1]\n elif mini > 1:\n shapes[i] = [1, 1 / mini]\n\n self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride\n\n # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)\n self.imgs = [None] * n\n if cache_images:\n gb = 0 # Gigabytes of cached images\n self.img_hw0, self.img_hw = [None] * n, [None] * n\n results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))\n pbar = tqdm(enumerate(results), total=n)\n for i, x in pbar:\n self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)\n gb += self.imgs[i].nbytes\n pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'\n pbar.close()\n\n def cache_labels(self, path=Path('./labels.cache'), prefix=''):\n # Cache dataset labels, check images and read shapes\n x = {} # dict\n nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages\n desc = f\"{prefix}Scanning '{path.parent / path.stem}' images and 
labels...\"\n with Pool(NUM_THREADS) as pool:\n pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),\n desc=desc, total=len(self.img_files))\n for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:\n nm += nm_f\n nf += nf_f\n ne += ne_f\n nc += nc_f\n if im_file:\n x[im_file] = [l, shape, segments]\n if msg:\n msgs.append(msg)\n pbar.desc = f\"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted\"\n\n pbar.close()\n if msgs:\n logging.info('\\n'.join(msgs))\n if nf == 0:\n logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')\n x['hash'] = get_hash(self.label_files + self.img_files)\n x['results'] = nf, nm, ne, nc, len(self.img_files)\n x['msgs'] = msgs # warnings\n x['version'] = 0.4 # cache version\n try:\n np.save(path, x) # save cache for next time\n path.with_suffix('.cache.npy').rename(path) # remove .npy suffix\n logging.info(f'{prefix}New cache created: {path}')\n except Exception as e:\n logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable\n return x\n\n def __len__(self):\n return len(self.img_files)\n\n # def __iter__(self):\n # self.count = -1\n # print('ran dataset iter')\n # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)\n # return self\n\n def __getitem__(self, index):\n index = self.indices[index] # linear, shuffled, or image_weights\n\n hyp = self.hyp\n mosaic = self.mosaic and random.random() < hyp['mosaic']\n if mosaic:\n # Load mosaic\n img, labels = load_mosaic(self, index)\n shapes = None\n\n # MixUp augmentation\n if random.random() < hyp['mixup']:\n img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))\n\n else:\n # Load image\n img, (h0, w0), (h, w) = load_image(self, index)\n\n # Letterbox\n shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape\n img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling\n\n labels = self.labels[index].copy()\n if labels.size: # normalized xywh to pixel xyxy format\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])\n\n if self.augment:\n img, labels = random_perspective(img, labels,\n degrees=hyp['degrees'],\n translate=hyp['translate'],\n scale=hyp['scale'],\n shear=hyp['shear'],\n perspective=hyp['perspective'])\n\n nl = len(labels) # number of labels\n if nl:\n labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)\n\n if self.augment:\n # Albumentations\n img, labels = self.albumentations(img, labels)\n\n # HSV color-space\n augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])\n\n # Flip up-down\n if random.random() < hyp['flipud']:\n img = np.flipud(img)\n if nl:\n labels[:, 2] = 1 - labels[:, 2]\n\n # Flip left-right\n if random.random() < hyp['fliplr']:\n img = np.fliplr(img)\n if nl:\n labels[:, 1] = 1 - labels[:, 1]\n\n # Cutouts\n # labels = cutout(img, labels, p=0.5)\n\n labels_out = torch.zeros((nl, 6))\n if nl:\n labels_out[:, 1:] = torch.from_numpy(labels)\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return torch.from_numpy(img), labels_out, self.img_files[index], shapes\n\n @staticmethod\n def collate_fn(batch):\n img, label, path, shapes = zip(*batch) # transposed\n for i, l in 
enumerate(label):\n l[:, 0] = i # add target image index for build_targets()\n return torch.stack(img, 0), torch.cat(label, 0), path, shapes\n\n @staticmethod\n def collate_fn4(batch):\n img, label, path, shapes = zip(*batch) # transposed\n n = len(shapes) // 4\n img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]\n\n ho = torch.tensor([[0., 0, 0, 1, 0, 0]])\n wo = torch.tensor([[0., 0, 1, 0, 0, 0]])\n s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale\n for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW\n i *= 4\n if random.random() < 0.5:\n im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[\n 0].type(img[i].type())\n l = label[i]\n else:\n im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)\n l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s\n img4.append(im)\n label4.append(l)\n\n for i, l in enumerate(label4):\n l[:, 0] = i # add target image index for build_targets()\n\n return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4\n\n\n# Ancillary functions --------------------------------------------------------------------------------------------------\ndef load_image(self, index):\n # loads 1 image from dataset, returns img, original hw, resized hw\n img = self.imgs[index]\n if img is None: # not cached\n path = self.img_files[index]\n img = cv2.imread(path) # BGR\n assert img is not None, 'Image Not Found ' + path\n h0, w0 = img.shape[:2] # orig hw\n r = self.img_size / max(h0, w0) # ratio\n if r != 1: # if sizes are not equal\n img = cv2.resize(img, (int(w0 * r), int(h0 * r)),\n interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)\n return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized\n else:\n return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized\n\n\ndef load_mosaic(self, index):\n # loads images in a 4-mosaic\n\n labels4, segments4 = [], []\n s = self.img_size\n yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y\n indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = load_image(self, index)\n\n # place img in img4\n if i == 0: # top left\n img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)\n elif i == 1: # top right\n x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n elif i == 2: # bottom left\n x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n elif i == 3: # bottom right\n x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n padw = x1a - x1b\n padh = y1a - y1b\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padw, padh) for x in segments]\n 
labels4.append(labels)\n segments4.extend(segments)\n\n # Concat/clip labels\n labels4 = np.concatenate(labels4, 0)\n for x in (labels4[:, 1:], *segments4):\n np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()\n # img4, labels4 = replicate(img4, labels4) # replicate\n\n # Augment\n img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])\n img4, labels4 = random_perspective(img4, labels4, segments4,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n border=self.mosaic_border) # border to remove\n\n return img4, labels4\n\n\ndef load_mosaic9(self, index):\n # loads images in a 9-mosaic\n\n labels9, segments9 = [], []\n s = self.img_size\n indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = load_image(self, index)\n\n # place img in img9\n if i == 0: # center\n img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n h0, w0 = h, w\n c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates\n elif i == 1: # top\n c = s, s - h, s + w, s\n elif i == 2: # top right\n c = s + wp, s - h, s + wp + w, s\n elif i == 3: # right\n c = s + w0, s, s + w0 + w, s + h\n elif i == 4: # bottom right\n c = s + w0, s + hp, s + w0 + w, s + hp + h\n elif i == 5: # bottom\n c = s + w0 - w, s + h0, s + w0, s + h0 + h\n elif i == 6: # bottom left\n c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h\n elif i == 7: # left\n c = s - w, s + h0 - h, s, s + h0\n elif i == 8: # top left\n c = s - w, s + h0 - hp - h, s, s + h0 - hp\n\n padx, pady = c[:2]\n x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padx, pady) for x in segments]\n labels9.append(labels)\n segments9.extend(segments)\n\n # Image\n img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]\n hp, wp = h, w # height, width previous\n\n # Offset\n yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y\n img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]\n\n # Concat/clip labels\n labels9 = np.concatenate(labels9, 0)\n labels9[:, [1, 3]] -= xc\n labels9[:, [2, 4]] -= yc\n c = np.array([xc, yc]) # centers\n segments9 = [x - c for x in segments9]\n\n for x in (labels9[:, 1:], *segments9):\n np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()\n # img9, labels9 = replicate(img9, labels9) # replicate\n\n # Augment\n img9, labels9 = random_perspective(img9, labels9, segments9,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n border=self.mosaic_border) # border to remove\n\n return img9, labels9\n\n\ndef create_folder(path='./new'):\n # Create folder\n if os.path.exists(path):\n shutil.rmtree(path) # delete output folder\n os.makedirs(path) # make new output folder\n\n\ndef flatten_recursive(path='../datasets/coco128'):\n # Flatten a recursive directory by bringing all files to top level\n new_path = Path(path + '_flat')\n create_folder(new_path)\n for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):\n shutil.copyfile(file, new_path 
/ Path(file).name)\n\n\ndef extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()\n # Convert detection dataset into classification dataset, with one directory per class\n path = Path(path) # images dir\n shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing\n files = list(path.rglob('*.*'))\n n = len(files) # number of files\n for im_file in tqdm(files, total=n):\n if im_file.suffix[1:] in IMG_FORMATS:\n # image\n im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB\n h, w = im.shape[:2]\n\n # labels\n lb_file = Path(img2label_paths([str(im_file)])[0])\n if Path(lb_file).exists():\n with open(lb_file, 'r') as f:\n lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels\n\n for j, x in enumerate(lb):\n c = int(x[0]) # class\n f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename\n if not f.parent.is_dir():\n f.parent.mkdir(parents=True)\n\n b = x[1:] * [w, h, w, h] # box\n # b[2:] = b[2:].max() # rectangle to square\n b[2:] = b[2:] * 1.2 + 3 # pad\n b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)\n\n b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image\n b[[1, 3]] = np.clip(b[[1, 3]], 0, h)\n assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'\n\n\ndef autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):\n \"\"\" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files\n Usage: from utils.datasets import *; autosplit()\n Arguments\n path: Path to images directory\n weights: Train, val, test weights (list, tuple)\n annotated_only: Only use images with an annotated txt file\n \"\"\"\n path = Path(path) # images dir\n files = sum([list(path.rglob(f\"*.{img_ext}\")) for img_ext in IMG_FORMATS], []) # image files only\n n = len(files) # number of files\n random.seed(0) # for reproducibility\n indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split\n\n txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files\n [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing\n\n print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)\n for i, img in tqdm(zip(indices, files), total=n):\n if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label\n with open(path.parent / txt[i], 'a') as f:\n f.write('./' + img.relative_to(path.parent).as_posix() + '\\n') # add image to txt file\n\n\ndef verify_image_label(args):\n # Verify one image-label pair\n im_file, lb_file, prefix = args\n nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt\n try:\n # verify images\n im = Image.open(im_file)\n im.verify() # PIL verify\n shape = exif_size(im) # image size\n assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'\n assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'\n if im.format.lower() in ('jpg', 'jpeg'):\n with open(im_file, 'rb') as f:\n f.seek(-2, 2)\n assert f.read() == b'\\xff\\xd9', 'corrupted JPEG'\n\n # verify labels\n segments = [] # instance segments\n if os.path.isfile(lb_file):\n nf = 1 # label found\n with open(lb_file, 'r') as f:\n l = [x.split() for x in f.read().strip().splitlines() if len(x)]\n if any([len(x) > 8 for x in l]): # is segment\n classes = np.array([x[0] for x in l], dtype=np.float32)\n segments = [np.array(x[1:], 
dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)\n l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)\n l = np.array(l, dtype=np.float32)\n if len(l):\n assert l.shape[1] == 5, 'labels require 5 columns each'\n assert (l >= 0).all(), 'negative labels'\n assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'\n assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'\n else:\n ne = 1 # label empty\n l = np.zeros((0, 5), dtype=np.float32)\n else:\n nm = 1 # label missing\n l = np.zeros((0, 5), dtype=np.float32)\n return im_file, l, shape, segments, nm, nf, ne, nc, ''\n except Exception as e:\n nc = 1\n msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'\n return [None, None, None, None, nm, nf, ne, nc, msg]\n\n\ndef dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):\n \"\"\" Return dataset statistics dictionary with images and instances counts per split per class\n To run in parent directory: export PYTHONPATH=\"$PWD/yolov5\"\n Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)\n Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')\n Arguments\n path: Path to data.yaml or data.zip (with data.yaml inside data.zip)\n autodownload: Attempt to download dataset if not found locally\n verbose: Print stats dictionary\n \"\"\"\n\n def round_labels(labels):\n # Update labels to integer class and 6 decimal place floats\n return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]\n\n def unzip(path):\n # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'\n if str(path).endswith('.zip'): # path is data.zip\n assert Path(path).is_file(), f'Error unzipping {path}, file not found'\n assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}'\n dir = path.with_suffix('') # dataset directory\n return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path\n else: # path is data.yaml\n return False, None, path\n\n def hub_ops(f, max_dim=1920):\n # HUB ops for 1 image 'f'\n im = Image.open(f)\n r = max_dim / max(im.height, im.width) # ratio\n if r < 1.0: # image too large\n im = im.resize((int(im.width * r), int(im.height * r)))\n im.save(im_dir / Path(f).name, quality=75) # save\n\n zipped, data_dir, yaml_path = unzip(Path(path))\n with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:\n data = yaml.safe_load(f) # data dict\n if zipped:\n data['path'] = data_dir # TODO: should this be dir.resolve()?\n check_dataset(data, autodownload) # download dataset if missing\n hub_dir = Path(data['path'] + ('-hub' if hub else ''))\n stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary\n for split in 'train', 'val', 'test':\n if data.get(split) is None:\n stats[split] = None # i.e. 
no test set\n continue\n x = []\n dataset = LoadImagesAndLabels(data[split]) # load dataset\n for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):\n x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))\n x = np.array(x) # shape(128x80)\n stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},\n 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),\n 'per_class': (x > 0).sum(0).tolist()},\n 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in\n zip(dataset.img_files, dataset.labels)]}\n\n if hub:\n im_dir = hub_dir / 'images'\n im_dir.mkdir(parents=True, exist_ok=True)\n for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):\n pass\n\n # Profile\n stats_path = hub_dir / 'stats.json'\n if profile:\n for _ in range(1):\n file = stats_path.with_suffix('.npy')\n t1 = time.time()\n np.save(file, stats)\n t2 = time.time()\n x = np.load(file, allow_pickle=True)\n print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')\n\n file = stats_path.with_suffix('.json')\n t1 = time.time()\n with open(file, 'w') as f:\n json.dump(stats, f) # save stats *.json\n t2 = time.time()\n with open(file, 'r') as f:\n x = json.load(f) # load hyps dict\n print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')\n\n # Save, print and return\n if hub:\n print(f'Saving {stats_path.resolve()}...')\n with open(stats_path, 'w') as f:\n json.dump(stats, f) # save stats.json\n if verbose:\n print(json.dumps(stats, indent=2, sort_keys=False))\n return stats\n"
] | [
[
"torch.device",
"torch.cuda.is_available",
"torch.load"
],
[
"numpy.pad",
"numpy.random.seed",
"numpy.arange",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.cumsum",
"numpy.random.shuffle",
"numpy.mean",
"numpy.prod",
"numpy.floor",
"tensorflow.get_default_graph",
"numpy.array",
"numpy.where"
],
[
"torch.zeros",
"torch.cat",
"numpy.flipud",
"numpy.concatenate",
"numpy.all",
"torch.utils.data.distributed.DistributedSampler",
"numpy.clip",
"numpy.fliplr",
"numpy.arange",
"numpy.unique",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"numpy.save",
"numpy.full",
"numpy.load",
"numpy.zeros",
"numpy.ascontiguousarray",
"torch.stack",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
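A minimal sketch of the weighted train/val/test assignment described by the autosplit routine in the record above. This is an illustration only, not part of the dataset row; the file names and weights below are hypothetical placeholders.

    import random

    def weighted_split(files, weights=(0.9, 0.1, 0.0), seed=0):
        # Assign each file to split 0 (train), 1 (val) or 2 (test) with the given weights,
        # mirroring the random.choices-based assignment in the record above.
        random.seed(seed)  # fixed seed for a reproducible split
        indices = random.choices([0, 1, 2], weights=weights, k=len(files))
        splits = {0: [], 1: [], 2: []}
        for i, f in zip(indices, files):
            splits[i].append(f)
        return splits

    # Hypothetical usage with placeholder file names
    print(weighted_split([f"img_{k}.jpg" for k in range(10)]))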
fada-catec/advice_AI4EU_experiment | [
"624a1051e0502b60abe6122450ea53f80e9e4f8a"
] | [
"advice-road-crop/roadnet/train_valid_split.py"
] | [
"import os\nimport shutil\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\nfrom PIL import Image\n\nrandom.seed(2020)\nIMG_CROP = True\n\n# save gt_image_2 into gt_image, so that road is assigned to 255 and non-road is 0\ntrain_gt_path = \"../../data_road/training/gt_image_2/\"\nsave_gt_path = \"../../data_road/training/gt_image/\"\ngt_list = [f for f in os.listdir(train_gt_path) if f.endswith('.png')]\n\ntry:\n shutil.rmtree(save_gt_path)\nexcept OSError:\n pass\nos.mkdir(save_gt_path)\n\npbar = tqdm(total=289)\nfor gt in gt_list:\n if \"road\" in gt:\n img = np.array(Image.open(train_gt_path+gt))\n height = img.shape[0]\n width = img.shape[1]\n gtId = np.zeros((height, width), dtype=np.uint8)\n for i in range(height):\n for j in range(width):\n # print(img[i, j, :])\n if sum(img[i, j, :] == [255, 0, 255]) == 3:\n gtId[i, j] = 7\n else:\n gtId[i, j] = 0\n gt_name = gt.split('_road_')\n Image.fromarray(gtId).save(save_gt_path+gt_name[0]+'_'+gt_name[1])\n pbar.update(1)\n\n\n# split the training and validation data by 9:1\ndef traval_split(data_path, sub='um', seed=1):\n random.seed(seed)\n data_list = [f for f in os.listdir(data_path) if sub+'_' in f]\n\n train_len = round(len(data_list)*0.9)\n random.shuffle(data_list)\n train_list = data_list[:train_len]\n valid_list = data_list[train_len:]\n # print(len(train_list))\n # print(len(valid_list))\n return train_list, valid_list\n\n\n# load path\nimg_src_path = '../../data_road/training/image_2/'\ngt_src_path = '../../data_road/training/gt_image/'\n# save path\nbase_dir = '../../data_road_3/'\ntry:\n shutil.rmtree(base_dir)\nexcept OSError:\n pass\nos.mkdir(base_dir)\ntry:\n shutil.rmtree(base_dir+'training')\nexcept OSError:\n pass\nos.mkdir(base_dir+'training')\ntry:\n shutil.rmtree(base_dir+'validation')\nexcept OSError:\n pass\nos.mkdir(base_dir+'validation')\n\nimg_tra_path = base_dir+'training/image/'\ngt_tra_path = base_dir+'training/gt_image/'\nimg_val_path = base_dir+'validation/image/'\ngt_val_path = base_dir+'validation/gt_image/'\n\ntry:\n shutil.rmtree(img_tra_path)\nexcept OSError:\n pass\nos.mkdir(img_tra_path)\ntry:\n shutil.rmtree(gt_tra_path)\nexcept OSError:\n pass\nos.mkdir(gt_tra_path)\ntry:\n shutil.rmtree(img_val_path)\nexcept OSError:\n pass\nos.mkdir(img_val_path)\ntry:\n shutil.rmtree(gt_val_path)\nexcept OSError:\n pass\nos.mkdir(gt_val_path)\n\nname_list = ['um', 'umm', 'uu']\n\n\ndef image_crop(img):\n return img.crop((0, int(img.size[1]*0.45), img.size[0], img.size[1]))\n\n\nfor name in name_list:\n train_list, valid_list = traval_split(img_src_path, sub=name)\n for valid_img in valid_list:\n if IMG_CROP:\n img = Image.open(img_src_path+valid_img)\n img_crop = image_crop(img)\n img_crop.save(img_val_path+valid_img)\n\n gt = Image.open(gt_src_path+valid_img)\n gt_crop = image_crop(gt)\n gt_crop.save(gt_val_path+valid_img)\n else:\n shutil.copy(img_src_path+valid_img, img_val_path+valid_img)\n shutil.copy(gt_src_path+valid_img, gt_val_path+valid_img)\n for train_img in train_list:\n if IMG_CROP:\n img = Image.open(img_src_path+train_img)\n img_crop = image_crop(img)\n img_crop.save(img_tra_path+train_img)\n\n gt = Image.open(gt_src_path+train_img)\n gt_crop = image_crop(gt)\n gt_crop.save(gt_tra_path+train_img)\n else:\n shutil.copy(img_src_path+train_img, img_tra_path+train_img)\n shutil.copy(gt_src_path+train_img, gt_tra_path+train_img)\n\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
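A minimal sketch of the 9:1 shuffle split that the train_valid_split.py record above applies per file-name prefix. Illustration only; the sample file names are hypothetical placeholders.

    import random

    def split_90_10(names, seed=2020):
        # Shuffle the list and keep the first 90% for training, the rest for validation,
        # as in the traval_split helper in the record above.
        random.seed(seed)
        names = list(names)
        random.shuffle(names)
        cut = round(len(names) * 0.9)
        return names[:cut], names[cut:]

    train, valid = split_90_10([f"um_{k:06d}.png" for k in range(20)])
    print(len(train), len(valid))  # 18 2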
adler-j/jax | [
"3d7f884ccfe15da1b218903b37b255769223b4cf"
] | [
"tests/dtypes_test.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport operator\nimport unittest\n\nimport six\n\nif six.PY3:\n import enum\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as onp\n\nimport jax\nfrom jax import dtypes\nfrom jax import numpy as np\nfrom jax import test_util as jtu\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\nbool_dtypes = [onp.dtype('bool')]\n\nsigned_dtypes = [onp.dtype('int8'), onp.dtype('int16'), onp.dtype('int32'),\n onp.dtype('int64')]\n\nunsigned_dtypes = [onp.dtype('uint8'), onp.dtype('uint16'), onp.dtype('uint32'),\n onp.dtype('uint64')]\n\nonp_float_dtypes = [onp.dtype('float16'), onp.dtype('float32'),\n onp.dtype('float64')]\n\nfloat_dtypes = [onp.dtype(dtypes.bfloat16)] + onp_float_dtypes\n\ncomplex_dtypes = [onp.dtype('complex64'), onp.dtype('complex128')]\n\n\nall_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes +\n complex_dtypes)\n\n\nclass DtypesTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_type={}\".format(type.__name__), \"type\": type,\n \"dtype\": dtype}\n for type, dtype in [(bool, np.bool_), (int, np.int_), (float, np.float_),\n (complex, np.complex_)])\n def testDefaultTypes(self, type, dtype):\n for f in [np.array, jax.jit(np.array), jax.jit(lambda x: x)]:\n y = f(type(0))\n self.assertTrue(isinstance(y, np.ndarray), msg=(f, y))\n self.assertEqual(y.dtype, dtypes.canonicalize_dtype(dtype), msg=(f, y))\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"_swap={}_jit={}\".format(swap, jit),\n \"swap\": swap, \"jit\": jit} \n for swap in [False, True] for jit in [False, True])\n @jtu.skip_on_devices(\"tpu\") # F16 not supported on TPU\n def testBinaryPromotion(self, swap, jit):\n testcases = [\n (np.array(1.), 0., np.float_),\n (np.array(1.), np.array(0.), np.float_),\n (np.array(1.), np.array(0., dtype=np.float16), np.float_),\n (np.array(1.), np.array(0., dtype=np.float32), np.float_),\n (np.array(1.), np.array(0., dtype=np.float64), np.float64),\n (np.array(1., dtype=np.float16), 0., np.float16),\n (np.array(1., dtype=np.float32), 0., np.float32),\n (np.array(1., dtype=np.float64), 0., np.float64),\n (np.array(1., dtype=np.float16), np.array(0., dtype=np.float16), np.float16),\n (np.array(1., dtype=np.float16), np.array(0., dtype=np.float32), np.float32),\n (np.array(1., dtype=np.float16), np.array(0., dtype=np.float64), np.float64),\n (np.array(1., dtype=np.float32), np.array(0., dtype=np.float32), np.float32),\n (np.array(1., dtype=np.float32), np.array(0., dtype=np.float64), np.float64),\n (np.array(1., dtype=np.float64), np.array(0., dtype=np.float64), np.float64),\n (np.array([1.]), 0., np.float_),\n (np.array([1.]), np.array(0.), np.float_),\n (np.array([1.]), np.array(0., dtype=np.float16), np.float_),\n (np.array([1.]), 
np.array(0., dtype=np.float32), np.float_),\n (np.array([1.]), np.array(0., dtype=np.float64), np.float64),\n (np.array([1.], dtype=np.float32), np.array(0., dtype=np.float16), np.float32),\n (np.array([1.], dtype=np.float16), np.array(0., dtype=np.float32), np.float32),\n (np.array([1.], dtype=np.float16), 0., np.float16),\n ]\n op = jax.jit(operator.add) if jit else operator.add\n for x, y, dtype in testcases:\n x, y = (y, x) if swap else (x, y)\n z = x + y\n self.assertTrue(isinstance(z, np.ndarray), msg=(x, y, z))\n self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))\n\n def testPromoteDtypes(self):\n for t1 in all_dtypes:\n self.assertEqual(t1, dtypes.promote_types(t1, t1))\n\n self.assertEqual(t1, dtypes.promote_types(t1, onp.bool_))\n self.assertEqual(onp.dtype(onp.complex128),\n dtypes.promote_types(t1, onp.complex128))\n\n for t2 in all_dtypes:\n # Symmetry\n self.assertEqual(dtypes.promote_types(t1, t2),\n dtypes.promote_types(t2, t1))\n\n self.assertEqual(onp.dtype(onp.float32),\n dtypes.promote_types(onp.float16, dtypes.bfloat16))\n\n # Promotions of non-inexact types against inexact types always prefer\n # the inexact types.\n for t in float_dtypes + complex_dtypes:\n for i in bool_dtypes + signed_dtypes + unsigned_dtypes:\n self.assertEqual(t, dtypes.promote_types(t, i))\n\n # Promotions between exact types, or between inexact types, match NumPy.\n for groups in [bool_dtypes + signed_dtypes + unsigned_dtypes,\n onp_float_dtypes + complex_dtypes]:\n for t1, t2 in itertools.combinations(groups, 2):\n self.assertEqual(onp.promote_types(t1, t2),\n dtypes.promote_types(t1, t2))\n\n\n @unittest.skipIf(six.PY2, \"Test requires Python 3\")\n def testEnumPromotion(self):\n class AnEnum(enum.IntEnum):\n A = 42\n B = 101\n onp.testing.assert_equal(onp.array(42), onp.array(AnEnum.A))\n onp.testing.assert_equal(np.array(42), np.array(AnEnum.A))\n onp.testing.assert_equal(onp.int32(101), onp.int32(AnEnum.B))\n onp.testing.assert_equal(np.int32(101), np.int32(AnEnum.B))\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.array",
"numpy.promote_types",
"numpy.dtype",
"numpy.int32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
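A minimal sketch, using plain NumPy only, of two properties exercised by the dtypes_test.py record above: type promotion is symmetric, and promoting a dtype with itself is a no-op. It does not require JAX to run; the dtype sample below is an arbitrary choice for illustration.

    import itertools
    import numpy as np

    exact = [np.dtype(t) for t in ('bool', 'int8', 'int16', 'int32', 'uint8')]

    # promote_types is symmetric and idempotent on this sample of exact types.
    for t1, t2 in itertools.product(exact, repeat=2):
        assert np.promote_types(t1, t2) == np.promote_types(t2, t1)
    for t in exact:
        assert np.promote_types(t, t) == t
    print("promotion checks passed")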
ZHG2017/scipy | [
"859c1061b3d5aa30c4466824049d69edde5499a2",
"859c1061b3d5aa30c4466824049d69edde5499a2"
] | [
"scipy/linalg/lapack.py",
"scipy/stats/morestats.py"
] | [
"\"\"\"\nLow-level LAPACK functions (:mod:`scipy.linalg.lapack`)\n=======================================================\n\nThis module contains low-level functions from the LAPACK library.\n\nThe `*gegv` family of routines have been removed from LAPACK 3.6.0\nand have been deprecated in SciPy 0.17.0. They will be removed in\na future release.\n\n.. versionadded:: 0.12.0\n\n.. note::\n\n The common ``overwrite_<>`` option in many routines, allows the\n input arrays to be overwritten to avoid extra memory allocation.\n However this requires the array to satisfy two conditions\n which are memory order and the data type to match exactly the\n order and the type expected by the routine.\n\n As an example, if you pass a double precision float array to any\n ``S....`` routine which expects single precision arguments, f2py\n will create an intermediate array to match the argument types and\n overwriting will be performed on that intermediate array.\n\n Similarly, if a C-contiguous array is passed, f2py will pass a\n FORTRAN-contiguous array internally. Please make sure that these\n details are satisfied. More information can be found in the f2py\n documentation.\n\n.. warning::\n\n These functions do little to no error checking.\n It is possible to cause crashes by mis-using them,\n so prefer using the higher-level routines in `scipy.linalg`.\n\nFinding functions\n-----------------\n\n.. autosummary::\n :toctree: generated/\n\n get_lapack_funcs\n\nAll functions\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n sgbsv\n dgbsv\n cgbsv\n zgbsv\n\n sgbtrf\n dgbtrf\n cgbtrf\n zgbtrf\n\n sgbtrs\n dgbtrs\n cgbtrs\n zgbtrs\n\n sgebal\n dgebal\n cgebal\n zgebal\n\n sgecon\n dgecon\n cgecon\n zgecon\n\n sgeequ\n dgeequ\n cgeequ\n zgeequ\n\n sgeequb\n dgeequb\n cgeequb\n zgeequb\n\n sgees\n dgees\n cgees\n zgees\n\n sgeev\n dgeev\n cgeev\n zgeev\n\n sgeev_lwork\n dgeev_lwork\n cgeev_lwork\n zgeev_lwork\n\n sgegv\n dgegv\n cgegv\n zgegv\n\n sgehrd\n dgehrd\n cgehrd\n zgehrd\n\n sgehrd_lwork\n dgehrd_lwork\n cgehrd_lwork\n zgehrd_lwork\n\n sgejsv\n dgejsv\n\n sgels\n dgels\n cgels\n zgels\n\n sgels_lwork\n dgels_lwork\n cgels_lwork\n zgels_lwork\n\n sgelsd\n dgelsd\n cgelsd\n zgelsd\n\n sgelsd_lwork\n dgelsd_lwork\n cgelsd_lwork\n zgelsd_lwork\n\n sgelss\n dgelss\n cgelss\n zgelss\n\n sgelss_lwork\n dgelss_lwork\n cgelss_lwork\n zgelss_lwork\n\n sgelsy\n dgelsy\n cgelsy\n zgelsy\n\n sgelsy_lwork\n dgelsy_lwork\n cgelsy_lwork\n zgelsy_lwork\n\n sgeqp3\n dgeqp3\n cgeqp3\n zgeqp3\n\n sgeqrf\n dgeqrf\n cgeqrf\n zgeqrf\n\n sgeqrf_lwork\n dgeqrf_lwork\n cgeqrf_lwork\n zgeqrf_lwork\n\n sgeqrfp\n dgeqrfp\n cgeqrfp\n zgeqrfp\n\n sgeqrfp_lwork\n dgeqrfp_lwork\n cgeqrfp_lwork\n zgeqrfp_lwork\n\n sgerqf\n dgerqf\n cgerqf\n zgerqf\n\n sgesdd\n dgesdd\n cgesdd\n zgesdd\n\n sgesdd_lwork\n dgesdd_lwork\n cgesdd_lwork\n zgesdd_lwork\n\n sgesv\n dgesv\n cgesv\n zgesv\n\n sgesvd\n dgesvd\n cgesvd\n zgesvd\n\n sgesvd_lwork\n dgesvd_lwork\n cgesvd_lwork\n zgesvd_lwork\n\n sgesvx\n dgesvx\n cgesvx\n zgesvx\n\n sgetrf\n dgetrf\n cgetrf\n zgetrf\n\n sgetc2\n dgetc2\n cgetc2\n zgetc2\n\n sgetri\n dgetri\n cgetri\n zgetri\n\n sgetri_lwork\n dgetri_lwork\n cgetri_lwork\n zgetri_lwork\n\n sgetrs\n dgetrs\n cgetrs\n zgetrs\n\n sgesc2\n dgesc2\n cgesc2\n zgesc2\n\n sgges\n dgges\n cgges\n zgges\n\n sggev\n dggev\n cggev\n zggev\n\n sgglse\n dgglse\n cgglse\n zgglse\n\n sgglse_lwork\n dgglse_lwork\n cgglse_lwork\n zgglse_lwork\n\n sgtsv\n dgtsv\n cgtsv\n zgtsv\n\n sgtsvx\n dgtsvx\n cgtsvx\n zgtsvx\n\n chbevd\n zhbevd\n\n 
chbevx\n zhbevx\n\n checon\n zhecon\n\n cheequb\n zheequb\n\n cheev\n zheev\n\n cheev_lwork\n zheev_lwork\n\n cheevd\n zheevd\n\n cheevd_lwork\n zheevd_lwork\n\n cheevr\n zheevr\n\n cheevr_lwork\n zheevr_lwork\n\n cheevx\n zheevx\n\n cheevx_lwork\n zheevx_lwork\n\n chegst\n zhegst\n\n chegv\n zhegv\n\n chegv_lwork\n zhegv_lwork\n\n chegvd\n zhegvd\n\n chegvx\n zhegvx\n\n chegvx_lwork\n zhegvx_lwork\n\n chesv\n zhesv\n\n chesv_lwork\n zhesv_lwork\n\n chesvx\n zhesvx\n\n chesvx_lwork\n zhesvx_lwork\n\n chetrd\n zhetrd\n\n chetrd_lwork\n zhetrd_lwork\n\n chetrf\n zhetrf\n\n chetrf_lwork\n zhetrf_lwork\n\n chfrk\n zhfrk\n\n slamch\n dlamch\n\n slange\n dlange\n clange\n zlange\n\n slarf\n dlarf\n clarf\n zlarf\n\n slarfg\n dlarfg\n clarfg\n zlarfg\n\n slartg\n dlartg\n clartg\n zlartg\n\n slasd4\n dlasd4\n\n slaswp\n dlaswp\n claswp\n zlaswp\n\n slauum\n dlauum\n clauum\n zlauum\n\n sorcsd\n dorcsd\n sorcsd_lwork\n dorcsd_lwork\n\n sorghr\n dorghr\n sorghr_lwork\n dorghr_lwork\n\n sorgqr\n dorgqr\n\n sorgrq\n dorgrq\n\n sormqr\n dormqr\n\n sormrz\n dormrz\n\n sormrz_lwork\n dormrz_lwork\n\n spbsv\n dpbsv\n cpbsv\n zpbsv\n\n spbtrf\n dpbtrf\n cpbtrf\n zpbtrf\n\n spbtrs\n dpbtrs\n cpbtrs\n zpbtrs\n\n spftrf\n dpftrf\n cpftrf\n zpftrf\n\n spftri\n dpftri\n cpftri\n zpftri\n\n spftrs\n dpftrs\n cpftrs\n zpftrs\n\n spocon\n dpocon\n cpocon\n zpocon\n\n spstrf\n dpstrf\n cpstrf\n zpstrf\n\n spstf2\n dpstf2\n cpstf2\n zpstf2\n\n sposv\n dposv\n cposv\n zposv\n\n sposvx\n dposvx\n cposvx\n zposvx\n\n spotrf\n dpotrf\n cpotrf\n zpotrf\n\n spotri\n dpotri\n cpotri\n zpotri\n\n spotrs\n dpotrs\n cpotrs\n zpotrs\n\n sppcon\n dppcon\n cppcon\n zppcon\n\n sppsv\n dppsv\n cppsv\n zppsv\n\n spptrf\n dpptrf\n cpptrf\n zpptrf\n\n spptri\n dpptri\n cpptri\n zpptri\n\n spptrs\n dpptrs\n cpptrs\n zpptrs\n\n sptsv\n dptsv\n cptsv\n zptsv\n\n sptsvx\n dptsvx\n cptsvx\n zptsvx\n\n spttrf\n dpttrf\n cpttrf\n zpttrf\n\n spttrs\n dpttrs\n cpttrs\n zpttrs\n\n spteqr\n dpteqr\n cpteqr\n zpteqr\n\n crot\n zrot\n\n ssbev\n dsbev\n\n ssbevd\n dsbevd\n\n ssbevx\n dsbevx\n\n ssfrk\n dsfrk\n\n sstebz\n dstebz\n\n sstein\n dstein\n\n sstemr\n dstemr\n\n sstemr_lwork\n dstemr_lwork\n\n ssterf\n dsterf\n\n sstev\n dstev\n\n ssycon\n dsycon\n csycon\n zsycon\n\n ssyconv\n dsyconv\n csyconv\n zsyconv\n\n ssyequb\n dsyequb\n csyequb\n zsyequb\n\n ssyev\n dsyev\n\n ssyev_lwork\n dsyev_lwork\n\n ssyevd\n dsyevd\n\n ssyevd_lwork\n dsyevd_lwork\n\n ssyevr\n dsyevr\n\n ssyevr_lwork\n dsyevr_lwork\n\n ssyevx\n dsyevx\n\n ssyevx_lwork\n dsyevx_lwork\n\n ssygst\n dsygst\n\n ssygv\n dsygv\n\n ssygv_lwork\n dsygv_lwork\n\n ssygvd\n dsygvd\n\n ssygvx\n dsygvx\n\n ssygvx_lwork\n dsygvx_lwork\n\n ssysv\n dsysv\n csysv\n zsysv\n\n ssysv_lwork\n dsysv_lwork\n csysv_lwork\n zsysv_lwork\n\n ssysvx\n dsysvx\n csysvx\n zsysvx\n\n ssysvx_lwork\n dsysvx_lwork\n csysvx_lwork\n zsysvx_lwork\n\n ssytf2\n dsytf2\n csytf2\n zsytf2\n\n ssytrd\n dsytrd\n\n ssytrd_lwork\n dsytrd_lwork\n\n ssytrf\n dsytrf\n csytrf\n zsytrf\n\n ssytrf_lwork\n dsytrf_lwork\n csytrf_lwork\n zsytrf_lwork\n\n stbtrs\n dtbtrs\n ctbtrs\n ztbtrs\n\n stfsm\n dtfsm\n ctfsm\n ztfsm\n\n stfttp\n dtfttp\n ctfttp\n ztfttp\n\n stfttr\n dtfttr\n ctfttr\n ztfttr\n\n stgexc\n dtgexc\n ctgexc\n ztgexc\n\n stgsen\n dtgsen\n ctgsen\n ztgsen\n\n stpttf\n dtpttf\n ctpttf\n ztpttf\n\n stpttr\n dtpttr\n ctpttr\n ztpttr\n\n strsyl\n dtrsyl\n ctrsyl\n ztrsyl\n\n strtri\n dtrtri\n ctrtri\n ztrtri\n\n strtrs\n dtrtrs\n ctrtrs\n ztrtrs\n\n strttf\n dtrttf\n ctrttf\n ztrttf\n\n strttp\n dtrttp\n ctrttp\n 
ztrttp\n\n stzrzf\n dtzrzf\n ctzrzf\n ztzrzf\n\n stzrzf_lwork\n dtzrzf_lwork\n ctzrzf_lwork\n ztzrzf_lwork\n\n cunghr\n zunghr\n\n cunghr_lwork\n zunghr_lwork\n\n cungqr\n zungqr\n\n cungrq\n zungrq\n\n cunmqr\n zunmqr\n\n sgeqrt\n dgeqrt\n cgeqrt\n zgeqrt\n\n sgemqrt\n dgemqrt\n cgemqrt\n zgemqrt\n\n sgttrf\n dgttrf\n cgttrf\n zgttrf\n\n sgttrs\n dgttrs\n cgttrs\n zgttrs\n\n stpqrt\n dtpqrt\n ctpqrt\n ztpqrt\n\n stpmqrt\n dtpmqrt\n ctpmqrt\n ztpmqrt\n\n cuncsd\n zuncsd\n\n cuncsd_lwork\n zuncsd_lwork\n\n cunmrz\n zunmrz\n\n cunmrz_lwork\n zunmrz_lwork\n\n ilaver\n\n\"\"\"\n#\n# Author: Pearu Peterson, March 2002\n#\n\nimport numpy as _np\nfrom .blas import _get_funcs, _memoize_get_funcs\nfrom scipy.linalg import _flapack\nfrom re import compile as regex_compile\ntry:\n from scipy.linalg import _clapack\nexcept ImportError:\n _clapack = None\n\ntry:\n from scipy.linalg import _flapack_64\n HAS_ILP64 = True\nexcept ImportError:\n HAS_ILP64 = False\n _flapack_64 = None\n\n# Backward compatibility\nfrom scipy._lib._util import DeprecatedImport as _DeprecatedImport\nclapack = _DeprecatedImport(\"scipy.linalg.blas.clapack\", \"scipy.linalg.lapack\")\nflapack = _DeprecatedImport(\"scipy.linalg.blas.flapack\", \"scipy.linalg.lapack\")\n\n# Expose all functions (only flapack --- clapack is an implementation detail)\nempty_module = None\nfrom scipy.linalg._flapack import *\ndel empty_module\n\n__all__ = ['get_lapack_funcs']\n\n_dep_message = \"\"\"The `*gegv` family of routines has been deprecated in\nLAPACK 3.6.0 in favor of the `*ggev` family of routines.\nThe corresponding wrappers will be removed from SciPy in\na future release.\"\"\"\n\ncgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message)\ndgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message)\nsgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message)\nzgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message)\n\n# Modify _flapack in this scope so the deprecation warnings apply to\n# functions returned by get_lapack_funcs.\n_flapack.cgegv = cgegv\n_flapack.dgegv = dgegv\n_flapack.sgegv = sgegv\n_flapack.zgegv = zgegv\n\n# some convenience alias for complex functions\n_lapack_alias = {\n 'corghr': 'cunghr', 'zorghr': 'zunghr',\n 'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',\n 'corgqr': 'cungqr', 'zorgqr': 'zungqr',\n 'cormqr': 'cunmqr', 'zormqr': 'zunmqr',\n 'corgrq': 'cungrq', 'zorgrq': 'zungrq',\n}\n\n\n# Place guards against docstring rendering issues with special characters\np1 = regex_compile(r'with bounds (?P<b>.*?)( and (?P<s>.*?) 
storage){0,1}\\n')\np2 = regex_compile(r'Default: (?P<d>.*?)\\n')\n\n\ndef backtickrepl(m):\n if m.group('s'):\n return ('with bounds ``{}`` with ``{}`` storage\\n'\n ''.format(m.group('b'), m.group('s')))\n else:\n return 'with bounds ``{}``\\n'.format(m.group('b'))\n\n\nfor routine in [ssyevr, dsyevr, cheevr, zheevr,\n ssyevx, dsyevx, cheevx, zheevx,\n ssygvd, dsygvd, chegvd, zhegvd]:\n if routine.__doc__:\n routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)\n routine.__doc__ = p2.sub('Default ``\\\\1``\\n', routine.__doc__)\n else:\n continue\n\ndel regex_compile, p1, p2, backtickrepl\n\n\n@_memoize_get_funcs\ndef get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):\n \"\"\"Return available LAPACK function objects from names.\n\n Arrays are used to determine the optimal prefix of LAPACK routines.\n\n Parameters\n ----------\n names : str or sequence of str\n Name(s) of LAPACK functions without type prefix.\n\n arrays : sequence of ndarrays, optional\n Arrays can be given to determine optimal prefix of LAPACK\n routines. If not given, double-precision routines will be\n used, otherwise the most generic type in arrays will be used.\n\n dtype : str or dtype, optional\n Data-type specifier. Not used if `arrays` is non-empty.\n\n ilp64 : {True, False, 'preferred'}, optional\n Whether to return ILP64 routine variant.\n Choosing 'preferred' returns ILP64 routine if available, and\n otherwise the 32-bit routine. Default: False\n\n Returns\n -------\n funcs : list\n List containing the found function(s).\n\n Notes\n -----\n This routine automatically chooses between Fortran/C\n interfaces. Fortran code is used whenever possible for arrays with\n column major order. In all other cases, C code is preferred.\n\n In LAPACK, the naming convention is that all functions start with a\n type prefix, which depends on the type of the principal\n matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy\n types {float32, float64, complex64, complex128} respectively, and\n are stored in attribute ``typecode`` of the returned functions.\n\n Examples\n --------\n Suppose we would like to use '?lange' routine which computes the selected\n norm of an array. We pass our array in order to get the correct 'lange'\n flavor.\n\n >>> import scipy.linalg as LA\n >>> a = np.random.rand(3,2)\n >>> x_lange = LA.get_lapack_funcs('lange', (a,))\n >>> x_lange.typecode\n 'd'\n >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,))\n >>> x_lange.typecode\n 'z'\n\n Several LAPACK routines work best when its internal WORK array has\n the optimal size (big enough for fast computation and small enough to\n avoid waste of memory). This size is determined also by a dedicated query\n to the function which is often wrapped as a standalone function and\n commonly denoted as ``###_lwork``. Below is an example for ``?sysv``\n\n >>> import scipy.linalg as LA\n >>> a = np.random.rand(1000,1000)\n >>> b = np.random.rand(1000,1)*1j\n >>> # We pick up zsysv and zsysv_lwork due to b array\n ... 
xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b))\n >>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix\n >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real))\n\n \"\"\"\n if isinstance(ilp64, str):\n if ilp64 == 'preferred':\n ilp64 = HAS_ILP64\n else:\n raise ValueError(\"Invalid value for 'ilp64'\")\n\n if not ilp64:\n return _get_funcs(names, arrays, dtype,\n \"LAPACK\", _flapack, _clapack,\n \"flapack\", \"clapack\", _lapack_alias,\n ilp64=False)\n else:\n if not HAS_ILP64:\n raise RuntimeError(\"LAPACK ILP64 routine requested, but Scipy \"\n \"compiled only with 32-bit BLAS\")\n return _get_funcs(names, arrays, dtype,\n \"LAPACK\", _flapack_64, None,\n \"flapack_64\", None, _lapack_alias,\n ilp64=True)\n\n\n_int32_max = _np.iinfo(_np.int32).max\n_int64_max = _np.iinfo(_np.int64).max\n\n\ndef _compute_lwork(routine, *args, **kwargs):\n \"\"\"\n Round floating-point lwork returned by lapack to integer.\n\n Several LAPACK routines compute optimal values for LWORK, which\n they return in a floating-point variable. However, for large\n values of LWORK, single-precision floating point is not sufficient\n to hold the exact value --- some LAPACK versions (<= 3.5.0 at\n least) truncate the returned integer to single precision and in\n some cases this can be smaller than the required value.\n\n Examples\n --------\n >>> from scipy.linalg import lapack\n >>> n = 5000\n >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork'))\n >>> lwork = lapack._compute_lwork(s_lw, n)\n >>> lwork\n 32000\n\n \"\"\"\n dtype = getattr(routine, 'dtype', None)\n int_dtype = getattr(routine, 'int_dtype', None)\n ret = routine(*args, **kwargs)\n if ret[-1] != 0:\n raise ValueError(\"Internal work array size computation failed: \"\n \"%d\" % (ret[-1],))\n\n if len(ret) == 2:\n return _check_work_float(ret[0].real, dtype, int_dtype)\n else:\n return tuple(_check_work_float(x.real, dtype, int_dtype)\n for x in ret[:-1])\n\n\ndef _check_work_float(value, dtype, int_dtype):\n \"\"\"\n Convert LAPACK-returned work array size float to integer,\n carefully for single-precision types.\n \"\"\"\n\n if dtype == _np.float32 or dtype == _np.complex64:\n # Single-precision routine -- take next fp value to work\n # around possible truncation in LAPACK code\n value = _np.nextafter(value, _np.inf, dtype=_np.float32)\n\n value = int(value)\n if int_dtype.itemsize == 4:\n if value < 0 or value > _int32_max:\n raise ValueError(\"Too large work array required -- computation \"\n \"cannot be performed with standard 32-bit\"\n \" LAPACK.\")\n elif int_dtype.itemsize == 8:\n if value < 0 or value > _int64_max:\n raise ValueError(\"Too large work array required -- computation\"\n \" cannot be performed with standard 64-bit\"\n \" LAPACK.\")\n return value\n",
"import math\nimport warnings\nfrom collections import namedtuple\n\nimport numpy as np\nfrom numpy import (isscalar, r_, log, around, unique, asarray,\n zeros, arange, sort, amin, amax, any, atleast_1d,\n sqrt, ceil, floor, array, compress,\n pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)\n\nfrom scipy import optimize\nfrom scipy import special\nfrom . import statlib\nfrom . import stats\nfrom .stats import find_repeats, _contains_nan\nfrom .contingency import chi2_contingency\nfrom . import distributions\nfrom ._distn_infrastructure import rv_generic\nfrom ._hypotests import _get_wilcoxon_distr\n\n\n__all__ = ['mvsdist',\n 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',\n 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',\n 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',\n 'fligner', 'mood', 'wilcoxon', 'median_test',\n 'circmean', 'circvar', 'circstd', 'anderson_ksamp',\n 'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',\n 'yeojohnson_normplot'\n ]\n\n\nMean = namedtuple('Mean', ('statistic', 'minmax'))\nVariance = namedtuple('Variance', ('statistic', 'minmax'))\nStd_dev = namedtuple('Std_dev', ('statistic', 'minmax'))\n\n\ndef bayes_mvs(data, alpha=0.90):\n r\"\"\"\n Bayesian confidence intervals for the mean, var, and std.\n\n Parameters\n ----------\n data : array_like\n Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.\n Requires 2 or more data points.\n alpha : float, optional\n Probability that the returned confidence interval contains\n the true parameter.\n\n Returns\n -------\n mean_cntr, var_cntr, std_cntr : tuple\n The three results are for the mean, variance and standard deviation,\n respectively. Each result is a tuple of the form::\n\n (center, (lower, upper))\n\n with `center` the mean of the conditional pdf of the value given the\n data, and `(lower, upper)` a confidence interval, centered on the\n median, containing the estimate to a probability ``alpha``.\n\n See Also\n --------\n mvsdist\n\n Notes\n -----\n Each tuple of mean, variance, and standard deviation estimates represent\n the (center, (lower, upper)) with center the mean of the conditional pdf\n of the value given the data and (lower, upper) is a confidence interval\n centered on the median, containing the estimate to a probability\n ``alpha``.\n\n Converts data to 1-D and assumes all data has the same mean and variance.\n Uses Jeffrey's prior for variance and std.\n\n Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``\n\n References\n ----------\n T.E. 
Oliphant, \"A Bayesian perspective on estimating mean, variance, and\n standard-deviation from data\", https://scholarsarchive.byu.edu/facpub/278,\n 2006.\n\n Examples\n --------\n First a basic example to demonstrate the outputs:\n\n >>> from scipy import stats\n >>> data = [6, 9, 12, 7, 8, 8, 13]\n >>> mean, var, std = stats.bayes_mvs(data)\n >>> mean\n Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467))\n >>> var\n Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))\n >>> std\n Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631))\n\n Now we generate some normally distributed random data, and get estimates of\n mean and standard deviation with 95% confidence intervals for those\n estimates:\n\n >>> n_samples = 100000\n >>> data = stats.norm.rvs(size=n_samples)\n >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)\n\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.hist(data, bins=100, density=True, label='Histogram of data')\n >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')\n >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',\n ... alpha=0.2, label=r'Estimated mean (95% limits)')\n >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')\n >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,\n ... label=r'Estimated scale (95% limits)')\n\n >>> ax.legend(fontsize=10)\n >>> ax.set_xlim([-4, 4])\n >>> ax.set_ylim([0, 0.5])\n >>> plt.show()\n\n \"\"\"\n m, v, s = mvsdist(data)\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"0 < alpha < 1 is required, but alpha=%s was given.\"\n % alpha)\n\n m_res = Mean(m.mean(), m.interval(alpha))\n v_res = Variance(v.mean(), v.interval(alpha))\n s_res = Std_dev(s.mean(), s.interval(alpha))\n\n return m_res, v_res, s_res\n\n\ndef mvsdist(data):\n \"\"\"\n 'Frozen' distributions for mean, variance, and standard deviation of data.\n\n Parameters\n ----------\n data : array_like\n Input array. Converted to 1-D using ravel.\n Requires 2 or more data-points.\n\n Returns\n -------\n mdist : \"frozen\" distribution object\n Distribution object representing the mean of the data.\n vdist : \"frozen\" distribution object\n Distribution object representing the variance of the data.\n sdist : \"frozen\" distribution object\n Distribution object representing the standard deviation of the data.\n\n See Also\n --------\n bayes_mvs\n\n Notes\n -----\n The return values from ``bayes_mvs(data)`` is equivalent to\n ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.\n\n In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``\n on the three distribution objects returned from this function will give\n the same results that are returned from `bayes_mvs`.\n\n References\n ----------\n T.E. 
Oliphant, \"A Bayesian perspective on estimating mean, variance, and\n standard-deviation from data\", https://scholarsarchive.byu.edu/facpub/278,\n 2006.\n\n Examples\n --------\n >>> from scipy import stats\n >>> data = [6, 9, 12, 7, 8, 8, 13]\n >>> mean, var, std = stats.mvsdist(data)\n\n We now have frozen distribution objects \"mean\", \"var\" and \"std\" that we can\n examine:\n\n >>> mean.mean()\n 9.0\n >>> mean.interval(0.95)\n (6.6120585482655692, 11.387941451734431)\n >>> mean.std()\n 1.1952286093343936\n\n \"\"\"\n x = ravel(data)\n n = len(x)\n if n < 2:\n raise ValueError(\"Need at least 2 data-points.\")\n xbar = x.mean()\n C = x.var()\n if n > 1000: # gaussian approximations for large n\n mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))\n sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))\n vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)\n else:\n nm1 = n - 1\n fac = n * C / 2.\n val = nm1 / 2.\n mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))\n sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))\n vdist = distributions.invgamma(val, scale=fac)\n return mdist, vdist, sdist\n\n\ndef kstat(data, n=2):\n r\"\"\"\n Return the nth k-statistic (1<=n<=4 so far).\n\n The nth k-statistic k_n is the unique symmetric unbiased estimator of the\n nth cumulant kappa_n.\n\n Parameters\n ----------\n data : array_like\n Input array. Note that n-D input gets flattened.\n n : int, {1, 2, 3, 4}, optional\n Default is equal to 2.\n\n Returns\n -------\n kstat : float\n The nth k-statistic.\n\n See Also\n --------\n kstatvar: Returns an unbiased estimator of the variance of the k-statistic.\n moment: Returns the n-th central moment about the mean for a sample.\n\n Notes\n -----\n For a sample size n, the first few k-statistics are given by:\n\n .. math::\n\n k_{1} = \\mu\n k_{2} = \\frac{n}{n-1} m_{2}\n k_{3} = \\frac{ n^{2} } {(n-1) (n-2)} m_{3}\n k_{4} = \\frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}\n\n where :math:`\\mu` is the sample mean, :math:`m_2` is the sample\n variance, and :math:`m_i` is the i-th sample central moment.\n\n References\n ----------\n http://mathworld.wolfram.com/k-Statistic.html\n\n http://mathworld.wolfram.com/Cumulant.html\n\n Examples\n --------\n >>> from scipy import stats\n >>> rndm = np.random.RandomState(1234)\n\n As sample size increases, n-th moment and n-th k-statistic converge to the\n same number (although they aren't identical). In the case of the normal\n distribution, they converge to zero.\n\n >>> for n in [2, 3, 4, 5, 6, 7]:\n ... x = rndm.normal(size=10**n)\n ... m, k = stats.moment(x, 3), stats.kstat(x, 3)\n ... 
print(\"%.3g %.3g %.3g\" % (m, k, m-k))\n -0.631 -0.651 0.0194\n 0.0282 0.0283 -8.49e-05\n -0.0454 -0.0454 1.36e-05\n 7.53e-05 7.53e-05 -2.26e-09\n 0.00166 0.00166 -4.99e-09\n -2.88e-06 -2.88e-06 8.63e-13\n \"\"\"\n if n > 4 or n < 1:\n raise ValueError(\"k-statistics only supported for 1<=n<=4\")\n n = int(n)\n S = np.zeros(n + 1, np.float64)\n data = ravel(data)\n N = data.size\n\n # raise ValueError on empty input\n if N == 0:\n raise ValueError(\"Data input must not be empty\")\n\n # on nan input, return nan without warning\n if np.isnan(np.sum(data)):\n return np.nan\n\n for k in range(1, n + 1):\n S[k] = np.sum(data**k, axis=0)\n if n == 1:\n return S[1] * 1.0/N\n elif n == 2:\n return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))\n elif n == 3:\n return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))\n elif n == 4:\n return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -\n 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /\n (N*(N-1.0)*(N-2.0)*(N-3.0)))\n else:\n raise ValueError(\"Should not be here.\")\n\n\ndef kstatvar(data, n=2):\n r\"\"\"\n Return an unbiased estimator of the variance of the k-statistic.\n\n See `kstat` for more details of the k-statistic.\n\n Parameters\n ----------\n data : array_like\n Input array. Note that n-D input gets flattened.\n n : int, {1, 2}, optional\n Default is equal to 2.\n\n Returns\n -------\n kstatvar : float\n The nth k-statistic variance.\n\n See Also\n --------\n kstat: Returns the n-th k-statistic.\n moment: Returns the n-th central moment about the mean for a sample.\n\n Notes\n -----\n The variances of the first few k-statistics are given by:\n\n .. math::\n\n var(k_{1}) = \\frac{\\kappa^2}{n}\n var(k_{2}) = \\frac{\\kappa^4}{n} + \\frac{2\\kappa^2_{2}}{n - 1}\n var(k_{3}) = \\frac{\\kappa^6}{n} + \\frac{9 \\kappa_2 \\kappa_4}{n - 1} +\n \\frac{9 \\kappa^2_{3}}{n - 1} +\n \\frac{6 n \\kappa^3_{2}}{(n-1) (n-2)}\n var(k_{4}) = \\frac{\\kappa^8}{n} + \\frac{16 \\kappa_2 \\kappa_6}{n - 1} +\n \\frac{48 \\kappa_{3} \\kappa_5}{n - 1} +\n \\frac{34 \\kappa^2_{4}}{n-1} + \\frac{72 n \\kappa^2_{2} \\kappa_4}{(n - 1) (n - 2)} +\n \\frac{144 n \\kappa_{2} \\kappa^2_{3}}{(n - 1) (n - 2)} +\n \\frac{24 (n + 1) n \\kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}\n \"\"\"\n data = ravel(data)\n N = len(data)\n if n == 1:\n return kstat(data, n=2) * 1.0/N\n elif n == 2:\n k2 = kstat(data, n=2)\n k4 = kstat(data, n=4)\n return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))\n else:\n raise ValueError(\"Only n=1 or n=2 supported.\")\n\n\ndef _calc_uniform_order_statistic_medians(n):\n \"\"\"\n Approximations of uniform order statistic medians.\n\n Parameters\n ----------\n n : int\n Sample size.\n\n Returns\n -------\n v : 1d float array\n Approximations of the order statistic medians.\n\n References\n ----------\n .. [1] James J. Filliben, \"The Probability Plot Correlation Coefficient\n Test for Normality\", Technometrics, Vol. 17, pp. 
111-117, 1975.\n\n Examples\n --------\n Order statistics of the uniform distribution on the unit interval\n are marginally distributed according to beta distributions.\n The expectations of these order statistic are evenly spaced across\n the interval, but the distributions are skewed in a way that\n pushes the medians slightly towards the endpoints of the unit interval:\n\n >>> n = 4\n >>> k = np.arange(1, n+1)\n >>> from scipy.stats import beta\n >>> a = k\n >>> b = n-k+1\n >>> beta.mean(a, b)\n array([ 0.2, 0.4, 0.6, 0.8])\n >>> beta.median(a, b)\n array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])\n\n The Filliben approximation uses the exact medians of the smallest\n and greatest order statistics, and the remaining medians are approximated\n by points spread evenly across a sub-interval of the unit interval:\n\n >>> from scipy.morestats import _calc_uniform_order_statistic_medians\n >>> _calc_uniform_order_statistic_medians(n)\n array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])\n\n This plot shows the skewed distributions of the order statistics\n of a sample of size four from a uniform distribution on the unit interval:\n\n >>> import matplotlib.pyplot as plt\n >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)\n >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]\n >>> plt.figure()\n >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])\n\n \"\"\"\n v = np.empty(n, dtype=np.float64)\n v[-1] = 0.5**(1.0 / n)\n v[0] = 1 - v[-1]\n i = np.arange(2, n)\n v[1:-1] = (i - 0.3175) / (n + 0.365)\n return v\n\n\ndef _parse_dist_kw(dist, enforce_subclass=True):\n \"\"\"Parse `dist` keyword.\n\n Parameters\n ----------\n dist : str or stats.distributions instance.\n Several functions take `dist` as a keyword, hence this utility\n function.\n enforce_subclass : bool, optional\n If True (default), `dist` needs to be a\n `_distn_infrastructure.rv_generic` instance.\n It can sometimes be useful to set this keyword to False, if a function\n wants to accept objects that just look somewhat like such an instance\n (for example, they have a ``ppf`` method).\n\n \"\"\"\n if isinstance(dist, rv_generic):\n pass\n elif isinstance(dist, str):\n try:\n dist = getattr(distributions, dist)\n except AttributeError as e:\n raise ValueError(\"%s is not a valid distribution name\" % dist) from e\n elif enforce_subclass:\n msg = (\"`dist` should be a stats.distributions instance or a string \"\n \"with the name of such a distribution.\")\n raise ValueError(msg)\n\n return dist\n\n\ndef _add_axis_labels_title(plot, xlabel, ylabel, title):\n \"\"\"Helper function to add axes labels and a title to stats plots\"\"\"\n try:\n if hasattr(plot, 'set_title'):\n # Matplotlib Axes instance or something that looks like it\n plot.set_title(title)\n plot.set_xlabel(xlabel)\n plot.set_ylabel(ylabel)\n else:\n # matplotlib.pyplot module\n plot.title(title)\n plot.xlabel(xlabel)\n plot.ylabel(ylabel)\n except Exception:\n # Not an MPL object or something that looks (enough) like it.\n # Don't crash on adding labels or title\n pass\n\n\ndef probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):\n \"\"\"\n Calculate quantiles for a probability plot, and optionally show the plot.\n\n Generates a probability plot of sample data against the quantiles of a\n specified theoretical distribution (the normal distribution by default).\n `probplot` optionally calculates a best-fit line for the data and plots the\n results using Matplotlib or a given plot function.\n\n Parameters\n ----------\n x : 
array_like\n Sample/response data from which `probplot` creates the plot.\n sparams : tuple, optional\n Distribution-specific shape parameters (shape parameters plus location\n and scale).\n dist : str or stats.distributions instance, optional\n Distribution or distribution function name. The default is 'norm' for a\n normal probability plot. Objects that look enough like a\n stats.distributions instance (i.e. they have a ``ppf`` method) are also\n accepted.\n fit : bool, optional\n Fit a least-squares regression (best-fit) line to the sample data if\n True (default).\n plot : object, optional\n If given, plots the quantiles and least squares fit.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n\n Returns\n -------\n (osm, osr) : tuple of ndarrays\n Tuple of theoretical quantiles (osm, or order statistic medians) and\n ordered responses (osr). `osr` is simply sorted input `x`.\n For details on how `osm` is calculated see the Notes section.\n (slope, intercept, r) : tuple of floats, optional\n Tuple containing the result of the least-squares fit, if that is\n performed by `probplot`. `r` is the square root of the coefficient of\n determination. If ``fit=False`` and ``plot=None``, this tuple is not\n returned.\n\n Notes\n -----\n Even if `plot` is given, the figure is not shown or saved by `probplot`;\n ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after\n calling `probplot`.\n\n `probplot` generates a probability plot, which should not be confused with\n a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this\n type, see ``statsmodels.api.ProbPlot``.\n\n The formula used for the theoretical quantiles (horizontal axis of the\n probability plot) is Filliben's estimate::\n\n quantiles = dist.ppf(val), for\n\n 0.5**(1/n), for i = n\n val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1\n 1 - 0.5**(1/n), for i = 1\n\n where ``i`` indicates the i-th ordered value and ``n`` is the total number\n of values.\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> nsample = 100\n >>> np.random.seed(7654321)\n\n A t distribution with small degrees of freedom:\n\n >>> ax1 = plt.subplot(221)\n >>> x = stats.t.rvs(3, size=nsample)\n >>> res = stats.probplot(x, plot=plt)\n\n A t distribution with larger degrees of freedom:\n\n >>> ax2 = plt.subplot(222)\n >>> x = stats.t.rvs(25, size=nsample)\n >>> res = stats.probplot(x, plot=plt)\n\n A mixture of two normal distributions with broadcasting:\n\n >>> ax3 = plt.subplot(223)\n >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],\n ... 
size=(nsample//2,2)).ravel()\n >>> res = stats.probplot(x, plot=plt)\n\n A standard normal distribution:\n\n >>> ax4 = plt.subplot(224)\n >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)\n >>> res = stats.probplot(x, plot=plt)\n\n Produce a new figure with a loggamma distribution, using the ``dist`` and\n ``sparams`` keywords:\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> x = stats.loggamma.rvs(c=2.5, size=500)\n >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)\n >>> ax.set_title(\"Probplot for loggamma dist with shape parameter 2.5\")\n\n Show the results with Matplotlib:\n\n >>> plt.show()\n\n \"\"\"\n x = np.asarray(x)\n _perform_fit = fit or (plot is not None)\n if x.size == 0:\n if _perform_fit:\n return (x, x), (np.nan, np.nan, 0.0)\n else:\n return x, x\n\n osm_uniform = _calc_uniform_order_statistic_medians(len(x))\n dist = _parse_dist_kw(dist, enforce_subclass=False)\n if sparams is None:\n sparams = ()\n if isscalar(sparams):\n sparams = (sparams,)\n if not isinstance(sparams, tuple):\n sparams = tuple(sparams)\n\n osm = dist.ppf(osm_uniform, *sparams)\n osr = sort(x)\n if _perform_fit:\n # perform a linear least squares fit.\n slope, intercept, r, prob, _ = stats.linregress(osm, osr)\n\n if plot is not None:\n plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')\n _add_axis_labels_title(plot, xlabel='Theoretical quantiles',\n ylabel='Ordered Values',\n title='Probability Plot')\n\n # Add R^2 value to the plot as text\n if rvalue:\n xmin = amin(osm)\n xmax = amax(osm)\n ymin = amin(x)\n ymax = amax(x)\n posx = xmin + 0.70 * (xmax - xmin)\n posy = ymin + 0.01 * (ymax - ymin)\n plot.text(posx, posy, \"$R^2=%1.4f$\" % r**2)\n\n if fit:\n return (osm, osr), (slope, intercept, r)\n else:\n return osm, osr\n\n\ndef ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):\n \"\"\"\n Calculate the shape parameter that maximizes the PPCC.\n\n The probability plot correlation coefficient (PPCC) plot can be used to\n determine the optimal shape parameter for a one-parameter family of\n distributions. ppcc_max returns the shape parameter that would maximize the\n probability plot correlation coefficient for the given data to a\n one-parameter family of distributions.\n\n Parameters\n ----------\n x : array_like\n Input array.\n brack : tuple, optional\n Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)\n then they are assumed to be a starting interval for a downhill bracket\n search (see `scipy.optimize.brent`).\n dist : str or stats.distributions instance, optional\n Distribution or distribution function name. Objects that look enough\n like a stats.distributions instance (i.e. they have a ``ppf`` method)\n are also accepted. The default is ``'tukeylambda'``.\n\n Returns\n -------\n shape_value : float\n The shape parameter at which the probability plot correlation\n coefficient reaches its max value.\n\n See Also\n --------\n ppcc_plot, probplot, boxcox\n\n Notes\n -----\n The brack keyword serves as a starting point which is useful in corner\n cases. One can use a plot to obtain a rough visual estimate of the location\n for the maximum to start the search near it.\n\n References\n ----------\n .. [1] J.J. Filliben, \"The Probability Plot Correlation Coefficient Test for\n Normality\", Technometrics, Vol. 17, pp. 111-117, 1975.\n\n .. 
[2] https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm\n\n Examples\n --------\n First we generate some random data from a Tukey-Lambda distribution,\n with shape parameter -0.7:\n\n >>> from scipy import stats\n >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,\n ... random_state=1234567) + 1e4\n\n Now we explore this data with a PPCC plot as well as the related\n probability plot and Box-Cox normplot. A red line is drawn where we\n expect the PPCC value to be maximal (at the shape parameter -0.7 used\n above):\n\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure(figsize=(8, 6))\n >>> ax = fig.add_subplot(111)\n >>> res = stats.ppcc_plot(x, -5, 5, plot=ax)\n\n We calculate the value where the shape should reach its maximum and a red\n line is drawn there. The line should coincide with the highest point in the\n ppcc_plot.\n\n >>> max = stats.ppcc_max(x)\n >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')\n\n >>> plt.show()\n\n \"\"\"\n dist = _parse_dist_kw(dist)\n osm_uniform = _calc_uniform_order_statistic_medians(len(x))\n osr = sort(x)\n\n # this function computes the x-axis values of the probability plot\n # and computes a linear regression (including the correlation)\n # and returns 1-r so that a minimization function maximizes the\n # correlation\n def tempfunc(shape, mi, yvals, func):\n xvals = func(mi, shape)\n r, prob = stats.pearsonr(xvals, yvals)\n return 1 - r\n\n return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))\n\n\ndef ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):\n \"\"\"\n Calculate and optionally plot probability plot correlation coefficient.\n\n The probability plot correlation coefficient (PPCC) plot can be used to\n determine the optimal shape parameter for a one-parameter family of\n distributions. It cannot be used for distributions without shape parameters\n (like the normal distribution) or with multiple shape parameters.\n\n By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A\n Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed\n distributions via an approximately normal one, and is therefore particularly\n useful in practice.\n\n Parameters\n ----------\n x : array_like\n Input array.\n a, b : scalar\n Lower and upper bounds of the shape parameter to use.\n dist : str or stats.distributions instance, optional\n Distribution or distribution function name. Objects that look enough\n like a stats.distributions instance (i.e. they have a ``ppf`` method)\n are also accepted. The default is ``'tukeylambda'``.\n plot : object, optional\n If given, plots PPCC against the shape parameter.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n N : int, optional\n Number of points on the horizontal axis (equally distributed from\n `a` to `b`).\n\n Returns\n -------\n svals : ndarray\n The shape values for which `ppcc` was calculated.\n ppcc : ndarray\n The calculated probability plot correlation coefficient values.\n\n See Also\n --------\n ppcc_max, probplot, boxcox_normplot, tukeylambda\n\n References\n ----------\n J.J. Filliben, \"The Probability Plot Correlation Coefficient Test for\n Normality\", Technometrics, Vol. 17, pp. 
111-117, 1975.\n\n Examples\n --------\n First we generate some random data from a Tukey-Lambda distribution,\n with shape parameter -0.7:\n\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> np.random.seed(1234567)\n >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4\n\n Now we explore this data with a PPCC plot as well as the related\n probability plot and Box-Cox normplot. A red line is drawn where we\n expect the PPCC value to be maximal (at the shape parameter -0.7 used\n above):\n\n >>> fig = plt.figure(figsize=(12, 4))\n >>> ax1 = fig.add_subplot(131)\n >>> ax2 = fig.add_subplot(132)\n >>> ax3 = fig.add_subplot(133)\n >>> res = stats.probplot(x, plot=ax1)\n >>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)\n >>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)\n >>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')\n >>> plt.show()\n\n \"\"\"\n if b <= a:\n raise ValueError(\"`b` has to be larger than `a`.\")\n\n svals = np.linspace(a, b, num=N)\n ppcc = np.empty_like(svals)\n for k, sval in enumerate(svals):\n _, r2 = probplot(x, sval, dist=dist, fit=True)\n ppcc[k] = r2[-1]\n\n if plot is not None:\n plot.plot(svals, ppcc, 'x')\n _add_axis_labels_title(plot, xlabel='Shape Values',\n ylabel='Prob Plot Corr. Coef.',\n title='(%s) PPCC Plot' % dist)\n\n return svals, ppcc\n\n\ndef boxcox_llf(lmb, data):\n r\"\"\"The boxcox log-likelihood function.\n\n Parameters\n ----------\n lmb : scalar\n Parameter for Box-Cox transformation. See `boxcox` for details.\n data : array_like\n Data to calculate Box-Cox log-likelihood for. If `data` is\n multi-dimensional, the log-likelihood is calculated along the first\n axis.\n\n Returns\n -------\n llf : float or ndarray\n Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,\n an array otherwise.\n\n See Also\n --------\n boxcox, probplot, boxcox_normplot, boxcox_normmax\n\n Notes\n -----\n The Box-Cox log-likelihood function is defined here as\n\n .. math::\n\n llf = (\\lambda - 1) \\sum_i(\\log(x_i)) -\n N/2 \\log(\\sum_i (y_i - \\bar{y})^2 / N),\n\n where ``y`` is the Box-Cox transformed input data ``x``.\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n >>> np.random.seed(1245)\n\n Generate some random variates and calculate Box-Cox log-likelihood values\n for them for a range of ``lmbda`` values:\n\n >>> x = stats.loggamma.rvs(5, loc=10, size=1000)\n >>> lmbdas = np.linspace(-2, 10)\n >>> llf = np.zeros(lmbdas.shape, dtype=float)\n >>> for ii, lmbda in enumerate(lmbdas):\n ... llf[ii] = stats.boxcox_llf(lmbda, x)\n\n Also find the optimal lmbda value with `boxcox`:\n\n >>> x_most_normal, lmbda_optimal = stats.boxcox(x)\n\n Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a\n horizontal line to check that that's really the optimum:\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(lmbdas, llf, 'b.-')\n >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')\n >>> ax.set_xlabel('lmbda parameter')\n >>> ax.set_ylabel('Box-Cox log-likelihood')\n\n Now add some probability plots to show that where the log-likelihood is\n maximized the data transformed with `boxcox` looks closest to normal:\n\n >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'\n >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):\n ... xt = stats.boxcox(x, lmbda=lmbda)\n ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)\n ... 
ax_inset = inset_axes(ax, width=\"20%\", height=\"20%\", loc=loc)\n ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')\n ... ax_inset.set_xticklabels([])\n ... ax_inset.set_yticklabels([])\n ... ax_inset.set_title(r'$\\lambda=%1.2f$' % lmbda)\n\n >>> plt.show()\n\n \"\"\"\n data = np.asarray(data)\n N = data.shape[0]\n if N == 0:\n return np.nan\n\n logdata = np.log(data)\n\n # Compute the variance of the transformed data.\n if lmb == 0:\n variance = np.var(logdata, axis=0)\n else:\n # Transform without the constant offset 1/lmb. The offset does\n # not effect the variance, and the subtraction of the offset can\n # lead to loss of precision.\n variance = np.var(data**lmb / lmb, axis=0)\n\n return (lmb - 1) * np.sum(logdata, axis=0) - N/2 * np.log(variance)\n\n\ndef _boxcox_conf_interval(x, lmax, alpha):\n # Need to find the lambda for which\n # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1\n fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)\n target = boxcox_llf(lmax, x) - fac\n\n def rootfunc(lmbda, data, target):\n return boxcox_llf(lmbda, data) - target\n\n # Find positive endpoint of interval in which answer is to be found\n newlm = lmax + 0.5\n N = 0\n while (rootfunc(newlm, x, target) > 0.0) and (N < 500):\n newlm += 0.1\n N += 1\n\n if N == 500:\n raise RuntimeError(\"Could not find endpoint.\")\n\n lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))\n\n # Now find negative interval in the same way\n newlm = lmax - 0.5\n N = 0\n while (rootfunc(newlm, x, target) > 0.0) and (N < 500):\n newlm -= 0.1\n N += 1\n\n if N == 500:\n raise RuntimeError(\"Could not find endpoint.\")\n\n lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))\n return lmminus, lmplus\n\n\ndef boxcox(x, lmbda=None, alpha=None):\n r\"\"\"\n Return a dataset transformed by a Box-Cox power transformation.\n\n Parameters\n ----------\n x : ndarray\n Input array. Must be positive 1-dimensional. Must not be constant.\n lmbda : {None, scalar}, optional\n If `lmbda` is not None, do the transformation for that value.\n\n If `lmbda` is None, find the lambda that maximizes the log-likelihood\n function and return it as the second output argument.\n alpha : {None, float}, optional\n If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence\n interval for `lmbda` as the third output argument.\n Must be between 0.0 and 1.0.\n\n Returns\n -------\n boxcox : ndarray\n Box-Cox power transformed array.\n maxlog : float, optional\n If the `lmbda` parameter is None, the second returned argument is\n the lambda that maximizes the log-likelihood function.\n (min_ci, max_ci) : tuple of float, optional\n If `lmbda` parameter is None and ``alpha`` is not None, this returned\n tuple of floats represents the minimum and maximum confidence limits\n given ``alpha``.\n\n See Also\n --------\n probplot, boxcox_normplot, boxcox_normmax, boxcox_llf\n\n Notes\n -----\n The Box-Cox transform is given by::\n\n y = (x**lmbda - 1) / lmbda, for lmbda != 0\n log(x), for lmbda = 0\n\n `boxcox` requires the input data to be positive. Sometimes a Box-Cox\n transformation provides a shift parameter to achieve this; `boxcox` does\n not. Such a shift parameter is equivalent to adding a positive constant to\n `x` before calling `boxcox`.\n\n The confidence limits returned when ``alpha`` is provided give the interval\n where:\n\n .. 
math::\n\n llf(\\hat{\\lambda}) - llf(\\lambda) < \\frac{1}{2}\\chi^2(1 - \\alpha, 1),\n\n with ``llf`` the log-likelihood function and :math:`\\chi^2` the chi-squared\n function.\n\n References\n ----------\n G.E.P. Box and D.R. Cox, \"An Analysis of Transformations\", Journal of the\n Royal Statistical Society B, 26, 211-252 (1964).\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n\n We generate some random variates from a non-normal distribution and make a\n probability plot for it, to show it is non-normal in the tails:\n\n >>> fig = plt.figure()\n >>> ax1 = fig.add_subplot(211)\n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)\n >>> ax1.set_xlabel('')\n >>> ax1.set_title('Probplot against normal distribution')\n\n We now use `boxcox` to transform the data so it's closest to normal:\n\n >>> ax2 = fig.add_subplot(212)\n >>> xt, _ = stats.boxcox(x)\n >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)\n >>> ax2.set_title('Probplot after Box-Cox transformation')\n\n >>> plt.show()\n\n \"\"\"\n x = np.asarray(x)\n if x.ndim != 1:\n raise ValueError(\"Data must be 1-dimensional.\")\n\n if x.size == 0:\n return x\n\n if np.all(x == x[0]):\n raise ValueError(\"Data must not be constant.\")\n\n if any(x <= 0):\n raise ValueError(\"Data must be positive.\")\n\n if lmbda is not None: # single transformation\n return special.boxcox(x, lmbda)\n\n # If lmbda=None, find the lmbda that maximizes the log-likelihood function.\n lmax = boxcox_normmax(x, method='mle')\n y = boxcox(x, lmax)\n\n if alpha is None:\n return y, lmax\n else:\n # Find confidence interval\n interval = _boxcox_conf_interval(x, lmax, alpha)\n return y, lmax, interval\n\n\ndef boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):\n \"\"\"Compute optimal Box-Cox transform parameter for input data.\n\n Parameters\n ----------\n x : array_like\n Input array.\n brack : 2-tuple, optional\n The starting interval for a downhill bracket search with\n `optimize.brent`. Note that this is in most cases not critical; the\n final result is allowed to be outside this bracket.\n method : str, optional\n The method to determine the optimal transform parameter (`boxcox`\n ``lmbda`` parameter). Options are:\n\n 'pearsonr' (default)\n Maximizes the Pearson correlation coefficient between\n ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be\n normally-distributed.\n\n 'mle'\n Minimizes the log-likelihood `boxcox_llf`. This is the method used\n in `boxcox`.\n\n 'all'\n Use all optimization methods available, and return all results.\n Useful to compare different methods.\n\n Returns\n -------\n maxlog : float or ndarray\n The optimal transform parameter found. 
An array instead of a scalar\n for ``method='all'``.\n\n See Also\n --------\n boxcox, boxcox_llf, boxcox_normplot\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> np.random.seed(1234) # make this example reproducible\n\n Generate some data and determine optimal ``lmbda`` in various ways:\n\n >>> x = stats.loggamma.rvs(5, size=30) + 5\n >>> y, lmax_mle = stats.boxcox(x)\n >>> lmax_pearsonr = stats.boxcox_normmax(x)\n\n >>> lmax_mle\n 7.177...\n >>> lmax_pearsonr\n 7.916...\n >>> stats.boxcox_normmax(x, method='all')\n array([ 7.91667384, 7.17718692])\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)\n >>> ax.axvline(lmax_mle, color='r')\n >>> ax.axvline(lmax_pearsonr, color='g', ls='--')\n\n >>> plt.show()\n\n \"\"\"\n\n def _pearsonr(x, brack):\n osm_uniform = _calc_uniform_order_statistic_medians(len(x))\n xvals = distributions.norm.ppf(osm_uniform)\n\n def _eval_pearsonr(lmbda, xvals, samps):\n # This function computes the x-axis values of the probability plot\n # and computes a linear regression (including the correlation) and\n # returns ``1 - r`` so that a minimization function maximizes the\n # correlation.\n y = boxcox(samps, lmbda)\n yvals = np.sort(y)\n r, prob = stats.pearsonr(xvals, yvals)\n return 1 - r\n\n return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))\n\n def _mle(x, brack):\n def _eval_mle(lmb, data):\n # function to minimize\n return -boxcox_llf(lmb, data)\n\n return optimize.brent(_eval_mle, brack=brack, args=(x,))\n\n def _all(x, brack):\n maxlog = np.empty(2, dtype=float)\n maxlog[0] = _pearsonr(x, brack)\n maxlog[1] = _mle(x, brack)\n return maxlog\n\n methods = {'pearsonr': _pearsonr,\n 'mle': _mle,\n 'all': _all}\n if method not in methods.keys():\n raise ValueError(\"Method %s not recognized.\" % method)\n\n optimfunc = methods[method]\n return optimfunc(x, brack)\n\n\ndef _normplot(method, x, la, lb, plot=None, N=80):\n \"\"\"Compute parameters for a Box-Cox or Yeo-Johnson normality plot,\n optionally show it. See `boxcox_normplot` or `yeojohnson_normplot` for\n details.\"\"\"\n\n if method == 'boxcox':\n title = 'Box-Cox Normality Plot'\n transform_func = boxcox\n else:\n title = 'Yeo-Johnson Normality Plot'\n transform_func = yeojohnson\n\n x = np.asarray(x)\n if x.size == 0:\n return x\n\n if lb <= la:\n raise ValueError(\"`lb` has to be larger than `la`.\")\n\n lmbdas = np.linspace(la, lb, num=N)\n ppcc = lmbdas * 0.0\n for i, val in enumerate(lmbdas):\n # Determine for each lmbda the square root of correlation coefficient\n # of transformed x\n z = transform_func(x, lmbda=val)\n _, (_, _, r) = probplot(z, dist='norm', fit=True)\n ppcc[i] = r\n\n if plot is not None:\n plot.plot(lmbdas, ppcc, 'x')\n _add_axis_labels_title(plot, xlabel='$\\\\lambda$',\n ylabel='Prob Plot Corr. Coef.',\n title=title)\n\n return lmbdas, ppcc\n\n\ndef boxcox_normplot(x, la, lb, plot=None, N=80):\n \"\"\"Compute parameters for a Box-Cox normality plot, optionally show it.\n\n A Box-Cox normality plot shows graphically what the best transformation\n parameter is to use in `boxcox` to obtain a distribution that is close\n to normal.\n\n Parameters\n ----------\n x : array_like\n Input array.\n la, lb : scalar\n The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`\n for Box-Cox transformations. 
These are also the limits of the\n horizontal axis of the plot if that is generated.\n plot : object, optional\n If given, plots the quantiles and least squares fit.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n N : int, optional\n Number of points on the horizontal axis (equally distributed from\n `la` to `lb`).\n\n Returns\n -------\n lmbdas : ndarray\n The ``lmbda`` values for which a Box-Cox transform was done.\n ppcc : ndarray\n Probability Plot Correlelation Coefficient, as obtained from `probplot`\n when fitting the Box-Cox transformed input `x` against a normal\n distribution.\n\n See Also\n --------\n probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max\n\n Notes\n -----\n Even if `plot` is given, the figure is not shown or saved by\n `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``\n should be used after calling `probplot`.\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n\n Generate some non-normally distributed data, and create a Box-Cox plot:\n\n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)\n\n Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in\n the same plot:\n\n >>> _, maxlog = stats.boxcox(x)\n >>> ax.axvline(maxlog, color='r')\n\n >>> plt.show()\n\n \"\"\"\n return _normplot('boxcox', x, la, lb, plot, N)\n\n\ndef yeojohnson(x, lmbda=None):\n r\"\"\"\n Return a dataset transformed by a Yeo-Johnson power transformation.\n\n Parameters\n ----------\n x : ndarray\n Input array. Should be 1-dimensional.\n lmbda : float, optional\n If ``lmbda`` is ``None``, find the lambda that maximizes the\n log-likelihood function and return it as the second output argument.\n Otherwise the transformation is done for the given value.\n\n Returns\n -------\n yeojohnson: ndarray\n Yeo-Johnson power transformed array.\n maxlog : float, optional\n If the `lmbda` parameter is None, the second returned argument is\n the lambda that maximizes the log-likelihood function.\n\n See Also\n --------\n probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox\n\n Notes\n -----\n The Yeo-Johnson transform is given by::\n\n y = ((x + 1)**lmbda - 1) / lmbda, for x >= 0, lmbda != 0\n log(x + 1), for x >= 0, lmbda = 0\n -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda), for x < 0, lmbda != 2\n -log(-x + 1), for x < 0, lmbda = 2\n\n Unlike `boxcox`, `yeojohnson` does not require the input data to be\n positive.\n\n .. versionadded:: 1.2.0\n\n\n References\n ----------\n I. Yeo and R.A. 
Johnson, \"A New Family of Power Transformations to\n Improve Normality or Symmetry\", Biometrika 87.4 (2000):\n\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n\n We generate some random variates from a non-normal distribution and make a\n probability plot for it, to show it is non-normal in the tails:\n\n >>> fig = plt.figure()\n >>> ax1 = fig.add_subplot(211)\n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)\n >>> ax1.set_xlabel('')\n >>> ax1.set_title('Probplot against normal distribution')\n\n We now use `yeojohnson` to transform the data so it's closest to normal:\n\n >>> ax2 = fig.add_subplot(212)\n >>> xt, lmbda = stats.yeojohnson(x)\n >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)\n >>> ax2.set_title('Probplot after Yeo-Johnson transformation')\n\n >>> plt.show()\n\n \"\"\"\n\n x = np.asarray(x)\n if x.size == 0:\n return x\n\n if np.issubdtype(x.dtype, np.complexfloating):\n raise ValueError('Yeo-Johnson transformation is not defined for '\n 'complex numbers.')\n\n if np.issubdtype(x.dtype, np.integer):\n x = x.astype(np.float64, copy=False)\n\n if lmbda is not None:\n return _yeojohnson_transform(x, lmbda)\n\n # if lmbda=None, find the lmbda that maximizes the log-likelihood function.\n lmax = yeojohnson_normmax(x)\n y = _yeojohnson_transform(x, lmax)\n\n return y, lmax\n\n\ndef _yeojohnson_transform(x, lmbda):\n \"\"\"Return x transformed by the Yeo-Johnson power transform with given\n parameter lmbda.\"\"\"\n\n out = np.zeros_like(x)\n pos = x >= 0 # binary mask\n\n # when x >= 0\n if abs(lmbda) < np.spacing(1.):\n out[pos] = np.log1p(x[pos])\n else: # lmbda != 0\n out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda\n\n # when x < 0\n if abs(lmbda - 2) > np.spacing(1.):\n out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)\n else: # lmbda == 2\n out[~pos] = -np.log1p(-x[~pos])\n\n return out\n\n\ndef yeojohnson_llf(lmb, data):\n r\"\"\"The yeojohnson log-likelihood function.\n\n Parameters\n ----------\n lmb : scalar\n Parameter for Yeo-Johnson transformation. See `yeojohnson` for\n details.\n data : array_like\n Data to calculate Yeo-Johnson log-likelihood for. If `data` is\n multi-dimensional, the log-likelihood is calculated along the first\n axis.\n\n Returns\n -------\n llf : float\n Yeo-Johnson log-likelihood of `data` given `lmb`.\n\n See Also\n --------\n yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax\n\n Notes\n -----\n The Yeo-Johnson log-likelihood function is defined here as\n\n .. math::\n\n llf = -N/2 \\log(\\hat{\\sigma}^2) + (\\lambda - 1)\n \\sum_i \\text{ sign }(x_i)\\log(|x_i| + 1)\n\n where :math:`\\hat{\\sigma}^2` is estimated variance of the the Yeo-Johnson\n transformed input data ``x``.\n\n .. versionadded:: 1.2.0\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n >>> np.random.seed(1245)\n\n Generate some random variates and calculate Yeo-Johnson log-likelihood\n values for them for a range of ``lmbda`` values:\n\n >>> x = stats.loggamma.rvs(5, loc=10, size=1000)\n >>> lmbdas = np.linspace(-2, 10)\n >>> llf = np.zeros(lmbdas.shape, dtype=float)\n >>> for ii, lmbda in enumerate(lmbdas):\n ... llf[ii] = stats.yeojohnson_llf(lmbda, x)\n\n Also find the optimal lmbda value with `yeojohnson`:\n\n >>> x_most_normal, lmbda_optimal = stats.yeojohnson(x)\n\n Plot the log-likelihood as function of lmbda. 
Add the optimal lmbda as a\n horizontal line to check that that's really the optimum:\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(lmbdas, llf, 'b.-')\n >>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r')\n >>> ax.set_xlabel('lmbda parameter')\n >>> ax.set_ylabel('Yeo-Johnson log-likelihood')\n\n Now add some probability plots to show that where the log-likelihood is\n maximized the data transformed with `yeojohnson` looks closest to normal:\n\n >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'\n >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):\n ... xt = stats.yeojohnson(x, lmbda=lmbda)\n ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)\n ... ax_inset = inset_axes(ax, width=\"20%\", height=\"20%\", loc=loc)\n ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')\n ... ax_inset.set_xticklabels([])\n ... ax_inset.set_yticklabels([])\n ... ax_inset.set_title(r'$\\lambda=%1.2f$' % lmbda)\n\n >>> plt.show()\n\n \"\"\"\n data = np.asarray(data)\n n_samples = data.shape[0]\n\n if n_samples == 0:\n return np.nan\n\n trans = _yeojohnson_transform(data, lmb)\n\n loglike = -n_samples / 2 * np.log(trans.var(axis=0))\n loglike += (lmb - 1) * (np.sign(data) * np.log(np.abs(data) + 1)).sum(axis=0)\n\n return loglike\n\n\ndef yeojohnson_normmax(x, brack=(-2, 2)):\n \"\"\"\n Compute optimal Yeo-Johnson transform parameter.\n\n Compute optimal Yeo-Johnson transform parameter for input data, using\n maximum likelihood estimation.\n\n Parameters\n ----------\n x : array_like\n Input array.\n brack : 2-tuple, optional\n The starting interval for a downhill bracket search with\n `optimize.brent`. Note that this is in most cases not critical; the\n final result is allowed to be outside this bracket.\n\n Returns\n -------\n maxlog : float\n The optimal transform parameter found.\n\n See Also\n --------\n yeojohnson, yeojohnson_llf, yeojohnson_normplot\n\n Notes\n -----\n .. versionadded:: 1.2.0\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> np.random.seed(1234) # make this example reproducible\n\n Generate some data and determine optimal ``lmbda``\n\n >>> x = stats.loggamma.rvs(5, size=30) + 5\n >>> lmax = stats.yeojohnson_normmax(x)\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax)\n >>> ax.axvline(lmax, color='r')\n\n >>> plt.show()\n\n \"\"\"\n\n def _neg_llf(lmbda, data):\n return -yeojohnson_llf(lmbda, data)\n\n return optimize.brent(_neg_llf, brack=brack, args=(x,))\n\n\ndef yeojohnson_normplot(x, la, lb, plot=None, N=80):\n \"\"\"Compute parameters for a Yeo-Johnson normality plot, optionally show it.\n\n A Yeo-Johnson normality plot shows graphically what the best\n transformation parameter is to use in `yeojohnson` to obtain a\n distribution that is close to normal.\n\n Parameters\n ----------\n x : array_like\n Input array.\n la, lb : scalar\n The lower and upper bounds for the ``lmbda`` values to pass to\n `yeojohnson` for Yeo-Johnson transformations. 
These are also the\n limits of the horizontal axis of the plot if that is generated.\n plot : object, optional\n If given, plots the quantiles and least squares fit.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n N : int, optional\n Number of points on the horizontal axis (equally distributed from\n `la` to `lb`).\n\n Returns\n -------\n lmbdas : ndarray\n The ``lmbda`` values for which a Yeo-Johnson transform was done.\n ppcc : ndarray\n Probability Plot Correlelation Coefficient, as obtained from `probplot`\n when fitting the Box-Cox transformed input `x` against a normal\n distribution.\n\n See Also\n --------\n probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max\n\n Notes\n -----\n Even if `plot` is given, the figure is not shown or saved by\n `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``\n should be used after calling `probplot`.\n\n .. versionadded:: 1.2.0\n\n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n\n Generate some non-normally distributed data, and create a Yeo-Johnson plot:\n\n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax)\n\n Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in\n the same plot:\n\n >>> _, maxlog = stats.yeojohnson(x)\n >>> ax.axvline(maxlog, color='r')\n\n >>> plt.show()\n\n \"\"\"\n return _normplot('yeojohnson', x, la, lb, plot, N)\n\n\nShapiroResult = namedtuple('ShapiroResult', ('statistic', 'pvalue'))\n\ndef shapiro(x):\n \"\"\"\n Perform the Shapiro-Wilk test for normality.\n\n The Shapiro-Wilk test tests the null hypothesis that the\n data was drawn from a normal distribution.\n\n Parameters\n ----------\n x : array_like\n Array of sample data.\n\n Returns\n -------\n statistic : float\n The test statistic.\n p-value : float\n The p-value for the hypothesis test.\n\n See Also\n --------\n anderson : The Anderson-Darling test for normality\n kstest : The Kolmogorov-Smirnov test for goodness of fit.\n\n Notes\n -----\n The algorithm used is described in [4]_ but censoring parameters as\n described are not implemented. For N > 5000 the W test statistic is accurate\n but the p-value may not be.\n\n The chance of rejecting the null hypothesis when it is true is close to 5%\n regardless of sample size.\n\n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm\n .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for\n normality (complete samples), Biometrika, Vol. 52, pp. 591-611.\n .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,\n Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of\n Statistical Modeling and Analytics, Vol. 2, pp. 21-33.\n .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 
4.\n\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678)\n >>> x = stats.norm.rvs(loc=5, scale=3, size=100)\n >>> shapiro_test = stats.shapiro(x)\n >>> shapiro_test\n ShapiroResult(statistic=0.9772805571556091, pvalue=0.08144091814756393)\n >>> shapiro_test.statistic\n 0.9772805571556091\n >>> shapiro_test.pvalue\n 0.08144091814756393\n\n \"\"\"\n x = np.ravel(x)\n\n N = len(x)\n if N < 3:\n raise ValueError(\"Data must be at least length 3.\")\n\n a = zeros(N, 'f')\n init = 0\n\n y = sort(x)\n a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)\n if ifault not in [0, 2]:\n warnings.warn(\"Input data for shapiro has range zero. The results \"\n \"may not be accurate.\")\n if N > 5000:\n warnings.warn(\"p-value may not be accurate for N > 5000.\")\n\n return ShapiroResult(w, pw)\n\n\n# Values from Stephens, M A, \"EDF Statistics for Goodness of Fit and\n# Some Comparisons\", Journal of the American Statistical\n# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737\n_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])\n_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])\n# From Stephens, M A, \"Goodness of Fit for the Extreme Value Distribution\",\n# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.\n_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])\n# From Stephens, M A, \"Tests of Fit for the Logistic Distribution Based\n# on the Empirical Distribution Function.\", Biometrika,\n# Vol. 66, Issue 3, Dec. 1979, pp 591-595.\n_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])\n\n\nAndersonResult = namedtuple('AndersonResult', ('statistic',\n 'critical_values',\n 'significance_level'))\n\n\ndef anderson(x, dist='norm'):\n \"\"\"\n Anderson-Darling test for data coming from a particular distribution.\n\n The Anderson-Darling test tests the null hypothesis that a sample is\n drawn from a population that follows a particular distribution.\n For the Anderson-Darling test, the critical values depend on\n which distribution is being tested against. This function works\n for normal, exponential, logistic, or Gumbel (Extreme Value\n Type I) distributions.\n\n Parameters\n ----------\n x : array_like\n Array of sample data.\n dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1'}, optional\n The type of distribution to test against. The default is 'norm'.\n The names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the\n same distribution.\n\n Returns\n -------\n statistic : float\n The Anderson-Darling test statistic.\n critical_values : list\n The critical values for this distribution.\n significance_level : list\n The significance levels for the corresponding critical values\n in percents. The function returns critical values for a\n differing set of significance levels depending on the\n distribution that is being tested against.\n\n See Also\n --------\n kstest : The Kolmogorov-Smirnov test for goodness-of-fit.\n\n Notes\n -----\n Critical values provided are for the following significance levels:\n\n normal/exponential\n 15%, 10%, 5%, 2.5%, 1%\n logistic\n 25%, 10%, 5%, 2.5%, 1%, 0.5%\n Gumbel\n 25%, 10%, 5%, 2.5%, 1%\n\n If the returned statistic is larger than these critical values then\n for the corresponding significance level, the null hypothesis that\n the data come from the chosen distribution can be rejected.\n The returned statistic is referred to as 'A2' in the references.\n\n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm\n .. 
[2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and\n Some Comparisons, Journal of the American Statistical Association,\n Vol. 69, pp. 730-737.\n .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit\n Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,\n pp. 357-369.\n .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value\n Distribution, Biometrika, Vol. 64, pp. 583-588.\n .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference\n to Tests for Exponentiality , Technical Report No. 262,\n Department of Statistics, Stanford University, Stanford, CA.\n .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution\n Based on the Empirical Distribution Function, Biometrika, Vol. 66,\n pp. 591-595.\n\n \"\"\"\n if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l',\n 'gumbel_r', 'extreme1', 'logistic']:\n raise ValueError(\"Invalid distribution; dist must be 'norm', \"\n \"'expon', 'gumbel', 'extreme1' or 'logistic'.\")\n y = sort(x)\n xbar = np.mean(x, axis=0)\n N = len(y)\n if dist == 'norm':\n s = np.std(x, ddof=1, axis=0)\n w = (y - xbar) / s\n logcdf = distributions.norm.logcdf(w)\n logsf = distributions.norm.logsf(w)\n sig = array([15, 10, 5, 2.5, 1])\n critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)\n elif dist == 'expon':\n w = y / xbar\n logcdf = distributions.expon.logcdf(w)\n logsf = distributions.expon.logsf(w)\n sig = array([15, 10, 5, 2.5, 1])\n critical = around(_Avals_expon / (1.0 + 0.6/N), 3)\n elif dist == 'logistic':\n def rootfunc(ab, xj, N):\n a, b = ab\n tmp = (xj - a) / b\n tmp2 = exp(tmp)\n val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,\n np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]\n return array(val)\n\n sol0 = array([xbar, np.std(x, ddof=1, axis=0)])\n sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)\n w = (y - sol[0]) / sol[1]\n logcdf = distributions.logistic.logcdf(w)\n logsf = distributions.logistic.logsf(w)\n sig = array([25, 10, 5, 2.5, 1, 0.5])\n critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)\n elif dist == 'gumbel_r':\n xbar, s = distributions.gumbel_r.fit(x)\n w = (y - xbar) / s\n logcdf = distributions.gumbel_r.logcdf(w)\n logsf = distributions.gumbel_r.logsf(w)\n sig = array([25, 10, 5, 2.5, 1])\n critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)\n else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1')\n xbar, s = distributions.gumbel_l.fit(x)\n w = (y - xbar) / s\n logcdf = distributions.gumbel_l.logcdf(w)\n logsf = distributions.gumbel_l.logsf(w)\n sig = array([25, 10, 5, 2.5, 1])\n critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)\n\n i = arange(1, N + 1)\n A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)\n\n return AndersonResult(A2, critical, sig)\n\n\ndef _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):\n \"\"\"\n Compute A2akN equation 7 of Scholz and Stephens.\n\n Parameters\n ----------\n samples : sequence of 1-D array_like\n Array of sample arrays.\n Z : array_like\n Sorted array of all observations.\n Zstar : array_like\n Sorted array of unique observations.\n k : int\n Number of samples.\n n : array_like\n Number of observations in each sample.\n N : int\n Total number of observations.\n\n Returns\n -------\n A2aKN : float\n The A2aKN statistics of Scholz and Stephens 1987.\n \"\"\"\n\n A2akN = 0.\n Z_ssorted_left = Z.searchsorted(Zstar, 'left')\n if N == Zstar.size:\n lj = 1.\n else:\n lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left\n Bj = Z_ssorted_left + lj / 2.\n 
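# The loop below accumulates A2akN from eq. 7 of Scholz & Stephens (1987):\n # for each sample i, Mij counts its observations <= Zstar_j, minus half of\n # any ties (fij / 2., the midrank convention), and is compared with its\n # null expectation Bj * n[i] / N (Bj being the pooled midrank of Zstar_j),\n # weighted by lj / N, the relative multiplicity of each distinct value.\n 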
for i in arange(0, k):\n s = np.sort(samples[i])\n s_ssorted_right = s.searchsorted(Zstar, side='right')\n Mij = s_ssorted_right.astype(float)\n fij = s_ssorted_right - s.searchsorted(Zstar, 'left')\n Mij -= fij / 2.\n inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)\n A2akN += inner.sum() / n[i]\n A2akN *= (N - 1.) / N\n return A2akN\n\n\ndef _anderson_ksamp_right(samples, Z, Zstar, k, n, N):\n \"\"\"\n Compute A2akN equation 6 of Scholz & Stephens.\n\n Parameters\n ----------\n samples : sequence of 1-D array_like\n Array of sample arrays.\n Z : array_like\n Sorted array of all observations.\n Zstar : array_like\n Sorted array of unique observations.\n k : int\n Number of samples.\n n : array_like\n Number of observations in each sample.\n N : int\n Total number of observations.\n\n Returns\n -------\n A2KN : float\n The A2KN statistics of Scholz and Stephens 1987.\n \"\"\"\n\n A2kN = 0.\n lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],\n 'left')\n Bj = lj.cumsum()\n for i in arange(0, k):\n s = np.sort(samples[i])\n Mij = s.searchsorted(Zstar[:-1], side='right')\n inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))\n A2kN += inner.sum() / n[i]\n return A2kN\n\n\nAnderson_ksampResult = namedtuple('Anderson_ksampResult',\n ('statistic', 'critical_values',\n 'significance_level'))\n\n\ndef anderson_ksamp(samples, midrank=True):\n \"\"\"The Anderson-Darling test for k-samples.\n\n The k-sample Anderson-Darling test is a modification of the\n one-sample Anderson-Darling test. It tests the null hypothesis\n that k-samples are drawn from the same population without having\n to specify the distribution function of that population. The\n critical values depend on the number of samples.\n\n Parameters\n ----------\n samples : sequence of 1-D array_like\n Array of sample data in arrays.\n midrank : bool, optional\n Type of Anderson-Darling test which is computed. Default\n (True) is the midrank test applicable to continuous and\n discrete populations. If False, the right side empirical\n distribution is used.\n\n Returns\n -------\n statistic : float\n Normalized k-sample Anderson-Darling test statistic.\n critical_values : array\n The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%,\n 0.5%, 0.1%.\n significance_level : float\n An approximate significance level at which the null hypothesis for the\n provided samples can be rejected. The value is floored / capped at\n 0.1% / 25%.\n\n Raises\n ------\n ValueError\n If less than 2 samples are provided, a sample is empty, or no\n distinct observations are in the samples.\n\n See Also\n --------\n ks_2samp : 2 sample Kolmogorov-Smirnov test\n anderson : 1 sample Anderson-Darling test\n\n Notes\n -----\n [1]_ defines three versions of the k-sample Anderson-Darling test:\n one for continuous distributions and two for discrete\n distributions, in which ties between samples may occur. The\n default of this routine is to compute the version based on the\n midrank empirical distribution function. This test is applicable\n to continuous and discrete data. If midrank is set to False, the\n right side empirical distribution is used for a test for discrete\n data. According to [1]_, the two discrete test statistics differ\n only slightly if a few collisions due to round-off errors occur in\n the test not adjusted for ties between samples.\n\n The critical values corresponding to the significance levels from 0.01\n to 0.25 are taken from [1]_. p-values are floored / capped\n at 0.1% / 25%. 
Since the range of critical values might be extended in\n future releases, it is recommended not to test ``p == 0.25``, but rather\n ``p >= 0.25`` (analogously for the lower bound).\n\n .. versionadded:: 0.14.0\n\n References\n ----------\n .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample\n Anderson-Darling Tests, Journal of the American Statistical\n Association, Vol. 82, pp. 918-924.\n\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(314159)\n\n The null hypothesis that the two random samples come from the same\n distribution can be rejected at the 5% level because the returned\n test value is greater than the critical value for 5% (1.961) but\n not at the 2.5% level. The interpolation gives an approximate\n significance level of 3.2%:\n\n >>> stats.anderson_ksamp([np.random.normal(size=50),\n ... np.random.normal(loc=0.5, size=30)])\n (2.4615796189876105,\n array([ 0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546]),\n 0.03176687568842282)\n\n\n The null hypothesis cannot be rejected for three samples from an\n identical distribution. The reported p-value (25%) has been capped and\n may not be very accurate (since it corresponds to the value 0.449\n whereas the statistic is -0.731):\n\n >>> stats.anderson_ksamp([np.random.normal(size=50),\n ... np.random.normal(size=30), np.random.normal(size=20)])\n (-0.73091722665244196,\n array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856,\n 4.07210043, 5.56419101]),\n 0.25)\n\n \"\"\"\n k = len(samples)\n if (k < 2):\n raise ValueError(\"anderson_ksamp needs at least two samples\")\n\n samples = list(map(np.asarray, samples))\n Z = np.sort(np.hstack(samples))\n N = Z.size\n Zstar = np.unique(Z)\n if Zstar.size < 2:\n raise ValueError(\"anderson_ksamp needs more than one distinct \"\n \"observation\")\n\n n = np.array([sample.size for sample in samples])\n if any(n == 0):\n raise ValueError(\"anderson_ksamp encountered sample without \"\n \"observations\")\n\n if midrank:\n A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)\n else:\n A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)\n\n H = (1. / n).sum()\n hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()\n h = hs_cs[-1] + 1\n g = (hs_cs / arange(2, N)).sum()\n\n a = (4*g - 6) * (k - 1) + (10 - 6*g)*H\n b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6\n c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h\n d = (2*h + 6)*k**2 - 4*h*k\n sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) 
* (N - 3.))\n m = k - 1\n A2 = (A2kN - m) / math.sqrt(sigmasq)\n\n # The b_i values are the interpolation coefficients from Table 2\n # of Scholz and Stephens 1987\n b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085])\n b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615])\n b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154])\n critical = b0 + b1 / math.sqrt(m) + b2 / m\n\n sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001])\n if A2 < critical.min():\n p = sig.max()\n warnings.warn(\"p-value capped: true value larger than {}\".format(p),\n stacklevel=2)\n elif A2 > critical.max():\n p = sig.min()\n warnings.warn(\"p-value floored: true value smaller than {}\".format(p),\n stacklevel=2)\n else:\n # interpolation of probit of significance level\n pf = np.polyfit(critical, log(sig), 2)\n p = math.exp(np.polyval(pf, A2))\n\n return Anderson_ksampResult(A2, critical, p)\n\n\nAnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))\n\n\ndef ansari(x, y):\n \"\"\"\n Perform the Ansari-Bradley test for equal scale parameters.\n\n The Ansari-Bradley test ([1]_, [2]_) is a non-parametric test\n for the equality of the scale parameter of the distributions\n from which two samples were drawn.\n\n Parameters\n ----------\n x, y : array_like\n Arrays of sample data.\n\n Returns\n -------\n statistic : float\n The Ansari-Bradley test statistic.\n pvalue : float\n The p-value of the hypothesis test.\n\n See Also\n --------\n fligner : A non-parametric test for the equality of k variances\n mood : A non-parametric test for the equality of two scale parameters\n\n Notes\n -----\n The p-value given is exact when the sample sizes are both less than\n 55 and there are no ties, otherwise a normal approximation for the\n p-value is used.\n\n References\n ----------\n .. [1] Ansari, A. R. and Bradley, R. A. (1960) Rank-sum tests for\n dispersions, Annals of Mathematical Statistics, 31, 1174-1189.\n .. [2] Sprent, Peter and N.C. Smeeton. Applied nonparametric\n statistical methods. 3rd ed. Chapman and Hall/CRC. 2001.\n Section 5.8.2.\n\n Examples\n --------\n >>> from scipy.stats import ansari\n\n For these examples, we'll create three random data sets. The first\n two, with sizes 35 and 25, are drawn from a normal distribution with\n mean 0 and standard deviation 2. The third data set has size 25 and\n is drawn from a normal distribution with standard deviation 1.25.\n\n >>> np.random.seed(1234567890)\n >>> x1 = np.random.normal(loc=0, scale=2, size=35)\n >>> x2 = np.random.normal(loc=0, scale=2, size=25)\n >>> x3 = np.random.normal(loc=0, scale=1.25, size=25)\n\n First we apply `ansari` to `x1` and `x2`. 
These samples are drawn\n from the same distribution, so we expect the Ansari-Bradley test\n should not lead us to conclude that the scales of the distributions\n are different.\n\n >>> ansari(x1, x2)\n AnsariResult(statistic=511.0, pvalue=0.35506083719834347)\n\n With a p-value of 0.355, we cannot conclude that there is a\n significant difference in the scales (as expected).\n\n Now apply the test to `x1` and `x3`:\n\n >>> ansari(x1, x3)\n AnsariResult(statistic=452.0, pvalue=0.006280278681971285)\n\n With a p-value of 0.00628, the test provides strong evidence that\n the scales of the distributions from which the samples were drawn\n are not equal.\n \"\"\"\n x, y = asarray(x), asarray(y)\n n = len(x)\n m = len(y)\n if m < 1:\n raise ValueError(\"Not enough other observations.\")\n if n < 1:\n raise ValueError(\"Not enough test observations.\")\n\n N = m + n\n xy = r_[x, y] # combine\n rank = stats.rankdata(xy)\n symrank = amin(array((rank, N - rank + 1)), 0)\n AB = np.sum(symrank[:n], axis=0)\n uxy = unique(xy)\n repeats = (len(uxy) != len(xy))\n exact = ((m < 55) and (n < 55) and not repeats)\n if repeats and (m < 55 or n < 55):\n warnings.warn(\"Ties preclude use of exact statistic.\")\n if exact:\n astart, a1, ifault = statlib.gscale(n, m)\n ind = AB - astart\n total = np.sum(a1, axis=0)\n if ind < len(a1)/2.0:\n cind = int(ceil(ind))\n if ind == cind:\n pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total\n else:\n pval = 2.0 * np.sum(a1[:cind], axis=0) / total\n else:\n find = int(floor(ind))\n if ind == floor(ind):\n pval = 2.0 * np.sum(a1[find:], axis=0) / total\n else:\n pval = 2.0 * np.sum(a1[find+1:], axis=0) / total\n return AnsariResult(AB, min(1.0, pval))\n\n # otherwise compute normal approximation\n if N % 2: # N odd\n mnAB = n * (N+1.0)**2 / 4.0 / N\n varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)\n else:\n mnAB = n * (N+2.0) / 4.0\n varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)\n if repeats: # adjust variance estimates\n # compute np.sum(tj * rj**2,axis=0)\n fac = np.sum(symrank**2, axis=0)\n if N % 2: # N odd\n varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))\n else: # N even\n varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))\n\n z = (AB - mnAB) / sqrt(varAB)\n pval = distributions.norm.sf(abs(z)) * 2.0\n return AnsariResult(AB, pval)\n\n\nBartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))\n\n\ndef bartlett(*args):\n \"\"\"\n Perform Bartlett's test for equal variances.\n\n Bartlett's test tests the null hypothesis that all input samples\n are from populations with equal variances. For samples\n from significantly non-normal populations, Levene's test\n `levene` is more robust.\n\n Parameters\n ----------\n sample1, sample2,... : array_like\n arrays of sample data. Only 1d arrays are accepted, they may have\n different lengths.\n\n Returns\n -------\n statistic : float\n The test statistic.\n pvalue : float\n The p-value of the test.\n\n See Also\n --------\n fligner : A non-parametric test for the equality of k variances\n levene : A robust parametric test for equality of k variances\n\n Notes\n -----\n Conover et al. (1981) examine many of the existing parametric and\n nonparametric tests by extensive simulations and they conclude that the\n tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be\n superior in terms of robustness of departures from normality and power\n ([3]_).\n\n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n .. 
[2] Snedecor, George W. and Cochran, William G. (1989), Statistical\n Methods, Eighth Edition, Iowa State University Press.\n\n .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and\n Hypothesis Testing based on Quadratic Inference Function. Technical\n Report #99-03, Center for Likelihood Studies, Pennsylvania State\n University.\n\n .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical\n Tests. Proceedings of the Royal Society of London. Series A,\n Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.\n\n Examples\n --------\n Test whether or not the lists `a`, `b` and `c` come from populations\n with equal variances.\n\n >>> from scipy.stats import bartlett\n >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]\n >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]\n >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]\n >>> stat, p = bartlett(a, b, c)\n >>> p\n 1.1254782518834628e-05\n\n The very small p-value suggests that the populations do not have equal\n variances.\n\n This is not surprising, given that the sample variance of `b` is much\n larger than that of `a` and `c`:\n\n >>> [np.var(x, ddof=1) for x in [a, b, c]]\n [0.007054444444444413, 0.13073888888888888, 0.008890000000000002]\n \"\"\"\n # Handle empty input and input that is not 1d\n for a in args:\n if np.asanyarray(a).size == 0:\n return BartlettResult(np.nan, np.nan)\n if np.asanyarray(a).ndim > 1:\n raise ValueError('Samples must be one-dimensional.')\n\n k = len(args)\n if k < 2:\n raise ValueError(\"Must enter at least two input sample vectors.\")\n Ni = np.empty(k)\n ssq = np.empty(k, 'd')\n for j in range(k):\n Ni[j] = len(args[j])\n ssq[j] = np.var(args[j], ddof=1)\n Ntot = np.sum(Ni, axis=0)\n spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))\n numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)\n denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -\n 1.0/(Ntot - k))\n T = numer / denom\n pval = distributions.chi2.sf(T, k - 1) # 1 - cdf\n\n return BartlettResult(T, pval)\n\n\nLeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))\n\n\ndef levene(*args, center='median', proportiontocut=0.05):\n \"\"\"\n Perform Levene test for equal variances.\n\n The Levene test tests the null hypothesis that all input samples\n are from populations with equal variances. Levene's test is an\n alternative to Bartlett's test `bartlett` in the case where\n there are significant deviations from normality.\n\n Parameters\n ----------\n sample1, sample2, ... : array_like\n The sample data, possibly with different lengths. Only one-dimensional\n samples are accepted.\n center : {'mean', 'median', 'trimmed'}, optional\n Which function of the data to use in the test. The default\n is 'median'.\n proportiontocut : float, optional\n When `center` is 'trimmed', this gives the proportion of data points\n to cut from each end. (See `scipy.stats.trim_mean`.)\n Default is 0.05.\n\n Returns\n -------\n statistic : float\n The test statistic.\n pvalue : float\n The p-value for the test.\n\n Notes\n -----\n Three variations of Levene's test are possible. 
The possibilities\n and their recommended usages are:\n\n * 'median' : Recommended for skewed (non-normal) distributions>\n * 'mean' : Recommended for symmetric, moderate-tailed distributions.\n * 'trimmed' : Recommended for heavy-tailed distributions.\n\n The test version using the mean was proposed in the original article\n of Levene ([2]_) while the median and trimmed mean have been studied by\n Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe\n test.\n\n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\n .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:\n Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,\n Stanford University Press, pp. 278-292.\n .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American\n Statistical Association, 69, 364-367\n\n Examples\n --------\n Test whether or not the lists `a`, `b` and `c` come from populations\n with equal variances.\n\n >>> from scipy.stats import levene\n >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]\n >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]\n >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]\n >>> stat, p = levene(a, b, c)\n >>> p\n 0.002431505967249681\n\n The small p-value suggests that the populations do not have equal\n variances.\n\n This is not surprising, given that the sample variance of `b` is much\n larger than that of `a` and `c`:\n\n >>> [np.var(x, ddof=1) for x in [a, b, c]]\n [0.007054444444444413, 0.13073888888888888, 0.008890000000000002]\n \"\"\"\n if center not in ['mean', 'median', 'trimmed']:\n raise ValueError(\"center must be 'mean', 'median' or 'trimmed'.\")\n\n k = len(args)\n if k < 2:\n raise ValueError(\"Must enter at least two input sample vectors.\")\n # check for 1d input\n for j in range(k):\n if np.asanyarray(args[j]).ndim > 1:\n raise ValueError('Samples must be one-dimensional.')\n\n Ni = np.empty(k)\n Yci = np.empty(k, 'd')\n\n if center == 'median':\n func = lambda x: np.median(x, axis=0)\n elif center == 'mean':\n func = lambda x: np.mean(x, axis=0)\n else: # center == 'trimmed'\n args = tuple(stats.trimboth(np.sort(arg), proportiontocut)\n for arg in args)\n func = lambda x: np.mean(x, axis=0)\n\n for j in range(k):\n Ni[j] = len(args[j])\n Yci[j] = func(args[j])\n Ntot = np.sum(Ni, axis=0)\n\n # compute Zij's\n Zij = [None] * k\n for i in range(k):\n Zij[i] = abs(asarray(args[i]) - Yci[i])\n\n # compute Zbari\n Zbari = np.empty(k, 'd')\n Zbar = 0.0\n for i in range(k):\n Zbari[i] = np.mean(Zij[i], axis=0)\n Zbar += Zbari[i] * Ni[i]\n\n Zbar /= Ntot\n numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)\n\n # compute denom_variance\n dvar = 0.0\n for i in range(k):\n dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)\n\n denom = (k - 1.0) * dvar\n\n W = numer / denom\n pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf\n return LeveneResult(W, pval)\n\n\ndef binom_test(x, n=None, p=0.5, alternative='two-sided'):\n \"\"\"\n Perform a test that the probability of success is p.\n\n Note: `binom_test` is deprecated; it is recommended that `binomtest`\n be used instead.\n\n This is an exact, two-sided test of the null hypothesis\n that the probability of success in a Bernoulli experiment\n is `p`.\n\n Parameters\n ----------\n x : int or array_like\n The number of successes, or if x has length 2, it is the\n number of successes and the number of failures.\n n : int\n The number of trials. 
This is ignored if x gives both the\n number of successes and failures.\n p : float, optional\n The hypothesized probability of success. ``0 <= p <= 1``. The\n default value is ``p = 0.5``.\n alternative : {'two-sided', 'greater', 'less'}, optional\n Indicates the alternative hypothesis. The default value is\n 'two-sided'.\n\n Returns\n -------\n p-value : float\n The p-value of the hypothesis test.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Binomial_test\n\n Examples\n --------\n >>> from scipy import stats\n\n A car manufacturer claims that no more than 10% of their cars are unsafe.\n 15 cars are inspected for safety, 3 were found to be unsafe. Test the\n manufacturer's claim:\n\n >>> stats.binom_test(3, n=15, p=0.1, alternative='greater')\n 0.18406106910639114\n\n The null hypothesis cannot be rejected at the 5% level of significance\n because the returned p-value is greater than the critical value of 5%.\n\n \"\"\"\n x = atleast_1d(x).astype(np.int_)\n if len(x) == 2:\n n = x[1] + x[0]\n x = x[0]\n elif len(x) == 1:\n x = x[0]\n if n is None or n < x:\n raise ValueError(\"n must be >= x\")\n n = np.int_(n)\n else:\n raise ValueError(\"Incorrect length for x.\")\n\n if (p > 1.0) or (p < 0.0):\n raise ValueError(\"p must be in range [0,1]\")\n\n if alternative not in ('two-sided', 'less', 'greater'):\n raise ValueError(\"alternative not recognized\\n\"\n \"should be 'two-sided', 'less' or 'greater'\")\n\n if alternative == 'less':\n pval = distributions.binom.cdf(x, n, p)\n return pval\n\n if alternative == 'greater':\n pval = distributions.binom.sf(x-1, n, p)\n return pval\n\n # if alternative was neither 'less' nor 'greater', then it's 'two-sided'\n d = distributions.binom.pmf(x, n, p)\n rerr = 1 + 1e-7\n if x == p * n:\n # special case as shortcut, would also be handled by `else` below\n pval = 1.\n elif x < p * n:\n i = np.arange(np.ceil(p * n), n+1)\n y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)\n pval = (distributions.binom.cdf(x, n, p) +\n distributions.binom.sf(n - y, n, p))\n else:\n i = np.arange(np.floor(p*n) + 1)\n y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)\n pval = (distributions.binom.cdf(y-1, n, p) +\n distributions.binom.sf(x-1, n, p))\n\n return min(1.0, pval)\n\n\ndef _apply_func(x, g, func):\n # g is list of indices into x\n # separating x into different groups\n # func should be applied over the groups\n g = unique(r_[0, g, len(x)])\n output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)]\n\n return asarray(output)\n\n\nFlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))\n\n\ndef fligner(*args, center='median', proportiontocut=0.05):\n \"\"\"\n Perform Fligner-Killeen test for equality of variance.\n\n Fligner's test tests the null hypothesis that all input samples\n are from populations with equal variances. Fligner-Killeen's test is\n distribution free when populations are identical [2]_.\n\n Parameters\n ----------\n sample1, sample2, ... : array_like\n Arrays of sample data. Need not be the same length.\n center : {'mean', 'median', 'trimmed'}, optional\n Keyword argument controlling which function of the data is used in\n computing the test statistic. The default is 'median'.\n proportiontocut : float, optional\n When `center` is 'trimmed', this gives the proportion of data points\n to cut from each end. 
(See `scipy.stats.trim_mean`.)\n Default is 0.05.\n\n Returns\n -------\n statistic : float\n The test statistic.\n pvalue : float\n The p-value for the hypothesis test.\n\n See Also\n --------\n bartlett : A parametric test for equality of k variances in normal samples\n levene : A robust parametric test for equality of k variances\n\n Notes\n -----\n As with Levene's test there are three variants of Fligner's test that\n differ by the measure of central tendency used in the test. See `levene`\n for more information.\n\n Conover et al. (1981) examine many of the existing parametric and\n nonparametric tests by extensive simulations and they conclude that the\n tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be\n superior in terms of robustness of departures from normality and power [3]_.\n\n References\n ----------\n .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and\n Hypothesis Testing based on Quadratic Inference Function. Technical\n Report #99-03, Center for Likelihood Studies, Pennsylvania State\n University.\n https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf\n\n .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample\n tests for scale. 'Journal of the American Statistical Association.'\n 71(353), 210-213.\n\n .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and\n Hypothesis Testing based on Quadratic Inference Function. Technical\n Report #99-03, Center for Likelihood Studies, Pennsylvania State\n University.\n\n .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A\n comparative study of tests for homogeneity of variances, with\n applications to the outer continental shelf biding data.\n Technometrics, 23(4), 351-361.\n\n Examples\n --------\n Test whether or not the lists `a`, `b` and `c` come from populations\n with equal variances.\n\n >>> from scipy.stats import fligner\n >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]\n >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]\n >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]\n >>> stat, p = fligner(a, b, c)\n >>> p\n 0.00450826080004775\n\n The small p-value suggests that the populations do not have equal\n variances.\n\n This is not surprising, given that the sample variance of `b` is much\n larger than that of `a` and `c`:\n\n >>> [np.var(x, ddof=1) for x in [a, b, c]]\n [0.007054444444444413, 0.13073888888888888, 0.008890000000000002]\n \"\"\"\n if center not in ['mean', 'median', 'trimmed']:\n raise ValueError(\"center must be 'mean', 'median' or 'trimmed'.\")\n\n # Handle empty input\n for a in args:\n if np.asanyarray(a).size == 0:\n return FlignerResult(np.nan, np.nan)\n\n k = len(args)\n if k < 2:\n raise ValueError(\"Must enter at least two input sample vectors.\")\n\n if center == 'median':\n func = lambda x: np.median(x, axis=0)\n elif center == 'mean':\n func = lambda x: np.mean(x, axis=0)\n else: # center == 'trimmed'\n args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)\n func = lambda x: np.mean(x, axis=0)\n\n Ni = asarray([len(args[j]) for j in range(k)])\n Yci = asarray([func(args[j]) for j in range(k)])\n Ntot = np.sum(Ni, axis=0)\n # compute Zij's\n Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]\n allZij = []\n g = [0]\n for i in range(k):\n allZij.extend(list(Zij[i]))\n g.append(len(allZij))\n\n ranks = stats.rankdata(allZij)\n a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)\n\n # compute Aibar\n Aibar = 
_apply_func(a, g, np.sum) / Ni\n anbar = np.mean(a, axis=0)\n varsq = np.var(a, axis=0, ddof=1)\n Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq\n pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf\n return FlignerResult(Xsq, pval)\n\n\ndef mood(x, y, axis=0):\n \"\"\"\n Perform Mood's test for equal scale parameters.\n\n Mood's two-sample test for scale parameters is a non-parametric\n test for the null hypothesis that two samples are drawn from the\n same distribution with the same scale parameter.\n\n Parameters\n ----------\n x, y : array_like\n Arrays of sample data.\n axis : int, optional\n The axis along which the samples are tested. `x` and `y` can be of\n different length along `axis`.\n If `axis` is None, `x` and `y` are flattened and the test is done on\n all values in the flattened arrays.\n\n Returns\n -------\n z : scalar or ndarray\n The z-score for the hypothesis test. For 1-D inputs a scalar is\n returned.\n p-value : scalar ndarray\n The p-value for the hypothesis test.\n\n See Also\n --------\n fligner : A non-parametric test for the equality of k variances\n ansari : A non-parametric test for the equality of 2 variances\n bartlett : A parametric test for equality of k variances in normal samples\n levene : A parametric test for equality of k variances\n\n Notes\n -----\n The data are assumed to be drawn from probability distributions ``f(x)``\n and ``f(x/s) / s`` respectively, for some probability density function f.\n The null hypothesis is that ``s == 1``.\n\n For multi-dimensional arrays, if the inputs are of shapes\n ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the\n resulting z and p values will have shape ``(n0, n2, n3)``. Note that\n ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.\n\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(1234)\n >>> x2 = np.random.randn(2, 45, 6, 7)\n >>> x1 = np.random.randn(2, 30, 6, 7)\n >>> z, p = stats.mood(x1, x2, axis=1)\n >>> p.shape\n (2, 6, 7)\n\n Find the number of points where the difference in scale is not significant:\n\n >>> (p > 0.1).sum()\n 74\n\n Perform the test with different scales:\n\n >>> x1 = np.random.randn(2, 30)\n >>> x2 = np.random.randn(2, 35) * 10.0\n >>> stats.mood(x1, x2, axis=1)\n (array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))\n\n \"\"\"\n x = np.asarray(x, dtype=float)\n y = np.asarray(y, dtype=float)\n\n if axis is None:\n x = x.flatten()\n y = y.flatten()\n axis = 0\n\n # Determine shape of the result arrays\n res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])\n if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if\n ax != axis])):\n raise ValueError(\"Dimensions of x and y on all axes except `axis` \"\n \"should match\")\n\n n = x.shape[axis]\n m = y.shape[axis]\n N = m + n\n if N < 3:\n raise ValueError(\"Not enough observations.\")\n\n xy = np.concatenate((x, y), axis=axis)\n if axis != 0:\n xy = np.rollaxis(xy, axis)\n\n xy = xy.reshape(xy.shape[0], -1)\n\n # Generalized to the n-dimensional case by adding the axis argument, and\n # using for loops, since rankdata is not vectorized. 
For improving\n # performance consider vectorizing rankdata function.\n all_ranks = np.empty_like(xy)\n for j in range(xy.shape[1]):\n all_ranks[:, j] = stats.rankdata(xy[:, j])\n\n Ri = all_ranks[:n]\n M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)\n # Approx stat.\n mnM = n * (N * N - 1.0) / 12\n varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180\n z = (M - mnM) / sqrt(varM)\n\n # sf for right tail, cdf for left tail. Factor 2 for two-sidedness\n z_pos = z > 0\n pval = np.zeros_like(z)\n pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])\n pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])\n\n if res_shape == ():\n # Return scalars, not 0-D arrays\n z = z[0]\n pval = pval[0]\n else:\n z.shape = res_shape\n pval.shape = res_shape\n\n return z, pval\n\n\nWilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))\n\n\ndef wilcoxon(x, y=None, zero_method=\"wilcox\", correction=False,\n alternative=\"two-sided\", mode='auto'):\n \"\"\"\n Calculate the Wilcoxon signed-rank test.\n\n The Wilcoxon signed-rank test tests the null hypothesis that two\n related paired samples come from the same distribution. In particular,\n it tests whether the distribution of the differences x - y is symmetric\n about zero. It is a non-parametric version of the paired T-test.\n\n Parameters\n ----------\n x : array_like\n Either the first set of measurements (in which case ``y`` is the second\n set of measurements), or the differences between two sets of\n measurements (in which case ``y`` is not to be specified.) Must be\n one-dimensional.\n y : array_like, optional\n Either the second set of measurements (if ``x`` is the first set of\n measurements), or not specified (if ``x`` is the differences between\n two sets of measurements.) Must be one-dimensional.\n zero_method : {\"pratt\", \"wilcox\", \"zsplit\"}, optional\n The following options are available (default is \"wilcox\"):\n\n * \"pratt\": Includes zero-differences in the ranking process,\n but drops the ranks of the zeros, see [4]_, (more conservative).\n * \"wilcox\": Discards all zero-differences, the default.\n * \"zsplit\": Includes zero-differences in the ranking process and\n split the zero rank between positive and negative ones.\n correction : bool, optional\n If True, apply continuity correction by adjusting the Wilcoxon rank\n statistic by 0.5 towards the mean value when computing the\n z-statistic if a normal approximation is used. Default is False.\n alternative : {\"two-sided\", \"greater\", \"less\"}, optional\n The alternative hypothesis to be tested, see Notes. Default is\n \"two-sided\".\n mode : {\"auto\", \"exact\", \"approx\"}\n Method to calculate the p-value, see Notes. Default is \"auto\".\n\n Returns\n -------\n statistic : float\n If ``alternative`` is \"two-sided\", the sum of the ranks of the\n differences above or below zero, whichever is smaller.\n Otherwise the sum of the ranks of the differences above zero.\n pvalue : float\n The p-value for the test depending on ``alternative`` and ``mode``.\n\n See Also\n --------\n kruskal, mannwhitneyu\n\n Notes\n -----\n The test has been introduced in [4]_. Given n independent samples\n (xi, yi) from a bivariate distribution (i.e. paired samples),\n it computes the differences di = xi - yi. One assumption of the test\n is that the differences are symmetric, see [2]_.\n The two-sided test has the null hypothesis that the median of the\n differences is zero against the alternative that it is different from\n zero. 
The one-sided test has the null hypothesis that the median is\n positive against the alternative that it is negative\n (``alternative == 'less'``), or vice versa (``alternative == 'greater.'``).\n\n To derive the p-value, the exact distribution (``mode == 'exact'``)\n can be used for sample sizes of up to 25. The default ``mode == 'auto'``\n uses the exact distribution if there are at most 25 observations and no\n ties, otherwise a normal approximation is used (``mode == 'approx'``).\n\n The treatment of ties can be controlled by the parameter `zero_method`.\n If ``zero_method == 'pratt'``, the normal approximation is adjusted as in\n [5]_. A typical rule is to require that n > 20 ([2]_, p. 383).\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test\n .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971.\n .. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed\n Rank Procedures, Journal of the American Statistical Association,\n Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`\n .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods,\n Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`\n .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank\n Sampling Distribution When Zero Differences are Present,\n Journal of the American Statistical Association, Vol. 62, 1967,\n pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`\n\n Examples\n --------\n In [4]_, the differences in height between cross- and self-fertilized\n corn plants is given as follows:\n\n >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]\n\n Cross-fertilized plants appear to be be higher. To test the null\n hypothesis that there is no height difference, we can apply the\n two-sided test:\n\n >>> from scipy.stats import wilcoxon\n >>> w, p = wilcoxon(d)\n >>> w, p\n (24.0, 0.041259765625)\n\n Hence, we would reject the null hypothesis at a confidence level of 5%,\n concluding that there is a difference in height between the groups.\n To confirm that the median of the differences can be assumed to be\n positive, we use:\n\n >>> w, p = wilcoxon(d, alternative='greater')\n >>> w, p\n (96.0, 0.0206298828125)\n\n This shows that the null hypothesis that the median is negative can be\n rejected at a confidence level of 5% in favor of the alternative that\n the median is greater than zero. The p-values above are exact. 
Using the\n normal approximation gives very similar values:\n\n >>> w, p = wilcoxon(d, mode='approx')\n >>> w, p\n (24.0, 0.04088813291185591)\n\n Note that the statistic changed to 96 in the one-sided case (the sum\n of ranks of positive differences) whereas it is 24 in the two-sided\n case (the minimum of sum of ranks above and below zero).\n\n \"\"\"\n if mode not in [\"auto\", \"approx\", \"exact\"]:\n raise ValueError(\"mode must be either 'auto', 'approx' or 'exact'\")\n\n if zero_method not in [\"wilcox\", \"pratt\", \"zsplit\"]:\n raise ValueError(\"Zero method must be either 'wilcox' \"\n \"or 'pratt' or 'zsplit'\")\n\n if alternative not in [\"two-sided\", \"less\", \"greater\"]:\n raise ValueError(\"Alternative must be either 'two-sided', \"\n \"'greater' or 'less'\")\n\n if y is None:\n d = asarray(x)\n if d.ndim > 1:\n raise ValueError('Sample x must be one-dimensional.')\n else:\n x, y = map(asarray, (x, y))\n if x.ndim > 1 or y.ndim > 1:\n raise ValueError('Samples x and y must be one-dimensional.')\n if len(x) != len(y):\n raise ValueError('The samples x and y must have the same length.')\n d = x - y\n\n if mode == \"auto\":\n if len(d) <= 25:\n mode = \"exact\"\n else:\n mode = \"approx\"\n\n n_zero = np.sum(d == 0)\n if n_zero > 0 and mode == \"exact\":\n mode = \"approx\"\n warnings.warn(\"Exact p-value calculation does not work if there are \"\n \"ties. Switching to normal approximation.\")\n\n if mode == \"approx\":\n if zero_method in [\"wilcox\", \"pratt\"]:\n if n_zero == len(d):\n raise ValueError(\"zero_method 'wilcox' and 'pratt' do not \"\n \"work if x - y is zero for all elements.\")\n if zero_method == \"wilcox\":\n # Keep all non-zero differences\n d = compress(np.not_equal(d, 0), d)\n\n count = len(d)\n if count < 10 and mode == \"approx\":\n warnings.warn(\"Sample size too small for normal approximation.\")\n\n r = stats.rankdata(abs(d))\n r_plus = np.sum((d > 0) * r)\n r_minus = np.sum((d < 0) * r)\n\n if zero_method == \"zsplit\":\n r_zero = np.sum((d == 0) * r)\n r_plus += r_zero / 2.\n r_minus += r_zero / 2.\n\n # return min for two-sided test, but r_plus for one-sided test\n # the literature is not consistent here\n # r_plus is more informative since r_plus + r_minus = count*(count+1)/2,\n # i.e. the sum of the ranks, so r_minus and the min can be inferred\n # (If alternative='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)\n # [3] uses the r_plus for the one-sided test, keep min for two-sided test\n # to keep backwards compatibility\n if alternative == \"two-sided\":\n T = min(r_plus, r_minus)\n else:\n T = r_plus\n\n if mode == \"approx\":\n mn = count * (count + 1.) * 0.25\n se = count * (count + 1.) * (2. * count + 1.)\n\n if zero_method == \"pratt\":\n r = r[d != 0]\n # normal approximation needs to be adjusted, see Cureton (1967)\n mn -= n_zero * (n_zero + 1.) * 0.25\n se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)\n\n replist, repnum = find_repeats(r)\n if repnum.size != 0:\n # Correction for repeated elements.\n se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()\n\n se = sqrt(se / 24)\n\n # apply continuity correction if applicable\n d = 0\n if correction:\n if alternative == \"two-sided\":\n d = 0.5 * np.sign(T - mn)\n elif alternative == \"less\":\n d = -0.5\n else:\n d = 0.5\n\n # compute statistic and p-value using normal approximation\n z = (T - mn - d) / se\n if alternative == \"two-sided\":\n prob = 2. 
* distributions.norm.sf(abs(z))\n elif alternative == \"greater\":\n # large T = r_plus indicates x is greater than y; i.e.\n # accept alternative in that case and return small p-value (sf)\n prob = distributions.norm.sf(z)\n else:\n prob = distributions.norm.cdf(z)\n elif mode == \"exact\":\n # get frequencies cnt of the possible positive ranksums r_plus\n cnt = _get_wilcoxon_distr(count)\n # note: r_plus is int (ties not allowed), need int for slices below\n r_plus = int(r_plus)\n if alternative == \"two-sided\":\n if r_plus == (len(cnt) - 1) // 2:\n # r_plus is the center of the distribution.\n prob = 1.0\n else:\n p_less = np.sum(cnt[:r_plus + 1]) / 2**count\n p_greater = np.sum(cnt[r_plus:]) / 2**count\n prob = 2*min(p_greater, p_less)\n elif alternative == \"greater\":\n prob = np.sum(cnt[r_plus:]) / 2**count\n else:\n prob = np.sum(cnt[:r_plus + 1]) / 2**count\n\n return WilcoxonResult(T, prob)\n\n\ndef median_test(*args, ties='below', correction=True, lambda_=1,\n nan_policy='propagate'):\n \"\"\"\n Perform a Mood's median test.\n\n Test that two or more samples come from populations with the same median.\n\n Let ``n = len(args)`` be the number of samples. The \"grand median\" of\n all the data is computed, and a contingency table is formed by\n classifying the values in each sample as being above or below the grand\n median. The contingency table, along with `correction` and `lambda_`,\n are passed to `scipy.stats.chi2_contingency` to compute the test statistic\n and p-value.\n\n Parameters\n ----------\n sample1, sample2, ... : array_like\n The set of samples. There must be at least two samples.\n Each sample must be a one-dimensional sequence containing at least\n one value. The samples are not required to have the same length.\n ties : str, optional\n Determines how values equal to the grand median are classified in\n the contingency table. The string must be one of::\n\n \"below\":\n Values equal to the grand median are counted as \"below\".\n \"above\":\n Values equal to the grand median are counted as \"above\".\n \"ignore\":\n Values equal to the grand median are not counted.\n\n The default is \"below\".\n correction : bool, optional\n If True, *and* there are just two samples, apply Yates' correction\n for continuity when computing the test statistic associated with\n the contingency table. Default is True.\n lambda_ : float or str, optional\n By default, the statistic computed in this test is Pearson's\n chi-squared statistic. `lambda_` allows a statistic from the\n Cressie-Read power divergence family to be used instead. See\n `power_divergence` for details.\n Default is 1 (Pearson's chi-squared statistic).\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n\n Returns\n -------\n stat : float\n The test statistic. The statistic that is returned is determined by\n `lambda_`. The default is Pearson's chi-squared statistic.\n p : float\n The p-value of the test.\n m : float\n The grand median.\n table : ndarray\n The contingency table. The shape of the table is (2, n), where\n n is the number of samples. The first row holds the counts of the\n values above the grand median, and the second row holds the counts\n of the values below the grand median. 
The table allows further\n analysis with, for example, `scipy.stats.chi2_contingency`, or with\n `scipy.stats.fisher_exact` if there are two samples, without having\n to recompute the table. If ``nan_policy`` is \"propagate\" and there\n are nans in the input, the return value for ``table`` is ``None``.\n\n See Also\n --------\n kruskal : Compute the Kruskal-Wallis H-test for independent samples.\n mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.\n\n Notes\n -----\n .. versionadded:: 0.15.0\n\n References\n ----------\n .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill\n (1950), pp. 394-399.\n .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).\n See Sections 8.12 and 10.15.\n\n Examples\n --------\n A biologist runs an experiment in which there are three groups of plants.\n Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.\n Each plant produces a number of seeds. The seed counts for each group\n are::\n\n Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49\n Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99\n Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84\n\n The following code applies Mood's median test to these samples.\n\n >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]\n >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]\n >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]\n >>> from scipy.stats import median_test\n >>> stat, p, med, tbl = median_test(g1, g2, g3)\n\n The median is\n\n >>> med\n 34.0\n\n and the contingency table is\n\n >>> tbl\n array([[ 5, 10, 7],\n [11, 5, 10]])\n\n `p` is too large to conclude that the medians are not the same:\n\n >>> p\n 0.12609082774093244\n\n The \"G-test\" can be performed by passing ``lambda_=\"log-likelihood\"`` to\n `median_test`.\n\n >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_=\"log-likelihood\")\n >>> p\n 0.12224779737117837\n\n The median occurs several times in the data, so we'll get a different\n result if, for example, ``ties=\"above\"`` is used:\n\n >>> stat, p, med, tbl = median_test(g1, g2, g3, ties=\"above\")\n >>> p\n 0.063873276069553273\n\n >>> tbl\n array([[ 5, 11, 9],\n [11, 4, 8]])\n\n This example demonstrates that if the data set is not large and there\n are values equal to the median, the p-value can be sensitive to the\n choice of `ties`.\n\n \"\"\"\n if len(args) < 2:\n raise ValueError('median_test requires two or more samples.')\n\n ties_options = ['below', 'above', 'ignore']\n if ties not in ties_options:\n raise ValueError(\"invalid 'ties' option '%s'; 'ties' must be one \"\n \"of: %s\" % (ties, str(ties_options)[1:-1]))\n\n data = [np.asarray(arg) for arg in args]\n\n # Validate the sizes and shapes of the arguments.\n for k, d in enumerate(data):\n if d.size == 0:\n raise ValueError(\"Sample %d is empty. All samples must \"\n \"contain at least one value.\" % (k + 1))\n if d.ndim != 1:\n raise ValueError(\"Sample %d has %d dimensions. 
All \"\n \"samples must be one-dimensional sequences.\" %\n (k + 1, d.ndim))\n\n cdata = np.concatenate(data)\n contains_nan, nan_policy = _contains_nan(cdata, nan_policy)\n if contains_nan and nan_policy == 'propagate':\n return np.nan, np.nan, np.nan, None\n\n if contains_nan:\n grand_median = np.median(cdata[~np.isnan(cdata)])\n else:\n grand_median = np.median(cdata)\n # When the minimum version of numpy supported by scipy is 1.9.0,\n # the above if/else statement can be replaced by the single line:\n # grand_median = np.nanmedian(cdata)\n\n # Create the contingency table.\n table = np.zeros((2, len(data)), dtype=np.int64)\n for k, sample in enumerate(data):\n sample = sample[~np.isnan(sample)]\n\n nabove = count_nonzero(sample > grand_median)\n nbelow = count_nonzero(sample < grand_median)\n nequal = sample.size - (nabove + nbelow)\n table[0, k] += nabove\n table[1, k] += nbelow\n if ties == \"below\":\n table[1, k] += nequal\n elif ties == \"above\":\n table[0, k] += nequal\n\n # Check that no row or column of the table is all zero.\n # Such a table can not be given to chi2_contingency, because it would have\n # a zero in the table of expected frequencies.\n rowsums = table.sum(axis=1)\n if rowsums[0] == 0:\n raise ValueError(\"All values are below the grand median (%r).\" %\n grand_median)\n if rowsums[1] == 0:\n raise ValueError(\"All values are above the grand median (%r).\" %\n grand_median)\n if ties == \"ignore\":\n # We already checked that each sample has at least one value, but it\n # is possible that all those values equal the grand median. If `ties`\n # is \"ignore\", that would result in a column of zeros in `table`. We\n # check for that case here.\n zero_cols = np.nonzero((table == 0).all(axis=0))[0]\n if len(zero_cols) > 0:\n msg = (\"All values in sample %d are equal to the grand \"\n \"median (%r), so they are ignored, resulting in an \"\n \"empty sample.\" % (zero_cols[0] + 1, grand_median))\n raise ValueError(msg)\n\n stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,\n correction=correction)\n return stat, p, grand_median, table\n\n\ndef _circfuncs_common(samples, high, low, nan_policy='propagate'):\n # Ensure samples are array-like and size is not zero\n samples = np.asarray(samples)\n if samples.size == 0:\n return np.nan, np.asarray(np.nan), np.asarray(np.nan), None\n\n # Recast samples as radians that range between 0 and 2 pi and calculate\n # the sine and cosine\n sin_samp = sin((samples - low)*2.*pi / (high - low))\n cos_samp = cos((samples - low)*2.*pi / (high - low))\n\n # Apply the NaN policy\n contains_nan, nan_policy = _contains_nan(samples, nan_policy)\n if contains_nan and nan_policy == 'omit':\n mask = np.isnan(samples)\n # Set the sines and cosines that are NaN to zero\n sin_samp[mask] = 0.0\n cos_samp[mask] = 0.0\n else:\n mask = None\n\n return samples, sin_samp, cos_samp, mask\n\n\ndef circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):\n \"\"\"\n Compute the circular mean for samples in a range.\n\n Parameters\n ----------\n samples : array_like\n Input array.\n high : float or int, optional\n High boundary for circular mean range. Default is ``2*pi``.\n low : float or int, optional\n Low boundary for circular mean range. Default is 0.\n axis : int, optional\n Axis along which means are computed. The default is to compute\n the mean of the flattened array.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 
'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n\n Returns\n -------\n circmean : float\n Circular mean.\n\n Examples\n --------\n >>> from scipy.stats import circmean\n >>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3])\n 0.2\n\n >>> from scipy.stats import circmean\n >>> circmean([0.2, 1.4, 2.6], high = 1, low = 0)\n 0.4\n\n \"\"\"\n samples, sin_samp, cos_samp, nmask = _circfuncs_common(samples, high, low,\n nan_policy=nan_policy)\n sin_sum = sin_samp.sum(axis=axis)\n cos_sum = cos_samp.sum(axis=axis)\n res = arctan2(sin_sum, cos_sum)\n\n mask_nan = ~np.isnan(res)\n if mask_nan.ndim > 0:\n mask = res[mask_nan] < 0\n else:\n mask = res < 0\n\n if mask.ndim > 0:\n mask_nan[mask_nan] = mask\n res[mask_nan] += 2*pi\n elif mask:\n res += 2*pi\n\n # Set output to NaN if no samples went into the mean\n if nmask is not None:\n if nmask.all():\n res = np.full(shape=res.shape, fill_value=np.nan)\n else:\n # Find out if any of the axis that are being averaged consist\n # entirely of NaN. If one exists, set the result (res) to NaN\n nshape = 0 if axis is None else axis\n smask = nmask.shape[nshape] == nmask.sum(axis=axis)\n if smask.any():\n res[smask] = np.nan\n\n return res*(high - low)/2.0/pi + low\n\n\ndef circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):\n \"\"\"\n Compute the circular variance for samples assumed to be in a range.\n\n Parameters\n ----------\n samples : array_like\n Input array.\n high : float or int, optional\n High boundary for circular variance range. Default is ``2*pi``.\n low : float or int, optional\n Low boundary for circular variance range. Default is 0.\n axis : int, optional\n Axis along which variances are computed. The default is to compute\n the variance of the flattened array.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n\n Returns\n -------\n circvar : float\n Circular variance.\n\n Notes\n -----\n This uses a definition of circular variance that in the limit of small\n angles returns a number close to the 'linear' variance.\n\n Examples\n --------\n >>> from scipy.stats import circvar\n >>> circvar([0, 2*np.pi/3, 5*np.pi/3])\n 2.19722457734\n\n \"\"\"\n samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,\n nan_policy=nan_policy)\n if mask is None:\n sin_mean = sin_samp.mean(axis=axis)\n cos_mean = cos_samp.mean(axis=axis)\n else:\n nsum = np.asarray(np.sum(~mask, axis=axis).astype(float))\n nsum[nsum == 0] = np.nan\n sin_mean = sin_samp.sum(axis=axis) / nsum\n cos_mean = cos_samp.sum(axis=axis) / nsum\n # hypot can go slightly above 1 due to rounding errors\n with np.errstate(invalid='ignore'):\n R = np.minimum(1, hypot(sin_mean, cos_mean))\n\n return ((high - low)/2.0/pi)**2 * -2 * log(R)\n\n\ndef circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):\n \"\"\"\n Compute the circular standard deviation for samples assumed to be in the\n range [low to high].\n\n Parameters\n ----------\n samples : array_like\n Input array.\n high : float or int, optional\n High boundary for circular standard deviation range.\n Default is ``2*pi``.\n low : float or int, optional\n Low boundary for circular standard deviation range. Default is 0.\n axis : int, optional\n Axis along which standard deviations are computed. 
The default is\n to compute the standard deviation of the flattened array.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n\n Returns\n -------\n circstd : float\n Circular standard deviation.\n\n Notes\n -----\n This uses a definition of circular standard deviation that in the limit of\n small angles returns a number close to the 'linear' standard deviation.\n\n Examples\n --------\n >>> from scipy.stats import circstd\n >>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2])\n 0.063564063306\n\n \"\"\"\n samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,\n nan_policy=nan_policy)\n if mask is None:\n sin_mean = sin_samp.mean(axis=axis)\n cos_mean = cos_samp.mean(axis=axis)\n else:\n nsum = np.asarray(np.sum(~mask, axis=axis).astype(float))\n nsum[nsum == 0] = np.nan\n sin_mean = sin_samp.sum(axis=axis) / nsum\n cos_mean = cos_samp.sum(axis=axis) / nsum\n # hypot can go slightly above 1 due to rounding errors\n with np.errstate(invalid='ignore'):\n R = np.minimum(1, hypot(sin_mean, cos_mean))\n\n return ((high - low)/2.0/pi) * sqrt(-2*log(R))\n"
] | [
[
"numpy.deprecate",
"numpy.nextafter",
"numpy.iinfo",
"scipy._lib._util.DeprecatedImport"
],
[
"numpy.rollaxis",
"numpy.amax",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.around",
"numpy.issubdtype",
"numpy.all",
"numpy.concatenate",
"numpy.arctan2",
"numpy.zeros_like",
"numpy.any",
"numpy.mean",
"numpy.var",
"numpy.exp",
"numpy.polyval",
"numpy.hypot",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"numpy.empty_like",
"numpy.sin",
"numpy.atleast_1d",
"numpy.full",
"numpy.std",
"scipy.optimize.brent",
"numpy.ceil",
"numpy.asanyarray",
"numpy.count_nonzero",
"numpy.ravel",
"numpy.log1p",
"numpy.zeros",
"numpy.log",
"scipy.optimize.fsolve",
"numpy.spacing",
"numpy.power",
"numpy.isnan",
"numpy.amin",
"numpy.median",
"numpy.int_",
"scipy.special.boxcox",
"numpy.floor",
"numpy.errstate",
"numpy.not_equal",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.cos",
"numpy.sort",
"numpy.sign",
"numpy.isscalar",
"scipy.optimize.brentq",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
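The code field of the row above carries the tail of `scipy.stats.morestats`: the Fligner-Killeen and Mood scale tests, the Wilcoxon signed-rank test, Mood's median test, and the circular statistics helpers. As a quick orientation to the public API that source defines, here is a minimal usage sketch; the sample arrays below are invented for illustration and are not part of the dataset row.

    # Minimal usage sketch for the scipy.stats functions defined in the row above.
    import numpy as np
    from scipy.stats import (fligner, mood, wilcoxon, median_test,
                             circmean, circvar, circstd)

    rng = np.random.default_rng(0)

    # Fligner-Killeen test: do three samples share a common variance?
    a = rng.normal(0.0, 1.0, size=30)
    b = rng.normal(0.0, 2.0, size=30)
    c = rng.normal(0.0, 1.0, size=30)
    stat, p = fligner(a, b, c)
    print("fligner:", stat, p)

    # Mood's two-sample test for equal scale parameters.
    z, p = mood(a, b)
    print("mood:", z, p)

    # Wilcoxon signed-rank test on paired measurements (two-sided by default).
    before = rng.normal(10.0, 1.0, size=20)
    after = before + rng.normal(0.5, 1.0, size=20)
    w, p = wilcoxon(before, after)
    print("wilcoxon:", w, p)

    # Mood's median test; also returns the grand median and the 2 x k table.
    stat, p, grand_median, table = median_test(a, b, c)
    print("median_test:", stat, p, grand_median)
    print(table)

    # Circular statistics for angles in radians (default range [0, 2*pi)).
    angles = np.array([0.1, 6.2, 0.05, 0.2])
    print("circmean:", circmean(angles))
    print("circvar :", circvar(angles))
    print("circstd :", circstd(angles))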
Expert68/hotel_recommendation | [
"a6c1035c7e3ff2d824039855a2349b50f9143d37"
] | [
"GBDT_modeling.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nimport h5py\nimport os\nfrom data_clean import pre_process,get_agg\n\n#------------------------------定义评估标准---------------------------\ndef map5eval(preds,dtrain):\n actual = dtrain.get_label()\n predicted = preds.argsort(axis=1)[:-np.arange(5)]\n metric = 0\n for i in range(5):\n metric += np.sum(actual==predicted[:i])/(i+1)\n metric /= actual.shape[0]\n\n return 'map5',-metric\n\n#------------------------------对模型进行训练-----------------------------------\nclf = xgb.XGBClassifier(objective='multi:softmax',max_depth=5,n_estimators=300,learning_rate=0.01,nthread=4,subsample=0.7,colsample_bytree=0.7,min_child_weight=3,silent=False)\ndestinations = pd.read_csv('input/destinations.csv')\nresult = pd.read_csv('input/sample_result.csv')\nagg1 = pd.read_csv('output/srch_dest_hc_hm_agg.csv')\n\nif os.path.exists('rows_complete.txt'):\n with open('rows_complete.txt','r') as f:\n skipsize = int(f.readline())\nelse:\n skipsize = 0\n\nskip = 0 if skipsize==0 else range(1,skipsize)\ntchunksize = 1000000\nprint('%d rows will be skipped and next %d rows will be used for training' % (skipsize, tchunksize))\ntrain = pd.read_csv('input/train.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], skiprows=skip, nrows=tchunksize)\ntrain = train[train.is_booking==1]\ntrain = pd.merge(train, destinations, how='left', on='srch_destination_id')\ntrain = pd.merge(train, agg1, how='left', on=['srch_destination_id','hotel_country','hotel_market'])\npre_process(train)\ny = train.hotel_cluster\ntrain.drop(['cnt', 'hotel_cluster', 'is_booking'], axis=1, inplace=True)\nX_train, X_test, y_train, y_test = train_test_split(train, y, stratify=y, test_size=0.2)\nclf.fit(X_train, y_train, early_stopping_rounds=50, eval_metric=map5eval, eval_set=[(X_train, y_train),(X_test, y_test)])\n\n#-----------------------------对测试数据进行预测-----------------------------------\ncount = 0\nchunksize = 10000\npreds = np.empty((result.shape[0],clf.n_classes_))\nreader = pd.read_csv('input/test.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], chunksize=chunksize)\nfor chunk in reader:\n chunk = pd.merge(chunk, destinations, how='left', on='srch_destination_id')\n chunk = pd.merge(chunk, agg1, how='left', on=['srch_destination_id', 'hotel_country', 'hotel_market'])\n chunk.drop(['id'], axis=1, inplace=True)\n pre_process(chunk)\n\n pred = clf.predict_proba(chunk)\n preds[count:(count + chunk.shape[0]), :] = pred\n count = count + chunksize\n print('%d rows completed' % count)\n\ndel clf\ndel agg1\nif os.path.exists('output/probs/allpreds_xgb.h5'):\n with h5py.File('output/probs/allpreds_xgb.h5', 'r+') as hf:\n print('reading in and combining probabilities')\n predshf = hf['preds']\n preds += predshf.value\n print('writing latest probabilities to file')\n predshf[...] = preds\nelse:\n with h5py.File('../output/probs/allpreds_xgb.h5', 'w') as hf:\n print('writing latest probabilities to file')\n hf.create_dataset('preds', data=preds)\n\nprint('generating submission')\ncol_ind = np.argsort(-preds, axis=1)[:,:5]\nhc = [' '.join(row.astype(str)) for row in col_ind]\n\nsub = pd.DataFrame(data=hc, index=result.id)\nsub.reset_index(inplace=True)\nsub.columns = result.columns\nsub.to_csv('output/pred_sub.csv', index=False)\n\n\nskipsize += tchunksize\nwith open('rows_complete.txt', 'w') as f:\n f.write(str(skipsize))"
] | [
[
"pandas.merge",
"pandas.read_csv",
"numpy.arange",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"numpy.argsort",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
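The `GBDT_modeling.py` field above ranks hotel clusters with an XGBoost classifier and scores candidates with a custom `map5eval` callback. For reference, a self-contained MAP@5 sketch is shown below. It assumes `preds` is an `(n_samples, n_classes)` score matrix and that each row has exactly one relevant class, which is the setup implied by that file; the function names here are illustrative and not part of the dataset row.

    import numpy as np

    def map5(preds, labels):
        """Mean Average Precision at 5 when each row has one relevant class.

        preds  : (n_samples, n_classes) array of class scores.
        labels : (n_samples,) array of integer class labels.
        """
        # Indices of the top-5 classes per row, highest score first.
        top5 = np.argsort(-preds, axis=1)[:, :5]
        # A hit at rank i (1-indexed) contributes precision 1/i.
        hits = (top5 == labels[:, None])
        return float(np.sum(hits / np.arange(1, 6)[None, :]) / preds.shape[0])

    def map5_feval(preds, labels):
        # Returned negated so that "smaller is better" conventions (e.g. early
        # stopping on a loss) can be reused; flip the sign if maximising.
        return "map5", -map5(preds, np.asarray(labels, dtype=int))

A quick sanity check: with `preds = np.array([[0.1, 0.9], [0.8, 0.2]])` and `labels = np.array([1, 1])`, the first row ranks the true class first (precision 1) and the second ranks it second (precision 1/2), so `map5` returns 0.75.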
vincentcheny/models | [
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788",
"afb1a59fc1bc792ac72d1a3e22e2469020529788"
] | [
"research/attention_ocr/python/demo_inference.py",
"research/syntaxnet/dragnn/python/transformer_units.py",
"research/autoencoder/autoencoder_models/VariationalAutoencoder.py",
"research/audioset/vggish/vggish_smoke_test.py",
"research/object_detection/dataset_tools/oid_tfrecord_creation.py",
"research/syntaxnet/dragnn/python/visualization_test.py",
"samples/cookbook/regression/regression_test.py",
"research/minigo/sgf_wrapper_test.py",
"research/gan/progressive_gan/networks_test.py",
"official/bert/classifier_data_lib.py",
"research/fivo/fivo/models/vrnn.py",
"research/real_nvp/real_nvp_utils.py",
"research/cognitive_mapping_and_planning/src/graph_utils.py",
"research/object_detection/inputs_test.py",
"research/learned_optimizer/problems/datasets.py",
"research/object_detection/predictors/heads/box_head_test.py",
"research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py",
"research/ptn/losses.py",
"research/syntaxnet/dragnn/python/digraph_ops_test.py",
"research/minigo/features.py",
"research/object_detection/legacy/trainer_test.py",
"research/maskgan/data/imdb_loader.py",
"research/syntaxnet/dragnn/tools/evaluator.py",
"research/object_detection/core/box_list.py",
"research/seq2species/run_training_test.py",
"research/deeplab/core/utils_test.py",
"research/video_prediction/prediction_train.py"
] | [
"\"\"\"A script to run inference on a set of image files.\r\n\r\nNOTE #1: The Attention OCR model was trained only using FSNS train dataset and\r\nit will work only for images which look more or less similar to french street\r\nnames. In order to apply it to images from a different distribution you need\r\nto retrain (or at least fine-tune) it using images from that distribution.\r\n\r\nNOTE #2: This script exists for demo purposes only. It is highly recommended\r\nto use tools and mechanisms provided by the TensorFlow Serving system to run\r\ninference on TensorFlow models in production:\r\nhttps://www.tensorflow.org/serving/serving_basic\r\n\r\nUsage:\r\npython demo_inference.py --batch_size=32 \\\r\n --checkpoint=model.ckpt-399731\\\r\n --image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png\r\n\"\"\"\r\nimport numpy as np\r\nimport PIL.Image\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.python.platform import flags\r\nfrom tensorflow.python.training import monitored_session\r\n\r\nimport common_flags\r\nimport datasets\r\nimport data_provider\r\n\r\nFLAGS = flags.FLAGS\r\ncommon_flags.define()\r\n\r\n# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png\r\nflags.DEFINE_string('image_path_pattern', '',\r\n 'A file pattern with a placeholder for the image index.')\r\n\r\n\r\ndef get_dataset_image_size(dataset_name):\r\n # Ideally this info should be exposed through the dataset interface itself.\r\n # But currently it is not available by other means.\r\n ds_module = getattr(datasets, dataset_name)\r\n height, width, _ = ds_module.DEFAULT_CONFIG['image_shape']\r\n return width, height\r\n\r\n\r\ndef load_images(file_pattern, batch_size, dataset_name):\r\n width, height = get_dataset_image_size(dataset_name)\r\n images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),\r\n dtype='uint8')\r\n for i in range(batch_size):\r\n path = file_pattern % i\r\n print(\"Reading %s\" % path)\r\n pil_image = PIL.Image.open(tf.gfile.GFile(path))\r\n images_actual_data[i, ...] 
= np.asarray(pil_image)\r\n return images_actual_data\r\n\r\n\r\ndef create_model(batch_size, dataset_name):\r\n width, height = get_dataset_image_size(dataset_name)\r\n dataset = common_flags.create_dataset(split_name=FLAGS.split_name)\r\n model = common_flags.create_model(\r\n num_char_classes=dataset.num_char_classes,\r\n seq_length=dataset.max_sequence_length,\r\n num_views=dataset.num_of_views,\r\n null_code=dataset.null_code,\r\n charset=dataset.charset)\r\n raw_images = tf.placeholder(tf.uint8, shape=[batch_size, height, width, 3])\r\n images = tf.map_fn(data_provider.preprocess_image, raw_images,\r\n dtype=tf.float32)\r\n endpoints = model.create_base(images, labels_one_hot=None)\r\n return raw_images, endpoints\r\n\r\n\r\ndef run(checkpoint, batch_size, dataset_name, image_path_pattern):\r\n images_placeholder, endpoints = create_model(batch_size,\r\n dataset_name)\r\n images_data = load_images(image_path_pattern, batch_size,\r\n dataset_name)\r\n session_creator = monitored_session.ChiefSessionCreator(\r\n checkpoint_filename_with_path=checkpoint)\r\n with monitored_session.MonitoredSession(\r\n session_creator=session_creator) as sess:\r\n predictions = sess.run(endpoints.predicted_text,\r\n feed_dict={images_placeholder: images_data})\r\n return predictions.tolist()\r\n\r\n\r\ndef main(_):\r\n print(\"Predicted strings:\")\r\n predictions = run(FLAGS.checkpoint, FLAGS.batch_size, FLAGS.dataset_name,\r\n FLAGS.image_path_pattern)\r\n for line in predictions:\r\n print(line)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.app.run()\r\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Network units implementing the Transformer network (Vaswani et al. 2017).\r\n\r\nHeavily adapted from the tensor2tensor implementation of the Transformer,\r\ndescribed in detail here: https://arxiv.org/abs/1706.03762.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom dragnn.python import network_units\r\n\r\n\r\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\r\n \"\"\"Adds a bunch of sinusoids of different frequencies to a Tensor.\r\n\r\n Each channel of the input Tensor is incremented by a sinusoid of a different\r\n frequency and phase.\r\n\r\n This allows attention to learn to use absolute and relative positions.\r\n Timing signals should be added to some precursors of both the query and the\r\n memory inputs to attention.\r\n\r\n The use of relative position is possible because sin(x+y) and cos(x+y) can be\r\n expressed in terms of y, sin(x) and cos(x).\r\n\r\n In particular, we use a geometric sequence of timescales starting with\r\n min_timescale and ending with max_timescale. The number of different\r\n timescales is equal to channels / 2. For each timescale, we\r\n generate the two sinusoidal signals sin(timestep/timescale) and\r\n cos(timestep/timescale). 
All of these sinusoids are concatenated in\r\n the channels dimension.\r\n\r\n Args:\r\n x: a Tensor with shape [batch, length, channels]\r\n min_timescale: a float\r\n max_timescale: a float\r\n\r\n Returns:\r\n a Tensor the same shape as x.\r\n \"\"\"\r\n length = tf.shape(x)[1]\r\n channels = tf.shape(x)[2]\r\n pos = tf.to_float(tf.range(length))\r\n num_timescales = channels // 2\r\n log_timescale_increment = (\r\n np.log(float(max_timescale) / float(min_timescale)) /\r\n (tf.to_float(num_timescales) - 1))\r\n inv_timescales = min_timescale * tf.exp(\r\n tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\r\n scaled_time = tf.expand_dims(pos, 1) * tf.expand_dims(inv_timescales, 0)\r\n signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\r\n signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])\r\n signal = tf.reshape(signal, [1, length, channels])\r\n return x + signal\r\n\r\n\r\ndef split_last_dimension(x, n):\r\n \"\"\"Partitions x so that the last dimension becomes two dimensions.\r\n\r\n The first of these two dimensions is n.\r\n\r\n Args:\r\n x: a Tensor with shape [..., m]\r\n n: an integer.\r\n\r\n Returns:\r\n a Tensor with shape [..., n, m/n]\r\n \"\"\"\r\n old_shape = x.get_shape().dims\r\n last = old_shape[-1]\r\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\r\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))\r\n ret.set_shape(new_shape)\r\n return ret\r\n\r\n\r\ndef combine_last_two_dimensions(x):\r\n \"\"\"Reshape x so that the last two dimensions become one.\r\n\r\n Args:\r\n x: a Tensor with shape [..., a, b]\r\n\r\n Returns:\r\n a Tensor with shape [..., ab]\r\n \"\"\"\r\n old_shape = x.get_shape().dims\r\n a, b = old_shape[-2:]\r\n new_shape = old_shape[:-2] + [a * b if a and b else None]\r\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], 0))\r\n ret.set_shape(new_shape)\r\n return ret\r\n\r\n\r\ndef split_heads(x, num_heads):\r\n \"\"\"Splits channels (dimension 3) into multiple heads (becomes dimension 1).\r\n\r\n Args:\r\n x: a Tensor with shape [batch, length, channels]\r\n num_heads: an integer\r\n\r\n Returns:\r\n a Tensor with shape [batch, num_heads, length, channels / num_heads]\r\n \"\"\"\r\n return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3])\r\n\r\n\r\ndef combine_heads(x):\r\n \"\"\"Performs the inverse of split_heads.\r\n\r\n Args:\r\n x: a Tensor with shape [batch, num_heads, length, channels / num_heads]\r\n\r\n Returns:\r\n a Tensor with shape [batch, length, channels]\r\n \"\"\"\r\n return combine_last_two_dimensions(tf.transpose(x, [0, 2, 1, 3]))\r\n\r\n\r\ndef compute_padding_mask(lengths):\r\n \"\"\"Computes an additive mask for padding.\r\n\r\n Given the non-padded sequence lengths for the batch, computes a mask that will\r\n send padding attention to 0 when added to logits before applying a softmax.\r\n\r\n Args:\r\n lengths: a Tensor containing the sequence length of each batch element\r\n\r\n Returns:\r\n A Tensor of shape [batch_size, 1, 1, max_len] with zeros in non-padding\r\n entries and -1e9 in padding entries.\r\n \"\"\"\r\n lengths = tf.reshape(lengths, [-1])\r\n mask = tf.sequence_mask(lengths)\r\n\r\n # This will be used as an additive mask, so we want the inverse of the mask\r\n # produced by tf.sequence_mask.\r\n inv_mask = tf.to_float(tf.logical_not(mask))\r\n\r\n mem_padding = inv_mask * -1e9\r\n return tf.expand_dims(tf.expand_dims(mem_padding, 1), 1)\r\n\r\n\r\ndef dot_product_attention(queries, keys, values, dropout_keep_rate, 
bias=None):\r\n \"\"\"Computes dot-product attention.\r\n\r\n Args:\r\n queries: a Tensor with shape [batch, heads, seq_len, depth_keys]\r\n keys: a Tensor with shape [batch, heads, seq_len, depth_keys]\r\n values: a Tensor with shape [batch, heads, seq_len, depth_values]\r\n dropout_keep_rate: dropout proportion of units to keep\r\n bias: A bias to add before applying the softmax, or None. This can be used\r\n for masking padding in the batch.\r\n\r\n Returns:\r\n A Tensor with shape [batch, heads, seq_len, depth_values].\r\n \"\"\"\r\n # [batch, num_heads, seq_len, seq_len]\r\n logits = tf.matmul(queries, keys, transpose_b=True)\r\n if bias is not None:\r\n logits += bias\r\n\r\n attn_weights = tf.nn.softmax(logits)\r\n\r\n # Dropping out the attention links for each of the heads\r\n attn_weights = network_units.maybe_apply_dropout(attn_weights,\r\n dropout_keep_rate,\r\n False)\r\n return tf.matmul(attn_weights, values)\r\n\r\n\r\ndef residual(old_input, new_input, dropout_keep_rate, layer_norm):\r\n \"\"\"Residual layer combining old_input and new_input.\r\n\r\n Computes old_input + dropout(new_input) if layer_norm is None; otherwise:\r\n layer_norm(old_input + dropout(new_input)).\r\n\r\n Args:\r\n old_input: old float32 Tensor input to residual layer\r\n new_input: new float32 Tensor input to residual layer\r\n dropout_keep_rate: dropout proportion of units to keep\r\n layer_norm: network_units.LayerNorm to apply to residual output, or None\r\n\r\n Returns:\r\n float32 Tensor output of residual layer.\r\n \"\"\"\r\n res_sum = old_input + network_units.maybe_apply_dropout(new_input,\r\n dropout_keep_rate,\r\n False)\r\n return layer_norm.normalize(res_sum) if layer_norm else res_sum\r\n\r\n\r\ndef mlp(component, input_tensor, dropout_keep_rate, depth):\r\n \"\"\"Feed the input through an MLP.\r\n\r\n Each layer except the last is followed by a ReLU activation and dropout.\r\n\r\n Args:\r\n component: the DRAGNN Component containing parameters for the MLP\r\n input_tensor: the float32 Tensor input to the MLP.\r\n dropout_keep_rate: dropout proportion of units to keep\r\n depth: depth of the MLP.\r\n\r\n Returns:\r\n the float32 output Tensor\r\n \"\"\"\r\n for i in range(depth):\r\n ff_weights = component.get_variable('ff_weights_%d' % i)\r\n input_tensor = tf.nn.conv2d(input_tensor,\r\n ff_weights,\r\n [1, 1, 1, 1],\r\n padding='SAME')\r\n # Apply ReLU and dropout to all but the last layer\r\n if i < depth - 1:\r\n input_tensor = tf.nn.relu(input_tensor)\r\n input_tensor = network_units.maybe_apply_dropout(input_tensor,\r\n dropout_keep_rate,\r\n False)\r\n return input_tensor\r\n\r\n\r\nclass TransformerEncoderNetwork(network_units.NetworkUnitInterface):\r\n \"\"\"Implementation of the Transformer network encoder.\"\"\"\r\n\r\n def __init__(self, component):\r\n \"\"\"Initializes parameters for this Transformer unit.\r\n\r\n Args:\r\n component: parent ComponentBuilderBase object.\r\n\r\n Parameters used to construct the network:\r\n num_layers: number of transformer layers (attention + MLP)\r\n hidden_size: size of hidden layers in MLPs\r\n filter_size: filter width for each attention head\r\n num_heads: number of attention heads\r\n residual_dropout: dropout keep rate for residual layers\r\n attention_dropout: dropout keep rate for attention weights\r\n mlp_dropout: dropout keep rate for mlp layers\r\n initialization: initialization scheme to use for model parameters\r\n bias_init: initial value for bias parameters\r\n scale_attention: whether to scale attention parameters by 
filter_size^-0.5\r\n layer_norm_residuals: whether to perform layer normalization on residual\r\n layers\r\n timing_signal: whether to add a position-wise timing signal to the input\r\n kernel: kernel width in middle MLP layers\r\n mlp_layers: number of MLP layers. Must be >= 2.\r\n\r\n Raises:\r\n ValueError: if mlp_layers < 2.\r\n\r\n The input depth of the first layer is inferred from the total concatenated\r\n size of the input features, minus 1 to account for the sequence lengths.\r\n\r\n Hyperparameters used:\r\n dropout_rate: The probability that an input is not dropped. This is the\r\n default when the |dropout_keep_prob| parameter is unset.\r\n \"\"\"\r\n\r\n super(TransformerEncoderNetwork, self).__init__(component)\r\n default_dropout_rate = component.master.hyperparams.dropout_rate\r\n self._attrs = network_units.get_attrs_with_defaults(\r\n component.spec.network_unit.parameters, defaults={\r\n 'num_layers': 4,\r\n 'hidden_size': 256,\r\n 'filter_size': 64,\r\n 'num_heads': 8,\r\n 'residual_drop': default_dropout_rate,\r\n 'attention_drop': default_dropout_rate,\r\n 'mlp_drop': default_dropout_rate,\r\n 'initialization': 'xavier',\r\n 'bias_init': 0.001,\r\n 'scale_attention': True,\r\n 'layer_norm_residuals': True,\r\n 'timing_signal': True,\r\n 'kernel': 1,\r\n 'mlp_layers': 2})\r\n\r\n self._num_layers = self._attrs['num_layers']\r\n self._hidden_size = self._attrs['hidden_size']\r\n self._filter_size = self._attrs['filter_size']\r\n self._num_heads = self._attrs['num_heads']\r\n self._residual_dropout = self._attrs['residual_drop']\r\n self._attention_dropout = self._attrs['attention_drop']\r\n self._mlp_dropout = self._attrs['mlp_drop']\r\n self._initialization = self._attrs['initialization']\r\n self._bias_init = self._attrs['bias_init']\r\n self._scale_attn = self._attrs['scale_attention']\r\n self._layer_norm_res = self._attrs['layer_norm_residuals']\r\n self._timing_signal = self._attrs['timing_signal']\r\n self._kernel = self._attrs['kernel']\r\n self._mlp_depth = self._attrs['mlp_layers']\r\n\r\n if self._mlp_depth < 2:\r\n raise ValueError('TransformerEncoderNetwork needs mlp_layers >= 2')\r\n\r\n self._combined_filters = self._num_heads * self._filter_size\r\n\r\n self._weights = []\r\n self._biases = []\r\n self._layer_norms = {}\r\n\r\n # Hacky: one dimension comes from the lengths input; subtract it.\r\n self._concatenated_input_dim -= 1\r\n\r\n # Initial projection of inputs, this is mainly to project input down to the\r\n # right size for residual layers\r\n proj_shape = [1, 1, self._concatenated_input_dim, self._combined_filters]\r\n self._weights.append(\r\n network_units.add_var_initialized('init_proj', proj_shape,\r\n self._initialization))\r\n self._biases.append(tf.get_variable('init_bias',\r\n self._combined_filters,\r\n initializer=tf.constant_initializer(\r\n self._bias_init),\r\n dtype=tf.float32))\r\n\r\n for i in range(self._num_layers):\r\n with tf.variable_scope('transform_%d' % i):\r\n # Attention weights: 3 * self.combined_filters = (q, k, v)\r\n # We assume that q, k and v all have the same dimension\r\n attn_shape = [1, 1, self._combined_filters, 3 * self._combined_filters]\r\n self._weights.append(\r\n network_units.add_var_initialized('attn_weights',\r\n attn_shape,\r\n self._initialization))\r\n\r\n # Attention final projection weights\r\n proj_shape = [1, 1, self._combined_filters, self._combined_filters]\r\n self._weights.append(\r\n network_units.add_var_initialized('proj_weights',\r\n proj_shape,\r\n self._initialization))\r\n\r\n # 
MLP weights\r\n with tf.variable_scope('mlp'):\r\n ff_shape = [1, 1, self._combined_filters, self._hidden_size]\r\n self._weights.append(\r\n network_units.add_var_initialized('ff_weights_0',\r\n ff_shape,\r\n self._initialization))\r\n ff_shape = [1, self._kernel, self._hidden_size, self._hidden_size]\r\n for j in range(1, self._mlp_depth - 1):\r\n self._weights.append(\r\n network_units.add_var_initialized('ff_weights_%d' % j,\r\n ff_shape,\r\n self._initialization))\r\n ff_shape = [1, 1, self._hidden_size, self._combined_filters]\r\n self._weights.append(\r\n network_units.add_var_initialized('ff_weights_%d' %\r\n (self._mlp_depth - 1),\r\n ff_shape,\r\n self._initialization))\r\n\r\n # Layer normalization for residual layers\r\n if self._layer_norm_res:\r\n attn_layer_norm = network_units.LayerNorm(component,\r\n 'attn_layer_norm_%d' % i,\r\n self._combined_filters,\r\n tf.float32)\r\n self._layer_norms['attn_layer_norm_%d' % i] = attn_layer_norm\r\n\r\n ff_layer_norm = network_units.LayerNorm(component,\r\n 'ff_layer_norm_%d' % i,\r\n self._combined_filters,\r\n tf.float32)\r\n self._layer_norms['ff_layer_norm_%d' % i] = ff_layer_norm\r\n\r\n # Layer norm parameters are not added to self._weights,\r\n # which means that they are not l2 regularized\r\n self._params.extend(attn_layer_norm.params + ff_layer_norm.params)\r\n\r\n self._params.extend(self._weights)\r\n self._params.extend(self._biases)\r\n self._regularized_weights.extend(self._weights)\r\n self._layers.append(\r\n network_units.Layer(component, name='transformer_output',\r\n dim=self._combined_filters))\r\n\r\n def create(self,\r\n fixed_embeddings,\r\n linked_embeddings,\r\n context_tensor_arrays,\r\n attention_tensor,\r\n during_training,\r\n stride=None):\r\n \"\"\"Requires |stride|; otherwise see base class.\"\"\"\r\n del context_tensor_arrays, attention_tensor\r\n if stride is None:\r\n raise RuntimeError(\"TransformerEncoderNetwork needs 'stride' and must be \"\r\n \"called in the bulk feature extractor component.\")\r\n\r\n lengths = network_units.lookup_named_tensor('lengths', linked_embeddings)\r\n lengths_s = tf.to_int32(tf.squeeze(lengths.tensor, [1]))\r\n num_steps = tf.reduce_max(lengths_s)\r\n\r\n in_tensor = network_units.lookup_named_tensor('features', linked_embeddings)\r\n input_tensor = tf.reshape(in_tensor.tensor, [stride, num_steps, -1])\r\n\r\n if self._timing_signal:\r\n input_tensor = add_timing_signal_1d(input_tensor)\r\n\r\n # Adds a dimension for conv2d\r\n input_tensor = tf.expand_dims(input_tensor, 1)\r\n\r\n # For masking padding in attention\r\n mask = compute_padding_mask(lengths_s)\r\n\r\n conv = tf.nn.conv2d(input_tensor,\r\n self._component.get_variable('init_proj'),\r\n [1, 1, 1, 1], padding='SAME')\r\n conv = tf.nn.bias_add(conv, self._component.get_variable('init_bias'))\r\n\r\n for i in range(self._num_layers):\r\n with tf.variable_scope('transform_%d' % i, reuse=True):\r\n attn_weights = self._component.get_variable('attn_weights')\r\n attn_combined = tf.nn.conv2d(conv,\r\n attn_weights,\r\n [1, 1, 1, 1],\r\n padding='SAME')\r\n attn_combined = tf.squeeze(attn_combined, 1)\r\n\r\n # Splits combined projection into queries, keys, and values\r\n queries, keys, values = tf.split(attn_combined,\r\n [self._combined_filters]*3,\r\n axis=2)\r\n\r\n # Splits each of queries, keys, values into attention heads\r\n queries = split_heads(queries, self._num_heads)\r\n keys = split_heads(keys, self._num_heads)\r\n values = split_heads(values, self._num_heads)\r\n if self._scale_attn:\r\n queries 
*= self._filter_size**-0.5\r\n\r\n # Performs dot product attention and concatenates the resulting heads\r\n attended = dot_product_attention(queries, keys, values,\r\n self._attention_dropout, mask)\r\n attended = combine_heads(attended)\r\n\r\n # Projects combined heads\r\n attended = tf.expand_dims(attended, 1)\r\n proj = tf.nn.conv2d(attended,\r\n self._component.get_variable('proj_weights'),\r\n [1, 1, 1, 1],\r\n padding='SAME')\r\n\r\n # Residual connection between input and attended input\r\n attn_layer_norm_params = None\r\n if self._layer_norm_res:\r\n attn_layer_norm_params = self._layer_norms['attn_layer_norm_%d' % i]\r\n proj_res = residual(conv, proj, self._residual_dropout,\r\n attn_layer_norm_params)\r\n\r\n # Feed forward\r\n with tf.variable_scope('mlp'):\r\n ff = mlp(self._component, proj_res, self._mlp_dropout,\r\n self._mlp_depth)\r\n\r\n # Residual connection between attended input and feed forward layers\r\n ff_layer_norm_params = None\r\n if self._layer_norm_res:\r\n ff_layer_norm_params = self._layer_norms['ff_layer_norm_%d' % i]\r\n conv = residual(proj_res, ff, self._residual_dropout,\r\n ff_layer_norm_params)\r\n\r\n return [tf.reshape(conv, [-1, self._combined_filters],\r\n name='reshape_activations')]\r\n\r\n\r\nclass PairwiseBilinearLabelNetwork(network_units.NetworkUnitInterface):\r\n r\"\"\"Network unit that computes pairwise bilinear label scores.\r\n\r\n Given source and target representations for each token, this network unit\r\n computes bilinear scores for each label for each of the N^2 combinations of\r\n source and target tokens, rather than for only N already-computed\r\n source/target pairs (as is performed by the biaffine_units). The output is\r\n suitable as input to e.g. the heads_labels transition system.\r\n Specifically, a weights tensor W called `bilinear' is used to compute bilinear\r\n scores B for input tensors S and T:\r\n\r\n B_{bnml} = \\sum_{i,j} S_{bni} W_{ilj} T{bmj}\r\n\r\n for batches b, steps n and m and labels l.\r\n\r\n Parameters:\r\n num_labels: The number of dependency labels, L.\r\n\r\n Features:\r\n sources: [B * N, S] matrix of batched activations for source tokens.\r\n targets: [B * N, T] matrix of batched activations for target tokens.\r\n\r\n Layers:\r\n bilinear_scores: [B * N, N * L] matrix where vector b*N*N*L+t contains\r\n per-label scores for all N possible arcs from token t in\r\n batch b.\r\n \"\"\"\r\n\r\n def __init__(self, component):\r\n super(PairwiseBilinearLabelNetwork, self).__init__(component)\r\n parameters = component.spec.network_unit.parameters\r\n\r\n self._num_labels = int(parameters['num_labels'])\r\n\r\n self._source_dim = self._linked_feature_dims['sources']\r\n self._target_dim = self._linked_feature_dims['targets']\r\n\r\n self._weights = []\r\n self._weights.append(\r\n network_units.add_var_initialized('bilinear',\r\n [self._source_dim,\r\n self._num_labels,\r\n self._target_dim],\r\n 'xavier'))\r\n\r\n self._params.extend(self._weights)\r\n self._regularized_weights.extend(self._weights)\r\n self._layers.append(network_units.Layer(component,\r\n name='bilinear_scores',\r\n dim=self._num_labels))\r\n\r\n def create(self,\r\n fixed_embeddings,\r\n linked_embeddings,\r\n context_tensor_arrays,\r\n attention_tensor,\r\n during_training,\r\n stride=None):\r\n \"\"\"Requires |stride|; otherwise see base class.\"\"\"\r\n del context_tensor_arrays, attention_tensor\r\n if stride is None:\r\n raise RuntimeError(\"PairwiseBilinearLabelNetwork needs 'stride' and must \"\r\n \"be called in a bulk 
component.\")\r\n\r\n sources = network_units.lookup_named_tensor('sources', linked_embeddings)\r\n sources_tensor = tf.reshape(sources.tensor, [stride, -1, self._source_dim])\r\n\r\n targets = network_units.lookup_named_tensor('targets', linked_embeddings)\r\n targets_tensor = tf.reshape(targets.tensor, [stride, -1, self._target_dim])\r\n\r\n # Dimensions: source_dim x num_labels x target_dim\r\n bilinear_params = self._component.get_variable('bilinear')\r\n\r\n # Ensures that num_steps is the same for both inputs\r\n num_steps = tf.shape(sources_tensor)[1]\r\n with tf.control_dependencies([tf.assert_equal(num_steps,\r\n tf.shape(targets_tensor)[1],\r\n name='num_steps_mismatch')]):\r\n # Dimensions:\r\n # (batch_size*num_steps x source_dim) *\r\n # (source_dim x num_labels*target_dim)\r\n # = (batch_size*num_steps x num_labels*target_dim)\r\n lin = tf.matmul(tf.reshape(sources_tensor, [-1, self._source_dim]),\r\n tf.reshape(bilinear_params, [self._source_dim, -1]))\r\n\r\n # (batch_size x num_steps*num_labels x target_dim) *\r\n # (batch_size x num_steps x target_dim)^T\r\n # = (batch_size x num_steps*num_labels x num_steps)\r\n bilin = tf.matmul(\r\n tf.reshape(lin, [-1, num_steps*self._num_labels, self._target_dim]),\r\n targets_tensor, transpose_b=True)\r\n\r\n # (batch_size x num_steps*num_labels x num_steps) ->\r\n # (batch_size x num_steps x num_steps*num_labels)\r\n scores = tf.transpose(bilin, [0, 2, 1])\r\n\r\n return [tf.reshape(scores, [-1, num_steps*self._num_labels],\r\n name='reshape_activations')]\r\n",
"import tensorflow as tf\r\n\r\nclass VariationalAutoencoder(object):\r\n\r\n def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):\r\n self.n_input = n_input\r\n self.n_hidden = n_hidden\r\n\r\n network_weights = self._initialize_weights()\r\n self.weights = network_weights\r\n\r\n # model\r\n self.x = tf.placeholder(tf.float32, [None, self.n_input])\r\n self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])\r\n self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])\r\n\r\n # sample from gaussian distribution\r\n eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)\r\n self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))\r\n\r\n self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])\r\n\r\n # cost\r\n reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))\r\n latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq\r\n - tf.square(self.z_mean)\r\n - tf.exp(self.z_log_sigma_sq), 1)\r\n self.cost = tf.reduce_mean(reconstr_loss + latent_loss)\r\n self.optimizer = optimizer.minimize(self.cost)\r\n\r\n init = tf.global_variables_initializer()\r\n self.sess = tf.Session()\r\n self.sess.run(init)\r\n\r\n def _initialize_weights(self):\r\n all_weights = dict()\r\n all_weights['w1'] = tf.get_variable(\"w1\", shape=[self.n_input, self.n_hidden],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n all_weights['log_sigma_w1'] = tf.get_variable(\"log_sigma_w1\", shape=[self.n_input, self.n_hidden],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))\r\n all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))\r\n all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))\r\n all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))\r\n return all_weights\r\n\r\n def partial_fit(self, X):\r\n cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})\r\n return cost\r\n\r\n def calc_total_cost(self, X):\r\n return self.sess.run(self.cost, feed_dict = {self.x: X})\r\n\r\n def transform(self, X):\r\n return self.sess.run(self.z_mean, feed_dict={self.x: X})\r\n\r\n def generate(self, hidden = None):\r\n if hidden is None:\r\n hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))\r\n return self.sess.run(self.reconstruction, feed_dict={self.z: hidden})\r\n\r\n def reconstruct(self, X):\r\n return self.sess.run(self.reconstruction, feed_dict={self.x: X})\r\n\r\n def getWeights(self):\r\n return self.sess.run(self.weights['w1'])\r\n\r\n def getBiases(self):\r\n return self.sess.run(self.weights['b1'])\r\n\r\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"A smoke test for VGGish.\r\n\r\nThis is a simple smoke test of a local install of VGGish and its associated\r\ndownloaded files. We create a synthetic sound, extract log mel spectrogram\r\nfeatures, run them through VGGish, post-process the embedding ouputs, and\r\ncheck some simple statistics of the results, allowing for variations that\r\nmight occur due to platform/version differences in the libraries we use.\r\n\r\nUsage:\r\n- Download the VGGish checkpoint and PCA parameters into the same directory as\r\n the VGGish source code. If you keep them elsewhere, update the checkpoint_path\r\n and pca_params_path variables below.\r\n- Run:\r\n $ python vggish_smoke_test.py\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport vggish_input\r\nimport vggish_params\r\nimport vggish_postprocess\r\nimport vggish_slim\r\n\r\nprint('\\nTesting your install of VGGish\\n')\r\n\r\n# Paths to downloaded VGGish files.\r\ncheckpoint_path = 'vggish_model.ckpt'\r\npca_params_path = 'vggish_pca_params.npz'\r\n\r\n# Relative tolerance of errors in mean and standard deviation of embeddings.\r\nrel_error = 0.1 # Up to 10%\r\n\r\n# Generate a 1 kHz sine wave at 44.1 kHz (we use a high sampling rate\r\n# to test resampling to 16 kHz during feature extraction).\r\nnum_secs = 3\r\nfreq = 1000\r\nsr = 44100\r\nt = np.linspace(0, num_secs, int(num_secs * sr))\r\nx = np.sin(2 * np.pi * freq * t)\r\n\r\n# Produce a batch of log mel spectrogram examples.\r\ninput_batch = vggish_input.waveform_to_examples(x, sr)\r\nprint('Log Mel Spectrogram example: ', input_batch[0])\r\nnp.testing.assert_equal(\r\n input_batch.shape,\r\n [num_secs, vggish_params.NUM_FRAMES, vggish_params.NUM_BANDS])\r\n\r\n# Define VGGish, load the checkpoint, and run the batch through the model to\r\n# produce embeddings.\r\nwith tf.Graph().as_default(), tf.Session() as sess:\r\n vggish_slim.define_vggish_slim()\r\n vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)\r\n\r\n features_tensor = sess.graph.get_tensor_by_name(\r\n vggish_params.INPUT_TENSOR_NAME)\r\n embedding_tensor = sess.graph.get_tensor_by_name(\r\n vggish_params.OUTPUT_TENSOR_NAME)\r\n [embedding_batch] = sess.run([embedding_tensor],\r\n feed_dict={features_tensor: input_batch})\r\n print('VGGish embedding: ', embedding_batch[0])\r\n expected_embedding_mean = 0.131\r\n expected_embedding_std = 0.238\r\n np.testing.assert_allclose(\r\n [np.mean(embedding_batch), np.std(embedding_batch)],\r\n [expected_embedding_mean, expected_embedding_std],\r\n rtol=rel_error)\r\n\r\n# Postprocess the results to produce whitened quantized embeddings.\r\npproc = vggish_postprocess.Postprocessor(pca_params_path)\r\npostprocessed_batch = pproc.postprocess(embedding_batch)\r\nprint('Postprocessed 
VGGish embedding: ', postprocessed_batch[0])\r\nexpected_postprocessed_mean = 123.0\r\nexpected_postprocessed_std = 75.0\r\nnp.testing.assert_allclose(\r\n [np.mean(postprocessed_batch), np.std(postprocessed_batch)],\r\n [expected_postprocessed_mean, expected_postprocessed_std],\r\n rtol=rel_error)\r\n\r\nprint('\\nLooks Good To Me!\\n')\r\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Utilities for creating TFRecords of TF examples for the Open Images dataset.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\n\r\nfrom object_detection.core import standard_fields\r\nfrom object_detection.utils import dataset_util\r\n\r\n\r\ndef tf_example_from_annotations_data_frame(annotations_data_frame, label_map,\r\n encoded_image):\r\n \"\"\"Populates a TF Example message with image annotations from a data frame.\r\n\r\n Args:\r\n annotations_data_frame: Data frame containing the annotations for a single\r\n image.\r\n label_map: String to integer label map.\r\n encoded_image: The encoded image string\r\n\r\n Returns:\r\n The populated TF Example, if the label of at least one object is present in\r\n label_map. Otherwise, returns None.\r\n \"\"\"\r\n\r\n filtered_data_frame = annotations_data_frame[\r\n annotations_data_frame.LabelName.isin(label_map)]\r\n filtered_data_frame_boxes = filtered_data_frame[\r\n ~filtered_data_frame.YMin.isnull()]\r\n filtered_data_frame_labels = filtered_data_frame[\r\n filtered_data_frame.YMin.isnull()]\r\n image_id = annotations_data_frame.ImageID.iloc[0]\r\n\r\n feature_map = {\r\n standard_fields.TfExampleFields.object_bbox_ymin:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.YMin.as_matrix()),\r\n standard_fields.TfExampleFields.object_bbox_xmin:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.XMin.as_matrix()),\r\n standard_fields.TfExampleFields.object_bbox_ymax:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.YMax.as_matrix()),\r\n standard_fields.TfExampleFields.object_bbox_xmax:\r\n dataset_util.float_list_feature(\r\n filtered_data_frame_boxes.XMax.as_matrix()),\r\n standard_fields.TfExampleFields.object_class_text:\r\n dataset_util.bytes_list_feature(\r\n filtered_data_frame_boxes.LabelName.as_matrix()),\r\n standard_fields.TfExampleFields.object_class_label:\r\n dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.LabelName.map(lambda x: label_map[x])\r\n .as_matrix()),\r\n standard_fields.TfExampleFields.filename:\r\n dataset_util.bytes_feature('{}.jpg'.format(image_id)),\r\n standard_fields.TfExampleFields.source_id:\r\n dataset_util.bytes_feature(image_id),\r\n standard_fields.TfExampleFields.image_encoded:\r\n dataset_util.bytes_feature(encoded_image),\r\n }\r\n\r\n if 'IsGroupOf' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_group_of] = dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.IsGroupOf.as_matrix().astype(int))\r\n if 'IsOccluded' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_occluded] = 
dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.IsOccluded.as_matrix().astype(\r\n int))\r\n if 'IsTruncated' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_truncated] = dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.IsTruncated.as_matrix().astype(\r\n int))\r\n if 'IsDepiction' in filtered_data_frame.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n object_depiction] = dataset_util.int64_list_feature(\r\n filtered_data_frame_boxes.IsDepiction.as_matrix().astype(\r\n int))\r\n\r\n if 'ConfidenceImageLabel' in filtered_data_frame_labels.columns:\r\n feature_map[standard_fields.TfExampleFields.\r\n image_class_label] = dataset_util.int64_list_feature(\r\n filtered_data_frame_labels.LabelName.map(\r\n lambda x: label_map[x]).as_matrix())\r\n feature_map[standard_fields.TfExampleFields.\r\n image_class_text] = dataset_util.bytes_list_feature(\r\n filtered_data_frame_labels.LabelName.as_matrix()),\r\n return tf.train.Example(features=tf.train.Features(feature=feature_map))\r\n",
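A hypothetical usage sketch for tf_example_from_annotations_data_frame above. The column names follow the Open Images CSV schema the function reads; the label strings, box values, and image bytes are placeholders, and the module itself assumes the Python 2 / pandas < 0.25 era in which DataFrame.as_matrix() still existed:

import pandas as pd

annotations = pd.DataFrame({
    'ImageID':   ['img0', 'img0'],
    'LabelName': ['/m/label_a', '/m/label_b'],   # placeholder label MIDs
    'XMin': [0.1, 0.4], 'XMax': [0.5, 0.9],
    'YMin': [0.2, 0.3], 'YMax': [0.8, 0.7],
})
label_map = {'/m/label_a': 1, '/m/label_b': 2}
encoded_image = b'\xff\xd8 fake jpeg bytes'      # the function stores these bytes, it never decodes them

example = tf_example_from_annotations_data_frame(annotations, label_map, encoded_image)
print(example is not None)   # None would mean no label survived the label_map filter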
"# -*- coding: utf-8 -*-\r\n# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for dragnn.python.visualization.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.platform import googletest\r\nfrom dragnn.protos import spec_pb2\r\nfrom dragnn.protos import trace_pb2\r\nfrom dragnn.python import visualization\r\n\r\n\r\ndef _get_trace_proto_string():\r\n trace = trace_pb2.MasterTrace()\r\n trace.component_trace.add(\r\n step_trace=[\r\n trace_pb2.ComponentStepTrace(fixed_feature_trace=[]),\r\n ],\r\n # Google Translate says this is \"component\" in Chinese. (To test UTF-8).\r\n name='零件',)\r\n return trace.SerializeToString()\r\n\r\n\r\ndef _get_master_spec():\r\n return spec_pb2.MasterSpec(\r\n component=[spec_pb2.ComponentSpec(name='jalapeño')])\r\n\r\n\r\nclass VisualizationTest(googletest.TestCase):\r\n\r\n def testCanFindScript(self):\r\n script = visualization._load_viz_script()\r\n self.assertIsInstance(script, str)\r\n self.assertTrue(10e3 < len(script) < 10e6,\r\n 'Script size should be between 10k and 10M')\r\n\r\n def testSampleTraceSerialization(self):\r\n json = visualization.parse_trace_json(_get_trace_proto_string())\r\n self.assertIsInstance(json, str)\r\n self.assertTrue('component_trace' in json)\r\n\r\n def testInteractiveVisualization(self):\r\n widget = visualization.InteractiveVisualization()\r\n widget.initial_html()\r\n widget.show_trace(_get_trace_proto_string())\r\n\r\n def testMasterSpecJson(self):\r\n visualization.trace_html(\r\n _get_trace_proto_string(), master_spec=_get_master_spec())\r\n widget = visualization.InteractiveVisualization()\r\n widget.initial_html()\r\n widget.show_trace(_get_trace_proto_string(), master_spec=_get_master_spec())\r\n\r\n\r\nif __name__ == '__main__':\r\n googletest.main()\r\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"A simple smoke test that runs these examples for 1 training iteraton.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nfrom six.moves import StringIO\r\n\r\nimport automobile_data\r\n\r\nimport dnn_regression\r\nimport linear_regression\r\nimport linear_regression_categorical\r\nimport custom_regression\r\n\r\n# pylint: disable=line-too-long\r\nFOUR_LINES = \"\\n\".join([\r\n \"1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500\",\r\n \"2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950\",\r\n \"2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450\",\r\n \"2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250\",])\r\n# pylint: enable=line-too-long\r\n\r\nmock = tf.test.mock\r\n\r\ndef four_lines_dataframe():\r\n text = StringIO(FOUR_LINES)\r\n return pd.read_csv(text, names=automobile_data.COLUMN_TYPES.keys(),\r\n dtype=automobile_data.COLUMN_TYPES, na_values=\"?\")\r\n\r\n\r\ndef four_lines_dataset(*args, **kwargs):\r\n del args, kwargs\r\n return tf.data.Dataset.from_tensor_slices(FOUR_LINES.split(\"\\n\"))\r\n\r\n\r\nclass RegressionTest(tf.test.TestCase):\r\n \"\"\"Test the regression examples in this directory.\"\"\"\r\n\r\n @mock.patch.dict(automobile_data.__dict__, {\"raw_dataframe\": four_lines_dataframe})\r\n def test_linear_regression(self):\r\n linear_regression.main([None, \"--train_steps=1\"])\r\n\r\n @mock.patch.dict(automobile_data.__dict__, {\"raw_dataframe\": four_lines_dataframe})\r\n def test_linear_regression_categorical(self):\r\n linear_regression_categorical.main([None, \"--train_steps=1\"])\r\n\r\n @mock.patch.dict(automobile_data.__dict__, {\"raw_dataframe\": four_lines_dataframe})\r\n def test_dnn_regression(self):\r\n dnn_regression.main([None, \"--train_steps=1\"])\r\n\r\n @mock.patch.dict(automobile_data.__dict__, {\"raw_dataframe\": four_lines_dataframe})\r\n def test_custom_regression(self):\r\n custom_regression.main([None, \"--train_steps=1\"])\r\n\r\nif __name__ == \"__main__\":\r\n tf.test.main()\r\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for sgf_wrapper.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf # pylint: disable=g-bad-import-order\r\n\r\nimport coords\r\nimport go\r\nfrom sgf_wrapper import replay_sgf, translate_sgf_move, make_sgf\r\nimport utils_test\r\n\r\nJAPANESE_HANDICAP_SGF = '''(;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]RU[Japanese]\r\nSZ[9]HA[2]RE[Void]KM[5.50]PW[test_white]PB[test_black]AB[gc][cg];W[ee];B[dg])'''\r\n\r\nCHINESE_HANDICAP_SGF = '''(;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]RU[Chinese]SZ[9]\r\nHA[2]RE[Void]KM[5.50]PW[test_white]PB[test_black]RE[B+39.50];B[gc];B[cg];W[ee];\r\nB[gg];W[eg];B[ge];W[ce];B[ec];W[cc];B[dd];W[de];B[cd];W[bd];B[bc];W[bb];B[be];\r\nW[ac];B[bf];W[dh];B[ch];W[ci];B[bi];W[di];B[ah];W[gh];B[hh];W[fh];B[hg];W[gi];\r\nB[fg];W[dg];B[ei];W[cf];B[ef];W[ff];B[fe];W[bg];B[bh];W[af];B[ag];W[ae];B[ad];\r\nW[ae];B[ed];W[db];B[df];W[eb];B[fb];W[ea];B[fa])'''\r\n\r\nNO_HANDICAP_SGF = '''(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]\r\nHA[0]RE[W+1.5]GM[1];B[fd];W[cf];B[eg];W[dd];B[dc];W[cc];B[de];W[cd];B[ed];W[he];\r\nB[ce];W[be];B[df];W[bf];B[hd];W[ge];B[gd];W[gg];B[db];W[cb];B[cg];W[bg];B[gh];\r\nW[fh];B[hh];W[fg];B[eh];W[ei];B[di];W[fi];B[hg];W[dh];B[ch];W[ci];B[bh];W[ff];\r\nB[fe];W[hf];B[id];W[bi];B[ah];W[ef];B[dg];W[ee];B[di];W[ig];B[ai];W[ih];B[fb];\r\nW[hi];B[ag];W[ab];B[bd];W[bc];B[ae];W[ad];B[af];W[bd];B[ca];W[ba];B[da];W[ie])\r\n'''\r\n\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\n\r\n\r\nclass TestSgfGeneration(utils_test.MiniGoUnitTest):\r\n\r\n def test_translate_sgf_move(self):\r\n self.assertEqual(\r\n ';B[db]',\r\n translate_sgf_move(go.PlayerMove(go.BLACK, (1, 3)), None))\r\n self.assertEqual(\r\n ';W[aa]',\r\n translate_sgf_move(go.PlayerMove(go.WHITE, (0, 0)), None))\r\n self.assertEqual(\r\n ';W[]',\r\n translate_sgf_move(go.PlayerMove(go.WHITE, None), None))\r\n self.assertEqual(\r\n ';B[db]C[comment]',\r\n translate_sgf_move(go.PlayerMove(go.BLACK, (1, 3)), 'comment'))\r\n\r\n def test_make_sgf(self):\r\n all_pwcs = list(replay_sgf(utils_test.BOARD_SIZE, NO_HANDICAP_SGF))\r\n second_last_position, last_move, _ = all_pwcs[-1]\r\n last_position = second_last_position.play_move(last_move)\r\n\r\n back_to_sgf = make_sgf(\r\n utils_test.BOARD_SIZE,\r\n last_position.recent,\r\n last_position.score(),\r\n komi=last_position.komi,\r\n )\r\n reconstructed_positions = list(replay_sgf(\r\n utils_test.BOARD_SIZE, back_to_sgf))\r\n second_last_position2, last_move2, _ = reconstructed_positions[-1]\r\n last_position2 = second_last_position2.play_move(last_move2)\r\n\r\n self.assertEqualPositions(last_position, last_position2)\r\n\r\n\r\nclass TestSgfWrapper(utils_test.MiniGoUnitTest):\r\n\r\n def test_sgf_props(self):\r\n sgf_replayer = 
replay_sgf(utils_test.BOARD_SIZE, CHINESE_HANDICAP_SGF)\r\n initial = next(sgf_replayer)\r\n self.assertEqual(initial.result, go.BLACK)\r\n self.assertEqual(initial.position.komi, 5.5)\r\n\r\n def test_japanese_handicap_handling(self):\r\n intermediate_board = utils_test.load_board('''\r\n .........\r\n .........\r\n ......X..\r\n .........\r\n ....O....\r\n .........\r\n ..X......\r\n .........\r\n .........\r\n ''')\r\n intermediate_position = go.Position(\r\n utils_test.BOARD_SIZE,\r\n intermediate_board,\r\n n=1,\r\n komi=5.5,\r\n caps=(0, 0),\r\n recent=(go.PlayerMove(go.WHITE, coords.from_kgs(\r\n utils_test.BOARD_SIZE, 'E5')),),\r\n to_play=go.BLACK,\r\n )\r\n final_board = utils_test.load_board('''\r\n .........\r\n .........\r\n ......X..\r\n .........\r\n ....O....\r\n .........\r\n ..XX.....\r\n .........\r\n .........\r\n ''')\r\n final_position = go.Position(\r\n utils_test.BOARD_SIZE,\r\n final_board,\r\n n=2,\r\n komi=5.5,\r\n caps=(0, 0),\r\n recent=(\r\n go.PlayerMove(go.WHITE, coords.from_kgs(\r\n utils_test.BOARD_SIZE, 'E5')),\r\n go.PlayerMove(go.BLACK, coords.from_kgs(\r\n utils_test.BOARD_SIZE, 'D3')),),\r\n to_play=go.WHITE,\r\n )\r\n positions_w_context = list(replay_sgf(\r\n utils_test.BOARD_SIZE, JAPANESE_HANDICAP_SGF))\r\n self.assertEqualPositions(\r\n intermediate_position, positions_w_context[1].position)\r\n final_replayed_position = positions_w_context[-1].position.play_move(\r\n positions_w_context[-1].next_move)\r\n self.assertEqualPositions(final_position, final_replayed_position)\r\n\r\n def test_chinese_handicap_handling(self):\r\n intermediate_board = utils_test.load_board('''\r\n .........\r\n .........\r\n ......X..\r\n .........\r\n .........\r\n .........\r\n .........\r\n .........\r\n .........\r\n ''')\r\n intermediate_position = go.Position(\r\n utils_test.BOARD_SIZE,\r\n intermediate_board,\r\n n=1,\r\n komi=5.5,\r\n caps=(0, 0),\r\n recent=(go.PlayerMove(go.BLACK, coords.from_kgs(\r\n utils_test.BOARD_SIZE, 'G7')),),\r\n to_play=go.BLACK,\r\n )\r\n final_board = utils_test.load_board('''\r\n ....OX...\r\n .O.OOX...\r\n O.O.X.X..\r\n .OXXX....\r\n OX...XX..\r\n .X.XXO...\r\n X.XOOXXX.\r\n XXXO.OOX.\r\n .XOOX.O..\r\n ''')\r\n final_position = go.Position(\r\n utils_test.BOARD_SIZE,\r\n final_board,\r\n n=50,\r\n komi=5.5,\r\n caps=(7, 2),\r\n ko=None,\r\n recent=(\r\n go.PlayerMove(\r\n go.WHITE, coords.from_kgs(utils_test.BOARD_SIZE, 'E9')),\r\n go.PlayerMove(\r\n go.BLACK, coords.from_kgs(utils_test.BOARD_SIZE, 'F9')),),\r\n to_play=go.WHITE\r\n )\r\n positions_w_context = list(replay_sgf(\r\n utils_test.BOARD_SIZE, CHINESE_HANDICAP_SGF))\r\n self.assertEqualPositions(\r\n intermediate_position, positions_w_context[1].position)\r\n self.assertEqual(\r\n positions_w_context[1].next_move, coords.from_kgs(\r\n utils_test.BOARD_SIZE, 'C3'))\r\n final_replayed_position = positions_w_context[-1].position.play_move(\r\n positions_w_context[-1].next_move)\r\n self.assertEqualPositions(final_position, final_replayed_position)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n",
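The first test above pins down the SGF coordinate convention. Here is a tiny standalone re-derivation (my own sketch, checked only against the expected strings in test_translate_sgf_move; a pass serializes as an empty coordinate):

def to_sgf_coord(move):
    # minigo uses (row, col) from the top-left corner; SGF writes column then row,
    # both as letters counted from 'a'. None denotes a pass.
    if move is None:
        return ''
    row, col = move
    return chr(ord('a') + col) + chr(ord('a') + row)

assert to_sgf_coord((1, 3)) == 'db'   # ';B[db]' in the test
assert to_sgf_coord((0, 0)) == 'aa'   # ';W[aa]'
assert to_sgf_coord(None) == ''       # ';W[]'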
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport layers\r\nimport networks\r\n\r\n\r\ndef _get_grad_norm(ys, xs):\r\n \"\"\"Compute 2-norm of dys / dxs.\"\"\"\r\n return tf.sqrt(\r\n tf.add_n([tf.reduce_sum(tf.square(g)) for g in tf.gradients(ys, xs)]))\r\n\r\n\r\ndef _num_filters_stub(block_id):\r\n return networks.num_filters(block_id, 8, 1, 8)\r\n\r\n\r\nclass NetworksTest(tf.test.TestCase):\r\n\r\n def test_resolution_schedule_correct(self):\r\n rs = networks.ResolutionSchedule(\r\n start_resolutions=[5, 3], scale_base=2, num_resolutions=3)\r\n self.assertEqual(rs.start_resolutions, (5, 3))\r\n self.assertEqual(rs.scale_base, 2)\r\n self.assertEqual(rs.num_resolutions, 3)\r\n self.assertEqual(rs.final_resolutions, (20, 12))\r\n self.assertEqual(rs.scale_factor(1), 4)\r\n self.assertEqual(rs.scale_factor(2), 2)\r\n self.assertEqual(rs.scale_factor(3), 1)\r\n with self.assertRaises(ValueError):\r\n rs.scale_factor(0)\r\n with self.assertRaises(ValueError):\r\n rs.scale_factor(4)\r\n\r\n def test_block_name(self):\r\n self.assertEqual(networks.block_name(10), 'progressive_gan_block_10')\r\n\r\n def test_min_total_num_images(self):\r\n self.assertEqual(networks.min_total_num_images(7, 8, 4), 52)\r\n\r\n def test_compute_progress(self):\r\n current_image_id_ph = tf.placeholder(tf.int32, [])\r\n progress = networks.compute_progress(\r\n current_image_id_ph,\r\n stable_stage_num_images=7,\r\n transition_stage_num_images=8,\r\n num_blocks=2)\r\n with self.test_session(use_gpu=True) as sess:\r\n progress_output = [\r\n sess.run(progress, feed_dict={current_image_id_ph: current_image_id})\r\n for current_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]\r\n ]\r\n self.assertArrayNear(progress_output,\r\n [0.0, 0.0, 0.0, 0.0, 0.125, 0.375, 1.0, 1.0, 1.0],\r\n 1.0e-6)\r\n\r\n def test_generator_alpha(self):\r\n with self.test_session(use_gpu=True) as sess:\r\n alpha_fixed_block_id = [\r\n sess.run(\r\n networks._generator_alpha(2, tf.constant(progress, tf.float32)))\r\n for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]\r\n ]\r\n alpha_fixed_progress = [\r\n sess.run(\r\n networks._generator_alpha(block_id, tf.constant(1.2, tf.float32)))\r\n for block_id in range(1, 5)\r\n ]\r\n\r\n self.assertArrayNear(alpha_fixed_block_id, [0, 0.2, 1, 0.8, 0, 0, 0],\r\n 1.0e-6)\r\n self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 0.2, 0], 1.0e-6)\r\n\r\n def test_discriminator_alpha(self):\r\n with self.test_session(use_gpu=True) as sess:\r\n alpha_fixed_block_id = [\r\n sess.run(\r\n networks._discriminator_alpha(2, tf.constant(\r\n progress, tf.float32)))\r\n for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]\r\n ]\r\n alpha_fixed_progress = [\r\n sess.run(\r\n 
networks._discriminator_alpha(block_id,\r\n tf.constant(1.2, tf.float32)))\r\n for block_id in range(1, 5)\r\n ]\r\n\r\n self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)\r\n self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 1, 1], 1.0e-6)\r\n\r\n def test_blend_images_in_stable_stage(self):\r\n x_np = np.random.normal(size=[2, 8, 8, 3])\r\n x = tf.constant(x_np, tf.float32)\r\n x_blend = networks.blend_images(\r\n x,\r\n progress=tf.constant(0.0),\r\n resolution_schedule=networks.ResolutionSchedule(\r\n scale_base=2, num_resolutions=2),\r\n num_blocks=2)\r\n with self.test_session(use_gpu=True) as sess:\r\n x_blend_np = sess.run(x_blend)\r\n x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))\r\n self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)\r\n\r\n def test_blend_images_in_transition_stage(self):\r\n x_np = np.random.normal(size=[2, 8, 8, 3])\r\n x = tf.constant(x_np, tf.float32)\r\n x_blend = networks.blend_images(\r\n x,\r\n tf.constant(0.2),\r\n resolution_schedule=networks.ResolutionSchedule(\r\n scale_base=2, num_resolutions=2),\r\n num_blocks=2)\r\n with self.test_session(use_gpu=True) as sess:\r\n x_blend_np = sess.run(x_blend)\r\n x_blend_expected_np = 0.8 * sess.run(\r\n layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np\r\n self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)\r\n\r\n def test_num_filters(self):\r\n self.assertEqual(networks.num_filters(1, 4096, 1, 256), 256)\r\n self.assertEqual(networks.num_filters(5, 4096, 1, 256), 128)\r\n\r\n def test_generator_grad_norm_progress(self):\r\n stable_stage_num_images = 2\r\n transition_stage_num_images = 3\r\n\r\n current_image_id_ph = tf.placeholder(tf.int32, [])\r\n progress = networks.compute_progress(\r\n current_image_id_ph,\r\n stable_stage_num_images,\r\n transition_stage_num_images,\r\n num_blocks=3)\r\n z = tf.random_normal([2, 10], dtype=tf.float32)\r\n x, _ = networks.generator(\r\n z, progress, _num_filters_stub,\r\n networks.ResolutionSchedule(\r\n start_resolutions=(4, 4), scale_base=2, num_resolutions=3))\r\n fake_loss = tf.reduce_sum(tf.square(x))\r\n grad_norms = [\r\n _get_grad_norm(\r\n fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')),\r\n _get_grad_norm(\r\n fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')),\r\n _get_grad_norm(\r\n fake_loss, tf.trainable_variables('.*/progressive_gan_block_3/.*'))\r\n ]\r\n\r\n grad_norms_output = None\r\n with self.test_session(use_gpu=True) as sess:\r\n sess.run(tf.global_variables_initializer())\r\n x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12})\r\n x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8})\r\n grad_norms_output = np.array([\r\n sess.run(grad_norms, feed_dict={current_image_id_ph: i})\r\n for i in range(15) # total num of images\r\n ])\r\n\r\n self.assertEqual((2, 16, 16, 3), x1_np.shape)\r\n self.assertEqual((2, 16, 16, 3), x2_np.shape)\r\n # The gradient of block_1 is always on.\r\n self.assertEqual(\r\n np.argmax(grad_norms_output[:, 0] > 0), 0,\r\n 'gradient norms {} for block 1 is not always on'.format(\r\n grad_norms_output[:, 0]))\r\n # The gradient of block_2 is on after 1 stable stage.\r\n self.assertEqual(\r\n np.argmax(grad_norms_output[:, 1] > 0), 3,\r\n 'gradient norms {} for block 2 is not on at step 3'.format(\r\n grad_norms_output[:, 1]))\r\n # The gradient of block_3 is on after 2 stable stage + 1 transition stage.\r\n self.assertEqual(\r\n np.argmax(grad_norms_output[:, 2] > 0), 8,\r\n 'gradient norms {} 
for block 3 is not on at step 8'.format(\r\n grad_norms_output[:, 2]))\r\n\r\n def test_discriminator_grad_norm_progress(self):\r\n stable_stage_num_images = 2\r\n transition_stage_num_images = 3\r\n\r\n current_image_id_ph = tf.placeholder(tf.int32, [])\r\n progress = networks.compute_progress(\r\n current_image_id_ph,\r\n stable_stage_num_images,\r\n transition_stage_num_images,\r\n num_blocks=3)\r\n x = tf.random_normal([2, 16, 16, 3])\r\n logits, _ = networks.discriminator(\r\n x, progress, _num_filters_stub,\r\n networks.ResolutionSchedule(\r\n start_resolutions=(4, 4), scale_base=2, num_resolutions=3))\r\n fake_loss = tf.reduce_sum(tf.square(logits))\r\n grad_norms = [\r\n _get_grad_norm(\r\n fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')),\r\n _get_grad_norm(\r\n fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')),\r\n _get_grad_norm(\r\n fake_loss, tf.trainable_variables('.*/progressive_gan_block_3/.*'))\r\n ]\r\n\r\n grad_norms_output = None\r\n with self.test_session(use_gpu=True) as sess:\r\n sess.run(tf.global_variables_initializer())\r\n grad_norms_output = np.array([\r\n sess.run(grad_norms, feed_dict={current_image_id_ph: i})\r\n for i in range(15) # total num of images\r\n ])\r\n\r\n # The gradient of block_1 is always on.\r\n self.assertEqual(\r\n np.argmax(grad_norms_output[:, 0] > 0), 0,\r\n 'gradient norms {} for block 1 is not always on'.format(\r\n grad_norms_output[:, 0]))\r\n # The gradient of block_2 is on after 1 stable stage.\r\n self.assertEqual(\r\n np.argmax(grad_norms_output[:, 1] > 0), 3,\r\n 'gradient norms {} for block 2 is not on at step 3'.format(\r\n grad_norms_output[:, 1]))\r\n # The gradient of block_3 is on after 2 stable stage + 1 transition stage.\r\n self.assertEqual(\r\n np.argmax(grad_norms_output[:, 2] > 0), 8,\r\n 'gradient norms {} for block 3 is not on at step 8'.format(\r\n grad_norms_output[:, 2]))\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n",
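The constants in test_generator_alpha and test_discriminator_alpha above trace out simple piecewise-linear ramps in progress. A NumPy re-derivation of the generator ramp (an assumption about networks._generator_alpha, verified only against the values the test expects):

import numpy as np

def generator_alpha(block_id, progress):
    # block k fades in while progress runs (k-2) -> (k-1) and fades out over (k-1) -> k
    return np.maximum(0.0, 1.0 - np.abs(progress - (block_id - 1)))

fixed_block = [generator_alpha(2, p) for p in [0, 0.2, 1, 1.2, 2, 2.2, 3]]
fixed_progress = [generator_alpha(b, 1.2) for b in range(1, 5)]
np.testing.assert_allclose(fixed_block, [0, 0.2, 1, 0.8, 0, 0, 0], atol=1e-6)
np.testing.assert_allclose(fixed_progress, [0, 0.8, 0.2, 0], atol=1e-6)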
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"BERT library to process data for classification task.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport csv\r\nimport os\r\n\r\nfrom absl import logging\r\nimport tensorflow as tf\r\n\r\nfrom official.bert import tokenization\r\n\r\n\r\nclass InputExample(object):\r\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\r\n\r\n def __init__(self, guid, text_a, text_b=None, label=None):\r\n \"\"\"Constructs a InputExample.\r\n\r\n Args:\r\n guid: Unique id for the example.\r\n text_a: string. The untokenized text of the first sequence. For single\r\n sequence tasks, only this sequence must be specified.\r\n text_b: (Optional) string. The untokenized text of the second sequence.\r\n Only must be specified for sequence pair tasks.\r\n label: (Optional) string. The label of the example. This should be\r\n specified for train and dev examples, but not for test examples.\r\n \"\"\"\r\n self.guid = guid\r\n self.text_a = text_a\r\n self.text_b = text_b\r\n self.label = label\r\n\r\n\r\nclass InputFeatures(object):\r\n \"\"\"A single set of features of data.\"\"\"\r\n\r\n def __init__(self,\r\n input_ids,\r\n input_mask,\r\n segment_ids,\r\n label_id,\r\n is_real_example=True):\r\n self.input_ids = input_ids\r\n self.input_mask = input_mask\r\n self.segment_ids = segment_ids\r\n self.label_id = label_id\r\n self.is_real_example = is_real_example\r\n\r\n\r\nclass DataProcessor(object):\r\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\r\n\r\n def get_train_examples(self, data_dir):\r\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_dev_examples(self, data_dir):\r\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_test_examples(self, data_dir):\r\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_labels(self):\r\n \"\"\"Gets the list of labels for this data set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n @staticmethod\r\n def get_processor_name():\r\n \"\"\"Gets the string identifier of the processor.\"\"\"\r\n raise NotImplementedError()\r\n\r\n @classmethod\r\n def _read_tsv(cls, input_file, quotechar=None):\r\n \"\"\"Reads a tab separated value file.\"\"\"\r\n with tf.io.gfile.GFile(input_file, \"r\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n lines.append(line)\r\n return lines\r\n\r\n\r\nclass XnliProcessor(DataProcessor):\r\n \"\"\"Processor for the XNLI data set.\"\"\"\r\n\r\n def __init__(self):\r\n self.language = 
\"zh\"\r\n\r\n def get_train_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n lines = self._read_tsv(\r\n os.path.join(data_dir, \"multinli\",\r\n \"multinli.train.%s.tsv\" % self.language))\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"train-%d\" % (i)\r\n text_a = tokenization.convert_to_unicode(line[0])\r\n text_b = tokenization.convert_to_unicode(line[1])\r\n label = tokenization.convert_to_unicode(line[2])\r\n if label == tokenization.convert_to_unicode(\"contradictory\"):\r\n label = tokenization.convert_to_unicode(\"contradiction\")\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples\r\n\r\n def get_dev_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"dev-%d\" % (i)\r\n language = tokenization.convert_to_unicode(line[0])\r\n if language != tokenization.convert_to_unicode(self.language):\r\n continue\r\n text_a = tokenization.convert_to_unicode(line[6])\r\n text_b = tokenization.convert_to_unicode(line[7])\r\n label = tokenization.convert_to_unicode(line[1])\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples\r\n\r\n def get_labels(self):\r\n \"\"\"See base class.\"\"\"\r\n return [\"contradiction\", \"entailment\", \"neutral\"]\r\n\r\n @staticmethod\r\n def get_processor_name():\r\n \"\"\"See base class.\"\"\"\r\n return \"XNLI\"\r\n\r\n\r\nclass MnliProcessor(DataProcessor):\r\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\r\n\r\n def get_train_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\r\n\r\n def get_dev_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\r\n \"dev_matched\")\r\n\r\n def get_test_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\")\r\n\r\n def get_labels(self):\r\n \"\"\"See base class.\"\"\"\r\n return [\"contradiction\", \"entailment\", \"neutral\"]\r\n\r\n @staticmethod\r\n def get_processor_name():\r\n \"\"\"See base class.\"\"\"\r\n return \"MNLI\"\r\n\r\n def _create_examples(self, lines, set_type):\r\n \"\"\"Creates examples for the training and dev sets.\"\"\"\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\r\n text_a = tokenization.convert_to_unicode(line[8])\r\n text_b = tokenization.convert_to_unicode(line[9])\r\n if set_type == \"test\":\r\n label = \"contradiction\"\r\n else:\r\n label = tokenization.convert_to_unicode(line[-1])\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples\r\n\r\n\r\nclass MrpcProcessor(DataProcessor):\r\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\r\n\r\n def get_train_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\r\n\r\n def get_dev_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return 
self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\r\n\r\n def get_test_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\r\n\r\n def get_labels(self):\r\n \"\"\"See base class.\"\"\"\r\n return [\"0\", \"1\"]\r\n\r\n @staticmethod\r\n def get_processor_name():\r\n \"\"\"See base class.\"\"\"\r\n return \"MRPC\"\r\n\r\n def _create_examples(self, lines, set_type):\r\n \"\"\"Creates examples for the training and dev sets.\"\"\"\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = tokenization.convert_to_unicode(line[3])\r\n text_b = tokenization.convert_to_unicode(line[4])\r\n if set_type == \"test\":\r\n label = \"0\"\r\n else:\r\n label = tokenization.convert_to_unicode(line[0])\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples\r\n\r\n\r\nclass ColaProcessor(DataProcessor):\r\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\r\n\r\n def get_train_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\r\n\r\n def get_dev_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\r\n\r\n def get_test_examples(self, data_dir):\r\n \"\"\"See base class.\"\"\"\r\n return self._create_examples(\r\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\r\n\r\n def get_labels(self):\r\n \"\"\"See base class.\"\"\"\r\n return [\"0\", \"1\"]\r\n\r\n @staticmethod\r\n def get_processor_name():\r\n \"\"\"See base class.\"\"\"\r\n return \"COLA\"\r\n\r\n def _create_examples(self, lines, set_type):\r\n \"\"\"Creates examples for the training and dev sets.\"\"\"\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n # Only the test set has a header\r\n if set_type == \"test\" and i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n if set_type == \"test\":\r\n text_a = tokenization.convert_to_unicode(line[1])\r\n label = \"0\"\r\n else:\r\n text_a = tokenization.convert_to_unicode(line[3])\r\n label = tokenization.convert_to_unicode(line[1])\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n return examples\r\n\r\n\r\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\r\n tokenizer):\r\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\r\n label_map = {}\r\n for (i, label) in enumerate(label_list):\r\n label_map[label] = i\r\n\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n\r\n if tokens_b:\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[0:(max_seq_length - 2)]\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . 
[SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambiguously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as the \"sentence vector\". Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = []\r\n segment_ids = []\r\n tokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n for token in tokens_a:\r\n tokens.append(token)\r\n segment_ids.append(0)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n\r\n if tokens_b:\r\n for token in tokens_b:\r\n tokens.append(token)\r\n segment_ids.append(1)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\r\n # tokens are attended to.\r\n input_mask = [1] * len(input_ids)\r\n\r\n # Zero-pad up to the sequence length.\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = label_map[example.label]\r\n if ex_index < 5:\r\n logging.info(\"*** Example ***\")\r\n logging.info(\"guid: %s\", (example.guid))\r\n logging.info(\"tokens: %s\",\r\n \" \".join([tokenization.printable_text(x) for x in tokens]))\r\n logging.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\r\n logging.info(\"input_mask: %s\", \" \".join([str(x) for x in input_mask]))\r\n logging.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\r\n logging.info(\"label: %s (id = %d)\", example.label, label_id)\r\n\r\n feature = InputFeatures(\r\n input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n label_id=label_id,\r\n is_real_example=True)\r\n return feature\r\n\r\n\r\ndef file_based_convert_examples_to_features(examples, label_list,\r\n max_seq_length, tokenizer,\r\n output_file):\r\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\r\n\r\n writer = tf.io.TFRecordWriter(output_file)\r\n\r\n for (ex_index, example) in enumerate(examples):\r\n if ex_index % 10000 == 0:\r\n logging.info(\"Writing example %d of %d\", ex_index, len(examples))\r\n\r\n feature = convert_single_example(ex_index, example, label_list,\r\n max_seq_length, tokenizer)\r\n\r\n def create_int_feature(values):\r\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\r\n return f\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\r\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\r\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\r\n features[\"label_ids\"] = create_int_feature([feature.label_id])\r\n features[\"is_real_example\"] = create_int_feature(\r\n [int(feature.is_real_example)])\r\n\r\n tf_example = 
tf.train.Example(features=tf.train.Features(feature=features))\r\n writer.write(tf_example.SerializeToString())\r\n writer.close()\r\n\r\n\r\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\r\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\r\n\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()\r\n\r\n\r\ndef generate_tf_record_from_data_file(processor,\r\n data_dir,\r\n vocab_file,\r\n train_data_output_path=None,\r\n eval_data_output_path=None,\r\n max_seq_length=128,\r\n do_lower_case=True):\r\n \"\"\"Generates and saves training data into a tf record file.\r\n\r\n Arguments:\r\n processor: Input processor object to be used for generating data. Subclass\r\n of `DataProcessor`.\r\n data_dir: Directory that contains train/eval data to process. Data files\r\n should be in from \"dev.tsv\", \"test.tsv\", or \"train.tsv\".\r\n vocab_file: Text file with words to be used for training/evaluation.\r\n train_data_output_path: Output to which processed tf record for training\r\n will be saved.\r\n eval_data_output_path: Output to which processed tf record for evaluation\r\n will be saved.\r\n max_seq_length: Maximum sequence length of the to be generated\r\n training/eval data.\r\n do_lower_case: Whether to lower case input text.\r\n\r\n Returns:\r\n A dictionary containing input meta data.\r\n \"\"\"\r\n assert train_data_output_path or eval_data_output_path\r\n\r\n label_list = processor.get_labels()\r\n tokenizer = tokenization.FullTokenizer(\r\n vocab_file=vocab_file, do_lower_case=do_lower_case)\r\n assert train_data_output_path\r\n train_input_data_examples = processor.get_train_examples(data_dir)\r\n file_based_convert_examples_to_features(train_input_data_examples, label_list,\r\n max_seq_length, tokenizer,\r\n train_data_output_path)\r\n num_training_data = len(train_input_data_examples)\r\n\r\n if eval_data_output_path:\r\n eval_input_data_examples = processor.get_dev_examples(data_dir)\r\n file_based_convert_examples_to_features(eval_input_data_examples,\r\n label_list, max_seq_length,\r\n tokenizer, eval_data_output_path)\r\n\r\n meta_data = {\r\n \"task_type\": \"bert_classification\",\r\n \"processor_type\": processor.get_processor_name(),\r\n \"num_labels\": len(processor.get_labels()),\r\n \"train_data_size\": num_training_data,\r\n \"max_seq_length\": max_seq_length,\r\n }\r\n\r\n if eval_data_output_path:\r\n meta_data[\"eval_data_size\"] = len(eval_input_data_examples)\r\n\r\n return meta_data\r\n",
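The long comment inside convert_single_example above describes the [CLS]/[SEP] layout and segment ids. A toy, tokenizer-free illustration of the same packing and padding (pre-tokenized word pieces from that comment, made-up max_seq_length):

tokens_a = ['is', 'this', 'jack', '##son', '##ville', '?']
tokens_b = ['no', 'it', 'is', 'not', '.']

tokens = ['[CLS]'] + tokens_a + ['[SEP]']
segment_ids = [0] * len(tokens)                  # sentence A (plus [CLS]/[SEP]) -> type 0
tokens += tokens_b + ['[SEP]']
segment_ids += [1] * (len(tokens_b) + 1)         # sentence B and its [SEP] -> type 1

input_mask = [1] * len(tokens)                   # 1 = real token, 0 = padding
max_seq_length = 16
pad = max_seq_length - len(tokens)
segment_ids += [0] * pad
input_mask += [0] * pad

print(tokens)
print(segment_ids)   # [0]*8 + [1]*6 + [0]*2
print(input_mask)    # [1]*14 + [0]*2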
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"VRNN classes.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom collections import namedtuple\r\nimport functools\r\n\r\nimport sonnet as snt\r\nimport tensorflow as tf\r\n\r\nfrom fivo.models import base\r\n\r\n\r\nVRNNState = namedtuple(\"VRNNState\", \"rnn_state latent_encoded\")\r\n\r\n\r\nclass VRNN(object):\r\n \"\"\"Implementation of a Variational Recurrent Neural Network (VRNN).\r\n\r\n Introduced in \"A Recurrent Latent Variable Model for Sequential data\"\r\n by Chung et al. https://arxiv.org/pdf/1506.02216.pdf.\r\n\r\n The VRNN is a sequence model similar to an RNN that uses stochastic latent\r\n variables to improve its representational power. It can be thought of as a\r\n sequential analogue to the variational auto-encoder (VAE).\r\n\r\n The VRNN has a deterministic RNN as its backbone, represented by the\r\n sequence of RNN hidden states h_t. At each timestep, the RNN hidden state h_t\r\n is conditioned on the previous sequence element, x_{t-1}, as well as the\r\n latent state from the previous timestep, z_{t-1}.\r\n\r\n In this implementation of the VRNN the latent state z_t is Gaussian. The\r\n model's prior over z_t (also called the transition distribution) is\r\n distributed as Normal(mu_t, diag(sigma_t^2)) where mu_t and sigma_t are the\r\n mean and standard deviation output from a fully connected network that accepts\r\n the rnn hidden state h_t as input.\r\n\r\n The emission distribution p(x_t|z_t, h_t) is conditioned on the latent state\r\n z_t as well as the current RNN hidden state h_t via a fully connected network.\r\n\r\n To increase the modeling power of the VRNN, two additional networks are\r\n used to extract features from the data and the latent state. Those networks\r\n are called data_encoder and latent_encoder respectively.\r\n\r\n For an example of how to call the VRNN's methods see sample_step.\r\n\r\n There are a few differences between this exposition and the paper.\r\n First, the indexing scheme for h_t is different than the paper's -- what the\r\n paper calls h_t we call h_{t+1}. This is the same notation used by Fraccaro\r\n et al. to describe the VRNN in the paper linked above. Also, the VRNN paper\r\n uses VAE terminology to refer to the different internal networks, so it\r\n refers to the emission distribution as the decoder. This implementation also\r\n renames the functions phi_x and phi_z in the paper to data_encoder and\r\n latent_encoder.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n rnn_cell,\r\n data_encoder,\r\n latent_encoder,\r\n transition,\r\n emission,\r\n random_seed=None):\r\n \"\"\"Create a VRNN.\r\n\r\n Args:\r\n rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the\r\n deterministic backbone of the VRNN. 
The inputs to the RNN will be the\r\n encoded latent state of the previous timestep with shape\r\n [batch_size, encoded_latent_size] as well as the encoded input of the\r\n current timestep, a Tensor of shape [batch_size, encoded_data_size].\r\n data_encoder: A callable that accepts a batch of data x_t and\r\n 'encodes' it, e.g. runs it through a fully connected network. Must\r\n accept as argument the inputs x_t, a Tensor of the shape\r\n [batch_size, data_size] and return a Tensor of shape\r\n [batch_size, encoded_data_size]. This callable will be called multiple\r\n times in the VRNN cell so if scoping is not handled correctly then\r\n multiple copies of the variables in this network could be made. It is\r\n recommended to use a snt.nets.MLP module, which takes care of this for\r\n you.\r\n latent_encoder: A callable that accepts a latent state z_t and\r\n 'encodes' it, e.g. runs it through a fully connected network. Must\r\n accept as argument a Tensor of shape [batch_size, latent_size] and\r\n return a Tensor of shape [batch_size, encoded_latent_size].\r\n This callable must also have the property 'output_size' defined,\r\n returning encoded_latent_size.\r\n transition: A callable that implements the transition distribution\r\n p(z_t|h_t). Must accept as argument the previous RNN hidden state and\r\n return a tf.distributions.Normal distribution conditioned on the input.\r\n emission: A callable that implements the emission distribution\r\n p(x_t|z_t, h_t). Must accept as arguments the encoded latent state\r\n and the RNN hidden state and return a subclass of\r\n tf.distributions.Distribution that can be used to evaluate the logprob\r\n of the targets.\r\n random_seed: The seed for the random ops. Sets the seed for sample_step.\r\n \"\"\"\r\n self.random_seed = random_seed\r\n self.rnn_cell = rnn_cell\r\n self.data_encoder = data_encoder\r\n self.latent_encoder = latent_encoder\r\n self.encoded_z_size = latent_encoder.output_size\r\n self.state_size = (self.rnn_cell.state_size)\r\n self._transition = transition\r\n self._emission = emission\r\n\r\n def zero_state(self, batch_size, dtype):\r\n \"\"\"The initial state of the VRNN.\r\n\r\n Contains the initial state of the RNN and the inital encoded latent.\r\n\r\n Args:\r\n batch_size: The batch size.\r\n dtype: The data type of the VRNN.\r\n Returns:\r\n zero_state: The initial state of the VRNN.\r\n \"\"\"\r\n return VRNNState(\r\n rnn_state=self.rnn_cell.zero_state(batch_size, dtype),\r\n latent_encoded=tf.zeros(\r\n [batch_size, self.latent_encoder.output_size], dtype=dtype))\r\n\r\n def run_rnn(self, prev_rnn_state, prev_latent_encoded, inputs):\r\n \"\"\"Runs the deterministic RNN for one step.\r\n\r\n Args:\r\n prev_rnn_state: The state of the RNN from the previous timestep.\r\n prev_latent_encoded: Float Tensor of shape\r\n [batch_size, encoded_latent_size], the previous latent state z_{t-1}\r\n run through latent_encoder.\r\n inputs: A Tensor of shape [batch_size, data_size], the current inputs to\r\n the model. 
Most often this is x_{t-1}, the previous token in the\r\n observation sequence.\r\n Returns:\r\n rnn_out: The output of the RNN.\r\n rnn_state: The new state of the RNN.\r\n \"\"\"\r\n inputs_encoded = self.data_encoder(tf.to_float(inputs))\r\n rnn_inputs = tf.concat([inputs_encoded, prev_latent_encoded], axis=1)\r\n rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state)\r\n return rnn_out, rnn_state\r\n\r\n def transition(self, rnn_out):\r\n \"\"\"Computes the transition distribution p(z_t|h_t).\r\n\r\n Note that p(z_t | h_t) = p(z_t| z_{1:t-1}, x_{1:t-1})\r\n\r\n Args:\r\n rnn_out: The output of the rnn for the current timestep.\r\n Returns:\r\n p(z_t | h_t): A normal distribution with event shape\r\n [batch_size, latent_size].\r\n \"\"\"\r\n return self._transition(rnn_out)\r\n\r\n def emission(self, latent, rnn_out):\r\n \"\"\"Computes the emission distribution p(x_t | z_t, h_t).\r\n\r\n Note that p(x_t | z_t, h_t) = p(x_t | z_{1:t}, x_{1:t-1}).\r\n\r\n Args:\r\n latent: The stochastic latent state z_t.\r\n rnn_out: The output of the rnn for the current timestep.\r\n Returns:\r\n p(x_t | z_t, h_t): A distribution with event shape\r\n [batch_size, data_size].\r\n latent_encoded: The latent state encoded with latent_encoder. Should be\r\n passed to run_rnn on the next timestep.\r\n \"\"\"\r\n latent_encoded = self.latent_encoder(latent)\r\n return self._emission(latent_encoded, rnn_out), latent_encoded\r\n\r\n def sample_step(self, prev_state, inputs, unused_t):\r\n \"\"\"Samples one output from the model.\r\n\r\n Args:\r\n prev_state: The previous state of the model, a VRNNState containing the\r\n previous rnn state and the previous encoded latent.\r\n inputs: A Tensor of shape [batch_size, data_size], the current inputs to\r\n the model. Most often this is x_{t-1}, the previous token in the\r\n observation sequence.\r\n unused_t: The current timestep. Not used currently.\r\n Returns:\r\n new_state: The next state of the model, a VRNNState.\r\n xt: A float Tensor of shape [batch_size, data_size], an output sampled\r\n from the emission distribution.\r\n \"\"\"\r\n rnn_out, rnn_state = self.run_rnn(prev_state.rnn_state,\r\n prev_state.latent_encoded,\r\n inputs)\r\n p_zt = self.transition(rnn_out)\r\n zt = p_zt.sample(seed=self.random_seed)\r\n p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out)\r\n xt = p_xt_given_zt.sample(seed=self.random_seed)\r\n new_state = VRNNState(rnn_state=rnn_state, latent_encoded=latent_encoded)\r\n return new_state, tf.to_float(xt)\r\n\r\n# pylint: disable=invalid-name\r\n# pylint thinks this is a top-level constant.\r\nTrainableVRNNState = namedtuple(\"TrainableVRNNState\",\r\n VRNNState._fields + (\"rnn_out\",))\r\n# pylint: enable=g-invalid-name\r\n\r\n\r\nclass TrainableVRNN(VRNN, base.ELBOTrainableSequenceModel):\r\n \"\"\"A VRNN subclass with proposals and methods for training and evaluation.\r\n\r\n This class adds proposals used for training with importance-sampling based\r\n methods such as the ELBO. The model can be configured to propose from one\r\n of three proposals: a learned filtering proposal, a learned smoothing\r\n proposal, or the prior (i.e. the transition distribution).\r\n\r\n As described in the VRNN paper, the learned filtering proposal is\r\n parameterized by a fully connected neural network that accepts as input the\r\n current target x_t and the current rnn output h_t. 
The learned smoothing\r\n proposal is also given the hidden state of an RNN run in reverse over the\r\n inputs, so as to incorporate information about future observations. This\r\n smoothing proposal is not described in the VRNN paper.\r\n\r\n All learned proposals use the 'res_q' parameterization, meaning that instead\r\n of directly producing the mean of z_t, the proposal network predicts the\r\n 'residual' from the prior's mean. This is explored more in section 3.3 of\r\n https://arxiv.org/pdf/1605.07571.pdf.\r\n\r\n During training, the latent state z_t is sampled from the proposal and the\r\n reparameterization trick is used to provide low-variance gradients.\r\n\r\n Note that the VRNN paper uses VAE terminology to refer to the different\r\n internal networks, so the proposal is referred to as the encoder.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n rnn_cell,\r\n data_encoder,\r\n latent_encoder,\r\n transition,\r\n emission,\r\n proposal_type,\r\n proposal=None,\r\n rev_rnn_cell=None,\r\n tilt=None,\r\n random_seed=None):\r\n \"\"\"Create a trainable RNN.\r\n\r\n Args:\r\n rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the\r\n deterministic backbone of the VRNN. The inputs to the RNN will be the\r\n encoded latent state of the previous timestep with shape\r\n [batch_size, encoded_latent_size] as well as the encoded input of the\r\n current timestep, a Tensor of shape [batch_size, encoded_data_size].\r\n data_encoder: A callable that accepts a batch of data x_t and\r\n 'encodes' it, e.g. runs it through a fully connected network. Must\r\n accept as argument the inputs x_t, a Tensor of the shape\r\n [batch_size, data_size] and return a Tensor of shape\r\n [batch_size, encoded_data_size]. This callable will be called multiple\r\n times in the VRNN cell so if scoping is not handled correctly then\r\n multiple copies of the variables in this network could be made. It is\r\n recommended to use a snt.nets.MLP module, which takes care of this for\r\n you.\r\n latent_encoder: A callable that accepts a latent state z_t and\r\n 'encodes' it, e.g. runs it through a fully connected network. Must\r\n accept as argument a Tensor of shape [batch_size, latent_size] and\r\n return a Tensor of shape [batch_size, encoded_latent_size].\r\n This callable must also have the property 'output_size' defined,\r\n returning encoded_latent_size.\r\n transition: A callable that implements the transition distribution\r\n p(z_t|h_t). Must accept as argument the previous RNN hidden state and\r\n return a tf.distributions.Normal distribution conditioned on the input.\r\n emission: A callable that implements the emission distribution\r\n p(x_t|z_t, h_t). Must accept as arguments the encoded latent state\r\n and the RNN hidden state and return a subclass of\r\n tf.distributions.Distribution that can be used to evaluate the logprob\r\n of the targets.\r\n proposal_type: A string indicating the type of proposal to use. Can\r\n be either \"filtering\", \"smoothing\", or \"prior\". When proposal_type is\r\n \"filtering\" or \"smoothing\", proposal must be provided. When\r\n proposal_type is \"smoothing\", rev_rnn_cell must also be provided.\r\n proposal: A callable that implements the proposal q(z_t| h_t, x_{1:T}).\r\n If proposal_type is \"filtering\" then proposal must accept as arguments\r\n the current rnn output, the encoded target of the current timestep,\r\n and the mean of the prior. 
If proposal_type is \"smoothing\" then\r\n in addition to the current rnn output and the mean of the prior\r\n proposal must accept as arguments the output of the reverse rnn.\r\n proposal should return a tf.distributions.Normal distribution\r\n conditioned on its inputs. If proposal_type is \"prior\" this argument is\r\n ignored.\r\n rev_rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will aggregate\r\n observation statistics in the reverse direction. The inputs to the RNN\r\n will be the encoded reverse input of the current timestep, a Tensor of\r\n shape [batch_size, encoded_data_size].\r\n tilt: A callable that implements the log of a positive tilting function\r\n (ideally approximating log p(x_{t+1}|z_t, h_t). Must accept as arguments\r\n the encoded latent state and the RNN hidden state and return a subclass\r\n of tf.distributions.Distribution that can be used to evaluate the\r\n logprob of x_{t+1}. Optionally, None and then no tilt is used.\r\n random_seed: The seed for the random ops. Sets the seed for sample_step\r\n and __call__.\r\n \"\"\"\r\n super(TrainableVRNN, self).__init__(\r\n rnn_cell, data_encoder, latent_encoder,\r\n transition, emission, random_seed=random_seed)\r\n self.rev_rnn_cell = rev_rnn_cell\r\n self._tilt = tilt\r\n assert proposal_type in [\"filtering\", \"smoothing\", \"prior\"]\r\n self._proposal = proposal\r\n self.proposal_type = proposal_type\r\n if proposal_type != \"prior\":\r\n assert proposal, \"If not proposing from the prior, must provide proposal.\"\r\n if proposal_type == \"smoothing\":\r\n assert rev_rnn_cell, \"Must provide rev_rnn_cell for smoothing proposal.\"\r\n\r\n def zero_state(self, batch_size, dtype):\r\n super_state = super(TrainableVRNN, self).zero_state(batch_size, dtype)\r\n return TrainableVRNNState(\r\n rnn_out=tf.zeros([batch_size, self.rnn_cell.output_size], dtype=dtype),\r\n **super_state._asdict())\r\n\r\n def set_observations(self, observations, seq_lengths):\r\n \"\"\"Stores the model's observations.\r\n\r\n Stores the observations (inputs and targets) in TensorArrays and precomputes\r\n things for later like the reverse RNN output and encoded targets.\r\n\r\n Args:\r\n observations: The observations of the model, a tuple containing two\r\n Tensors of shape [max_seq_len, batch_size, data_size]. 
The Tensors\r\n should be the inputs and targets, respectively.\r\n seq_lengths: An int Tensor of shape [batch_size] containing the length\r\n of each sequence in observations.\r\n \"\"\"\r\n inputs, targets = observations\r\n self.seq_lengths = seq_lengths\r\n self.max_seq_len = tf.reduce_max(seq_lengths)\r\n self.inputs_ta = base.ta_for_tensor(inputs, clear_after_read=False)\r\n self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False)\r\n targets_encoded = base.encode_all(targets, self.data_encoder)\r\n self.targets_encoded_ta = base.ta_for_tensor(targets_encoded,\r\n clear_after_read=False)\r\n if self.rev_rnn_cell:\r\n reverse_targets_encoded = tf.reverse_sequence(\r\n targets_encoded, seq_lengths, seq_axis=0, batch_axis=1)\r\n # Compute the reverse rnn over the targets.\r\n reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell,\r\n reverse_targets_encoded,\r\n time_major=True,\r\n dtype=tf.float32)\r\n reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths,\r\n seq_axis=0, batch_axis=1)\r\n self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out,\r\n clear_after_read=False)\r\n\r\n def _filtering_proposal(self, rnn_out, prior, t):\r\n \"\"\"Computes the filtering proposal distribution.\"\"\"\r\n return self._proposal(rnn_out,\r\n self.targets_encoded_ta.read(t),\r\n prior_mu=prior.mean())\r\n\r\n def _smoothing_proposal(self, rnn_out, prior, t):\r\n \"\"\"Computes the smoothing proposal distribution.\"\"\"\r\n return self._proposal(rnn_out,\r\n smoothing_tensors=[self.reverse_rnn_ta.read(t)],\r\n prior_mu=prior.mean())\r\n\r\n def proposal(self, rnn_out, prior, t):\r\n \"\"\"Computes the proposal distribution specified by proposal_type.\r\n\r\n Args:\r\n rnn_out: The output of the rnn for the current timestep.\r\n prior: A tf.distributions.Normal distribution representing the prior\r\n over z_t, p(z_t | z_{1:t-1}, x_{1:t-1}). 
Used for 'res_q'.\r\n t: A scalar int Tensor, the current timestep.\r\n \"\"\"\r\n if self.proposal_type == \"filtering\":\r\n return self._filtering_proposal(rnn_out, prior, t)\r\n elif self.proposal_type == \"smoothing\":\r\n return self._smoothing_proposal(rnn_out, prior, t)\r\n elif self.proposal_type == \"prior\":\r\n return self.transition(rnn_out)\r\n\r\n def tilt(self, rnn_out, latent_encoded, targets):\r\n r_func = self._tilt(rnn_out, latent_encoded)\r\n return tf.reduce_sum(r_func.log_prob(targets), axis=-1)\r\n\r\n def propose_and_weight(self, state, t):\r\n \"\"\"Runs the model and computes importance weights for one timestep.\r\n\r\n Runs the model and computes importance weights, sampling from the proposal\r\n instead of the transition/prior.\r\n\r\n Args:\r\n state: The previous state of the model, a TrainableVRNNState containing\r\n the previous rnn state, the previous rnn outs, and the previous encoded\r\n latent.\r\n t: A scalar integer Tensor, the current timestep.\r\n Returns:\r\n weights: A float Tensor of shape [batch_size].\r\n new_state: The new state of the model.\r\n \"\"\"\r\n inputs = self.inputs_ta.read(t)\r\n targets = self.targets_ta.read(t)\r\n rnn_out, next_rnn_state = self.run_rnn(state.rnn_state,\r\n state.latent_encoded,\r\n inputs)\r\n p_zt = self.transition(rnn_out)\r\n q_zt = self.proposal(rnn_out, p_zt, t)\r\n zt = q_zt.sample(seed=self.random_seed)\r\n p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out)\r\n log_p_xt_given_zt = tf.reduce_sum(p_xt_given_zt.log_prob(targets), axis=-1)\r\n log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=-1)\r\n log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=-1)\r\n weights = log_p_zt + log_p_xt_given_zt - log_q_zt\r\n if self._tilt:\r\n prev_log_r = tf.cond(\r\n tf.greater(t, 0),\r\n lambda: self.tilt(state.rnn_out, state.latent_encoded, targets),\r\n lambda: 0.) # On the first step, prev_log_r = 0.\r\n log_r = tf.cond(\r\n tf.less(t + 1, self.max_seq_len),\r\n lambda: self.tilt(rnn_out, latent_encoded, self.targets_ta.read(t+1)),\r\n lambda: 0.)\r\n # On the last step, log_r = 0.\r\n log_r *= tf.to_float(t < self.seq_lengths - 1)\r\n weights += log_r - prev_log_r\r\n new_state = TrainableVRNNState(rnn_state=next_rnn_state,\r\n rnn_out=rnn_out,\r\n latent_encoded=latent_encoded)\r\n return weights, new_state\r\n\r\n\r\n_DEFAULT_INITIALIZERS = {\"w\": tf.contrib.layers.xavier_initializer(),\r\n \"b\": tf.zeros_initializer()}\r\n\r\n\r\ndef create_vrnn(\r\n data_size,\r\n latent_size,\r\n emission_class,\r\n rnn_hidden_size=None,\r\n fcnet_hidden_sizes=None,\r\n encoded_data_size=None,\r\n encoded_latent_size=None,\r\n sigma_min=0.0,\r\n raw_sigma_bias=0.25,\r\n emission_bias_init=0.0,\r\n use_tilt=False,\r\n proposal_type=\"filtering\",\r\n initializers=None,\r\n random_seed=None):\r\n \"\"\"A factory method for creating VRNN cells.\r\n\r\n Args:\r\n data_size: The dimension of the vectors that make up the data sequences.\r\n latent_size: The size of the stochastic latent state of the VRNN.\r\n emission_class: The class of the emission distribution. Can be either\r\n ConditionalNormalDistribution or ConditionalBernoulliDistribution.\r\n rnn_hidden_size: The hidden state dimension of the RNN that forms the\r\n deterministic part of this VRNN. If None, then it defaults\r\n to latent_size.\r\n fcnet_hidden_sizes: A list of python integers, the size of the hidden\r\n layers of the fully connected networks that parameterize the conditional\r\n distributions of the VRNN. 
If None, then it defaults to one hidden\r\n layer of size latent_size.\r\n encoded_data_size: The size of the output of the data encoding network. If\r\n None, defaults to latent_size.\r\n encoded_latent_size: The size of the output of the latent state encoding\r\n network. If None, defaults to latent_size.\r\n sigma_min: The minimum value that the standard deviation of the\r\n distribution over the latent state can take.\r\n raw_sigma_bias: A scalar that is added to the raw standard deviation\r\n output from the neural networks that parameterize the prior and\r\n approximate posterior. Useful for preventing standard deviations close\r\n to zero.\r\n emission_bias_init: A bias to added to the raw output of the fully\r\n connected network that parameterizes the emission distribution. Useful\r\n for initalizing the mean of the distribution to a sensible starting point\r\n such as the mean of the training data. Only used with Bernoulli generative\r\n distributions.\r\n use_tilt: If true, create a VRNN with a tilting function.\r\n proposal_type: The type of proposal to use. Can be \"filtering\", \"smoothing\",\r\n or \"prior\".\r\n initializers: The variable intitializers to use for the fully connected\r\n networks and RNN cell. Must be a dictionary mapping the keys 'w' and 'b'\r\n to the initializers for the weights and biases. Defaults to xavier for\r\n the weights and zeros for the biases when initializers is None.\r\n random_seed: A random seed for the VRNN resampling operations.\r\n Returns:\r\n model: A TrainableVRNN object.\r\n \"\"\"\r\n if rnn_hidden_size is None:\r\n rnn_hidden_size = latent_size\r\n if fcnet_hidden_sizes is None:\r\n fcnet_hidden_sizes = [latent_size]\r\n if encoded_data_size is None:\r\n encoded_data_size = latent_size\r\n if encoded_latent_size is None:\r\n encoded_latent_size = latent_size\r\n if initializers is None:\r\n initializers = _DEFAULT_INITIALIZERS\r\n data_encoder = snt.nets.MLP(\r\n output_sizes=fcnet_hidden_sizes + [encoded_data_size],\r\n initializers=initializers,\r\n name=\"data_encoder\")\r\n latent_encoder = snt.nets.MLP(\r\n output_sizes=fcnet_hidden_sizes + [encoded_latent_size],\r\n initializers=initializers,\r\n name=\"latent_encoder\")\r\n transition = base.ConditionalNormalDistribution(\r\n size=latent_size,\r\n hidden_layer_sizes=fcnet_hidden_sizes,\r\n sigma_min=sigma_min,\r\n raw_sigma_bias=raw_sigma_bias,\r\n initializers=initializers,\r\n name=\"prior\")\r\n # Construct the emission distribution.\r\n if emission_class == base.ConditionalBernoulliDistribution:\r\n # For Bernoulli distributed outputs, we initialize the bias so that the\r\n # network generates on average the mean from the training set.\r\n emission_dist = functools.partial(base.ConditionalBernoulliDistribution,\r\n bias_init=emission_bias_init)\r\n else:\r\n emission_dist = base.ConditionalNormalDistribution\r\n emission = emission_dist(\r\n size=data_size,\r\n hidden_layer_sizes=fcnet_hidden_sizes,\r\n initializers=initializers,\r\n name=\"generative\")\r\n # Construct the proposal distribution.\r\n if proposal_type in [\"filtering\", \"smoothing\"]:\r\n proposal = base.NormalApproximatePosterior(\r\n size=latent_size,\r\n hidden_layer_sizes=fcnet_hidden_sizes,\r\n sigma_min=sigma_min,\r\n raw_sigma_bias=raw_sigma_bias,\r\n initializers=initializers,\r\n smoothing=(proposal_type == \"smoothing\"),\r\n name=\"approximate_posterior\")\r\n else:\r\n proposal = None\r\n\r\n if use_tilt:\r\n tilt = emission_dist(\r\n size=data_size,\r\n 
hidden_layer_sizes=fcnet_hidden_sizes,\r\n initializers=initializers,\r\n name=\"tilt\")\r\n else:\r\n tilt = None\r\n\r\n rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size,\r\n initializer=initializers[\"w\"])\r\n rev_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size,\r\n initializer=initializers[\"w\"])\r\n return TrainableVRNN(\r\n rnn_cell, data_encoder, latent_encoder, transition,\r\n emission, proposal_type, proposal=proposal, rev_rnn_cell=rev_rnn_cell,\r\n tilt=tilt, random_seed=random_seed)\r\n",
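A minimal usage sketch for the create_vrnn factory dumped above. The import path fivo.models.{base, vrnn} and the data_size/latent_size values are illustrative assumptions, not taken from this file; only the keyword arguments and the set_observations contract come from the source code above.

# Sketch only: assumes a FIVO-style package layout `fivo.models.{base, vrnn}` (TF1 graph mode).
import tensorflow as tf
from fivo.models import base, vrnn  # assumed import path

# Build a VRNN with a Bernoulli emission and a learned filtering proposal.
model = vrnn.create_vrnn(
    data_size=88,                # illustrative, e.g. a binarized piano-roll width
    latent_size=64,
    emission_class=base.ConditionalBernoulliDistribution,
    proposal_type="filtering",   # one of "filtering", "smoothing", "prior"
    use_tilt=False,
    random_seed=1234)

# set_observations expects (inputs, targets), each [max_seq_len, batch_size, data_size],
# plus an int Tensor of per-sequence lengths with shape [batch_size].
inputs = tf.zeros([20, 4, 88])
targets = tf.zeros([20, 4, 88])
model.set_observations((inputs, targets), seq_lengths=tf.fill([4], 20))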
"# Copyright 2016 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nr\"\"\"Utility functions for Real NVP.\r\n\"\"\"\r\n\r\n# pylint: disable=dangerous-default-value\r\n\r\nimport numpy\r\nfrom six.moves import xrange\r\nimport tensorflow as tf\r\nfrom tensorflow.python.framework import ops\r\n\r\nDEFAULT_BN_LAG = .0\r\n\r\n\r\ndef stable_var(input_, mean=None, axes=[0]):\r\n \"\"\"Numerically more stable variance computation.\"\"\"\r\n if mean is None:\r\n mean = tf.reduce_mean(input_, axes)\r\n res = tf.square(input_ - mean)\r\n max_sqr = tf.reduce_max(res, axes)\r\n res /= max_sqr\r\n res = tf.reduce_mean(res, axes)\r\n res *= max_sqr\r\n\r\n return res\r\n\r\n\r\ndef variable_on_cpu(name, shape, initializer, trainable=True):\r\n \"\"\"Helper to create a Variable stored on CPU memory.\r\n\r\n Args:\r\n name: name of the variable\r\n shape: list of ints\r\n initializer: initializer for Variable\r\n trainable: boolean defining if the variable is for training\r\n Returns:\r\n Variable Tensor\r\n \"\"\"\r\n var = tf.get_variable(\r\n name, shape, initializer=initializer, trainable=trainable)\r\n return var\r\n\r\n\r\n# layers\r\ndef conv_layer(input_,\r\n filter_size,\r\n dim_in,\r\n dim_out,\r\n name,\r\n stddev=1e-2,\r\n strides=[1, 1, 1, 1],\r\n padding=\"SAME\",\r\n nonlinearity=None,\r\n bias=False,\r\n weight_norm=False,\r\n scale=False):\r\n \"\"\"Convolutional layer.\"\"\"\r\n with tf.variable_scope(name) as scope:\r\n weights = variable_on_cpu(\r\n \"weights\",\r\n filter_size + [dim_in, dim_out],\r\n tf.random_uniform_initializer(\r\n minval=-stddev, maxval=stddev))\r\n # weight normalization\r\n if weight_norm:\r\n weights /= tf.sqrt(tf.reduce_sum(tf.square(weights), [0, 1, 2]))\r\n if scale:\r\n magnitude = variable_on_cpu(\r\n \"magnitude\", [dim_out],\r\n tf.constant_initializer(\r\n stddev * numpy.sqrt(dim_in * numpy.prod(filter_size) / 12.)))\r\n weights *= magnitude\r\n res = input_\r\n # handling filter size bigger than image size\r\n if hasattr(input_, \"shape\"):\r\n if input_.get_shape().as_list()[1] < filter_size[0]:\r\n pad_1 = tf.zeros([\r\n input_.get_shape().as_list()[0],\r\n filter_size[0] - input_.get_shape().as_list()[1],\r\n input_.get_shape().as_list()[2],\r\n input_.get_shape().as_list()[3]\r\n ])\r\n pad_2 = tf.zeros([\r\n input_.get_shape().as_list[0],\r\n filter_size[0],\r\n filter_size[1] - input_.get_shape().as_list()[2],\r\n input_.get_shape().as_list()[3]\r\n ])\r\n res = tf.concat(axis=1, values=[pad_1, res])\r\n res = tf.concat(axis=2, values=[pad_2, res])\r\n res = tf.nn.conv2d(\r\n input=res,\r\n filter=weights,\r\n strides=strides,\r\n padding=padding,\r\n name=scope.name)\r\n\r\n if hasattr(input_, \"shape\"):\r\n if input_.get_shape().as_list()[1] < filter_size[0]:\r\n res = tf.slice(res, [\r\n 0, filter_size[0] - input_.get_shape().as_list()[1],\r\n filter_size[1] - input_.get_shape().as_list()[2], 
0\r\n ], [-1, -1, -1, -1])\r\n\r\n if bias:\r\n biases = variable_on_cpu(\"biases\", [dim_out], tf.constant_initializer(0.))\r\n res = tf.nn.bias_add(res, biases)\r\n if nonlinearity is not None:\r\n res = nonlinearity(res)\r\n\r\n return res\r\n\r\n\r\ndef max_pool_2x2(input_):\r\n \"\"\"Max pooling.\"\"\"\r\n return tf.nn.max_pool(\r\n input_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\r\n\r\n\r\ndef depool_2x2(input_, stride=2):\r\n \"\"\"Depooling.\"\"\"\r\n shape = input_.get_shape().as_list()\r\n batch_size = shape[0]\r\n height = shape[1]\r\n width = shape[2]\r\n channels = shape[3]\r\n res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])\r\n res = tf.concat(\r\n axis=2, values=[res, tf.zeros([batch_size, height, stride - 1, width, 1, channels])])\r\n res = tf.concat(axis=4, values=[\r\n res, tf.zeros([batch_size, height, stride, width, stride - 1, channels])\r\n ])\r\n res = tf.reshape(res, [batch_size, stride * height, stride * width, channels])\r\n\r\n return res\r\n\r\n\r\n# random flip on a batch of images\r\ndef batch_random_flip(input_):\r\n \"\"\"Simultaneous horizontal random flip.\"\"\"\r\n if isinstance(input_, (float, int)):\r\n return input_\r\n shape = input_.get_shape().as_list()\r\n batch_size = shape[0]\r\n height = shape[1]\r\n width = shape[2]\r\n channels = shape[3]\r\n res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_)\r\n res = [elem[0, :, :, :] for elem in res]\r\n res = [tf.image.random_flip_left_right(elem) for elem in res]\r\n res = [tf.reshape(elem, [1, height, width, channels]) for elem in res]\r\n res = tf.concat(axis=0, values=res)\r\n\r\n return res\r\n\r\n\r\n# build a one hot representation corresponding to the integer tensor\r\n# the one-hot dimension is appended to the integer tensor shape\r\ndef as_one_hot(input_, n_indices):\r\n \"\"\"Convert indices to one-hot.\"\"\"\r\n shape = input_.get_shape().as_list()\r\n n_elem = numpy.prod(shape)\r\n indices = tf.range(n_elem)\r\n indices = tf.cast(indices, tf.int64)\r\n indices_input = tf.concat(axis=0, values=[indices, tf.reshape(input_, [-1])])\r\n indices_input = tf.reshape(indices_input, [2, -1])\r\n indices_input = tf.transpose(indices_input)\r\n res = tf.sparse_to_dense(\r\n indices_input, [n_elem, n_indices], 1., 0., name=\"flat_one_hot\")\r\n res = tf.reshape(res, [elem for elem in shape] + [n_indices])\r\n\r\n return res\r\n\r\n\r\ndef squeeze_2x2(input_):\r\n \"\"\"Squeezing operation: reshape to convert space to channels.\"\"\"\r\n return squeeze_nxn(input_, n_factor=2)\r\n\r\n\r\ndef squeeze_nxn(input_, n_factor=2):\r\n \"\"\"Squeezing operation: reshape to convert space to channels.\"\"\"\r\n if isinstance(input_, (float, int)):\r\n return input_\r\n shape = input_.get_shape().as_list()\r\n batch_size = shape[0]\r\n height = shape[1]\r\n width = shape[2]\r\n channels = shape[3]\r\n if height % n_factor != 0:\r\n raise ValueError(\"Height not divisible by %d.\" % n_factor)\r\n if width % n_factor != 0:\r\n raise ValueError(\"Width not divisible by %d.\" % n_factor)\r\n res = tf.reshape(\r\n input_,\r\n [batch_size,\r\n height // n_factor,\r\n n_factor, width // n_factor,\r\n n_factor, channels])\r\n res = tf.transpose(res, [0, 1, 3, 5, 2, 4])\r\n res = tf.reshape(\r\n res,\r\n [batch_size,\r\n height // n_factor,\r\n width // n_factor,\r\n channels * n_factor * n_factor])\r\n\r\n return res\r\n\r\n\r\ndef unsqueeze_2x2(input_):\r\n \"\"\"Unsqueezing operation: reshape to convert channels into space.\"\"\"\r\n if isinstance(input_, (float, 
int)):\r\n return input_\r\n shape = input_.get_shape().as_list()\r\n batch_size = shape[0]\r\n height = shape[1]\r\n width = shape[2]\r\n channels = shape[3]\r\n if channels % 4 != 0:\r\n raise ValueError(\"Number of channels not divisible by 4.\")\r\n res = tf.reshape(input_, [batch_size, height, width, channels // 4, 2, 2])\r\n res = tf.transpose(res, [0, 1, 4, 2, 5, 3])\r\n res = tf.reshape(res, [batch_size, 2 * height, 2 * width, channels // 4])\r\n\r\n return res\r\n\r\n\r\n# batch norm\r\ndef batch_norm(input_,\r\n dim,\r\n name,\r\n scale=True,\r\n train=True,\r\n epsilon=1e-8,\r\n decay=.1,\r\n axes=[0],\r\n bn_lag=DEFAULT_BN_LAG):\r\n \"\"\"Batch normalization.\"\"\"\r\n # create variables\r\n with tf.variable_scope(name):\r\n var = variable_on_cpu(\r\n \"var\", [dim], tf.constant_initializer(1.), trainable=False)\r\n mean = variable_on_cpu(\r\n \"mean\", [dim], tf.constant_initializer(0.), trainable=False)\r\n step = variable_on_cpu(\"step\", [], tf.constant_initializer(0.), trainable=False)\r\n if scale:\r\n gamma = variable_on_cpu(\"gamma\", [dim], tf.constant_initializer(1.))\r\n beta = variable_on_cpu(\"beta\", [dim], tf.constant_initializer(0.))\r\n # choose the appropriate moments\r\n if train:\r\n used_mean, used_var = tf.nn.moments(input_, axes, name=\"batch_norm\")\r\n cur_mean, cur_var = used_mean, used_var\r\n if bn_lag > 0.:\r\n used_mean -= (1. - bn_lag) * (used_mean - tf.stop_gradient(mean))\r\n used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var))\r\n used_mean /= (1. - bn_lag**(step + 1))\r\n used_var /= (1. - bn_lag**(step + 1))\r\n else:\r\n used_mean, used_var = mean, var\r\n cur_mean, cur_var = used_mean, used_var\r\n\r\n # normalize\r\n res = (input_ - used_mean) / tf.sqrt(used_var + epsilon)\r\n # de-normalize\r\n if scale:\r\n res *= gamma\r\n res += beta\r\n\r\n # update variables\r\n if train:\r\n with tf.name_scope(name, \"AssignMovingAvg\", [mean, cur_mean, decay]):\r\n with ops.colocate_with(mean):\r\n new_mean = tf.assign_sub(\r\n mean,\r\n tf.check_numerics(decay * (mean - cur_mean), \"NaN in moving mean.\"))\r\n with tf.name_scope(name, \"AssignMovingAvg\", [var, cur_var, decay]):\r\n with ops.colocate_with(var):\r\n new_var = tf.assign_sub(\r\n var,\r\n tf.check_numerics(decay * (var - cur_var),\r\n \"NaN in moving variance.\"))\r\n with tf.name_scope(name, \"IncrementTime\", [step]):\r\n with ops.colocate_with(step):\r\n new_step = tf.assign_add(step, 1.)\r\n res += 0. 
* new_mean * new_var * new_step\r\n\r\n return res\r\n\r\n\r\n# batch normalization taking into account the volume transformation\r\ndef batch_norm_log_diff(input_,\r\n dim,\r\n name,\r\n train=True,\r\n epsilon=1e-8,\r\n decay=.1,\r\n axes=[0],\r\n reuse=None,\r\n bn_lag=DEFAULT_BN_LAG):\r\n \"\"\"Batch normalization with corresponding log determinant Jacobian.\"\"\"\r\n if reuse is None:\r\n reuse = not train\r\n # create variables\r\n with tf.variable_scope(name) as scope:\r\n if reuse:\r\n scope.reuse_variables()\r\n var = variable_on_cpu(\r\n \"var\", [dim], tf.constant_initializer(1.), trainable=False)\r\n mean = variable_on_cpu(\r\n \"mean\", [dim], tf.constant_initializer(0.), trainable=False)\r\n step = variable_on_cpu(\"step\", [], tf.constant_initializer(0.), trainable=False)\r\n # choose the appropriate moments\r\n if train:\r\n used_mean, used_var = tf.nn.moments(input_, axes, name=\"batch_norm\")\r\n cur_mean, cur_var = used_mean, used_var\r\n if bn_lag > 0.:\r\n used_var = stable_var(input_=input_, mean=used_mean, axes=axes)\r\n cur_var = used_var\r\n used_mean -= (1 - bn_lag) * (used_mean - tf.stop_gradient(mean))\r\n used_mean /= (1. - bn_lag**(step + 1))\r\n used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var))\r\n used_var /= (1. - bn_lag**(step + 1))\r\n else:\r\n used_mean, used_var = mean, var\r\n cur_mean, cur_var = used_mean, used_var\r\n\r\n # update variables\r\n if train:\r\n with tf.name_scope(name, \"AssignMovingAvg\", [mean, cur_mean, decay]):\r\n with ops.colocate_with(mean):\r\n new_mean = tf.assign_sub(\r\n mean,\r\n tf.check_numerics(\r\n decay * (mean - cur_mean), \"NaN in moving mean.\"))\r\n with tf.name_scope(name, \"AssignMovingAvg\", [var, cur_var, decay]):\r\n with ops.colocate_with(var):\r\n new_var = tf.assign_sub(\r\n var,\r\n tf.check_numerics(decay * (var - cur_var),\r\n \"NaN in moving variance.\"))\r\n with tf.name_scope(name, \"IncrementTime\", [step]):\r\n with ops.colocate_with(step):\r\n new_step = tf.assign_add(step, 1.)\r\n used_var += 0. * new_mean * new_var * new_step\r\n used_var += epsilon\r\n\r\n return used_mean, used_var\r\n\r\n\r\ndef convnet(input_,\r\n dim_in,\r\n dim_hid,\r\n filter_sizes,\r\n dim_out,\r\n name,\r\n use_batch_norm=True,\r\n train=True,\r\n nonlinearity=tf.nn.relu):\r\n \"\"\"Chaining of convolutional layers.\"\"\"\r\n dims_in = [dim_in] + dim_hid[:-1]\r\n dims_out = dim_hid\r\n res = input_\r\n\r\n bias = (not use_batch_norm)\r\n with tf.variable_scope(name):\r\n for layer_idx in xrange(len(dim_hid)):\r\n res = conv_layer(\r\n input_=res,\r\n filter_size=filter_sizes[layer_idx],\r\n dim_in=dims_in[layer_idx],\r\n dim_out=dims_out[layer_idx],\r\n name=\"h_%d\" % layer_idx,\r\n stddev=1e-2,\r\n nonlinearity=None,\r\n bias=bias)\r\n if use_batch_norm:\r\n res = batch_norm(\r\n input_=res,\r\n dim=dims_out[layer_idx],\r\n name=\"bn_%d\" % layer_idx,\r\n scale=(nonlinearity == tf.nn.relu),\r\n train=train,\r\n epsilon=1e-8,\r\n axes=[0, 1, 2])\r\n if nonlinearity is not None:\r\n res = nonlinearity(res)\r\n\r\n res = conv_layer(\r\n input_=res,\r\n filter_size=filter_sizes[-1],\r\n dim_in=dims_out[-1],\r\n dim_out=dim_out,\r\n name=\"out\",\r\n stddev=1e-2,\r\n nonlinearity=None)\r\n\r\n return res\r\n\r\n\r\n# distributions\r\n# log-likelihood estimation\r\ndef standard_normal_ll(input_):\r\n \"\"\"Log-likelihood of standard Gaussian distribution.\"\"\"\r\n res = -.5 * (tf.square(input_) + numpy.log(2. 
* numpy.pi))\r\n\r\n return res\r\n\r\n\r\ndef standard_normal_sample(shape):\r\n \"\"\"Samples from standard Gaussian distribution.\"\"\"\r\n return tf.random_normal(shape)\r\n\r\n\r\nSQUEEZE_MATRIX = numpy.array([[[[1., 0., 0., 0.]], [[0., 0., 1., 0.]]],\r\n [[[0., 0., 0., 1.]], [[0., 1., 0., 0.]]]])\r\n\r\n\r\ndef squeeze_2x2_ordered(input_, reverse=False):\r\n \"\"\"Squeezing operation with a controlled ordering.\"\"\"\r\n shape = input_.get_shape().as_list()\r\n batch_size = shape[0]\r\n height = shape[1]\r\n width = shape[2]\r\n channels = shape[3]\r\n if reverse:\r\n if channels % 4 != 0:\r\n raise ValueError(\"Number of channels not divisible by 4.\")\r\n channels /= 4\r\n else:\r\n if height % 2 != 0:\r\n raise ValueError(\"Height not divisible by 2.\")\r\n if width % 2 != 0:\r\n raise ValueError(\"Width not divisible by 2.\")\r\n weights = numpy.zeros((2, 2, channels, 4 * channels))\r\n for idx_ch in xrange(channels):\r\n slice_2 = slice(idx_ch, (idx_ch + 1))\r\n slice_3 = slice((idx_ch * 4), ((idx_ch + 1) * 4))\r\n weights[:, :, slice_2, slice_3] = SQUEEZE_MATRIX\r\n shuffle_channels = [idx_ch * 4 for idx_ch in xrange(channels)]\r\n shuffle_channels += [idx_ch * 4 + 1 for idx_ch in xrange(channels)]\r\n shuffle_channels += [idx_ch * 4 + 2 for idx_ch in xrange(channels)]\r\n shuffle_channels += [idx_ch * 4 + 3 for idx_ch in xrange(channels)]\r\n shuffle_channels = numpy.array(shuffle_channels)\r\n weights = weights[:, :, :, shuffle_channels].astype(\"float32\")\r\n if reverse:\r\n res = tf.nn.conv2d_transpose(\r\n value=input_,\r\n filter=weights,\r\n output_shape=[batch_size, height * 2, width * 2, channels],\r\n strides=[1, 2, 2, 1],\r\n padding=\"SAME\",\r\n name=\"unsqueeze_2x2\")\r\n else:\r\n res = tf.nn.conv2d(\r\n input=input_,\r\n filter=weights,\r\n strides=[1, 2, 2, 1],\r\n padding=\"SAME\",\r\n name=\"squeeze_2x2\")\r\n\r\n return res\r\n",
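The squeeze/unsqueeze helpers above are pure reshape/transpose graph ops, so their space-to-channels semantics can be checked outside TensorFlow. A NumPy-only sketch mirroring squeeze_nxn and unsqueeze_2x2 (illustrative; the TF versions above remain the operative code):

import numpy as np

def squeeze_nxn_np(x, n_factor=2):
    # Mirror of squeeze_nxn above: [B, H, W, C] -> [B, H/n, W/n, C*n*n].
    b, h, w, c = x.shape
    assert h % n_factor == 0 and w % n_factor == 0
    res = x.reshape(b, h // n_factor, n_factor, w // n_factor, n_factor, c)
    res = res.transpose(0, 1, 3, 5, 2, 4)
    return res.reshape(b, h // n_factor, w // n_factor, c * n_factor * n_factor)

def unsqueeze_2x2_np(x):
    # Mirror of unsqueeze_2x2 above: [B, H, W, C] -> [B, 2H, 2W, C/4].
    b, h, w, c = x.shape
    assert c % 4 == 0
    res = x.reshape(b, h, w, c // 4, 2, 2)
    res = res.transpose(0, 1, 4, 2, 5, 3)
    return res.reshape(b, 2 * h, 2 * w, c // 4)

# Round trip is the identity, matching the TF pair above.
x = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
assert np.array_equal(unsqueeze_2x2_np(squeeze_nxn_np(x, 2)), x)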
"# Copyright 2016 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Various function to manipulate graphs for computing distances.\r\n\"\"\"\r\nimport skimage.morphology\r\nimport numpy as np\r\nimport networkx as nx\r\nimport itertools\r\nimport logging\r\nfrom datasets.nav_env import get_path_ids\r\nimport graph_tool as gt\r\nimport graph_tool.topology\r\nimport graph_tool.generation\r\nimport src.utils as utils\r\n\r\n# Compute shortest path from all nodes to or from all source nodes\r\ndef get_distance_node_list(gtG, source_nodes, direction, weights=None):\r\n gtG_ = gt.Graph(gtG)\r\n v = gtG_.add_vertex()\r\n\r\n if weights is not None:\r\n weights = gtG_.edge_properties[weights]\r\n\r\n for s in source_nodes:\r\n e = gtG_.add_edge(s, int(v))\r\n if weights is not None:\r\n weights[e] = 0.\r\n\r\n if direction == 'to':\r\n dist = gt.topology.shortest_distance(\r\n gt.GraphView(gtG_, reversed=True), source=gtG_.vertex(int(v)),\r\n target=None, weights=weights)\r\n elif direction == 'from':\r\n dist = gt.topology.shortest_distance(\r\n gt.GraphView(gtG_, reversed=False), source=gtG_.vertex(int(v)),\r\n target=None, weights=weights)\r\n dist = np.array(dist.get_array())\r\n dist = dist[:-1]\r\n if weights is None:\r\n dist = dist-1\r\n return dist\r\n\r\n# Functions for semantically labelling nodes in the traversal graph.\r\ndef generate_lattice(sz_x, sz_y):\r\n \"\"\"Generates a lattice with sz_x vertices along x and sz_y vertices along y\r\n direction Each of these vertices is step_size distance apart. Origin is at\r\n (0,0). 
\"\"\"\r\n g = gt.generation.lattice([sz_x, sz_y])\r\n x, y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))\r\n x = np.reshape(x, [-1,1]); y = np.reshape(y, [-1,1]);\r\n nodes = np.concatenate((x,y), axis=1)\r\n return g, nodes\r\n\r\ndef add_diagonal_edges(g, nodes, sz_x, sz_y, edge_len):\r\n offset = [sz_x+1, sz_x-1]\r\n for o in offset:\r\n s = np.arange(nodes.shape[0]-o-1)\r\n t = s + o\r\n ind = np.all(np.abs(nodes[s,:] - nodes[t,:]) == np.array([[1,1]]), axis=1)\r\n s = s[ind][:,np.newaxis]\r\n t = t[ind][:,np.newaxis]\r\n st = np.concatenate((s,t), axis=1)\r\n for i in range(st.shape[0]):\r\n e = g.add_edge(st[i,0], st[i,1], add_missing=False)\r\n g.ep['wts'][e] = edge_len\r\n\r\ndef convert_traversible_to_graph(traversible, ff_cost=1., fo_cost=1.,\r\n oo_cost=1., connectivity=4):\r\n assert(connectivity == 4 or connectivity == 8)\r\n\r\n sz_x = traversible.shape[1]\r\n sz_y = traversible.shape[0]\r\n g, nodes = generate_lattice(sz_x, sz_y)\r\n\r\n # Assign costs.\r\n edge_wts = g.new_edge_property('float')\r\n g.edge_properties['wts'] = edge_wts\r\n wts = np.ones(g.num_edges(), dtype=np.float32)\r\n edge_wts.get_array()[:] = wts\r\n\r\n if connectivity == 8:\r\n add_diagonal_edges(g, nodes, sz_x, sz_y, np.sqrt(2.))\r\n\r\n se = np.array([[int(e.source()), int(e.target())] for e in g.edges()])\r\n s_xy = nodes[se[:,0]]\r\n t_xy = nodes[se[:,1]]\r\n s_t = np.ravel_multi_index((s_xy[:,1], s_xy[:,0]), traversible.shape)\r\n t_t = np.ravel_multi_index((t_xy[:,1], t_xy[:,0]), traversible.shape)\r\n s_t = traversible.ravel()[s_t]\r\n t_t = traversible.ravel()[t_t]\r\n\r\n wts = np.zeros(g.num_edges(), dtype=np.float32)\r\n wts[np.logical_and(s_t == True, t_t == True)] = ff_cost\r\n wts[np.logical_and(s_t == False, t_t == False)] = oo_cost\r\n wts[np.logical_xor(s_t, t_t)] = fo_cost\r\n\r\n edge_wts = g.edge_properties['wts']\r\n for i, e in enumerate(g.edges()):\r\n edge_wts[e] = edge_wts[e] * wts[i]\r\n # d = edge_wts.get_array()*1.\r\n # edge_wts.get_array()[:] = d*wts\r\n return g, nodes\r\n\r\ndef label_nodes_with_class(nodes_xyt, class_maps, pix):\r\n \"\"\"\r\n Returns:\r\n class_maps__: one-hot class_map for each class.\r\n node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes\r\n \"\"\"\r\n # Assign each pixel to a node.\r\n selem = skimage.morphology.disk(pix)\r\n class_maps_ = class_maps*1.\r\n for i in range(class_maps.shape[2]):\r\n class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem)\r\n class_maps__ = np.argmax(class_maps_, axis=2)\r\n class_maps__[np.max(class_maps_, axis=2) == 0] = -1\r\n\r\n # For each node pick out the label from this class map.\r\n x = np.round(nodes_xyt[:,[0]]).astype(np.int32)\r\n y = np.round(nodes_xyt[:,[1]]).astype(np.int32)\r\n ind = np.ravel_multi_index((y,x), class_maps__.shape)\r\n node_class_label = class_maps__.ravel()[ind][:,0]\r\n\r\n # Convert to one hot versions.\r\n class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool)\r\n node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=np.bool)\r\n for i in range(class_maps.shape[2]):\r\n class_maps_one_hot[:,:,i] = class_maps__ == i\r\n node_class_label_one_hot[:,i] = node_class_label == i\r\n return class_maps_one_hot, node_class_label_one_hot\r\n\r\ndef label_nodes_with_class_geodesic(nodes_xyt, class_maps, pix, traversible,\r\n ff_cost=1., fo_cost=1., oo_cost=1.,\r\n connectivity=4):\r\n \"\"\"Labels nodes in nodes_xyt with class labels using geodesic distance as\r\n defined by traversible from 
class_maps.\r\n Inputs:\r\n nodes_xyt\r\n class_maps: counts for each class.\r\n pix: distance threshold to consider close enough to target.\r\n traversible: binary map of whether traversible or not.\r\n Output:\r\n labels: For each node in nodes_xyt returns a label of the class or -1 is\r\n unlabelled.\r\n \"\"\"\r\n g, nodes = convert_traversible_to_graph(traversible, ff_cost=ff_cost,\r\n fo_cost=fo_cost, oo_cost=oo_cost,\r\n connectivity=connectivity)\r\n\r\n class_dist = np.zeros_like(class_maps*1.)\r\n n_classes = class_maps.shape[2]\r\n if False:\r\n # Assign each pixel to a class based on number of points.\r\n selem = skimage.morphology.disk(pix)\r\n class_maps_ = class_maps*1.\r\n class_maps__ = np.argmax(class_maps_, axis=2)\r\n class_maps__[np.max(class_maps_, axis=2) == 0] = -1\r\n\r\n # Label nodes with classes.\r\n for i in range(n_classes):\r\n # class_node_ids = np.where(class_maps__.ravel() == i)[0]\r\n class_node_ids = np.where(class_maps[:,:,i].ravel() > 0)[0]\r\n dist_i = get_distance_node_list(g, class_node_ids, 'to', weights='wts')\r\n class_dist[:,:,i] = np.reshape(dist_i, class_dist[:,:,i].shape)\r\n class_map_geodesic = (class_dist <= pix)\r\n class_map_geodesic = np.reshape(class_map_geodesic, [-1, n_classes])\r\n\r\n # For each node pick out the label from this class map.\r\n x = np.round(nodes_xyt[:,[0]]).astype(np.int32)\r\n y = np.round(nodes_xyt[:,[1]]).astype(np.int32)\r\n ind = np.ravel_multi_index((y,x), class_dist[:,:,0].shape)\r\n node_class_label = class_map_geodesic[ind[:,0],:]\r\n class_map_geodesic = class_dist <= pix\r\n return class_map_geodesic, node_class_label\r\n\r\ndef _get_next_nodes_undirected(n, sc, n_ori):\r\n nodes_to_add = []\r\n nodes_to_validate = []\r\n (p, q, r) = n\r\n nodes_to_add.append((n, (p, q, r), 0))\r\n if n_ori == 4:\r\n for _ in [1, 2, 3, 4]:\r\n if _ == 1:\r\n v = (p - sc, q, r)\r\n elif _ == 2:\r\n v = (p + sc, q, r)\r\n elif _ == 3:\r\n v = (p, q - sc, r)\r\n elif _ == 4:\r\n v = (p, q + sc, r)\r\n nodes_to_validate.append((n, v, _))\r\n return nodes_to_add, nodes_to_validate\r\n\r\ndef _get_next_nodes(n, sc, n_ori):\r\n nodes_to_add = []\r\n nodes_to_validate = []\r\n (p, q, r) = n\r\n for r_, a_ in zip([-1, 0, 1], [1, 0, 2]):\r\n nodes_to_add.append((n, (p, q, np.mod(r+r_, n_ori)), a_))\r\n\r\n if n_ori == 6:\r\n if r == 0:\r\n v = (p + sc, q, r)\r\n elif r == 1:\r\n v = (p + sc, q + sc, r)\r\n elif r == 2:\r\n v = (p, q + sc, r)\r\n elif r == 3:\r\n v = (p - sc, q, r)\r\n elif r == 4:\r\n v = (p - sc, q - sc, r)\r\n elif r == 5:\r\n v = (p, q - sc, r)\r\n elif n_ori == 4:\r\n if r == 0:\r\n v = (p + sc, q, r)\r\n elif r == 1:\r\n v = (p, q + sc, r)\r\n elif r == 2:\r\n v = (p - sc, q, r)\r\n elif r == 3:\r\n v = (p, q - sc, r)\r\n nodes_to_validate.append((n,v,3))\r\n\r\n return nodes_to_add, nodes_to_validate\r\n\r\ndef generate_graph(valid_fn_vec=None, sc=1., n_ori=6,\r\n starting_location=(0, 0, 0), vis=False, directed=True):\r\n timer = utils.Timer()\r\n timer.tic()\r\n if directed: G = nx.DiGraph(directed=True)\r\n else: G = nx.Graph()\r\n G.add_node(starting_location)\r\n new_nodes = G.nodes()\r\n while len(new_nodes) != 0:\r\n nodes_to_add = []\r\n nodes_to_validate = []\r\n for n in new_nodes:\r\n if directed:\r\n na, nv = _get_next_nodes(n, sc, n_ori)\r\n else:\r\n na, nv = _get_next_nodes_undirected(n, sc, n_ori)\r\n nodes_to_add = nodes_to_add + na\r\n if valid_fn_vec is not None:\r\n nodes_to_validate = nodes_to_validate + nv\r\n else:\r\n node_to_add = nodes_to_add + nv\r\n\r\n # Validate nodes.\r\n vs = 
[_[1] for _ in nodes_to_validate]\r\n valids = valid_fn_vec(vs)\r\n\r\n for nva, valid in zip(nodes_to_validate, valids):\r\n if valid:\r\n nodes_to_add.append(nva)\r\n\r\n new_nodes = []\r\n for n,v,a in nodes_to_add:\r\n if not G.has_node(v):\r\n new_nodes.append(v)\r\n G.add_edge(n, v, action=a)\r\n\r\n timer.toc(average=True, log_at=1, log_str='src.graph_utils.generate_graph')\r\n return (G)\r\n\r\ndef vis_G(G, ax, vertex_color='r', edge_color='b', r=None):\r\n if edge_color is not None:\r\n for e in G.edges():\r\n XYT = zip(*e)\r\n x = XYT[-3]\r\n y = XYT[-2]\r\n t = XYT[-1]\r\n if r is None or t[0] == r:\r\n ax.plot(x, y, edge_color)\r\n if vertex_color is not None:\r\n XYT = zip(*G.nodes())\r\n x = XYT[-3]\r\n y = XYT[-2]\r\n t = XYT[-1]\r\n ax.plot(x, y, vertex_color + '.')\r\n\r\ndef convert_to_graph_tool(G):\r\n timer = utils.Timer()\r\n timer.tic()\r\n gtG = gt.Graph(directed=G.is_directed())\r\n gtG.ep['action'] = gtG.new_edge_property('int')\r\n\r\n nodes_list = G.nodes()\r\n nodes_array = np.array(nodes_list)\r\n\r\n nodes_id = np.zeros((nodes_array.shape[0],), dtype=np.int64)\r\n\r\n for i in range(nodes_array.shape[0]):\r\n v = gtG.add_vertex()\r\n nodes_id[i] = int(v)\r\n\r\n # d = {key: value for (key, value) in zip(nodes_list, nodes_id)}\r\n d = dict(itertools.izip(nodes_list, nodes_id))\r\n\r\n for src, dst, data in G.edges_iter(data=True):\r\n e = gtG.add_edge(d[src], d[dst])\r\n gtG.ep['action'][e] = data['action']\r\n nodes_to_id = d\r\n timer.toc(average=True, log_at=1, log_str='src.graph_utils.convert_to_graph_tool')\r\n return gtG, nodes_array, nodes_to_id\r\n\r\n\r\ndef _rejection_sampling(rng, sampling_d, target_d, bins, hardness, M):\r\n bin_ind = np.digitize(hardness, bins)-1\r\n i = 0\r\n ratio = target_d[bin_ind] / (M*sampling_d[bin_ind])\r\n while i < ratio.size and rng.rand() > ratio[i]:\r\n i = i+1\r\n return i\r\n\r\ndef heuristic_fn_vec(n1, n2, n_ori, step_size):\r\n # n1 is a vector and n2 is a single point.\r\n dx = (n1[:,0] - n2[0,0])/step_size\r\n dy = (n1[:,1] - n2[0,1])/step_size\r\n dt = n1[:,2] - n2[0,2]\r\n dt = np.mod(dt, n_ori)\r\n dt = np.minimum(dt, n_ori-dt)\r\n\r\n if n_ori == 6:\r\n if dx*dy > 0:\r\n d = np.maximum(np.abs(dx), np.abs(dy))\r\n else:\r\n d = np.abs(dy-dx)\r\n elif n_ori == 4:\r\n d = np.abs(dx) + np.abs(dy)\r\n\r\n return (d + dt).reshape((-1,1))\r\n\r\ndef get_hardness_distribution(gtG, max_dist, min_dist, rng, trials, bins, nodes,\r\n n_ori, step_size):\r\n heuristic_fn = lambda node_ids, node_id: \\\r\n heuristic_fn_vec(nodes[node_ids, :], nodes[[node_id], :], n_ori, step_size)\r\n num_nodes = gtG.num_vertices()\r\n gt_dists = []; h_dists = [];\r\n for i in range(trials):\r\n end_node_id = rng.choice(num_nodes)\r\n gt_dist = gt.topology.shortest_distance(gt.GraphView(gtG, reversed=True),\r\n source=gtG.vertex(end_node_id),\r\n target=None, max_dist=max_dist)\r\n gt_dist = np.array(gt_dist.get_array())\r\n ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0]\r\n gt_dist = gt_dist[ind]\r\n h_dist = heuristic_fn(ind, end_node_id)[:,0]\r\n gt_dists.append(gt_dist)\r\n h_dists.append(h_dist)\r\n gt_dists = np.concatenate(gt_dists)\r\n h_dists = np.concatenate(h_dists)\r\n hardness = 1. 
- h_dists*1./gt_dists\r\n hist, _ = np.histogram(hardness, bins)\r\n hist = hist.astype(np.float64)\r\n hist = hist / np.sum(hist)\r\n return hist\r\n\r\ndef rng_next_goal_rejection_sampling(start_node_ids, batch_size, gtG, rng,\r\n max_dist, min_dist, max_dist_to_compute,\r\n sampling_d, target_d,\r\n nodes, n_ori, step_size, bins, M):\r\n sample_start_nodes = start_node_ids is None\r\n dists = []; pred_maps = []; end_node_ids = []; start_node_ids_ = [];\r\n hardnesss = []; gt_dists = [];\r\n num_nodes = gtG.num_vertices()\r\n for i in range(batch_size):\r\n done = False\r\n while not done:\r\n if sample_start_nodes:\r\n start_node_id = rng.choice(num_nodes)\r\n else:\r\n start_node_id = start_node_ids[i]\r\n\r\n gt_dist = gt.topology.shortest_distance(\r\n gt.GraphView(gtG, reversed=False), source=start_node_id, target=None,\r\n max_dist=max_dist)\r\n gt_dist = np.array(gt_dist.get_array())\r\n ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0]\r\n ind = rng.permutation(ind)\r\n gt_dist = gt_dist[ind]*1.\r\n h_dist = heuristic_fn_vec(nodes[ind, :], nodes[[start_node_id], :],\r\n n_ori, step_size)[:,0]\r\n hardness = 1. - h_dist / gt_dist\r\n sampled_ind = _rejection_sampling(rng, sampling_d, target_d, bins,\r\n hardness, M)\r\n if sampled_ind < ind.size:\r\n # print sampled_ind\r\n end_node_id = ind[sampled_ind]\r\n hardness = hardness[sampled_ind]\r\n gt_dist = gt_dist[sampled_ind]\r\n done = True\r\n\r\n # Compute distance from end node to all nodes, to return.\r\n dist, pred_map = gt.topology.shortest_distance(\r\n gt.GraphView(gtG, reversed=True), source=end_node_id, target=None,\r\n max_dist=max_dist_to_compute, pred_map=True)\r\n dist = np.array(dist.get_array())\r\n pred_map = np.array(pred_map.get_array())\r\n\r\n hardnesss.append(hardness); dists.append(dist); pred_maps.append(pred_map);\r\n start_node_ids_.append(start_node_id); end_node_ids.append(end_node_id);\r\n gt_dists.append(gt_dist);\r\n paths = None\r\n return start_node_ids_, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists\r\n\r\n\r\ndef rng_next_goal(start_node_ids, batch_size, gtG, rng, max_dist,\r\n max_dist_to_compute, node_room_ids, nodes=None,\r\n compute_path=False, dists_from_start_node=None):\r\n # Compute the distance field from the starting location, and then pick a\r\n # destination in another room if possible otherwise anywhere outside this\r\n # room.\r\n dists = []; pred_maps = []; paths = []; end_node_ids = [];\r\n for i in range(batch_size):\r\n room_id = node_room_ids[start_node_ids[i]]\r\n # Compute distances.\r\n if dists_from_start_node == None:\r\n dist, pred_map = gt.topology.shortest_distance(\r\n gt.GraphView(gtG, reversed=False), source=gtG.vertex(start_node_ids[i]),\r\n target=None, max_dist=max_dist_to_compute, pred_map=True)\r\n dist = np.array(dist.get_array())\r\n else:\r\n dist = dists_from_start_node[i]\r\n\r\n # Randomly sample nodes which are within max_dist.\r\n near_ids = dist <= max_dist\r\n near_ids = near_ids[:, np.newaxis]\r\n # Check to see if there is a non-negative node which is close enough.\r\n non_same_room_ids = node_room_ids != room_id\r\n non_hallway_ids = node_room_ids != -1\r\n good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids))\r\n good2_ids = np.logical_and(near_ids, non_hallway_ids)\r\n good3_ids = near_ids\r\n if np.any(good1_ids):\r\n end_node_id = rng.choice(np.where(good1_ids)[0])\r\n elif np.any(good2_ids):\r\n end_node_id = rng.choice(np.where(good2_ids)[0])\r\n elif 
np.any(good3_ids):\r\n end_node_id = rng.choice(np.where(good3_ids)[0])\r\n else:\r\n logging.error('Did not find any good nodes.')\r\n\r\n # Compute distance to this new goal for doing distance queries.\r\n dist, pred_map = gt.topology.shortest_distance(\r\n gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id),\r\n target=None, max_dist=max_dist_to_compute, pred_map=True)\r\n dist = np.array(dist.get_array())\r\n pred_map = np.array(pred_map.get_array())\r\n\r\n dists.append(dist)\r\n pred_maps.append(pred_map)\r\n end_node_ids.append(end_node_id)\r\n\r\n path = None\r\n if compute_path:\r\n path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)\r\n paths.append(path)\r\n\r\n return start_node_ids, end_node_ids, dists, pred_maps, paths\r\n\r\n\r\ndef rng_room_to_room(batch_size, gtG, rng, max_dist, max_dist_to_compute,\r\n node_room_ids, nodes=None, compute_path=False):\r\n # Sample one of the rooms, compute the distance field. Pick a destination in\r\n # another room if possible otherwise anywhere outside this room.\r\n dists = []; pred_maps = []; paths = []; start_node_ids = []; end_node_ids = [];\r\n room_ids = np.unique(node_room_ids[node_room_ids[:,0] >= 0, 0])\r\n for i in range(batch_size):\r\n room_id = rng.choice(room_ids)\r\n end_node_id = rng.choice(np.where(node_room_ids[:,0] == room_id)[0])\r\n end_node_ids.append(end_node_id)\r\n\r\n # Compute distances.\r\n dist, pred_map = gt.topology.shortest_distance(\r\n gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id),\r\n target=None, max_dist=max_dist_to_compute, pred_map=True)\r\n dist = np.array(dist.get_array())\r\n pred_map = np.array(pred_map.get_array())\r\n dists.append(dist)\r\n pred_maps.append(pred_map)\r\n\r\n # Randomly sample nodes which are within max_dist.\r\n near_ids = dist <= max_dist\r\n near_ids = near_ids[:, np.newaxis]\r\n\r\n # Check to see if there is a non-negative node which is close enough.\r\n non_same_room_ids = node_room_ids != room_id\r\n non_hallway_ids = node_room_ids != -1\r\n good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids))\r\n good2_ids = np.logical_and(near_ids, non_hallway_ids)\r\n good3_ids = near_ids\r\n if np.any(good1_ids):\r\n start_node_id = rng.choice(np.where(good1_ids)[0])\r\n elif np.any(good2_ids):\r\n start_node_id = rng.choice(np.where(good2_ids)[0])\r\n elif np.any(good3_ids):\r\n start_node_id = rng.choice(np.where(good3_ids)[0])\r\n else:\r\n logging.error('Did not find any good nodes.')\r\n\r\n start_node_ids.append(start_node_id)\r\n\r\n path = None\r\n if compute_path:\r\n path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)\r\n paths.append(path)\r\n\r\n return start_node_ids, end_node_ids, dists, pred_maps, paths\r\n\r\n\r\ndef rng_target_dist_field(batch_size, gtG, rng, max_dist, max_dist_to_compute,\r\n nodes=None, compute_path=False):\r\n # Sample a single node, compute distance to all nodes less than max_dist,\r\n # sample nodes which are a particular distance away.\r\n dists = []; pred_maps = []; paths = []; start_node_ids = []\r\n end_node_ids = rng.choice(gtG.num_vertices(), size=(batch_size,),\r\n replace=False).tolist()\r\n\r\n for i in range(batch_size):\r\n dist, pred_map = gt.topology.shortest_distance(\r\n gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_ids[i]),\r\n target=None, max_dist=max_dist_to_compute, pred_map=True)\r\n dist = np.array(dist.get_array())\r\n pred_map = np.array(pred_map.get_array())\r\n dists.append(dist)\r\n 
pred_maps.append(pred_map)\r\n\r\n # Randomly sample nodes which are withing max_dist\r\n near_ids = np.where(dist <= max_dist)[0]\r\n start_node_id = rng.choice(near_ids, size=(1,), replace=False)[0]\r\n start_node_ids.append(start_node_id)\r\n\r\n path = None\r\n if compute_path:\r\n path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)\r\n paths.append(path)\r\n\r\n return start_node_ids, end_node_ids, dists, pred_maps, paths\r\n",
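The goal sampler above reshapes the empirical "hardness" distribution (hardness = 1 - heuristic/geodesic distance) toward a target histogram via rejection sampling. A self-contained NumPy sketch of the same acceptance test used by _rejection_sampling, with synthetic histograms standing in for get_hardness_distribution output (the histograms and candidate hardness values below are illustrative assumptions):

import numpy as np

rng = np.random.RandomState(0)
bins = np.linspace(0., 1., 11)                  # 10 hardness bins on [0, 1]
sampling_d = np.full(10, 0.1)                   # assumed: hardness is uniform under random goals
target_d = np.linspace(1., 2., 10)
target_d /= target_d.sum()                      # assumed target: prefer harder goals
M = float(np.max(target_d / sampling_d))        # bounds target_d / sampling_d so ratios stay <= 1

candidate_hardness = rng.rand(200)              # stand-in for 1 - h_dist / gt_dist per candidate goal
bin_ind = np.digitize(candidate_hardness, bins) - 1
ratio = target_d[bin_ind] / (M * sampling_d[bin_ind])
i = 0
while i < ratio.size and rng.rand() > ratio[i]:  # accept the first candidate that survives the test
    i += 1
print('accepted candidate index:', i)           # i == ratio.size means no candidate was accepted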
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for object_detection.tflearn.inputs.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport functools\r\nimport os\r\nfrom absl.testing import parameterized\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom object_detection import inputs\r\nfrom object_detection.core import preprocessor\r\nfrom object_detection.core import standard_fields as fields\r\nfrom object_detection.utils import config_util\r\nfrom object_detection.utils import test_case\r\n\r\nFLAGS = tf.flags.FLAGS\r\n\r\n\r\ndef _get_configs_for_model(model_name):\r\n \"\"\"Returns configurations for model.\"\"\"\r\n fname = os.path.join(tf.resource_loader.get_data_files_path(),\r\n 'samples/configs/' + model_name + '.config')\r\n label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),\r\n 'data/pet_label_map.pbtxt')\r\n data_path = os.path.join(tf.resource_loader.get_data_files_path(),\r\n 'test_data/pets_examples.record')\r\n configs = config_util.get_configs_from_pipeline_file(fname)\r\n override_dict = {\r\n 'train_input_path': data_path,\r\n 'eval_input_path': data_path,\r\n 'label_map_path': label_map_path\r\n }\r\n return config_util.merge_external_params_with_configs(\r\n configs, kwargs_dict=override_dict)\r\n\r\n\r\ndef _make_initializable_iterator(dataset):\r\n \"\"\"Creates an iterator, and initializes tables.\r\n\r\n Args:\r\n dataset: A `tf.data.Dataset` object.\r\n\r\n Returns:\r\n A `tf.data.Iterator`.\r\n \"\"\"\r\n iterator = dataset.make_initializable_iterator()\r\n tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)\r\n return iterator\r\n\r\n\r\nclass InputsTest(test_case.TestCase, parameterized.TestCase):\r\n\r\n def test_faster_rcnn_resnet50_train_input(self):\r\n \"\"\"Tests the training input function for FasterRcnnResnet50.\"\"\"\r\n configs = _get_configs_for_model('faster_rcnn_resnet50_pets')\r\n model_config = configs['model']\r\n model_config.faster_rcnn.num_classes = 37\r\n train_input_fn = inputs.create_train_input_fn(\r\n configs['train_config'], configs['train_input_config'], model_config)\r\n features, labels = _make_initializable_iterator(train_input_fn()).get_next()\r\n\r\n self.assertAllEqual([1, None, None, 3],\r\n features[fields.InputDataFields.image].shape.as_list())\r\n self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)\r\n self.assertAllEqual([1],\r\n features[inputs.HASH_KEY].shape.as_list())\r\n self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)\r\n self.assertAllEqual(\r\n [1, 100, 4],\r\n labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_boxes].dtype)\r\n self.assertAllEqual(\r\n [1, 100, 
model_config.faster_rcnn.num_classes],\r\n labels[fields.InputDataFields.groundtruth_classes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_classes].dtype)\r\n self.assertAllEqual(\r\n [1, 100],\r\n labels[fields.InputDataFields.groundtruth_weights].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_weights].dtype)\r\n self.assertAllEqual(\r\n [1, 100, model_config.faster_rcnn.num_classes],\r\n labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())\r\n self.assertEqual(\r\n tf.float32,\r\n labels[fields.InputDataFields.groundtruth_confidences].dtype)\r\n\r\n def test_faster_rcnn_resnet50_train_input_with_additional_channels(self):\r\n \"\"\"Tests the training input function for FasterRcnnResnet50.\"\"\"\r\n configs = _get_configs_for_model('faster_rcnn_resnet50_pets')\r\n model_config = configs['model']\r\n configs['train_input_config'].num_additional_channels = 2\r\n configs['train_config'].retain_original_images = True\r\n model_config.faster_rcnn.num_classes = 37\r\n train_input_fn = inputs.create_train_input_fn(\r\n configs['train_config'], configs['train_input_config'], model_config)\r\n features, labels = _make_initializable_iterator(train_input_fn()).get_next()\r\n\r\n self.assertAllEqual([1, None, None, 5],\r\n features[fields.InputDataFields.image].shape.as_list())\r\n self.assertAllEqual(\r\n [1, None, None, 3],\r\n features[fields.InputDataFields.original_image].shape.as_list())\r\n self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)\r\n self.assertAllEqual([1],\r\n features[inputs.HASH_KEY].shape.as_list())\r\n self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)\r\n self.assertAllEqual(\r\n [1, 100, 4],\r\n labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_boxes].dtype)\r\n self.assertAllEqual(\r\n [1, 100, model_config.faster_rcnn.num_classes],\r\n labels[fields.InputDataFields.groundtruth_classes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_classes].dtype)\r\n self.assertAllEqual(\r\n [1, 100],\r\n labels[fields.InputDataFields.groundtruth_weights].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_weights].dtype)\r\n self.assertAllEqual(\r\n [1, 100, model_config.faster_rcnn.num_classes],\r\n labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())\r\n self.assertEqual(\r\n tf.float32,\r\n labels[fields.InputDataFields.groundtruth_confidences].dtype)\r\n\r\n @parameterized.parameters(\r\n {'eval_batch_size': 1},\r\n {'eval_batch_size': 8}\r\n )\r\n def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1):\r\n \"\"\"Tests the eval input function for FasterRcnnResnet50.\"\"\"\r\n configs = _get_configs_for_model('faster_rcnn_resnet50_pets')\r\n model_config = configs['model']\r\n model_config.faster_rcnn.num_classes = 37\r\n eval_config = configs['eval_config']\r\n eval_config.batch_size = eval_batch_size\r\n eval_input_fn = inputs.create_eval_input_fn(\r\n eval_config, configs['eval_input_configs'][0], model_config)\r\n features, labels = _make_initializable_iterator(eval_input_fn()).get_next()\r\n self.assertAllEqual([eval_batch_size, None, None, 3],\r\n features[fields.InputDataFields.image].shape.as_list())\r\n self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)\r\n self.assertAllEqual(\r\n 
[eval_batch_size, None, None, 3],\r\n features[fields.InputDataFields.original_image].shape.as_list())\r\n self.assertEqual(tf.uint8,\r\n features[fields.InputDataFields.original_image].dtype)\r\n self.assertAllEqual([eval_batch_size],\r\n features[inputs.HASH_KEY].shape.as_list())\r\n self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100, 4],\r\n labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_boxes].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100, model_config.faster_rcnn.num_classes],\r\n labels[fields.InputDataFields.groundtruth_classes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_classes].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[fields.InputDataFields.groundtruth_weights].shape.as_list())\r\n self.assertEqual(\r\n tf.float32,\r\n labels[fields.InputDataFields.groundtruth_weights].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[fields.InputDataFields.groundtruth_area].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_area].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())\r\n self.assertEqual(\r\n tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())\r\n self.assertEqual(\r\n tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)\r\n\r\n def test_ssd_inceptionV2_train_input(self):\r\n \"\"\"Tests the training input function for SSDInceptionV2.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n model_config = configs['model']\r\n model_config.ssd.num_classes = 37\r\n batch_size = configs['train_config'].batch_size\r\n train_input_fn = inputs.create_train_input_fn(\r\n configs['train_config'], configs['train_input_config'], model_config)\r\n features, labels = _make_initializable_iterator(train_input_fn()).get_next()\r\n\r\n self.assertAllEqual([batch_size, 300, 300, 3],\r\n features[fields.InputDataFields.image].shape.as_list())\r\n self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)\r\n self.assertAllEqual([batch_size],\r\n features[inputs.HASH_KEY].shape.as_list())\r\n self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)\r\n self.assertAllEqual(\r\n [batch_size],\r\n labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())\r\n self.assertEqual(tf.int32,\r\n labels[fields.InputDataFields.num_groundtruth_boxes].dtype)\r\n self.assertAllEqual(\r\n [batch_size, 100, 4],\r\n labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_boxes].dtype)\r\n self.assertAllEqual(\r\n [batch_size, 100, model_config.ssd.num_classes],\r\n labels[fields.InputDataFields.groundtruth_classes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_classes].dtype)\r\n self.assertAllEqual(\r\n [batch_size, 100],\r\n labels[\r\n fields.InputDataFields.groundtruth_weights].shape.as_list())\r\n self.assertEqual(\r\n tf.float32,\r\n labels[fields.InputDataFields.groundtruth_weights].dtype)\r\n\r\n @parameterized.parameters(\r\n {'eval_batch_size': 1},\r\n {'eval_batch_size': 8}\r\n 
)\r\n def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1):\r\n \"\"\"Tests the eval input function for SSDInceptionV2.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n model_config = configs['model']\r\n model_config.ssd.num_classes = 37\r\n eval_config = configs['eval_config']\r\n eval_config.batch_size = eval_batch_size\r\n eval_input_fn = inputs.create_eval_input_fn(\r\n eval_config, configs['eval_input_configs'][0], model_config)\r\n features, labels = _make_initializable_iterator(eval_input_fn()).get_next()\r\n self.assertAllEqual([eval_batch_size, 300, 300, 3],\r\n features[fields.InputDataFields.image].shape.as_list())\r\n self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 300, 300, 3],\r\n features[fields.InputDataFields.original_image].shape.as_list())\r\n self.assertEqual(tf.uint8,\r\n features[fields.InputDataFields.original_image].dtype)\r\n self.assertAllEqual([eval_batch_size],\r\n features[inputs.HASH_KEY].shape.as_list())\r\n self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100, 4],\r\n labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_boxes].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100, model_config.ssd.num_classes],\r\n labels[fields.InputDataFields.groundtruth_classes].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_classes].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[\r\n fields.InputDataFields.groundtruth_weights].shape.as_list())\r\n self.assertEqual(\r\n tf.float32,\r\n labels[fields.InputDataFields.groundtruth_weights].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[fields.InputDataFields.groundtruth_area].shape.as_list())\r\n self.assertEqual(tf.float32,\r\n labels[fields.InputDataFields.groundtruth_area].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())\r\n self.assertEqual(\r\n tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)\r\n self.assertAllEqual(\r\n [eval_batch_size, 100],\r\n labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())\r\n self.assertEqual(\r\n tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)\r\n\r\n def test_predict_input(self):\r\n \"\"\"Tests the predict input function.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n predict_input_fn = inputs.create_predict_input_fn(\r\n model_config=configs['model'],\r\n predict_input_config=configs['eval_input_configs'][0])\r\n serving_input_receiver = predict_input_fn()\r\n\r\n image = serving_input_receiver.features[fields.InputDataFields.image]\r\n receiver_tensors = serving_input_receiver.receiver_tensors[\r\n inputs.SERVING_FED_EXAMPLE_KEY]\r\n self.assertEqual([1, 300, 300, 3], image.shape.as_list())\r\n self.assertEqual(tf.float32, image.dtype)\r\n self.assertEqual(tf.string, receiver_tensors.dtype)\r\n\r\n def test_predict_input_with_additional_channels(self):\r\n \"\"\"Tests the predict input function with additional channels.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n configs['eval_input_configs'][0].num_additional_channels = 2\r\n predict_input_fn = inputs.create_predict_input_fn(\r\n model_config=configs['model'],\r\n 
predict_input_config=configs['eval_input_configs'][0])\r\n serving_input_receiver = predict_input_fn()\r\n\r\n image = serving_input_receiver.features[fields.InputDataFields.image]\r\n receiver_tensors = serving_input_receiver.receiver_tensors[\r\n inputs.SERVING_FED_EXAMPLE_KEY]\r\n # RGB + 2 additional channels = 5 channels.\r\n self.assertEqual([1, 300, 300, 5], image.shape.as_list())\r\n self.assertEqual(tf.float32, image.dtype)\r\n self.assertEqual(tf.string, receiver_tensors.dtype)\r\n\r\n def test_error_with_bad_train_config(self):\r\n \"\"\"Tests that a TypeError is raised with improper train config.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n configs['model'].ssd.num_classes = 37\r\n train_input_fn = inputs.create_train_input_fn(\r\n train_config=configs['eval_config'], # Expecting `TrainConfig`.\r\n train_input_config=configs['train_input_config'],\r\n model_config=configs['model'])\r\n with self.assertRaises(TypeError):\r\n train_input_fn()\r\n\r\n def test_error_with_bad_train_input_config(self):\r\n \"\"\"Tests that a TypeError is raised with improper train input config.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n configs['model'].ssd.num_classes = 37\r\n train_input_fn = inputs.create_train_input_fn(\r\n train_config=configs['train_config'],\r\n train_input_config=configs['model'], # Expecting `InputReader`.\r\n model_config=configs['model'])\r\n with self.assertRaises(TypeError):\r\n train_input_fn()\r\n\r\n def test_error_with_bad_train_model_config(self):\r\n \"\"\"Tests that a TypeError is raised with improper train model config.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n configs['model'].ssd.num_classes = 37\r\n train_input_fn = inputs.create_train_input_fn(\r\n train_config=configs['train_config'],\r\n train_input_config=configs['train_input_config'],\r\n model_config=configs['train_config']) # Expecting `DetectionModel`.\r\n with self.assertRaises(TypeError):\r\n train_input_fn()\r\n\r\n def test_error_with_bad_eval_config(self):\r\n \"\"\"Tests that a TypeError is raised with improper eval config.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n configs['model'].ssd.num_classes = 37\r\n eval_input_fn = inputs.create_eval_input_fn(\r\n eval_config=configs['train_config'], # Expecting `EvalConfig`.\r\n eval_input_config=configs['eval_input_configs'][0],\r\n model_config=configs['model'])\r\n with self.assertRaises(TypeError):\r\n eval_input_fn()\r\n\r\n def test_error_with_bad_eval_input_config(self):\r\n \"\"\"Tests that a TypeError is raised with improper eval input config.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n configs['model'].ssd.num_classes = 37\r\n eval_input_fn = inputs.create_eval_input_fn(\r\n eval_config=configs['eval_config'],\r\n eval_input_config=configs['model'], # Expecting `InputReader`.\r\n model_config=configs['model'])\r\n with self.assertRaises(TypeError):\r\n eval_input_fn()\r\n\r\n def test_error_with_bad_eval_model_config(self):\r\n \"\"\"Tests that a TypeError is raised with improper eval model config.\"\"\"\r\n configs = _get_configs_for_model('ssd_inception_v2_pets')\r\n configs['model'].ssd.num_classes = 37\r\n eval_input_fn = inputs.create_eval_input_fn(\r\n eval_config=configs['eval_config'],\r\n eval_input_config=configs['eval_input_configs'][0],\r\n model_config=configs['eval_config']) # Expecting `DetectionModel`.\r\n with self.assertRaises(TypeError):\r\n eval_input_fn()\r\n\r\n def 
test_output_equal_in_replace_empty_string_with_random_number(self):\r\n string_placeholder = tf.placeholder(tf.string, shape=[])\r\n replaced_string = inputs._replace_empty_string_with_random_number(\r\n string_placeholder)\r\n\r\n test_string = 'hello world'\r\n feed_dict = {string_placeholder: test_string}\r\n\r\n with self.test_session() as sess:\r\n out_string = sess.run(replaced_string, feed_dict=feed_dict)\r\n\r\n self.assertEqual(test_string, out_string)\r\n\r\n def test_output_is_integer_in_replace_empty_string_with_random_number(self):\r\n\r\n string_placeholder = tf.placeholder(tf.string, shape=[])\r\n replaced_string = inputs._replace_empty_string_with_random_number(\r\n string_placeholder)\r\n\r\n empty_string = ''\r\n feed_dict = {string_placeholder: empty_string}\r\n\r\n tf.set_random_seed(0)\r\n\r\n with self.test_session() as sess:\r\n out_string = sess.run(replaced_string, feed_dict=feed_dict)\r\n\r\n # Test whether out_string is a string which represents an integer.\r\n int(out_string) # throws an error if out_string is not castable to int.\r\n\r\n self.assertEqual(out_string, '2798129067578209328')\r\n\r\n\r\nclass DataAugmentationFnTest(test_case.TestCase):\r\n\r\n def test_apply_image_and_box_augmentation(self):\r\n data_augmentation_options = [\r\n (preprocessor.resize_image, {\r\n 'new_height': 20,\r\n 'new_width': 20,\r\n 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR\r\n }),\r\n (preprocessor.scale_boxes_to_pixel_coordinates, {}),\r\n ]\r\n data_augmentation_fn = functools.partial(\r\n inputs.augment_input_data,\r\n data_augmentation_options=data_augmentation_options)\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))\r\n }\r\n augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)\r\n with self.test_session() as sess:\r\n augmented_tensor_dict_out = sess.run(augmented_tensor_dict)\r\n\r\n self.assertAllEqual(\r\n augmented_tensor_dict_out[fields.InputDataFields.image].shape,\r\n [20, 20, 3]\r\n )\r\n self.assertAllClose(\r\n augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],\r\n [[10, 10, 20, 20]]\r\n )\r\n\r\n def test_apply_image_and_box_augmentation_with_scores(self):\r\n data_augmentation_options = [\r\n (preprocessor.resize_image, {\r\n 'new_height': 20,\r\n 'new_width': 20,\r\n 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR\r\n }),\r\n (preprocessor.scale_boxes_to_pixel_coordinates, {}),\r\n ]\r\n data_augmentation_fn = functools.partial(\r\n inputs.augment_input_data,\r\n data_augmentation_options=data_augmentation_options)\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([1.0], np.float32)),\r\n fields.InputDataFields.groundtruth_weights:\r\n tf.constant(np.array([0.8], np.float32)),\r\n }\r\n augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)\r\n with self.test_session() as sess:\r\n augmented_tensor_dict_out = sess.run(augmented_tensor_dict)\r\n\r\n self.assertAllEqual(\r\n augmented_tensor_dict_out[fields.InputDataFields.image].shape,\r\n [20, 20, 3]\r\n )\r\n self.assertAllClose(\r\n augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],\r\n [[10, 10, 20, 20]]\r\n )\r\n 
self.assertAllClose(\r\n augmented_tensor_dict_out[fields.InputDataFields.groundtruth_classes],\r\n [1.0]\r\n )\r\n self.assertAllClose(\r\n augmented_tensor_dict_out[\r\n fields.InputDataFields.groundtruth_weights],\r\n [0.8]\r\n )\r\n\r\n def test_include_masks_in_data_augmentation(self):\r\n data_augmentation_options = [\r\n (preprocessor.resize_image, {\r\n 'new_height': 20,\r\n 'new_width': 20,\r\n 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR\r\n })\r\n ]\r\n data_augmentation_fn = functools.partial(\r\n inputs.augment_input_data,\r\n data_augmentation_options=data_augmentation_options)\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_instance_masks:\r\n tf.constant(np.zeros([2, 10, 10], np.uint8))\r\n }\r\n augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)\r\n with self.test_session() as sess:\r\n augmented_tensor_dict_out = sess.run(augmented_tensor_dict)\r\n\r\n self.assertAllEqual(\r\n augmented_tensor_dict_out[fields.InputDataFields.image].shape,\r\n [20, 20, 3])\r\n self.assertAllEqual(augmented_tensor_dict_out[\r\n fields.InputDataFields.groundtruth_instance_masks].shape, [2, 20, 20])\r\n\r\n def test_include_keypoints_in_data_augmentation(self):\r\n data_augmentation_options = [\r\n (preprocessor.resize_image, {\r\n 'new_height': 20,\r\n 'new_width': 20,\r\n 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR\r\n }),\r\n (preprocessor.scale_boxes_to_pixel_coordinates, {}),\r\n ]\r\n data_augmentation_fn = functools.partial(\r\n inputs.augment_input_data,\r\n data_augmentation_options=data_augmentation_options)\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),\r\n fields.InputDataFields.groundtruth_keypoints:\r\n tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))\r\n }\r\n augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)\r\n with self.test_session() as sess:\r\n augmented_tensor_dict_out = sess.run(augmented_tensor_dict)\r\n\r\n self.assertAllEqual(\r\n augmented_tensor_dict_out[fields.InputDataFields.image].shape,\r\n [20, 20, 3]\r\n )\r\n self.assertAllClose(\r\n augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],\r\n [[10, 10, 20, 20]]\r\n )\r\n self.assertAllClose(\r\n augmented_tensor_dict_out[fields.InputDataFields.groundtruth_keypoints],\r\n [[[10, 20], [10, 10]]]\r\n )\r\n\r\n\r\ndef _fake_model_preprocessor_fn(image):\r\n return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))\r\n\r\n\r\ndef _fake_image_resizer_fn(image, mask):\r\n return (image, mask, tf.shape(image))\r\n\r\n\r\nclass DataTransformationFnTest(test_case.TestCase):\r\n\r\n def test_combine_additional_channels_if_present(self):\r\n image = np.random.rand(4, 4, 3).astype(np.float32)\r\n additional_channels = np.random.rand(4, 4, 2).astype(np.float32)\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(image),\r\n fields.InputDataFields.image_additional_channels:\r\n tf.constant(additional_channels),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([1, 1], np.int32))\r\n }\r\n\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=1)\r\n with self.test_session() as sess:\r\n 
transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].dtype,\r\n tf.float32)\r\n self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].shape,\r\n [4, 4, 5])\r\n self.assertAllClose(transformed_inputs[fields.InputDataFields.image],\r\n np.concatenate((image, additional_channels), axis=2))\r\n\r\n def test_use_multiclass_scores_when_present(self):\r\n image = np.random.rand(4, 4, 3).astype(np.float32)\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(image),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),\r\n fields.InputDataFields.multiclass_scores:\r\n tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([1, 2], np.int32))\r\n }\r\n\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=3, use_multiclass_scores=True)\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n\r\n self.assertAllClose(\r\n np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32),\r\n transformed_inputs[fields.InputDataFields.groundtruth_classes])\r\n\r\n def test_use_multiclass_scores_when_not_present(self):\r\n image = np.random.rand(4, 4, 3).astype(np.float32)\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(image),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),\r\n fields.InputDataFields.multiclass_scores:\r\n tf.placeholder(tf.float32),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([1, 2], np.int32))\r\n }\r\n\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=3, use_multiclass_scores=True)\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict),\r\n feed_dict={\r\n tensor_dict[fields.InputDataFields.multiclass_scores]:\r\n np.array([], dtype=np.float32)\r\n })\r\n\r\n self.assertAllClose(\r\n np.array([[0, 1, 0], [0, 0, 1]], np.float32),\r\n transformed_inputs[fields.InputDataFields.groundtruth_classes])\r\n\r\n def test_returns_correct_class_label_encodings(self):\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, 1], np.int32))\r\n }\r\n num_classes = 3\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=num_classes)\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_classes],\r\n [[0, 0, 1], [1, 0, 0]])\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_confidences],\r\n [[0, 0, 1], [1, 
0, 0]])\r\n\r\n def test_returns_correct_labels_with_unrecognized_class(self):\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(\r\n np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]],\r\n np.float32)),\r\n fields.InputDataFields.groundtruth_area:\r\n tf.constant(np.array([.5, .4, .3])),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, -1, 1], np.int32)),\r\n fields.InputDataFields.groundtruth_keypoints:\r\n tf.constant(\r\n np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]],\r\n np.float32)),\r\n fields.InputDataFields.groundtruth_keypoint_visibilities:\r\n tf.constant([True, False, True]),\r\n fields.InputDataFields.groundtruth_instance_masks:\r\n tf.constant(np.random.rand(3, 4, 4).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_is_crowd:\r\n tf.constant([False, True, False]),\r\n fields.InputDataFields.groundtruth_difficult:\r\n tf.constant(np.array([0, 0, 1], np.int32))\r\n }\r\n\r\n num_classes = 3\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=num_classes)\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_classes],\r\n [[0, 0, 1], [1, 0, 0]])\r\n self.assertAllEqual(\r\n transformed_inputs[fields.InputDataFields.num_groundtruth_boxes], 2)\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_area], [.5, .3])\r\n self.assertAllEqual(\r\n transformed_inputs[fields.InputDataFields.groundtruth_confidences],\r\n [[0, 0, 1], [1, 0, 0]])\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_boxes],\r\n [[0, 0, 1, 1], [.5, .5, 1, 1]])\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_keypoints],\r\n [[[.1, .1]], [[.5, .5]]])\r\n self.assertAllEqual(\r\n transformed_inputs[\r\n fields.InputDataFields.groundtruth_keypoint_visibilities],\r\n [True, True])\r\n self.assertAllEqual(\r\n transformed_inputs[\r\n fields.InputDataFields.groundtruth_instance_masks].shape, [2, 4, 4])\r\n self.assertAllEqual(\r\n transformed_inputs[fields.InputDataFields.groundtruth_is_crowd],\r\n [False, False])\r\n self.assertAllEqual(\r\n transformed_inputs[fields.InputDataFields.groundtruth_difficult],\r\n [0, 1])\r\n\r\n def test_returns_correct_merged_boxes(self):\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, 1], np.int32))\r\n }\r\n\r\n num_classes = 3\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=num_classes,\r\n merge_multiple_boxes=True)\r\n\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_boxes],\r\n [[.5, .5, 1., 1.]])\r\n self.assertAllClose(\r\n 
transformed_inputs[fields.InputDataFields.groundtruth_classes],\r\n [[1, 0, 1]])\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_confidences],\r\n [[1, 0, 1]])\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.num_groundtruth_boxes],\r\n 1)\r\n\r\n def test_returns_correct_groundtruth_confidences_when_input_present(self):\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, 1], np.int32)),\r\n fields.InputDataFields.groundtruth_confidences:\r\n tf.constant(np.array([1.0, -1.0], np.float32))\r\n }\r\n num_classes = 3\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=num_classes)\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_classes],\r\n [[0, 0, 1], [1, 0, 0]])\r\n self.assertAllClose(\r\n transformed_inputs[fields.InputDataFields.groundtruth_confidences],\r\n [[0, 0, 1], [-1, 0, 0]])\r\n\r\n def test_returns_resized_masks(self):\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_instance_masks:\r\n tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, 1], np.int32)),\r\n fields.InputDataFields.original_image_spatial_shape:\r\n tf.constant(np.array([4, 4], np.int32))\r\n }\r\n\r\n def fake_image_resizer_fn(image, masks=None):\r\n resized_image = tf.image.resize_images(image, [8, 8])\r\n results = [resized_image]\r\n if masks is not None:\r\n resized_masks = tf.transpose(\r\n tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),\r\n [2, 0, 1])\r\n results.append(resized_masks)\r\n results.append(tf.shape(resized_image))\r\n return results\r\n\r\n num_classes = 3\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=fake_image_resizer_fn,\r\n num_classes=num_classes,\r\n retain_original_image=True)\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n self.assertAllEqual(transformed_inputs[\r\n fields.InputDataFields.original_image].dtype, tf.uint8)\r\n self.assertAllEqual(transformed_inputs[\r\n fields.InputDataFields.original_image_spatial_shape], [4, 4])\r\n self.assertAllEqual(transformed_inputs[\r\n fields.InputDataFields.original_image].shape, [8, 8, 3])\r\n self.assertAllEqual(transformed_inputs[\r\n fields.InputDataFields.groundtruth_instance_masks].shape, [2, 8, 8])\r\n\r\n def test_applies_model_preprocess_fn_to_image_tensor(self):\r\n np_image = np.random.randint(256, size=(4, 4, 3))\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np_image),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, 1], np.int32))\r\n }\r\n\r\n def fake_model_preprocessor_fn(image):\r\n return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))\r\n\r\n num_classes = 3\r\n 
input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=num_classes)\r\n\r\n with self.test_session() as sess:\r\n transformed_inputs = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n self.assertAllClose(transformed_inputs[fields.InputDataFields.image],\r\n np_image / 255.)\r\n self.assertAllClose(transformed_inputs[fields.InputDataFields.\r\n true_image_shape],\r\n [4, 4, 3])\r\n\r\n def test_applies_data_augmentation_fn_to_tensor_dict(self):\r\n np_image = np.random.randint(256, size=(4, 4, 3))\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np_image),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, 1], np.int32))\r\n }\r\n\r\n def add_one_data_augmentation_fn(tensor_dict):\r\n return {key: value + 1 for key, value in tensor_dict.items()}\r\n\r\n num_classes = 4\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=_fake_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=num_classes,\r\n data_augmentation_fn=add_one_data_augmentation_fn)\r\n with self.test_session() as sess:\r\n augmented_tensor_dict = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n\r\n self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],\r\n np_image + 1)\r\n self.assertAllEqual(\r\n augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],\r\n [[0, 0, 0, 1], [0, 1, 0, 0]])\r\n\r\n def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):\r\n np_image = np.random.randint(256, size=(4, 4, 3))\r\n tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.constant(np_image),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.constant(np.array([3, 1], np.int32))\r\n }\r\n\r\n def mul_two_model_preprocessor_fn(image):\r\n return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))\r\n\r\n def add_five_to_image_data_augmentation_fn(tensor_dict):\r\n tensor_dict[fields.InputDataFields.image] += 5\r\n return tensor_dict\r\n\r\n num_classes = 4\r\n input_transformation_fn = functools.partial(\r\n inputs.transform_input_data,\r\n model_preprocess_fn=mul_two_model_preprocessor_fn,\r\n image_resizer_fn=_fake_image_resizer_fn,\r\n num_classes=num_classes,\r\n data_augmentation_fn=add_five_to_image_data_augmentation_fn)\r\n with self.test_session() as sess:\r\n augmented_tensor_dict = sess.run(\r\n input_transformation_fn(tensor_dict=tensor_dict))\r\n\r\n self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],\r\n (np_image + 5) * 2)\r\n\r\n\r\nclass PadInputDataToStaticShapesFnTest(test_case.TestCase):\r\n\r\n def test_pad_images_boxes_and_classes(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.placeholder(tf.float32, [None, None, 3]),\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.placeholder(tf.float32, [None, 4]),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.placeholder(tf.int32, [None, 3]),\r\n fields.InputDataFields.true_image_shape:\r\n tf.placeholder(tf.int32, [3]),\r\n fields.InputDataFields.original_image_spatial_shape:\r\n tf.placeholder(tf.int32, [2])\r\n }\r\n padded_tensor_dict = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[5, 6])\r\n\r\n self.assertAllEqual(\r\n 
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),\r\n [5, 6, 3])\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.true_image_shape]\r\n .shape.as_list(), [3])\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape]\r\n .shape.as_list(), [2])\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]\r\n .shape.as_list(), [3, 4])\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.groundtruth_classes]\r\n .shape.as_list(), [3, 3])\r\n\r\n def test_clip_boxes_and_classes(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.groundtruth_boxes:\r\n tf.placeholder(tf.float32, [None, 4]),\r\n fields.InputDataFields.groundtruth_classes:\r\n tf.placeholder(tf.int32, [None, 3]),\r\n fields.InputDataFields.num_groundtruth_boxes:\r\n tf.placeholder(tf.int32, [])\r\n }\r\n padded_tensor_dict = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[5, 6])\r\n\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]\r\n .shape.as_list(), [3, 4])\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.groundtruth_classes]\r\n .shape.as_list(), [3, 3])\r\n\r\n with self.test_session() as sess:\r\n out_tensor_dict = sess.run(\r\n padded_tensor_dict,\r\n feed_dict={\r\n input_tensor_dict[fields.InputDataFields.groundtruth_boxes]:\r\n np.random.rand(5, 4),\r\n input_tensor_dict[fields.InputDataFields.groundtruth_classes]:\r\n np.random.rand(2, 3),\r\n input_tensor_dict[fields.InputDataFields.num_groundtruth_boxes]:\r\n 5,\r\n })\r\n\r\n self.assertAllEqual(\r\n out_tensor_dict[fields.InputDataFields.groundtruth_boxes].shape, [3, 4])\r\n self.assertAllEqual(\r\n out_tensor_dict[fields.InputDataFields.groundtruth_classes].shape,\r\n [3, 3])\r\n self.assertEqual(\r\n out_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],\r\n 3)\r\n\r\n def test_do_not_pad_dynamic_images(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.placeholder(tf.float32, [None, None, 3]),\r\n }\r\n padded_tensor_dict = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[None, None])\r\n\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),\r\n [None, None, 3])\r\n\r\n def test_images_and_additional_channels(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.placeholder(tf.float32, [None, None, 5]),\r\n fields.InputDataFields.image_additional_channels:\r\n tf.placeholder(tf.float32, [None, None, 2]),\r\n }\r\n padded_tensor_dict = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[5, 6])\r\n\r\n # pad_input_data_to_static_shape assumes that image is already concatenated\r\n # with additional channels.\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),\r\n [5, 6, 5])\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.image_additional_channels]\r\n .shape.as_list(), [5, 6, 2])\r\n\r\n def test_images_and_additional_channels_errors(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.placeholder(tf.float32, [None, None, 3]),\r\n fields.InputDataFields.image_additional_channels:\r\n tf.placeholder(tf.float32, [None, None, 
2]),\r\n fields.InputDataFields.original_image:\r\n tf.placeholder(tf.float32, [None, None, 3]),\r\n }\r\n with self.assertRaises(ValueError):\r\n _ = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[5, 6])\r\n\r\n def test_gray_images(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.placeholder(tf.float32, [None, None, 1]),\r\n }\r\n padded_tensor_dict = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[5, 6])\r\n\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),\r\n [5, 6, 1])\r\n\r\n def test_gray_images_and_additional_channels(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.image:\r\n tf.placeholder(tf.float32, [None, None, 3]),\r\n fields.InputDataFields.image_additional_channels:\r\n tf.placeholder(tf.float32, [None, None, 2]),\r\n }\r\n # pad_input_data_to_static_shape assumes that image is already concatenated\r\n # with additional channels.\r\n padded_tensor_dict = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[5, 6])\r\n\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),\r\n [5, 6, 3])\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.image_additional_channels]\r\n .shape.as_list(), [5, 6, 2])\r\n\r\n def test_keypoints(self):\r\n input_tensor_dict = {\r\n fields.InputDataFields.groundtruth_keypoints:\r\n tf.placeholder(tf.float32, [None, 16, 4]),\r\n fields.InputDataFields.groundtruth_keypoint_visibilities:\r\n tf.placeholder(tf.bool, [None, 16]),\r\n }\r\n padded_tensor_dict = inputs.pad_input_data_to_static_shapes(\r\n tensor_dict=input_tensor_dict,\r\n max_num_boxes=3,\r\n num_classes=3,\r\n spatial_image_shape=[5, 6])\r\n\r\n self.assertAllEqual(\r\n padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints]\r\n .shape.as_list(), [3, 16, 4])\r\n self.assertAllEqual(\r\n padded_tensor_dict[\r\n fields.InputDataFields.groundtruth_keypoint_visibilities]\r\n .shape.as_list(), [3, 16])\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n",
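The record above exercises `inputs.create_train_input_fn` end to end through TF1-style initializable iterators. A minimal sketch of the same call pattern outside the test harness (assuming the TF1.x object_detection API shown in the file; the config path is hypothetical):

```python
# Sketch only: mirrors the call pattern used in the tests above, not a drop-in script.
# The pipeline config path is hypothetical; any valid object_detection config with
# train_input_path / label_map_path overrides applied would do.
import tensorflow as tf
from object_detection import inputs
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file(
    'samples/configs/faster_rcnn_resnet50_pets.config')  # hypothetical path
train_input_fn = inputs.create_train_input_fn(
    configs['train_config'], configs['train_input_config'], configs['model'])

dataset = train_input_fn()                        # a tf.data.Dataset
iterator = dataset.make_initializable_iterator()  # TF1-style iterator
features, labels = iterator.get_next()

with tf.Session() as sess:
    sess.run(tf.tables_initializer())  # label maps are backed by lookup tables
    sess.run(iterator.initializer)
    image_batch = sess.run(features[fields.InputDataFields.image])
```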
"# Copyright 2017 Google, Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Functions to generate or load datasets for supervised learning.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom collections import namedtuple\r\n\r\nimport numpy as np\r\nfrom sklearn.datasets import make_classification\r\n\r\nMAX_SEED = 4294967295\r\n\r\n\r\nclass Dataset(namedtuple(\"Dataset\", \"data labels\")):\r\n \"\"\"Helper class for managing a supervised learning dataset.\r\n\r\n Args:\r\n data: an array of type float32 with N samples, each of which is the set\r\n of features for that sample. (Shape (N, D_i), where N is the number of\r\n samples and D_i is the number of features for that sample.)\r\n labels: an array of type int32 or int64 with N elements, indicating the\r\n class label for the corresponding set of features in data.\r\n \"\"\"\r\n # Since this is an immutable object, we don't need to reserve slots.\r\n __slots__ = ()\r\n\r\n @property\r\n def size(self):\r\n \"\"\"Dataset size (number of samples).\"\"\"\r\n return len(self.data)\r\n\r\n def batch_indices(self, num_batches, batch_size):\r\n \"\"\"Creates indices of shuffled minibatches.\r\n\r\n Args:\r\n num_batches: the number of batches to generate\r\n batch_size: the size of each batch\r\n\r\n Returns:\r\n batch_indices: a list of minibatch indices, arranged so that the dataset\r\n is randomly shuffled.\r\n\r\n Raises:\r\n ValueError: if the data and labels have different lengths\r\n \"\"\"\r\n if len(self.data) != len(self.labels):\r\n raise ValueError(\"Labels and data must have the same number of samples.\")\r\n\r\n batch_indices = []\r\n\r\n # Follows logic in mnist.py to ensure we cover the entire dataset.\r\n index_in_epoch = 0\r\n dataset_size = len(self.data)\r\n dataset_indices = np.arange(dataset_size)\r\n np.random.shuffle(dataset_indices)\r\n\r\n for _ in range(num_batches):\r\n start = index_in_epoch\r\n index_in_epoch += batch_size\r\n if index_in_epoch > dataset_size:\r\n\r\n # Finished epoch, reshuffle.\r\n np.random.shuffle(dataset_indices)\r\n\r\n # Start next epoch.\r\n start = 0\r\n index_in_epoch = batch_size\r\n\r\n end = index_in_epoch\r\n batch_indices.append(dataset_indices[start:end].tolist())\r\n\r\n return batch_indices\r\n\r\n\r\ndef noisy_parity_class(n_samples,\r\n n_classes=2,\r\n n_context_ids=5,\r\n noise_prob=0.25,\r\n random_seed=None):\r\n \"\"\"Returns a randomly generated sparse-to-sparse dataset.\r\n\r\n The label is a parity class of a set of context classes.\r\n\r\n Args:\r\n n_samples: number of samples (data points)\r\n n_classes: number of class labels (default: 2)\r\n n_context_ids: how many classes to take the parity of (default: 5).\r\n noise_prob: how often to corrupt the label (default: 0.25)\r\n random_seed: seed used for drawing the random data (default: 
None)\r\n Returns:\r\n dataset: A Dataset namedtuple containing the generated data and labels\r\n \"\"\"\r\n np.random.seed(random_seed)\r\n x = np.random.randint(0, n_classes, [n_samples, n_context_ids])\r\n noise = np.random.binomial(1, noise_prob, [n_samples])\r\n y = (np.sum(x, 1) + noise) % n_classes\r\n return Dataset(x.astype(\"float32\"), y.astype(\"int32\"))\r\n\r\n\r\ndef random(n_features, n_samples, n_classes=2, sep=1.0, random_seed=None):\r\n \"\"\"Returns a randomly generated classification dataset.\r\n\r\n Args:\r\n n_features: number of features (dependent variables)\r\n n_samples: number of samples (data points)\r\n n_classes: number of class labels (default: 2)\r\n sep: separation of the two classes, a higher value corresponds to\r\n an easier classification problem (default: 1.0)\r\n random_seed: seed used for drawing the random data (default: None)\r\n\r\n Returns:\r\n dataset: A Dataset namedtuple containing the generated data and labels\r\n \"\"\"\r\n # Generate the problem data.\r\n x, y = make_classification(n_samples=n_samples,\r\n n_features=n_features,\r\n n_informative=n_features,\r\n n_redundant=0,\r\n n_classes=n_classes,\r\n class_sep=sep,\r\n random_state=random_seed)\r\n\r\n return Dataset(x.astype(\"float32\"), y.astype(\"int32\"))\r\n\r\n\r\ndef random_binary(n_features, n_samples, random_seed=None):\r\n \"\"\"Returns a randomly generated dataset of binary values.\r\n\r\n Args:\r\n n_features: number of features (dependent variables)\r\n n_samples: number of samples (data points)\r\n random_seed: seed used for drawing the random data (default: None)\r\n\r\n Returns:\r\n dataset: A Dataset namedtuple containing the generated data and labels\r\n \"\"\"\r\n random_seed = (np.random.randint(MAX_SEED) if random_seed is None\r\n else random_seed)\r\n np.random.seed(random_seed)\r\n\r\n x = np.random.randint(2, size=(n_samples, n_features))\r\n y = np.zeros((n_samples, 1))\r\n\r\n return Dataset(x.astype(\"float32\"), y.astype(\"int32\"))\r\n\r\n\r\ndef random_symmetric(n_features, n_samples, random_seed=None):\r\n \"\"\"Returns a randomly generated dataset of values and their negatives.\r\n\r\n Args:\r\n n_features: number of features (dependent variables)\r\n n_samples: number of samples (data points)\r\n random_seed: seed used for drawing the random data (default: None)\r\n\r\n Returns:\r\n dataset: A Dataset namedtuple containing the generated data and labels\r\n \"\"\"\r\n random_seed = (np.random.randint(MAX_SEED) if random_seed is None\r\n else random_seed)\r\n np.random.seed(random_seed)\r\n\r\n x1 = np.random.normal(size=(int(n_samples/2), n_features))\r\n x = np.concatenate((x1, -x1), axis=0)\r\n y = np.zeros((n_samples, 1))\r\n\r\n return Dataset(x.astype(\"float32\"), y.astype(\"int32\"))\r\n\r\n\r\ndef random_mlp(n_features, n_samples, random_seed=None, n_layers=6, width=20):\r\n \"\"\"Returns a generated output of an MLP with random weights.\r\n\r\n Args:\r\n n_features: number of features (dependent variables)\r\n n_samples: number of samples (data points)\r\n random_seed: seed used for drawing the random data (default: None)\r\n n_layers: number of layers in random MLP\r\n width: width of the layers in random MLP\r\n\r\n Returns:\r\n dataset: A Dataset namedtuple containing the generated data and labels\r\n \"\"\"\r\n random_seed = (np.random.randint(MAX_SEED) if random_seed is None\r\n else random_seed)\r\n np.random.seed(random_seed)\r\n\r\n x = np.random.normal(size=(n_samples, n_features))\r\n y = x\r\n n_in = n_features\r\n scale_factor = 
np.sqrt(2.) / np.sqrt(n_features)\r\n for _ in range(n_layers):\r\n weights = np.random.normal(size=(n_in, width)) * scale_factor\r\n y = np.dot(y, weights).clip(min=0)\r\n n_in = width\r\n\r\n y = y[:, 0]\r\n y[y > 0] = 1\r\n\r\n return Dataset(x.astype(\"float32\"), y.astype(\"int32\"))\r\n\r\n\r\nEMPTY_DATASET = Dataset(np.array([], dtype=\"float32\"),\r\n np.array([], dtype=\"int32\"))\r\n",
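A short usage sketch for the dataset helpers defined in the record above (the module name `datasets` is assumed for illustration; the functions and the `Dataset.batch_indices` method are exactly the ones shown):

```python
# Usage sketch, assuming the file above is importable as `datasets` (name assumed).
import datasets

ds = datasets.random(n_features=8, n_samples=128, n_classes=2,
                     sep=1.0, random_seed=0)
print(ds.size)  # 128 samples, each with 8 float32 features

# Shuffled minibatch indices; the helper reshuffles whenever an epoch boundary
# is crossed, so every sample is covered before repeats.
batches = ds.batch_indices(num_batches=10, batch_size=32)
x0 = ds.data[batches[0]]    # shape (32, 8), float32
y0 = ds.labels[batches[0]]  # shape (32,), int32
```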
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for object_detection.predictors.heads.box_head.\"\"\"\r\nimport tensorflow as tf\r\n\r\nfrom google.protobuf import text_format\r\nfrom object_detection.builders import hyperparams_builder\r\nfrom object_detection.predictors.heads import box_head\r\nfrom object_detection.protos import hyperparams_pb2\r\nfrom object_detection.utils import test_case\r\n\r\n\r\nclass MaskRCNNBoxHeadTest(test_case.TestCase):\r\n\r\n def _build_arg_scope_with_hyperparams(self,\r\n op_type=hyperparams_pb2.Hyperparams.FC):\r\n hyperparams = hyperparams_pb2.Hyperparams()\r\n hyperparams_text_proto = \"\"\"\r\n activation: NONE\r\n regularizer {\r\n l2_regularizer {\r\n }\r\n }\r\n initializer {\r\n truncated_normal_initializer {\r\n }\r\n }\r\n \"\"\"\r\n text_format.Merge(hyperparams_text_proto, hyperparams)\r\n hyperparams.op = op_type\r\n return hyperparams_builder.build(hyperparams, is_training=True)\r\n\r\n def test_prediction_size(self):\r\n box_prediction_head = box_head.MaskRCNNBoxHead(\r\n is_training=False,\r\n num_classes=20,\r\n fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),\r\n use_dropout=True,\r\n dropout_keep_prob=0.5,\r\n box_code_size=4,\r\n share_box_across_classes=False)\r\n roi_pooled_features = tf.random_uniform(\r\n [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n prediction = box_prediction_head.predict(\r\n features=roi_pooled_features, num_predictions_per_location=1)\r\n self.assertAllEqual([64, 1, 20, 4], prediction.get_shape().as_list())\r\n\r\n\r\nclass ConvolutionalBoxPredictorTest(test_case.TestCase):\r\n\r\n def _build_arg_scope_with_hyperparams(\r\n self, op_type=hyperparams_pb2.Hyperparams.CONV):\r\n hyperparams = hyperparams_pb2.Hyperparams()\r\n hyperparams_text_proto = \"\"\"\r\n activation: NONE\r\n regularizer {\r\n l2_regularizer {\r\n }\r\n }\r\n initializer {\r\n truncated_normal_initializer {\r\n }\r\n }\r\n \"\"\"\r\n text_format.Merge(hyperparams_text_proto, hyperparams)\r\n hyperparams.op = op_type\r\n return hyperparams_builder.build(hyperparams, is_training=True)\r\n\r\n def test_prediction_size(self):\r\n box_prediction_head = box_head.ConvolutionalBoxHead(\r\n is_training=True,\r\n box_code_size=4,\r\n kernel_size=3)\r\n image_feature = tf.random_uniform(\r\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n box_encodings = box_prediction_head.predict(\r\n features=image_feature,\r\n num_predictions_per_location=1)\r\n self.assertAllEqual([64, 323, 1, 4], box_encodings.get_shape().as_list())\r\n\r\n\r\nclass WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase):\r\n\r\n def _build_arg_scope_with_hyperparams(\r\n self, op_type=hyperparams_pb2.Hyperparams.CONV):\r\n hyperparams = hyperparams_pb2.Hyperparams()\r\n hyperparams_text_proto = \"\"\"\r\n activation: NONE\r\n 
regularizer {\r\n l2_regularizer {\r\n }\r\n }\r\n initializer {\r\n truncated_normal_initializer {\r\n }\r\n }\r\n \"\"\"\r\n text_format.Merge(hyperparams_text_proto, hyperparams)\r\n hyperparams.op = op_type\r\n return hyperparams_builder.build(hyperparams, is_training=True)\r\n\r\n def test_prediction_size(self):\r\n box_prediction_head = box_head.WeightSharedConvolutionalBoxHead(\r\n box_code_size=4)\r\n image_feature = tf.random_uniform(\r\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n box_encodings = box_prediction_head.predict(\r\n features=image_feature,\r\n num_predictions_per_location=1)\r\n self.assertAllEqual([64, 323, 4], box_encodings.get_shape().as_list())\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n",
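The expected shapes in the convolutional box-head tests above follow directly from the feature-map geometry; a quick arithmetic check (plain Python, no TF needed):

```python
# Shape arithmetic behind the [64, 323, 1, 4] and [64, 323, 4] expectations above.
batch, height, width, depth = 64, 17, 19, 1024
num_predictions_per_location = 1
box_code_size = 4

num_locations = height * width  # 17 * 19 = 323 prediction locations per image
print([batch, num_locations, num_predictions_per_location, box_code_size])
# -> [64, 323, 1, 4]   (ConvolutionalBoxHead test)
print([batch, num_locations * num_predictions_per_location, box_code_size])
# -> [64, 323, 4]      (WeightSharedConvolutionalBoxHead test)
```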
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests of the 2D masked convolution blocks.\"\"\"\r\n\r\nfrom __future__ import division\r\nfrom __future__ import unicode_literals\r\n\r\nimport numpy as np\r\nfrom six.moves import xrange\r\nimport tensorflow as tf\r\n\r\nimport blocks_masked_conv2d\r\n\r\n\r\nclass MaskedConv2DTest(tf.test.TestCase):\r\n\r\n def testRasterScanKernel(self):\r\n kernel_size = 5\r\n input_depth = 1\r\n output_depth = 1\r\n kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]\r\n\r\n # pylint: disable=bad-whitespace\r\n kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0],\r\n [ 6.0, 7.0, 8.0, 9.0, 10.0],\r\n [11.0, 12.0, 13.0, 14.0, 15.0],\r\n [16.0, 17.0, 18.0, 19.0, 20.0],\r\n [21.0, 22.0, 23.0, 24.0, 25.0]]\r\n kernel_feed = np.reshape(kernel_feed, kernel_shape)\r\n kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0],\r\n [ 6.0, 7.0, 8.0, 9.0, 10.0],\r\n [11.0, 12.0, 0.0, 0.0, 0.0],\r\n [ 0.0, 0.0, 0.0, 0.0, 0.0],\r\n [ 0.0, 0.0, 0.0, 0.0, 0.0]]\r\n kernel_expected = np.reshape(kernel_expected, kernel_shape)\r\n # pylint: enable=bad-whitespace\r\n\r\n init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)\r\n masked_conv2d = blocks_masked_conv2d.RasterScanConv2D(\r\n output_depth, [kernel_size] * 2, [1] * 2, 'SAME',\r\n initializer=init_kernel)\r\n x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth])\r\n _ = masked_conv2d(x)\r\n\r\n with self.test_session():\r\n tf.global_variables_initializer().run()\r\n kernel_value = masked_conv2d._kernel.eval()\r\n\r\n self.assertAllEqual(kernel_expected, kernel_value)\r\n\r\n def testDepthOrderKernel(self):\r\n kernel_size = 1\r\n input_depth = 7\r\n output_depth = input_depth\r\n kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]\r\n\r\n kernel_feed = np.ones(kernel_shape)\r\n x_shape = [5] * 3 + [input_depth]\r\n x_feed = np.ones(x_shape)\r\n y_expected = np.zeros(x_shape[0:3] + [output_depth])\r\n y_expected[:, :, :] = np.arange(output_depth)\r\n\r\n init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)\r\n masked_conv2d = blocks_masked_conv2d.DepthOrderConv2D(\r\n output_depth, [kernel_size] * 2, [1] * 2, 'SAME',\r\n strict_order=True,\r\n initializer=init_kernel)\r\n x = tf.placeholder(dtype=tf.float32, shape=x_shape)\r\n y = masked_conv2d(x)\r\n\r\n with self.test_session():\r\n tf.global_variables_initializer().run()\r\n y_value = y.eval(feed_dict={x: x_feed})\r\n\r\n self.assertAllEqual(y_expected, y_value)\r\n\r\n def testGroupRasterScanKernel(self):\r\n kernel_size = 3\r\n input_depth = 4\r\n input_group_size = 2\r\n output_depth = 2\r\n output_group_size = 1\r\n kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]\r\n kernel_feed = np.ones(shape=kernel_shape)\r\n\r\n height = 5\r\n width = 5\r\n x_shape = [1, height, width, input_depth]\r\n x_feed = 
np.ones(shape=x_shape)\r\n\r\n # pylint: disable=bad-whitespace\r\n y_expected = [\r\n [[ 0, 2], [ 4, 6], [ 4, 6], [ 4, 6], [ 4, 6]],\r\n [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],\r\n [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],\r\n [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],\r\n [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],\r\n ]\r\n y_expected = np.reshape(y_expected, [1, height, width, output_depth])\r\n # pylint: enable=bad-whitespace\r\n\r\n init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)\r\n masked_conv2d = blocks_masked_conv2d.GroupRasterScanConv2D(\r\n output_depth, [kernel_size] * 2, [1] * 2, 'SAME',\r\n strict_order=True,\r\n input_group_size=input_group_size,\r\n output_group_size=output_group_size,\r\n initializer=init_kernel)\r\n x = tf.placeholder(dtype=tf.float32, shape=x_shape)\r\n y = masked_conv2d(x)\r\n\r\n with self.test_session():\r\n tf.global_variables_initializer().run()\r\n y_value = y.eval(feed_dict={x: x_feed})\r\n\r\n self.assertAllEqual(y_expected, y_value)\r\n\r\n def testInFillingKernel(self):\r\n kernel_size = 5\r\n input_depth = 1\r\n output_depth = 1\r\n kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]\r\n\r\n # pylint: disable=bad-whitespace\r\n kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0],\r\n [ 6.0, 7.0, 8.0, 9.0, 10.0],\r\n [11.0, 12.0, 13.0, 14.0, 15.0],\r\n [16.0, 17.0, 18.0, 19.0, 20.0],\r\n [21.0, 22.0, 23.0, 24.0, 25.0]]\r\n kernel_feed = np.reshape(kernel_feed, kernel_shape)\r\n kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0],\r\n [ 6.0, 7.0, 8.0, 9.0, 10.0],\r\n [11.0, 12.0, 0.0, 14.0, 15.0],\r\n [16.0, 17.0, 18.0, 19.0, 20.0],\r\n [21.0, 22.0, 23.0, 24.0, 25.0]]\r\n kernel_expected = np.reshape(kernel_expected, kernel_shape)\r\n # pylint: enable=bad-whitespace\r\n\r\n init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)\r\n masked_conv2d = blocks_masked_conv2d.InFillingConv2D(\r\n output_depth, [kernel_size] * 2, [1] * 2, 'SAME',\r\n initializer=init_kernel)\r\n x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth])\r\n _ = masked_conv2d(x)\r\n\r\n with self.test_session():\r\n tf.global_variables_initializer().run()\r\n kernel_value = masked_conv2d._kernel.eval()\r\n\r\n self.assertAllEqual(kernel_expected, kernel_value)\r\n\r\n def testConv2DMaskedNumerics(self):\r\n kernel_size = 5\r\n input_shape = [1, 10, 10, 1]\r\n filter_shape = [kernel_size, kernel_size, 1, 1]\r\n strides = [1, 1, 1, 1]\r\n output_shape = [1, 10, 10, 1]\r\n\r\n conv = blocks_masked_conv2d.RasterScanConv2D(\r\n depth=filter_shape[-1],\r\n filter_size=filter_shape[0:2],\r\n strides=strides[1:3],\r\n padding='SAME',\r\n initializer=tf.constant_initializer(value=1.0))\r\n x = tf.placeholder(dtype=tf.float32, shape=input_shape)\r\n y = conv(x)\r\n\r\n x_feed = - np.ones(input_shape, dtype=float)\r\n y_expected = np.ones(output_shape, dtype=float)\r\n for i in xrange(input_shape[1]):\r\n for j in xrange(input_shape[2]):\r\n x_feed[0, i, j, 0] = 10 * (j + 1) + i\r\n v = 0\r\n ki_start = max(i - kernel_size // 2, 0)\r\n kj_start = max(j - kernel_size // 2, 0)\r\n kj_end = min(j + kernel_size // 2, input_shape[2] - 1)\r\n for ki in range(ki_start, i + 1):\r\n for kj in range(kj_start, kj_end + 1):\r\n if ki > i:\r\n continue\r\n if ki == i and kj >= j:\r\n continue\r\n v += 10 * (kj + 1) + ki\r\n y_expected[0, i, j, 0] = v\r\n\r\n with self.test_session():\r\n tf.global_variables_initializer().run()\r\n y_value = y.eval(feed_dict={x: x_feed})\r\n\r\n self.assertAllEqual(y_expected, 
y_value)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n",
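The first test in the record above pins down the raster-scan mask exactly: kernel entries at and after the spatial center, in row-major order, are zeroed, so each output pixel depends only on inputs that precede it in raster order. A NumPy reconstruction of that mask:

```python
# NumPy reconstruction of the raster-scan kernel mask checked by
# testRasterScanKernel above (zeros at and after the spatial center).
import numpy as np

def raster_scan_mask(kernel_size):
    mask = np.ones((kernel_size, kernel_size), dtype=np.float32)
    center = kernel_size // 2
    mask[center, center:] = 0.0  # center row: zero from the center column onward
    mask[center + 1:, :] = 0.0   # every row below the center: all zeros
    return mask

print(raster_scan_mask(5))
# [[1. 1. 1. 1. 1.]
#  [1. 1. 1. 1. 1.]
#  [1. 1. 0. 0. 0.]
#  [0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0.]]
# Multiplying kernel_feed elementwise by this mask yields kernel_expected.
```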
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Defines the various loss functions in use by the PTN model.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\n\r\nslim = tf.contrib.slim\r\n\r\n\r\ndef add_rotator_image_loss(inputs, outputs, step_size, weight_scale):\r\n \"\"\"Computes the image loss of deep rotator model.\r\n\r\n Args:\r\n inputs: Input dictionary to the model containing keys\r\n such as `images_k'.\r\n outputs: Output dictionary returned by the model containing keys\r\n such as `images_k'.\r\n step_size: A scalar representing the number of recurrent\r\n steps (number of repeated out-of-plane rotations)\r\n in the deep rotator network (int).\r\n weight_scale: A reweighting factor applied over the image loss (float).\r\n\r\n Returns:\r\n A `Tensor' scalar that returns averaged L2 loss\r\n (divided by batch_size and step_size) between the\r\n ground-truth images (RGB) and predicted images (tf.float32).\r\n\r\n \"\"\"\r\n batch_size = tf.shape(inputs['images_0'])[0]\r\n image_loss = 0\r\n for k in range(1, step_size + 1):\r\n image_loss += tf.nn.l2_loss(\r\n inputs['images_%d' % k] - outputs['images_%d' % k])\r\n\r\n image_loss /= tf.to_float(step_size * batch_size)\r\n slim.summaries.add_scalar_summary(\r\n image_loss, 'image_loss', prefix='losses')\r\n image_loss *= weight_scale\r\n return image_loss\r\n\r\n\r\ndef add_rotator_mask_loss(inputs, outputs, step_size, weight_scale):\r\n \"\"\"Computes the mask loss of deep rotator model.\r\n\r\n Args:\r\n inputs: Input dictionary to the model containing keys\r\n such as `masks_k'.\r\n outputs: Output dictionary returned by the model containing\r\n keys such as `masks_k'.\r\n step_size: A scalar representing the number of recurrent\r\n steps (number of repeated out-of-plane rotations)\r\n in the deep rotator network (int).\r\n weight_scale: A reweighting factor applied over the mask loss (float).\r\n\r\n Returns:\r\n A `Tensor' that returns averaged L2 loss\r\n (divided by batch_size and step_size) between the ground-truth masks\r\n (object silhouettes) and predicted masks (tf.float32).\r\n\r\n \"\"\"\r\n batch_size = tf.shape(inputs['images_0'])[0]\r\n mask_loss = 0\r\n for k in range(1, step_size + 1):\r\n mask_loss += tf.nn.l2_loss(\r\n inputs['masks_%d' % k] - outputs['masks_%d' % k])\r\n\r\n mask_loss /= tf.to_float(step_size * batch_size)\r\n slim.summaries.add_scalar_summary(\r\n mask_loss, 'mask_loss', prefix='losses')\r\n mask_loss *= weight_scale\r\n return mask_loss\r\n\r\n\r\ndef add_volume_proj_loss(inputs, outputs, num_views, weight_scale):\r\n \"\"\"Computes the projection loss of voxel generation model.\r\n\r\n Args:\r\n inputs: Input dictionary to the model containing keys such as\r\n `images_1'.\r\n outputs: Output 
dictionary returned by the model containing keys\r\n such as `masks_k' and ``projs_k'.\r\n num_views: A integer scalar represents the total number of\r\n viewpoints for each of the object (int).\r\n weight_scale: A reweighting factor applied over the projection loss (float).\r\n\r\n Returns:\r\n A `Tensor' that returns the averaged L2 loss\r\n (divided by batch_size and num_views) between the ground-truth\r\n masks (object silhouettes) and predicted masks (tf.float32).\r\n\r\n \"\"\"\r\n batch_size = tf.shape(inputs['images_1'])[0]\r\n proj_loss = 0\r\n for k in range(num_views):\r\n proj_loss += tf.nn.l2_loss(\r\n outputs['masks_%d' % (k + 1)] - outputs['projs_%d' % (k + 1)])\r\n proj_loss /= tf.to_float(num_views * batch_size)\r\n slim.summaries.add_scalar_summary(\r\n proj_loss, 'proj_loss', prefix='losses')\r\n proj_loss *= weight_scale\r\n return proj_loss\r\n\r\n\r\ndef add_volume_loss(inputs, outputs, num_views, weight_scale):\r\n \"\"\"Computes the volume loss of voxel generation model.\r\n\r\n Args:\r\n inputs: Input dictionary to the model containing keys such as\r\n `images_1' and `voxels'.\r\n outputs: Output dictionary returned by the model containing keys\r\n such as `voxels_k'.\r\n num_views: A scalar representing the total number of\r\n viewpoints for each object (int).\r\n weight_scale: A reweighting factor applied over the volume\r\n loss (tf.float32).\r\n\r\n Returns:\r\n A `Tensor' that returns the averaged L2 loss\r\n (divided by batch_size and num_views) between the ground-truth\r\n volumes and predicted volumes (tf.float32).\r\n\r\n \"\"\"\r\n batch_size = tf.shape(inputs['images_1'])[0]\r\n vol_loss = 0\r\n for k in range(num_views):\r\n vol_loss += tf.nn.l2_loss(\r\n inputs['voxels'] - outputs['voxels_%d' % (k + 1)])\r\n vol_loss /= tf.to_float(num_views * batch_size)\r\n slim.summaries.add_scalar_summary(\r\n vol_loss, 'vol_loss', prefix='losses')\r\n vol_loss *= weight_scale\r\n return vol_loss\r\n\r\n\r\ndef regularization_loss(scopes, params):\r\n \"\"\"Computes the weight decay as regularization during training.\r\n\r\n Args:\r\n scopes: A list of different components of the model such as\r\n ``encoder'', ``decoder'' and ``projector''.\r\n params: Parameters of the model.\r\n\r\n Returns:\r\n Regularization loss (tf.float32).\r\n \"\"\"\r\n\r\n reg_loss = tf.zeros(dtype=tf.float32, shape=[])\r\n if params.weight_decay > 0:\r\n is_trainable = lambda x: x in tf.trainable_variables()\r\n is_weights = lambda x: 'weights' in x.name\r\n for scope in scopes:\r\n scope_vars = filter(is_trainable,\r\n tf.contrib.framework.get_model_variables(scope))\r\n scope_vars = filter(is_weights, scope_vars)\r\n if scope_vars:\r\n reg_loss += tf.add_n([tf.nn.l2_loss(var) for var in scope_vars])\r\n\r\n slim.summaries.add_scalar_summary(\r\n reg_loss, 'reg_loss', prefix='losses')\r\n reg_loss *= params.weight_decay\r\n return reg_loss\r\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for digraph ops.\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\nfrom dragnn.python import digraph_ops\r\n\r\n\r\nclass DigraphOpsTest(tf.test.TestCase):\r\n \"\"\"Testing rig.\"\"\"\r\n\r\n def testArcPotentialsFromTokens(self):\r\n with self.test_session():\r\n # Batch of two, where the second batch item is the reverse of the first.\r\n source_tokens = tf.constant([[[1, 2],\r\n [2, 3],\r\n [3, 4]],\r\n [[3, 4],\r\n [2, 3],\r\n [1, 2]]],\r\n tf.float32) # pyformat: disable\r\n target_tokens = tf.constant([[[4, 5, 6],\r\n [5, 6, 7],\r\n [6, 7, 8]],\r\n [[6, 7, 8],\r\n [5, 6, 7],\r\n [4, 5, 6]]],\r\n tf.float32) # pyformat: disable\r\n weights = tf.constant([[2, 3, 5],\r\n [7, 11, 13]],\r\n tf.float32) # pyformat: disable\r\n\r\n arcs = digraph_ops.ArcPotentialsFromTokens(source_tokens, target_tokens,\r\n weights)\r\n\r\n # For example,\r\n # ((1 * 2 * 4 + 1 * 3 * 5 + 1 * 5 * 6) +\r\n # (2 * 7 * 4 + 2 * 11 * 5 + 2 * 13 * 6)) = 375\r\n self.assertAllEqual(arcs.eval(),\r\n [[[375, 447, 519],\r\n [589, 702, 815],\r\n [803, 957, 1111]],\r\n [[1111, 957, 803], # reflected through the center\r\n [815, 702, 589],\r\n [519, 447, 375]]]) # pyformat: disable\r\n\r\n def testArcSourcePotentialsFromTokens(self):\r\n with self.test_session():\r\n tokens = tf.constant([[[4, 5, 6],\r\n [5, 6, 7],\r\n [6, 7, 8]],\r\n [[6, 7, 8],\r\n [5, 6, 7],\r\n [4, 5, 6]]], tf.float32) # pyformat: disable\r\n weights = tf.constant([2, 3, 5], tf.float32)\r\n\r\n arcs = digraph_ops.ArcSourcePotentialsFromTokens(tokens, weights)\r\n\r\n self.assertAllEqual(arcs.eval(), [[[53, 53, 53],\r\n [63, 63, 63],\r\n [73, 73, 73]],\r\n [[73, 73, 73],\r\n [63, 63, 63],\r\n [53, 53, 53]]]) # pyformat: disable\r\n\r\n def testRootPotentialsFromTokens(self):\r\n with self.test_session():\r\n root = tf.constant([1, 2], tf.float32)\r\n tokens = tf.constant([[[4, 5, 6],\r\n [5, 6, 7],\r\n [6, 7, 8]],\r\n [[6, 7, 8],\r\n [5, 6, 7],\r\n [4, 5, 6]]], tf.float32) # pyformat: disable\r\n weights_arc = tf.constant([[2, 3, 5],\r\n [7, 11, 13]],\r\n tf.float32) # pyformat: disable\r\n weights_source = tf.constant([11, 10], tf.float32)\r\n\r\n roots = digraph_ops.RootPotentialsFromTokens(root, tokens, weights_arc,\r\n weights_source)\r\n\r\n self.assertAllEqual(roots.eval(), [[406, 478, 550],\r\n [550, 478, 406]]) # pyformat: disable\r\n\r\n def testCombineArcAndRootPotentials(self):\r\n with self.test_session():\r\n arcs = tf.constant([[[1, 2, 3],\r\n [2, 3, 4],\r\n [3, 4, 5]],\r\n [[3, 4, 5],\r\n [2, 3, 4],\r\n [1, 2, 3]]], tf.float32) # pyformat: disable\r\n roots = tf.constant([[6, 7, 8],\r\n [8, 7, 6]], tf.float32) # pyformat: disable\r\n\r\n potentials = digraph_ops.CombineArcAndRootPotentials(arcs, roots)\r\n\r\n self.assertAllEqual(potentials.eval(), [[[6, 2, 3],\r\n [2, 7, 4],\r\n [3, 4, 8]],\r\n [[8, 4, 5],\r\n [2, 7, 
4],\r\n [1, 2, 6]]]) # pyformat: disable\r\n\r\n def testLabelPotentialsFromTokens(self):\r\n with self.test_session():\r\n tokens = tf.constant([[[1, 2],\r\n [3, 4],\r\n [5, 6]],\r\n [[6, 5],\r\n [4, 3],\r\n [2, 1]]], tf.float32) # pyformat: disable\r\n\r\n\r\n weights = tf.constant([[ 2, 3],\r\n [ 5, 7],\r\n [11, 13]], tf.float32) # pyformat: disable\r\n\r\n labels = digraph_ops.LabelPotentialsFromTokens(tokens, weights)\r\n\r\n self.assertAllEqual(labels.eval(),\r\n\r\n [[[ 8, 19, 37],\r\n [ 18, 43, 85],\r\n [ 28, 67, 133]],\r\n [[ 27, 65, 131],\r\n [ 17, 41, 83],\r\n [ 7, 17, 35]]]) # pyformat: disable\r\n\r\n def testLabelPotentialsFromTokenPairs(self):\r\n with self.test_session():\r\n sources = tf.constant([[[1, 2],\r\n [3, 4],\r\n [5, 6]],\r\n [[6, 5],\r\n [4, 3],\r\n [2, 1]]], tf.float32) # pyformat: disable\r\n targets = tf.constant([[[3, 4],\r\n [5, 6],\r\n [7, 8]],\r\n [[8, 7],\r\n [6, 5],\r\n [4, 3]]], tf.float32) # pyformat: disable\r\n\r\n\r\n weights = tf.constant([[[ 2, 3],\r\n [ 5, 7]],\r\n [[11, 13],\r\n [17, 19]],\r\n [[23, 29],\r\n [31, 37]]], tf.float32) # pyformat: disable\r\n\r\n labels = digraph_ops.LabelPotentialsFromTokenPairs(sources, targets,\r\n weights)\r\n\r\n self.assertAllEqual(labels.eval(),\r\n\r\n [[[ 104, 339, 667],\r\n [ 352, 1195, 2375],\r\n [ 736, 2531, 5043]],\r\n [[ 667, 2419, 4857],\r\n [ 303, 1115, 2245],\r\n [ 75, 291, 593]]]) # pyformat: disable\r\n\r\n def testValidArcAndTokenMasks(self):\r\n with self.test_session():\r\n\r\n lengths = tf.constant([1, 2, 3], tf.int64)\r\n max_length = 4\r\n valid_arcs, valid_tokens = digraph_ops.ValidArcAndTokenMasks(\r\n lengths, max_length)\r\n self.assertAllEqual(valid_arcs.eval(),\r\n [[[1, 0, 0, 0],\r\n [0, 0, 0, 0],\r\n [0, 0, 0, 0],\r\n [0, 0, 0, 0]],\r\n [[1, 1, 0, 0],\r\n [1, 1, 0, 0],\r\n [0, 0, 0, 0],\r\n [0, 0, 0, 0]],\r\n [[1, 1, 1, 0],\r\n [1, 1, 1, 0],\r\n [1, 1, 1, 0],\r\n [0, 0, 0, 0]]]) # pyformat: disable\r\n self.assertAllEqual(valid_tokens.eval(),\r\n [[1, 0, 0, 0],\r\n [1, 1, 0, 0],\r\n [1, 1, 1, 0]]) # pyformat: disable\r\n\r\n def testLaplacianMatrixTree(self):\r\n with self.test_session():\r\n\r\n pad = 12345.6\r\n arcs = tf.constant([[[ 2, pad, pad, pad],\r\n [pad, pad, pad, pad],\r\n [pad, pad, pad, pad],\r\n [pad, pad, pad, pad]],\r\n [[ 2, 3, pad, pad],\r\n [ 5, 7, pad, pad],\r\n [pad, pad, pad, pad],\r\n [pad, pad, pad, pad]],\r\n [[ 2, 3, 5, pad],\r\n [ 7, 11, 13, pad],\r\n [ 17, 19, 23, pad],\r\n [pad, pad, pad, pad]],\r\n [[ 2, 3, 5, 7],\r\n [ 11, 13, 17, 19],\r\n [ 23, 29, 31, 37],\r\n [ 41, 43, 47, 53]]],\r\n tf.float32) # pyformat: disable\r\n lengths = tf.constant([1, 2, 3, 4], tf.int64)\r\n laplacian = digraph_ops.LaplacianMatrix(lengths, arcs)\r\n self.assertAllEqual(laplacian.eval(),\r\n [[[ 2, 0, 0, 0],\r\n [ 0, 1, 0, 0],\r\n [ 0, 0, 1, 0],\r\n [ 0, 0, 0, 1]],\r\n [[ 2, -3, 0, 0],\r\n [ 7, 5, 0, 0],\r\n [ 0, 0, 1, 0],\r\n [ 0, 0, 0, 1]],\r\n [[ 2, -3, -5, 0],\r\n [ 11, 20, -13, 0],\r\n [ 23, -19, 36, 0],\r\n [ 0, 0, 0, 1]],\r\n [[ 2, -3, -5, -7],\r\n [ 13, 47, -17, -19],\r\n [ 31, -29, 89, -37],\r\n [ 53, -43, -47, 131]]]) # pyformat: disable\r\n\r\n def testLaplacianMatrixForest(self):\r\n with self.test_session():\r\n\r\n pad = 12345.6\r\n arcs = tf.constant([[[ 2, pad, pad, pad],\r\n [pad, pad, pad, pad],\r\n [pad, pad, pad, pad],\r\n [pad, pad, pad, pad]],\r\n [[ 2, 3, pad, pad],\r\n [ 5, 7, pad, pad],\r\n [pad, pad, pad, pad],\r\n [pad, pad, pad, pad]],\r\n [[ 2, 3, 5, pad],\r\n [ 7, 11, 13, pad],\r\n [ 17, 19, 23, pad],\r\n [pad, pad, pad, pad]],\r\n [[ 2, 3, 5, 
7],\r\n [ 11, 13, 17, 19],\r\n [ 23, 29, 31, 37],\r\n [ 41, 43, 47, 53]]],\r\n tf.float32) # pyformat: disable\r\n lengths = tf.constant([1, 2, 3, 4], tf.int64)\r\n laplacian = digraph_ops.LaplacianMatrix(lengths, arcs, forest=True)\r\n self.assertAllEqual(laplacian.eval(),\r\n [[[ 2, 0, 0, 0],\r\n [ 0, 1, 0, 0],\r\n [ 0, 0, 1, 0],\r\n [ 0, 0, 0, 1]],\r\n [[ 5, -3, 0, 0],\r\n [ -5, 12, 0, 0],\r\n [ 0, 0, 1, 0],\r\n [ 0, 0, 0, 1]],\r\n [[ 10, -3, -5, 0],\r\n [ -7, 31, -13, 0],\r\n [-17, -19, 59, 0],\r\n [ 0, 0, 0, 1]],\r\n [[ 17, -3, -5, -7],\r\n [-11, 60, -17, -19],\r\n [-23, -29, 120, -37],\r\n [-41, -43, -47, 184]]]) # pyformat: disable\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tf.test.main()\r\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Features used by AlphaGo Zero, in approximate order of importance.\r\n\r\nFeature # Notes\r\nStone History 16 The stones of each color during the last 8 moves.\r\nOnes 1 Constant plane of 1s\r\nAll features with 8 planes are 1-hot encoded, with plane i marked with 1\r\nonly if the feature was equal to i. Any features >= 8 would be marked as 8.\r\n\r\nThis file includes the features from from AlphaGo Zero (AGZ) as NEW_FEATURES.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport go\r\nimport numpy as np\r\n\r\n\r\ndef planes(num_planes):\r\n # to specify the number of planes in the features. For example, for a 19x19\r\n # go board, the input stone feature will be in the shape of [19, 19, 16],\r\n # where the third dimension is the num_planes.\r\n def deco(f):\r\n f.planes = num_planes\r\n return f\r\n return deco\r\n\r\n\r\n@planes(16)\r\ndef stone_features(board_size, position):\r\n \"\"\"Create the 16 planes of features for a given position.\r\n\r\n Args:\r\n board_size: the go board size.\r\n position: a given go board status.\r\n\r\n Returns:\r\n The 16 plane features.\r\n \"\"\"\r\n # a bit easier to calculate it with axis 0 being the 16 board states,\r\n # and then roll axis 0 to the end.\r\n features = np.zeros([16, board_size, board_size], dtype=np.uint8)\r\n\r\n num_deltas_avail = position.board_deltas.shape[0]\r\n cumulative_deltas = np.cumsum(position.board_deltas, axis=0)\r\n last_eight = np.tile(position.board, [8, 1, 1])\r\n # apply deltas to compute previous board states\r\n last_eight[1:num_deltas_avail + 1] -= cumulative_deltas\r\n # if no more deltas are available, just repeat oldest board.\r\n last_eight[num_deltas_avail + 1:] = last_eight[num_deltas_avail].reshape(\r\n 1, board_size, board_size)\r\n\r\n features[::2] = last_eight == position.to_play\r\n features[1::2] = last_eight == -position.to_play\r\n return np.rollaxis(features, 0, 3)\r\n\r\n\r\n@planes(1)\r\ndef color_to_play_feature(board_size, position):\r\n if position.to_play == go.BLACK:\r\n return np.ones([board_size, board_size, 1], dtype=np.uint8)\r\n else:\r\n return np.zeros([board_size, board_size, 1], dtype=np.uint8)\r\n\r\nNEW_FEATURES = [\r\n stone_features,\r\n color_to_play_feature\r\n]\r\n\r\nNEW_FEATURES_PLANES = sum(f.planes for f in NEW_FEATURES)\r\n\r\n\r\ndef extract_features(board_size, position, features=None):\r\n if features is None:\r\n features = NEW_FEATURES\r\n return np.concatenate([feature(board_size, position) for feature in features],\r\n axis=2)\r\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for object_detection.trainer.\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\nfrom google.protobuf import text_format\r\n\r\nfrom object_detection.core import losses\r\nfrom object_detection.core import model\r\nfrom object_detection.core import standard_fields as fields\r\nfrom object_detection.legacy import trainer\r\nfrom object_detection.protos import train_pb2\r\n\r\n\r\nNUMBER_OF_CLASSES = 2\r\n\r\n\r\ndef get_input_function():\r\n \"\"\"A function to get test inputs. Returns an image with one box.\"\"\"\r\n image = tf.random_uniform([32, 32, 3], dtype=tf.float32)\r\n key = tf.constant('image_000000')\r\n class_label = tf.random_uniform(\r\n [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)\r\n box_label = tf.random_uniform(\r\n [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)\r\n multiclass_scores = tf.random_uniform(\r\n [1, NUMBER_OF_CLASSES], minval=0.4, maxval=0.6, dtype=tf.float32)\r\n\r\n return {\r\n fields.InputDataFields.image: image,\r\n fields.InputDataFields.key: key,\r\n fields.InputDataFields.groundtruth_classes: class_label,\r\n fields.InputDataFields.groundtruth_boxes: box_label,\r\n fields.InputDataFields.multiclass_scores: multiclass_scores\r\n }\r\n\r\n\r\nclass FakeDetectionModel(model.DetectionModel):\r\n \"\"\"A simple (and poor) DetectionModel for use in test.\"\"\"\r\n\r\n def __init__(self):\r\n super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)\r\n self._classification_loss = losses.WeightedSigmoidClassificationLoss()\r\n self._localization_loss = losses.WeightedSmoothL1LocalizationLoss()\r\n\r\n def preprocess(self, inputs):\r\n \"\"\"Input preprocessing, resizes images to 28x28.\r\n\r\n Args:\r\n inputs: a [batch, height_in, width_in, channels] float32 tensor\r\n representing a batch of images with values between 0 and 255.0.\r\n\r\n Returns:\r\n preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.\r\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\r\n of the form [height, width, channels] indicating the shapes\r\n of true images in the resized images, as resized images can be padded\r\n with zeros.\r\n \"\"\"\r\n true_image_shapes = [inputs.shape[:-1].as_list()\r\n for _ in range(inputs.shape[-1])]\r\n return tf.image.resize_images(inputs, [28, 28]), true_image_shapes\r\n\r\n def predict(self, preprocessed_inputs, true_image_shapes):\r\n \"\"\"Prediction tensors from inputs tensor.\r\n\r\n Args:\r\n preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.\r\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\r\n of the form [height, width, channels] indicating the shapes\r\n of true images in the resized images, as resized images can be padded\r\n with zeros.\r\n\r\n Returns:\r\n prediction_dict: a dictionary holding prediction 
tensors to be\r\n passed to the Loss or Postprocess functions.\r\n \"\"\"\r\n flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)\r\n class_prediction = tf.contrib.layers.fully_connected(\r\n flattened_inputs, self._num_classes)\r\n box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)\r\n\r\n return {\r\n 'class_predictions_with_background': tf.reshape(\r\n class_prediction, [-1, 1, self._num_classes]),\r\n 'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])\r\n }\r\n\r\n def postprocess(self, prediction_dict, true_image_shapes, **params):\r\n \"\"\"Convert predicted output tensors to final detections. Unused.\r\n\r\n Args:\r\n prediction_dict: a dictionary holding prediction tensors.\r\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\r\n of the form [height, width, channels] indicating the shapes\r\n of true images in the resized images, as resized images can be padded\r\n with zeros.\r\n **params: Additional keyword arguments for specific implementations of\r\n DetectionModel.\r\n\r\n Returns:\r\n detections: a dictionary with empty fields.\r\n \"\"\"\r\n return {\r\n 'detection_boxes': None,\r\n 'detection_scores': None,\r\n 'detection_classes': None,\r\n 'num_detections': None\r\n }\r\n\r\n def loss(self, prediction_dict, true_image_shapes):\r\n \"\"\"Compute scalar loss tensors with respect to provided groundtruth.\r\n\r\n Calling this function requires that groundtruth tensors have been\r\n provided via the provide_groundtruth function.\r\n\r\n Args:\r\n prediction_dict: a dictionary holding predicted tensors\r\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\r\n of the form [height, width, channels] indicating the shapes\r\n of true images in the resized images, as resized images can be padded\r\n with zeros.\r\n\r\n Returns:\r\n a dictionary mapping strings (loss names) to scalar tensors representing\r\n loss values.\r\n \"\"\"\r\n batch_reg_targets = tf.stack(\r\n self.groundtruth_lists(fields.BoxListFields.boxes))\r\n batch_cls_targets = tf.stack(\r\n self.groundtruth_lists(fields.BoxListFields.classes))\r\n weights = tf.constant(\r\n 1.0, dtype=tf.float32,\r\n shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])\r\n\r\n location_losses = self._localization_loss(\r\n prediction_dict['box_encodings'], batch_reg_targets,\r\n weights=weights)\r\n cls_losses = self._classification_loss(\r\n prediction_dict['class_predictions_with_background'], batch_cls_targets,\r\n weights=weights)\r\n\r\n loss_dict = {\r\n 'localization_loss': tf.reduce_sum(location_losses),\r\n 'classification_loss': tf.reduce_sum(cls_losses),\r\n }\r\n return loss_dict\r\n\r\n def regularization_losses(self):\r\n \"\"\"Returns a list of regularization losses for this model.\r\n\r\n Returns a list of regularization losses for this model that the estimator\r\n needs to use during training/optimization.\r\n\r\n Returns:\r\n A list of regularization loss tensors.\r\n \"\"\"\r\n pass\r\n\r\n def restore_map(self, fine_tune_checkpoint_type='detection'):\r\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\r\n\r\n Args:\r\n fine_tune_checkpoint_type: whether to restore from a full detection\r\n checkpoint (with compatible variable names) or to restore from a\r\n classification checkpoint for initialization prior to training.\r\n Valid values: `detection`, `classification`. 
Default 'detection'.\r\n\r\n Returns:\r\n A dict mapping variable names to variables.\r\n \"\"\"\r\n return {var.op.name: var for var in tf.global_variables()}\r\n\r\n def updates(self):\r\n \"\"\"Returns a list of update operators for this model.\r\n\r\n Returns a list of update operators for this model that must be executed at\r\n each training step. The estimator's train op needs to have a control\r\n dependency on these updates.\r\n\r\n Returns:\r\n A list of update operators.\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass TrainerTest(tf.test.TestCase):\r\n\r\n def test_configure_trainer_and_train_two_steps(self):\r\n train_config_text_proto = \"\"\"\r\n optimizer {\r\n adam_optimizer {\r\n learning_rate {\r\n constant_learning_rate {\r\n learning_rate: 0.01\r\n }\r\n }\r\n }\r\n }\r\n data_augmentation_options {\r\n random_adjust_brightness {\r\n max_delta: 0.2\r\n }\r\n }\r\n data_augmentation_options {\r\n random_adjust_contrast {\r\n min_delta: 0.7\r\n max_delta: 1.1\r\n }\r\n }\r\n num_steps: 2\r\n \"\"\"\r\n train_config = train_pb2.TrainConfig()\r\n text_format.Merge(train_config_text_proto, train_config)\r\n\r\n train_dir = self.get_temp_dir()\r\n\r\n trainer.train(\r\n create_tensor_dict_fn=get_input_function,\r\n create_model_fn=FakeDetectionModel,\r\n train_config=train_config,\r\n master='',\r\n task=0,\r\n num_clones=1,\r\n worker_replicas=1,\r\n clone_on_cpu=True,\r\n ps_tasks=0,\r\n worker_job_name='worker',\r\n is_chief=True,\r\n train_dir=train_dir)\r\n\r\n def test_configure_trainer_with_multiclass_scores_and_train_two_steps(self):\r\n train_config_text_proto = \"\"\"\r\n optimizer {\r\n adam_optimizer {\r\n learning_rate {\r\n constant_learning_rate {\r\n learning_rate: 0.01\r\n }\r\n }\r\n }\r\n }\r\n data_augmentation_options {\r\n random_adjust_brightness {\r\n max_delta: 0.2\r\n }\r\n }\r\n data_augmentation_options {\r\n random_adjust_contrast {\r\n min_delta: 0.7\r\n max_delta: 1.1\r\n }\r\n }\r\n num_steps: 2\r\n use_multiclass_scores: true\r\n \"\"\"\r\n train_config = train_pb2.TrainConfig()\r\n text_format.Merge(train_config_text_proto, train_config)\r\n\r\n train_dir = self.get_temp_dir()\r\n\r\n trainer.train(create_tensor_dict_fn=get_input_function,\r\n create_model_fn=FakeDetectionModel,\r\n train_config=train_config,\r\n master='',\r\n task=0,\r\n num_clones=1,\r\n worker_replicas=1,\r\n clone_on_cpu=True,\r\n ps_tasks=0,\r\n worker_job_name='worker',\r\n is_chief=True,\r\n train_dir=train_dir)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"IMDB data loader and helpers.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\n# Dependency imports\r\nimport numpy as np\r\n\r\nimport tensorflow as tf\r\n\r\nFLAGS = tf.app.flags.FLAGS\r\ntf.app.flags.DEFINE_boolean('prefix_label', True, 'Vocabulary file.')\r\n\r\nnp.set_printoptions(precision=3)\r\nnp.set_printoptions(suppress=True)\r\n\r\nEOS_INDEX = 88892\r\n\r\n\r\ndef _read_words(filename, use_prefix=True):\r\n all_words = []\r\n sequence_example = tf.train.SequenceExample()\r\n for r in tf.python_io.tf_record_iterator(filename):\r\n sequence_example.ParseFromString(r)\r\n\r\n if FLAGS.prefix_label and use_prefix:\r\n label = sequence_example.context.feature['class'].int64_list.value[0]\r\n review_words = [EOS_INDEX + 1 + label]\r\n else:\r\n review_words = []\r\n review_words.extend([\r\n f.int64_list.value[0]\r\n for f in sequence_example.feature_lists.feature_list['token_id'].feature\r\n ])\r\n all_words.append(review_words)\r\n return all_words\r\n\r\n\r\ndef build_vocab(vocab_file):\r\n word_to_id = {}\r\n\r\n with tf.gfile.GFile(vocab_file, 'r') as f:\r\n index = 0\r\n for word in f:\r\n word_to_id[word.strip()] = index\r\n index += 1\r\n word_to_id['<eos>'] = EOS_INDEX\r\n\r\n return word_to_id\r\n\r\n\r\ndef imdb_raw_data(data_path=None):\r\n \"\"\"Load IMDB raw data from data directory \"data_path\".\r\n Reads IMDB tf record files containing integer ids,\r\n and performs mini-batching of the inputs.\r\n Args:\r\n data_path: string path to the directory where simple-examples.tgz has\r\n been extracted.\r\n Returns:\r\n tuple (train_data, valid_data)\r\n where each of the data objects can be passed to IMDBIterator.\r\n \"\"\"\r\n\r\n train_path = os.path.join(data_path, 'train_lm.tfrecords')\r\n valid_path = os.path.join(data_path, 'test_lm.tfrecords')\r\n\r\n train_data = _read_words(train_path)\r\n valid_data = _read_words(valid_path)\r\n return train_data, valid_data\r\n\r\n\r\ndef imdb_iterator(raw_data, batch_size, num_steps, epoch_size_override=None):\r\n \"\"\"Iterate on the raw IMDB data.\r\n\r\n This generates batch_size pointers into the raw IMDB data, and allows\r\n minibatch iteration along these pointers.\r\n\r\n Args:\r\n raw_data: one of the raw data outputs from imdb_raw_data.\r\n batch_size: int, the batch size.\r\n num_steps: int, the number of unrolls.\r\n\r\n Yields:\r\n Pairs of the batched data, each a matrix of shape [batch_size, num_steps].\r\n The second element of the tuple is the same data time-shifted to the\r\n right by one. 
The third is a set of weights with 1 indicating a word was\r\n present and 0 not.\r\n\r\n Raises:\r\n ValueError: if batch_size or num_steps are too high.\r\n \"\"\"\r\n del epoch_size_override\r\n data_len = len(raw_data)\r\n num_batches = data_len // batch_size - 1\r\n\r\n for batch in range(num_batches):\r\n x = np.zeros([batch_size, num_steps], dtype=np.int32)\r\n y = np.zeros([batch_size, num_steps], dtype=np.int32)\r\n w = np.zeros([batch_size, num_steps], dtype=np.float)\r\n\r\n for i in range(batch_size):\r\n data_index = batch * batch_size + i\r\n example = raw_data[data_index]\r\n\r\n if len(example) > num_steps:\r\n final_x = example[:num_steps]\r\n final_y = example[1:(num_steps + 1)]\r\n w[i] = 1\r\n\r\n else:\r\n to_fill_in = num_steps - len(example)\r\n final_x = example + [EOS_INDEX] * to_fill_in\r\n final_y = final_x[1:] + [EOS_INDEX]\r\n w[i] = [1] * len(example) + [0] * to_fill_in\r\n\r\n x[i] = final_x\r\n y[i] = final_y\r\n\r\n yield (x, y, w)\r\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nr\"\"\"Runs a DRAGNN model on a given set of CoNLL-formatted sentences.\r\n\r\nSample invocation:\r\n bazel run -c opt <...>:evaluator -- \\\r\n --master_spec=\"/path/to/master-spec\" \\\r\n --checkpoint_file=\"/path/to/model/name.checkpoint\" \\\r\n --input_file=\"/path/to/input/documents/test.connlu\"\r\n\"\"\"\r\n\r\nimport os\r\nimport re\r\nimport time\r\n\r\nfrom absl import flags\r\nimport tensorflow as tf\r\n\r\nfrom google.protobuf import text_format\r\nfrom tensorflow.python.client import timeline\r\nfrom tensorflow.python.platform import gfile\r\n\r\nfrom dragnn.protos import spec_pb2\r\nfrom dragnn.python import evaluation\r\nfrom dragnn.python import graph_builder\r\nfrom dragnn.python import sentence_io\r\nfrom dragnn.python import spec_builder\r\nfrom syntaxnet import sentence_pb2\r\n\r\nFLAGS = flags.FLAGS\r\n\r\nflags.DEFINE_string('master_spec', '',\r\n 'Path to text file containing a DRAGNN master spec to run.')\r\nflags.DEFINE_string('resource_dir', '',\r\n 'Optional base directory for resources in the master spec.')\r\nflags.DEFINE_bool('complete_master_spec', False, 'Whether the master_spec '\r\n 'needs the lexicon and other resources added to it.')\r\nflags.DEFINE_string('checkpoint_file', '', 'Path to trained model checkpoint.')\r\nflags.DEFINE_string('input_file', '',\r\n 'File of CoNLL-formatted sentences to read from.')\r\nflags.DEFINE_string('output_file', '',\r\n 'File path to write annotated sentences to.')\r\nflags.DEFINE_integer('max_batch_size', 2048, 'Maximum batch size to support.')\r\nflags.DEFINE_string('inference_beam_size', '', 'Comma separated list of '\r\n 'component_name=beam_size pairs.')\r\nflags.DEFINE_string('locally_normalize', '', 'Comma separated list of '\r\n 'component names to do local normalization on.')\r\nflags.DEFINE_integer('threads', 10, 'Number of threads used for intra- and '\r\n 'inter-op parallelism.')\r\nflags.DEFINE_string('timeline_output_file', '', 'Path to save timeline to. '\r\n 'If specified, the final iteration of the evaluation loop '\r\n 'will capture and save a TensorFlow timeline.')\r\nflags.DEFINE_string('log_file', '', 'File path to write parser eval results.')\r\nflags.DEFINE_string('language_name', '_', 'Name of language being parsed, '\r\n 'for logging.')\r\n\r\n\r\ndef main(unused_argv):\r\n tf.logging.set_verbosity(tf.logging.INFO)\r\n\r\n # Parse the flags containint lists, using regular expressions.\r\n # This matches and extracts key=value pairs.\r\n component_beam_sizes = re.findall(r'([^=,]+)=(\\d+)',\r\n FLAGS.inference_beam_size)\r\n # This matches strings separated by a comma. 
Does not return any empty\r\n # strings.\r\n components_to_locally_normalize = re.findall(r'[^,]+',\r\n FLAGS.locally_normalize)\r\n\r\n # Reads master spec.\r\n master_spec = spec_pb2.MasterSpec()\r\n with gfile.FastGFile(FLAGS.master_spec) as fin:\r\n text_format.Parse(fin.read(), master_spec)\r\n\r\n # Rewrite resource locations.\r\n if FLAGS.resource_dir:\r\n for component in master_spec.component:\r\n for resource in component.resource:\r\n for part in resource.part:\r\n part.file_pattern = os.path.join(FLAGS.resource_dir,\r\n part.file_pattern)\r\n\r\n if FLAGS.complete_master_spec:\r\n spec_builder.complete_master_spec(master_spec, None, FLAGS.resource_dir)\r\n\r\n # Graph building.\r\n tf.logging.info('Building the graph')\r\n g = tf.Graph()\r\n with g.as_default(), tf.device('/device:CPU:0'):\r\n hyperparam_config = spec_pb2.GridPoint()\r\n hyperparam_config.use_moving_average = True\r\n builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)\r\n annotator = builder.add_annotation()\r\n builder.add_saver()\r\n\r\n tf.logging.info('Reading documents...')\r\n input_corpus = sentence_io.ConllSentenceReader(FLAGS.input_file).corpus()\r\n\r\n session_config = tf.ConfigProto(\r\n log_device_placement=False,\r\n intra_op_parallelism_threads=FLAGS.threads,\r\n inter_op_parallelism_threads=FLAGS.threads)\r\n\r\n with tf.Session(graph=g, config=session_config) as sess:\r\n tf.logging.info('Initializing variables...')\r\n sess.run(tf.global_variables_initializer())\r\n\r\n tf.logging.info('Loading from checkpoint...')\r\n sess.run('save/restore_all', {'save/Const:0': FLAGS.checkpoint_file})\r\n\r\n tf.logging.info('Processing sentences...')\r\n\r\n processed = []\r\n start_time = time.time()\r\n run_metadata = tf.RunMetadata()\r\n for start in range(0, len(input_corpus), FLAGS.max_batch_size):\r\n end = min(start + FLAGS.max_batch_size, len(input_corpus))\r\n feed_dict = {annotator['input_batch']: input_corpus[start:end]}\r\n for comp, beam_size in component_beam_sizes:\r\n feed_dict['%s/InferenceBeamSize:0' % comp] = beam_size\r\n for comp in components_to_locally_normalize:\r\n feed_dict['%s/LocallyNormalize:0' % comp] = True\r\n if FLAGS.timeline_output_file and end == len(input_corpus):\r\n serialized_annotations = sess.run(\r\n annotator['annotations'], feed_dict=feed_dict,\r\n options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\r\n run_metadata=run_metadata)\r\n trace = timeline.Timeline(step_stats=run_metadata.step_stats)\r\n with open(FLAGS.timeline_output_file, 'w') as trace_file:\r\n trace_file.write(trace.generate_chrome_trace_format())\r\n else:\r\n serialized_annotations = sess.run(\r\n annotator['annotations'], feed_dict=feed_dict)\r\n processed.extend(serialized_annotations)\r\n\r\n tf.logging.info('Processed %d documents in %.2f seconds.',\r\n len(input_corpus), time.time() - start_time)\r\n pos, uas, las = evaluation.calculate_parse_metrics(input_corpus, processed)\r\n if FLAGS.log_file:\r\n with gfile.GFile(FLAGS.log_file, 'w') as f:\r\n f.write('%s\\t%f\\t%f\\t%f\\n' % (FLAGS.language_name, pos, uas, las))\r\n\r\n if FLAGS.output_file:\r\n with gfile.GFile(FLAGS.output_file, 'w') as f:\r\n for serialized_sentence in processed:\r\n sentence = sentence_pb2.Sentence()\r\n sentence.ParseFromString(serialized_sentence)\r\n f.write(text_format.MessageToString(sentence) + '\\n\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.app.run()\r\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Bounding Box List definition.\r\n\r\nBoxList represents a list of bounding boxes as tensorflow\r\ntensors, where each bounding box is represented as a row of 4 numbers,\r\n[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes\r\nwithin a given list correspond to a single image. See also\r\nbox_list_ops.py for common box related operations (such as area, iou, etc).\r\n\r\nOptionally, users can add additional related fields (such as weights).\r\nWe assume the following things to be true about fields:\r\n* they correspond to boxes in the box_list along the 0th dimension\r\n* they have inferrable rank at graph construction time\r\n* all dimensions except for possibly the 0th can be inferred\r\n (i.e., not None) at graph construction time.\r\n\r\nSome other notes:\r\n * Following tensorflow conventions, we use height, width ordering,\r\n and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering\r\n * Tensors are always provided as (flat) [N, 4] tensors.\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\nfrom object_detection.utils import shape_utils\r\n\r\n\r\nclass BoxList(object):\r\n \"\"\"Box collection.\"\"\"\r\n\r\n def __init__(self, boxes):\r\n \"\"\"Constructs box collection.\r\n\r\n Args:\r\n boxes: a tensor of shape [N, 4] representing box corners\r\n\r\n Raises:\r\n ValueError: if invalid dimensions for bbox data or if bbox data is not in\r\n float32 format.\r\n \"\"\"\r\n if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:\r\n raise ValueError('Invalid dimensions for box data.')\r\n if boxes.dtype != tf.float32:\r\n raise ValueError('Invalid tensor type: should be tf.float32')\r\n self.data = {'boxes': boxes}\r\n\r\n def num_boxes(self):\r\n \"\"\"Returns number of boxes held in collection.\r\n\r\n Returns:\r\n a tensor representing the number of boxes held in the collection.\r\n \"\"\"\r\n return tf.shape(self.data['boxes'])[0]\r\n\r\n def num_boxes_static(self):\r\n \"\"\"Returns number of boxes held in collection.\r\n\r\n This number is inferred at graph construction time rather than run-time.\r\n\r\n Returns:\r\n Number of boxes held in collection (integer) or None if this is not\r\n inferrable at graph construction time.\r\n \"\"\"\r\n return shape_utils.get_dim_as_int(self.data['boxes'].get_shape()[0])\r\n\r\n def get_all_fields(self):\r\n \"\"\"Returns all fields.\"\"\"\r\n return self.data.keys()\r\n\r\n def get_extra_fields(self):\r\n \"\"\"Returns all non-box fields (i.e., everything not named 'boxes').\"\"\"\r\n return [k for k in self.data.keys() if k != 'boxes']\r\n\r\n def add_field(self, field, field_data):\r\n \"\"\"Add field to box list.\r\n\r\n This method can be used to add related box data such as\r\n weights/labels, etc.\r\n\r\n Args:\r\n field: a string key to access the data via `get`\r\n field_data: a 
tensor containing the data to store in the BoxList\r\n \"\"\"\r\n self.data[field] = field_data\r\n\r\n def has_field(self, field):\r\n return field in self.data\r\n\r\n def get(self):\r\n \"\"\"Convenience function for accessing box coordinates.\r\n\r\n Returns:\r\n a tensor with shape [N, 4] representing box coordinates.\r\n \"\"\"\r\n return self.get_field('boxes')\r\n\r\n def set(self, boxes):\r\n \"\"\"Convenience function for setting box coordinates.\r\n\r\n Args:\r\n boxes: a tensor of shape [N, 4] representing box corners\r\n\r\n Raises:\r\n ValueError: if invalid dimensions for bbox data\r\n \"\"\"\r\n if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:\r\n raise ValueError('Invalid dimensions for box data.')\r\n self.data['boxes'] = boxes\r\n\r\n def get_field(self, field):\r\n \"\"\"Accesses a box collection and associated fields.\r\n\r\n This function returns specified field with object; if no field is specified,\r\n it returns the box coordinates.\r\n\r\n Args:\r\n field: this optional string parameter can be used to specify\r\n a related field to be accessed.\r\n\r\n Returns:\r\n a tensor representing the box collection or an associated field.\r\n\r\n Raises:\r\n ValueError: if invalid field\r\n \"\"\"\r\n if not self.has_field(field):\r\n raise ValueError('field ' + str(field) + ' does not exist')\r\n return self.data[field]\r\n\r\n def set_field(self, field, value):\r\n \"\"\"Sets the value of a field.\r\n\r\n Updates the field of a box_list with a given value.\r\n\r\n Args:\r\n field: (string) name of the field to set value.\r\n value: the value to assign to the field.\r\n\r\n Raises:\r\n ValueError: if the box_list does not have specified field.\r\n \"\"\"\r\n if not self.has_field(field):\r\n raise ValueError('field %s does not exist' % field)\r\n self.data[field] = value\r\n\r\n def get_center_coordinates_and_sizes(self, scope=None):\r\n \"\"\"Computes the center coordinates, height and width of the boxes.\r\n\r\n Args:\r\n scope: name scope of the function.\r\n\r\n Returns:\r\n a list of 4 1-D tensors [ycenter, xcenter, height, width].\r\n \"\"\"\r\n with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):\r\n box_corners = self.get()\r\n ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))\r\n width = xmax - xmin\r\n height = ymax - ymin\r\n ycenter = ymin + height / 2.\r\n xcenter = xmin + width / 2.\r\n return [ycenter, xcenter, height, width]\r\n\r\n def transpose_coordinates(self, scope=None):\r\n \"\"\"Transpose the coordinate representation in a boxlist.\r\n\r\n Args:\r\n scope: name scope of the function.\r\n \"\"\"\r\n with tf.name_scope(scope, 'transpose_coordinates'):\r\n y_min, x_min, y_max, x_max = tf.split(\r\n value=self.get(), num_or_size_splits=4, axis=1)\r\n self.set(tf.concat([x_min, y_min, x_max, y_max], 1))\r\n\r\n def as_tensor_dict(self, fields=None):\r\n \"\"\"Retrieves specified fields as a dictionary of tensors.\r\n\r\n Args:\r\n fields: (optional) list of fields to return in the dictionary.\r\n If None (default), all fields are returned.\r\n\r\n Returns:\r\n tensor_dict: A dictionary of tensors specified by fields.\r\n\r\n Raises:\r\n ValueError: if specified field is not contained in boxlist.\r\n \"\"\"\r\n tensor_dict = {}\r\n if fields is None:\r\n fields = self.get_all_fields()\r\n for field in fields:\r\n if not self.has_field(field):\r\n raise ValueError('boxlist must contain all specified fields')\r\n tensor_dict[field] = self.get_field(field)\r\n return tensor_dict\r\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for run_training.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport time\r\n\r\nfrom absl import flags\r\nfrom absl.testing import absltest\r\nfrom absl.testing import flagsaver\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom google.protobuf import text_format\r\n\r\nimport run_training\r\nfrom protos import seq2label_pb2\r\nimport test_utils\r\n\r\nFLAGS = flags.FLAGS\r\n\r\n\r\nclass RunTrainingTest(parameterized.TestCase):\r\n\r\n @parameterized.parameters(2, 4, 7)\r\n def test_wait_until(self, wait_sec):\r\n end_time = time.time() + wait_sec\r\n run_training.wait_until(end_time)\r\n self.assertEqual(round(time.time() - end_time), 0)\r\n\r\n @parameterized.parameters(\r\n ({}, {'a': 0.7, 'b': 12.3}, 12.3, None,\r\n {'a': 0.7, 'b': 12.3, 'is_infeasible': False}),\r\n ({'a': 0.42}, {'b': 24.5}, 24.5, 32.0,\r\n {'a': 0.42, 'b': 24.5, 'is_infeasible': False}),\r\n ({'a': 0.503}, {'a': 0.82, 'b': 7.2}, 7.2, 0.1,\r\n {'a': 0.82, 'b': 7.2, 'is_infeasible': True}),\r\n ({}, {'a': 0.7, 'b': 12.3}, float('Inf'), None,\r\n {'a': 0.7, 'b': 12.3, 'is_infeasible': True})\r\n )\r\n def test_update_measures(self, measures, new_measures, loss, max_loss,\r\n expected):\r\n run_training.update_measures(measures, new_measures, loss, max_loss)\r\n self.assertEqual(measures, expected)\r\n\r\n def test_write_measures(self):\r\n init_time = time.time()\r\n measures = {\r\n 'global_step': 311448,\r\n 'train_loss': np.float32(18.36),\r\n 'train_weighted_accuracy': np.float32(0.3295),\r\n 'train_accuracy': 0.8243,\r\n 'is_infeasible': False\r\n }\r\n tmp_path = os.path.join(FLAGS.test_tmpdir, 'measures.pbtxt')\r\n run_training.write_measures(measures, tmp_path, init_time)\r\n experiment_measures = seq2label_pb2.Seq2LabelExperimentMeasures()\r\n with tf.gfile.Open(tmp_path) as f:\r\n text_format.Parse(f.read(), experiment_measures)\r\n self.assertEqual(experiment_measures.checkpoint_path, tmp_path)\r\n self.assertFalse(experiment_measures.experiment_infeasible)\r\n self.assertEqual(experiment_measures.steps, measures['global_step'])\r\n self.assertGreater(experiment_measures.wall_time, 0)\r\n self.assertEqual(len(experiment_measures.measures), 3)\r\n for measure in experiment_measures.measures:\r\n self.assertAlmostEqual(measure.value, measures[measure.name])\r\n\r\n @parameterized.parameters((test_utils.TEST_TARGETS[:1],),\r\n (test_utils.TEST_TARGETS,))\r\n def test_run_training(self, targets):\r\n \"\"\"Tests whether the training loop can be run successfully.\r\n\r\n Generates test input files and runs the main driving code.\r\n\r\n Args:\r\n targets: the targets to train on.\r\n \"\"\"\r\n # Create test input and metadata 
files.\r\n num_examples, read_len = 20, 5\r\n train_file = test_utils.create_tmp_train_file(num_examples, read_len)\r\n metadata_path = test_utils.create_tmp_metadata(num_examples, read_len)\r\n\r\n # Check that the training loop runs as expected.\r\n logdir = os.path.join(FLAGS.test_tmpdir, 'train:{}'.format(len(targets)))\r\n with flagsaver.flagsaver(\r\n train_files=train_file,\r\n metadata_path=metadata_path,\r\n targets=targets,\r\n logdir=logdir,\r\n hparams='train_steps=10,min_read_length=5',\r\n batch_size=10):\r\n run_training.main(FLAGS)\r\n # Check training loop ran by confirming existence of a checkpoint file.\r\n self.assertIsNotNone(tf.train.latest_checkpoint(FLAGS.logdir))\r\n # Check training loop ran by confiming existence of a measures file.\r\n self.assertTrue(\r\n os.path.exists(os.path.join(FLAGS.logdir, 'measures.pbtxt')))\r\n\r\n\r\nif __name__ == '__main__':\r\n absltest.main()\r\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for utils.py.\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\nfrom deeplab.core import utils\r\n\r\n\r\nclass UtilsTest(tf.test.TestCase):\r\n\r\n def testScaleDimensionOutput(self):\r\n self.assertEqual(161, utils.scale_dimension(321, 0.5))\r\n self.assertEqual(193, utils.scale_dimension(321, 0.6))\r\n self.assertEqual(241, utils.scale_dimension(321, 0.75))\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n",
"# Copyright 2016 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Code for training the prediction model.\"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.platform import app\r\nfrom tensorflow.python.platform import flags\r\n\r\nfrom prediction_input import build_tfrecord_input\r\nfrom prediction_model import construct_model\r\n\r\n# How often to record tensorboard summaries.\r\nSUMMARY_INTERVAL = 40\r\n\r\n# How often to run a batch through the validation model.\r\nVAL_INTERVAL = 200\r\n\r\n# How often to save a model checkpoint\r\nSAVE_INTERVAL = 2000\r\n\r\n# tf record data location:\r\nDATA_DIR = 'push/push_train'\r\n\r\n# local output directory\r\nOUT_DIR = '/tmp/data'\r\n\r\nFLAGS = flags.FLAGS\r\n\r\nflags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')\r\nflags.DEFINE_string('output_dir', OUT_DIR, 'directory for model checkpoints.')\r\nflags.DEFINE_string('event_log_dir', OUT_DIR, 'directory for writing summary.')\r\nflags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')\r\nflags.DEFINE_string('pretrained_model', '',\r\n 'filepath of a pretrained model to initialize from.')\r\n\r\nflags.DEFINE_integer('sequence_length', 10,\r\n 'sequence length, including context frames.')\r\nflags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')\r\nflags.DEFINE_integer('use_state', 1,\r\n 'Whether or not to give the state+action to the model')\r\n\r\nflags.DEFINE_string('model', 'CDNA',\r\n 'model architecture to use - CDNA, DNA, or STP')\r\n\r\nflags.DEFINE_integer('num_masks', 10,\r\n 'number of masks, usually 1 for DNA, 10 for CDNA, STN.')\r\nflags.DEFINE_float('schedsamp_k', 900.0,\r\n 'The k hyperparameter for scheduled sampling,'\r\n '-1 for no scheduled sampling.')\r\nflags.DEFINE_float('train_val_split', 0.95,\r\n 'The percentage of files to use for the training set,'\r\n ' vs. the validation set.')\r\n\r\nflags.DEFINE_integer('batch_size', 32, 'batch size for training')\r\nflags.DEFINE_float('learning_rate', 0.001,\r\n 'the base learning rate of the generator')\r\n\r\n\r\n## Helper functions\r\ndef peak_signal_to_noise_ratio(true, pred):\r\n \"\"\"Image quality metric based on maximal signal power vs. 
power of the noise.\r\n\r\n Args:\r\n true: the ground truth image.\r\n pred: the predicted image.\r\n Returns:\r\n peak signal to noise ratio (PSNR)\r\n \"\"\"\r\n return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)\r\n\r\n\r\ndef mean_squared_error(true, pred):\r\n \"\"\"L2 distance between tensors true and pred.\r\n\r\n Args:\r\n true: the ground truth image.\r\n pred: the predicted image.\r\n Returns:\r\n mean squared error between ground truth and predicted image.\r\n \"\"\"\r\n return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))\r\n\r\n\r\nclass Model(object):\r\n\r\n def __init__(self,\r\n images=None,\r\n actions=None,\r\n states=None,\r\n sequence_length=None,\r\n reuse_scope=None,\r\n prefix=None):\r\n\r\n if sequence_length is None:\r\n sequence_length = FLAGS.sequence_length\r\n\r\n if prefix is None:\r\n prefix = tf.placeholder(tf.string, [])\r\n self.prefix = prefix\r\n self.iter_num = tf.placeholder(tf.float32, [])\r\n summaries = []\r\n\r\n # Split into timesteps.\r\n actions = tf.split(axis=1, num_or_size_splits=int(actions.get_shape()[1]), value=actions)\r\n actions = [tf.squeeze(act) for act in actions]\r\n states = tf.split(axis=1, num_or_size_splits=int(states.get_shape()[1]), value=states)\r\n states = [tf.squeeze(st) for st in states]\r\n images = tf.split(axis=1, num_or_size_splits=int(images.get_shape()[1]), value=images)\r\n images = [tf.squeeze(img) for img in images]\r\n\r\n if reuse_scope is None:\r\n gen_images, gen_states = construct_model(\r\n images,\r\n actions,\r\n states,\r\n iter_num=self.iter_num,\r\n k=FLAGS.schedsamp_k,\r\n use_state=FLAGS.use_state,\r\n num_masks=FLAGS.num_masks,\r\n cdna=FLAGS.model == 'CDNA',\r\n dna=FLAGS.model == 'DNA',\r\n stp=FLAGS.model == 'STP',\r\n context_frames=FLAGS.context_frames)\r\n else: # If it's a validation or test model.\r\n with tf.variable_scope(reuse_scope, reuse=True):\r\n gen_images, gen_states = construct_model(\r\n images,\r\n actions,\r\n states,\r\n iter_num=self.iter_num,\r\n k=FLAGS.schedsamp_k,\r\n use_state=FLAGS.use_state,\r\n num_masks=FLAGS.num_masks,\r\n cdna=FLAGS.model == 'CDNA',\r\n dna=FLAGS.model == 'DNA',\r\n stp=FLAGS.model == 'STP',\r\n context_frames=FLAGS.context_frames)\r\n\r\n # L2 loss, PSNR for eval.\r\n loss, psnr_all = 0.0, 0.0\r\n for i, x, gx in zip(\r\n range(len(gen_images)), images[FLAGS.context_frames:],\r\n gen_images[FLAGS.context_frames - 1:]):\r\n recon_cost = mean_squared_error(x, gx)\r\n psnr_i = peak_signal_to_noise_ratio(x, gx)\r\n psnr_all += psnr_i\r\n summaries.append(\r\n tf.summary.scalar(prefix + '_recon_cost' + str(i), recon_cost))\r\n summaries.append(tf.summary.scalar(prefix + '_psnr' + str(i), psnr_i))\r\n loss += recon_cost\r\n\r\n for i, state, gen_state in zip(\r\n range(len(gen_states)), states[FLAGS.context_frames:],\r\n gen_states[FLAGS.context_frames - 1:]):\r\n state_cost = mean_squared_error(state, gen_state) * 1e-4\r\n summaries.append(\r\n tf.summary.scalar(prefix + '_state_cost' + str(i), state_cost))\r\n loss += state_cost\r\n summaries.append(tf.summary.scalar(prefix + '_psnr_all', psnr_all))\r\n self.psnr_all = psnr_all\r\n\r\n self.loss = loss = loss / np.float32(len(images) - FLAGS.context_frames)\r\n\r\n summaries.append(tf.summary.scalar(prefix + '_loss', loss))\r\n\r\n self.lr = tf.placeholder_with_default(FLAGS.learning_rate, ())\r\n\r\n self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)\r\n self.summ_op = tf.summary.merge(summaries)\r\n\r\n\r\ndef main(unused_argv):\r\n\r\n 
print('Constructing models and inputs.')\r\n with tf.variable_scope('model', reuse=None) as training_scope:\r\n images, actions, states = build_tfrecord_input(training=True)\r\n model = Model(images, actions, states, FLAGS.sequence_length,\r\n prefix='train')\r\n\r\n with tf.variable_scope('val_model', reuse=None):\r\n val_images, val_actions, val_states = build_tfrecord_input(training=False)\r\n val_model = Model(val_images, val_actions, val_states,\r\n FLAGS.sequence_length, training_scope, prefix='val')\r\n\r\n print('Constructing saver.')\r\n # Make saver.\r\n saver = tf.train.Saver(\r\n tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=0)\r\n\r\n # Make training session.\r\n sess = tf.InteractiveSession()\r\n sess.run(tf.global_variables_initializer())\r\n\r\n summary_writer = tf.summary.FileWriter(\r\n FLAGS.event_log_dir, graph=sess.graph, flush_secs=10)\r\n\r\n if FLAGS.pretrained_model:\r\n saver.restore(sess, FLAGS.pretrained_model)\r\n\r\n tf.train.start_queue_runners(sess)\r\n\r\n tf.logging.info('iteration number, cost')\r\n\r\n # Run training.\r\n for itr in range(FLAGS.num_iterations):\r\n # Generate new batch of data.\r\n feed_dict = {model.iter_num: np.float32(itr),\r\n model.lr: FLAGS.learning_rate}\r\n cost, _, summary_str = sess.run([model.loss, model.train_op, model.summ_op],\r\n feed_dict)\r\n\r\n # Print info: iteration #, cost.\r\n tf.logging.info(str(itr) + ' ' + str(cost))\r\n\r\n if (itr) % VAL_INTERVAL == 2:\r\n # Run through validation set.\r\n feed_dict = {val_model.lr: 0.0,\r\n val_model.iter_num: np.float32(itr)}\r\n _, val_summary_str = sess.run([val_model.train_op, val_model.summ_op],\r\n feed_dict)\r\n summary_writer.add_summary(val_summary_str, itr)\r\n\r\n if (itr) % SAVE_INTERVAL == 2:\r\n tf.logging.info('Saving model.')\r\n saver.save(sess, FLAGS.output_dir + '/model' + str(itr))\r\n\r\n if (itr) % SUMMARY_INTERVAL:\r\n summary_writer.add_summary(summary_str, itr)\r\n\r\n tf.logging.info('Saving model.')\r\n saver.save(sess, FLAGS.output_dir + '/model')\r\n tf.logging.info('Training complete')\r\n tf.logging.flush()\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n"
] | [
[
"tensorflow.python.training.monitored_session.MonitoredSession",
"tensorflow.python.training.monitored_session.ChiefSessionCreator",
"numpy.asarray",
"tensorflow.gfile.GFile",
"tensorflow.placeholder",
"numpy.ndarray",
"tensorflow.map_fn",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.app.run"
],
[
"tensorflow.nn.conv2d",
"tensorflow.squeeze",
"tensorflow.to_float",
"tensorflow.logical_not",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.split",
"tensorflow.sequence_mask",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.sin",
"tensorflow.range",
"tensorflow.cos",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.mod",
"tensorflow.constant_initializer",
"tensorflow.variable_scope"
],
[
"tensorflow.matmul",
"tensorflow.reduce_mean",
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.subtract",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.square",
"tensorflow.random_normal"
],
[
"numpy.testing.assert_equal",
"tensorflow.Graph",
"numpy.sin",
"numpy.std",
"numpy.mean",
"tensorflow.Session"
],
[
"tensorflow.train.Features"
],
[
"tensorflow.python.platform.googletest.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main",
"tensorflow.logging.set_verbosity"
],
[
"tensorflow.constant",
"tensorflow.gradients",
"tensorflow.placeholder",
"tensorflow.test.main",
"tensorflow.global_variables_initializer",
"numpy.random.normal",
"numpy.argmax",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.random_normal"
],
[
"tensorflow.io.TFRecordWriter",
"tensorflow.io.gfile.GFile",
"tensorflow.train.Features"
],
[
"tensorflow.reduce_max",
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.greater",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.zeros_initializer",
"tensorflow.less",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.to_float",
"tensorflow.reverse_sequence"
],
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.nn.max_pool",
"tensorflow.cast",
"tensorflow.nn.conv2d_transpose",
"tensorflow.nn.conv2d",
"tensorflow.sparse_to_dense",
"tensorflow.image.random_flip_left_right",
"tensorflow.assign_add",
"tensorflow.random_uniform_initializer",
"tensorflow.nn.moments",
"tensorflow.check_numerics",
"tensorflow.stop_gradient",
"tensorflow.name_scope",
"tensorflow.square",
"numpy.zeros",
"numpy.log",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.split",
"numpy.array",
"tensorflow.nn.bias_add",
"tensorflow.reduce_max",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.constant_initializer",
"numpy.prod",
"tensorflow.variable_scope",
"tensorflow.sqrt",
"tensorflow.random_normal"
],
[
"numpy.logical_xor",
"numpy.minimum",
"numpy.sqrt",
"numpy.concatenate",
"numpy.round",
"numpy.max",
"numpy.zeros_like",
"numpy.any",
"numpy.ravel_multi_index",
"numpy.digitize",
"numpy.histogram",
"numpy.where",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"numpy.argmax",
"numpy.zeros",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.mod"
],
[
"tensorflow.constant",
"tensorflow.resource_loader.get_data_files_path",
"tensorflow.transpose",
"tensorflow.shape",
"tensorflow.image.resize_images",
"tensorflow.test.main",
"tensorflow.placeholder",
"numpy.concatenate",
"numpy.random.rand",
"tensorflow.set_random_seed",
"numpy.array",
"tensorflow.add_to_collection",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.dot",
"sklearn.datasets.make_classification",
"numpy.sqrt",
"numpy.random.seed",
"numpy.arange",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.random.normal",
"numpy.random.binomial",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
],
[
"tensorflow.random_uniform",
"tensorflow.test.main"
],
[
"tensorflow.constant",
"numpy.reshape",
"numpy.arange",
"tensorflow.placeholder",
"tensorflow.test.main",
"numpy.ones",
"tensorflow.constant_initializer",
"tensorflow.global_variables_initializer",
"numpy.zeros"
],
[
"tensorflow.contrib.framework.get_model_variables",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.nn.l2_loss",
"tensorflow.to_float",
"tensorflow.trainable_variables"
],
[
"tensorflow.constant",
"tensorflow.test.main"
],
[
"numpy.rollaxis",
"numpy.tile",
"numpy.cumsum",
"numpy.ones",
"numpy.zeros"
],
[
"tensorflow.constant",
"tensorflow.image.resize_images",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.global_variables",
"tensorflow.test.main",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.contrib.layers.flatten",
"tensorflow.random_uniform"
],
[
"tensorflow.gfile.GFile",
"numpy.set_printoptions",
"tensorflow.train.SequenceExample",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.app.flags.DEFINE_boolean",
"numpy.zeros"
],
[
"tensorflow.Graph",
"tensorflow.device",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.RunMetadata",
"tensorflow.RunOptions",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.python.client.timeline.Timeline",
"tensorflow.app.run"
],
[
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.name_scope",
"tensorflow.shape"
],
[
"tensorflow.train.latest_checkpoint",
"numpy.float32",
"tensorflow.gfile.Open"
],
[
"tensorflow.test.main"
],
[
"tensorflow.python.platform.app.run",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.train.AdamOptimizer",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.summary.scalar",
"tensorflow.get_collection",
"tensorflow.placeholder_with_default",
"tensorflow.squeeze",
"tensorflow.python.platform.flags.DEFINE_float",
"tensorflow.square",
"numpy.float32",
"tensorflow.InteractiveSession",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"tensorflow.size",
"tensorflow.summary.merge",
"tensorflow.summary.FileWriter",
"tensorflow.train.start_queue_runners",
"tensorflow.logging.flush",
"tensorflow.log",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
bmcmenamin/word2vec_advice | [
"69dbde89b26b80d10f778147f2e3abe1628d6e05"
] | [
"scrape_scripts/textScrape.py"
] | [
"#!/Users/mcmenamin/.virtualenvs/py3env/bin/python\n\nfrom lxml import html\nimport requests\n\nfrom datetime import date\nimport numpy as np\nimport pandas as pd\n\nimport re as re\n\nfrom itertools import chain\nimport pickle\n\nfrom tqdm import tqdm\n\ndef getURLforYear(year, archiveURL='http://www.uexpress.com/dearabby/archives'):\n archive = requests.get('{0}/{1}'.format(archiveURL, year))\n tree = html.fromstring(archive.text)\n urlList = [a.attrib['href'] for a in tree.find_class('media-link-main')]\n return urlList\n\ndef scrape_page(extURL, baseURL='http://www.uexpress.com/'): \n page = requests.get('{0}{1}'.format(baseURL, extURL))\n tree = html.fromstring(page.text)\n questions = tree.find_class('item-section')\n allQ = []\n for q in questions:\n qText = [i.text_content() for i in q.iterfind('p')]\n allQ += qText\n allQ = ' '.join(allQ)\n return allQ\n\ndef parseAbby(block):\n block = block.strip().split('DEAR ')\n\n abbyBlock = [p.startswith('ABBY:') for p in block]\n dearReaderBlock = [p.startswith('READERS:') for p in block]\n replyBlock = [not (p[0] or p[1]) for p in zip(abbyBlock, dearReaderBlock)]\n \n QA_pairs = []\n if True in abbyBlock and True in replyBlock:\n firstBlock = abbyBlock.index(True)\n \n block = block[firstBlock:]\n abbyBlock = abbyBlock[firstBlock:]\n dearReaderBlock = dearReaderBlock[firstBlock:]\n replyBlock = replyBlock[firstBlock:]\n \n for i in range(len(block)-1):\n if abbyBlock[i] and replyBlock[i+1]:\n QA_pairs.append([block[i], block[i+1]])\n return QA_pairs\n\n\n#\n# Get an iterator of URLs from archives for a specific date range\n#\n\narchivedURLs = list(chain.from_iterable([getURLforYear(y) for y in range(1991,2017+1)]))\n\n\n#\n# Pull in the text from each archived URL\n#\n\nall_text_dict = {}\nfor url in tqdm(archivedURLs):\n raw_text = scrape_page(url)\n all_text_dict[url] = {'path': url,\n 'date': date(*[int(i) for i in url.split('/')[2:5]]),\n 'raw_text': raw_text,\n 'parse_text': parseAbby(raw_text)\n } \ndf_text = pd.DataFrame.from_dict(all_text_dict, orient='index')\n\ndf_text.to_pickle('abbyText.pickle')\n\ndf_text.to_json('abbyText.json',\n lines=True,\n orient='records',\n force_ascii=True\n)\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
globotree/tensorflow | [
"b944fb947898de8cb4279a5a8a066955ba685412",
"b944fb947898de8cb4279a5a8a066955ba685412",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88",
"04f2870814d2773e09dcfa00cbe76a66a2c4de88"
] | [
"tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_variables_v1.py",
"tensorflow/python/keras/datasets/fashion_mnist.py",
"tensorflow/python/autograph/pyct/transformer.py",
"tensorflow/python/framework/tensor_util.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# RUN: %p/multi_variables_v1 | FileCheck %s\n\n# pylint: disable=missing-docstring,line-too-long\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1\n\n# Verify that the tf.versions attribute exists. It is difficult to enforce\n# contents, since the version numbers change over time. The conversion logic\n# itself is verified in the common graphdef converter, so here just assert\n# it is being invoked.\n# CHECK: module\n# CHECK-SAME: tf.versions\n# CHECK-SAME: bad_consumers\n# CHECK-SAME: min_consumer\n# CHECK-SAME: producer\n\n# CHECK: \"tf_saved_model.global_tensor\"() {is_mutable, sym_name = \"y\", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>} : () -> ()\n# CHECK: \"tf_saved_model.global_tensor\"() {is_mutable, sym_name = \"z\", type = tensor<3x3xf32>, value = {{.*}} : tensor<3x3xf32>} : () -> ()\n# CHECK: func @basic([[ARG0:%.*]]: tensor<3x1xf32>,\n# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @y}\n# CHECK-SAME: [[ARG2:%.*]]: tensor<!tf.resource<tensor<3x3xf32>>> {tf_saved_model.bound_input = @z}) -> tensor<3x3xf32>\n# CHECK-NEXT: [[R0:%.*]] = \"tf.ReadVariableOp\"([[ARG1]]) {{{.*}}} : (tensor<!tf.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>\n# CHECK-NEXT: [[R1:%.*]] = \"tf.MatMul\"([[ARG0]], [[R0]]) {{{.*}}} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>\n# CHECK-NEXT: [[R2:%.*]] = \"tf.ReadVariableOp\"([[ARG2]]) {{{.*}}} : (tensor<!tf.resource<tensor<3x3xf32>>>) -> tensor<3x3xf32>\n# CHECK-NEXT: [[R3:%.*]] = \"tf.MatMul\"([[R1]], [[R2]]) {{{.*}}} : (tensor<3x3xf32>, tensor<3x3xf32>) -> tensor<3x3xf32>\n# CHECK-NEXT: return [[R3]] : tensor<3x3xf32>\n\n\ndef Test():\n\n # Default TF1.x uses reference variables that are not supported by SavedModel\n # v1 Importer. 
To use SavedModel V1 Importer, resource variables should be\n # enabled.\n tf.compat.v1.enable_resource_variables()\n\n tf.compat.v1.disable_eager_execution()\n\n x = tf.constant([[1.0], [1.0], [1.0]])\n y = tf.compat.v1.get_variable(\n name='y',\n shape=(1, 3),\n initializer=tf.random_normal_initializer(),\n trainable=True)\n z = tf.compat.v1.get_variable(\n name='z',\n shape=(3, 3),\n initializer=tf.random_normal_initializer(),\n trainable=True)\n r = tf.matmul(x, y)\n s = tf.matmul(r, z)\n\n tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)\n tensor_info_s = tf.compat.v1.saved_model.utils.build_tensor_info(s)\n\n return {\n 'basic':\n (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': tensor_info_x},\n outputs={'s': tensor_info_s},\n method_name=tf.saved_model.PREDICT_METHOD_NAME))\n }\n\n\nif __name__ == '__main__':\n common_v1.do_test(Test())\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Fashion-MNIST dataset.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\n\nimport numpy as np\n\nfrom tensorflow.python.keras.utils.data_utils import get_file\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.datasets.fashion_mnist.load_data')\ndef load_data():\n \"\"\"Loads the Fashion-MNIST dataset.\n\n This is a dataset of 60,000 28x28 grayscale images of 10 fashion categories,\n along with a test set of 10,000 images. This dataset can be used as\n a drop-in replacement for MNIST. The class labels are:\n\n | Label | Description |\n |:-----:|-------------|\n | 0 | T-shirt/top |\n | 1 | Trouser |\n | 2 | Pullover |\n | 3 | Dress |\n | 4 | Coat |\n | 5 | Sandal |\n | 6 | Shirt |\n | 7 | Sneaker |\n | 8 | Bag |\n | 9 | Ankle boot |\n\n Returns:\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n x_train, x_test: uint8 arrays of grayscale image data with shape\n (num_samples, 28, 28).\n y_train, y_test: uint8 arrays of labels (integers in range 0-9)\n with shape (num_samples,).\n\n License:\n The copyright for Fashion-MNIST is held by Zalando SE.\n Fashion-MNIST is licensed under the [MIT license](\n https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).\n\n \"\"\"\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A node transformer that includes utilities for SCT.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport gast\n\nfrom tensorflow.python.autograph.pyct import anno\nfrom tensorflow.python.autograph.pyct import loader\nfrom tensorflow.python.autograph.pyct import pretty_printer\nfrom tensorflow.python.autograph.pyct import templates\n\n\n# TODO(znado): Use namedtuple.\nclass Context(object):\n \"\"\"Contains information about a source code transformation.\n\n This object is mutable, and is updated during conversion. Not thread safe.\n\n Attributes:\n info: EntityInfo, immutable.\n current_origin: origin_info.OriginInfo, holds the OriginInfo of the last\n AST node to be processed successfully. Useful for error handling.\n \"\"\"\n\n def __init__(self, info):\n self.info = info\n self.current_origin = None\n\n\n# TODO(mdan): Move to a standalone file.\nclass EntityInfo(\n collections.namedtuple(\n 'EntityInfo',\n ('source_code', 'source_file', 'future_features', 'namespace'))):\n \"\"\"Contains information about a Python entity.\n\n Immutable.\n\n Examples of entities include functions and classes.\n\n Attributes:\n source_code: The entity's source code.\n source_file: The entity's source file.\n future_features: Tuple[Text], the future features that this entity was\n compiled with. See\n https://docs.python.org/2/reference/simple_stmts.html#future.\n namespace: Dict[str, ], containing symbols visible to the entity (excluding\n parameters).\n \"\"\"\n pass\n\n\nclass _StateStack(object):\n \"\"\"Templated context manager.\n\n This class provides syntactic sugar for a stack of objects of known\n type. 
It allows accessing attributes of the object at the top of the stack\n directly against this object, which allows for very terse syntax.\n\n For example, this code:\n\n stack = _StateStack(Foo)\n stack.enter()\n stack.bar\n\n Is equivalent to:\n\n stack = []\n stack.append(Foo())\n foo = stack[-1]\n foo.bar\n\n See _State for more on how this is used.\n\n Attributes:\n type: Any, the type of objects that this stack holds\n level: int, the current stack depth\n stack: List[Any], the actual stack\n value: Any, the instance of the object at the top of the stack\n \"\"\"\n\n def __init__(self, type_):\n # Because we override __setattr__, we need to attach these attributes using\n # the superclass' setattr.\n object.__setattr__(self, 'type', type_)\n object.__setattr__(self, '_stack', [])\n if not hasattr(type_, 'no_root'):\n self.enter()\n\n def __enter__(self):\n self.enter()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.exit()\n\n def enter(self):\n self._stack.append(self.type())\n\n def exit(self):\n self._stack.pop()\n\n @property\n def stack(self):\n return self._stack\n\n @property\n def level(self):\n return len(self._stack)\n\n @property\n def value(self):\n return self._stack[-1]\n\n def __iter__(self):\n return iter(self._stack)\n\n def __getattr__(self, key):\n return getattr(self._stack[-1], key)\n\n def __setattr__(self, key, value):\n setattr(self._stack[-1], key, value)\n\n\nclass _State(object):\n \"\"\"Syntactic sugar for accessing an instance of a StateStack context manager.\n\n This structure offers syntactic sugar over a dict of stacks of objects\n of known type. These structures are useful to keep state during AST walks.\n Multiple different scopes can be tracked in parallel. For example:\n\n s = _State()\n\n s[foo].enter()\n s[bar].enter() # this will not affect s[foo]\n\n Element access has special semantics:\n * keys are a data type\n * element values are _StateStack(type=key) objects\n * missing elements are automatically added, similarly to defaultdict\n\n For example, the following block :\n\n _State s\n s[Foo]\n\n Is equivalent to:\n\n s = {}\n if Foo not in s:\n s[Foo] = Foo()\n s[Foo]\n\n See Base for how it's used.\n \"\"\"\n\n def __init__(self):\n self._value = {}\n\n def __getitem__(self, key):\n if key not in self._value:\n self._value[key] = _StateStack(key)\n return self._value[key]\n\n\nclass Base(gast.NodeTransformer):\n \"\"\"Base class for general-purpose code transformers transformers.\n\n This is an extension of ast.NodeTransformer that provides a few additional\n functions, like state tracking within the scope of arbitrary node, helpers\n for processing code blocks, debugging, mapping of transformed code to\n original code, and others.\n\n Scope-local state tracking: to keep state across nodes, at the level of\n (possibly nested) scopes, use enter/exit_local_scope and set/get_local.\n You must call enter/exit_local_scope manually, but the transformer detects\n when they are not properly paired.\n\n The transformer allows keeping state across calls to `visit_*` that is local\n to arbitrary nodes and their descendants, using the self.state attribute.\n Multiple independent scopes are allowed and automatically constructed.\n\n For example, to keep track of the `If` node that encloses any `Name` node,\n one can write:\n\n ```\n class FooType(object):\n\n def __init__(self):\n self.foo_property = None\n\n class DummyTransformer(Base):\n\n def visit_If(self, node):\n self.state[FooType].enter()\n 
self.state[FooType].foo_property = node\n node = self.veneric_visit(node)\n self.state[FooType].exit()\n return node\n\n def visit_Name(self, node):\n self.state[FooType].foo_property # will hold the innermost enclosing if\n ```\n\n Alternatively, the `enter()`/`exit()` calls can be managed by a `with`\n statement:\n\n ```\n def visit_If(self, node):\n with self.state[FooType] as foo:\n foo.foo_property = node\n return self.generic_visit(node)\n ```\n \"\"\"\n\n # TODO(mdan): Document all extra features.\n\n def __init__(self, ctx):\n \"\"\"Initialize the transformer.\n\n Subclasses should call this.\n\n Args:\n ctx: A Context object.\n \"\"\"\n self._lineno = 0\n self._col_offset = 0\n self.ctx = ctx\n self._enclosing_entities = []\n\n # A stack that allows keeping mutable, scope-local state where scopes may be\n # nested. For example, it can be used to track the usage of break\n # statements in each loop, where loops may be nested.\n self._local_scope_state = []\n self.enter_local_scope()\n\n # Allows scoping of local variables to keep state across calls to visit_*\n # methods. Multiple scope hierchies may exist and are keyed by tag. A scope\n # is valid at one or more nodes and all its children. Scopes created in\n # child nodes supersede their parent. Scopes are isolated from one another.\n self.state = _State()\n\n @property\n def enclosing_entities(self):\n return tuple(self._enclosing_entities)\n\n @property\n def local_scope_level(self):\n return len(self._local_scope_state)\n\n def enter_local_scope(self, inherit=None):\n \"\"\"Deprecated.\n\n Use self.state instead.\n\n Marks entry into a new local scope.\n\n Args:\n inherit: Optional enumerable of variable names to copy from the parent\n scope.\n \"\"\"\n scope_entered = {}\n if inherit:\n this_scope = self._local_scope_state[-1]\n for name in inherit:\n if name in this_scope:\n scope_entered[name] = this_scope[name]\n self._local_scope_state.append(scope_entered)\n\n def exit_local_scope(self, keep=None):\n \"\"\"Deprecated.\n\n Use self.state instead.\n\n Marks exit from the current local scope.\n\n Args:\n keep: Optional enumerable of variable names to copy into the parent scope.\n\n Returns:\n A dict containing the scope that has just been exited.\n \"\"\"\n scope_left = self._local_scope_state.pop()\n if keep:\n this_scope = self._local_scope_state[-1]\n for name in keep:\n if name in scope_left:\n this_scope[name] = scope_left[name]\n return scope_left\n\n def set_local(self, name, value):\n \"\"\"Deprecated. Use self.state instead.\"\"\"\n self._local_scope_state[-1][name] = value\n\n def get_local(self, name, default=None):\n \"\"\"Deprecated. Use self.state instead.\"\"\"\n return self._local_scope_state[-1].get(name, default)\n\n def debug_print(self, node):\n \"\"\"Helper method useful for debugging. Prints the AST.\"\"\"\n if __debug__:\n print(pretty_printer.fmt(node))\n return node\n\n def debug_print_src(self, node):\n \"\"\"Helper method useful for debugging. 
Prints the AST as code.\"\"\"\n if __debug__:\n print(loader.load_ast(node))\n return node\n\n def create_assignment(self, target, expression):\n template = \"\"\"\n target = expression\n \"\"\"\n return templates.replace(template, target=target, expression=expression)\n\n def visit_block(self, nodes, before_visit=None, after_visit=None):\n \"\"\"A more powerful version of generic_visit for statement blocks.\n\n An example of a block is the body of an if statement.\n\n This function allows specifying a postprocessing callback (the\n after_visit argument) argument which can be used to move nodes to a new\n destination. This is done by after_visit by returning a non-null\n second return value, e.g. return new_node, new_destination.\n\n For example, a transformer could perform the following move:\n\n foo()\n bar()\n baz()\n\n foo()\n if cond:\n bar()\n baz()\n\n The above could be done with a postprocessor of this kind:\n\n def after_visit(node):\n if node_is_function_call(bar):\n new_container_node = build_cond()\n new_container_node.body.append(node)\n return new_container_node, new_container_node.body\n else:\n # Once we set a new destination, all subsequent items will be\n # moved to it, so we don't need to explicitly handle baz.\n return node, None\n\n Args:\n nodes: enumerable of AST node objects. If None, the function returns None.\n before_visit: optional callable that is called before visiting each item\n in nodes\n after_visit: optional callable that takes in an AST node and returns a\n tuple (new_node, new_destination). It is called after visiting each item\n in nodes. Is used in the same was as the\n visit_* methods: new_node will replace the node; if not None,\n new_destination must be a list, and subsequent nodes will be placed\n in this list instead of the list returned by visit_block.\n\n Returns:\n A list of AST node objects containing the transformed items fron nodes,\n except those nodes that have been relocated using after_visit.\n \"\"\"\n if nodes is None:\n return None\n\n results = []\n node_destination = results\n for node in nodes:\n if before_visit:\n # TODO(mdan): We can modify node here too, if ever needed.\n before_visit()\n\n replacement = self.visit(node)\n\n if after_visit and replacement:\n replacement, new_destination = after_visit(replacement)\n else:\n new_destination = None\n\n if replacement:\n if isinstance(replacement, (list, tuple)):\n node_destination.extend(replacement)\n else:\n node_destination.append(replacement)\n\n # Allow the postprocessor to reroute the remaining nodes to a new list.\n if new_destination is not None:\n node_destination = new_destination\n return results\n\n # TODO(mdan): Remove.\n def apply_to_single_assignments(self, targets, values, apply_fn):\n \"\"\"Applies a function to each individual assignment.\n\n This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.\n It tries to break down the unpacking if possible. In effect, it has the same\n effect as passing the assigned values in SSA form to apply_fn.\n\n Examples:\n\n The following will result in apply_fn(a, c), apply_fn(b, d):\n\n a, b = c, d\n\n The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):\n\n a, b = c\n\n The following will result in apply_fn(a, (b, c)):\n\n a = b, c\n\n It uses the visitor pattern to allow subclasses to process single\n assignments individually.\n\n Args:\n targets: list, tuple of or individual AST node. 
Should be used with the\n targets field of an ast.Assign node.\n values: an AST node.\n apply_fn: a function of a single argument, which will be called with the\n respective nodes of each single assignment. The signature is\n apply_fn(target, value), no return value.\n \"\"\"\n if not isinstance(targets, (list, tuple)):\n targets = (targets,)\n for target in targets:\n if isinstance(target, (gast.Tuple, gast.List)):\n for i in range(len(target.elts)):\n target_el = target.elts[i]\n if isinstance(values, (gast.Tuple, gast.List)):\n value_el = values.elts[i]\n else:\n value_el = gast.Subscript(values, gast.Index(i), ctx=gast.Store())\n self.apply_to_single_assignments(target_el, value_el, apply_fn)\n else:\n # TODO(mdan): Look into allowing to rewrite the AST here.\n apply_fn(target, values)\n\n def _get_source(self, node):\n try:\n source, _ = loader.load_ast(node)\n return source\n # pylint: disable=broad-except\n # This function is used for error reporting. If an exception occurs here,\n # it should be suppressed, in favor of emitting as informative a message\n # about the original error as possible.\n except Exception:\n return '<could not convert AST to source>'\n\n def visit(self, node):\n if not isinstance(node, gast.AST):\n # This is not that uncommon a mistake: various node bodies are lists, for\n # example, posing a land mine for transformers that need to recursively\n # call `visit`. The error needs to be raised before the exception handler\n # below is installed, because said handler will mess up if `node` is not,\n # in fact, a node.\n msg = ('invalid value for \"node\": expected \"ast.AST\", got \"{}\"; to'\n ' visit lists of nodes, use \"visit_block\" instead').format(\n type(node))\n raise ValueError(msg)\n\n did_enter_function = False\n local_scope_size_at_entry = len(self._local_scope_state)\n processing_expr_node = False\n\n parent_origin = self.ctx.current_origin\n if isinstance(node, (gast.FunctionDef, gast.ClassDef, gast.Lambda)):\n did_enter_function = True\n elif isinstance(node, gast.Expr):\n processing_expr_node = True\n\n if did_enter_function:\n self._enclosing_entities.append(node)\n\n if anno.hasanno(node, anno.Basic.ORIGIN):\n self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)\n\n if processing_expr_node:\n entry_expr_value = node.value\n\n if not anno.hasanno(node, anno.Basic.SKIP_PROCESSING):\n result = super(Base, self).visit(node)\n self.ctx.current_origin = parent_origin\n\n # Adjust for consistency: replacing the value of an Expr with\n # an Assign node removes the need for the Expr node.\n if processing_expr_node:\n if isinstance(result, gast.Expr) and result.value != entry_expr_value:\n # When the replacement is a list, it is assumed that the list came\n # from a template that contained a number of statements, which\n # themselves are standalone and don't require an enclosing Expr.\n if isinstance(result.value,\n (list, tuple, gast.Assign, gast.AugAssign)):\n result = result.value\n\n # By default, all replacements receive the origin info of the replaced node.\n if result is not node and result is not None:\n nodes_to_adjust = result\n if isinstance(result, (list, tuple)):\n nodes_to_adjust = result\n else:\n nodes_to_adjust = (result,)\n for n in nodes_to_adjust:\n if not anno.hasanno(n, anno.Basic.ORIGIN):\n inherited_origin = anno.getanno(\n node, anno.Basic.ORIGIN, default=parent_origin)\n if inherited_origin is not None:\n anno.setanno(n, anno.Basic.ORIGIN, inherited_origin)\n\n # On exception, the local scope integrity is not 
guaranteed.\n if did_enter_function:\n self._enclosing_entities.pop()\n\n if local_scope_size_at_entry != len(self._local_scope_state):\n raise AssertionError(\n 'Inconsistent local scope stack. Before entering node %s, the'\n ' stack had length %d, after exit it has length %d. This'\n ' indicates enter_local_scope and exit_local_scope are not'\n ' well paired.' % (node, local_scope_size_at_entry,\n len(self._local_scope_state)))\n return result\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities to create TensorProtos.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\n\nfrom tensorflow.core.framework import tensor_pb2\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_like\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Fallback in case fast_tensor_util is not properly compiled.\n# pylint: disable=g-import-not-at-top\ntry:\n from tensorflow.python.framework import fast_tensor_util\n _FAST_TENSOR_UTIL_AVAILABLE = True\nexcept ImportError:\n _FAST_TENSOR_UTIL_AVAILABLE = False\n# pylint: enable=g-import-not-at-top\n\n\ndef ExtractBitsFromFloat16(x):\n return np.asarray(x, dtype=np.float16).view(np.uint16).item()\n\n\ndef SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.half_val.extend(\n [ExtractBitsFromFloat16(x) for x in proto_values])\n\n\ndef _MediumAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):\n # TODO: Remove the conversion if cython supports np.float16_t\n fast_tensor_util.AppendFloat16ArrayToTensorProto(\n tensor_proto,\n np.asarray(proto_values, dtype=np.float16).view(np.uint16))\n\n\ndef ExtractBitsFromBFloat16(x):\n return np.asarray(\n x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()\n\n\ndef SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.half_val.extend(\n [ExtractBitsFromBFloat16(x) for x in proto_values])\n\n\ndef FastAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):\n fast_tensor_util.AppendBFloat16ArrayToTensorProto(\n tensor_proto, np.asarray(\n proto_values, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16))\n\n\nif _FAST_TENSOR_UTIL_AVAILABLE:\n _NP_TO_APPEND_FN = {\n dtypes.bfloat16.as_numpy_dtype:\n FastAppendBFloat16ArrayToTensorProto,\n np.float16:\n _MediumAppendFloat16ArrayToTensorProto,\n np.float32:\n fast_tensor_util.AppendFloat32ArrayToTensorProto,\n np.float64:\n fast_tensor_util.AppendFloat64ArrayToTensorProto,\n np.int32:\n fast_tensor_util.AppendInt32ArrayToTensorProto,\n np.int64:\n fast_tensor_util.AppendInt64ArrayToTensorProto,\n np.uint8:\n fast_tensor_util.AppendUInt8ArrayToTensorProto,\n np.uint16:\n fast_tensor_util.AppendUInt16ArrayToTensorProto,\n np.uint32:\n fast_tensor_util.AppendUInt32ArrayToTensorProto,\n np.uint64:\n fast_tensor_util.AppendUInt64ArrayToTensorProto,\n np.int8:\n fast_tensor_util.AppendInt8ArrayToTensorProto,\n np.int16:\n 
fast_tensor_util.AppendInt16ArrayToTensorProto,\n np.complex64:\n fast_tensor_util.AppendComplex64ArrayToTensorProto,\n np.complex128:\n fast_tensor_util.AppendComplex128ArrayToTensorProto,\n np.object:\n fast_tensor_util.AppendObjectArrayToTensorProto,\n np.bool:\n fast_tensor_util.AppendBoolArrayToTensorProto,\n dtypes.qint8.as_numpy_dtype:\n fast_tensor_util.AppendInt8ArrayToTensorProto,\n dtypes.quint8.as_numpy_dtype:\n fast_tensor_util.AppendUInt8ArrayToTensorProto,\n dtypes.qint16.as_numpy_dtype:\n fast_tensor_util.AppendInt8ArrayToTensorProto,\n dtypes.quint16.as_numpy_dtype:\n fast_tensor_util.AppendUInt8ArrayToTensorProto,\n dtypes.qint32.as_numpy_dtype:\n fast_tensor_util.AppendInt32ArrayToTensorProto,\n # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.\n }\nelse:\n\n def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.float_val.extend([x.item() for x in proto_values])\n\n def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.double_val.extend([x.item() for x in proto_values])\n\n def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.int_val.extend([x.item() for x in proto_values])\n\n def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.int64_val.extend([x.item() for x in proto_values])\n\n def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.int_val.extend([x.item()[0] for x in proto_values])\n\n def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.uint32_val.extend([x.item() for x in proto_values])\n\n def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.uint64_val.extend([x.item() for x in proto_values])\n\n def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.scomplex_val.extend(\n [v.item() for x in proto_values for v in [x.real, x.imag]])\n\n def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.dcomplex_val.extend(\n [v.item() for x in proto_values for v in [x.real, x.imag]])\n\n def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])\n\n def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):\n tensor_proto.bool_val.extend([x.item() for x in proto_values])\n\n _NP_TO_APPEND_FN = {\n dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,\n np.float16: SlowAppendFloat16ArrayToTensorProto,\n np.float32: SlowAppendFloat32ArrayToTensorProto,\n np.float64: SlowAppendFloat64ArrayToTensorProto,\n np.int32: SlowAppendIntArrayToTensorProto,\n np.int64: SlowAppendInt64ArrayToTensorProto,\n np.uint8: SlowAppendIntArrayToTensorProto,\n np.uint16: SlowAppendIntArrayToTensorProto,\n np.uint32: SlowAppendUInt32ArrayToTensorProto,\n np.uint64: SlowAppendUInt64ArrayToTensorProto,\n np.int8: SlowAppendIntArrayToTensorProto,\n np.int16: SlowAppendIntArrayToTensorProto,\n np.complex64: SlowAppendComplex64ArrayToTensorProto,\n np.complex128: SlowAppendComplex128ArrayToTensorProto,\n np.object: SlowAppendObjectArrayToTensorProto,\n np.bool: SlowAppendBoolArrayToTensorProto,\n dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,\n dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,\n dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,\n dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,\n dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,\n # 
NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.\n }\n\n\ndef GetFromNumpyDTypeDict(dtype_dict, dtype):\n # NOTE: dtype_dict.get(dtype) always returns None.\n for key, val in six.iteritems(dtype_dict):\n if key == dtype:\n return val\n return None\n\n\ndef GetNumpyAppendFn(dtype):\n # numpy dtype for strings are variable length. We can not compare\n # dtype with a single constant (np.string does not exist) to decide\n # dtype is a \"string\" type. We need to compare the dtype.type to be\n # sure it's a string type.\n if dtype.type == np.string_ or dtype.type == np.unicode_:\n if _FAST_TENSOR_UTIL_AVAILABLE:\n return fast_tensor_util.AppendObjectArrayToTensorProto\n else:\n return SlowAppendObjectArrayToTensorProto\n return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)\n\n\ndef TensorShapeProtoToList(shape):\n \"\"\"Convert a TensorShape to a list.\n\n Args:\n shape: A TensorShapeProto.\n\n Returns:\n List of integers representing the dimensions of the tensor.\n \"\"\"\n return [dim.size for dim in shape.dim]\n\n\ndef _GetDenseDimensions(list_of_lists):\n \"\"\"Returns the inferred dense dimensions of a list of lists.\"\"\"\n if not isinstance(list_of_lists, (list, tuple)):\n return []\n elif not list_of_lists:\n return [0]\n else:\n return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])\n\n\ndef _FlattenToStrings(nested_strings):\n if isinstance(nested_strings, (list, tuple)):\n for inner in nested_strings:\n for flattened_string in _FlattenToStrings(inner):\n yield flattened_string\n else:\n yield nested_strings\n\n\n_TENSOR_CONTENT_TYPES = frozenset([\n dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,\n dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,\n dtypes.quint16, dtypes.qint32, dtypes.uint32, dtypes.uint64\n])\n\n\n# pylint: disable=invalid-name\ndef _check_failed(v):\n # NB. 
none of the _check_* functions could raise a ValueError, so\n # it is safe to use here.\n raise ValueError(v)\n\n\ndef _check_quantized(values):\n # Cannot rely on `nest` because the leaves are tuples.\n if not isinstance(values, (list, tuple)):\n _check_failed(values)\n if isinstance(values, tuple):\n _ = [_check_int(v) for v in values]\n else:\n _ = [_check_quantized(v) for v in values]\n\n\ndef _generate_isinstance_check(expected_types):\n def inner(values):\n _ = [_check_failed(v) for v in nest.flatten(values)\n if not isinstance(v, expected_types)]\n return inner\n\n_check_int = _generate_isinstance_check(\n (compat.integral_types, tensor_shape.Dimension))\n_check_float = _generate_isinstance_check(compat.real_types)\n_check_complex = _generate_isinstance_check(compat.complex_types)\n_check_str = _generate_isinstance_check(compat.bytes_or_text_types)\n_check_bool = _generate_isinstance_check(bool)\n\n\ndef _check_not_tensor(values):\n _ = [_check_failed(v) for v in nest.flatten(values)\n if isinstance(v, ops.Tensor)]\n# pylint: enable=invalid-name\n\n_TF_TO_IS_OK = {\n dtypes.bool: _check_bool,\n dtypes.complex128: _check_complex,\n dtypes.complex64: _check_complex,\n dtypes.float16: _check_float,\n dtypes.float32: _check_float,\n dtypes.float64: _check_float,\n dtypes.int16: _check_int,\n dtypes.int32: _check_int,\n dtypes.int64: _check_int,\n dtypes.int8: _check_int,\n dtypes.qint16: _check_quantized,\n dtypes.qint32: _check_quantized,\n dtypes.qint8: _check_quantized,\n dtypes.quint16: _check_quantized,\n dtypes.quint8: _check_quantized,\n dtypes.string: _check_str,\n dtypes.uint16: _check_int,\n dtypes.uint8: _check_int,\n dtypes.uint32: _check_int,\n dtypes.uint64: _check_int,\n}\n\n\ndef _AssertCompatible(values, dtype):\n if dtype is None:\n fn = _check_not_tensor\n else:\n try:\n fn = _TF_TO_IS_OK[dtype]\n except KeyError:\n # There isn't a specific fn, so we try to do the best possible.\n if dtype.is_integer:\n fn = _check_int\n elif dtype.is_floating:\n fn = _check_float\n elif dtype.is_complex:\n fn = _check_complex\n elif dtype.is_quantized:\n fn = _check_quantized\n else:\n fn = _check_not_tensor\n\n try:\n fn(values)\n except ValueError as e:\n [mismatch] = e.args\n if dtype is None:\n raise TypeError(\"Expected any non-tensor type, got a tensor instead.\")\n else:\n raise TypeError(\"Expected %s, got %s of type '%s' instead.\" %\n (dtype.name, repr(mismatch), type(mismatch).__name__))\n\n\ndef _is_array_like(obj): # pylint: disable=invalid-name\n \"\"\"Check if a given object is array-like.\"\"\"\n if isinstance(obj, ops.Tensor) and not isinstance(obj, ops._EagerTensorBase): # pylint: disable=protected-access\n # Tensor implements __array__ only so it can inform the user that it is not\n # a valid array.\n return False\n\n # TODO(slebedev): an object could also implement C-level array interface.\n if (callable(getattr(obj, \"__array__\", None)) or\n isinstance(getattr(obj, \"__array_interface__\", None), dict)):\n return True\n\n try:\n memoryview(obj)\n except TypeError:\n return False\n else:\n return not isinstance(obj, bytes)\n\n\n# pylint: disable=invalid-name\n@tf_export(\"make_tensor_proto\")\ndef make_tensor_proto(values, dtype=None, shape=None, verify_shape=False,\n allow_broadcast=False):\n \"\"\"Create a TensorProto.\n\n In TensorFlow 2.0, representing tensors as protos should no longer be a\n common workflow. 
That said, this utility function is still useful for\n generating TF Serving request protos:\n\n ```python\n request = tensorflow_serving.apis.predict_pb2.PredictRequest()\n request.model_spec.name = \"my_model\"\n request.model_spec.signature_name = \"serving_default\"\n request.inputs[\"images\"].CopyFrom(tf.make_tensor_proto(X_new))\n ```\n\n `make_tensor_proto` accepts \"values\" of a python scalar, a python list, a\n numpy ndarray, or a numpy scalar.\n\n If \"values\" is a python scalar or a python list, make_tensor_proto\n first convert it to numpy ndarray. If dtype is None, the\n conversion tries its best to infer the right numpy data\n type. Otherwise, the resulting numpy array has a compatible data\n type with the given dtype.\n\n In either case above, the numpy ndarray (either the caller provided\n or the auto-converted) must have the compatible type with dtype.\n\n `make_tensor_proto` then converts the numpy array to a tensor proto.\n\n If \"shape\" is None, the resulting tensor proto represents the numpy\n array precisely.\n\n Otherwise, \"shape\" specifies the tensor's shape and the numpy array\n can not have more elements than what \"shape\" specifies.\n\n Args:\n values: Values to put in the TensorProto.\n dtype: Optional tensor_pb2 DataType value.\n shape: List of integers representing the dimensions of tensor.\n verify_shape: Boolean that enables verification of a shape of values.\n allow_broadcast: Boolean that enables allowing scalars and 1 length vector\n broadcasting. Cannot be true when verify_shape is true.\n\n Returns:\n A `TensorProto`. Depending on the type, it may contain data in the\n \"tensor_content\" attribute, which is not directly useful to Python programs.\n To access the values you should convert the proto back to a numpy ndarray\n with `tf.make_ndarray(proto)`.\n\n If `values` is a `TensorProto`, it is immediately returned; `dtype` and\n `shape` are ignored.\n\n Raises:\n TypeError: if unsupported types are provided.\n ValueError: if arguments have inappropriate values or if verify_shape is\n True and shape of values is not equals to a shape from the argument.\n\n \"\"\"\n if allow_broadcast and verify_shape:\n raise ValueError(\"allow_broadcast and verify_shape are not both allowed.\")\n if isinstance(values, tensor_pb2.TensorProto):\n return values\n\n if dtype:\n dtype = dtypes.as_dtype(dtype)\n\n is_quantized = (\n dtype in [\n dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,\n dtypes.qint32\n ])\n\n if _is_array_like(values):\n values = np.asarray(values)\n\n # We first convert value to a numpy array or scalar.\n if isinstance(values, (np.ndarray, np.generic)):\n if dtype and dtype.is_numpy_compatible:\n nparray = values.astype(dtype.as_numpy_dtype)\n else:\n nparray = values\n else:\n if values is None:\n raise ValueError(\"None values not supported.\")\n # if dtype is provided, forces numpy array to be the type\n # provided if possible.\n if dtype and dtype.is_numpy_compatible:\n np_dt = dtype.as_numpy_dtype\n else:\n np_dt = None\n # If shape is None, numpy.prod returns None when dtype is not set, but\n # raises exception when dtype is set to np.int64\n if shape is not None and np.prod(shape, dtype=np.int64) == 0:\n nparray = np.empty(shape, dtype=np_dt)\n else:\n _AssertCompatible(values, dtype)\n nparray = np.array(values, dtype=np_dt)\n # check to them.\n # We need to pass in quantized values as tuples, so don't apply the shape\n if (list(nparray.shape) != _GetDenseDimensions(values) and\n not is_quantized):\n raise 
ValueError(\"\"\"Argument must be a dense tensor: %s\"\"\"\n \"\"\" - got shape %s, but wanted %s.\"\"\" %\n (values, list(nparray.shape),\n _GetDenseDimensions(values)))\n\n # python/numpy default float type is float64. We prefer float32 instead.\n if (nparray.dtype == np.float64) and dtype is None:\n nparray = nparray.astype(np.float32)\n # python/numpy default int type is int64. We prefer int32 instead.\n elif (nparray.dtype == np.int64) and dtype is None:\n downcasted_array = nparray.astype(np.int32)\n # Do not down cast if it leads to precision loss.\n if np.array_equal(downcasted_array, nparray):\n nparray = downcasted_array\n\n # if dtype is provided, it must be compatible with what numpy\n # conversion says.\n numpy_dtype = dtypes.as_dtype(nparray.dtype)\n if numpy_dtype is None:\n raise TypeError(\"Unrecognized data type: %s\" % nparray.dtype)\n\n # If dtype was specified and is a quantized type, we convert\n # numpy_dtype back into the quantized version.\n if is_quantized:\n numpy_dtype = dtype\n\n if dtype is not None and (not hasattr(dtype, \"base_dtype\") or\n dtype.base_dtype != numpy_dtype.base_dtype):\n raise TypeError(\"Incompatible types: %s vs. %s. Value is %s\" %\n (dtype, nparray.dtype, values))\n\n # If shape is not given, get the shape from the numpy array.\n if shape is None:\n shape = nparray.shape\n is_same_size = True\n shape_size = nparray.size\n else:\n shape = [int(dim) for dim in shape]\n shape_size = np.prod(shape, dtype=np.int64)\n is_same_size = shape_size == nparray.size\n\n if allow_broadcast:\n if nparray.shape == (1,) or nparray.shape == tuple():\n pass\n elif nparray.size != shape_size:\n raise TypeError(\"Expected Tensor's shape: %s, got %s.\" %\n (tuple(shape), nparray.shape))\n\n else:\n if verify_shape and nparray.shape != tuple(shape):\n raise TypeError(\"Expected Tensor's shape: %s, got %s.\" %\n (tuple(shape), nparray.shape))\n\n if nparray.size > shape_size:\n raise ValueError(\n \"Too many elements provided. Needed at most %d, but received %d\" %\n (shape_size, nparray.size))\n\n tensor_proto = tensor_pb2.TensorProto(\n dtype=numpy_dtype.as_datatype_enum,\n tensor_shape=tensor_shape.as_shape(shape).as_proto())\n\n if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:\n if nparray.size * nparray.itemsize >= (1 << 31):\n raise ValueError(\n \"Cannot create a tensor proto whose content is larger than 2GB.\")\n tensor_proto.tensor_content = nparray.tostring()\n return tensor_proto\n\n # If we were not given values as a numpy array, compute the proto_values\n # from the given values directly, to avoid numpy trimming nulls from the\n # strings. Since values could be a list of strings, or a multi-dimensional\n # list of lists that might or might not correspond to the given shape,\n # we flatten it conservatively.\n if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):\n proto_values = _FlattenToStrings(values)\n\n # At this point, values may be a list of objects that we could not\n # identify a common type for (hence it was inferred as\n # np.object/dtypes.string). If we are unable to convert it to a\n # string, we raise a more helpful error message.\n #\n # Ideally, we'd be able to convert the elements of the list to a\n # common type, but this type inference requires some thinking and\n # so we defer it for now.\n try:\n str_values = [compat.as_bytes(x) for x in proto_values]\n except TypeError:\n raise TypeError(\"Failed to convert object of type %s to Tensor. \"\n \"Contents: %s. 
Consider casting elements to a \"\n \"supported type.\" % (type(values), values))\n tensor_proto.string_val.extend(str_values)\n return tensor_proto\n\n # TensorFlow expects C order (a.k.a., eigen row major).\n proto_values = nparray.ravel()\n\n append_fn = GetNumpyAppendFn(proto_values.dtype)\n if append_fn is None:\n raise TypeError(\n \"Element type not supported in TensorProto: %s\" % numpy_dtype.name)\n append_fn(tensor_proto, proto_values)\n\n return tensor_proto\n# pylint: enable=invalid-name\n\n\n@tf_export(\"make_ndarray\")\ndef MakeNdarray(tensor):\n \"\"\"Create a numpy ndarray from a tensor.\n\n Create a numpy ndarray with the same shape and data as the tensor.\n\n For example:\n\n ```python\n # Tensor a has shape (2,3)\n a = tf.constant([[1,2,3],[4,5,6]])\n proto_tensor = tf.make_tensor_proto(a) # convert `tensor a` to a proto tensor\n tf.make_ndarray(proto_tensor) # output: array([[1, 2, 3],\n # [4, 5, 6]], dtype=int32)\n # output has shape (2,3)\n ```\n\n Args:\n tensor: A TensorProto.\n\n Returns:\n A numpy array with the tensor contents.\n\n Raises:\n TypeError: if tensor has unsupported type.\n\n \"\"\"\n shape = [d.size for d in tensor.tensor_shape.dim]\n num_elements = np.prod(shape, dtype=np.int64)\n tensor_dtype = dtypes.as_dtype(tensor.dtype)\n dtype = tensor_dtype.as_numpy_dtype\n\n if tensor.tensor_content:\n return (np.frombuffer(tensor.tensor_content,\n dtype=dtype).copy().reshape(shape))\n\n if tensor_dtype == dtypes.string:\n # np.pad throws on these arrays of type np.object.\n values = list(tensor.string_val)\n padding = num_elements - len(values)\n if padding > 0:\n last = values[-1] if values else \"\"\n values.extend([last] * padding)\n return np.array(values, dtype=dtype).reshape(shape)\n\n if tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:\n # the half_val field of the TensorProto stores the binary representation\n # of the fp16: we need to reinterpret this as a proper float16\n values = np.fromiter(tensor.half_val, dtype=np.uint16)\n values.dtype = tensor_dtype.as_numpy_dtype\n elif tensor_dtype == dtypes.float32:\n values = np.fromiter(tensor.float_val, dtype=dtype)\n elif tensor_dtype == dtypes.float64:\n values = np.fromiter(tensor.double_val, dtype=dtype)\n elif tensor_dtype in [\n dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16, dtypes.int8,\n dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16\n ]:\n values = np.fromiter(tensor.int_val, dtype=dtype)\n elif tensor_dtype == dtypes.int64:\n values = np.fromiter(tensor.int64_val, dtype=dtype)\n elif tensor_dtype == dtypes.complex64:\n it = iter(tensor.scomplex_val)\n values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)\n elif tensor_dtype == dtypes.complex128:\n it = iter(tensor.dcomplex_val)\n values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)\n elif tensor_dtype == dtypes.bool:\n values = np.fromiter(tensor.bool_val, dtype=dtype)\n else:\n raise TypeError(\"Unsupported tensor type: %s\" % tensor.dtype)\n\n if values.size == 0:\n return np.zeros(shape, dtype)\n\n if values.size != num_elements:\n values = np.pad(values, (0, num_elements - values.size), \"edge\")\n\n return values.reshape(shape)\n\n\ndef ShapeEquals(tensor_proto, shape):\n \"\"\"Returns True if \"tensor_proto\" has the given \"shape\".\n\n Args:\n tensor_proto: A TensorProto.\n shape: A tensor shape, expressed as a TensorShape, list, or tuple.\n\n Returns:\n True if \"tensor_proto\" has the given \"shape\", otherwise False.\n\n Raises:\n 
TypeError: If \"tensor_proto\" is not a TensorProto, or shape is not a\n TensorShape, list, or tuple.\n \"\"\"\n if not isinstance(tensor_proto, tensor_pb2.TensorProto):\n raise TypeError(\"tensor_proto is not a tensor_pb2.TensorProto object\")\n if isinstance(shape, tensor_shape_pb2.TensorShapeProto):\n shape = [d.size for d in shape.dim]\n elif not isinstance(shape, (list, tuple)):\n raise TypeError(\"shape is not a list or tuple\")\n tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]\n return all(x == y for x, y in zip(tensor_shape_list, shape))\n\n\ndef _ConstantValue(tensor, partial):\n # TODO(touts): Support Variables?\n if not isinstance(tensor, ops.Tensor):\n raise TypeError(\"%r is not a Tensor, has type %s\" % (tensor, type(tensor)))\n if tensor.op.type == \"Const\":\n return MakeNdarray(tensor.op.get_attr(\"value\"))\n elif tensor.op.type == \"Shape\":\n input_shape = tensor.op.inputs[0].get_shape()\n if input_shape.is_fully_defined():\n return np.array(\n [dim.value for dim in input_shape.dims],\n dtype=tensor.dtype.as_numpy_dtype)\n else:\n return None\n elif tensor.op.type == \"Size\":\n input_shape = tensor.op.inputs[0].get_shape()\n if input_shape.is_fully_defined():\n return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)\n else:\n return None\n elif tensor.op.type == \"Rank\":\n input_shape = tensor.op.inputs[0].get_shape()\n if input_shape.ndims is not None:\n return np.ndarray(\n shape=(),\n buffer=np.array([input_shape.ndims], dtype=np.int32),\n dtype=np.int32)\n else:\n return None\n elif tensor.op.type == \"Range\":\n start = constant_value(tensor.op.inputs[0])\n if start is None:\n return None\n limit = constant_value(tensor.op.inputs[1])\n if limit is None:\n return None\n delta = constant_value(tensor.op.inputs[2])\n if delta is None:\n return None\n return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)\n elif tensor.op.type == \"Cast\":\n pre_cast = constant_value(tensor.op.inputs[0])\n if pre_cast is None:\n return None\n cast_dtype = dtypes.as_dtype(tensor.op.get_attr(\"DstT\"))\n return pre_cast.astype(cast_dtype.as_numpy_dtype)\n elif tensor.op.type == \"Concat\":\n dim = constant_value(tensor.op.inputs[0])\n if dim is None:\n return None\n values = []\n for x in tensor.op.inputs[1:]:\n value = constant_value(x)\n if value is None:\n return None\n values.append(value)\n return np.concatenate(values, axis=dim)\n elif tensor.op.type == \"ConcatV2\":\n dim = constant_value(tensor.op.inputs[-1])\n if dim is None:\n return None\n values = []\n for x in tensor.op.inputs[:-1]:\n value = constant_value(x)\n if value is None:\n return None\n values.append(value)\n return np.concatenate(values, axis=dim)\n elif tensor.op.type == \"Pack\":\n values = []\n # Some imported GraphDefs have Pack ops with zero inputs. 
Those are invalid\n # and shouldn't be produced, but to deal sensibly with them here we check\n # and return None.\n if not tensor.op.inputs:\n return None\n # We can't handle axis != 0 Packs at the moment.\n if tensor.op.get_attr(\"axis\") != 0:\n return None\n for x in tensor.op.inputs:\n value = constant_value(x, partial)\n if value is None and not partial:\n return None\n values.append(value)\n return np.array(values)\n elif tensor.op.type == \"Unpack\":\n # We can't handle axis != 0 Unpacks at the moment.\n if tensor.op.get_attr(\"axis\") != 0:\n return None\n value = constant_value(tensor.op.inputs[0], partial)\n if value is None:\n return None\n return value[tensor.value_index]\n elif tensor.op.type == \"Split\":\n dim = constant_value(tensor.op.inputs[0])\n value = constant_value(tensor.op.inputs[1], partial)\n if value is None or dim is None:\n return None\n split = np.split(value, tensor.op.get_attr(\"num_split\"), dim)\n return split[tensor.value_index]\n elif tensor.op.type == \"Fill\":\n fill_shape = tensor.shape\n fill_value = constant_value(tensor.op.inputs[1])\n if fill_shape.is_fully_defined() and fill_value is not None:\n return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)\n else:\n return None\n elif tensor.op.type == \"Equal\":\n value1 = constant_value(tensor.op.inputs[0])\n if value1 is None:\n return None\n value2 = constant_value(tensor.op.inputs[1])\n if value2 is None:\n return None\n return np.equal(value1, value2)\n elif tensor.op.type == \"NotEqual\":\n value1 = constant_value(tensor.op.inputs[0])\n if value1 is None:\n return None\n value2 = constant_value(tensor.op.inputs[1])\n if value2 is None:\n return None\n return np.not_equal(value1, value2)\n elif tensor.op.type == \"StopGradient\":\n return constant_value(tensor.op.inputs[0], partial)\n else:\n return None\n\n\n@tf_export(\"get_static_value\")\ndef constant_value(tensor, partial=False): # pylint: disable=invalid-name\n \"\"\"Returns the constant value of the given tensor, if efficiently calculable.\n\n This function attempts to partially evaluate the given tensor, and\n returns its value as a numpy ndarray if this succeeds.\n\n Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result, it\n will no longer be possible to feed a different value for `tensor`. This allows\n the result of this function to influence the graph that is constructed, and\n permits static shape optimizations.\n\n Args:\n tensor: The Tensor to be evaluated.\n partial: If True, the returned numpy array is allowed to have partially\n evaluated values. Values that can't be evaluated will be None.\n\n Returns:\n A numpy ndarray containing the constant value of the given `tensor`,\n or None if it cannot be calculated.\n\n Raises:\n TypeError: if tensor is not an ops.Tensor.\n \"\"\"\n if isinstance(tensor, ops.EagerTensor):\n return tensor.numpy()\n if not is_tensor(tensor):\n return tensor\n if not isinstance(tensor, ops.Tensor):\n return None\n ret = _ConstantValue(tensor, partial)\n if ret is not None:\n # The caller may now depend on the constant value of `tensor`, so we\n # conservatively prevent it from being fed.\n tensor.graph.prevent_feeding(tensor)\n return ret\n\n\ndef constant_value_as_shape(tensor): # pylint: disable=invalid-name\n \"\"\"A version of `constant_value()` that returns a `TensorShape`.\n\n This version should be used when a constant tensor value is\n interpreted as a (possibly partial) shape, e.g. in the shape\n function for `tf.reshape()`. 
By explicitly requesting a\n `TensorShape` as the return value, it is possible to represent\n unknown dimensions; by contrast, `constant_value()` is\n all-or-nothing.\n\n Args:\n tensor: The rank-0 or rank-1 Tensor to be evaluated.\n\n Returns:\n A `TensorShape` based on the constant value of the given `tensor`.\n\n Raises:\n ValueError: If the shape is rank-0 and is not statically known to be -1.\n \"\"\"\n if isinstance(tensor, ops.EagerTensor):\n return tensor_shape.as_shape(\n [dim if dim != -1 else None for dim in tensor.numpy()])\n\n if tensor.get_shape().ndims == 0:\n value = constant_value(tensor)\n if value is None:\n raise ValueError(\n \"Received a scalar with unknown value as shape; require a statically \"\n \"known scalar with value '-1' to describe an unknown shape.\")\n if value != -1:\n raise ValueError(\n \"Received a scalar value '%s' as shape; require a statically known \"\n \"scalar with value '-1' to describe an unknown shape.\" % value)\n return tensor_shape.unknown_shape()\n\n shape = tensor.get_shape().with_rank(1)\n if shape == [0]:\n return tensor_shape.TensorShape([])\n elif tensor.op.type == \"Cast\":\n pre_cast = constant_value_as_shape(tensor.op.inputs[0])\n if pre_cast.dims is None:\n # the input to cast has a totally undefined shape; just return that.\n return pre_cast\n cast_dtype = dtypes.as_dtype(tensor.op.get_attr(\"DstT\"))\n if cast_dtype not in (dtypes.int32, dtypes.int64):\n return tensor_shape.unknown_shape(shape.dims[0].value)\n dest_dtype_shape_array = np.array(\n [x if x is not None else -1 for x in pre_cast.as_list()]).astype(\n cast_dtype.as_numpy_dtype)\n return tensor_shape.TensorShape([\n x if x >= 0 else None\n for x in dest_dtype_shape_array])\n elif tensor.op.type == \"Shape\":\n return tensor.op.inputs[0].get_shape()\n elif tensor.op.type == \"Pack\":\n ret = tensor_shape.TensorShape([]) # Empty list.\n # Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it\n # would not be rank 1.\n assert tensor.op.get_attr(\"axis\") == 0\n for pack_input in tensor.op.inputs:\n # `pack_input` must be a scalar. Attempt to evaluate it, and append it\n # to `ret`.\n pack_input_val = constant_value(pack_input)\n if pack_input_val is None or pack_input_val < 0:\n new_dim = tensor_shape.Dimension(None)\n else:\n new_dim = tensor_shape.Dimension(pack_input_val)\n ret = ret.concatenate([new_dim])\n return ret\n elif tensor.op.type == \"Concat\":\n # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is\n # the only legal value when concatenating vectors, and it will\n # have been checked by a previous shape function.\n ret = tensor_shape.TensorShape([]) # Empty list.\n for concat_input in tensor.op.inputs[1:]:\n # `concat_input` must be a vector. Attempt to evaluate it as a shape,\n # and concatenate it with `ret`.\n ret = ret.concatenate(constant_value_as_shape(concat_input))\n return ret\n elif tensor.op.type == \"ConcatV2\":\n # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is\n # the only legal value when concatenating vectors, and it will\n # have been checked by a previous shape function.\n ret = tensor_shape.TensorShape([]) # Empty list.\n for concat_input in tensor.op.inputs[:-1]:\n # `concat_input` must be a vector. 
Attempt to evaluate it as a shape,\n # and concatenate it with `ret`.\n ret = ret.concatenate(constant_value_as_shape(concat_input))\n return ret\n elif tensor.op.type == \"StridedSlice\":\n try:\n begin = constant_value(tensor.op.inputs[1])\n end = constant_value(tensor.op.inputs[2])\n strides = constant_value(tensor.op.inputs[3])\n if begin is not None and end is not None and strides is not None:\n begin = begin[0]\n end = end[0]\n strides = strides[0]\n begin_mask = tensor.op.get_attr(\"begin_mask\")\n if begin_mask == 1:\n begin = None\n end_mask = tensor.op.get_attr(\"end_mask\")\n if end_mask == 1:\n end = None\n\n ellipsis_mask = tensor.op.get_attr(\"ellipsis_mask\")\n new_axis_mask = tensor.op.get_attr(\"new_axis_mask\")\n shrink_axis_mask = tensor.op.get_attr(\"shrink_axis_mask\")\n valid_attributes = (not ellipsis_mask and not new_axis_mask and\n not shrink_axis_mask and (not begin_mask or\n (begin_mask == 1)) and\n (not end_mask or (end_mask == 1)))\n if valid_attributes: # additional inputs not supported\n prev = constant_value_as_shape(tensor.op.inputs[0])\n prev = prev[begin:end:strides]\n ret = tensor_shape.TensorShape(prev)\n return ret\n\n except ValueError: # Could come from get_attr or slicing prev.\n pass\n except TypeError: # Could come from slicing prev.\n pass\n elif (tensor.op.type == \"Placeholder\" and\n tensor.op.graph.building_function and\n hasattr(tensor.op.graph, \"internal_captures\")):\n # If we are inside a FuncGraph try to lookup the constant value of the\n # corresponding external capture. Note that we only look at captures and\n # not the fed inputs because those can be fed different values in different\n # instantiations of the function call or different iterations of a\n # tf.while_loop.\n for i, capture in enumerate(tensor.op.graph.internal_captures):\n if capture is tensor:\n external_capture = tensor.op.graph.external_captures[i]\n return constant_value_as_shape(external_capture)\n\n ret = tensor_shape.unknown_shape(shape.dims[0].value)\n value = constant_value(tensor)\n if value is not None:\n ret = ret.merge_with(\n tensor_shape.TensorShape([d if d >= 0 else None for d in value]))\n return ret\n\n\n@tf_export(\"is_tensor\")\ndef is_tensor(x): # pylint: disable=invalid-name\n \"\"\"Checks whether `x` is a tensor or \"tensor-like\".\n\n If `is_tensor(x)` returns `True`, it is safe to assume that `x` is a tensor or\n can be converted to a tensor using `ops.convert_to_tensor(x)`.\n \n Usage example:\n \n >>> tf.is_tensor(tf.constant([[1,2,3],[4,5,6],[7,8,9]])) \n True\n >>> tf.is_tensor(\"Hello World\")\n False\n \n Args:\n x: A python object to check.\n\n Returns:\n `True` if `x` is a tensor or \"tensor-like\", `False` if not.\n \"\"\"\n return (isinstance(x, tensor_like._TensorLike) or # pylint: disable=protected-access\n ops.is_dense_tensor_like(x) or\n getattr(x, \"is_tensor_like\", False))\n\n\ndef shape_tensor(shape): # pylint: disable=invalid-name\n \"\"\"Convert to an int32 or int64 tensor, defaulting to int32 if empty.\"\"\"\n dtype = None\n if isinstance(shape, (tuple, list)):\n if not shape:\n dtype = dtypes.int32\n else:\n # If there are Dimension objects in the shape, unwrap them. 
This can be a\n # problem if v1 and v2 TensorShape objects get mixed up in partial\n # conversions, leading to shapes such as (1, 2, Dimension(5)), which are\n # not convertible to Tensors becasue of mixed content.\n shape = tuple(map(tensor_shape.dimension_value, shape))\n return ops.convert_to_tensor(shape, dtype=dtype, name=\"shape\")\n\n\n# DO NOT USE: For testing only.\n_ENABLE_MAYBE_SET_STATIC_SHAPE = True\n\n\ndef maybe_set_static_shape(tensor, shape): # pylint: disable=invalid-name\n \"\"\"Sets the shape of `tensor` to the `shape`'s constant value, if inferrable.\n\n This is a temporary workaround to fix shape inference across functional op\n boundaries. E.g.\n\n ```python\n shape = tf.constant([3])\n @tf.function\n def f():\n u = tf.random_uniform(shape)\n return u\n ```\n\n If we were to rely solely on C++ shape inference, the shape of `u` inside\n `f` would be unknown because C++ shape inference is not aware of the outer\n graph and all it sees is a Placeholder node when backtracing the captured\n tensor for `shape`. `maybe_set_static_shape` computes the static shape value\n of `shape` by traversing the `FuncGraph` boundaries and sets the correct\n shape.\n\n A longer term solution would be to fix C++ shape inference.\n\n Args:\n tensor: A tensor.\n shape: A shape tensor.\n \"\"\"\n if (_ENABLE_MAYBE_SET_STATIC_SHAPE and not context.executing_eagerly() and\n ops.get_default_graph().building_function and\n not tensor.shape.is_fully_defined() and is_tensor(shape)):\n shape = shape_tensor(shape)\n const_shape = constant_value_as_shape(shape)\n tensor.set_shape(const_shape)\n"
] | [
[
"tensorflow.compat.v1.random_normal_initializer",
"tensorflow.compat.v1.compat.v1.saved_model.signature_def_utils.build_signature_def",
"tensorflow.compat.v1.compat.v1.enable_resource_variables",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.compat.v1.saved_model.utils.build_tensor_info",
"tensorflow.compat.v1.constant"
],
[
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.utils.data_utils.get_file"
],
[
"tensorflow.python.autograph.pyct.anno.setanno",
"tensorflow.python.autograph.pyct.templates.replace",
"tensorflow.python.autograph.pyct.anno.hasanno",
"tensorflow.python.autograph.pyct.pretty_printer.fmt",
"tensorflow.python.autograph.pyct.loader.load_ast",
"tensorflow.python.autograph.pyct.anno.getanno"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"numpy.asarray",
"tensorflow.python.framework.ops.is_dense_tensor_like",
"numpy.concatenate",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.pad",
"numpy.arange",
"tensorflow.python.util.tf_export.tf_export",
"numpy.frombuffer",
"numpy.zeros",
"tensorflow.python.framework.dtypes.as_dtype",
"numpy.equal",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"numpy.not_equal",
"numpy.fromiter",
"numpy.array",
"numpy.array_equal",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.ops.get_default_graph",
"numpy.prod",
"tensorflow.python.framework.tensor_shape.as_shape",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.7"
]
}
] |
shpotes/s4x | [
"83151b8a7cfc78ebfc1d87ccc5109b6a0444a5e5",
"d14be41ea8994c36fb75801a12837c1d3e77cb57"
] | [
"t5x/losses_test.py",
"t5x/contrib/moe/trainer_test.py"
] | [
"# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for t5x.losses.\"\"\"\n\nfrom absl.testing import absltest\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom t5x import losses\n\n\nclass LossTest(absltest.TestCase):\n\n def test_xent(self):\n\n def lossfn(logits, targets, weights):\n loss, z_loss, weight_sum = losses.compute_weighted_cross_entropy(\n logits,\n targets,\n weights,\n label_smoothing=0.1,\n z_loss=0.1,\n loss_normalizing_factor=0.1)\n return loss, (z_loss, weight_sum)\n\n batch_size = 2\n length = 4\n vocab_size = 8\n logits = np.random.normal(size=(batch_size, length,\n vocab_size)).astype(np.float32)\n targets = np.random.randint(0, vocab_size, size=(batch_size, length))\n weights = np.ones_like(targets)\n out = jax.jit(jax.value_and_grad(lossfn, has_aux=True))(logits, targets,\n weights)\n (loss, (z_loss, weight_sum)), dlogits = out\n # Just a smoke test for now\n # TODO(t5x): Expand test\n print(jax.device_get(((loss, (z_loss, weight_sum)), dlogits)))\n\n\nclass SpecialLossNormalizingFactorTest(absltest.TestCase):\n\n def test_num_real_target_tokens(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)\n }\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .NUM_REAL_TARGET_TOKENS,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 6.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],\n dtype=np.float32),\n rtol=1e-3)\n\n def test_num_total_target_tokens(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)\n }\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .NUM_TOTAL_TARGET_TOKENS,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 10.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],\n dtype=np.float32),\n rtol=1e-3)\n\n def test_average_per_sequence(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)\n }\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .AVERAGE_PER_SEQUENCE,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n jnp.asarray([[0.25, 0.25, 0.25, 0.25, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],\n jnp.float32),\n rtol=1e-3)\n\n def test_average_per_sequence_with_weights(self):\n batch = {\n 'decoder_target_tokens':\n jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32),\n 'decoder_loss_weights':\n jnp.asarray([[0.5, 1.0, 0.25, 2.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],\n jnp.float32)\n 
}\n\n (output_lnf,\n output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(\n loss_normalizing_factor=losses.SpecialLossNormalizingFactor\n .AVERAGE_PER_SEQUENCE,\n batch=batch)\n\n np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)\n np.testing.assert_allclose(\n output_loss_weights,\n jnp.asarray(\n [[0.1333, 0.2666, 0.0666, 0.5333, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],\n jnp.float32),\n rtol=1e-3)\n\nif __name__ == '__main__':\n absltest.main()\n",
"# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for trainer.\"\"\"\n\nimport contextlib\n\nfrom absl.testing import absltest\nfrom flax import optim\nimport jax\nimport numpy as np\nfrom t5x import metrics as metrics_lib\nfrom t5x import models as models_lib\nfrom t5x import train_state as train_state_lib\nfrom t5x.contrib.moe import partitioning\nfrom t5x.contrib.moe import trainer as trainer_lib\nimport tensorflow as tf\n\nmock = absltest.mock\njax.config.parse_flags_with_absl()\n\n\n# Make `log_elapsed_time` a no-op to simplify mocking of `time.time()`.\[email protected]\ndef fake_log_elapsed_time(_):\n yield\n\n\njax._src.dispatch.log_elapsed_time = fake_log_elapsed_time\n\n\ndef fake_accum_grads(model, optimizer, batch, rng, num_microbatches,\n data_partition_spec):\n del model, num_microbatches, rng, data_partition_spec\n # Add `i` to each optimzer value.\n i = batch['i'].sum()\n grad_accum = jax.tree_map(lambda x: i, optimizer)\n # Add j to each metric.\n j = batch['j'].sum()\n metrics = {\n 'loss': metrics_lib.Sum.from_model_output(j),\n 'accuracy': metrics_lib.Sum.from_model_output(j)\n }\n return grad_accum, metrics, None\n\n\ndef fake_apply_grads(optimizer,\n grad_accum,\n metrics,\n learning_rate,\n weight_metrics_computer,\n other_state_variables=None):\n del weight_metrics_computer\n del other_state_variables\n metrics['learning_rate'] = metrics_lib.Sum.from_model_output(learning_rate)\n optimizer = jax.tree_multimap(lambda x, g: x + g, optimizer, grad_accum)\n return optimizer, metrics\n\n\nclass MoeTrainerTest(absltest.TestCase):\n\n def setUp(self):\n super().setUp()\n self.init_optimizer = optim.Optimizer(\n optim.GradientDescent(),\n state=optim.OptimizerState(\n step=0, param_states={\n 'expert_bias': 0,\n 'kernel': 0\n }),\n target={\n 'expert_bias': np.zeros(4),\n 'kernel': np.zeros((2, 4))\n })\n self.init_train_state = train_state_lib.FlaxOptimTrainState(\n self.init_optimizer)\n train_state_axes = jax.tree_map(lambda x: None, self.init_train_state)\n model_dir = self.create_tempdir().full_path\n\n mapfn = lambda i: {'i': [tf.cast(i, tf.int32)], 'j': [tf.cast(1, tf.int32)]}\n self.dataset = tf.data.Dataset.range(6).map(mapfn).batch(\n 2, drop_remainder=True)\n\n num_experts = 10\n self.test_trainer = trainer_lib.MoeTrainer(\n model=mock.create_autospec(models_lib.BaseModel, instance=True),\n train_state=self.init_train_state,\n partitioner=partitioning.MoePjitPartitioner(\n num_experts=num_experts, num_partitions=1),\n eval_names=['task1', 'task2'],\n summary_dir=model_dir,\n train_state_axes=train_state_axes,\n rng=np.ones(2, np.uint32),\n learning_rate_fn=lambda step: 2 * step,\n num_microbatches=None,\n num_experts=num_experts)\n\n @mock.patch('time.time')\n @mock.patch('t5x.trainer.accumulate_grads_microbatched', fake_accum_grads)\n @mock.patch('t5x.trainer.apply_grads', fake_apply_grads)\n @mock.patch('absl.logging.log', lambda *_: None) # avoids time.time() calls\n def _test_train(self, precompile, 
mock_time=None):\n trainer = self.test_trainer\n initial_rng = trainer._base_rng\n\n if precompile:\n mock_time.side_effect = [0, 1]\n trainer.compile_train(next(self.dataset.as_numpy_iterator()))\n trainer._compiled_train_step = mock.Mock(\n side_effect=trainer._compiled_train_step)\n\n trainer._partitioned_train_step = mock.Mock(\n side_effect=trainer._partitioned_train_step)\n\n # train start, logging, train end, logging\n mock_time.side_effect = [1, 5]\n num_steps = 2\n trainer.train(self.dataset.as_numpy_iterator(), num_steps)\n\n # Base rng must remain the same.\n np.testing.assert_array_equal(trainer._base_rng, initial_rng)\n\n expected_optimizer = optim.Optimizer(\n self.init_optimizer.optimizer_def,\n state=optim.OptimizerState(\n step=[6],\n param_states={\n 'expert_bias': 60, # 10 * (0+1+2+3) = 60\n 'kernel': 6 # 0+1+2+3 = 6\n }),\n target={\n 'expert_bias': 60 * np.ones(4),\n 'kernel': 6 * np.ones((2, 4))\n })\n expected_train_state = train_state_lib.FlaxOptimTrainState(\n expected_optimizer)\n jax.tree_multimap(np.testing.assert_allclose, trainer.train_state,\n expected_train_state)\n\n if precompile:\n self.assertEqual(trainer._compiled_train_step.call_count, num_steps)\n trainer._partitioned_train_step.assert_not_called()\n else:\n self.assertIsNone(trainer._compiled_train_step)\n self.assertEqual(trainer._partitioned_train_step.call_count, num_steps)\n\n def test_train_noprecompile(self):\n self._test_train(False)\n\n def test_train_precompile(self):\n self._test_train(True)\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"numpy.ones_like",
"numpy.random.normal",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint"
],
[
"tensorflow.cast",
"numpy.ones",
"numpy.testing.assert_array_equal",
"tensorflow.data.Dataset.range",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
rafalmularczyk/public_lectures | [
"fcd10c217f56021ebdec0046dfe0def7f31e9b0c"
] | [
"Data Analytics/Utilities/DA_tools.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nlight=\"#FFFCDC\"\nlight_highlight=\"#FEF590\"\nmid=\"#FDED2A\"\nmid_highlight=\"#f0dc05\"\ndark=\"#EECA02\"\ndark_highlight=\"#BB9700\"\ngreen=\"#00FF00\"\nlight_grey=\"#DDDDDD\"\n\ndef is_sorted(a):\n '''Check if numpy 1d-array is sorted\n '''\n return np.all(a[:-1] <= a[1:])\n\ndef ribbon_plot(x, fx, ax=None,zorder=0):\n '''Plot a ribbon plot for regression and similar.\n Plot consists of quantiles (by 10%) of a variate (fx) as a function of covariate (x).\n x has shape (n, )\n fx has shape (N,n)\n '''\n if ax is None:\n ax = plt.gca()\n if not is_sorted(x):\n print('Sorting')\n arr2D = np.concatenate([np.expand_dims(x,axis=0),fx],axis=0)\n sortedArr = arr2D [ :, arr2D[0].argsort()]\n x = sortedArr[0,:]\n fx = sortedArr[1:,:]\n\n probs = [10, 20, 30, 40, 50, 60, 70, 80, 90]\n perc_interv=np.percentile(fx, probs, axis=0)\n ax.fill_between(x,perc_interv[0,:],perc_interv[8,:],color=light,zorder=zorder)\n ax.fill_between(x,perc_interv[1,:],perc_interv[7,:],color=light_highlight,zorder=zorder)\n ax.fill_between(x,perc_interv[2,:],perc_interv[6,:],color=mid,zorder=zorder)\n ax.fill_between(x,perc_interv[3,:],perc_interv[5,:],color=mid_highlight,zorder=zorder)\n ax.plot(x,perc_interv[4,:],color=dark,zorder=zorder)\n return(ax)\n"
] | [
[
"numpy.all",
"matplotlib.pyplot.gca",
"numpy.expand_dims",
"numpy.percentile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mkulariya1/tefla | [
"8de25c1b67dcf025535f5e8c40539de59acd7fb8",
"8de25c1b67dcf025535f5e8c40539de59acd7fb8",
"34f8fd0e2f2ee02aa73c6289753e08a95cc41880"
] | [
"tefla/core/losses.py",
"tefla/dataset/textdataset.py",
"tefla/dataset/textdataflow.py"
] | [
"# -------------------------------------------------------------------#\n# Written by Mrinal Haloi\n# Contact: [email protected]\n# Copyright 2016, Mrinal Haloi\n# -------------------------------------------------------------------#\nimport numpy as np\nimport tensorflow as tf\nimport numbers\nfrom functools import partial\nfrom ..utils import util\nfrom .layers import flatten, fully_connected as fc, relu\nfrom .layers import gradient_reverse\nfrom ..utils import losses_utils\nlog_loss = tf.losses.log_loss\n\n\ndef log_loss_custom(predictions, labels, eps=1e-7, name='log'):\n \"\"\"Define a log loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels.\n eps: a constant to set upper or lower limit for labels, smoothening factor\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the log loss.\n \"\"\"\n with tf.name_scope(name):\n predictions = tf.to_float(predictions)\n labels = tf.to_float(labels)\n predictions = tf.clip_by_value(predictions, eps, 1 - eps)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n loss = -tf.reduce_mean(labels * tf.log(predictions))\n return loss\n\n\ndef log_loss_tf(predictions, labels, eps=1e-7, weights=1.0, name='log_loss'):\n \"\"\"Define a log loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels.\n eps: a constant to set upper or lower limit for labels, smoothening factor\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the log loss.\n \"\"\"\n with tf.name_scope(name):\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n predictions = tf.to_float(predictions)\n labels = tf.to_float(labels)\n losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply(\n (1 - labels), tf.log(1 - predictions + eps))\n return tf.losses.compute_weighted_loss(losses, weights)\n\n\ndef kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5, batch_size=32, name='kappa'):\n \"\"\"Define a kappa loss, Its a continuous differentiable approximation of\n discrete kappa loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n y_pow: int, to whcih the labels should be raised; useful if model diverge. e.g. 
y_pow=2\n num_ratings: numbers of rater to used, typically num_classes of the model\n batch_size: batch_size of the training or validation ops\n eps: a float, prevents divide by zero\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the kappa loss.\n \"\"\"\n with tf.name_scope(name):\n labels = tf.to_float(labels)\n repeat_op = tf.to_float(\n tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))\n repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))\n weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)\n\n pred_ = predictions**y_pow\n try:\n pred_norm = pred_ / \\\n (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))\n except Exception:\n pred_norm = pred_ / \\\n (eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))\n\n hist_rater_a = tf.reduce_sum(pred_norm, 0)\n hist_rater_b = tf.reduce_sum(labels, 0)\n\n conf_mat = tf.matmul(tf.transpose(pred_norm), labels)\n\n nom = tf.reduce_sum(weights * conf_mat)\n denom = tf.reduce_sum(weights * tf.matmul(\n tf.reshape(hist_rater_a, [num_ratings, 1]), tf.reshape(hist_rater_b, [1, num_ratings])) /\n tf.to_float(batch_size))\n\n try:\n return -(1 - nom / denom)\n except Exception:\n return -(1 - nom / (denom + eps))\n\n\ndef kappa_log_loss(predictions,\n labels,\n label_smoothing=0.0,\n y_pow=1,\n batch_size=32,\n log_scale=0.5,\n num_classes=5,\n log_offset=0.50,\n name='kappa_log'):\n \"\"\"Define a joint kappa and log loss, Kappa is a continuous differentiable\n approximation of discrete kappa loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n label_smoothing: a float, used to smooth the labels for better generalization\n if greater than 0 then smooth the labels.\n y_pow: int, to whcih the labels should be raised; useful if model diverge. e.g. y_pow=2\n num_ratings: numbers of rater to used, typically num_classes of the model\n batch_size: batch_size of the training or validation ops\n log_scale: a float, used to multiply the clipped log loss, e.g: 0.5\n log_offset:a float minimum log loss offset to substract from original log loss; e.g. 
0.50\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the kappa log loss.\n \"\"\"\n with tf.name_scope(name):\n num_classes = labels.get_shape()[-1].value\n labels = tf.cast(labels, predictions.dtype)\n if label_smoothing > 0:\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n labels = labels * smooth_positives + smooth_negatives\n log_loss_res = log_loss(predictions, labels)\n kappa_loss_res = kappa_loss(\n predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)\n return kappa_loss_res + log_scale * (log_loss_res - log_offset)\n\n\ndef kappa_log_loss_clipped(predictions,\n labels,\n label_smoothing=0.0,\n y_pow=1,\n batch_size=32,\n log_scale=0.5,\n log_cutoff=0.80,\n num_classes=5,\n name='kappa_log_clipped'):\n \"\"\"Define a joint kappa and log loss; log loss is clipped by a defined min\n value; Kappa is a continuous differentiable approximation of discrete kappa\n loss.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n label_smoothing: a float, used to smooth the labels for better generalization\n if greater than 0 then smooth the labels.\n y_pow: int, to whcih the labels should be raised; useful if model diverge. e.g. y_pow=2\n num_ratings: numbers of rater to used, typically num_classes of the model\n batch_size: batch_size of the training or validation ops\n log_scale: a float, used to multiply the clipped log loss, e.g: 0.5\n log_cutoff:a float, minimum log loss value; e.g. 0.50\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the clipped kappa log loss.\n \"\"\"\n with tf.name_scope(name):\n num_classes = labels.get_shape()[-1].value\n labels = tf.cast(labels, predictions.dtype)\n if label_smoothing > 0:\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n labels = labels * smooth_positives + smooth_negatives\n log_loss_res = log_loss_tf(predictions, labels)\n kappa_loss_res = kappa_loss(\n predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)\n return kappa_loss_res + log_scale * tf.clip_by_value(log_loss_res, log_cutoff, 10**3)\n\n\ndef cross_entropy_loss(logits, labels, label_smoothing=0.0, weight=1.0, name='cross_entropy_loss'):\n \"\"\"Define a cross entropy loss with label smoothing.\n\n Args:\n predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .\n labels: 2D tensor or array,[batch_size, num_classes] ground truth labels or target labels.\n label_smoothing: a float, used to smooth the labels for better generalization\n if greater than 0 then smooth the labels.\n weight: scale the loss by this factor.\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the cross entropy loss.\n \"\"\"\n logits.get_shape().assert_is_compatible_with(labels.get_shape())\n with tf.name_scope(name):\n num_classes = labels.get_shape()[-1].value\n labels = tf.cast(labels, logits.dtype)\n if label_smoothing > 0:\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n labels = labels * smooth_positives + smooth_negatives\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name='xentropy')\n weight = tf.convert_to_tensor(weight, dtype=logits.dtype.base_dtype, name='loss_weight')\n loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), 
name='value')\n return loss\n\n\ndef l1_l2_regularizer(var, weight_l1=1.0, weight_l2=1.0, name='l1_l2_regularizer'):\n \"\"\"Define a L2Loss, useful for regularize, i.e. weight decay.\n\n Args:\n var: tensor to regularize.\n weight_l1: an optional weight to modulate the l1 loss.\n weight_l2: an optional weight to modulate the l2 loss.\n name: Optional scope/name for op_scope.\n\n Returns:\n the l1+L2 loss op.\n \"\"\"\n with tf.name_scope(name):\n weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1')\n weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2')\n reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(var)), name='value_l1')\n reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(var), name='value_l2')\n return tf.add(reg_l1, reg_l2, name='value')\n\n\ndef l1_regularizer(scale, name='l1_regularizer'):\n \"\"\"Returns a function that can be used to apply L1 regularization to weights.\n L1 regularization encourages sparsity.\n\n Args:\n scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n name: An optional name/scope name.\n\n Returns:\n A function with signature `l1(weights)` that apply L1 regularization.\n\n Raises:\n ValueError: If scale is negative or if scale is not a float.\n \"\"\"\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % scale)\n if isinstance(scale, numbers.Real):\n if scale < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)\n if scale == 0.:\n return lambda _: None\n\n def l1(weights, name='l1_regularizer'):\n \"\"\"Applies L1 regularization to weights.\"\"\"\n with tf.name_scope(name):\n my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')\n return tf.multiply(my_scale, tf.reduce_sum(tf.abs(weights)), name=name)\n\n return l1\n\n\ndef l2_regularizer(scale, name='l2_regularizer'):\n \"\"\"Returns a function that can be used to apply L2 regularization to weights.\n Small values of L2 can help prevent overfitting the training data.\n\n Args:\n scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n name: An optional name/scope name.\n\n Returns:\n A function with signature `l2(weights)` that applies L2 regularization.\n\n Raises:\n ValueError: If scale is negative or if scale is not a float.\n \"\"\"\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % (scale,))\n if isinstance(scale, numbers.Real):\n if scale < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g.' 
% scale)\n if scale == 0.:\n return lambda _: None\n\n def l2(weights, name='l2_regularizer'):\n \"\"\"Applies l2 regularization to weights.\"\"\"\n with tf.name_scope(name):\n my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')\n return tf.multiply(my_scale, nn.l2_loss(weights), name=name)\n\n return l2\n\n\ndef discretized_mix_logistic_loss(inputs,\n predictions,\n sum_all=True,\n name='disretized_mix_logistic_loss'):\n \"\"\"log-likelihood for mixture of discretized logistics, assumes the data has\n been rescaled to.\n\n [-1,1] interval\n\n Args:\n predictions: 4D tensor or array, [batch_size, width, height, out_channels]\n predictions of the network .\n inputs: 4D tensor or array, [batch_size, width, height, num_classes]\n ground truth labels or target labels.\n name: Optional scope/name for op_scope.\n\n Returns:\n A tensor with the discretized mix logistic loss.\n \"\"\"\n with tf.name_scope(name):\n inputs_shape = list(map(int, inputs.get_shape()))\n predictions_shape = list(map(int, predictions.get_shape()))\n nr_mix = int(predictions_shape[-1] / 10)\n logit_probs = predictions[:, :, :, :nr_mix]\n predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])\n means = predictions[:, :, :, :, :nr_mix]\n log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)\n coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])\n inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])\n m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],\n [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])\n m3 = tf.reshape(\n means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +\n coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],\n [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])\n means = tf.concat([\n tf.reshape(means[:, :, :, 0, :],\n [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3\n ],\n axis=3)\n centered_inputs = inputs - means\n inv_stdv = tf.exp(-log_scales)\n plus_in = inv_stdv * (centered_inputs + 1. / 255.)\n cdf_plus = tf.nn.sigmoid(plus_in)\n min_in = inv_stdv * (centered_inputs - 1. / 255.)\n cdf_min = tf.nn.sigmoid(min_in)\n log_cdf_plus = plus_in - tf.nn.softplus(plus_in)\n log_one_minus_cdf_min = -tf.nn.softplus(min_in)\n cdf_delta = cdf_plus - cdf_min\n mid_in = inv_stdv * centered_inputs\n log_pdf_mid = mid_in - log_scales - 2. 
* tf.nn.softplus(mid_in)\n log_probs = tf.select(\n inputs < -0.999, log_cdf_plus,\n tf.select(\n inputs > 0.999, log_one_minus_cdf_min,\n tf.select(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)),\n log_pdf_mid - np.log(127.5))))\n\n log_probs = tf.reduce_sum(log_probs, 3) + \\\n log_prob_from_logits(logit_probs)\n if sum_all:\n return -tf.reduce_sum(log_sum_exp(log_probs))\n else:\n return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])\n\n\ndef mse_loss(pred, labels):\n try:\n batch_size = tf.cast(pred.shape[0], tf.float32)\n except Exception as e:\n print('Pred is a tf tensor %s' % str(e.message))\n batch_size = tf.cast(tf.shape(pred)[0], tf.float32)\n loss_val = tf.sqrt(2 * tf.nn.l2_loss(pred - labels)) / batch_size\n return loss_val\n\n\ndef pullaway_loss(embeddings, name='pullaway_loss'):\n \"\"\"Pull Away loss calculation.\n\n Args:\n embeddings: The embeddings to be orthogonalized for varied faces.\n Shape [batch_size, embeddings_dim]\n\n Return: pull away term loss\n \"\"\"\n with tf.name_scope(name):\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)\n batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)\n pt_loss = (tf.reduce_sum(similarity) - batch_size) / \\\n (batch_size * (batch_size - 1))\n return pt_loss\n\n\ndef log_sum_exp(x):\n \"\"\"numerically stable log_sum_exp implementation that prevents overflow.\"\"\"\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))\n\n\ndef log_prob_from_logits(x):\n \"\"\"numerically stable log_softmax implementation that prevents overflow.\"\"\"\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))\n\n\ndef segment_loss(logits, labels, num_classes, head=None):\n \"\"\"Calculate the loss from the logits and the labels.\n\n Args:\n logits: tensor, float - [batch_size * width * height, num_classes].\n Use vgg_fcn.up as logits.\n labels: Labels tensor, int32 - [batch_size * width * height, num_classes].\n The ground truth of your data.\n head: numpy array - [num_classes]\n Weighting the loss of each class\n Optional: Prioritize some classes\n\n Returns:\n loss: Loss tensor of type float.\n \"\"\"\n with tf.name_scope('segment_loss'):\n # logits = tf.reshape(logits, (-1, num_classes))\n epsilon = tf.constant(value=1e-7)\n labels = tf.to_float(labels)\n # labels = tf.to_float(tf.reshape(labels, (-1, num_classes)))\n\n softmax = tf.nn.softmax(logits) + epsilon\n\n if head is not None:\n cross_entropy = -tf.reduce_sum(tf.mul(labels * tf.log(softmax), head), axis=[1])\n else:\n cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])\n\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n return cross_entropy_mean\n\n\ndef triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):\n \"\"\"Calculate the triplet loss according to the FaceNet paper.\n\n Args:\n anchor: 2-D `tensor` [batch_size, embedding_size], the embeddings for the anchor images.\n positive: 2-D `tensor` [batch_size, embedding_size], the embeddings for the positive images.\n negative: 2-D `tensor` [batch_size, embedding_size], the embeddings for the negative images.\n alpha: positive to negative triplet distance margin\n\n Returns:\n the triplet loss.\n \"\"\"\n 
with tf.name_scope(name):\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)\n basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)\n loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)\n return loss\n\n\ndef decov_loss(xs, name='decov_loss'):\n \"\"\"Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf 'Reducing\n Overfitting In Deep Networks by Decorrelating Representation'.\n\n Args:\n xs: 4-D `tensor` [batch_size, height, width, channels], input\n\n Returns:\n a `float` decov loss\n \"\"\"\n with tf.name_scope(name):\n x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])\n m = tf.reduce_mean(x, 0, True)\n z = tf.expand_dims(x - m, 2)\n corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)\n corr_frob_sqr = tf.reduce_sum(tf.square(corr))\n corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))\n loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)\n return loss\n\n\ndef center_loss(features, label, alpha, num_classes, name='center_loss'):\n \"\"\"Center loss based on the paper \"A Discriminative Feature Learning Approach\n for Deep Face Recognition\" (http://ydwen.github.io/papers/WenECCV16.pdf)\n\n Args:\n features: 2-D `tensor` [batch_size, feature_length], input features\n label: 1-D `tensor` [batch_size], input label\n alpha: center loss parameter\n num_classes: a `int` numof classes for training\n\n Returns:\n a `float`, center loss\n \"\"\"\n with tf.variable_scope(name):\n num_features = features.get_shape()[1]\n centers = tf.get_variable(\n 'centers', [num_classes, num_features],\n dtype=tf.float32,\n initializer=tf.constant_initializer(0),\n trainable=False)\n label = tf.reshape(label, [-1])\n centers_batch = tf.gather(centers, label)\n diff = (1 - alpha) * (centers_batch - features)\n centers = tf.scatter_sub(centers, label, diff)\n loss = tf.nn.l2_loss(features - centers_batch)\n return loss, centers\n\n\ndef correlation_loss(source_samples, target_samples, weight, name='corr_loss'):\n \"\"\"Adds a similarity loss term, the correlation between two representations.\n\n Args:\n source_samples: a tensor of shape [num_samples, num_features]\n target_samples: a tensor of shape [num_samples, num_features]\n weight: a scalar weight for the loss.\n scope: optional name scope for summary tags.\n\n Returns:\n a scalar tensor representing the correlation loss value.\n \"\"\"\n with tf.name_scope(name):\n source_samples -= tf.reduce_mean(source_samples, 0)\n target_samples -= tf.reduce_mean(target_samples, 0)\n source_samples = tf.nn.l2_normalize(source_samples, 1)\n target_samples = tf.nn.l2_normalize(target_samples, 1)\n source_cov = tf.matmul(tf.transpose(source_samples), source_samples)\n target_cov = tf.matmul(tf.transpose(target_samples), target_samples)\n corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight\n\n assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])\n with tf.control_dependencies([assert_op]):\n tag = 'Correlation Loss'\n barrier = tf.no_op(tag)\n\n return corr_loss\n\n\ndef maximum_mean_discrepancy(x,\n y,\n kernel=util.gaussian_kernel_matrix,\n name='maximum_mean_discrepancy'):\n r\"\"\"Computes the Maximum Mean Discrepancy (MMD) of two samples: x and y.\n\n Maximum Mean Discrepancy (MMD) is a distance-measure between the samples of\n the distributions of x and y. 
Here we use the kernel two sample estimate\n using the empirical mean of the two distributions.\n\n MMD^2(P, Q) = || \\E{\\phi(x)} - \\E{\\phi(y)} ||^2\n = \\E{ K(x, x) } + \\E{ K(y, y) } - 2 \\E{ K(x, y) },\n\n where K = <\\phi(x), \\phi(y)>,\n is the desired kernel function, in this case a radial basis kernel.\n\n Args:\n x: a tensor of shape [num_samples, num_features]\n y: a tensor of shape [num_samples, num_features]\n kernel: a function which computes the kernel in MMD. Defaults to the\n GaussianKernelMatrix.\n\n Returns:\n a scalar denoting the squared maximum mean discrepancy loss.\n \"\"\"\n with tf.name_scope(name):\n # \\E{ K(x, x) } + \\E{ K(y, y) } - 2 \\E{ K(x, y) }\n cost = tf.reduce_mean(kernel(x, x))\n cost += tf.reduce_mean(kernel(y, y))\n cost -= 2 * tf.reduce_mean(kernel(x, y))\n\n # We do not allow the loss to become negative.\n cost = tf.where(cost > 0, cost, 0, name='value')\n return cost\n\n\ndef mmd_loss(source_samples, target_samples, weight, name='mmd_loss'):\n \"\"\"Adds a similarity loss term, the MMD between two representations.\n\n This Maximum Mean Discrepancy (MMD) loss is calculated with a number of\n different Gaussian kernels.\n\n Args:\n source_samples: a tensor of shape [num_samples, num_features].\n target_samples: a tensor of shape [num_samples, num_features].\n weight: the weight of the MMD loss.\n scope: optional name scope for summary tags.\n\n Returns:\n a scalar tensor representing the MMD loss value.\n \"\"\"\n with tf.name_scope(name):\n sigmas = [\n 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6\n ]\n gaussian_kernel = partial(util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))\n\n loss_value = maximum_mean_discrepancy(source_samples, target_samples, kernel=gaussian_kernel)\n loss_value = tf.maximum(1e-4, loss_value) * weight\n assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])\n with tf.control_dependencies([assert_op]):\n tag = 'MMD_Loss'\n barrier = tf.no_op(tag)\n return loss_value\n\n\ndef dann_loss(source_samples, target_samples, weight, name='dann_loss'):\n \"\"\"Adds the domain adversarial (DANN) loss.\n\n Args:\n source_samples: a tensor of shape [num_samples, num_features].\n target_samples: a tensor of shape [num_samples, num_features].\n weight: the weight of the loss.\n scope: optional name scope for summary tags.\n\n Returns:\n a scalar tensor representing the correlation loss value.\n \"\"\"\n with tf.variable_scope(name):\n batch_size = tf.shape(source_samples)[0]\n samples = tf.concat(values=[source_samples, target_samples], axis=0)\n samples = flatten(samples)\n\n domain_selection_mask = tf.concat(\n values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)\n\n grl = gradient_reverse(samples)\n grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))\n\n grl = fc(grl, 100, True, None, activation=relu, name='fc1')\n logits = fc(grl, 1, True, None, activation=None, name='fc2')\n\n domain_predictions = tf.sigmoid(logits)\n\n domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight)\n\n domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions))\n\n assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])\n with tf.control_dependencies([assert_op]):\n tag_loss = 'losses/domain_loss'\n barrier = tf.no_op(tag_loss)\n\n return domain_loss\n\n\ndef difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):\n \"\"\"Adds the difference loss between the private and 
shared representations.\n\n Args:\n private_samples: a tensor of shape [num_samples, num_features].\n shared_samples: a tensor of shape [num_samples, num_features].\n weight: the weight of the incoherence loss.\n name: the name of the tf summary.\n \"\"\"\n with tf.name_scope(name):\n private_samples -= tf.reduce_mean(private_samples, 0)\n shared_samples -= tf.reduce_mean(shared_samples, 0)\n\n private_samples = tf.nn.l2_normalize(private_samples, 1)\n shared_samples = tf.nn.l2_normalize(shared_samples, 1)\n\n correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)\n\n cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight\n cost = tf.where(cost > 0, cost, 0, name='value')\n\n assert_op = tf.Assert(tf.is_finite(cost), [cost])\n with tf.control_dependencies([assert_op]):\n barrier = tf.no_op(name)\n return cost\n\n\ndef log_quaternion_loss_batch(predictions, labels, name='log_quaternion_batch_loss'):\n \"\"\"A helper function to compute the error between quaternions.\n\n Args:\n predictions: A Tensor of size [batch_size, 4].\n labels: A Tensor of size [batch_size, 4].\n params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.\n\n Returns:\n A Tensor of size [batch_size], denoting the error between the quaternions.\n \"\"\"\n assertions = []\n assertions.append(\n tf.Assert(\n tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)),\n ['The l2 norm of each prediction quaternion vector should be 1.']))\n assertions.append(\n tf.Assert(\n tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)),\n ['The l2 norm of each label quaternion vector should be 1.']))\n with tf.name_scope(name):\n with tf.control_dependencies(assertions):\n product = tf.multiply(predictions, labels)\n internal_dot_products = tf.reduce_sum(product, [1])\n logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))\n return logcost\n\n\ndef log_quaternion_loss(predictions, labels, batch_size, name='log_quaternion_loss'):\n \"\"\"A helper function to compute the mean error between batches of\n quaternions.\n\n The caller is expected to add the loss to the graph.\n\n Args:\n predictions: A Tensor of size [batch_size, 4].\n labels: A Tensor of size [batch_size, 4].\n params: A dictionary of parameters. 
Expecting 'use_logging', 'batch_size'.\n\n Returns:\n A Tensor of size 1, denoting the mean error between batches of quaternions.\n \"\"\"\n with tf.name_scope(name):\n logcost = log_quaternion_loss_batch(predictions, labels)\n logcost = tf.reduce_sum(logcost, [0])\n logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss')\n return logcost\n\n\ndef random_perturbation_loss(embedded, length, loss_fn, perturb_norm_length=0.1):\n \"\"\"Adds noise to embeddings and recomputes classification loss.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n length: a `int`, length of the mask\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validatio\n\n Returns:\n perturbation loss\n \"\"\"\n noise = tf.random_normal(shape=tf.shape(embedded))\n perturb = _scale_l2(_mask_by_length(noise, length), perturb_norm_length)\n return loss_fn(embedded + perturb)\n\n\ndef adversarial_loss(embedded, loss, loss_fn, perturb_norm_length=0.1):\n \"\"\"Adds gradient to embedding and recomputes classification loss.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n loss: `float`, loss\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validatio\n\n Returns:\n adversial loss\n \"\"\"\n grad, = tf.gradients(\n loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n grad = tf.stop_gradient(grad)\n perturb = _scale_l2(grad, perturb_norm_length)\n return loss_fn(embedded + perturb)\n\n\ndef virtual_adversarial_loss(logits,\n embedded,\n labels,\n length,\n logits_from_embedding_fn,\n num_classes,\n num_power_iteration=1,\n small_constant_for_finite_diff=1e-3,\n perturb_norm_length=0.1):\n \"\"\"Virtual adversarial loss. 
Computes virtual adversarial perturbation by\n finite difference method and power iteration, adds it to the embedding, and\n computes the KL divergence between the new logits and the original logits.\n\n Args:\n logits: 2-D float `Tensor`, [num_timesteps*batch_size, m], where m=1 if\n num_classes=2, otherwise m=num_classes.\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim].\n labels: 1-D `Tensor`, input labels\n length: a `int`, input length\n logits_from_embedding_fn: callable that takes embeddings and returns\n classifier logits.\n num_classes: num_classes for training\n vocab_size: a `int`, vocabular size of the problem\n num_power_iteration: a `int`, the number of power iteration\n small_constant_for_finite_diff: a `float`, Small constant for finite difference method\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validatio\n\n Returns:\n a `float` `scalar`, KL divergence.\n \"\"\"\n logits = tf.stop_gradient(logits)\n weights = _end_of_seq_mask(labels, vocab_size)\n\n d = _mask_by_length(tf.random_normal(shape=tf.shape(embedded)), length)\n\n for _ in range(num_power_iteration):\n d = _scale_l2(d, small_constant_for_finite_diff)\n d_logits = logits_from_embedding_fn(embedded + d)\n kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)\n d, = tf.gradients(kl, d, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n d = tf.stop_gradient(d)\n\n perturb = _scale_l2(_mask_by_length(d, length), perturb_norm_length)\n vadv_logits = logits_from_embedding_fn(embedded + perturb)\n return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)\n\n\ndef random_perturbation_loss_brnn(embedded, length, loss_fn, perturb_norm_length=0.1):\n \"\"\"Adds noise to embeddings and recomputes classification loss fir\n bidirectional rnn models.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n length: a `int`, length of the mask\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation to\n be optimized with validatio\n\n Returns:\n perturbation loss\n \"\"\"\n noise = [tf.random_normal(shape=tf.shape(emb)) for emb in embedded]\n masked = [_mask_by_length(n, length) for n in noise]\n scaled = [_scale_l2(m, perturb_norm_length) for m in masked]\n return loss_fn([e + s for (e, s) in zip(embedded, scaled)])\n\n\ndef adversarial_loss_brnn(embedded, loss, loss_fn, perurb_norm_length=0.1):\n \"\"\"Adds gradient to embeddings and recomputes classification loss for\n bidirectional rnn models.\n\n Args:\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]\n loss: `float`, loss\n loss_fn: a callable, that returns loss\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validatio\n\n Returns:\n adversial loss\n \"\"\"\n grads = tf.gradients(\n loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n adv_exs = [\n emb + _scale_l2(tf.stop_gradient(g), perturb_norm_length) for emb, g in zip(embedded, grads)\n ]\n return loss_fn(adv_exs)\n\n\ndef virtual_adversarial_loss_brnn(logits,\n embedded,\n labels,\n length,\n logits_from_embedding_fn,\n vocab_size,\n num_classes,\n num_power_iteration=1,\n small_constant_for_finite_diff=1e-3,\n perturb_norm_length=0.1):\n \"\"\"Virtual adversarial loss for bidirectional models Computes virtual\n adversarial perturbation by finite difference method and power iteration,\n adds it 
to the embedding, and computes the KL divergence between the new\n logits and the original logits.\n\n Args:\n logits: 2-D float `Tensor`, [num_timesteps*batch_size, m], where m=1 if\n num_classes=2, otherwise m=num_classes.\n embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim].\n labels: 1-D `Tensor`, input labels\n length: a `int`, input length\n logits_from_embedding_fn: callable that takes embeddings and returns\n classifier logits.\n num_classes: num_classes for training\n vocab_size: a `int`, vocabular size of the problem\n num_power_iteration: a `int`, the number of power iteration\n small_constant_for_finite_diff: a `float`, Small constant for finite difference method\n perturb_norm_length: a `float`, Norm length of adversarial perturbation\n to be optimized with validatio\n\n Returns:\n a `float` `scalar`, KL divergence.\n \"\"\"\n logits = tf.stop_gradient(logits)\n weights = _end_of_seq_mask(labels, vocab_size)\n\n perturbs = [_mask_by_length(tf.random_normal(shape=tf.shape(emb)), length) for emb in embedded]\n for _ in range(num_power_iteration):\n perturbs = [_scale_l2(d, small_constant_for_finite_diff) for d in perturbs]\n d_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])\n kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)\n perturbs = tf.gradients(\n kl, perturbs, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n perturbs = [tf.stop_gradient(d) for d in perturbs]\n\n perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length) for d in perturbs]\n vadv_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])\n return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)\n\n\ndef _mask_by_length(t, length):\n maxlen = t.get_shape().as_list()[1]\n mask = tf.sequence_mask(length, maxlen=maxlen)\n mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)\n return t * mask\n\n\ndef _scale_l2(x, norm_length):\n alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12\n l2_norm = alpha * tf.sqrt(tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)\n x_unit = x / l2_norm\n return norm_length * x_unit\n\n\ndef _end_of_seq_mask(tokens, vocab_size):\n \"\"\"Generate a mask for the EOS token (1.0 on EOS, 0.0 otherwise).\n\n Args:\n tokens: 1-D integer `Tensor` [num_timesteps*batch_size]. 
Each element is an\n id from the vocab.\n vocab_size: a `int`, vocabular size of the problem\n\n Returns:\n Float 1-D `Tensor` same shape as tokens, whose values are 1.0 on the end of\n sequence and 0.0 on the others.\n \"\"\"\n eos_id = vocab_size - 1\n return tf.cast(tf.equal(tokens, eos_id), tf.float32)\n\n\ndef _kl_divergence_with_logits(q_logits, p_logits, weights, num_classes):\n \"\"\"Returns weighted KL divergence between distributions q and p.\n\n Args:\n q_logits: logits for 1st argument of KL divergence shape\n [num_timesteps * batch_size, num_classes] if num_classes > 2, and\n [num_timesteps * batch_size] if num_classes == 2.\n p_logits: logits for 2nd argument of KL divergence with same shape q_logits.\n weights: 1-D `float` tensor with shape [num_timesteps * batch_size].\n Elements should be 1.0 only on end of sequences\n num_classes: a `int`, number of training classes\n\n Returns:\n a `float` `scalar`, KL divergence.\n \"\"\"\n if num_classes == 2:\n q = tf.nn.sigmoid(q_logits)\n p = tf.nn.sigmoid(p_logits)\n kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +\n f.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))\n\n else:\n q = tf.nn.softmax(q_logits)\n p = tf.nn.softmax(p_logits)\n kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)\n\n num_labels = tf.reduce_sum(weights)\n num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)\n\n kl.get_shape().assert_has_rank(2)\n weights.get_shape().assert_has_rank(1)\n loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl')\n return loss\n\n\ndef cross_entropy_sequence_loss(logits, targets, sequence_length):\n \"\"\"Calculates the per-example cross-entropy loss for a sequence of logits and\n masks out all losses passed the sequence length.\n\n Args:\n logits: Logits of shape `[T, B, vocab_size]`\n targets: Target classes of shape `[T, B]`\n sequence_length: An int32 tensor of shape `[B]` corresponding\n to the length of each input\n\n Returns:\n A tensor of shape [T, B] that contains the loss per example, per time step.\n \"\"\"\n with tf.name_scope(\"cross_entropy_sequence_loss\"):\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)\n loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))\n losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])\n\n return losses\n\n\ndef dice_loss(predictions, targets, weights=1., name='dice_loss'):\n with tf.name_scope(name):\n # predictions = tf.to_float(predictions)\n targets = tf.to_float(targets)\n intersection = 2 * tf.reduce_sum(predictions * targets) + weights\n union = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets)\n loss = -(intersection / (union))\n return loss\n\n\ndef precision_recall_auc_loss(labels,\n logits,\n precision_range=(0.0, 1.0),\n num_anchors=20,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes precision-recall AUC loss.\n The loss is based on a sum of losses for recall at a range of\n precision values (anchor points). This sum is a Riemann sum that\n approximates the area under the precision-recall curve.\n The per-example `weights` argument changes not only the coefficients of\n individual training examples, but how the examples are counted toward the\n constraint. 
If `label_priors` is given, it MUST take `weights` into account.\n That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n precision_range: A length-two tuple, the range of precision values over\n which to compute AUC. The entries must be nonnegative, increasing, and\n less than or equal to 1.0.\n num_anchors: The number of grid points used to approximate the Riemann sum.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n lambdas_initializer: An initializer for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [1, num_labels, num_anchors] consisting of the\n Lagrange multipliers.\n biases: A Tensor of shape [1, num_labels, num_anchors] consisting of the\n learned bias term for each.\n label_priors: A Tensor of shape [1, num_labels, 1] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. 
This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n with tf.variable_scope(scope, 'precision_recall_auc', [labels, logits, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create Tensor of anchor points and distance between anchors.\n precision_values, delta = _range_to_anchors_and_delta(precision_range, num_anchors, logits.dtype)\n # Create lambdas with shape [1, num_labels, num_anchors].\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[1, num_labels, num_anchors],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Create biases with shape [1, num_labels, num_anchors].\n biases = tf.contrib.framework.model_variable(\n name='biases',\n shape=[1, num_labels, num_anchors],\n dtype=logits.dtype,\n initializer=tf.zeros_initializer(),\n collections=variables_collections,\n trainable=trainable)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n label_priors = tf.reshape(label_priors, [1, num_labels, 1])\n\n # Expand logits, labels, and weights to shape [batch_size, num_labels, 1].\n logits = tf.expand_dims(logits, 2)\n labels = tf.expand_dims(labels, 2)\n weights = tf.expand_dims(weights, 2)\n\n # Calculate weighted loss and other outputs. The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n loss = weights * losses_utils.weighted_surrogate_loss(\n labels,\n logits + biases,\n surrogate_type=surrogate_type,\n positive_weights=1.0 + lambdas * (1.0 - precision_values),\n negative_weights=lambdas * precision_values)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2\n per_anchor_loss = loss - lambda_term\n per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)\n # Normalize the AUC such that a perfect score function will have AUC 1.0.\n # Because precision_range is discretized into num_anchors + 1 intervals\n # but only num_anchors terms are included in the Riemann sum, the\n # effective length of the integration interval is `delta` less than the\n # length of precision_range.\n scaled_loss = tf.div(\n per_label_loss, precision_range[1] - precision_range[0] - delta, name='AUC_Normalize')\n scaled_loss = tf.reshape(scaled_loss, original_shape)\n\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'biases':\n biases,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return scaled_loss, other_outputs\n\n\ndef roc_auc_loss(labels, logits, weights=1.0, surrogate_type='xent', scope=None):\n \"\"\"Computes ROC AUC loss.\n The area under the ROC curve is the probability p that a randomly chosen\n positive example will be scored higher than a randomly chosen negative\n example. 
This loss approximates 1-p by using a surrogate (either hinge loss or\n cross entropy) for the indicator function. Specifically, the loss is:\n sum_i sum_j w_i*w_j*loss(logit_i - logit_j)\n where i ranges over the positive datapoints, j ranges over the negative\n datapoints, logit_k denotes the logit (or score) of the k-th datapoint, and\n loss is either the hinge or log loss given a positive label.\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape and dtype as `labels`.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for the indicator function.\n scope: Optional scope for `name_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise loss.\n other_outputs: An empty dictionary, for consistency.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]):\n # Convert inputs to tensors and standardize dtypes.\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n\n # Create tensors of pairwise differences for logits and labels, and\n # pairwise products of weights. These have shape\n # [batch_size, batch_size, num_labels].\n logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)\n labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1)\n weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1)\n\n signed_logits_difference = labels_difference * logits_difference\n raw_loss = losses_utils.weighted_surrogate_loss(\n labels=tf.ones_like(signed_logits_difference),\n logits=signed_logits_difference,\n surrogate_type=surrogate_type)\n weighted_loss = weights_product * raw_loss\n\n # Zero out entries of the loss where labels_difference zero (so loss is only\n # computed on pairs with different labels).\n loss = tf.reduce_mean(tf.abs(labels_difference) * weighted_loss, 0) * 0.5\n loss = tf.reshape(loss, original_shape)\n return loss, {}\n\n\ndef recall_at_precision_loss(labels,\n logits,\n target_precision,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes recall at precision loss.\n The loss is based on a surrogate of the form\n wt * w(+) * loss(+) + wt * w(-) * loss(-) - c * pi,\n where:\n - w(+) = 1 + lambdas * (1 - target_precision)\n - loss(+) is the cross-entropy loss on the positive examples\n - w(-) = lambdas * target_precision\n - loss(-) is the cross-entropy loss on the negative examples\n - wt is a scalar or tensor of per-example weights\n - c = lambdas * (1 - target_precision)\n - pi is the label_priors.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_precision: The precision at which to compute the loss. 
Can be a\n floating point value between 0 and 1 for a single precision value, or a\n `Tensor` of shape [num_labels], holding each label's target precision\n value.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n lambdas_initializer: An initializer for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `logits` and `labels` do not have the same shape.\n \"\"\"\n with tf.variable_scope(scope, 'recall_at_precision', [logits, labels, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n target_precision = losses_utils.convert_and_cast(target_precision, 'target_precision',\n logits.dtype)\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create lambdas.\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[num_labels],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n\n # Calculate weighted loss and other outputs. 
The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n weighted_loss = weights * losses_utils.weighted_surrogate_loss(\n labels,\n logits,\n surrogate_type=surrogate_type,\n positive_weights=1.0 + lambdas * (1.0 - target_precision),\n negative_weights=lambdas * target_precision)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * (1.0 - target_precision) * label_priors * maybe_log2\n loss = tf.reshape(weighted_loss - lambda_term, original_shape)\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return loss, other_outputs\n\n\ndef precision_at_recall_loss(labels,\n logits,\n target_recall,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes precision at recall loss.\n The loss is based on a surrogate of the form\n wt * loss(-) + lambdas * (pi * (b - 1) + wt * loss(+))\n where:\n - loss(-) is the cross-entropy loss on the negative examples\n - loss(+) is the cross-entropy loss on the positive examples\n - wt is a scalar or tensor of per-example weights\n - b is the target recall\n - pi is the label_priors.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_recall: The recall at which to compute the loss. Can be a floating\n point value between 0 and 1 for a single target recall value, or a\n `Tensor` of shape [num_labels] holding each label's target recall value.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n lambdas_initializer: An initializer for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. 
To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. This is the same upper bound which is used\n in the loss expression to be optimized.\n \"\"\"\n with tf.variable_scope(scope, 'precision_at_recall', [logits, labels, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n target_recall = losses_utils.convert_and_cast(target_recall, 'target_recall', logits.dtype)\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create lambdas.\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[num_labels],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n\n # Calculate weighted loss and other outputs. The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n weighted_loss = weights * losses_utils.weighted_surrogate_loss(\n labels, logits, surrogate_type, positive_weights=lambdas, negative_weights=1.0)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * label_priors * (target_recall - 1.0) * maybe_log2\n loss = tf.reshape(weighted_loss + lambda_term, original_shape)\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return loss, other_outputs\n\n\ndef false_positive_rate_at_true_positive_rate_loss(labels,\n logits,\n target_rate,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes false positive rate at true positive rate loss.\n Note that `true positive rate` is a synonym for Recall, and that minimizing\n the false positive rate and maximizing precision are equivalent for a fixed\n Recall. 
Therefore, this function is identical to precision_at_recall_loss.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_rate: The true positive rate at which to compute the loss. Can be a\n floating point value between 0 and 1 for a single true positive rate, or\n a `Tensor` of shape [num_labels] holding each label's true positive rate.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions. 'xent' will use the cross-entropy\n loss surrogate, and 'hinge' will use the hinge loss.\n lambdas_initializer: An initializer op for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. 
This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n return precision_at_recall_loss(\n labels=labels,\n logits=logits,\n target_recall=target_rate,\n weights=weights,\n dual_rate_factor=dual_rate_factor,\n label_priors=label_priors,\n surrogate_type=surrogate_type,\n lambdas_initializer=lambdas_initializer,\n reuse=reuse,\n variables_collections=variables_collections,\n trainable=trainable,\n scope=scope)\n\n\ndef true_positive_rate_at_false_positive_rate_loss(labels,\n logits,\n target_rate,\n weights=1.0,\n dual_rate_factor=0.1,\n label_priors=None,\n surrogate_type='xent',\n lambdas_initializer=tf.constant_initializer(1.0),\n reuse=None,\n variables_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Computes true positive rate at false positive rate loss.\n The loss is based on a surrogate of the form\n wt * loss(+) + lambdas * (wt * loss(-) - r * (1 - pi))\n where:\n - loss(-) is the loss on the negative examples\n - loss(+) is the loss on the positive examples\n - wt is a scalar or tensor of per-example weights\n - r is the target rate\n - pi is the label_priors.\n The per-example weights change not only the coefficients of individual\n training examples, but how the examples are counted toward the constraint.\n If `label_priors` is given, it MUST take `weights` into account. That is,\n label_priors = P / (P + N)\n where\n P = sum_i (wt_i on positives)\n N = sum_i (wt_i on negatives).\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n target_rate: The false positive rate at which to compute the loss. Can be a\n floating point value between 0 and 1 for a single false positive rate, or\n a `Tensor` of shape [num_labels] holding each label's false positive rate.\n weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape\n [batch_size] or [batch_size, num_labels].\n dual_rate_factor: A floating point value which controls the step size for\n the Lagrange multipliers.\n label_priors: None, or a floating point `Tensor` of shape [num_labels]\n containing the prior probability of each label (i.e. the fraction of the\n training data consisting of positive examples). If None, the label\n priors are computed from `labels` with a moving average. See the notes\n above regarding the interaction with `weights` and do not set this unless\n you have a good reason to do so.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions. 'xent' will use the cross-entropy\n loss surrogate, and 'hinge' will use the hinge loss.\n lambdas_initializer: An initializer op for the Lagrange multipliers.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for the variables.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n Returns:\n loss: A `Tensor` of the same shape as `logits` with the component-wise\n loss.\n other_outputs: A dictionary of useful internal quantities for debugging. 
For\n more details, see http://arxiv.org/pdf/1608.04802.pdf.\n lambdas: A Tensor of shape [num_labels] consisting of the Lagrange\n multipliers.\n label_priors: A Tensor of shape [num_labels] consisting of the prior\n probability of each label learned by the loss, if not provided.\n true_positives_lower_bound: Lower bound on the number of true positives\n given `labels` and `logits`. This is the same lower bound which is used\n in the loss expression to be optimized.\n false_positives_upper_bound: Upper bound on the number of false positives\n given `labels` and `logits`. This is the same upper bound which is used\n in the loss expression to be optimized.\n Raises:\n ValueError: If `surrogate_type` is not `xent` or `hinge`.\n \"\"\"\n with tf.variable_scope(scope, 'tpr_at_fpr', [labels, logits, label_priors], reuse=reuse):\n labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)\n num_labels = losses_utils.get_num_labels(logits)\n\n # Convert other inputs to tensors and standardize dtypes.\n target_rate = losses_utils.convert_and_cast(target_rate, 'target_rate', logits.dtype)\n dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',\n logits.dtype)\n\n # Create lambdas.\n lambdas, lambdas_variable = _create_dual_variable(\n 'lambdas',\n shape=[num_labels],\n dtype=logits.dtype,\n initializer=lambdas_initializer,\n collections=variables_collections,\n trainable=trainable,\n dual_rate_factor=dual_rate_factor)\n # Maybe create label_priors.\n label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)\n\n # Loss op and other outputs. The log(2.0) term corrects for\n # logloss not being an upper bound on the indicator function.\n weighted_loss = weights * losses_utils.weighted_surrogate_loss(\n labels,\n logits,\n surrogate_type=surrogate_type,\n positive_weights=1.0,\n negative_weights=lambdas)\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n lambda_term = lambdas * target_rate * (1.0 - label_priors) * maybe_log2\n loss = tf.reshape(weighted_loss - lambda_term, original_shape)\n other_outputs = {\n 'lambdas':\n lambdas_variable,\n 'label_priors':\n label_priors,\n 'true_positives_lower_bound':\n true_positives_lower_bound(labels, logits, weights, surrogate_type),\n 'false_positives_upper_bound':\n false_positives_upper_bound(labels, logits, weights, surrogate_type)\n }\n\n return loss, other_outputs\n\n\ndef _prepare_labels_logits_weights(labels, logits, weights):\n \"\"\"Validates labels, logits, and weights.\n Converts inputs to tensors, checks shape compatibility, and casts dtype if\n necessary.\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` with the same shape as `labels`.\n weights: Either `None` or a `Tensor` with shape broadcastable to `logits`.\n Returns:\n labels: Same as `labels` arg after possible conversion to tensor, cast, and\n reshape.\n logits: Same as `logits` arg after possible conversion to tensor and\n reshape.\n weights: Same as `weights` arg after possible conversion, cast, and reshape.\n original_shape: Shape of `labels` and `logits` before reshape.\n Raises:\n ValueError: If `labels` and `logits` do not have the same shape.\n \"\"\"\n # Convert `labels` and `logits` to Tensors and standardize dtypes.\n logits = tf.convert_to_tensor(logits, name='logits')\n labels = losses_utils.convert_and_cast(labels, 'labels', 
logits.dtype.base_dtype)\n weights = losses_utils.convert_and_cast(weights, 'weights', logits.dtype.base_dtype)\n\n try:\n labels.get_shape().merge_with(logits.get_shape())\n except ValueError:\n raise ValueError('logits and labels must have the same shape (%s vs %s)' % (logits.get_shape(),\n labels.get_shape()))\n\n original_shape = labels.get_shape().as_list()\n if labels.get_shape().ndims > 0:\n original_shape[0] = -1\n if labels.get_shape().ndims <= 1:\n labels = tf.reshape(labels, [-1, 1])\n logits = tf.reshape(logits, [-1, 1])\n\n if weights.get_shape().ndims == 1:\n # Weights has shape [batch_size]. Reshape to [batch_size, 1].\n weights = tf.reshape(weights, [-1, 1])\n if weights.get_shape().ndims == 0:\n # Weights is a scalar. Change shape of weights to match logits.\n weights *= tf.ones_like(logits)\n\n return labels, logits, weights, original_shape\n\n\ndef _range_to_anchors_and_delta(precision_range, num_anchors, dtype):\n \"\"\"Calculates anchor points from precision range.\n Args:\n precision_range: As required in precision_recall_auc_loss.\n num_anchors: int, number of equally spaced anchor points.\n dtype: Data type of returned tensors.\n Returns:\n precision_values: A `Tensor` of data type dtype with equally spaced values\n in the interval precision_range.\n delta: The spacing between the values in precision_values.\n Raises:\n ValueError: If precision_range is invalid.\n \"\"\"\n # Validate precision_range.\n if not 0 <= precision_range[0] <= precision_range[-1] <= 1:\n raise ValueError(\n 'precision values must obey 0 <= %f <= %f <= 1' % (precision_range[0], precision_range[-1]))\n if not 0 < len(precision_range) < 3:\n raise ValueError('length of precision_range (%d) must be 1 or 2' % len(precision_range))\n\n # Sets precision_values uniformly between min_precision and max_precision.\n values = np.linspace(start=precision_range[0], stop=precision_range[1], num=num_anchors + 2)[1:-1]\n precision_values = losses_utils.convert_and_cast(values, 'precision_values', dtype)\n delta = losses_utils.convert_and_cast(values[0] - precision_range[0], 'delta', dtype)\n # Makes precision_values [1, 1, num_anchors].\n precision_values = losses_utils.expand_outer(precision_values, 3)\n return precision_values, delta\n\n\ndef _create_dual_variable(name, shape, dtype, initializer, collections, trainable, dual_rate_factor):\n \"\"\"Creates a new dual variable.\n Dual variables are required to be nonnegative. If trainable, their gradient\n is reversed so that they are maximized (rather than minimized) by the\n optimizer.\n Args:\n name: A string, the name for the new variable.\n shape: Shape of the new variable.\n dtype: Data type for the new variable.\n initializer: Initializer for the new variable.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n dual_rate_factor: A floating point value or `Tensor`. 
The learning rate for\n the dual variable is scaled by this factor.\n Returns:\n dual_value: An op that computes the absolute value of the dual variable\n and reverses its gradient.\n dual_variable: The underlying variable itself.\n \"\"\"\n # We disable partitioning while constructing dual variables because they will\n # be updated with assign, which is not available for partitioned variables.\n partitioner = tf.get_variable_scope().partitioner\n try:\n tf.get_variable_scope().set_partitioner(None)\n dual_variable = tf.contrib.framework.model_variable(\n name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n collections=collections,\n trainable=trainable)\n finally:\n tf.get_variable_scope().set_partitioner(partitioner)\n # Using the absolute value enforces nonnegativity.\n dual_value = tf.abs(dual_variable)\n\n if trainable:\n # To reverse the gradient on the dual variable, multiply the gradient by\n # -dual_rate_factor\n dual_value = (tf.stop_gradient(\n (1.0 + dual_rate_factor) * dual_value) - dual_rate_factor * dual_value)\n return dual_value, dual_variable\n\n\ndef maybe_create_label_priors(label_priors, labels, weights, variables_collections):\n \"\"\"Creates moving average ops to track label priors, if necessary.\n Args:\n label_priors: As required in e.g. precision_recall_auc_loss.\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n weights: As required in e.g. precision_recall_auc_loss.\n variables_collections: Optional list of collections for the variables, if\n any must be created.\n Returns:\n label_priors: A Tensor of shape [num_labels] consisting of the\n weighted label priors, after updating with moving average ops if created.\n \"\"\"\n if label_priors is not None:\n label_priors = losses_utils.convert_and_cast(\n label_priors, name='label_priors', dtype=labels.dtype.base_dtype)\n return tf.squeeze(label_priors)\n\n label_priors = losses_utils.build_label_priors(\n labels, weights, variables_collections=variables_collections)\n return label_priors\n\n\ndef true_positives_lower_bound(labels, logits, weights, surrogate_type):\n \"\"\"Calculate a lower bound on the number of true positives.\n This lower bound on the number of true positives given `logits` and `labels`\n is the same one used in the global objectives loss functions.\n Args:\n labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].\n logits: A `Tensor` of shape [batch_size, num_labels] or\n [batch_size, num_labels, num_anchors]. 
If the third dimension is present,\n the lower bound is computed on each slice [:, :, k] independently.\n weights: Per-example loss coefficients, with shape broadcast-compatible with\n that of `labels`.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n Returns:\n A `Tensor` of shape [num_labels] or [num_labels, num_anchors].\n \"\"\"\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n if logits.get_shape().ndims == 3 and labels.get_shape().ndims < 3:\n labels = tf.expand_dims(labels, 2)\n loss_on_positives = losses_utils.weighted_surrogate_loss(\n labels, logits, surrogate_type, negative_weights=0.0) / maybe_log2\n return tf.reduce_sum(weights * (labels - loss_on_positives), 0)\n\n\ndef false_positives_upper_bound(labels, logits, weights, surrogate_type):\n \"\"\"Calculate an upper bound on the number of false positives.\n This upper bound on the number of false positives given `logits` and `labels`\n is the same one used in the global objectives loss functions.\n Args:\n labels: A `Tensor` of shape [batch_size, num_labels]\n logits: A `Tensor` of shape [batch_size, num_labels] or\n [batch_size, num_labels, num_anchors]. If the third dimension is present,\n the lower bound is computed on each slice [:, :, k] independently.\n weights: Per-example loss coefficients, with shape broadcast-compatible with\n that of `labels`.\n surrogate_type: Either 'xent' or 'hinge', specifying which upper bound\n should be used for indicator functions.\n Returns:\n A `Tensor` of shape [num_labels] or [num_labels, num_anchors].\n \"\"\"\n maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0\n maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)\n loss_on_negatives = losses_utils.weighted_surrogate_loss(\n labels, logits, surrogate_type, positive_weights=0.0) / maybe_log2\n return tf.reduce_sum(weights * loss_on_negatives, 0)\n",
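A minimal NumPy sketch, with illustrative names, of the weighted KL-divergence step that the embedded `_kl_divergence_with_logits` performs in its multi-class branch (softmax both logit sets, take KL(q || p) per row, then average over the rows flagged by the end-of-sequence weights); this is an aside under those assumptions, not part of the archived file:

import numpy as np

def softmax(logits):
    # Numerically stable softmax along the class axis.
    z = logits - logits.max(axis=-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

def weighted_kl_with_logits(q_logits, p_logits, weights):
    # KL(q || p) per row, averaged over rows where weights == 1.0.
    q = softmax(q_logits)
    p = softmax(p_logits)
    kl = np.sum(q * (np.log(q) - np.log(p)), axis=-1)  # shape [num_rows]
    num_labels = max(float(weights.sum()), 1.0)        # guard against all-zero weights
    return float(np.sum(weights * kl) / num_labels)

# Toy usage: 4 rows (num_timesteps * batch_size), 3 classes; rows 1 and 3 are EOS positions.
rng = np.random.default_rng(0)
q_logits = rng.normal(size=(4, 3))
p_logits = q_logits + 0.01 * rng.normal(size=(4, 3))
weights = np.array([0.0, 1.0, 0.0, 1.0])
print(weighted_kl_with_logits(q_logits, p_logits, weights))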
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport six\nimport os\n\nimport tensorflow as tf\nfrom . import text_encoder\nfrom .texttfrecords import TextTFRecord\n\nUNSHUFFLED_SUFFIX = \"-unshuffled\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass TextDataset():\n\n def __init__(self, data_dir, vocab_name, dataset_name):\n self._vocab_name = vocab_name\n self._dataset_name = dataset_name\n self._data_dir = data_dir\n self.tfrecords = TextTFRecord()\n\n @property\n def is_character_level(self):\n raise NotImplementedError()\n\n @property\n def has_inputs(self):\n return True\n\n @property\n def data_dir(self):\n return self._data_dir\n\n @property\n def input_space_id(self):\n raise NotImplementedError()\n\n @property\n def target_space_id(self):\n raise NotImplementedError()\n\n @property\n def num_shards(self):\n raise NotImplementedError()\n\n @property\n def num_dev_shards(self):\n return 1\n\n @property\n def vocab_name(self):\n return self._vocab_name\n\n @property\n def vocab_file(self):\n return \"%s.%d\" % (self.vocab_name, self.targeted_vocab_size)\n\n @property\n def dataset_name(self):\n return self_dataset_name\n\n @property\n def use_subword_tokenizer(self):\n raise NotImplementedError()\n\n @property\n def targeted_vocab_size(self):\n raise NotImplementedError()\n\n @property\n def use_train_shards_for_dev(self):\n return False\n\n @abc.abstractmethod\n def generator(self, tmp_dir, train, *args, **kwargs):\n \"\"\"Generator for lm1b sentences.\n\n Args:\n tmp_dir: a string.\n train: a boolean.\n characters: a boolean\n\n Yields:\n A dictionary {\"inputs\": [0], \"targets\": [<subword ids>]}\n \"\"\"\n raise NotImplementedError()\n\n # def feature_encoders(self):\n # return {\"inputs\": text_encoder.TextEncoder(), \"targets\": text_encoder.TextEncoder()}\n\n def example_reading_spec(self):\n data_fields = {\"inputs\": tf.VarLenFeature(tf.int64), \"targets\": tf.VarLenFeature(tf.int64)}\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n def generate_data(self, tmp_dir, task_id=-1):\n train_paths = self.training_filepaths(self.num_shards)\n dev_paths = self.dev_filepaths(self.num_dev_shards)\n if self.use_train_shards_for_dev:\n all_paths = train_paths + dev_paths\n self.tfrecords.generate_files(self.generator(self._data_dir, tmp_dir, True), all_paths)\n self.tfrecords.shuffle_dataset(train_paths)\n else:\n self.tfrecords.generate_dataset_and_shuffle(\n self.generator(self._data_dir, tmp_dir, True), train_paths,\n self.generator(self._data_dir, tmp_dir, False), dev_paths)\n\n def feature_encoders(self):\n if self.is_character_level:\n encoder = text_encoder.ByteTextEncoder()\n elif self.use_subword_tokenizer:\n vocab_filename = os.path.join(self._data_dir, self.vocab_file)\n encoder = text_encoder.SubwordTextEncoder(vocab_filename)\n else:\n vocab_filename = os.path.join(self._data_dir, self.vocab_file)\n encoder = text_encoder.TokenTextEncoder(vocab_filename)\n if self.has_inputs:\n return {\"inputs\": encoder, \"targets\": encoder}\n return {\"targets\": encoder}\n\n def training_filepaths(self, num_shards):\n return self.train_data_filenames(num_shards)\n\n def dev_filepaths(self, num_shards):\n return self.dev_data_filenames(num_shards)\n\n def test_filepaths(self, num_shards):\n return self.test_data_filenames(num_shards)\n\n def _data_filenames(self, output_name, output_dir, num_shards):\n return [\n os.path.join(output_dir, fname) for fname in 
self.shard_filepath(output_name, num_shards)\n ]\n\n def train_data_filenames(self, num_shards):\n return self._data_filenames(self._dataset_name + UNSHUFFLED_SUFFIX + \"-train\", self._data_dir,\n num_shards)\n\n def dev_data_filenames(self, num_shards):\n return self._data_filenames(self._dataset_name + \"-dev\", self._data_dir, num_shards)\n\n def test_data_filenames(self, num_shards):\n return self._data_filenames(self.dataset_name + \"-test\", self._data_dir, num_shards)\n\n def combined_data_filenames(self, num_training_shards):\n return (self.train_data_filenames(num_training_shards) + self.dev_data_filenames(1) +\n self.test_data_filenames(1))\n\n def sharded_name(self, base_name, shard, total_shards):\n return \"%s-%.5d-of-%.5d\" % (base_name, shard, total_shards)\n\n def shard_filepath(self, fname, num_shards):\n return [self.sharded_name(fname, shard, num_shards) for shard in range(num_shards)]\n\n def get_data_filepatterns(self, mode='training'):\n datasets = []\n data_dir = os.path.join(self._data_dir, self._dataset_name)\n if mode == 'training':\n datasets.append(\"%s-train*\" % data_dir)\n elif mode == 'eval':\n datasets.append(\"%s-dev*\" % data_dir)\n else:\n datasets.append(\"%s-train*\" % data_dir)\n datasets.append(\"%s-dev*\" % data_dir)\n return datasets\n\n def get_data_files(self, data_sources):\n \"\"\"Get data_files from data_sources.\n\n Args:\n data_sources: a list/tuple of files or the location of the data, i.e.\n /path/to/train@128, /path/to/train* or /tmp/.../train*\n\n Returns:\n a list of data_files.\n\n Raises:\n ValueError: if not data files are not found\n \"\"\"\n if isinstance(data_sources, (list, tuple)):\n data_files = []\n for source in data_sources:\n data_files += self.get_data_files(source)\n else:\n if '*' in data_sources or '?' in data_sources or '[' in data_sources:\n data_files = tf.gfile.Glob(data_sources)\n else:\n data_files = [data_sources]\n if not data_files:\n raise ValueError('No data files found in %s' % (data_sources,))\n return data_files\n\n\nclass SpaceID(object):\n \"\"\"Input and target space ids.\n\n Add more as needed.\n \"\"\"\n # Generic / unknown output space (default)\n GENERIC = 0\n # Image labels\n IMAGE_LABEL = 1\n # English characters\n EN_CHR = 2\n # English tokens\n EN_TOK = 3\n # English bpe tokens\n EN_BPE_TOK = 4\n # French characters\n FR_CHR = 5\n # French tokens\n FR_TOK = 6\n # German characters\n DE_CHR = 7\n # German tokens\n DE_TOK = 8\n # German bpe tokens\n DE_BPE_TOK = 9\n # Digit cipher lexicon 0\n DIGIT_0 = 10\n # Digit cipher lexicon 1\n DIGIT_1 = 11\n # Audio waveform domain\n AUDIO_WAV = 12\n # Audio spectral domain\n AUDIO_SPECTRAL = 13\n # Parse characters\n PARSE_CHR = 14\n # Parse tokens\n PARSE_TOK = 15\n # Chinese tokens\n ZH_TOK = 16\n # Icelandic characters\n ICE_CHAR = 17\n # Icelandic tokens\n ICE_TOK = 18\n # Icelandic parse tokens\n ICE_PARSE_TOK = 19\n # Macedonian tokens\n MK_TOK = 20\n # Czech tokens\n CS_TOK = 21\n # Czech characters\n CS_CHR = 22\n # Genetic bases (ACTG)\n DNA = 23\n # Real numbers\n REAL = 24\n # Images\n IMAGE = 25\n # Peptide\n PEPTIDE = 26\n # Python\n PY_TOK = 27\n # C++\n CPP_TOK = 28\n",
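An illustrative sketch of the shard-naming convention the embedded TextDataset class relies on ("%s-%.5d-of-%.5d" plus the "-unshuffled" suffix for training shards); the directory, dataset name, and helper names below are made up for the example:

import os

UNSHUFFLED_SUFFIX = "-unshuffled"

def sharded_name(base_name, shard, total_shards):
    # e.g. "lm1b-unshuffled-train-00000-of-00003"
    return "%s-%.5d-of-%.5d" % (base_name, shard, total_shards)

def train_shard_paths(data_dir, dataset_name, num_shards):
    base = dataset_name + UNSHUFFLED_SUFFIX + "-train"
    return [os.path.join(data_dir, sharded_name(base, shard, num_shards))
            for shard in range(num_shards)]

print(train_shard_paths("/tmp/data", "lm1b", 3))
# ['/tmp/data/lm1b-unshuffled-train-00000-of-00003',
#  '/tmp/data/lm1b-unshuffled-train-00001-of-00003',
#  '/tmp/data/lm1b-unshuffled-train-00002-of-00003']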
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import xrange\n\n\nclass TextDataflow(object):\n \"\"\"Dataflow handling class.\n\n Args:\n dataset: an instance of the dataset class\n num_readers: num of readers to read the dataset\n shuffle: a bool, shuffle the dataset\n num_epochs: total number of epoch for training or validation\n min_queue_examples: minimum number of items after dequeue\n capacity: total queue capacity\n \"\"\"\n\n def __init__(self,\n dataset,\n decoder,\n shuffle=True,\n num_threads=1,\n num_epochs=None,\n capacity=2048,\n max_length=None,\n min_bucket_length=8,\n length_bucket_step=1.1,\n shard_multiplier=1,\n length_multiplier=1):\n self.dataset = dataset\n self.decoder = decoder\n self.shuffle = shuffle\n self.capacity = capacity\n self.num_threads = num_threads\n self.max_length = max_length\n self.min_bucket_length = min_bucket_length\n self.length_bucket_step = length_bucket_step\n self.shard_multiplier = shard_multiplier\n self.length_multiplier = length_multiplier\n\n def get_batch(self, batch_size, preprocessing_fn=None, mode='training', drop_long_sequences=False):\n \"\"\"Input pipeline, returns a dictionary of batched and padded tensors.\n\n Args:\n problem: Problem instance for which to build the input pipeline.\n data_file_pattern: file pattern for input files.\n capacity: int, data pipeline buffer capacity.\n mode: tf.contrib.learn.ModeKeys entry.\n batching_scheme: a dictionary containing\n \"boundaries\": a list of integers for the boundaries that will be\n used for bucketing; see bucket_by_sequence_length for more details.\n \"batch_sizes\": a list of batch sizes corresponding to the buckets\n \"max_length\": an integer. 
We drop sequences which are longer.\n\n Returns:\n dict <feature name, batched and padded Tensor>\n \"\"\"\n data_file_patterns = self.dataset.get_data_filepatterns(mode=mode)\n dataset_r = self.decoder.examples_reader([data_file_patterns], bool(mode == 'training'),\n self.capacity)\n batching_scheme = self._batching_scheme(\n batch_size=batch_size,\n max_length=self.max_length,\n min_length_bucket=self.min_bucket_length,\n length_bucket_step=self.length_bucket_step,\n drop_long_sequences=drop_long_sequences,\n shard_multiplier=self.shard_multiplier,\n length_multiplier=self.length_multiplier)\n\n with tf.name_scope(\"input_pipeline\"):\n if preprocessing_fn is not None:\n dataset_r = dataset_r.map(\n lambda ex: preprocessing_fn(ex, mode), num_threads=self.num_threads)\n dataset_r = dataset_r.filter(\n lambda ex: self._example_too_big(ex, batching_scheme[\"max_length\"]))\n\n dataset_r = self.bucket_by_sequence_length(\n dataset_r, self._example_length, batching_scheme[\"boundaries\"],\n batching_scheme[\"batch_sizes\"], batching_scheme[\"window_size\"])\n # We reshuffle the batches to prevent many long-sequence batches at once.\n if batching_scheme[\"shuffle_queue_size\"] is not None:\n dataset_r = dataset_r.shuffle(batching_scheme[\"shuffle_queue_size\"])\n batched_examples = dataset_r.make_one_shot_iterator().get_next()\n return batched_examples\n\n def _example_length(self, example):\n length = 0\n # Length of the example is the maximum length of the feature lengths\n for v in example.values():\n # For images the sequence length is the size of the spatial dimensions.\n feature_length = (tf.shape(v)[0]\n if len(v.get_shape()) < 3 else tf.shape(v)[0] * tf.shape(v)[1])\n length = tf.maximum(length, feature_length)\n return length\n\n def _example_too_big(self, example, max_length):\n return tf.less_equal(self._example_length(example), max_length)\n\n def bucket_by_sequence_length(self, dataset, example_length_fn, bucket_boundaries,\n bucket_batch_sizes, window_size):\n \"\"\"Bucket entries in dataset by length.\n\n Args:\n dataset: Dataset of dict<feature name, Tensor>.\n example_length_fn: function from example to int, determines the length of\n the example, which will determine the bucket it goes into.\n bucket_boundaries: list<int>, boundaries of the buckets.\n bucket_batch_sizes: list<int>, batch size per bucket.\n window_size: an integer divisible by all elements of bucket_batch_sizes\n\n Returns:\n Dataset of padded and batched examples.\n \"\"\"\n with tf.name_scope(\"bucket_by_seq_length\"):\n\n def example_to_bucket_id(example):\n \"\"\"Return int64 id of the length bucket for this example.\"\"\"\n seq_length = example_length_fn(example)\n\n boundaries = list(bucket_boundaries)\n buckets_min = [np.iinfo(np.int32).min] + boundaries\n buckets_max = boundaries + [np.iinfo(np.int32).max]\n conditions_c = tf.logical_and(\n tf.less_equal(buckets_min, seq_length), tf.less(seq_length, buckets_max))\n bucket_id = tf.reduce_min(tf.where(conditions_c))\n\n return bucket_id\n\n def batching_fn(bucket_id, grouped_dataset):\n batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)\n batch_size = batch_sizes[bucket_id]\n\n # Pad each dimension of each feature so that they match.\n padded_shapes = dict(\n [(name, [None] * len(shape)) for name, shape in grouped_dataset.output_shapes.items()])\n return grouped_dataset.padded_batch(batch_size, padded_shapes)\n\n dataset_gbw = tf.contrib.data.group_by_window(example_to_bucket_id, batching_fn, window_size)\n return 
dataset.apply(dataset_gbw)\n\n def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n \"\"\"A default set of length-bucket boundaries.\"\"\"\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries\n\n def _batching_scheme(self,\n batch_size,\n max_length,\n min_length_bucket,\n length_bucket_step,\n drop_long_sequences=False,\n shard_multiplier=1,\n length_multiplier=1):\n \"\"\"A batching scheme based on model hyperparameters.\n\n Every batch containins a number of sequences divisible by `shard_multiplier`.\n\n Args:\n batch_size: int, total number of tokens in a batch.\n max_length: int, sequences longer than this will be skipped. Defaults to\n batch_size.\n min_length_bucket: int\n length_bucket_step: float greater than 1.0\n drop_long_sequences: bool, if True, then sequences longer than\n `max_length` are dropped. This prevents generating batches with\n more than the usual number of tokens, which can cause out-of-memory\n errors.\n shard_multiplier: an integer increasing the batch_size to suit splitting\n across datashards.\n length_multiplier: an integer multiplier that is used to increase the\n batch sizes and sequence length tolerance.\n\n Returns:\n A dictionary with parameters that can be passed to input_pipeline:\n * boundaries: list of bucket boundaries\n * batch_sizes: list of batch sizes for each length bucket\n * max_length: int, maximum length of an example\n \"\"\"\n max_length = max_length or batch_size\n boundaries = self._bucket_boundaries(max_length, min_length_bucket, length_bucket_step)\n boundaries = [boundary * length_multiplier for boundary in boundaries]\n max_length *= length_multiplier\n batch_sizes = [max(1, batch_size // length) for length in boundaries + [max_length]]\n max_batch_size = max(batch_sizes)\n # Since the Datasets API only allows a single constant for window_size,\n # and it needs divide all bucket_batch_sizes, we pick a highly-compoisite\n # window size and then round down all batch sizes to divisors of that window\n # size, so that a window can always be divided evenly into batches.\n # TODO: remove this when Dataset API improves.\n highly_composite_numbers = [\n 1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, 2520, 5040, 7560,\n 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440, 83160, 110880, 166320, 221760,\n 277200, 332640, 498960, 554400, 665280, 720720, 1081080, 1441440, 2162160, 2882880, 3603600,\n 4324320, 6486480, 7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400,\n 36756720, 43243200, 61261200, 73513440, 110270160\n ]\n window_size = max([i for i in highly_composite_numbers if i <= 3 * max_batch_size])\n divisors = [i for i in xrange(1, window_size + 1) if window_size % i == 0]\n batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes]\n window_size *= shard_multiplier\n batch_sizes = [bs * shard_multiplier for bs in batch_sizes]\n max_batches_per_window = window_size // min(batch_sizes)\n shuffle_queue_size = max_batches_per_window * 3\n ret = {\n \"boundaries\": boundaries,\n \"batch_sizes\": batch_sizes,\n \"max_length\": (max_length if drop_long_sequences else 10**9),\n \"shuffle_queue_size\": shuffle_queue_size,\n \"window_size\": window_size,\n }\n return ret\n\n def constant_batching_scheme(self, constant_batch_size_in_sequences):\n \"\"\"A batching scheme with constant batch 
size.\n\n Args:\n constant_batch_size_in_sequences: an integer\n\n Returns:\n a dictionary\n \"\"\"\n boundaries = self._bucket_boundaries(1024)\n batch_sizes = [constant_batch_size_in_sequences] * (1 + len(boundaries))\n return {\n \"boundaries\": boundaries,\n \"batch_sizes\": batch_sizes,\n \"max_length\": 10**9,\n \"shuffle_queue_size\": None,\n \"window_size\": constant_batch_size_in_sequences,\n }\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.concat",
"tensorflow.is_finite",
"tensorflow.control_dependencies",
"tensorflow.scatter_sub",
"tensorflow.reduce_sum",
"numpy.linspace",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.zeros",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.nn.l2_loss",
"tensorflow.where",
"tensorflow.losses.log_loss",
"tensorflow.to_int32",
"tensorflow.diag_part",
"tensorflow.gradients",
"tensorflow.squeeze",
"tensorflow.stop_gradient",
"tensorflow.losses.compute_weighted_loss",
"tensorflow.gather",
"tensorflow.div",
"tensorflow.subtract",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.to_float",
"tensorflow.square",
"tensorflow.nn.softplus",
"tensorflow.nn.l2_normalize",
"tensorflow.matmul",
"numpy.log",
"tensorflow.nn.sigmoid",
"tensorflow.shape",
"tensorflow.pow",
"tensorflow.zeros_initializer",
"tensorflow.exp",
"tensorflow.nn.tanh",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.no_op",
"tensorflow.round",
"tensorflow.sequence_mask",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.range",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.expand_dims",
"tensorflow.ones_like",
"tensorflow.contrib.framework.model_variable",
"tensorflow.constant_initializer",
"tensorflow.ones",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.abs"
],
[
"tensorflow.gfile.Glob",
"tensorflow.VarLenFeature"
],
[
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.maximum",
"tensorflow.less_equal",
"tensorflow.name_scope",
"tensorflow.where",
"numpy.iinfo",
"tensorflow.contrib.data.group_by_window"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7"
]
}
] |
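The record above ends with a tensor2tensor-style input pipeline that buckets examples by sequence length and sizes each bucket's batch so the total token count per batch stays roughly constant. As a minimal sketch of that arithmetic, assuming a token budget rather than a fixed number of sequences per batch (pure Python, no TensorFlow dependency; the two helpers below mirror the intent of _bucket_boundaries and the start of _batching_scheme but are standalone re-implementations, not the stored functions):

# Standalone sketch of the length-bucketing arithmetic in the stored pipeline.
def bucket_boundaries(max_length, min_length=8, step=1.1):
    """Geometrically spaced sequence-length boundaries."""
    assert min_length <= max_length and step > 1.0
    x, boundaries = min_length, []
    while x < max_length:
        boundaries.append(x)
        x = max(x + 1, int(x * step))
    return boundaries

def bucket_batch_sizes(token_budget, boundaries, max_length):
    """One batch size per bucket so that batch_size * bucket_length <= token_budget."""
    return [max(1, token_budget // length) for length in boundaries + [max_length]]

if __name__ == "__main__":
    bounds = bucket_boundaries(max_length=256)
    sizes = bucket_batch_sizes(token_budget=4096, boundaries=bounds, max_length=256)
    for bound, size in zip(bounds, sizes):
        # Short sequences get large batches, long sequences small ones.
        print(f"sequences up to length {bound:>3}: batch size {size}")

In recent TensorFlow releases the same grouping behaviour is exposed directly as tf.data.Dataset.bucket_by_sequence_length, which supersedes the tf.contrib.data.group_by_window call used in the stored code.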
rusty-fast-solvers/rusty-green-kernel | [
"9317f88e873550270c482473005250a9d2df2950"
] | [
"rusty_green_kernel/test/test_rusty_green_kernel.py"
] | [
"\"\"\"Unit tests for direct assembly and evaluation of kernels.\"\"\"\nimport numpy as np\nimport pytest\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_laplace_assemble(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import assemble_laplace_kernel\n\n nsources = 10\n ntargets = 20\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n\n actual = assemble_laplace_kernel(sources, targets, dtype=dtype, parallel=parallel)\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((ntargets, nsources), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n expected[index, :] = 1.0 / (\n 4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n )\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_laplace_evaluate_only_values(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype)\n\n actual = evaluate_laplace_kernel(\n sources, targets, charges, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((nsources, ntargets), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n expected[:, index] = 1.0 / (\n 4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n )\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n expected = np.expand_dims(charges @ expected, -1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_laplace_evaluate_values_and_deriv(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), 
dtype=dtype)\n\n actual = evaluate_laplace_kernel(\n sources, targets, charges, dtype=dtype, return_gradients=True, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets, 4), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n diff = sources - target.reshape(3, 1)\n dist = np.linalg.norm(diff, axis=0)\n expected[:, index, 0] = 1.0 / (4 * np.pi * dist)\n expected[:, index, 1:] = diff.T / (4 * np.pi * dist.reshape(nsources, 1) ** 3)\n expected[dist == 0, index, :] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n expected = np.tensordot(charges, expected, 1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.complex128, 1e-14), (np.complex64, 5e-6)])\ndef test_helmholtz_assemble(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import assemble_helmholtz_kernel\n\n wavenumber = 2.5\n\n nsources = 10\n ntargets = 20\n\n if dtype == np.complex128:\n real_type = np.float64\n elif dtype == np.complex64:\n real_type = np.float32\n else:\n raise ValueError(f\"Unsupported type: {dtype}.\")\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=real_type)\n sources = rng.random((3, nsources), dtype=real_type)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n\n actual = assemble_helmholtz_kernel(\n sources, targets, wavenumber, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((ntargets, nsources), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[index, :] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)\n expected[index, dist == 0] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"dtype,rtol\", [(np.complex128, 1e-14), (np.complex64, 5e-6)])\ndef test_helmholtz_evaluate_only_values(dtype, rtol):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 2.5 + 1.3j\n\n if dtype == np.complex128:\n real_type = np.float64\n elif dtype == np.complex64:\n real_type = np.float32\n else:\n raise ValueError(f\"Unsupported type: {dtype}.\")\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=real_type)\n sources = rng.random((3, nsources), dtype=real_type)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=real_type\n )\n\n actual = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, dtype=dtype, parallel=False\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()\n 
np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[:, index] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)\n expected[dist == 0, index] = 0\n\n # Reset the warnings\n np.seterr(**old_param)\n\n expected = np.expand_dims(np.tensordot(charges, expected, 1), -1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.complex128, 1e-14), (np.complex64, 5e-6)])\ndef test_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):\n \"\"\"Test the Laplace kernel.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 2.5 + 1.3j\n\n if dtype == np.complex128:\n real_type = np.float64\n elif dtype == np.complex64:\n real_type = np.float32\n else:\n raise ValueError(f\"Unsupported type: {dtype}.\")\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=real_type)\n sources = rng.random((3, nsources), dtype=real_type)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=real_type\n )\n\n actual = evaluate_helmholtz_kernel(\n sources,\n targets,\n charges,\n wavenumber,\n dtype=dtype,\n return_gradients=True,\n parallel=parallel,\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets, 4), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n diff = target.reshape(3, 1) - sources\n dist = np.linalg.norm(diff, axis=0)\n expected[:, index, 0] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)\n expected[:, index, 1:] = (\n diff.T\n * expected[:, index, 0].reshape(nsources, 1)\n / dist.reshape(nsources, 1) ** 2\n * (1j * wavenumber * dist.reshape(nsources, 1) - 1)\n )\n expected[dist == 0, index, :] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n expected = np.tensordot(charges, expected, 1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_modified_helmholtz_assemble(dtype, rtol, parallel):\n \"\"\"Test the modified Helmholtz kernel.\"\"\"\n from rusty_green_kernel import assemble_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n\n omega = 2.5\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n\n actual = assemble_modified_helmholtz_kernel(\n sources, targets, omega, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((ntargets, nsources), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = 
np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[index, :] = np.exp(-omega * dist) / (4 * np.pi * dist)\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_modified_helmholtz_evaluate_only_values(dtype, rtol, parallel):\n \"\"\"Test the modified Helmholtz kernel.\"\"\"\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n omega = 2.5\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype)\n\n actual = evaluate_modified_helmholtz_kernel(\n sources, targets, charges, omega, dtype=dtype, parallel=parallel\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_param = np.geterr()[\"divide\"]\n np.seterr(divide=\"ignore\")\n\n expected = np.empty((nsources, ntargets), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)\n expected[:, index] = np.exp(-omega * dist) / (4 * np.pi * dist)\n\n # Reset the warnings\n np.seterr(divide=old_param)\n\n expected[0, 0] = 0 # First source and target are identical.\n\n expected = np.expand_dims(charges @ expected, -1)\n\n np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\[email protected](\"parallel\", [True, False])\[email protected](\"dtype,rtol\", [(np.float64, 1e-14), (np.float32, 5e-6)])\ndef test_modified_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):\n \"\"\"Test the modified Helmholtz kernel.\"\"\"\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n omega = 2.5\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype)\n\n actual = evaluate_modified_helmholtz_kernel(\n sources,\n targets,\n charges,\n omega,\n dtype=dtype,\n return_gradients=True,\n parallel=parallel,\n )\n\n # Calculate expected result\n\n # A divide by zero error is expected to happen here.\n # So just ignore the warning.\n old_params = np.geterr()\n np.seterr(all=\"ignore\")\n\n expected = np.empty((nsources, ntargets, 4), dtype=dtype)\n\n for index, target in enumerate(targets.T):\n diff = target.reshape(3, 1) - sources\n dist = np.linalg.norm(diff, axis=0)\n expected[:, index, 0] = np.exp(-omega * dist) / (4 * np.pi * dist)\n expected[:, index, 1:] = (\n diff.T\n / (4 * np.pi * dist.reshape(nsources, 1) ** 3)\n * np.exp(-omega * dist.reshape(nsources, 1))\n * (-omega * dist.reshape(nsources, 1) - 1)\n )\n expected[dist == 0, index, :] = 0\n\n # Reset the warnings\n np.seterr(**old_params)\n\n expected = np.tensordot(charges, expected, 1)\n\n 
np.testing.assert_allclose(actual, expected, rtol=rtol)\n\n\ndef test_laplace_derivative_is_correct():\n \"\"\"Test that the Gradient of the Laplace kernel is correct.\"\"\"\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n\n eps = 1e-10\n\n dtype = np.float64\n\n targets = np.array(\n [\n [1.1, 1.5, 2.3],\n [1.1 + eps, 1.5, 2.3],\n [1.1 - eps, 1.5, 2.3],\n [1.1, 1.5 + eps, 2.3],\n [1.1, 1.5 - eps, 2.3],\n [1.1, 1.5, 2.3 + eps],\n [1.1, 1.5, 2.3 - eps],\n ]\n ).T\n\n rng = np.random.default_rng(seed=0)\n\n sources = rng.random((3, nsources), dtype=dtype)\n charges = rng.random((1, nsources), dtype=dtype)\n\n # Evalute derivative approximately.\n\n values = evaluate_laplace_kernel(sources, targets, charges)\n\n x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)\n y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)\n z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)\n\n expected = np.array([x_deriv, y_deriv, z_deriv])\n\n actual = evaluate_laplace_kernel(sources, targets, charges, return_gradients=True)[\n 0, 0, 1:\n ]\n\n np.testing.assert_allclose(actual, expected, rtol=1e-5)\n\n\ndef test_helmholtz_derivative_is_correct():\n \"\"\"Test that the Gradient of the Helmholtz kernel is correct.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n\n nsources = 10\n\n wavenumber = 2.5 + 1.3j\n\n eps = 1e-10\n\n dtype = np.float64\n\n targets = np.array(\n [\n [1.1, 1.5, 2.3],\n [1.1 + eps, 1.5, 2.3],\n [1.1 - eps, 1.5, 2.3],\n [1.1, 1.5 + eps, 2.3],\n [1.1, 1.5 - eps, 2.3],\n [1.1, 1.5, 2.3 + eps],\n [1.1, 1.5, 2.3 - eps],\n ]\n ).T\n\n rng = np.random.default_rng(seed=0)\n\n sources = rng.random((3, nsources), dtype=dtype)\n charges = rng.random((1, nsources), dtype=dtype)\n\n # Evalute derivative approximately.\n\n values = evaluate_helmholtz_kernel(sources, targets, charges, wavenumber)\n\n x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)\n y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)\n z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)\n\n expected = np.array([x_deriv, y_deriv, z_deriv])\n\n actual = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, return_gradients=True\n )[0, 0, 1:]\n\n np.testing.assert_allclose(actual, expected, rtol=1e-5)\n\n\ndef test_modified_helmholtz_derivative_is_correct():\n \"\"\"Test that the Gradient of the Helmholtz kernel is correct.\"\"\"\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n\n omega = 1.3\n\n eps = 1e-10\n\n dtype = np.float64\n\n targets = np.array(\n [\n [1.1, 1.5, 2.3],\n [1.1 + eps, 1.5, 2.3],\n [1.1 - eps, 1.5, 2.3],\n [1.1, 1.5 + eps, 2.3],\n [1.1, 1.5 - eps, 2.3],\n [1.1, 1.5, 2.3 + eps],\n [1.1, 1.5, 2.3 - eps],\n ]\n ).T\n\n rng = np.random.default_rng(seed=0)\n\n sources = rng.random((3, nsources), dtype=dtype)\n charges = rng.random((1, nsources), dtype=dtype)\n\n # Evalute derivative approximately.\n\n values = evaluate_modified_helmholtz_kernel(sources, targets, charges, omega)\n\n x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)\n y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)\n z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)\n\n expected = np.array([x_deriv, y_deriv, z_deriv])\n\n actual = evaluate_modified_helmholtz_kernel(\n sources, targets, charges, omega, return_gradients=True\n )[0, 0, 1:]\n\n np.testing.assert_allclose(actual, expected, rtol=1e-5)\n\n\ndef test_helmholtz_at_zero_agrees_with_laplace():\n \"\"\"Test if Helmholtz with wavenumber 0 agrees 
with Laplace.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n from rusty_green_kernel import evaluate_laplace_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 0\n\n dtype = np.float64\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=dtype\n )\n\n values_helmholtz = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, return_gradients=True\n )\n values_laplace = evaluate_laplace_kernel(\n sources, targets, np.real(charges), return_gradients=True\n ) + 1j * evaluate_laplace_kernel(\n sources, targets, np.imag(charges), return_gradients=True\n )\n\n np.testing.assert_allclose(values_helmholtz, values_laplace, rtol=1E-14)\n\ndef test_helmholtz_imaginary_wavenumber_agrees_with_modified_helmholtz():\n \"\"\"Test if Helmholtz with wavenumber 0 agrees with Laplace.\"\"\"\n from rusty_green_kernel import evaluate_helmholtz_kernel\n from rusty_green_kernel import evaluate_modified_helmholtz_kernel\n\n nsources = 10\n ntargets = 20\n ncharge_vecs = 2\n\n wavenumber = 1.3j\n\n dtype = np.float64\n\n rng = np.random.default_rng(seed=0)\n # Construct target and sources so that they do not overlap\n # apart from the first point.\n\n targets = 1.5 + rng.random((3, ntargets), dtype=dtype)\n sources = rng.random((3, nsources), dtype=dtype)\n sources[:, 0] = targets[:, 0] # Test what happens if source = target\n charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(\n (ncharge_vecs, nsources), dtype=dtype\n )\n\n values_helmholtz = evaluate_helmholtz_kernel(\n sources, targets, charges, wavenumber, return_gradients=True\n )\n values_modified_helmholtz = evaluate_modified_helmholtz_kernel(\n sources, targets, np.real(charges), np.imag(wavenumber), return_gradients=True\n ) + 1j * evaluate_modified_helmholtz_kernel(\n sources, targets, np.imag(charges), np.imag(wavenumber), return_gradients=True\n )\n\n np.testing.assert_allclose(values_helmholtz, values_modified_helmholtz, rtol=1E-14)"
] | [
[
"numpy.expand_dims",
"numpy.imag",
"numpy.geterr",
"numpy.linalg.norm",
"numpy.seterr",
"numpy.real",
"numpy.tensordot",
"numpy.testing.assert_allclose",
"numpy.exp",
"numpy.array",
"numpy.empty",
"numpy.random.default_rng"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
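The rusty_green_kernel tests in the record above validate the compiled kernels against a plain NumPy reference built inside each test. A minimal sketch of that reference for the Laplace case, assuming the same (3, npoints) array layout as the tests (laplace_kernel_matrix is a hypothetical name; only the assemble_*/evaluate_* functions belong to the package itself):

import numpy as np

def laplace_kernel_matrix(sources, targets):
    """Dense Laplace Green's function G[i, j] = 1 / (4*pi*|target_i - source_j|),
    with coincident points mapped to 0, matching the tests' expected[0, 0] = 0."""
    diff = targets[:, :, None] - sources[:, None, :]   # shape (3, ntargets, nsources)
    dist = np.linalg.norm(diff, axis=0)                # shape (ntargets, nsources)
    with np.errstate(divide="ignore"):
        mat = 1.0 / (4.0 * np.pi * dist)
    mat[dist == 0] = 0.0
    return mat

rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, 20))
sources = rng.random((3, 10))
print(laplace_kernel_matrix(sources, targets).shape)   # (20, 10)

The gradient checks in the same file follow the closed form: the gradient with respect to the target t of exp(1j*k*d) / (4*pi*d), with d = |t - s|, equals (t - s) * (1j*k*d - 1) * exp(1j*k*d) / (4*pi*d**3), which is exactly what the diff.T * ... * (1j*wavenumber*dist - 1) expression in the Helmholtz tests spells out elementwise.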
Ialkhouri/Adv_attacks_big_picture_classification | [
"53edffc3b5bb313e476dcdbaf97ec776884cad50"
] | [
"Alg2_ADMM_MNIST_model_1.py"
] | [
"# Importing Libraries\n\nfrom foolbox.criteria import TargetClass\nfrom foolbox.criteria import Misclassification\n\nfrom numpy import linalg as LA\nimport matplotlib.pyplot as plt\n\nfrom foolbox.attacks import CarliniWagnerL2Attack\nfrom foolbox.attacks import SaliencyMapAttack\nfrom foolbox.attacks import GradientSignAttack\n\nfrom foolbox.v1.attacks import FGSM\nfrom foolbox.v1.attacks import MomentumIterativeAttack\n#from foolbox.v1.attacks import GradientSignAttack\n\nfrom skimage.measure import compare_ssim\n\n\nfrom keras import layers, models\n\nimport numpy as np\n\nfrom keras.utils import np_utils\n\nfrom keras import backend as K\nfrom keras.applications import vgg16\n\nimport tensorflow as tf\n\n\nimport pickle\n\nimport foolbox\n\nimport json\n\nimport timeit\nstart = timeit.default_timer()\n\nimport cvxpy as cp\nfrom numpy import linalg as LA\nfrom ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl,get_S_T_S_T_comp_from_lbl,Imperceptibility,ADMM_,Attack_performance,cvxPy_pert_gen\n\n########################################################################\n############################################### Fashion MNIST dataset import\n############################################################################\n\n#tf.keras.backend.set_learning_phase(False)\n# Keras Parameters\nbatch_size = 28\nnb_classes = 10\nnb_epoch = 2\nimg_rows, img_col = 28, 28\nimg_channels = 1\n# download mnist data and split into train and test sets\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()\n# reshape data to fit model\nX_train = train_images.reshape(train_images.shape[0], 28, 28, 1)\nX_test = test_images.reshape(test_images.shape[0], 28, 28, 1)\nX_train, X_test = X_train/255, X_test/255\n# normalization:\ntrain_images = train_images / 255\ntest_images = test_images / 255\nprint(\"\")\n\ny_train = np_utils.to_categorical(train_labels,10)\ny_test = np_utils.to_categorical(test_labels,10)\n\nX_train_1d = X_train.reshape(60000,784,1)\nX_test_1d = X_test.reshape(10000,784,1)\n\n################################################################################\n############## Loading the model and preprocessing #####################\n######################################################################################\n\n########### load the propoer model here\n\n\nmodel1 = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')\nmodel1.summary()\n####################################################################\n\n\n\n\n\n####################################################################################\n############RE-LABEL TRAIN_LABELS AND TEST_LABELS (Using a dictonary) #########################\n######################################################################################\ndic5 = {2:0, 4:0, 6:0, 5:2, 7:2, 9:2, 8:4}\ntrain_labels_5 = [dic5[x] if x in dic5.keys() else x for x in train_labels]\ntest_labels_5 = [dic5[x] if x in dic5.keys() else x for x in test_labels]\n\n'''\nyour mapping is different than mine. 
Here is the mapping from the paper you gave me.\n0 ==> {0,2,4,6} top\n1 ==> {1} bottom\n2 ==> {5,7,9} shoes\n3 ==> {3} dress\n4 ==> {8}\n'''\n######################################################################################\n# #####################################################################\n################### loading Grads and testing the vectorization\n#####################################################################\n\nGrad_MNIST_model1 = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/Grad_MNIST_model1_1d_before_SM.p\",\"rb\"))\n\ndisc_values = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p\",\"rb\"))\n\n\n################################################################################\n##################################### BUILDING THE ALG - 1 PROBLEM WITH CVXPY ######\n################################################################################\n\n######## to save eta, ceate a vectorized empty np array of size 10000,28*28,1\nnumber_of_observations = 10000\n\n### tensors to save and to calc CApert, CApert_sup, ELA, RLA, and sigmas\neta_vec = np.zeros(shape=(number_of_observations,28*28,1))\nimperceptibility_rho_2_save = np.nan*np.ones(shape=(number_of_observations,1))\nimperceptibility_rho_i_save = np.nan*np.ones(shape=(number_of_observations,1))\nimperceptibility_sssim_save = np.nan*np.ones(shape=(number_of_observations,1))\npred_pert_lbls = np.zeros(shape=(number_of_observations))\npred_pert_sup_lbls = np.zeros(shape=(number_of_observations))\npred_lbls = np.zeros(shape=(number_of_observations))\n\ncnt = 0\n\nQ = 3\nepsilon_D = 0.18\n\n######################### loading perturbations from MIFGSM\nMIFGSM_perturbed_images = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_perturbed_images.p\",\"rb\"))\n\nMIFGSM_perturbations = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_perturbations.p\",\"rb\"))\n\nMIFGSM_pred_label_w_pert = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_pred_label_w_pert.p\",\"rb\"))\n\nMIFGSM_pred_label_w_pert_super_label = pickle.load(open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_pred_super_label_w_pert.p\",\"rb\"))\n\n\n\n\n\n\nfor id in range(number_of_observations):\n\n ######## LET THE INPUT IMAGE be:\n id = id\n input_image = X_test_1d[id]\n\n input_image_reshaped = input_image.reshape(784)\n\n ######## get tru_lbl\n tru_lbl = test_labels[id]\n\n ######## get tru_sup_lbl\n tru_sup_lbl = sup_lbl_from_lbl(tru_lbl)\n\n ######## get pred_lbl\n pred_lbl = np.argmax(model1(input_image.reshape(1, 784, 1)))\n pred_lbls[id] = pred_lbl\n\n ######## get_pred_sup_lbl\n pred_sup_lbl = sup_lbl_from_lbl(pred_lbl)\n\n ######## get S_T and S_T_comp: this is based on the tru lbl not the predicted lbl\n [S_T,S_T_comp] = get_S_T_S_T_comp_from_lbl(tru_lbl)\n\n ######## get vectozied gradients and disc values of of the disgnated lbl\n\n Grad_MNIST_model1_vec_disgnated = Grad_MNIST_model1[id,:,:]\n\n #print('Grad_MNIST_model1_vec_disgnated = ' , Grad_MNIST_model1_vec_disgnated.shape)\n\n disc_values_disgnated = disc_values[id,:]\n\n ####### get S_T_comp_star as the reduced/sorted set with cardinality = Q\n # get the indicies of the highest Q values from the f(input image), where f is the discriminant vector before the softmax\n # vector before softmax is:\n disc_values = pickle.load(\n 
open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p\", \"rb\"))\n disc_values_disgnated = disc_values[id, :]\n\n # remove S_T values and place them with -100.0\n temp = disc_values[id, :]\n disc_values_disgnated_excluding_S_T = temp\n disc_values_disgnated_excluding_S_T[S_T] = -100.0\n S_T_comp_star = (-disc_values_disgnated_excluding_S_T).argsort()[0:Q]\n\n # # keep this to restart above variables in the case of using j_star from the NOC methid\n disc_values = pickle.load(\n open(\"/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p\", \"rb\"))\n disc_values_disgnated = disc_values[id, :]\n\n\n ###### SAVE eta[id] of each j \\in S_T_comp\n # initial\n eta_vec_j = np.zeros(shape=(10,28*28,1))\n # distance initial\n D_j = 1000000*np.ones(shape=(10, 1))\n\n\n\n ####################################### Alg .II\n\n ## try MIFGSM; if good, then exit the program and we found eta^*\n\n if MIFGSM_pred_label_w_pert_super_label[id] != tru_sup_lbl:\n eta_cvx = MIFGSM_perturbations[id,:,:,:].reshape(784,1)\n eta_vec[id, :, :] = eta_cvx.reshape(n, 1)\n eta_source = 'MIFGSM'\n cnt = cnt + 1\n rho_2 = Imperceptibility(input_image, eta_cvx)[0]\n rho_inf = Imperceptibility(input_image, eta_cvx)[1]\n D_ssim = Imperceptibility(input_image, eta_cvx)[2]\n imperceptibility_rho_2_save[id] = rho_2\n imperceptibility_rho_i_save[id] = rho_inf\n imperceptibility_sssim_save[id] = D_ssim\n image_pert = eta_cvx + input_image\n #pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))\n pred_pert_lbls[id] = MIFGSM_pred_label_w_pert[id]\n pred_pert_sup_lbls[id] = MIFGSM_pred_label_w_pert_super_label[id]\n print('id = ', id, \"eta_source = \" , 'MIFGSM' , ' ; winning_label = ', 'Nadaaaaaa', 'pred_sup_lbl = ', pred_sup_lbl, 'predecited_perturbed_super_lbl = ',\n MIFGSM_pred_label_w_pert_super_label[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image,eta_cvx)[0:2], ' ; count = ', cnt)\n\n\n\n ## ELSE\n else:\n flag = 0\n eta_source = 'not MIFGSM'\n for jj in S_T_comp_star:\n j_star = jj\n # find eta_jj\n\n ########\n epsilon = 10\n\n ####### get matrix G \\in N \\times |S_T| and b \\in |S_T|, where G_columns = [grad_j_star - grad_l], for all l \\in S_T\n n = 28*28\n card_S_T = len(S_T) # cardinality of the set S_T\n\n mat_G = np.zeros(shape=(n,card_S_T)) # init mat_G\n\n vec_b_wout = np.zeros(shape=(card_S_T,1) )\n\n temp_jstar = Grad_MNIST_model1_vec_disgnated[j_star , : ,:]\n temp_jstar = temp_jstar.reshape(n,)\n b_jstar = disc_values_disgnated[j_star]\n #b_jstar = b_jstar.reshape(1,)\n\n for i in range(card_S_T):\n temp1 = Grad_MNIST_model1_vec_disgnated[S_T[i] , : ,:]\n temp1 = temp1.reshape(n,)\n\n b_l = disc_values_disgnated[S_T[i]]\n # b_l = b_l.reshape(1,)\n\n mat_G[:,i] = temp_jstar - temp1\n vec_b_wout[ i] = b_l - b_jstar\n\n vec_b = vec_b_wout + epsilon\n\n ###############################################################################################\n ##### ADMM\n #### algorithm parameters\n r_penalty_factor = 0.0075\n number_of_iterations_tau = 10\n\n # eADMM stopping criteria\n epsilon_A = 0.15\n\n admm_type = \"ADMM\"\n\n eta_cvx = ADMM_(input_image,model1,pred_sup_lbl,r_penalty_factor,number_of_iterations_tau,epsilon_A,mat_G, vec_b,admm_type)\n ################################################################################################\n\n\n\n\n ################# calculate the distance\n image_pert_temp = input_image + eta_cvx\n #D_j[jj] = LA.norm(eta_cvx, 2)\n D_j[jj] = 
Imperceptibility(input_image,eta_cvx)[0]\n\n if sup_lbl_from_lbl(np.argmax(model1(image_pert_temp.reshape(1, 784, 1)))) != pred_sup_lbl and D_j[jj] <= epsilon_D:\n\n #print('break for is used')\n flag = 1\n eta_cvx = eta_cvx\n eta_vec[id, :, :] = eta_cvx.reshape(n, 1)\n cnt = cnt + 1\n rho_2 = Imperceptibility(input_image, eta_cvx)[0]\n rho_inf = Imperceptibility(input_image, eta_cvx)[1]\n D_ssim = Imperceptibility(input_image, eta_cvx)[2]\n imperceptibility_rho_2_save[id] = rho_2\n imperceptibility_rho_i_save[id] = rho_inf\n imperceptibility_sssim_save[id] = D_ssim\n image_pert = eta_cvx + input_image\n pred_pert_lbls[id] = np.argmax(model1(image_pert.reshape(1, 784, 1)))\n pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))\n print('id = ', id, \"eta_source = \", 'not MIFGSM and break is used', ' ; winning_label = ', jj, 'pred_sup_lbl = ',\n pred_sup_lbl, 'predecited_perturbed_super_lbl = ',\n pred_pert_sup_lbls[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image, eta_cvx)[0:2],\n ' ; count = ', cnt)\n break\n\n\n else:\n # save the mother fucking eta_cvx to choose from in the future\n # save eta for each j \\in S_T_comp\n eta_vec_j[jj,:,:] = eta_cvx.reshape(n,1)\n\n\n if flag != 1:\n winning_label = np.argmin(D_j)\n eta_cvx = eta_vec_j[winning_label, :, :]\n eta_cvx = eta_cvx\n rho_2 = Imperceptibility(input_image, eta_cvx)[0]\n rho_inf = Imperceptibility(input_image, eta_cvx)[1]\n D_ssim = Imperceptibility(input_image, eta_cvx)[2]\n\n\n # cnt is increased iff T(k(x+eta)) != T(k(x))\n if sup_lbl_from_lbl(np.argmax(model1((input_image+eta_cvx).reshape(1, 784, 1)))) != pred_sup_lbl:\n cnt = cnt + 1\n imperceptibility_rho_2_save[id] = rho_2\n imperceptibility_rho_i_save[id] = rho_inf\n imperceptibility_sssim_save[id] = D_ssim\n\n\n image_pert = eta_cvx + input_image\n pred_pert_lbls[id] = np.argmax(model1(image_pert.reshape(1, 784, 1)))\n pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))\n print('id = ', id, \"eta_source = \", 'not MIFGSM and no break', ' ; winning_label = ', winning_label,\n 'pred_sup_lbl = ',\n pred_sup_lbl, 'predecited_perturbed_super_lbl = ',\n pred_pert_sup_lbls[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image, eta_cvx)[0:2],\n ' ; count = ', cnt)\n\n\n\n\n\nattack_success = cnt / number_of_observations\n\nprint('ATTACK SUCCESS = ' , attack_success*100 , '%')\n\n\nCA_pert, CA_pert_sup, RLA, ELA,RLA_sup, ELA_sup , sigma_2, sigma_inf, sigma_s = \\\n Attack_performance(test_labels[0:number_of_observations] ,\n pred_lbls,\n pred_pert_lbls ,\n imperceptibility_rho_2_save,\n imperceptibility_rho_i_save,\n imperceptibility_sssim_save)\n\n# attack performace\nprint('Number of observations = ', number_of_observations ,\n '\\n CA_pert = ' , CA_pert,\n \"\\n CA_pert_sup = \" , CA_pert_sup ,\n \"\\n RLA = \" , RLA ,\n \"\\n ELA = \" , ELA,\n '\\n RLA_sup = ' , RLA_sup,\n '\\n ELA_sup = ' , ELA_sup,\n \"\\n sigma_2 = \" , sigma_2 ,\n \"\\n sigma_inf = \" , sigma_inf ,\n '\\n ssim = ' , sigma_s)\n\n\n\n\n\n# # #####################################################################\n# # ################### Plotting images\n# # #####################################################################\n# print(\"\")\n#\n# plt.figure()\n# plt.subplot(1,3,1)\n# plt.title('Original')\n# plt.imshow(input_image.reshape(28,28))\n# plt.axis('off')\n#\n#\n# plt.subplot(1,3,2)\n# plt.title('pertubations')\n# plt.imshow(eta_cvx.reshape(28,28))\n# plt.axis('off')\n#\n#\n# plt.subplot(1,3,3)\n# 
plt.title('perturbed image')\n# plt.imshow(image_pert.reshape(28,28))\n# plt.axis('off')\n#\n#\n# plt.show()\n# # ########################################################################\n\n\nstop = timeit.default_timer()\n\nprint('Time: ', stop - start)\n\n#pickle.dump(eta_vec, open(\"eta_vec_alg2_samples.p\", \"wb\"))\n\n\nprint('break here')\n\n\n\n\n"
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.datasets.fashion_mnist.load_data",
"numpy.ones",
"numpy.argmin",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
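The Alg2_ADMM script in the record above counts an attack as successful only when the coarse ("super") label of the perturbed prediction changes, not merely the fine Fashion-MNIST label. A small self-contained sketch of that bookkeeping, assuming the grouping spelled out in the script's own comment (sup_label_of and is_successful_attack are hypothetical helper names, not functions from the repository's ISMAIL_big_picture_journal_lib):

import numpy as np

# Fashion-MNIST fine label -> super label, as described in the script's comment:
# 0 -> {0, 2, 4, 6} tops, 1 -> {1}, 2 -> {5, 7, 9} shoes, 3 -> {3}, 4 -> {8}.
FINE_TO_SUPER = {0: 0, 2: 0, 4: 0, 6: 0, 1: 1, 5: 2, 7: 2, 9: 2, 3: 3, 8: 4}

def sup_label_of(fine_label: int) -> int:
    return FINE_TO_SUPER[fine_label]

def is_successful_attack(model_predict, x, eta) -> bool:
    """Success means the super label flips between the clean and perturbed inputs.

    model_predict: callable returning class scores for a batch of shape (1, 784, 1).
    x, eta: clean image and perturbation, each of shape (784, 1).
    """
    clean = int(np.argmax(model_predict(x.reshape(1, 784, 1))))
    perturbed = int(np.argmax(model_predict((x + eta).reshape(1, 784, 1))))
    return sup_label_of(perturbed) != sup_label_of(clean)

In the stored script this is the condition guarding the counter cnt; the MIFGSM branch applies the same test against the true super label before falling back to the ADMM perturbation search.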
ICRC-BME/epycom | [
"5bfa3fb9020f04536b7a08382533c8abf56ca85f"
] | [
"epycom/univariate/approximate_entropy.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) St. Anne's University Hospital in Brno. International Clinical\n# Research Center, Biomedical Engineering. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n# Third pary imports\nimport numpy as np\nfrom numba import njit\n\n# Local imports\nfrom ..utils.method import Method\n\n\n@njit('f8(f8[:], f8[:])', cache=True)\ndef _maxdist(x_i, x_j):\n dist = 0\n\n leni = len(x_i)\n lenj = len(x_j)\n\n if leni < lenj:\n n = len(x_i)\n else:\n n = len(x_j)\n\n for ua in range(n):\n if abs(x_i[ua] - x_j[ua]) > dist:\n dist = abs(x_i[ua] - x_j[ua])\n\n return dist\n\n\n@njit('f8(i8, i8, f8, f8[:])', cache=True)\ndef _phi_jitted(m, N, r, sig):\n z = N - m + 1\n\n xlen = N - m + 1\n x = np.full((xlen, m), np.inf, dtype='float64')\n\n # Sampling the signal\n for i in range(xlen):\n x[i] = sig[i: i + m]\n\n C = np.full(len(sig), np.inf, dtype='float64')\n iterator = cnt = 0\n for x_i in x:\n for x_j in x:\n if _maxdist(x_i, x_j) <= r:\n cnt += 1\n C[iterator] = cnt / (N - m + 1.0)\n cnt = 0\n iterator += 1\n\n C = C[:iterator]\n\n phi = 0\n for c in C:\n phi = phi+np.log(c)\n\n return phi/z\n\n\n@njit('f8(f8[:], f8, i8)', cache=True)\ndef compute_approximate_entropy(sig, r, m):\n \"\"\"\n Function computes approximate entropy of given signal\n\n Parameters\n ----------\n sig: np.ndarray\n 1D signal\n r: np.float64\n filtering treshold, recommended values: (0.1-0.25)*np.nanstd(sig)\n m: int\n window length of compared run of data, recommended (2-8)\n\n Returns\n -------\n entro: numpy.float64\n approximate entropy\n\n Example\n -------\n signal_entropy = approximate_entropy(data, 0.1*np.nanstd(data))\n \"\"\"\n\n N = sig.shape[0]\n return abs(_phi_jitted(m + 1, N, r, sig) - _phi_jitted(m, N, r, sig))\n\n\nclass ApproximateEntropy(Method):\n\n algorithm = 'APPROXIMATE_ENTROPY'\n algorithm_type = 'univariate'\n version = '1.0.0'\n dtype = [('apen', 'float32')]\n\n def __init__(self, **kwargs):\n \"\"\"\n Approximate entropy\n\n Parameters\n ----------\n sig: np.ndarray\n 1D signal\n m: int\n window length of compared run of data, recommended (2-8)\n r: float64\n filtering treshold, recommended values: (0.1-0.25)*std\n \"\"\"\n\n super().__init__(compute_approximate_entropy, **kwargs)\n self._event_flag = False\n"
] | [
[
"numpy.log",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
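The epycom record above stores a Numba-compiled approximate-entropy routine with the signature compute_approximate_entropy(sig, r, m) for a float64 1D signal. A minimal usage sketch under that signature (the import path simply follows the file layout shown in the record, and the sine-versus-noise contrast is only an illustrative regularity check, not a claim from the package's documentation):

import numpy as np
# Assumes the epycom package from the record is installed.
from epycom.univariate.approximate_entropy import compute_approximate_entropy

rng = np.random.default_rng(0)
t = np.linspace(0, 4 * np.pi, 500)

signals = {
    "sine": np.sin(t),                    # highly regular signal
    "noise": rng.standard_normal(500),    # white noise
}

for name, sig in signals.items():
    sig = np.ascontiguousarray(sig, dtype=np.float64)  # compiled signature expects float64
    r = 0.2 * np.nanstd(sig)                           # recommended (0.1-0.25) * std
    apen = compute_approximate_entropy(sig, r, 2)      # m = 2 compared-run length
    print(f"{name}: ApEn = {apen:.3f}")                # noise is expected to score higher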
JPFrancoia/aws-data-wrangler | [
"5b08087d79b42683b03be91ba5ebc12ad4bd2d3d"
] | [
"awswrangler/s3.py"
] | [
"\"\"\"Amazon S3 Module.\"\"\"\n\nimport concurrent.futures\nimport csv\nimport logging\nimport time\nimport uuid\nfrom itertools import repeat\nfrom typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union\n\nimport boto3 # type: ignore\nimport botocore.exceptions # type: ignore\nimport pandas as pd # type: ignore\nimport pandas.io.parsers # type: ignore\nimport pyarrow as pa # type: ignore\nimport pyarrow.lib # type: ignore\nimport pyarrow.parquet # type: ignore\nimport s3fs # type: ignore\nfrom boto3.s3.transfer import TransferConfig # type: ignore\nfrom pandas.io.common import infer_compression # type: ignore\n\nfrom awswrangler import _data_types, _utils, catalog, exceptions\n\n_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: \"\", \"gzip\": \".gz\", \"snappy\": \".snappy\"}\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\ndef get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:\n \"\"\"Get bucket region name.\n\n Parameters\n ----------\n bucket : str\n Bucket name.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n str\n Region code (e.g. 'us-east-1').\n\n Examples\n --------\n Using the default boto3 session\n\n >>> import awswrangler as wr\n >>> region = wr.s3.get_bucket_region('bucket-name')\n\n Using a custom boto3 session\n\n >>> import boto3\n >>> import awswrangler as wr\n >>> region = wr.s3.get_bucket_region('bucket-name', boto3_session=boto3.Session())\n\n \"\"\"\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n _logger.debug(f\"bucket: {bucket}\")\n region: str = client_s3.get_bucket_location(Bucket=bucket)[\"LocationConstraint\"]\n region = \"us-east-1\" if region is None else region\n _logger.debug(f\"region: {region}\")\n return region\n\n\ndef does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:\n \"\"\"Check if object exists on S3.\n\n Parameters\n ----------\n path: str\n S3 path (e.g. s3://bucket/key).\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n bool\n True if exists, False otherwise.\n\n Examples\n --------\n Using the default boto3 session\n\n >>> import awswrangler as wr\n >>> wr.s3.does_object_exist('s3://bucket/key_real')\n True\n >>> wr.s3.does_object_exist('s3://bucket/key_unreal')\n False\n\n Using a custom boto3 session\n\n >>> import boto3\n >>> import awswrangler as wr\n >>> wr.s3.does_object_exist('s3://bucket/key_real', boto3_session=boto3.Session())\n True\n >>> wr.s3.does_object_exist('s3://bucket/key_unreal', boto3_session=boto3.Session())\n False\n\n \"\"\"\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n bucket: str\n key: str\n bucket, key = path.replace(\"s3://\", \"\").split(\"/\", 1)\n try:\n client_s3.head_object(Bucket=bucket, Key=key)\n return True\n except botocore.exceptions.ClientError as ex:\n if ex.response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 404:\n return False\n raise ex # pragma: no cover\n\n\ndef list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:\n \"\"\"List Amazon S3 objects from a prefix.\n\n Parameters\n ----------\n path : str\n S3 path (e.g. s3://bucket/prefix).\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n List[str]\n List of objects paths.\n\n Examples\n --------\n Using the default boto3 session\n\n >>> import awswrangler as wr\n >>> wr.s3.list_objects('s3://bucket/prefix')\n ['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']\n\n Using a custom boto3 session\n\n >>> import boto3\n >>> import awswrangler as wr\n >>> wr.s3.list_objects('s3://bucket/prefix', boto3_session=boto3.Session())\n ['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']\n\n \"\"\"\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n paginator = client_s3.get_paginator(\"list_objects_v2\")\n bucket: str\n prefix: str\n bucket, prefix = _utils.parse_path(path=path)\n response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig={\"PageSize\": 1000})\n paths: List[str] = []\n for page in response_iterator:\n contents: Optional[List] = page.get(\"Contents\")\n if contents is not None:\n for content in contents:\n if (content is not None) and (\"Key\" in content):\n key: str = content[\"Key\"]\n paths.append(f\"s3://{bucket}/{key}\")\n return paths\n\n\ndef _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:\n if isinstance(path, str): # prefix\n paths: List[str] = list_objects(path=path, boto3_session=boto3_session)\n elif isinstance(path, list):\n paths = path\n else:\n raise exceptions.InvalidArgumentType(f\"{type(path)} is not a valid path type. Please, use str or List[str].\")\n return paths\n\n\ndef delete_objects(\n path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None\n) -> None:\n \"\"\"Delete Amazon S3 objects from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.delete_objects(['s3://bucket/key0', 's3://bucket/key1']) # Delete both objects\n >>> wr.s3.delete_objects('s3://bucket/prefix') # Delete all objects under the received prefix\n\n \"\"\"\n paths: List[str] = _path2list(path=path, boto3_session=boto3_session)\n if len(paths) < 1:\n return\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n buckets: Dict[str, List[str]] = _split_paths_by_bucket(paths=paths)\n for bucket, keys in buckets.items():\n chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)\n if use_threads is False:\n for chunk in chunks:\n _delete_objects(bucket=bucket, keys=chunk, client_s3=client_s3)\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n executor.map(_delete_objects, repeat(bucket), chunks, repeat(client_s3))\n\n\ndef _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:\n buckets: Dict[str, List[str]] = {}\n bucket: str\n key: str\n for path in paths:\n bucket, key = _utils.parse_path(path=path)\n if bucket not in buckets:\n buckets[bucket] = []\n buckets[bucket].append(key)\n return buckets\n\n\ndef _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:\n _logger.debug(f\"len(keys): {len(keys)}\")\n batch: List[Dict[str, str]] = [{\"Key\": key} for key in keys]\n client_s3.delete_objects(Bucket=bucket, Delete={\"Objects\": batch})\n\n\ndef describe_objects(\n path: Union[str, List[str]],\n wait_time: Optional[Union[int, float]] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> Dict[str, Dict[str, Any]]:\n \"\"\"Describe Amazon S3 objects from a received S3 prefix or list of S3 objects paths.\n\n Fetch attributes like ContentLength, DeleteMarker, LastModified, ContentType, etc\n The full list of attributes can be explored under the boto3 head_object documentation:\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n wait_time : Union[int,float], optional\n How much time (seconds) should Wrangler try to reach this objects.\n Very useful to overcome eventual consistence issues.\n `None` means only a single try will be done.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Dict[str, Dict[str, Any]]\n Return a dictionary of objects returned from head_objects where the key is the object path.\n The response object can be explored here:\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> descs0 = wr.s3.describe_objects(['s3://bucket/key0', 's3://bucket/key1']) # Describe both objects\n >>> descs1 = wr.s3.describe_objects('s3://bucket/prefix') # Describe all objects under the prefix\n >>> descs2 = wr.s3.describe_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistence issues\n\n \"\"\"\n paths: List[str] = _path2list(path=path, boto3_session=boto3_session)\n if len(paths) < 1:\n return {}\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n resp_list: List[Tuple[str, Dict[str, Any]]]\n if use_threads is False:\n resp_list = [_describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in paths]\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n resp_list = list(executor.map(_describe_object, paths, repeat(wait_time), repeat(client_s3)))\n desc_list: Dict[str, Dict[str, Any]] = dict(resp_list)\n return desc_list\n\n\ndef _describe_object(\n path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client\n) -> Tuple[str, Dict[str, Any]]:\n wait_time = int(wait_time) if isinstance(wait_time, float) else wait_time\n tries: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1\n bucket: str\n key: str\n bucket, key = _utils.parse_path(path=path)\n desc: Dict[str, Any] = {}\n for i in range(tries, 0, -1):\n try:\n desc = client_s3.head_object(Bucket=bucket, Key=key)\n break\n except botocore.exceptions.ClientError as e: # pragma: no cover\n if e.response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 404: # Not Found\n _logger.debug(f\"Object not found. {i} seconds remaining to wait.\")\n if i == 1: # Last try, there is no more need to sleep\n break\n time.sleep(1)\n else:\n raise e\n return path, desc\n\n\ndef size_objects(\n path: Union[str, List[str]],\n wait_time: Optional[Union[int, float]] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> Dict[str, Optional[int]]:\n \"\"\"Get the size (ContentLength) in bytes of Amazon S3 objects from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n wait_time : Union[int,float], optional\n How much time (seconds) should Wrangler try to reach this objects.\n Very useful to overcome eventual consistence issues.\n `None` means only a single try will be done.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Dict[str, Optional[int]]\n Dictionary where the key is the object path and the value is the object size.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> sizes0 = wr.s3.size_objects(['s3://bucket/key0', 's3://bucket/key1']) # Get the sizes of both objects\n >>> sizes1 = wr.s3.size_objects('s3://bucket/prefix') # Get the sizes of all objects under the received prefix\n >>> sizes2 = wr.s3.size_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistence issues\n\n \"\"\"\n desc_list: Dict[str, Dict[str, Any]] = describe_objects(\n path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session\n )\n size_list: Dict[str, Optional[int]] = {k: d.get(\"ContentLength\", None) for k, d in desc_list.items()}\n return size_list\n\n\ndef to_csv( # pylint: disable=too-many-arguments\n df: pd.DataFrame,\n path: str,\n sep: str = \",\",\n index: bool = True,\n columns: Optional[List[str]] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n dataset: bool = False,\n partition_cols: Optional[List[str]] = None,\n mode: Optional[str] = None,\n database: Optional[str] = None,\n table: Optional[str] = None,\n dtype: Optional[Dict[str, str]] = None,\n description: Optional[str] = None,\n parameters: Optional[Dict[str, str]] = None,\n columns_comments: Optional[Dict[str, str]] = None,\n **pandas_kwargs,\n) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:\n \"\"\"Write CSV file or dataset on Amazon S3.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).\n\n Note\n ----\n The table name and all column names will be automatically sanitize using\n `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n path : str\n Amazon S3 path (e.g. s3://bucket/filename.csv).\n sep : str\n String of length 1. Field delimiter for the output file.\n index : bool\n Write row names (index).\n columns : List[str], optional\n Columns to write.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 Session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n dataset: bool\n If True store a parquet dataset instead of a single file.\n If True, enable all follow arguments:\n partition_cols, mode, database, table, description, parameters, columns_comments, .\n partition_cols: List[str], optional\n List of column names that will be used to create partitions. Only takes effect if dataset=True.\n mode: str, optional\n ``append`` (Default), ``overwrite``, ``overwrite_partitions``. 
Only takes effect if dataset=True.\n database : str, optional\n Glue/Athena catalog: Database name.\n table : str, optional\n Glue/Athena catalog: Table name.\n dtype: Dict[str, str], optional\n Dictionary of columns names and Athena/Glue types to be casted.\n Useful when you have columns with undetermined or mixed data types.\n Only takes effect if dataset=True.\n (e.g. {'col name': 'bigint', 'col2 name': 'int'})\n description: str, optional\n Glue/Athena catalog: Table description\n parameters: Dict[str, str], optional\n Glue/Athena catalog: Key/value pairs to tag the table.\n columns_comments: Dict[str, str], optional\n Glue/Athena catalog:\n Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).\n pandas_kwargs:\n keyword arguments forwarded to pandas.DataFrame.to_csv()\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n Writing single file\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.csv',\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.csv'],\n 'partitions_values': {}\n }\n\n Writing single file encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.csv',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.csv'],\n 'partitions_values': {}\n }\n\n Writing partitioned dataset\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2']\n ... )\n {\n 'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset to S3 with metadata on Athena/Glue Catalog.\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2'],\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... )\n {\n 'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset casting empty column data type\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_csv(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B'],\n ... 'col3': [None, None, None]\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... dtype={'col3': 'date'}\n ... 
)\n {\n 'paths': ['s3://.../x.csv'],\n 'partitions_values: {}\n }\n\n \"\"\"\n if (database is None) ^ (table is None):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog.\"\n )\n if df.empty is True:\n raise exceptions.EmptyDataFrame()\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n partition_cols = partition_cols if partition_cols else []\n dtype = dtype if dtype else {}\n columns_comments = columns_comments if columns_comments else {}\n partitions_values: Dict[str, List[str]] = {}\n fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)\n if dataset is False:\n if partition_cols:\n raise exceptions.InvalidArgumentCombination(\"Please, pass dataset=True to be able to use partition_cols.\")\n if mode is not None:\n raise exceptions.InvalidArgumentCombination(\"Please pass dataset=True to be able to use mode.\")\n if any(arg is not None for arg in (database, table, description, parameters)):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass dataset=True to be able to use any one of these \"\n \"arguments: database, table, description, parameters, \"\n \"columns_comments.\"\n )\n pandas_kwargs[\"sep\"] = sep\n pandas_kwargs[\"index\"] = index\n pandas_kwargs[\"columns\"] = columns\n _to_text(file_format=\"csv\", df=df, path=path, fs=fs, **pandas_kwargs)\n paths = [path]\n else:\n mode = \"append\" if mode is None else mode\n exist: bool = False\n if columns:\n df = df[columns]\n if (database is not None) and (table is not None): # Normalize table to respect Athena's standards\n df = catalog.sanitize_dataframe_columns_names(df=df)\n partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]\n dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}\n columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}\n exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)\n if (exist is True) and (mode in (\"append\", \"overwrite_partitions\")):\n for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():\n dtype[k] = v\n df = catalog.drop_duplicated_columns(df=df)\n paths, partitions_values = _to_csv_dataset(\n df=df,\n path=path,\n index=index,\n sep=sep,\n fs=fs,\n use_threads=use_threads,\n partition_cols=partition_cols,\n dtype=dtype,\n mode=mode,\n boto3_session=session,\n )\n if (database is not None) and (table is not None):\n columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(\n df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True\n )\n if (exist is False) or (mode == \"overwrite\"):\n catalog.create_csv_table(\n database=database,\n table=table,\n path=path,\n columns_types=columns_types,\n partitions_types=partitions_types,\n description=description,\n parameters=parameters,\n columns_comments=columns_comments,\n boto3_session=session,\n mode=\"overwrite\",\n sep=sep,\n )\n if partitions_values:\n _logger.debug(f\"partitions_values:\\n{partitions_values}\")\n catalog.add_csv_partitions(\n database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep\n )\n return {\"paths\": paths, \"partitions_values\": partitions_values}\n\n\ndef _to_csv_dataset(\n df: pd.DataFrame,\n path: str,\n index: bool,\n sep: str,\n fs: s3fs.S3FileSystem,\n use_threads: bool,\n mode: str,\n 
dtype: Dict[str, str],\n partition_cols: Optional[List[str]] = None,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[List[str], Dict[str, List[str]]]:\n paths: List[str] = []\n partitions_values: Dict[str, List[str]] = {}\n path = path if path[-1] == \"/\" else f\"{path}/\"\n if mode not in [\"append\", \"overwrite\", \"overwrite_partitions\"]:\n raise exceptions.InvalidArgumentValue(\n f\"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions.\"\n )\n if (mode == \"overwrite\") or ((mode == \"overwrite_partitions\") and (not partition_cols)):\n delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)\n df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)\n _logger.debug(f\"dtypes: {df.dtypes}\")\n if not partition_cols:\n file_path: str = f\"{path}{uuid.uuid4().hex}.csv\"\n _to_text(\n file_format=\"csv\",\n df=df,\n path=file_path,\n fs=fs,\n quoting=csv.QUOTE_NONE,\n escapechar=\"\\\\\",\n header=False,\n date_format=\"%Y-%m-%d %H:%M:%S.%f\",\n index=index,\n sep=sep,\n )\n paths.append(file_path)\n else:\n for keys, subgroup in df.groupby(by=partition_cols, observed=True):\n subgroup = subgroup.drop(partition_cols, axis=\"columns\")\n keys = (keys,) if not isinstance(keys, tuple) else keys\n subdir = \"/\".join([f\"{name}={val}\" for name, val in zip(partition_cols, keys)])\n prefix: str = f\"{path}{subdir}/\"\n if mode == \"overwrite_partitions\":\n delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)\n file_path = f\"{prefix}{uuid.uuid4().hex}.csv\"\n _to_text(\n file_format=\"csv\",\n df=subgroup,\n path=file_path,\n fs=fs,\n quoting=csv.QUOTE_NONE,\n escapechar=\"\\\\\",\n header=False,\n date_format=\"%Y-%m-%d %H:%M:%S.%f\",\n index=index,\n sep=sep,\n )\n paths.append(file_path)\n partitions_values[prefix] = [str(k) for k in keys]\n return paths, partitions_values\n\n\ndef to_json(\n df: pd.DataFrame,\n path: str,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n **pandas_kwargs,\n) -> None:\n \"\"\"Write JSON file on Amazon S3.\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n path : str\n Amazon S3 path (e.g. s3://bucket/filename.csv).\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 Session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n pandas_kwargs:\n keyword arguments forwarded to pandas.DataFrame.to_csv()\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n Writing JSON file\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_json(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/filename.json',\n ... )\n\n Writing CSV file encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_json(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/filename.json',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... 
)\n\n \"\"\"\n return _to_text(\n file_format=\"json\",\n df=df,\n path=path,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n **pandas_kwargs,\n )\n\n\ndef _to_text(\n file_format: str,\n df: pd.DataFrame,\n path: str,\n fs: Optional[s3fs.S3FileSystem] = None,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n **pandas_kwargs,\n) -> None:\n if df.empty is True: # pragma: no cover\n raise exceptions.EmptyDataFrame()\n if fs is None:\n fs = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n with fs.open(path, \"w\") as f:\n if file_format == \"csv\":\n df.to_csv(f, **pandas_kwargs)\n elif file_format == \"json\":\n df.to_json(f, **pandas_kwargs)\n\n\ndef to_parquet( # pylint: disable=too-many-arguments\n df: pd.DataFrame,\n path: str,\n index: bool = False,\n compression: Optional[str] = \"snappy\",\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n dataset: bool = False,\n partition_cols: Optional[List[str]] = None,\n mode: Optional[str] = None,\n database: Optional[str] = None,\n table: Optional[str] = None,\n dtype: Optional[Dict[str, str]] = None,\n description: Optional[str] = None,\n parameters: Optional[Dict[str, str]] = None,\n columns_comments: Optional[Dict[str, str]] = None,\n) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:\n \"\"\"Write Parquet file or dataset on Amazon S3.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).\n\n Note\n ----\n The table name and all column names will be automatically sanitize using\n `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n path : str\n S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).\n index : bool\n True to store the DataFrame index in file, otherwise False to ignore it.\n compression: str, optional\n Compression style (``None``, ``snappy``, ``gzip``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n dataset: bool\n If True store a parquet dataset instead of a single file.\n If True, enable all follow arguments:\n partition_cols, mode, database, table, description, parameters, columns_comments, .\n partition_cols: List[str], optional\n List of column names that will be used to create partitions. Only takes effect if dataset=True.\n mode: str, optional\n ``append`` (Default), ``overwrite``, ``overwrite_partitions``. 
Only takes effect if dataset=True.\n database : str, optional\n Glue/Athena catalog: Database name.\n table : str, optional\n Glue/Athena catalog: Table name.\n dtype: Dict[str, str], optional\n Dictionary of columns names and Athena/Glue types to be casted.\n Useful when you have columns with undetermined or mixed data types.\n Only takes effect if dataset=True.\n (e.g. {'col name': 'bigint', 'col2 name': 'int'})\n description: str, optional\n Glue/Athena catalog: Table description\n parameters: Dict[str, str], optional\n Glue/Athena catalog: Key/value pairs to tag the table.\n columns_comments: Dict[str, str], optional\n Glue/Athena catalog:\n Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).\n\n Returns\n -------\n Dict[str, Union[List[str], Dict[str, List[str]]]]\n Dictionary with:\n 'paths': List of all stored files paths on S3.\n 'partitions_values': Dictionary of partitions added with keys as S3 path locations\n and values as a list of partitions values as str.\n\n Examples\n --------\n Writing single file\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.parquet',\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.parquet'],\n 'partitions_values': {}\n }\n\n Writing single file encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... path='s3://bucket/prefix/my_file.parquet',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n {\n 'paths': ['s3://bucket/prefix/my_file.parquet'],\n 'partitions_values': {}\n }\n\n Writing partitioned dataset\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2']\n ... )\n {\n 'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset to S3 with metadata on Athena/Glue Catalog.\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B']\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... partition_cols=['col2'],\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... )\n {\n 'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],\n 'partitions_values: {\n 's3://.../col2=A/': ['A'],\n 's3://.../col2=B/': ['B']\n }\n }\n\n Writing dataset casting empty column data type\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.s3.to_parquet(\n ... df=pd.DataFrame({\n ... 'col': [1, 2, 3],\n ... 'col2': ['A', 'A', 'B'],\n ... 'col3': [None, None, None]\n ... }),\n ... path='s3://bucket/prefix',\n ... dataset=True,\n ... database='default', # Athena/Glue database\n ... table='my_table' # Athena/Glue table\n ... dtype={'col3': 'date'}\n ... 
)\n {\n 'paths': ['s3://.../x.parquet'],\n 'partitions_values: {}\n }\n\n \"\"\"\n if (database is None) ^ (table is None):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog.\"\n )\n if df.empty is True:\n raise exceptions.EmptyDataFrame()\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n partition_cols = partition_cols if partition_cols else []\n dtype = dtype if dtype else {}\n columns_comments = columns_comments if columns_comments else {}\n partitions_values: Dict[str, List[str]] = {}\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)\n compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)\n if compression_ext is None:\n raise exceptions.InvalidCompression(f\"{compression} is invalid, please use None, snappy or gzip.\")\n if dataset is False:\n if partition_cols:\n raise exceptions.InvalidArgumentCombination(\"Please, pass dataset=True to be able to use partition_cols.\")\n if mode is not None:\n raise exceptions.InvalidArgumentCombination(\"Please pass dataset=True to be able to use mode.\")\n if any(arg is not None for arg in (database, table, description, parameters)):\n raise exceptions.InvalidArgumentCombination(\n \"Please pass dataset=True to be able to use any one of these \"\n \"arguments: database, table, description, parameters, \"\n \"columns_comments.\"\n )\n paths = [\n _to_parquet_file(\n df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}\n )\n ]\n else:\n mode = \"append\" if mode is None else mode\n exist: bool = False\n if (database is not None) and (table is not None): # Normalize table to respect Athena's standards\n df = catalog.sanitize_dataframe_columns_names(df=df)\n partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]\n dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}\n columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}\n exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)\n if (exist is True) and (mode in (\"append\", \"overwrite_partitions\")):\n for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():\n dtype[k] = v\n df = catalog.drop_duplicated_columns(df=df)\n paths, partitions_values = _to_parquet_dataset(\n df=df,\n path=path,\n index=index,\n compression=compression,\n compression_ext=compression_ext,\n cpus=cpus,\n fs=fs,\n use_threads=use_threads,\n partition_cols=partition_cols,\n dtype=dtype,\n mode=mode,\n boto3_session=session,\n )\n if (database is not None) and (table is not None):\n columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(\n df=df, index=index, partition_cols=partition_cols, dtype=dtype\n )\n if (exist is False) or (mode == \"overwrite\"):\n catalog.create_parquet_table(\n database=database,\n table=table,\n path=path,\n columns_types=columns_types,\n partitions_types=partitions_types,\n compression=compression,\n description=description,\n parameters=parameters,\n columns_comments=columns_comments,\n boto3_session=session,\n mode=\"overwrite\",\n )\n if partitions_values:\n _logger.debug(f\"partitions_values:\\n{partitions_values}\")\n catalog.add_parquet_partitions(\n database=database,\n table=table,\n 
partitions_values=partitions_values,\n compression=compression,\n boto3_session=session,\n )\n return {\"paths\": paths, \"partitions_values\": partitions_values}\n\n\ndef _to_parquet_dataset(\n df: pd.DataFrame,\n path: str,\n index: bool,\n compression: Optional[str],\n compression_ext: str,\n cpus: int,\n fs: s3fs.S3FileSystem,\n use_threads: bool,\n mode: str,\n dtype: Dict[str, str],\n partition_cols: Optional[List[str]] = None,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[List[str], Dict[str, List[str]]]:\n paths: List[str] = []\n partitions_values: Dict[str, List[str]] = {}\n path = path if path[-1] == \"/\" else f\"{path}/\"\n if mode not in [\"append\", \"overwrite\", \"overwrite_partitions\"]:\n raise exceptions.InvalidArgumentValue(\n f\"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions.\"\n )\n if (mode == \"overwrite\") or ((mode == \"overwrite_partitions\") and (not partition_cols)):\n delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)\n df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)\n schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(\n df=df, index=index, ignore_cols=partition_cols, dtype=dtype\n )\n _logger.debug(f\"schema: {schema}\")\n if not partition_cols:\n file_path: str = f\"{path}{uuid.uuid4().hex}{compression_ext}.parquet\"\n _to_parquet_file(\n df=df, schema=schema, path=file_path, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype\n )\n paths.append(file_path)\n else:\n for keys, subgroup in df.groupby(by=partition_cols, observed=True):\n subgroup = subgroup.drop(partition_cols, axis=\"columns\")\n keys = (keys,) if not isinstance(keys, tuple) else keys\n subdir = \"/\".join([f\"{name}={val}\" for name, val in zip(partition_cols, keys)])\n prefix: str = f\"{path}{subdir}/\"\n if mode == \"overwrite_partitions\":\n delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)\n file_path = f\"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet\"\n _to_parquet_file(\n df=subgroup,\n schema=schema,\n path=file_path,\n index=index,\n compression=compression,\n cpus=cpus,\n fs=fs,\n dtype=dtype,\n )\n paths.append(file_path)\n partitions_values[prefix] = [str(k) for k in keys]\n return paths, partitions_values\n\n\ndef _to_parquet_file(\n df: pd.DataFrame,\n path: str,\n schema: pa.Schema,\n index: bool,\n compression: Optional[str],\n cpus: int,\n fs: s3fs.S3FileSystem,\n dtype: Dict[str, str],\n) -> str:\n table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)\n for col_name, col_type in dtype.items():\n if col_name in table.column_names:\n col_index = table.column_names.index(col_name)\n pyarrow_dtype = _data_types.athena2pyarrow(col_type)\n field = pa.field(name=col_name, type=pyarrow_dtype)\n table = table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))\n _logger.debug(f\"Casting column {col_name} ({col_index}) to {col_type} ({pyarrow_dtype})\")\n pyarrow.parquet.write_table(\n table=table,\n where=path,\n write_statistics=True,\n use_dictionary=True,\n filesystem=fs,\n coerce_timestamps=\"ms\",\n compression=compression,\n flavor=\"spark\",\n )\n return path\n\n\ndef read_csv(\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, 
Iterator[pd.DataFrame]]:\n \"\"\"Read CSV file(s) from from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n chunksize: int, optional\n If specified, return an generator where chunksize is the number of rows to include in each chunk.\n pandas_kwargs:\n keyword arguments forwarded to pandas.read_csv().\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunksize != None`.\n\n Examples\n --------\n Reading all CSV files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_csv(path='s3://bucket/prefix/')\n\n Reading all CSV files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_csv(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all CSV files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'])\n\n Reading in chunks of 100 lines\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunksize=100)\n >>> for df in dfs:\n >>> print(df) # 100 lines Pandas DataFrame\n\n \"\"\"\n return _read_text(\n parser_func=pd.read_csv,\n path=path,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n chunksize=chunksize,\n **pandas_kwargs,\n )\n\n\ndef read_fwf(\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read fixed-width formatted file(s) from from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n chunksize: int, optional\n If specified, return an generator where chunksize is the number of rows to include in each chunk.\n pandas_kwargs:\n keyword arguments forwarded to pandas.read_fwf().\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_fwf.html\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunksize != None`.\n\n Examples\n --------\n Reading all fixed-width formatted (FWF) files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_fwf(path='s3://bucket/prefix/')\n\n Reading all fixed-width formatted (FWF) files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_fwf(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all fixed-width formatted (FWF) files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'])\n\n Reading in chunks of 100 lines\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'], chunksize=100)\n >>> for df in dfs:\n >>> print(df) # 100 lines Pandas DataFrame\n\n \"\"\"\n return _read_text(\n parser_func=pd.read_fwf,\n path=path,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n chunksize=chunksize,\n **pandas_kwargs,\n )\n\n\ndef read_json(\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read JSON file(s) from from a received S3 prefix or list of S3 objects paths.\n\n Note\n ----\n For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. 
The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n chunksize: int, optional\n If specified, return an generator where chunksize is the number of rows to include in each chunk.\n pandas_kwargs:\n keyword arguments forwarded to pandas.read_json().\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunksize != None`.\n\n Examples\n --------\n Reading all JSON files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_json(path='s3://bucket/prefix/')\n\n Reading all JSON files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_json(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all JSON files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'])\n\n Reading in chunks of 100 lines\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'], chunksize=100)\n >>> for df in dfs:\n >>> print(df) # 100 lines Pandas DataFrame\n\n \"\"\"\n return _read_text(\n parser_func=pd.read_json,\n path=path,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n chunksize=chunksize,\n **pandas_kwargs,\n )\n\n\ndef _read_text(\n parser_func: Callable,\n path: Union[str, List[str]],\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n chunksize: Optional[int] = None,\n **pandas_kwargs,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n if \"iterator\" in pandas_kwargs:\n raise exceptions.InvalidArgument(\"Please, use chunksize instead of iterator.\")\n paths: List[str] = _path2list(path=path, boto3_session=boto3_session)\n if chunksize is not None:\n dfs: Iterator[pd.DataFrame] = _read_text_chunksize(\n parser_func=parser_func,\n paths=paths,\n boto3_session=boto3_session,\n chunksize=chunksize,\n pandas_args=pandas_kwargs,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n return dfs\n if use_threads is False:\n df: pd.DataFrame = pd.concat(\n objs=[\n _read_text_full(\n parser_func=parser_func,\n path=p,\n boto3_session=boto3_session,\n pandas_args=pandas_kwargs,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n for p in paths\n ],\n ignore_index=True,\n sort=False,\n )\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n df = pd.concat(\n objs=executor.map(\n _read_text_full,\n repeat(parser_func),\n paths,\n repeat(boto3_session),\n repeat(pandas_kwargs),\n repeat(s3_additional_kwargs),\n ),\n ignore_index=True,\n sort=False,\n )\n return df\n\n\ndef _read_text_chunksize(\n parser_func: Callable,\n paths: List[str],\n boto3_session: boto3.Session,\n chunksize: int,\n pandas_args: Dict[str, Any],\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> Iterator[pd.DataFrame]:\n fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n for path in paths:\n 
_logger.debug(f\"path: {path}\")\n if pandas_args.get(\"compression\", \"infer\") == \"infer\":\n pandas_args[\"compression\"] = infer_compression(path, compression=\"infer\")\n with fs.open(path, \"rb\") as f:\n reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **pandas_args)\n for df in reader:\n yield df\n\n\ndef _read_text_full(\n parser_func: Callable,\n path: str,\n boto3_session: boto3.Session,\n pandas_args: Dict[str, Any],\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> pd.DataFrame:\n fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n if pandas_args.get(\"compression\", \"infer\") == \"infer\":\n pandas_args[\"compression\"] = infer_compression(path, compression=\"infer\")\n with fs.open(path, \"rb\") as f:\n return parser_func(f, **pandas_args)\n\n\ndef _read_parquet_init(\n path: Union[str, List[str]],\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n categories: List[str] = None,\n validate_schema: bool = True,\n dataset: bool = False,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> pyarrow.parquet.ParquetDataset:\n \"\"\"Encapsulate all initialization before the use of the pyarrow.parquet.ParquetDataset.\"\"\"\n if dataset is False:\n path_or_paths: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)\n elif isinstance(path, str):\n path_or_paths = path[:-1] if path.endswith(\"/\") else path\n else:\n path_or_paths = path\n _logger.debug(f\"path_or_paths: {path_or_paths}\")\n fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n data: pyarrow.parquet.ParquetDataset = pyarrow.parquet.ParquetDataset(\n path_or_paths=path_or_paths,\n filesystem=fs,\n metadata_nthreads=cpus,\n filters=filters,\n read_dictionary=categories,\n validate_schema=validate_schema,\n )\n return data\n\n\ndef read_parquet(\n path: Union[str, List[str]],\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n columns: Optional[List[str]] = None,\n validate_schema: bool = True,\n chunked: bool = False,\n dataset: bool = False,\n categories: List[str] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read Apache Parquet file(s) from from a received S3 prefix or list of S3 objects paths.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning and catalog integration (AWS Glue Catalog).\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n columns : List[str], optional\n Names of columns to read from the file(s).\n validate_schema:\n Check that individual file schemas are all the same / compatible. Schemas within a\n folder prefix should all be the same. 
Disable if you have schemas that are different\n and want to disable this check.\n chunked : bool\n If True will break the data in smaller DataFrames (Non deterministic number of lines).\n Otherwise return a single DataFrame with the whole data.\n dataset: bool\n If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.\n categories: List[str], optional\n List of columns names that should be returned as pandas.Categorical.\n Recommended for memory restricted environments.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunked=True`.\n\n Examples\n --------\n Reading all Parquet files under a prefix\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet(path='s3://bucket/prefix/')\n\n Reading all Parquet files under a prefix encrypted with a KMS key\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet(\n ... path='s3://bucket/prefix/',\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading all Parquet files from a list\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'])\n\n Reading in chunks\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_parquet(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunked=True)\n >>> for df in dfs:\n >>> print(df) # Smaller Pandas DataFrame\n\n \"\"\"\n data: pyarrow.parquet.ParquetDataset = _read_parquet_init(\n path=path,\n filters=filters,\n dataset=dataset,\n categories=categories,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n validate_schema=validate_schema,\n )\n if chunked is False:\n return _read_parquet(\n data=data, columns=columns, categories=categories, use_threads=use_threads, validate_schema=validate_schema\n )\n return _read_parquet_chunked(data=data, columns=columns, categories=categories, use_threads=use_threads)\n\n\ndef _read_parquet(\n data: pyarrow.parquet.ParquetDataset,\n columns: Optional[List[str]] = None,\n categories: List[str] = None,\n use_threads: bool = True,\n validate_schema: bool = True,\n) -> pd.DataFrame:\n tables: List[pa.Table] = []\n for piece in data.pieces:\n table: pa.Table = piece.read(\n columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False\n )\n tables.append(table)\n promote: bool = not validate_schema\n table = pa.lib.concat_tables(tables, promote=promote)\n return table.to_pandas(\n use_threads=use_threads,\n split_blocks=True,\n self_destruct=True,\n integer_object_nulls=False,\n date_as_object=True,\n ignore_metadata=True,\n categories=categories,\n types_mapper=_data_types.pyarrow2pandas_extension,\n )\n\n\ndef _read_parquet_chunked(\n data: pyarrow.parquet.ParquetDataset,\n columns: Optional[List[str]] = None,\n categories: List[str] = None,\n use_threads: bool = True,\n) -> Iterator[pd.DataFrame]:\n for piece in data.pieces:\n table: pa.Table = 
piece.read(\n columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False\n )\n yield table.to_pandas(\n use_threads=use_threads,\n split_blocks=True,\n self_destruct=True,\n integer_object_nulls=False,\n date_as_object=True,\n ignore_metadata=True,\n categories=categories,\n types_mapper=_data_types.pyarrow2pandas_extension,\n )\n\n\ndef read_parquet_metadata(\n path: Union[str, List[str]],\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n dataset: bool = False,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:\n \"\"\"Read Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths.\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning and catalog integration (AWS Glue Catalog).\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n dataset: bool\n If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Tuple[Dict[str, str], Optional[Dict[str, str]]]\n columns_types: Dictionary with keys as column names and vales as\n data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /\n partitions_types: Dictionary with keys as partition names\n and values as data types (e.g. {'col2': 'date'}).\n\n Examples\n --------\n Reading all Parquet files (with partitions) metadata under a prefix\n\n >>> import awswrangler as wr\n >>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path='s3://bucket/prefix/', dataset=True)\n\n Reading all Parquet files metadata from a list\n\n >>> import awswrangler as wr\n >>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path=[\n ... 's3://bucket/filename0.parquet',\n ... 's3://bucket/filename1.parquet'\n ... 
])\n\n \"\"\"\n data: pyarrow.parquet.ParquetDataset = _read_parquet_init(\n path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session\n )\n return _data_types.athena_types_from_pyarrow_schema(\n schema=data.schema.to_arrow_schema(), partitions=data.partitions\n )\n\n\ndef store_parquet_metadata(\n path: str,\n database: str,\n table: str,\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n dataset: bool = False,\n use_threads: bool = True,\n description: Optional[str] = None,\n parameters: Optional[Dict[str, str]] = None,\n columns_comments: Optional[Dict[str, str]] = None,\n compression: Optional[str] = None,\n boto3_session: Optional[boto3.Session] = None,\n) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:\n \"\"\"Infer and store parquet metadata on AWS Glue Catalog.\n\n Infer Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths\n And then stores it on AWS Glue Catalog including all inferred partitions\n (No need of 'MCSK REPAIR TABLE')\n\n The concept of Dataset goes beyond the simple idea of files and enable more\n complex features like partitioning and catalog integration (AWS Glue Catalog).\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n path : Union[str, List[str]]\n S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n database : str\n Glue/Athena catalog: Database name.\n table : str\n Glue/Athena catalog: Table name.\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n dataset: bool\n If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n description: str, optional\n Glue/Athena catalog: Table description\n parameters: Dict[str, str], optional\n Glue/Athena catalog: Key/value pairs to tag the table.\n columns_comments: Dict[str, str], optional\n Glue/Athena catalog:\n Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).\n compression: str, optional\n Compression style (``None``, ``snappy``, ``gzip``, etc).\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]\n The metadata used to create the Glue Table.\n columns_types: Dictionary with keys as column names and vales as\n data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /\n partitions_types: Dictionary with keys as partition names\n and values as data types (e.g. {'col2': 'date'}). /\n partitions_values: Dictionary with keys as S3 path locations and values as a\n list of partitions values as str (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).\n\n Examples\n --------\n Reading all Parquet files metadata under a prefix\n\n >>> import awswrangler as wr\n >>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(\n ... path='s3://bucket/prefix/',\n ... database='...',\n ... table='...',\n ... dataset=True\n ... 
)\n\n \"\"\"\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n data: pyarrow.parquet.ParquetDataset = _read_parquet_init(\n path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session\n )\n partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions\n columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(\n schema=data.schema.to_arrow_schema(), partitions=partitions\n )\n catalog.create_parquet_table(\n database=database,\n table=table,\n path=path,\n columns_types=columns_types,\n partitions_types=partitions_types,\n description=description,\n parameters=parameters,\n columns_comments=columns_comments,\n boto3_session=session,\n )\n partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(\n path=path, partitions=partitions\n )\n catalog.add_parquet_partitions(\n database=database,\n table=table,\n partitions_values=partitions_values,\n compression=compression,\n boto3_session=session,\n )\n return columns_types, partitions_types, partitions_values\n\n\ndef wait_objects_exist(\n paths: List[str],\n delay: Optional[Union[int, float]] = None,\n max_attempts: Optional[int] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> None:\n \"\"\"Wait Amazon S3 objects exist.\n\n Polls S3.Client.head_object() every 5 seconds (default) until a successful\n state is reached. An error is returned after 20 (default) failed checks.\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n paths : List[str]\n List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).\n delay : Union[int,float], optional\n The amount of time in seconds to wait between attempts. Default: 5\n max_attempts : int, optional\n The maximum number of attempts to be made. Default: 20\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.wait_objects_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects\n\n \"\"\"\n return _wait_objects(\n waiter_name=\"object_exists\",\n paths=paths,\n delay=delay,\n max_attempts=max_attempts,\n use_threads=use_threads,\n boto3_session=boto3_session,\n )\n\n\ndef wait_objects_not_exist(\n paths: List[str],\n delay: Optional[Union[int, float]] = None,\n max_attempts: Optional[int] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> None:\n \"\"\"Wait Amazon S3 objects not exist.\n\n Polls S3.Client.head_object() every 5 seconds (default) until a successful\n state is reached. An error is returned after 20 (default) failed checks.\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectNotExists\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n paths : List[str]\n List of S3 objects paths (e.g. 
[s3://bucket/key0, s3://bucket/key1]).\n delay : Union[int,float], optional\n The amount of time in seconds to wait between attempts. Default: 5\n max_attempts : int, optional\n The maximum number of attempts to be made. Default: 20\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n None\n None.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.wait_objects_not_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects not exist\n\n \"\"\"\n return _wait_objects(\n waiter_name=\"object_not_exists\",\n paths=paths,\n delay=delay,\n max_attempts=max_attempts,\n use_threads=use_threads,\n boto3_session=boto3_session,\n )\n\n\ndef _wait_objects(\n waiter_name: str,\n paths: List[str],\n delay: Optional[Union[int, float]] = None,\n max_attempts: Optional[int] = None,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> None:\n delay = 5 if delay is None else delay\n max_attempts = 20 if max_attempts is None else max_attempts\n _delay: int = int(delay) if isinstance(delay, float) else delay\n\n if len(paths) < 1:\n return None\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n waiter = client_s3.get_waiter(waiter_name)\n _paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]\n if use_threads is False:\n for bucket, key in _paths:\n waiter.wait(Bucket=bucket, Key=key, WaiterConfig={\"Delay\": _delay, \"MaxAttempts\": max_attempts})\n else:\n cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:\n futures: List[concurrent.futures.Future] = []\n for bucket, key in _paths:\n future: concurrent.futures.Future = executor.submit(\n fn=waiter.wait, Bucket=bucket, Key=key, WaiterConfig={\"Delay\": _delay, \"MaxAttempts\": max_attempts}\n )\n futures.append(future)\n for future in futures:\n future.result()\n return None\n\n\ndef read_parquet_table(\n table: str,\n database: str,\n filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,\n columns: Optional[List[str]] = None,\n categories: List[str] = None,\n chunked: bool = False,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read Apache Parquet table registered on AWS Glue Catalog.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n table : str\n AWS Glue Catalog table name.\n database : str\n AWS Glue Catalog database name.\n filters: Union[List[Tuple], List[List[Tuple]]], optional\n List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.\n columns : List[str], optional\n Names of columns to read from the file(s).\n categories: List[str], optional\n List of columns names that should be returned as pandas.Categorical.\n Recommended for memory restricted environments.\n chunked : bool\n If True will break the data in smaller DataFrames (Non deterministic number of lines).\n Otherwise return a single DataFrame with the whole data.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled 
os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs:\n Forward to s3fs, useful for server side encryption\n https://s3fs.readthedocs.io/en/latest/#serverside-encryption\n\n Returns\n -------\n Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]\n Pandas DataFrame or a Generator in case of `chunked=True`.\n\n Examples\n --------\n Reading Parquet Table\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet_table(database='...', table='...')\n\n Reading Parquet Table encrypted\n\n >>> import awswrangler as wr\n >>> df = wr.s3.read_parquet_table(\n ... database='...',\n ... table='...'\n ... s3_additional_kwargs={\n ... 'ServerSideEncryption': 'aws:kms',\n ... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'\n ... }\n ... )\n\n Reading Parquet Table in chunks\n\n >>> import awswrangler as wr\n >>> dfs = wr.s3.read_parquet_table(database='...', table='...', chunked=True)\n >>> for df in dfs:\n >>> print(df) # Smaller Pandas DataFrame\n\n \"\"\"\n path: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)\n return read_parquet(\n path=path,\n filters=filters,\n columns=columns,\n categories=categories,\n chunked=chunked,\n dataset=True,\n use_threads=use_threads,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n\n\ndef merge_datasets(\n source_path: str,\n target_path: str,\n mode: str = \"append\",\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> List[str]:\n \"\"\"Merge a source dataset into a target dataset.\n\n Note\n ----\n If you are merging tables (S3 datasets + Glue Catalog metadata),\n remember that you will also need to update your partitions metadata in some cases.\n (e.g. wr.athena.repair_table(table='...', database='...'))\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n source_path : str,\n S3 Path for the source directory.\n target_path : str,\n S3 Path for the target directory.\n mode: str, optional\n ``append`` (Default), ``overwrite``, ``overwrite_partitions``.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n List[str]\n List of new objects paths.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.merge_datasets(\n ... source_path=\"s3://bucket0/dir0/\",\n ... target_path=\"s3://bucket1/dir1/\",\n ... mode=\"append\"\n ... 
)\n [\"s3://bucket1/dir1/key0\", \"s3://bucket1/dir1/key1\"]\n\n \"\"\"\n source_path = source_path[:-1] if source_path[-1] == \"/\" else source_path\n target_path = target_path[:-1] if target_path[-1] == \"/\" else target_path\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n\n paths: List[str] = list_objects(path=f\"{source_path}/\", boto3_session=session)\n _logger.debug(f\"len(paths): {len(paths)}\")\n if len(paths) < 1:\n return []\n\n if mode == \"overwrite\":\n _logger.debug(f\"Deleting to overwrite: {target_path}/\")\n delete_objects(path=f\"{target_path}/\", use_threads=use_threads, boto3_session=session)\n elif mode == \"overwrite_partitions\":\n paths_wo_prefix: List[str] = [x.replace(f\"{source_path}/\", \"\") for x in paths]\n paths_wo_filename: List[str] = [f\"{x.rpartition('/')[0]}/\" for x in paths_wo_prefix]\n partitions_paths: List[str] = list(set(paths_wo_filename))\n target_partitions_paths = [f\"{target_path}/{x}\" for x in partitions_paths]\n for path in target_partitions_paths:\n _logger.debug(f\"Deleting to overwrite_partitions: {path}\")\n delete_objects(path=path, use_threads=use_threads, boto3_session=session)\n elif mode != \"append\":\n raise exceptions.InvalidArgumentValue(f\"{mode} is a invalid mode option.\")\n\n new_objects: List[str] = copy_objects(\n paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session\n )\n _logger.debug(f\"len(new_objects): {len(new_objects)}\")\n return new_objects\n\n\ndef copy_objects(\n paths: List[str],\n source_path: str,\n target_path: str,\n use_threads: bool = True,\n boto3_session: Optional[boto3.Session] = None,\n) -> List[str]:\n \"\"\"Copy a list of S3 objects to another S3 directory.\n\n Note\n ----\n In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().\n\n Parameters\n ----------\n paths : List[str]\n List of S3 objects paths (e.g. [s3://bucket/dir0/key0, s3://bucket/dir0/key1]).\n source_path : str,\n S3 Path for the source directory.\n target_path : str,\n S3 Path for the target directory.\n use_threads : bool\n True to enable concurrent requests, False to disable multiple threads.\n If enabled os.cpu_count() will be used as the max number of threads.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n\n Returns\n -------\n List[str]\n List of new objects paths.\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> wr.s3.copy_objects(\n ... paths=[\"s3://bucket0/dir0/key0\", \"s3://bucket0/dir0/key1\"])\n ... source_path=\"s3://bucket0/dir0/\",\n ... target_path=\"s3://bucket1/dir1/\",\n ... 
)\n [\"s3://bucket1/dir1/key0\", \"s3://bucket1/dir1/key1\"]\n\n \"\"\"\n _logger.debug(f\"len(paths): {len(paths)}\")\n if len(paths) < 1:\n return []\n source_path = source_path[:-1] if source_path[-1] == \"/\" else source_path\n target_path = target_path[:-1] if target_path[-1] == \"/\" else target_path\n session: boto3.Session = _utils.ensure_session(session=boto3_session)\n batch: List[Tuple[str, str]] = []\n new_objects: List[str] = []\n for path in paths:\n path_wo_prefix: str = path.replace(f\"{source_path}/\", \"\")\n path_final: str = f\"{target_path}/{path_wo_prefix}\"\n new_objects.append(path_final)\n batch.append((path, path_final))\n _logger.debug(f\"len(new_objects): {len(new_objects)}\")\n _copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)\n return new_objects\n\n\ndef _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:\n _logger.debug(f\"len(batch): {len(batch)}\")\n client_s3: boto3.client = _utils.client(service_name=\"s3\", session=boto3_session)\n resource_s3: boto3.resource = _utils.resource(service_name=\"s3\", session=boto3_session)\n for source, target in batch:\n source_bucket, source_key = _utils.parse_path(path=source)\n copy_source: Dict[str, str] = {\"Bucket\": source_bucket, \"Key\": source_key}\n target_bucket, target_key = _utils.parse_path(path=target)\n resource_s3.meta.client.copy(\n CopySource=copy_source,\n Bucket=target_bucket,\n Key=target_key,\n SourceClient=client_s3,\n Config=TransferConfig(num_download_attempts=15, use_threads=use_threads),\n )\n"
] | [
[
"pandas.io.common.infer_compression"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.0",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Llambi/Web_Semantica | [
"16f98a7d78ba08366a67caf2bd44f3f45af6ee21"
] | [
"IndexerQuery/model/QueryAnalizer.py"
] | [
"import numpy as np\n\nfrom model.indexer_v1 import Indexer\n\n\nclass QueryAnalizer:\n def __init__(self, query, document_list, enable_stemming=True, filter_stopwords=True):\n self.__query = Indexer([query], enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)\n self.__indexer = Indexer(document_list, enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)\n self.result = None\n\n def cosine_similarity(self):\n if self.result is not None:\n return self.result\n\n result = {}\n for query_term, value in self.__query.words_index.items():\n indexer_term = self.__indexer.words_index[query_term]\n\n tf_idf_query_term = self.__query.words_index[query_term][\"idf\"] * \\\n self.__query.words_index[query_term][\"documents\"][0][\"tf\"]\n\n tf_documents = list(map(lambda doc: doc[\"tf\"], indexer_term[\"documents\"]))\n\n dot_product = np.dot(tf_idf_query_term, tf_documents)\n\n result[query_term] = list(zip(\n list(\n map(\n lambda doc: doc[\"document\"].text,\n indexer_term[\"documents\"]))\n ,\n list(\n map(\n lambda elem: elem / (np.linalg.norm(tf_idf_query_term) + np.linalg.norm(tf_documents)),\n dot_product\n ))\n ))\n self.result = result\n for key, elm in self.result.items():\n self.result[key] = sorted(elm, key=lambda tup: tup[1], reverse=True)\n return self.result\n"
] | [
[
"numpy.dot",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dolboBobo/python3_ios | [
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee",
"e149f1bc2e50046c8810f83dae7739a8dea939ee"
] | [
"extraPackages/matplotlib-3.0.3/examples/images_contours_and_fields/contourf_log.py",
"extraPackages/matplotlib-3.0.3/examples/subplots_axes_and_figures/zoom_inset_axes.py",
"extraPackages/matplotlib-3.0.3/examples/pie_and_polar_charts/polar_legend.py",
"extraPackages/matplotlib-3.0.3/examples/specialty_plots/leftventricle_bulleye.py",
"extraPackages/pyzmq-17.1.2/examples/heartbeat/ping.py",
"extraPackages/matplotlib-3.0.3/examples/axisartist/simple_axis_direction01.py",
"extraPackages/matplotlib-3.0.2/examples/axes_grid1/demo_colorbar_with_inset_locator.py",
"extraPackages/matplotlib-3.0.3/examples/userdemo/annotate_simple01.py",
"extraPackages/matplotlib-3.0.3/examples/images_contours_and_fields/quiver_simple_demo.py",
"extraPackages/matplotlib-3.0.3/examples/images_contours_and_fields/triinterp_demo.py",
"extraPackages/matplotlib-3.0.3/examples/images_contours_and_fields/interpolation_methods.py",
"extraPackages/matplotlib-3.0.3/examples/axisartist/simple_axisartist1.py",
"extraPackages/matplotlib-3.0.3/examples/misc/demo_agg_filter.py",
"extraPackages/matplotlib-3.0.3/examples/subplots_axes_and_figures/figure_title.py",
"extraPackages/matplotlib-3.0.3/examples/shapes_and_collections/arrow_guide.py",
"extraPackages/matplotlib-3.0.3/examples/pyplots/whats_new_99_spines.py"
] | [
"\"\"\"\n============================\nContourf and log color scale\n============================\n\nDemonstrate use of a log color scale in contourf\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import ma\nfrom matplotlib import ticker, cm\n\nN = 100\nx = np.linspace(-3.0, 3.0, N)\ny = np.linspace(-2.0, 2.0, N)\n\nX, Y = np.meshgrid(x, y)\n\n# A low hump with a spike coming out.\n# Needs to have z/colour axis on a log scale so we see both hump and spike.\n# linear scale only shows the spike.\nZ1 = np.exp(-(X)**2 - (Y)**2)\nZ2 = np.exp(-(X * 10)**2 - (Y * 10)**2)\nz = Z1 + 50 * Z2\n\n# Put in some negative values (lower left corner) to cause trouble with logs:\nz[:5, :5] = -1\n\n# The following is not strictly essential, but it will eliminate\n# a warning. Comment it out to see the warning.\nz = ma.masked_where(z <= 0, z)\n\n\n# Automatic selection of levels works; setting the\n# log locator tells contourf to use a log scale:\nfig, ax = plt.subplots()\ncs = ax.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)\n\n# Alternatively, you can manually set the levels\n# and the norm:\n# lev_exp = np.arange(np.floor(np.log10(z.min())-1),\n# np.ceil(np.log10(z.max())+1))\n# levs = np.power(10, lev_exp)\n# cs = ax.contourf(X, Y, z, levs, norm=colors.LogNorm())\n\ncbar = fig.colorbar(cs)\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions, methods and classes is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.contourf\nmatplotlib.pyplot.contourf\nmatplotlib.figure.Figure.colorbar\nmatplotlib.pyplot.colorbar\nmatplotlib.axes.Axes.legend\nmatplotlib.pyplot.legend\nmatplotlib.ticker.LogLocator\n",
"\"\"\"\n======================\nZoom region inset axes\n======================\n\nExample of an inset axes and a rectangle showing where the zoom is located.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef get_demo_image():\n from matplotlib.cbook import get_sample_data\n import numpy as np\n f = get_sample_data(\"axes_grid/bivariate_normal.npy\", asfileobj=False)\n z = np.load(f)\n # z is a numpy array of 15x15\n return z, (-3, 4, -4, 3)\n\nfig, ax = plt.subplots(figsize=[5, 4])\n\n# make data\nZ, extent = get_demo_image()\nZ2 = np.zeros([150, 150], dtype=\"d\")\nny, nx = Z.shape\nZ2[30:30 + ny, 30:30 + nx] = Z\n\nax.imshow(Z2, extent=extent, interpolation=\"nearest\",\n origin=\"lower\")\n\n# inset axes....\naxins = ax.inset_axes([0.5, 0.5, 0.47, 0.47])\naxins.imshow(Z2, extent=extent, interpolation=\"nearest\",\n origin=\"lower\")\n# sub region of the original image\nx1, x2, y1, y2 = -1.5, -0.9, -2.5, -1.9\naxins.set_xlim(x1, x2)\naxins.set_ylim(y1, y2)\naxins.set_xticklabels('')\naxins.set_yticklabels('')\n\nax.indicate_inset_zoom(axins)\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions and methods is shown in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.inset_axes\nmatplotlib.axes.Axes.indicate_inset_zoom\nmatplotlib.axes.Axes.imshow\n",
"\"\"\"\n============\nPolar Legend\n============\n\nDemo of a legend on a polar-axis plot.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# radar green, solid grid lines\nplt.rc('grid', color='#316931', linewidth=1, linestyle='-')\nplt.rc('xtick', labelsize=15)\nplt.rc('ytick', labelsize=15)\n\n# force square figure and square axes looks better for polar, IMO\nfig = plt.figure(figsize=(8, 8))\nax = fig.add_axes([0.1, 0.1, 0.8, 0.8],\n projection='polar', facecolor='#d5de9c')\n\nr = np.arange(0, 3.0, 0.01)\ntheta = 2 * np.pi * r\nax.plot(theta, r, color='#ee8d18', lw=3, label='a line')\nax.plot(0.5 * theta, r, color='blue', ls='--', lw=3, label='another line')\nax.legend()\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.plot\nmatplotlib.axes.Axes.legend\nmatplotlib.projections.polar\nmatplotlib.projections.polar.PolarAxes\n",
"\"\"\"\n=====================\nLeftventricle Bulleye\n=====================\n\nThis example demonstrates how to create the 17 segment model for the left\nventricle recommended by the American Heart Association (AHA).\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\ndef bullseye_plot(ax, data, segBold=None, cmap=None, norm=None):\n \"\"\"\n Bullseye representation for the left ventricle.\n\n Parameters\n ----------\n ax : axes\n data : list of int and float\n The intensity values for each of the 17 segments\n segBold: list of int, optional\n A list with the segments to highlight\n cmap : ColorMap or None, optional\n Optional argument to set the desired colormap\n norm : Normalize or None, optional\n Optional argument to normalize data into the [0.0, 1.0] range\n\n\n Notes\n -----\n This function create the 17 segment model for the left ventricle according\n to the American Heart Association (AHA) [1]_\n\n References\n ----------\n .. [1] M. D. Cerqueira, N. J. Weissman, V. Dilsizian, A. K. Jacobs,\n S. Kaul, W. K. Laskey, D. J. Pennell, J. A. Rumberger, T. Ryan,\n and M. S. Verani, \"Standardized myocardial segmentation and\n nomenclature for tomographic imaging of the heart\",\n Circulation, vol. 105, no. 4, pp. 539-542, 2002.\n \"\"\"\n if segBold is None:\n segBold = []\n\n linewidth = 2\n data = np.array(data).ravel()\n\n if cmap is None:\n cmap = plt.cm.viridis\n\n if norm is None:\n norm = mpl.colors.Normalize(vmin=data.min(), vmax=data.max())\n\n theta = np.linspace(0, 2 * np.pi, 768)\n r = np.linspace(0.2, 1, 4)\n\n # Create the bound for the segment 17\n for i in range(r.shape[0]):\n ax.plot(theta, np.repeat(r[i], theta.shape), '-k', lw=linewidth)\n\n # Create the bounds for the segments 1-12\n for i in range(6):\n theta_i = np.deg2rad(i * 60)\n ax.plot([theta_i, theta_i], [r[1], 1], '-k', lw=linewidth)\n\n # Create the bounds for the segments 13-16\n for i in range(4):\n theta_i = np.deg2rad(i * 90 - 45)\n ax.plot([theta_i, theta_i], [r[0], r[1]], '-k', lw=linewidth)\n\n # Fill the segments 1-6\n r0 = r[2:4]\n r0 = np.repeat(r0[:, np.newaxis], 128, axis=1).T\n for i in range(6):\n # First segment start at 60 degrees\n theta0 = theta[i * 128:i * 128 + 128] + np.deg2rad(60)\n theta0 = np.repeat(theta0[:, np.newaxis], 2, axis=1)\n z = np.ones((128, 2)) * data[i]\n ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)\n if i + 1 in segBold:\n ax.plot(theta0, r0, '-k', lw=linewidth + 2)\n ax.plot(theta0[0], [r[2], r[3]], '-k', lw=linewidth + 1)\n ax.plot(theta0[-1], [r[2], r[3]], '-k', lw=linewidth + 1)\n\n # Fill the segments 7-12\n r0 = r[1:3]\n r0 = np.repeat(r0[:, np.newaxis], 128, axis=1).T\n for i in range(6):\n # First segment start at 60 degrees\n theta0 = theta[i * 128:i * 128 + 128] + np.deg2rad(60)\n theta0 = np.repeat(theta0[:, np.newaxis], 2, axis=1)\n z = np.ones((128, 2)) * data[i + 6]\n ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)\n if i + 7 in segBold:\n ax.plot(theta0, r0, '-k', lw=linewidth + 2)\n ax.plot(theta0[0], [r[1], r[2]], '-k', lw=linewidth + 1)\n ax.plot(theta0[-1], [r[1], r[2]], '-k', lw=linewidth + 1)\n\n # Fill the segments 13-16\n r0 = r[0:2]\n r0 = np.repeat(r0[:, np.newaxis], 192, axis=1).T\n for i in range(4):\n # First segment start at 45 degrees\n theta0 = theta[i * 192:i * 192 + 192] + np.deg2rad(45)\n theta0 = np.repeat(theta0[:, np.newaxis], 2, axis=1)\n z = np.ones((192, 2)) * data[i + 12]\n ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)\n if i + 13 in segBold:\n ax.plot(theta0, r0, '-k', 
lw=linewidth + 2)\n ax.plot(theta0[0], [r[0], r[1]], '-k', lw=linewidth + 1)\n ax.plot(theta0[-1], [r[0], r[1]], '-k', lw=linewidth + 1)\n\n # Fill the segments 17\n if data.size == 17:\n r0 = np.array([0, r[0]])\n r0 = np.repeat(r0[:, np.newaxis], theta.size, axis=1).T\n theta0 = np.repeat(theta[:, np.newaxis], 2, axis=1)\n z = np.ones((theta.size, 2)) * data[16]\n ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)\n if 17 in segBold:\n ax.plot(theta0, r0, '-k', lw=linewidth + 2)\n\n ax.set_ylim([0, 1])\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n\n\n# Create the fake data\ndata = np.array(range(17)) + 1\n\n\n# Make a figure and axes with dimensions as desired.\nfig, ax = plt.subplots(figsize=(12, 8), nrows=1, ncols=3,\n subplot_kw=dict(projection='polar'))\nfig.canvas.set_window_title('Left Ventricle Bulls Eyes (AHA)')\n\n# Create the axis for the colorbars\naxl = fig.add_axes([0.14, 0.15, 0.2, 0.05])\naxl2 = fig.add_axes([0.41, 0.15, 0.2, 0.05])\naxl3 = fig.add_axes([0.69, 0.15, 0.2, 0.05])\n\n\n# Set the colormap and norm to correspond to the data for which\n# the colorbar will be used.\ncmap = mpl.cm.viridis\nnorm = mpl.colors.Normalize(vmin=1, vmax=17)\n\n# ColorbarBase derives from ScalarMappable and puts a colorbar\n# in a specified axes, so it has everything needed for a\n# standalone colorbar. There are many more kwargs, but the\n# following gives a basic continuous colorbar with ticks\n# and labels.\ncb1 = mpl.colorbar.ColorbarBase(axl, cmap=cmap, norm=norm,\n orientation='horizontal')\ncb1.set_label('Some Units')\n\n\n# Set the colormap and norm to correspond to the data for which\n# the colorbar will be used.\ncmap2 = mpl.cm.cool\nnorm2 = mpl.colors.Normalize(vmin=1, vmax=17)\n\n# ColorbarBase derives from ScalarMappable and puts a colorbar\n# in a specified axes, so it has everything needed for a\n# standalone colorbar. There are many more kwargs, but the\n# following gives a basic continuous colorbar with ticks\n# and labels.\ncb2 = mpl.colorbar.ColorbarBase(axl2, cmap=cmap2, norm=norm2,\n orientation='horizontal')\ncb2.set_label('Some other units')\n\n\n# The second example illustrates the use of a ListedColormap, a\n# BoundaryNorm, and extended ends to show the \"over\" and \"under\"\n# value colors.\ncmap3 = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])\ncmap3.set_over('0.35')\ncmap3.set_under('0.75')\n\n# If a ListedColormap is used, the length of the bounds array must be\n# one greater than the length of the color list. The bounds must be\n# monotonically increasing.\nbounds = [2, 3, 7, 9, 15]\nnorm3 = mpl.colors.BoundaryNorm(bounds, cmap3.N)\ncb3 = mpl.colorbar.ColorbarBase(axl3, cmap=cmap3, norm=norm3,\n # to use 'extend', you must\n # specify two extra boundaries:\n boundaries=[0] + bounds + [18],\n extend='both',\n ticks=bounds, # optional\n spacing='proportional',\n orientation='horizontal')\ncb3.set_label('Discrete intervals, some other units')\n\n\n# Create the 17 segment model\nbullseye_plot(ax[0], data, cmap=cmap, norm=norm)\nax[0].set_title('Bulls Eye (AHA)')\n\nbullseye_plot(ax[1], data, cmap=cmap2, norm=norm2)\nax[1].set_title('Bulls Eye (AHA)')\n\nbullseye_plot(ax[2], data, segBold=[3, 5, 6, 11, 12, 16],\n cmap=cmap3, norm=norm3)\nax[2].set_title('Segments [3,5,6,11,12,16] in bold')\n\nplt.show()\n",
"#!/usr/bin/env python\n\"\"\"For use with pong.py\n\nThis script simply pings a process started by pong.py or tspong.py, to \ndemonstrate that zmq remains responsive while Python blocks.\n\nAuthors\n-------\n* MinRK\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport time\nimport numpy\nimport zmq\n\nctx = zmq.Context()\n\nreq = ctx.socket(zmq.REQ)\nreq.connect('tcp://127.0.0.1:10111')\n\n#wait for connects\ntime.sleep(1)\nn=0\nwhile True:\n time.sleep(numpy.random.random())\n for i in range(4):\n n+=1\n msg = 'ping %i' % n\n tic = time.time()\n req.send_string(msg)\n resp = req.recv_string()\n print(\"%s: %.2f ms\" % (msg, 1000*(time.time()-tic)))\n assert msg == resp\n",
"\"\"\"\n=======================\nSimple Axis Direction01\n=======================\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.axisartist as axisartist\n\nfig = plt.figure(figsize=(4, 2.5))\nax1 = fig.add_subplot(axisartist.Subplot(fig, \"111\"))\nfig.subplots_adjust(right=0.8)\n\nax1.axis[\"left\"].major_ticklabels.set_axis_direction(\"top\")\nax1.axis[\"left\"].label.set_text(\"Label\")\n\nax1.axis[\"right\"].label.set_visible(True)\nax1.axis[\"right\"].label.set_text(\"Label\")\nax1.axis[\"right\"].label.set_axis_direction(\"left\")\n\nplt.show()\n",
"\"\"\"\n================================\nDemo Colorbar With Inset Locator\n================================\n\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])\n\naxins1 = inset_axes(ax1,\n width=\"50%\", # width = 10% of parent_bbox width\n height=\"5%\", # height : 50%\n loc='upper right')\n\nim1 = ax1.imshow([[1, 2], [2, 3]])\nplt.colorbar(im1, cax=axins1, orientation=\"horizontal\", ticks=[1, 2, 3])\naxins1.xaxis.set_ticks_position(\"bottom\")\n\naxins = inset_axes(ax2,\n width=\"5%\", # width = 10% of parent_bbox width\n height=\"50%\", # height : 50%\n loc='lower left',\n bbox_to_anchor=(1.05, 0., 1, 1),\n bbox_transform=ax2.transAxes,\n borderpad=0,\n )\n\n# Controlling the placement of the inset axes is basically same as that\n# of the legend. you may want to play with the borderpad value and\n# the bbox_to_anchor coordinate.\n\nim = ax2.imshow([[1, 2], [2, 3]])\nplt.colorbar(im, cax=axins, ticks=[1, 2, 3])\n\nplt.show()\n",
"\"\"\"\n=================\nAnnotate Simple01\n=================\n\n\"\"\"\nimport matplotlib.pyplot as plt\n\n\nfig, ax = plt.subplots(figsize=(3, 3))\n\nax.annotate(\"\",\n xy=(0.2, 0.2), xycoords='data',\n xytext=(0.8, 0.8), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"arc3\"),\n )\n\nplt.show()\n",
"\"\"\"\n==================\nQuiver Simple Demo\n==================\n\nA simple example of a `~.axes.Axes.quiver` plot with a `~.axes.Axes.quiverkey`.\n\nFor more advanced options refer to\n:doc:`/gallery/images_contours_and_fields/quiver_demo`.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nX = np.arange(-10, 10, 1)\nY = np.arange(-10, 10, 1)\nU, V = np.meshgrid(X, Y)\n\nfig, ax = plt.subplots()\nq = ax.quiver(X, Y, U, V)\nax.quiverkey(q, X=0.3, Y=1.1, U=10,\n label='Quiver key, length = 10', labelpos='E')\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions and methods is shown in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.quiver\nmatplotlib.pyplot.quiver\nmatplotlib.axes.Axes.quiverkey\nmatplotlib.pyplot.quiverkey\n",
"\"\"\"\n==============\nTriinterp Demo\n==============\n\nInterpolation from triangular grid to quad grid.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as mtri\nimport numpy as np\n\n# Create triangulation.\nx = np.asarray([0, 1, 2, 3, 0.5, 1.5, 2.5, 1, 2, 1.5])\ny = np.asarray([0, 0, 0, 0, 1.0, 1.0, 1.0, 2, 2, 3.0])\ntriangles = [[0, 1, 4], [1, 2, 5], [2, 3, 6], [1, 5, 4], [2, 6, 5], [4, 5, 7],\n [5, 6, 8], [5, 8, 7], [7, 8, 9]]\ntriang = mtri.Triangulation(x, y, triangles)\n\n# Interpolate to regularly-spaced quad grid.\nz = np.cos(1.5 * x) * np.cos(1.5 * y)\nxi, yi = np.meshgrid(np.linspace(0, 3, 20), np.linspace(0, 3, 20))\n\ninterp_lin = mtri.LinearTriInterpolator(triang, z)\nzi_lin = interp_lin(xi, yi)\n\ninterp_cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')\nzi_cubic_geom = interp_cubic_geom(xi, yi)\n\ninterp_cubic_min_E = mtri.CubicTriInterpolator(triang, z, kind='min_E')\nzi_cubic_min_E = interp_cubic_min_E(xi, yi)\n\n# Set up the figure\nfig, axs = plt.subplots(nrows=2, ncols=2)\naxs = axs.flatten()\n\n# Plot the triangulation.\naxs[0].tricontourf(triang, z)\naxs[0].triplot(triang, 'ko-')\naxs[0].set_title('Triangular grid')\n\n# Plot linear interpolation to quad grid.\naxs[1].contourf(xi, yi, zi_lin)\naxs[1].plot(xi, yi, 'k-', lw=0.5, alpha=0.5)\naxs[1].plot(xi.T, yi.T, 'k-', lw=0.5, alpha=0.5)\naxs[1].set_title(\"Linear interpolation\")\n\n# Plot cubic interpolation to quad grid, kind=geom\naxs[2].contourf(xi, yi, zi_cubic_geom)\naxs[2].plot(xi, yi, 'k-', lw=0.5, alpha=0.5)\naxs[2].plot(xi.T, yi.T, 'k-', lw=0.5, alpha=0.5)\naxs[2].set_title(\"Cubic interpolation,\\nkind='geom'\")\n\n# Plot cubic interpolation to quad grid, kind=min_E\naxs[3].contourf(xi, yi, zi_cubic_min_E)\naxs[3].plot(xi, yi, 'k-', lw=0.5, alpha=0.5)\naxs[3].plot(xi.T, yi.T, 'k-', lw=0.5, alpha=0.5)\naxs[3].set_title(\"Cubic interpolation,\\nkind='min_E'\")\n\nfig.tight_layout()\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.tricontourf\nmatplotlib.pyplot.tricontourf\nmatplotlib.axes.Axes.triplot\nmatplotlib.pyplot.triplot\nmatplotlib.axes.Axes.contourf\nmatplotlib.pyplot.contourf\nmatplotlib.axes.Axes.plot\nmatplotlib.pyplot.plot\nmatplotlib.tri\nmatplotlib.tri.LinearTriInterpolator\nmatplotlib.tri.CubicTriInterpolator\nmatplotlib.tri.Triangulation\n",
"\"\"\"\n=================================\nInterpolations for imshow/matshow\n=================================\n\nThis example displays the difference between interpolation methods for\n:meth:`~.axes.Axes.imshow` and :meth:`~.axes.Axes.matshow`.\n\nIf `interpolation` is None, it defaults to the ``image.interpolation``\n:doc:`rc parameter </tutorials/introductory/customizing>`.\nIf the interpolation is ``'none'``, then no interpolation is performed\nfor the Agg, ps and pdf backends. Other backends will default to ``'nearest'``.\n\nFor the Agg, ps and pdf backends, ``interpolation = 'none'`` works well when a\nbig image is scaled down, while ``interpolation = 'nearest'`` works well when\na small image is scaled up.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmethods = [None, 'none', 'nearest', 'bilinear', 'bicubic', 'spline16',\n 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',\n 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\ngrid = np.random.rand(4, 4)\n\nfig, axs = plt.subplots(nrows=3, ncols=6, figsize=(9.3, 6),\n subplot_kw={'xticks': [], 'yticks': []})\n\nfig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\nfor ax, interp_method in zip(axs.flat, methods):\n ax.imshow(grid, interpolation=interp_method, cmap='viridis')\n ax.set_title(str(interp_method))\n\nplt.tight_layout()\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions and methods is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.axes.Axes.imshow\nmatplotlib.pyplot.imshow\n",
"\"\"\"\n==================\nSimple Axisartist1\n==================\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.axisartist as AA\n\nfig = plt.figure(1)\nfig.subplots_adjust(right=0.85)\nax = AA.Subplot(fig, 1, 1, 1)\nfig.add_subplot(ax)\n\n# make some axis invisible\nax.axis[\"bottom\", \"top\", \"right\"].set_visible(False)\n\n# make an new axis along the first axis axis (x-axis) which pass\n# through y=0.\nax.axis[\"y=0\"] = ax.new_floating_axis(nth_coord=0, value=0,\n axis_direction=\"bottom\")\nax.axis[\"y=0\"].toggle(all=True)\nax.axis[\"y=0\"].label.set_text(\"y = 0\")\n\nax.set_ylim(-2, 4)\n\nplt.show()\n",
"\"\"\"\n===============\nDemo Agg Filter\n===============\n\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.transforms as mtransforms\nfrom matplotlib.colors import LightSource\nfrom matplotlib.artist import Artist\n\n\ndef smooth1d(x, window_len):\n # copied from http://www.scipy.org/Cookbook/SignalSmooth\n\n s = np.r_[2*x[0] - x[window_len:1:-1], x, 2*x[-1] - x[-1:-window_len:-1]]\n w = np.hanning(window_len)\n y = np.convolve(w/w.sum(), s, mode='same')\n return y[window_len-1:-window_len+1]\n\n\ndef smooth2d(A, sigma=3):\n\n window_len = max(int(sigma), 3)*2 + 1\n A1 = np.array([smooth1d(x, window_len) for x in np.asarray(A)])\n A2 = np.transpose(A1)\n A3 = np.array([smooth1d(x, window_len) for x in A2])\n A4 = np.transpose(A3)\n\n return A4\n\n\nclass BaseFilter(object):\n def prepare_image(self, src_image, dpi, pad):\n ny, nx, depth = src_image.shape\n # tgt_image = np.zeros([pad*2+ny, pad*2+nx, depth], dtype=\"d\")\n padded_src = np.zeros([pad*2 + ny, pad*2 + nx, depth], dtype=\"d\")\n padded_src[pad:-pad, pad:-pad, :] = src_image[:, :, :]\n\n return padded_src # , tgt_image\n\n def get_pad(self, dpi):\n return 0\n\n def __call__(self, im, dpi):\n pad = self.get_pad(dpi)\n padded_src = self.prepare_image(im, dpi, pad)\n tgt_image = self.process_image(padded_src, dpi)\n return tgt_image, -pad, -pad\n\n\nclass OffsetFilter(BaseFilter):\n def __init__(self, offsets=None):\n if offsets is None:\n self.offsets = (0, 0)\n else:\n self.offsets = offsets\n\n def get_pad(self, dpi):\n return int(max(*self.offsets)/72.*dpi)\n\n def process_image(self, padded_src, dpi):\n ox, oy = self.offsets\n a1 = np.roll(padded_src, int(ox/72.*dpi), axis=1)\n a2 = np.roll(a1, -int(oy/72.*dpi), axis=0)\n return a2\n\n\nclass GaussianFilter(BaseFilter):\n \"simple gauss filter\"\n\n def __init__(self, sigma, alpha=0.5, color=None):\n self.sigma = sigma\n self.alpha = alpha\n if color is None:\n self.color = (0, 0, 0)\n else:\n self.color = color\n\n def get_pad(self, dpi):\n return int(self.sigma*3/72.*dpi)\n\n def process_image(self, padded_src, dpi):\n # offsetx, offsety = int(self.offsets[0]), int(self.offsets[1])\n tgt_image = np.zeros_like(padded_src)\n aa = smooth2d(padded_src[:, :, -1]*self.alpha,\n self.sigma/72.*dpi)\n tgt_image[:, :, -1] = aa\n tgt_image[:, :, :-1] = self.color\n return tgt_image\n\n\nclass DropShadowFilter(BaseFilter):\n def __init__(self, sigma, alpha=0.3, color=None, offsets=None):\n self.gauss_filter = GaussianFilter(sigma, alpha, color)\n self.offset_filter = OffsetFilter(offsets)\n\n def get_pad(self, dpi):\n return max(self.gauss_filter.get_pad(dpi),\n self.offset_filter.get_pad(dpi))\n\n def process_image(self, padded_src, dpi):\n t1 = self.gauss_filter.process_image(padded_src, dpi)\n t2 = self.offset_filter.process_image(t1, dpi)\n return t2\n\n\nclass LightFilter(BaseFilter):\n \"simple gauss filter\"\n\n def __init__(self, sigma, fraction=0.5):\n self.gauss_filter = GaussianFilter(sigma, alpha=1)\n self.light_source = LightSource()\n self.fraction = fraction\n\n def get_pad(self, dpi):\n return self.gauss_filter.get_pad(dpi)\n\n def process_image(self, padded_src, dpi):\n t1 = self.gauss_filter.process_image(padded_src, dpi)\n elevation = t1[:, :, 3]\n rgb = padded_src[:, :, :3]\n\n rgb2 = self.light_source.shade_rgb(rgb, elevation,\n fraction=self.fraction)\n\n tgt = np.empty_like(padded_src)\n tgt[:, :, :3] = rgb2\n tgt[:, :, 3] = padded_src[:, :, 3]\n\n return tgt\n\n\nclass GrowFilter(BaseFilter):\n 
\"enlarge the area\"\n\n def __init__(self, pixels, color=None):\n self.pixels = pixels\n if color is None:\n self.color = (1, 1, 1)\n else:\n self.color = color\n\n def __call__(self, im, dpi):\n pad = self.pixels\n ny, nx, depth = im.shape\n new_im = np.empty([pad*2 + ny, pad*2 + nx, depth], dtype=\"d\")\n alpha = new_im[:, :, 3]\n alpha.fill(0)\n alpha[pad:-pad, pad:-pad] = im[:, :, -1]\n alpha2 = np.clip(smooth2d(alpha, self.pixels/72.*dpi) * 5, 0, 1)\n new_im[:, :, -1] = alpha2\n new_im[:, :, :-1] = self.color\n offsetx, offsety = -pad, -pad\n\n return new_im, offsetx, offsety\n\n\nclass FilteredArtistList(Artist):\n \"\"\"\n A simple container to draw filtered artist.\n \"\"\"\n\n def __init__(self, artist_list, filter):\n self._artist_list = artist_list\n self._filter = filter\n Artist.__init__(self)\n\n def draw(self, renderer):\n renderer.start_rasterizing()\n renderer.start_filter()\n for a in self._artist_list:\n a.draw(renderer)\n renderer.stop_filter(self._filter)\n renderer.stop_rasterizing()\n\n\ndef filtered_text(ax):\n # mostly copied from contour_demo.py\n\n # prepare image\n delta = 0.025\n x = np.arange(-3.0, 3.0, delta)\n y = np.arange(-2.0, 2.0, delta)\n X, Y = np.meshgrid(x, y)\n Z1 = np.exp(-X**2 - Y**2)\n Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)\n Z = (Z1 - Z2) * 2\n\n # draw\n im = ax.imshow(Z, interpolation='bilinear', origin='lower',\n cmap=cm.gray, extent=(-3, 3, -2, 2))\n levels = np.arange(-1.2, 1.6, 0.2)\n CS = ax.contour(Z, levels,\n origin='lower',\n linewidths=2,\n extent=(-3, 3, -2, 2))\n\n ax.set_aspect(\"auto\")\n\n # contour label\n cl = ax.clabel(CS, levels[1::2], # label every second level\n inline=1,\n fmt='%1.1f',\n fontsize=11)\n\n # change clable color to black\n from matplotlib.patheffects import Normal\n for t in cl:\n t.set_color(\"k\")\n # to force TextPath (i.e., same font in all backends)\n t.set_path_effects([Normal()])\n\n # Add white glows to improve visibility of labels.\n white_glows = FilteredArtistList(cl, GrowFilter(3))\n ax.add_artist(white_glows)\n white_glows.set_zorder(cl[0].get_zorder() - 0.1)\n\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n\ndef drop_shadow_line(ax):\n # copied from examples/misc/svg_filter_line.py\n\n # draw lines\n l1, = ax.plot([0.1, 0.5, 0.9], [0.1, 0.9, 0.5], \"bo-\",\n mec=\"b\", mfc=\"w\", lw=5, mew=3, ms=10, label=\"Line 1\")\n l2, = ax.plot([0.1, 0.5, 0.9], [0.5, 0.2, 0.7], \"ro-\",\n mec=\"r\", mfc=\"w\", lw=5, mew=3, ms=10, label=\"Line 1\")\n\n gauss = DropShadowFilter(4)\n\n for l in [l1, l2]:\n\n # draw shadows with same lines with slight offset.\n\n xx = l.get_xdata()\n yy = l.get_ydata()\n shadow, = ax.plot(xx, yy)\n shadow.update_from(l)\n\n # offset transform\n ot = mtransforms.offset_copy(l.get_transform(), ax.figure,\n x=4.0, y=-6.0, units='points')\n\n shadow.set_transform(ot)\n\n # adjust zorder of the shadow lines so that it is drawn below the\n # original lines\n shadow.set_zorder(l.get_zorder() - 0.5)\n shadow.set_agg_filter(gauss)\n shadow.set_rasterized(True) # to support mixed-mode renderers\n\n ax.set_xlim(0., 1.)\n ax.set_ylim(0., 1.)\n\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n\ndef drop_shadow_patches(ax):\n # Copied from barchart_demo.py\n N = 5\n menMeans = (20, 35, 30, 35, 27)\n\n ind = np.arange(N) # the x locations for the groups\n width = 0.35 # the width of the bars\n\n rects1 = ax.bar(ind, menMeans, width, color='r', ec=\"w\", lw=2)\n\n womenMeans = (25, 32, 34, 20, 25)\n rects2 = ax.bar(ind + width + 0.1, womenMeans, width,\n 
color='y', ec=\"w\", lw=2)\n\n # gauss = GaussianFilter(1.5, offsets=(1,1), )\n gauss = DropShadowFilter(5, offsets=(1, 1), )\n shadow = FilteredArtistList(rects1 + rects2, gauss)\n ax.add_artist(shadow)\n shadow.set_zorder(rects1[0].get_zorder() - 0.1)\n\n ax.set_ylim(0, 40)\n\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n\ndef light_filter_pie(ax):\n fracs = [15, 30, 45, 10]\n explode = (0, 0.05, 0, 0)\n pies = ax.pie(fracs, explode=explode)\n ax.patch.set_visible(True)\n\n light_filter = LightFilter(9)\n for p in pies[0]:\n p.set_agg_filter(light_filter)\n p.set_rasterized(True) # to support mixed-mode renderers\n p.set(ec=\"none\",\n lw=2)\n\n gauss = DropShadowFilter(9, offsets=(3, 4), alpha=0.7)\n shadow = FilteredArtistList(pies[0], gauss)\n ax.add_artist(shadow)\n shadow.set_zorder(pies[0][0].get_zorder() - 0.1)\n\n\nif 1:\n\n plt.figure(1, figsize=(6, 6))\n plt.subplots_adjust(left=0.05, right=0.95)\n\n ax = plt.subplot(221)\n filtered_text(ax)\n\n ax = plt.subplot(222)\n drop_shadow_line(ax)\n\n ax = plt.subplot(223)\n drop_shadow_patches(ax)\n\n ax = plt.subplot(224)\n ax.set_aspect(1)\n light_filter_pie(ax)\n ax.set_frame_on(True)\n\n plt.show()\n",
"\"\"\"\n============\nFigure Title\n============\n\nCreate a figure with separate subplot titles and a centered figure title.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef f(t):\n s1 = np.cos(2*np.pi*t)\n e1 = np.exp(-t)\n return s1 * e1\n\nt1 = np.arange(0.0, 5.0, 0.1)\nt2 = np.arange(0.0, 5.0, 0.02)\nt3 = np.arange(0.0, 2.0, 0.01)\n\n\nfig, axs = plt.subplots(2, 1, constrained_layout=True)\naxs[0].plot(t1, f(t1), 'o', t2, f(t2), '-')\naxs[0].set_title('subplot 1')\naxs[0].set_xlabel('distance (m)')\naxs[0].set_ylabel('Damped oscillation')\nfig.suptitle('This is a somewhat long figure title', fontsize=16)\n\naxs[1].plot(t3, np.cos(2*np.pi*t3), '--')\naxs[1].set_xlabel('time (s)')\naxs[1].set_title('subplot 2')\naxs[1].set_ylabel('Undamped')\n\nplt.show()\n",
"\"\"\"\n===========\nArrow guide\n===========\n\nAdding arrow patches to plots.\n\nArrows are often used to annotate plots. This tutorial shows how to plot arrows\nthat behave differently when the data limits on a plot are changed. In general,\npoints on a plot can either be fixed in \"data space\" or \"display space\".\nSomething plotted in data space moves when the data limits are altered - an\nexample would the points in a scatter plot. Something plotted in display space\nstays static when data limits are altered - an example would be a figure title\nor the axis labels.\n\nArrows consist of a head (and possibly a tail) and a stem drawn between a\nstart point and and end point, called 'anchor points' from now on.\nHere we show three use cases for plotting arrows, depending on whether the\nhead or anchor points need to be fixed in data or display space:\n\n 1. Head shape fixed in display space, anchor points fixed in data space\n 2. Head shape and anchor points fixed in display space\n 3. Entire patch fixed in data space\n\nBelow each use case is presented in turn.\n\"\"\"\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nx_tail = 0.1\ny_tail = 0.1\nx_head = 0.9\ny_head = 0.9\ndx = x_head - x_tail\ndy = y_head - y_tail\n\n\n###############################################################################\n# Head shape fixed in display space and anchor points fixed in data space\n# -----------------------------------------------------------------------\n#\n# This is useful if you are annotating a plot, and don't want the arrow to\n# to change shape or position if you pan or scale the plot. Note that when\n# the axis limits change\n#\n# In this case we use `.patches.FancyArrowPatch`\n#\n# Note that when the axis limits are changed, the arrow shape stays the same,\n# but the anchor points move.\n\nfig, axs = plt.subplots(nrows=2)\narrow = mpatches.FancyArrowPatch((x_tail, y_tail), (dx, dy),\n mutation_scale=100)\naxs[0].add_patch(arrow)\n\narrow = mpatches.FancyArrowPatch((x_tail, y_tail), (dx, dy),\n mutation_scale=100)\naxs[1].add_patch(arrow)\naxs[1].set_xlim(0, 2)\naxs[1].set_ylim(0, 2)\n\n###############################################################################\n# Head shape and anchor points fixed in display space\n# ---------------------------------------------------\n#\n# This is useful if you are annotating a plot, and don't want the arrow to\n# to change shape or position if you pan or scale the plot.\n#\n# In this case we use `.patches.FancyArrowPatch`, and pass the keyword argument\n# ``transform=ax.transAxes`` where ``ax`` is the axes we are adding the patch\n# to.\n#\n# Note that when the axis limits are changed, the arrow shape and location\n# stays the same.\n\nfig, axs = plt.subplots(nrows=2)\narrow = mpatches.FancyArrowPatch((x_tail, y_tail), (dx, dy),\n mutation_scale=100,\n transform=axs[0].transAxes)\naxs[0].add_patch(arrow)\n\narrow = mpatches.FancyArrowPatch((x_tail, y_tail), (dx, dy),\n mutation_scale=100,\n transform=axs[1].transAxes)\naxs[1].add_patch(arrow)\naxs[1].set_xlim(0, 2)\naxs[1].set_ylim(0, 2)\n\n\n###############################################################################\n# Head shape and anchor points fixed in data space\n# ------------------------------------------------\n#\n# In this case we use `.patches.Arrow`\n#\n# Note that when the axis limits are changed, the arrow shape and location\n# changes.\n\nfig, axs = plt.subplots(nrows=2)\n\narrow = mpatches.Arrow(x_tail, y_tail, dx, dy)\naxs[0].add_patch(arrow)\n\narrow 
= mpatches.Arrow(x_tail, y_tail, dx, dy)\naxs[1].add_patch(arrow)\naxs[1].set_xlim(0, 2)\naxs[1].set_ylim(0, 2)\n\n###############################################################################\n\nplt.show()\n",
"\"\"\"\n=====================\nWhats New 0.99 Spines\n=====================\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef adjust_spines(ax,spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward',10)) # outward by 10 points\n else:\n spine.set_color('none') # don't draw spine\n\n # turn off ticks where there is no spine\n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n # no yaxis ticks\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n # no xaxis ticks\n ax.xaxis.set_ticks([])\n\nfig = plt.figure()\n\nx = np.linspace(0,2*np.pi,100)\ny = 2*np.sin(x)\n\nax = fig.add_subplot(2,2,1)\nax.plot(x,y)\nadjust_spines(ax,['left'])\n\nax = fig.add_subplot(2,2,2)\nax.plot(x,y)\nadjust_spines(ax,[])\n\nax = fig.add_subplot(2,2,3)\nax.plot(x,y)\nadjust_spines(ax,['left','bottom'])\n\nax = fig.add_subplot(2,2,4)\nax.plot(x,y)\nadjust_spines(ax,['bottom'])\n\nplt.show()\n\n#############################################################################\n#\n# ------------\n#\n# References\n# \"\"\"\"\"\"\"\"\"\"\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n\nimport matplotlib\nmatplotlib.axis.Axis.set_ticks\nmatplotlib.axis.XAxis.set_ticks_position\nmatplotlib.axis.YAxis.set_ticks_position\nmatplotlib.spines\nmatplotlib.spines.Spine\nmatplotlib.spines.Spine.set_color\nmatplotlib.spines.Spine.set_position\n"
] | [
[
"numpy.meshgrid",
"numpy.linspace",
"numpy.ma.masked_where",
"matplotlib.pyplot.subplots",
"matplotlib.ticker.LogLocator",
"numpy.exp",
"matplotlib.pyplot.show"
],
[
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.cbook.get_sample_data",
"matplotlib.pyplot.show",
"numpy.zeros"
],
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.figure"
],
[
"numpy.array",
"matplotlib.colors.BoundaryNorm",
"numpy.linspace",
"matplotlib.colors.Normalize",
"numpy.ones",
"matplotlib.colorbar.ColorbarBase",
"numpy.deg2rad",
"matplotlib.colors.ListedColormap",
"numpy.repeat",
"matplotlib.pyplot.show"
],
[
"numpy.random.random"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"numpy.arange",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
],
[
"matplotlib.tri.CubicTriInterpolator",
"numpy.linspace",
"numpy.asarray",
"matplotlib.pyplot.subplots",
"numpy.cos",
"matplotlib.tri.LinearTriInterpolator",
"matplotlib.pyplot.show",
"matplotlib.tri.Triangulation"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.random.rand",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.patheffects.Normal",
"numpy.meshgrid",
"matplotlib.artist.Artist.__init__",
"numpy.asarray",
"numpy.arange",
"numpy.empty_like",
"numpy.empty",
"matplotlib.colors.LightSource",
"matplotlib.pyplot.subplot",
"numpy.zeros_like",
"matplotlib.pyplot.subplots_adjust",
"numpy.hanning",
"numpy.transpose",
"numpy.exp",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.subplots",
"numpy.exp",
"matplotlib.pyplot.show"
],
[
"matplotlib.patches.Arrow",
"matplotlib.patches.FancyArrowPatch",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.show",
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ska-sa/katsdpdisp | [
"3fd2f5878c0bd3ae56815568446593b876881e3f"
] | [
"katsdpdisp/test/test_data.py"
] | [
"\"\"\"Tests for :py:mod:`katsdpdisp.data`.\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom katsdpdisp.data import SparseArray\n\ndef test_sparsearray(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6,islot_new_bls=6):\n \"\"\"Simulates the assignment and retrieval of data as it happens in the signal displays when \n it receives different sets of baseline data at different timestamps, with some time continuity.\n (fullslots,fullbls,fullchan) is the dimensions of the full/complete dataset\n (nslots,maxbaselines,fullchan) is the true size of the sparse array, representing a size of (nslots,fullbls,fullchan)\n where maxbaselines<fullbls\n islot_new_bls is the number of time stamps that passes before there is a new baseline product selected/chosen in the test sequence\"\"\"\n mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)\n\n rs = np.random.RandomState(seed=0)\n fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])\n histbaselines=[]\n for it in range(fullslots):\n if it%islot_new_bls==0:#add a new baseline, remove old, every so often\n while True:\n newbaseline=rs.random_integers(0,fullbls-1,[1])\n if len(histbaselines)==0 or (newbaseline not in histbaselines[-1]):\n break\n if (len(histbaselines)==0):\n newbaselines=np.r_[newbaseline]\n elif (len(histbaselines[-1])<islot_new_bls):\n newbaselines=np.r_[histbaselines[-1],newbaseline]\n else:\n newbaselines=np.r_[histbaselines[-1][1:],newbaseline]\n histbaselines.append(newbaselines)\n mx[it%nslots,histbaselines[-1],:]=fulldata[it,histbaselines[-1],:]\n for cit in range(islot_new_bls):\n if (cit>=len(histbaselines)):\n break\n hasthesebaselines=list(set(histbaselines[-1-cit]) & set(histbaselines[-1]))\n missingbaselines=list(set(histbaselines[-1-cit]) - set(histbaselines[-1]))\n retrieved=mx[(it-cit)%nslots,hasthesebaselines,:]\n assert_array_equal(retrieved, fulldata[it-cit,hasthesebaselines,:], 'SparseArray getitem test failed')\n missingretrieved=mx[(it-cit)%nslots,missingbaselines,:]\n assert_array_equal(missingretrieved,np.zeros(missingretrieved.shape,dtype=np.int32), 'SparseArray missing baseline test failed')\n\ndef test_sparsearray_indexing(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6):\n mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)\n\n rs = np.random.RandomState(seed=0)\n fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])\n\n mx[0,0,0]=fulldata[0,0,0]\n assert_array_equal(mx[0,0,0], fulldata[0,0,0], 'SparseArray [scalar,scalar,scalar] index test failed')\n\n mx[1,1,:]=fulldata[1,1,:]\n assert_array_equal(mx[1,1,:], fulldata[1,1,:], 'SparseArray [scalar,scalar,slice] index test 2 failed') #baseline change so previous assignment purged (in future may retain until running out of memory and necessary to purge)\n\n mx[2,1,:]=fulldata[2,1,:]\n assert_array_equal(mx[1:3,1,:], fulldata[1:3,1,:], 'SparseArray retain old value test failed') #assign to same baseline so previous slot value remain\n\n mx[3,:maxbaselines,0]=fulldata[3,:maxbaselines,0]\n assert_array_equal(mx[3,:maxbaselines,0], fulldata[3,:maxbaselines,0], 'SparseArray [scalar,slice,scalar] index test failed')\n\n mx[:,1,3]=fulldata[:nslots,1,3]\n assert_array_equal(mx[:,1,3], fulldata[:nslots,1,3], 'SparseArray [slice,scalar,scalar] index test failed')\n \n mx[:,1,:]=fulldata[:nslots,1,:]\n assert_array_equal(mx[:,1,:], fulldata[:nslots,1,:], 'SparseArray [slice,scalar,slice] index test failed')\n\n 
mx[:,1:maxbaselines,:]=fulldata[2:nslots+2,1:maxbaselines,:]\n assert_array_equal(mx[:,1:maxbaselines,:], fulldata[2:nslots+2,1:maxbaselines,:], 'SparseArray [slice,slice,slice] index test failed')\n\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.random.RandomState",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rmrafailov/metaworld | [
"b2cd055e5f2413ec6d66ef29e45d05af989dca3b",
"b2cd055e5f2413ec6d66ef29e45d05af989dca3b",
"b2cd055e5f2413ec6d66ef29e45d05af989dca3b",
"b2cd055e5f2413ec6d66ef29e45d05af989dca3b",
"b2cd055e5f2413ec6d66ef29e45d05af989dca3b"
] | [
"metaworld/policies/sawyer_coffee_pull_v2_policy.py",
"metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press.py",
"metaworld/policies/sawyer_push_wall_v2_policy.py",
"metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_button_v2.py",
"metaworld/policies/sawyer_faucet_open_v2_policy.py"
] | [
"import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, assert_fully_parsed, move\n\n\nclass SawyerCoffeePullV2Policy(Policy):\n\n @staticmethod\n @assert_fully_parsed\n def _parse_obs(obs):\n return {\n 'hand_pos': obs[:3],\n 'mug_pos': obs[3:6],\n 'unused_info': obs[6:],\n }\n\n def get_action(self, obs):\n o_d = self._parse_obs(obs)\n\n action = Action({\n 'delta_pos': np.arange(3),\n 'grab_effort': 3\n })\n\n action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.)\n action['grab_effort'] = self._grab_effort(o_d)\n\n return action.array\n\n @staticmethod\n def _desired_pos(o_d):\n pos_curr = o_d['hand_pos']\n pos_mug = o_d['mug_pos'] + np.array([-.005, .0, .05])\n\n if np.linalg.norm(pos_curr[:2] - pos_mug[:2]) > 0.06:\n return pos_mug + np.array([.0, .0, .15])\n elif abs(pos_curr[2] - pos_mug[2]) > 0.02:\n return pos_mug\n elif pos_curr[1] > .65:\n return np.array([.5, .6, .1])\n else:\n return np.array([pos_curr[0] - .1, .6, .1])\n\n @staticmethod\n def _grab_effort(o_d):\n pos_curr = o_d['hand_pos']\n pos_mug = o_d['mug_pos'] + np.array([.01, .0, .05])\n\n if np.linalg.norm(pos_curr[:2] - pos_mug[:2]) > 0.06 or \\\n abs(pos_curr[2] - pos_mug[2]) > 0.1:\n return -1.\n else:\n return .7\n",
"import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.asset_path_utils import full_v1_path_for\nfrom metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set\n\n\nclass SawyerButtonPressEnv(SawyerXYZEnv):\n def __init__(self):\n\n hand_low = (-0.5, 0.40, 0.05)\n hand_high = (0.5, 1, 0.5)\n obj_low = (-0.1, 0.8, 0.05)\n obj_high = (0.1, 0.9, 0.05)\n goal_low = (-0.1, .7, .099)\n goal_high = (0.1, .8, .101)\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.init_config = {\n 'obj_init_pos': np.array([0., 0.9, 0.04], dtype=np.float32),\n 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),\n }\n self.goal = np.array([0, 0.78, 0.12])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n self._random_reset_space = Box(\n np.array(obj_low),\n np.array(obj_high),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n\n @property\n def model_name(self):\n return full_v1_path_for('sawyer_xyz/sawyer_button_press.xml')\n\n @_assert_task_is_set\n def step(self, action):\n ob = super().step(action)\n reward, reachDist, pressDist = self.compute_reward(action, ob)\n self.curr_path_length += 1\n info = {\n 'reachDist': reachDist,\n 'goalDist': pressDist,\n 'epRew': reward,\n 'pickRew': None,\n 'success': float(pressDist <= 0.02)\n }\n\n return ob, reward, False, info\n\n @property\n def _target_site_config(self):\n return []\n\n def _get_pos_objects(self):\n return self.data.site_xpos[self.model.site_name2id('buttonStart')]\n\n def _set_obj_xyz(self, pos):\n qpos = self.data.qpos.flat.copy()\n qvel = self.data.qvel.flat.copy()\n qpos[9] = pos\n qvel[9] = 0\n self.set_state(qpos, qvel)\n\n def reset_model(self):\n self._reset_hand()\n self._target_pos = self.goal.copy()\n self.obj_init_pos = self.init_config['obj_init_pos']\n\n if self.random_init:\n goal_pos = self._get_state_rand_vec()\n self.obj_init_pos = goal_pos\n button_pos = goal_pos.copy()\n button_pos[1] -= 0.12\n button_pos[2] += 0.07\n self._target_pos = button_pos\n\n self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos\n self.sim.model.body_pos[self.model.body_name2id('button')] = self._target_pos\n self._set_obj_xyz(0)\n self._target_pos = self._get_site_pos('hole')\n self.maxDist = np.abs(self.data.site_xpos[self.model.site_name2id('buttonStart')][1] - self._target_pos[1])\n self.target_reward = 1000*self.maxDist + 1000*2\n\n return self._get_obs()\n\n def _reset_hand(self):\n super()._reset_hand(10)\n\n rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')\n self.init_fingerCOM = (rightFinger + leftFinger)/2\n self.pickCompleted = False\n\n def compute_reward(self, actions, obs):\n del actions\n objPos = obs[3:6]\n\n leftFinger = self._get_site_pos('leftEndEffector')\n fingerCOM = leftFinger\n\n pressGoal = self._target_pos[1]\n\n pressDist = np.abs(objPos[1] - pressGoal)\n reachDist = np.linalg.norm(objPos - fingerCOM)\n\n c1 = 1000\n c2 = 0.01\n c3 = 0.001\n if reachDist < 0.05:\n pressRew = 1000*(self.maxDist - pressDist) + c1*(np.exp(-(pressDist**2)/c2) + np.exp(-(pressDist**2)/c3))\n else:\n pressRew = 0\n pressRew = max(pressRew, 0)\n reward = -reachDist + pressRew\n\n return [reward, reachDist, pressDist]\n",
"import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, assert_fully_parsed, move\n\n\nclass SawyerPushWallV2Policy(Policy):\n\n @staticmethod\n @assert_fully_parsed\n def _parse_obs(obs):\n return {\n 'hand_pos': obs[:3],\n 'obj_pos': obs[3:6],\n 'goal_pos': obs[9:],\n 'unused_info': obs[6:9],\n }\n\n def get_action(self, obs):\n o_d = self._parse_obs(obs)\n\n action = Action({\n 'delta_pos': np.arange(3),\n 'grab_effort': 3\n })\n\n action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self.desired_pos(o_d), p=10.)\n action['grab_effort'] = self.grab_effort(o_d)\n\n return action.array\n\n @staticmethod\n def desired_pos(o_d):\n pos_curr = o_d['hand_pos']\n pos_obj = o_d['obj_pos'] + np.array([-0.005, 0, 0])\n\n # If error in the XY plane is greater than 0.02, place end effector above the puck\n if np.linalg.norm(pos_curr[:2] - pos_obj[:2]) > 0.02:\n return pos_obj + np.array([0., 0., 0.2])\n # Once XY error is low enough, drop end effector down on top of obj\n elif abs(pos_curr[2] - pos_obj[2]) > 0.04:\n return pos_obj + np.array([0., 0., 0.03])\n # Move to the goal\n else:\n #if the wall is between the puck and the goal, go around the wall\n if(-0.1 <= pos_obj[0] <= 0.3 and 0.65 <= pos_obj[1] <= 0.75):\n return pos_curr + np.array([-1, 0, 0])\n elif ((-0.15 < pos_obj[0] < 0.05 or 0.15 < pos_obj[0] < 0.35)\n and 0.695 <= pos_obj[1] <= 0.755):\n return pos_curr + np.array([0, 1, 0])\n return o_d['goal_pos']\n\n @staticmethod\n def grab_effort(o_d):\n pos_curr = o_d['hand_pos']\n pos_obj = o_d['obj_pos']\n\n if np.linalg.norm(pos_curr[:2] - pos_obj[:2]) > 0.02 or \\\n abs(pos_curr[2] - pos_obj[2]) > 0.1:\n return 0.0\n # While end effector is moving down toward the obj, begin closing the grabber\n else:\n return 0.6\n",
"import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.asset_path_utils import full_v2_path_for\nfrom metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set\n\n\nclass SawyerCoffeeButtonEnvV2(SawyerXYZEnv):\n\n def __init__(self):\n\n self.max_dist = 0.03\n\n hand_low = (-0.5, .4, 0.05)\n hand_high = (0.5, 1., 0.5)\n obj_low = (-0.1, 0.8, -.001)\n obj_high = (0.1, 0.9, +.001)\n # goal_low[3] would be .1, but objects aren't fully initialized until a\n # few steps after reset(). In that time, it could be .01\n goal_low = obj_low + np.array([-.001, -.22 + self.max_dist, .299])\n goal_high = obj_high + np.array([+.001, -.22 + self.max_dist, .301])\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.init_config = {\n 'obj_init_pos': np.array([0, 0.9, 0.28]),\n 'obj_init_angle': 0.3,\n 'hand_init_pos': np.array([0., .4, .2]),\n }\n self.goal = np.array([0, 0.78, 0.33])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.obj_init_angle = self.init_config['obj_init_angle']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n self.max_path_length = 150\n\n self._random_reset_space = Box(\n np.array(obj_low),\n np.array(obj_high),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n\n self.target_reward = 1000 * self.max_dist + 1000 * 2\n\n @property\n def model_name(self):\n return full_v2_path_for('sawyer_xyz/sawyer_coffee.xml')\n\n @_assert_task_is_set\n def step(self, action):\n ob = super().step(action)\n reward, reachDist, pushDist = self.compute_reward(action, ob)\n self.curr_path_length += 1\n info = {\n 'reachDist': reachDist,\n 'goalDist': pushDist,\n 'epRew': reward,\n 'pickRew': None,\n 'success': float(pushDist <= 0.02)\n }\n\n return ob, reward, False, info\n\n @property\n def _target_site_config(self):\n return [('coffee_goal', self._target_pos)]\n\n def _get_pos_objects(self):\n return self._get_site_pos('buttonStart')\n\n def _set_obj_xyz(self, pos):\n qpos = self.data.qpos.flatten()\n qvel = self.data.qvel.flatten()\n qpos[0:3] = pos.copy()\n qvel[9:15] = 0\n self.set_state(qpos, qvel)\n\n def reset_model(self):\n self._reset_hand()\n\n self.obj_init_pos = self._get_state_rand_vec() if self.random_init \\\n else self.init_config['obj_init_pos']\n self.sim.model.body_pos[self.model.body_name2id(\n 'coffee_machine'\n )] = self.obj_init_pos\n\n pos_mug = self.obj_init_pos + np.array([.0, -.22, .0])\n self._set_obj_xyz(pos_mug)\n\n pos_button = self.obj_init_pos + np.array([.0, -.22, .3])\n self._target_pos = pos_button + np.array([.0, self.max_dist, .0])\n\n return self._get_obs()\n\n def _reset_hand(self):\n super()._reset_hand()\n self.reachCompleted = False\n\n def compute_reward(self, actions, obs):\n del actions\n\n objPos = obs[3:6]\n\n leftFinger = self._get_site_pos('leftEndEffector')\n fingerCOM = leftFinger\n\n pressGoal = self._target_pos[1]\n\n pressDist = np.abs(objPos[1] - pressGoal)\n reachDist = np.linalg.norm(objPos - fingerCOM)\n\n c1 = 1000\n c2 = 0.01\n c3 = 0.001\n if reachDist < 0.05:\n pressRew = 1000 * (self.max_dist - pressDist) + c1 * (np.exp(-(pressDist ** 2) / c2) + np.exp(-(pressDist ** 2) / c3))\n else:\n pressRew = 0\n\n pressRew = max(pressRew, 0)\n reward = -reachDist + pressRew\n\n return [reward, reachDist, pressDist]\n",
"import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, assert_fully_parsed, move\n\n\nclass SawyerFaucetOpenV2Policy(Policy):\n\n @staticmethod\n @assert_fully_parsed\n def _parse_obs(obs):\n return {\n 'hand_pos': obs[:3],\n 'faucet_pos': obs[3:6],\n 'unused_info': obs[6:],\n }\n\n def get_action(self, obs):\n o_d = self._parse_obs(obs)\n\n action = Action({\n 'delta_pos': np.arange(3),\n 'grab_effort': 3\n })\n\n action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.)\n action['grab_effort'] = 1.\n\n return action.array\n\n @staticmethod\n def _desired_pos(o_d):\n pos_curr = o_d['hand_pos']\n pos_faucet = o_d['faucet_pos'] + np.array([-.04, .0, .03])\n\n if np.linalg.norm(pos_curr[:2] - pos_faucet[:2]) > 0.04:\n return pos_faucet + np.array([.0, .0, .1])\n elif abs(pos_curr[2] - pos_faucet[2]) > 0.04:\n return pos_faucet\n else:\n return pos_faucet + np.array([.1, .05, .0])\n"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.linalg.norm"
],
[
"numpy.exp",
"numpy.array",
"numpy.linalg.norm",
"numpy.abs"
],
[
"numpy.arange",
"numpy.array",
"numpy.linalg.norm"
],
[
"numpy.exp",
"numpy.array",
"numpy.linalg.norm",
"numpy.abs"
],
[
"numpy.arange",
"numpy.array",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AndreeaMutu/Python-Baseball | [
"6ca5e5006fd01ffa5b55c4859ebad7251a1f35a6"
] | [
"stats/data.py"
] | [
"import os\nimport glob\nimport pandas as pd\n\ngame_files = glob.glob(os.path.join(os.getcwd(),'games','*.EVE'))\ngame_files.sort()\n\ngame_frames = []\nfor game_file in game_files:\n game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])\n game_frames.append(game_frame)\n\ngames = pd.concat(game_frames)\ngames.loc[games['multi5']=='??',['multi5']]=''\nidentifiers = games['multi2'].str.extract(r'(.LS(\\d{4})\\d{5})')\nidentifiers = identifiers.fillna(method='ffill')\nidentifiers.columns=['game_id', 'year']\ngames = pd.concat([games, identifiers], sort=False, axis=1)\ngames = games.fillna(' ')\n\ngames.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])\nprint(games.head())\n"
] | [
[
"pandas.Categorical",
"pandas.concat",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
KleistvonLiu/denoise-imu-gyro | [
"76e75e194a3804c473be077663b4a668fc0b7c28"
] | [
"main_EUROC.py"
] | [
"import os\nimport torch\nimport src.learning as lr\nimport src.networks as sn\nimport src.losses as sl\nimport src.dataset as ds\nimport numpy as np\n\nbase_dir = os.path.dirname(os.path.realpath(__file__))\ndata_dir = '/path/to/EUROC/dataset'\n# test a given network\n# address = os.path.join(base_dir, 'results/EUROC/2020_02_18_16_52_55/')\n# or test the last trained network\naddress = \"last\"\n################################################################################\n# Network parameters\n################################################################################\nnet_class = sn.GyroNet\nnet_params = {\n 'in_dim': 6,\n 'out_dim': 3,\n 'c0': 16,\n 'dropout': 0.1,\n 'ks': [7, 7, 7, 7],\n 'ds': [4, 4, 4],\n 'momentum': 0.1,\n 'gyro_std': [1*np.pi/180, 2*np.pi/180, 5*np.pi/180],\n}\n################################################################################\n# Dataset parameters\n################################################################################\ndataset_class = ds.EUROCDataset\ndataset_params = {\n # where are raw data ?\n 'data_dir': data_dir,\n # where record preloaded data ?\n 'predata_dir': os.path.join(base_dir, 'data/EUROC'),\n # set train, val and test sequence\n 'train_seqs': [\n 'MH_01_easy',\n 'MH_03_medium',\n 'MH_05_difficult',\n 'V1_02_medium',\n 'V2_01_easy',\n 'V2_03_difficult'\n ],\n 'val_seqs': [\n 'MH_01_easy',\n 'MH_03_medium',\n 'MH_05_difficult',\n 'V1_02_medium',\n 'V2_01_easy',\n 'V2_03_difficult',\n ],\n 'test_seqs': [\n 'MH_02_easy',\n 'MH_04_difficult',\n 'V2_02_medium',\n 'V1_03_difficult',\n 'V1_01_easy',\n ],\n # size of trajectory during training\n 'N': 32 * 500, # should be integer * 'max_train_freq'\n 'min_train_freq': 16,\n 'max_train_freq': 32,\n}\n################################################################################\n# Training parameters\n################################################################################\ntrain_params = {\n 'optimizer_class': torch.optim.Adam,\n 'optimizer': {\n 'lr': 0.01,\n 'weight_decay': 1e-1,\n 'amsgrad': False,\n },\n 'loss_class': sl.GyroLoss,\n 'loss': {\n 'min_N': int(np.log2(dataset_params['min_train_freq'])),\n 'max_N': int(np.log2(dataset_params['max_train_freq'])),\n 'w': 1e6,\n 'target': 'rotation matrix',\n 'huber': 0.005,\n 'dt': 0.005,\n },\n 'scheduler_class': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,\n 'scheduler': {\n 'T_0': 600,\n 'T_mult': 2,\n 'eta_min': 1e-3,\n },\n 'dataloader': {\n 'batch_size': 10,\n 'pin_memory': False,\n 'num_workers': 0,\n 'shuffle': False,\n },\n # frequency of validation step\n 'freq_val': 600,\n # total number of epochs\n 'n_epochs': 1800,\n # where record results ?\n 'res_dir': os.path.join(base_dir, \"results/EUROC\"),\n # where record Tensorboard log ?\n 'tb_dir': os.path.join(base_dir, \"results/runs/EUROC\"),\n}\n################################################################################\n# Train on training data set\n################################################################################\n# learning_process = lr.GyroLearningBasedProcessing(train_params['res_dir'],\n# train_params['tb_dir'], net_class, net_params, None,\n# train_params['loss']['dt'])\n# learning_process.train(dataset_class, dataset_params, train_params)\n################################################################################\n# Test on full data set\n################################################################################\nlearning_process = lr.GyroLearningBasedProcessing(train_params['res_dir'],\n 
train_params['tb_dir'], net_class, net_params, address=address,\n dt=train_params['loss']['dt'])\nlearning_process.test(dataset_class, dataset_params, ['test'])"
] | [
[
"numpy.log2"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nzare/ignite | [
"b53c6aeef87754b3cd3638c91172b386dc73af12",
"002b595daa8a8345286c5e096c33e278948686a7",
"b53c6aeef87754b3cd3638c91172b386dc73af12"
] | [
"tests/ignite/contrib/handlers/test_polyaxon_logger.py",
"examples/contrib/cifar10/main.py",
"tests/ignite/handlers/test_early_stopping.py"
] | [
"import os\nfrom unittest.mock import MagicMock, call\n\nimport pytest\nimport torch\n\nfrom ignite.contrib.handlers.polyaxon_logger import *\nfrom ignite.engine import Engine, Events, State\n\nos.environ[\"POLYAXON_NO_OP\"] = \"1\"\n\n\ndef test_output_handler_with_wrong_logger_type():\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: x)\n\n mock_logger = MagicMock()\n mock_engine = MagicMock()\n with pytest.raises(RuntimeError, match=\"Handler 'OutputHandler' works only with PolyaxonLogger\"):\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n\ndef test_output_handler_output_transform():\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: x)\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.output = 12345\n mock_engine.state.iteration = 123\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n mock_logger.log_metrics.assert_called_once_with(step=123, **{\"tag/output\": 12345})\n\n wrapper = OutputHandler(\"another_tag\", output_transform=lambda x: {\"loss\": x})\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n mock_logger.log_metrics.assert_called_once_with(step=123, **{\"another_tag/loss\": 12345})\n\n\ndef test_output_handler_metric_names():\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\", \"b\", \"c\"])\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 12.23, \"b\": 23.45, \"c\": torch.tensor(10.0)})\n mock_engine.state.iteration = 5\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_called_once_with(step=5, **{\"tag/a\": 12.23, \"tag/b\": 23.45, \"tag/c\": 10.0})\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\",])\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": torch.Tensor([0.0, 1.0, 2.0, 3.0])})\n mock_engine.state.iteration = 5\n\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_has_calls(\n [call(step=5, **{\"tag/a/0\": 0.0, \"tag/a/1\": 1.0, \"tag/a/2\": 2.0, \"tag/a/3\": 3.0}),], any_order=True\n )\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\", \"c\"])\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 55.56, \"c\": \"Some text\"})\n mock_engine.state.iteration = 7\n\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n with pytest.warns(UserWarning):\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_has_calls([call(step=7, **{\"tag/a\": 55.56})], any_order=True)\n\n # all metrics\n wrapper = OutputHandler(\"tag\", metric_names=\"all\")\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 12.23, \"b\": 23.45, \"c\": torch.tensor(10.0)})\n mock_engine.state.iteration = 5\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n 
mock_logger.log_metrics.assert_called_once_with(step=5, **{\"tag/a\": 12.23, \"tag/b\": 23.45, \"tag/c\": 10.0})\n\n\ndef test_output_handler_both():\n\n wrapper = OutputHandler(\"tag\", metric_names=[\"a\", \"b\"], output_transform=lambda x: {\"loss\": x})\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State(metrics={\"a\": 12.23, \"b\": 23.45})\n mock_engine.state.epoch = 5\n mock_engine.state.output = 12345\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_called_once_with(step=5, **{\"tag/a\": 12.23, \"tag/b\": 23.45, \"tag/loss\": 12345})\n\n\ndef test_output_handler_with_wrong_global_step_transform_output():\n def global_step_transform(*args, **kwargs):\n return \"a\"\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: {\"loss\": x}, global_step_transform=global_step_transform)\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.epoch = 5\n mock_engine.state.output = 12345\n\n with pytest.raises(TypeError, match=\"global_step must be int\"):\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n\n\ndef test_output_handler_with_global_step_transform():\n def global_step_transform(*args, **kwargs):\n return 10\n\n wrapper = OutputHandler(\"tag\", output_transform=lambda x: {\"loss\": x}, global_step_transform=global_step_transform)\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.epoch = 5\n mock_engine.state.output = 12345\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n mock_logger.log_metrics.assert_called_once_with(step=10, **{\"tag/loss\": 12345})\n\n\ndef test_output_handler_with_global_step_from_engine():\n\n mock_another_engine = MagicMock()\n mock_another_engine.state = State()\n mock_another_engine.state.epoch = 10\n mock_another_engine.state.output = 12.345\n\n wrapper = OutputHandler(\n \"tag\",\n output_transform=lambda x: {\"loss\": x},\n global_step_transform=global_step_from_engine(mock_another_engine),\n )\n\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.epoch = 1\n mock_engine.state.output = 0.123\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n assert mock_logger.log_metrics.call_count == 1\n mock_logger.log_metrics.assert_has_calls(\n [call(step=mock_another_engine.state.epoch, **{\"tag/loss\": mock_engine.state.output})]\n )\n\n mock_another_engine.state.epoch = 11\n mock_engine.state.output = 1.123\n\n wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)\n assert mock_logger.log_metrics.call_count == 2\n mock_logger.log_metrics.assert_has_calls(\n [call(step=mock_another_engine.state.epoch, **{\"tag/loss\": mock_engine.state.output})]\n )\n\n\ndef test_optimizer_params_handler_wrong_setup():\n\n with pytest.raises(TypeError):\n OptimizerParamsHandler(optimizer=None)\n\n optimizer = MagicMock(spec=torch.optim.Optimizer)\n handler = OptimizerParamsHandler(optimizer=optimizer)\n\n mock_logger = MagicMock()\n mock_engine = MagicMock()\n with pytest.raises(RuntimeError, match=\"Handler OptimizerParamsHandler works only with PolyaxonLogger\"):\n handler(mock_engine, mock_logger, 
Events.ITERATION_STARTED)\n\n\ndef test_optimizer_params():\n\n optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)\n wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name=\"lr\")\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n mock_engine = MagicMock()\n mock_engine.state = State()\n mock_engine.state.iteration = 123\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n mock_logger.log_metrics.assert_called_once_with(**{\"lr/group_0\": 0.01, \"step\": 123})\n\n wrapper = OptimizerParamsHandler(optimizer, param_name=\"lr\", tag=\"generator\")\n mock_logger = MagicMock(spec=PolyaxonLogger)\n mock_logger.log_metrics = MagicMock()\n\n wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)\n mock_logger.log_metrics.assert_called_once_with(**{\"generator/lr/group_0\": 0.01, \"step\": 123})\n\n\ndef test_integration():\n\n n_epochs = 5\n data = list(range(50))\n\n losses = torch.rand(n_epochs * len(data))\n losses_iter = iter(losses)\n\n def update_fn(engine, batch):\n return next(losses_iter)\n\n trainer = Engine(update_fn)\n\n plx_logger = PolyaxonLogger()\n\n def dummy_handler(engine, logger, event_name):\n global_step = engine.state.get_event_attrib_value(event_name)\n logger.log_metrics(step=global_step, **{\"{}\".format(\"test_value\"): global_step})\n\n plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)\n\n trainer.run(data, max_epochs=n_epochs)\n\n\ndef test_integration_as_context_manager():\n\n n_epochs = 5\n data = list(range(50))\n\n losses = torch.rand(n_epochs * len(data))\n losses_iter = iter(losses)\n\n def update_fn(engine, batch):\n return next(losses_iter)\n\n with PolyaxonLogger() as plx_logger:\n\n trainer = Engine(update_fn)\n\n def dummy_handler(engine, logger, event_name):\n global_step = engine.state.get_event_attrib_value(event_name)\n logger.log_metrics(step=global_step, **{\"{}\".format(\"test_value\"): global_step})\n\n plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)\n\n trainer.run(data, max_epochs=n_epochs)\n\n\[email protected]\ndef no_site_packages():\n import sys\n\n polyaxon_client_modules = {}\n for k in sys.modules:\n if \"polyaxon\" in k:\n polyaxon_client_modules[k] = sys.modules[k]\n for k in polyaxon_client_modules:\n del sys.modules[k]\n\n prev_path = list(sys.path)\n sys.path = [p for p in sys.path if \"site-packages\" not in p]\n yield \"no_site_packages\"\n sys.path = prev_path\n for k in polyaxon_client_modules:\n sys.modules[k] = polyaxon_client_modules[k]\n\n\ndef test_no_polyaxon_client(no_site_packages):\n\n with pytest.raises(RuntimeError, match=r\"This contrib module requires polyaxon-client to be installed\"):\n PolyaxonLogger()\n",
"from pathlib import Path\nfrom datetime import datetime\n\nimport fire\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport ignite\nimport ignite.distributed as idist\nfrom ignite.engine import Events, Engine, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss\nfrom ignite.handlers import Checkpoint, DiskSaver\nfrom ignite.utils import manual_seed, setup_logger\n\nfrom ignite.contrib.engines import common\nfrom ignite.contrib.handlers import PiecewiseLinear\n\nimport utils\n\n\ndef training(local_rank, config):\n\n rank = idist.get_rank()\n manual_seed(config[\"seed\"] + rank)\n device = idist.device()\n\n logger = setup_logger(name=\"CIFAR10-Training\", distributed_rank=local_rank)\n\n log_basic_info(logger, config)\n\n output_path = config[\"output_path\"]\n if rank == 0:\n if config[\"stop_iteration\"] is None:\n now = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n else:\n now = \"stop-on-{}\".format(config[\"stop_iteration\"])\n\n folder_name = \"{}_backend-{}-{}_{}\".format(config[\"model\"], idist.backend(), idist.get_world_size(), now)\n output_path = Path(output_path) / folder_name\n if not output_path.exists():\n output_path.mkdir(parents=True)\n config[\"output_path\"] = output_path.as_posix()\n logger.info(\"Output path: {}\".format(config[\"output_path\"]))\n\n if \"cuda\" in device.type:\n config[\"cuda device name\"] = torch.cuda.get_device_name(local_rank)\n\n if config[\"with_trains\"]:\n from trains import Task\n\n task = Task.init(\"CIFAR10-Training\", task_name=output_path.stem)\n task.connect_configuration(config)\n # Log hyper parameters\n hyper_params = [\n \"model\",\n \"batch_size\",\n \"momentum\",\n \"weight_decay\",\n \"num_epochs\",\n \"learning_rate\",\n \"num_warmup_epochs\",\n ]\n task.connect({k: config[k] for k in hyper_params})\n\n # Setup dataflow, model, optimizer, criterion\n train_loader, test_loader = get_dataflow(config)\n\n config[\"num_iters_per_epoch\"] = len(train_loader)\n model, optimizer, criterion, lr_scheduler = initialize(config)\n\n # Create trainer for current task\n trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)\n\n # Let's now setup evaluator engine to perform model's validation and compute metrics\n metrics = {\n \"accuracy\": Accuracy(),\n \"loss\": Loss(criterion),\n }\n\n # We define two evaluators as they wont have exactly similar roles:\n # - `evaluator` will save the best model based on validation score\n evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)\n train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)\n\n def run_validation(engine):\n epoch = trainer.state.epoch\n state = train_evaluator.run(train_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Train\", state.metrics)\n state = evaluator.run(test_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Test\", state.metrics)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config[\"validate_every\"]) | Events.COMPLETED, run_validation)\n\n if rank == 0:\n # Setup TensorBoard logging on trainer and evaluators. Logged values are:\n # - Training metrics, e.g. 
running average loss values\n # - Learning rate\n # - Evaluation train/test metrics\n evaluators = {\"training\": train_evaluator, \"test\": evaluator}\n tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)\n\n # Store 3 best models by validation accuracy:\n common.gen_save_best_models_by_val_score(\n save_handler=get_save_handler(config),\n evaluator=evaluator,\n models={\"model\": model},\n metric_name=\"accuracy\",\n n_saved=3,\n trainer=trainer,\n tag=\"test\",\n )\n\n # In order to check training resuming we can stop training on a given iteration\n if config[\"stop_iteration\"] is not None:\n\n @trainer.on(Events.ITERATION_STARTED(once=config[\"stop_iteration\"]))\n def _():\n logger.info(\"Stop training on {} iteration\".format(trainer.state.iteration))\n trainer.terminate()\n\n try:\n trainer.run(train_loader, max_epochs=config[\"num_epochs\"])\n except Exception as e:\n import traceback\n\n print(traceback.format_exc())\n\n if rank == 0:\n tb_logger.close()\n\n\ndef run(\n seed=543,\n data_path=\"/tmp/cifar10\",\n output_path=\"/tmp/output-cifar10/\",\n model=\"resnet18\",\n batch_size=512,\n momentum=0.9,\n weight_decay=1e-4,\n num_workers=12,\n num_epochs=24,\n learning_rate=0.4,\n num_warmup_epochs=4,\n validate_every=3,\n checkpoint_every=200,\n backend=None,\n resume_from=None,\n log_every_iters=15,\n nproc_per_node=None,\n stop_iteration=None,\n with_trains=False,\n **spawn_kwargs\n):\n \"\"\"Main entry to train an model on CIFAR10 dataset.\n\n Args:\n seed (int): random state seed to set. Default, 543.\n data_path (str): input dataset path. Default, \"/tmp/cifar10\".\n output_path (str): output path. Default, \"/tmp/output-cifar10\".\n model (str): model name (from torchvision) to setup model to train. Default, \"resnet18\".\n batch_size (int): total batch size. Default, 512.\n momentum (float): optimizer's momentum. Default, 0.9.\n weight_decay (float): weight decay. Default, 1e-4.\n num_workers (int): number of workers in the data loader. Default, 12.\n num_epochs (int): number of epochs to train the model. Default, 24.\n learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.\n num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.\n validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.\n checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.\n backend (str, optional): backend to use for distributed configuration. Possible values: None, \"nccl\", \"xla-tpu\",\n \"gloo\" etc. Default, None.\n nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,\n when main python process is spawning training as child processes.\n resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.\n log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.\n It can be 0 to disable it. Default, 15.\n stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.\n with_trains (bool): if True, experiment Trains logger is setup. 
Default, False.\n **spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes\n\n \"\"\"\n # catch all local parameters\n config = locals()\n config.update(config[\"spawn_kwargs\"])\n del config[\"spawn_kwargs\"]\n\n spawn_kwargs[\"nproc_per_node\"] = nproc_per_node\n\n with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:\n\n parallel.run(training, config)\n\n\ndef get_dataflow(config):\n # - Get train/test datasets\n if idist.get_rank() > 0:\n # Ensure that only rank 0 download the dataset\n idist.barrier()\n\n train_dataset, test_dataset = utils.get_train_test_datasets(config[\"data_path\"])\n\n if idist.get_rank() == 0:\n # Ensure that only rank 0 download the dataset\n idist.barrier()\n\n # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu\n train_loader = idist.auto_dataloader(\n train_dataset, batch_size=config[\"batch_size\"], num_workers=config[\"num_workers\"], shuffle=True, drop_last=True,\n )\n\n test_loader = idist.auto_dataloader(\n test_dataset, batch_size=2 * config[\"batch_size\"], num_workers=config[\"num_workers\"], shuffle=False,\n )\n return train_loader, test_loader\n\n\ndef initialize(config):\n model = utils.get_model(config[\"model\"])\n # Adapt model for distributed settings if configured\n model = idist.auto_model(model)\n\n optimizer = optim.SGD(\n model.parameters(),\n lr=config[\"learning_rate\"],\n momentum=config[\"momentum\"],\n weight_decay=config[\"weight_decay\"],\n nesterov=True,\n )\n optimizer = idist.auto_optim(optimizer)\n criterion = nn.CrossEntropyLoss().to(idist.device())\n\n le = config[\"num_iters_per_epoch\"]\n milestones_values = [\n (0, 0.0),\n (le * config[\"num_warmup_epochs\"], config[\"learning_rate\"]),\n (le * config[\"num_epochs\"], 0.0),\n ]\n lr_scheduler = PiecewiseLinear(optimizer, param_name=\"lr\", milestones_values=milestones_values)\n\n return model, optimizer, criterion, lr_scheduler\n\n\ndef log_metrics(logger, epoch, elapsed, tag, metrics):\n logger.info(\n \"\\nEpoch {} - elapsed: {} - {} metrics:\\n {}\".format(\n epoch, elapsed, tag, \"\\n\".join([\"\\t{}: {}\".format(k, v) for k, v in metrics.items()])\n )\n )\n\n\ndef log_basic_info(logger, config):\n logger.info(\"Train {} on CIFAR10\".format(config[\"model\"]))\n logger.info(\"- PyTorch version: {}\".format(torch.__version__))\n logger.info(\"- Ignite version: {}\".format(ignite.__version__))\n\n logger.info(\"\\n\")\n logger.info(\"Configuration:\")\n for key, value in config.items():\n logger.info(\"\\t{}: {}\".format(key, value))\n logger.info(\"\\n\")\n\n if idist.get_world_size() > 1:\n logger.info(\"\\nDistributed setting:\")\n logger.info(\"\\tbackend: {}\".format(idist.backend()))\n logger.info(\"\\tworld size: {}\".format(idist.get_world_size()))\n logger.info(\"\\n\")\n\n\ndef create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):\n\n device = idist.device()\n\n # Setup Ignite trainer:\n # - let's define training step\n # - add other common handlers:\n # - TerminateOnNan,\n # - handler to setup learning rate scheduling,\n # - ModelCheckpoint\n # - RunningAverage` on `train_step` output\n # - Two progress bars on epochs and optionally on iterations\n\n def train_step(engine, batch):\n\n x, y = batch[0], batch[1]\n\n if x.device != device:\n x = x.to(device, non_blocking=True)\n y = y.to(device, non_blocking=True)\n\n model.train()\n # Supervised part\n y_pred = model(x)\n loss = criterion(y_pred, y)\n\n optimizer.zero_grad()\n 
loss.backward()\n optimizer.step()\n\n # This can be helpful for XLA to avoid performance slow down if fetch loss.item() every iteration\n if config[\"log_every_iters\"] > 0 and (engine.state.iteration - 1) % config[\"log_every_iters\"] == 0:\n batch_loss = loss.item()\n engine.state.saved_batch_loss = batch_loss\n else:\n batch_loss = engine.state.saved_batch_loss\n\n return {\n \"batch loss\": batch_loss,\n }\n\n trainer = Engine(train_step)\n trainer.state.saved_batch_loss = -1.0\n trainer.state_dict_user_keys.append(\"saved_batch_loss\")\n trainer.logger = logger\n\n to_save = {\"trainer\": trainer, \"model\": model, \"optimizer\": optimizer, \"lr_scheduler\": lr_scheduler}\n metric_names = [\n \"batch loss\",\n ]\n\n common.setup_common_training_handlers(\n trainer=trainer,\n train_sampler=train_sampler,\n to_save=to_save,\n save_every_iters=config[\"checkpoint_every\"],\n save_handler=get_save_handler(config),\n lr_scheduler=lr_scheduler,\n output_names=metric_names if config[\"log_every_iters\"] > 0 else None,\n with_pbars=False,\n clear_cuda_cache=False,\n )\n\n resume_from = config[\"resume_from\"]\n if resume_from is not None:\n checkpoint_fp = Path(resume_from)\n assert checkpoint_fp.exists(), \"Checkpoint '{}' is not found\".format(checkpoint_fp.as_posix())\n logger.info(\"Resume from a checkpoint: {}\".format(checkpoint_fp.as_posix()))\n checkpoint = torch.load(checkpoint_fp.as_posix(), map_location=\"cpu\")\n Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)\n\n return trainer\n\n\ndef get_save_handler(config):\n if config[\"with_trains\"]:\n from ignite.contrib.handlers.trains_logger import TrainsSaver\n\n return TrainsSaver(dirname=config[\"output_path\"])\n\n return DiskSaver(config[\"output_path\"], require_empty=False)\n\n\nif __name__ == \"__main__\":\n fire.Fire({\"run\": run})\n",
"import os\n\nimport pytest\nimport torch\n\nimport ignite.distributed as idist\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import EarlyStopping\n\n\ndef do_nothing_update_fn(engine, batch):\n pass\n\n\ndef test_args_validation():\n\n trainer = Engine(do_nothing_update_fn)\n\n with pytest.raises(ValueError, match=r\"Argument patience should be positive integer.\"):\n EarlyStopping(patience=-1, score_function=lambda engine: 0, trainer=trainer)\n\n with pytest.raises(ValueError, match=r\"Argument min_delta should not be a negative number.\"):\n EarlyStopping(patience=2, min_delta=-0.1, score_function=lambda engine: 0, trainer=trainer)\n\n with pytest.raises(TypeError, match=r\"Argument score_function should be a function.\"):\n EarlyStopping(patience=2, score_function=12345, trainer=trainer)\n\n with pytest.raises(TypeError, match=r\"Argument trainer should be an instance of Engine.\"):\n EarlyStopping(patience=2, score_function=lambda engine: 0, trainer=None)\n\n\ndef test_simple_early_stopping():\n\n scores = iter([1.0, 0.8, 0.88])\n\n def score_function(engine):\n return next(scores)\n\n trainer = Engine(do_nothing_update_fn)\n\n h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)\n # Call 3 times and check if stopped\n assert not trainer.should_terminate\n h(None)\n assert not trainer.should_terminate\n h(None)\n assert not trainer.should_terminate\n h(None)\n assert trainer.should_terminate\n\n\ndef test_state_dict():\n\n scores = iter([1.0, 0.8, 0.88])\n\n def score_function(engine):\n return next(scores)\n\n trainer = Engine(do_nothing_update_fn)\n\n h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)\n # Call 3 times and check if stopped\n assert not trainer.should_terminate\n h(None)\n assert not trainer.should_terminate\n\n # Swap to new object, but maintain state\n h2 = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)\n h2.load_state_dict(h.state_dict())\n\n h2(None)\n assert not trainer.should_terminate\n h2(None)\n assert trainer.should_terminate\n\n\ndef test_early_stopping_on_delta():\n\n scores = iter([1.0, 2.0, 2.01, 3.0, 3.01, 3.02])\n\n trainer = Engine(do_nothing_update_fn)\n\n h = EarlyStopping(patience=2, min_delta=0.1, score_function=lambda _: next(scores), trainer=trainer)\n\n assert not trainer.should_terminate\n h(None) # counter == 0\n assert not trainer.should_terminate\n h(None) # delta == 1.0; counter == 0\n assert not trainer.should_terminate\n h(None) # delta == 0.01; counter == 1\n assert not trainer.should_terminate\n h(None) # delta == 0.99; counter == 0\n assert not trainer.should_terminate\n h(None) # delta == 0.01; counter == 1\n assert not trainer.should_terminate\n h(None) # delta == 0.01; counter == 2\n assert trainer.should_terminate\n\n\ndef test_early_stopping_on_last_event_delta():\n\n scores = iter([0.0, 0.3, 0.6])\n\n trainer = Engine(do_nothing_update_fn)\n\n h = EarlyStopping(\n patience=2, min_delta=0.4, cumulative_delta=False, score_function=lambda _: next(scores), trainer=trainer\n )\n\n assert not trainer.should_terminate\n h(None) # counter == 0\n assert not trainer.should_terminate\n h(None) # delta == 0.3; counter == 1\n assert not trainer.should_terminate\n h(None) # delta == 0.3; counter == 2\n assert trainer.should_terminate\n\n\ndef test_early_stopping_on_cumulative_delta():\n\n scores = iter([0.0, 0.3, 0.6])\n\n trainer = Engine(do_nothing_update_fn)\n\n h = EarlyStopping(\n patience=2, min_delta=0.4, cumulative_delta=True, 
score_function=lambda _: next(scores), trainer=trainer\n )\n\n assert not trainer.should_terminate\n h(None) # counter == 0\n assert not trainer.should_terminate\n h(None) # delta == 0.3; counter == 1\n assert not trainer.should_terminate\n h(None) # delta == 0.6; counter == 0\n assert not trainer.should_terminate\n\n\ndef test_simple_early_stopping_on_plateau():\n def score_function(engine):\n return 42\n\n trainer = Engine(do_nothing_update_fn)\n\n h = EarlyStopping(patience=1, score_function=score_function, trainer=trainer)\n # Call 2 times and check if stopped\n assert not trainer.should_terminate\n h(None)\n assert not trainer.should_terminate\n h(None)\n assert trainer.should_terminate\n\n\ndef test_simple_no_early_stopping():\n\n scores = iter([1.0, 0.8, 1.2])\n\n def score_function(engine):\n return next(scores)\n\n trainer = Engine(do_nothing_update_fn)\n\n h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)\n # Call 3 times and check if not stopped\n assert not trainer.should_terminate\n h(None)\n h(None)\n h(None)\n assert not trainer.should_terminate\n\n\ndef test_with_engine_early_stopping():\n class Counter(object):\n def __init__(self, count=0):\n self.count = count\n\n n_epochs_counter = Counter()\n\n scores = iter([1.0, 0.8, 1.2, 1.5, 0.9, 1.0, 0.99, 1.1, 0.9])\n\n def score_function(engine):\n return next(scores)\n\n trainer = Engine(do_nothing_update_fn)\n evaluator = Engine(do_nothing_update_fn)\n early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def evaluation(engine):\n evaluator.run([0])\n n_epochs_counter.count += 1\n\n evaluator.add_event_handler(Events.COMPLETED, early_stopping)\n trainer.run([0], max_epochs=10)\n assert n_epochs_counter.count == 7\n assert trainer.state.epoch == 7\n\n\ndef test_with_engine_early_stopping_on_plateau():\n class Counter(object):\n def __init__(self, count=0):\n self.count = count\n\n n_epochs_counter = Counter()\n\n def score_function(engine):\n return 0.047\n\n trainer = Engine(do_nothing_update_fn)\n evaluator = Engine(do_nothing_update_fn)\n early_stopping = EarlyStopping(patience=4, score_function=score_function, trainer=trainer)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def evaluation(engine):\n evaluator.run([0])\n n_epochs_counter.count += 1\n\n evaluator.add_event_handler(Events.COMPLETED, early_stopping)\n trainer.run([0], max_epochs=10)\n assert n_epochs_counter.count == 5\n assert trainer.state.epoch == 5\n\n\ndef test_with_engine_no_early_stopping():\n class Counter(object):\n def __init__(self, count=0):\n self.count = count\n\n n_epochs_counter = Counter()\n\n scores = iter([1.0, 0.8, 1.2, 1.23, 0.9, 1.0, 1.1, 1.253, 1.26, 1.2])\n\n def score_function(engine):\n return next(scores)\n\n trainer = Engine(do_nothing_update_fn)\n evaluator = Engine(do_nothing_update_fn)\n early_stopping = EarlyStopping(patience=5, score_function=score_function, trainer=trainer)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def evaluation(engine):\n evaluator.run([0])\n n_epochs_counter.count += 1\n\n evaluator.add_event_handler(Events.COMPLETED, early_stopping)\n trainer.run([0], max_epochs=10)\n assert n_epochs_counter.count == 10\n assert trainer.state.epoch == 10\n\n\ndef _test_distrib_with_engine_early_stopping(device):\n\n import torch.distributed as dist\n\n torch.manual_seed(12)\n\n class Counter(object):\n def __init__(self, count=0):\n self.count = count\n\n n_epochs_counter = Counter()\n\n scores = torch.tensor([1.0, 0.8, 1.2, 
1.5, 0.9, 1.0, 0.99, 1.1, 0.9], requires_grad=False).to(device)\n\n def score_function(engine):\n i = trainer.state.epoch - 1\n v = scores[i]\n dist.all_reduce(v)\n v /= dist.get_world_size()\n return v.item()\n\n trainer = Engine(do_nothing_update_fn)\n evaluator = Engine(do_nothing_update_fn)\n early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def evaluation(engine):\n evaluator.run([0])\n n_epochs_counter.count += 1\n\n evaluator.add_event_handler(Events.COMPLETED, early_stopping)\n trainer.run([0], max_epochs=10)\n assert trainer.state.epoch == 7\n assert n_epochs_counter.count == 7\n\n\ndef _test_distrib_integration_engine_early_stopping(device):\n\n import torch.distributed as dist\n from ignite.metrics import Accuracy\n\n rank = dist.get_rank()\n ws = dist.get_world_size()\n torch.manual_seed(12)\n\n n_epochs = 10\n n_iters = 20\n\n y_preds = (\n [torch.randint(0, 2, size=(n_iters, ws)).to(device)]\n + [torch.ones(n_iters, ws).to(device)]\n + [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]\n )\n\n y_true = (\n [torch.randint(0, 2, size=(n_iters, ws)).to(device)]\n + [torch.ones(n_iters, ws).to(device)]\n + [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]\n )\n\n def update(engine, _):\n e = trainer.state.epoch - 1\n i = engine.state.iteration - 1\n return y_preds[e][i, rank], y_true[e][i, rank]\n\n evaluator = Engine(update)\n acc = Accuracy(device=device)\n acc.attach(evaluator, \"acc\")\n\n def score_function(engine):\n return engine.state.metrics[\"acc\"]\n\n trainer = Engine(lambda e, b: None)\n early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def evaluation(engine):\n data = list(range(n_iters))\n evaluator.run(data=data)\n\n evaluator.add_event_handler(Events.COMPLETED, early_stopping)\n trainer.run([0], max_epochs=10)\n assert trainer.state.epoch == 5\n\n\[email protected]\[email protected](not idist.has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_distrib_gpu(local_rank, distributed_context_single_node_nccl):\n device = \"cuda:{}\".format(local_rank)\n _test_distrib_with_engine_early_stopping(device)\n _test_distrib_integration_engine_early_stopping(device)\n\n\[email protected]\[email protected](not idist.has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_distrib_cpu(local_rank, distributed_context_single_node_gloo):\n device = \"cpu\"\n _test_distrib_with_engine_early_stopping(device)\n _test_distrib_integration_engine_early_stopping(device)\n\n\[email protected]_distributed\[email protected](not idist.has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](\"MULTINODE_DISTRIB\" not in os.environ, reason=\"Skip if not multi-node distributed\")\ndef test_multinode_distrib_cpu(distributed_context_multi_node_gloo):\n device = \"cpu\"\n _test_distrib_with_engine_early_stopping(device)\n _test_distrib_integration_engine_early_stopping(device)\n\n\[email protected]_distributed\[email protected](not idist.has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](\"GPU_MULTINODE_DISTRIB\" not in os.environ, reason=\"Skip if not multi-node distributed\")\ndef test_multinode_distrib_gpu(distributed_context_multi_node_nccl):\n device = 
\"cuda:{}\".format(distributed_context_multi_node_nccl[\"local_rank\"])\n _test_distrib_with_engine_early_stopping(device)\n _test_distrib_integration_engine_early_stopping(device)\n"
] | [
[
"torch.Tensor",
"torch.tensor"
],
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.get_device_name"
],
[
"torch.randint",
"torch.ones",
"torch.manual_seed",
"torch.tensor",
"torch.distributed.get_rank",
"torch.cuda.device_count",
"torch.distributed.all_reduce",
"torch.distributed.get_world_size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Monnoroch/tensorflow | [
"1d76583411038767f673a0c96174c80eaf9ff42f",
"1d76583411038767f673a0c96174c80eaf9ff42f"
] | [
"tensorflow/python/ops/math_ops.py",
"tensorflow/python/kernel_tests/embedding_ops_test.py"
] | [
"\"\"\"## Arithmetic Operators\n\nTensorFlow provides several operations that you can use to add basic arithmetic\noperators to your graph.\n\n@@add\n@@sub\n@@mul\n@@div\n@@mod\n\n## Basic Math Functions\n\nTensorFlow provides several operations that you can use to add basic\nmathematical functions to your graph.\n\n@@add_n\n@@abs\n@@neg\n@@sign\n@@inv\n@@square\n@@round\n@@sqrt\n@@rsqrt\n@@pow\n@@exp\n@@log\n@@ceil\n@@floor\n@@maximum\n@@minimum\n@@cos\n@@sin\n\n## Matrix Math Functions\n\nTensorFlow provides several operations that you can use to add basic\nmathematical functions for matrices to your graph.\n\n@@diag\n@@transpose\n\n@@matmul\n@@batch_matmul\n\n@@matrix_determinant\n@@batch_matrix_determinant\n\n@@matrix_inverse\n@@batch_matrix_inverse\n\n@@cholesky\n@@batch_cholesky\n\n## Complex Number Functions\n\nTensorFlow provides several operations that you can use to add complex number\nfunctions to your graph.\n\n@@complex\n@@complex_abs\n@@conj\n@@imag\n@@real\n\n## Reduction\n\nTensorFlow provides several operations that you can use to perform\ncommon math computations that reduce various dimensions of a tensor.\n\n@@reduce_sum\n@@reduce_prod\n@@reduce_min\n@@reduce_max\n@@reduce_mean\n@@reduce_all\n@@reduce_any\n\n@@accumulate_n\n\n## Segmentation\n\nTensorFlow provides several operations that you can use to perform common\nmath computations on tensor segments.\nHere a segmentation is a partitioning of a tensor along\nthe first dimension, i.e. it defines a mapping from the first dimension onto\n`segment_ids`. The `segment_ids` tensor should be the size of\nthe first dimension, `d0`, with consecutive IDs in the range `0` to `k`,\nwhere `k<d0`.\nIn particular, a segmentation of a matrix tensor is a mapping of rows to\nsegments.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.segment_sum(c, tf.constant([0, 0, 1]))\n ==> [[0 0 0 0]\n [5 6 7 8]]\n```\n\n@@segment_sum\n@@segment_prod\n@@segment_min\n@@segment_max\n@@segment_mean\n\n@@unsorted_segment_sum\n\n@@sparse_segment_sum\n@@sparse_segment_mean\n\n\n## Sequence Comparison and Indexing\n\nTensorFlow provides several operations that you can use to add sequence\ncomparison and index extraction to your graph. 
You can use these operations to\ndetermine sequence differences and determine the indexes of specific values in\na tensor.\n\n@@argmin\n@@argmax\n\n@@listdiff\n@@where\n@@unique\n\n@@edit_distance\n\n@@invert_permutation\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.python.platform\n\nimport numpy as np\nimport six.moves\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import types\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import common_shapes\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import gen_state_ops\n# pylint: disable=wildcard-import,undefined-variable\nfrom tensorflow.python.ops.gen_math_ops import *\n\n\n# Aliases for some automatically-generated names.\nargmax = gen_math_ops.arg_max\nargmin = gen_math_ops.arg_min\nlinspace = gen_math_ops.lin_space\n\n\n# pylint: disable=anomalous-backslash-in-string,protected-access\ndef abs(x, name=None):\n \"\"\"Computes the absolute value of a tensor.\n\n Given a tensor of real numbers `x`, this operation returns a tensor\n containing the absolute value of each element in `x`. For example, if x is\n an input element and y is an output element, this operation computes\n \\\\\\\\(y = |x|\\\\\\\\).\n\n See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex\n number.\n\n Args:\n x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same size and type as `x` with absolute values.\n \"\"\"\n with ops.op_scope([x], name, \"Abs\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype == types.complex64:\n return gen_math_ops.complex_abs(x, name=name)\n return gen_math_ops._abs(x, name=name)\n\n\n\ndef pow(x, y, name=None):\n \"\"\"Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\\\\\(x^y\\\\\\\\) for\n corresponding elements in `x` and `y`. 
For example:\n\n ```\n # tensor 'x' is [[2, 2]], [3, 3]]\n # tensor 'y' is [[8, 16], [2, 3]]\n tf.pow(x, y) ==> [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.\n y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n \"\"\"\n with ops.op_scope([x], name, \"Pow\") as name:\n return gen_math_ops._pow(x, y, name=name)\n\n\ndef complex(real, imag, name=None):\n \"\"\"Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation computes complex numbers elementwise of the form \\\\\\\\(a + bj\\\\\\\\),\n where *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must be the same shape.\n\n For example:\n\n ```\n # tensor 'real' is [2.25, 3.25]\n # tensor `imag` is [4.75, 5.75]\n tf.complex(real, imag) ==> [[2.25 + 4.74j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor` of type `float`.\n imag: A `Tensor` of type `float`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64`.\n \"\"\"\n with ops.op_scope([real, imag], name, \"Complex\") as name:\n return gen_math_ops._complex(real, imag, name=name)\n\n\ndef round(x, name=None):\n \"\"\"Rounds the values of a tensor to the nearest integer, element-wise.\n\n For example:\n\n ```python\n # 'a' is [0.9, 2.5, 2.3, -4.4]\n tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]\n ```\n\n Args:\n x: A `Tensor` of type `float` or `double`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as `x`.\n \"\"\"\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_integer:\n return x\n else:\n return floor(x + 0.5, name=name)\n\n\ndef cast(x, dtype, name=None):\n \"\"\"Casts a tensor to a new type.\n\n The operation casts `x` (in case of `Tensor`) or `x.values`\n (in case of `SparseTensor`) to `dtype`.\n\n For example:\n\n ```python\n # tensor `a` is [1.8, 2.2], dtype=tf.float\n tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32\n ```\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n dtype: The destination type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `dtype`.\n \"\"\"\n with ops.op_scope([x], name, \"Cast\") as name:\n if isinstance(x, ops.SparseTensor):\n values_cast = cast(x.values, dtype, name=name)\n return ops.SparseTensor(x.indices, values_cast, x.shape)\n else:\n # TODO(touts): Handle what Josh said.\n #\n # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that\n # allows some conversions that cast() can't do, e.g. 
casting numbers to\n # strings.\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.base_dtype == dtype:\n return x\n return gen_math_ops.cast(x, dtype, name=name)\n\n\ndef to_float(x, name=\"ToFloat\"):\n \"\"\"Casts a tensor to type `float32`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float32`.\n \"\"\"\n return cast(x, types.float32, name=name)\n\n\ndef to_double(x, name=\"ToDouble\"):\n \"\"\"Casts a tensor to type `float64`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float64`.\n \"\"\"\n return cast(x, types.float64, name=name)\n\n\ndef to_int32(x, name=\"ToInt32\"):\n \"\"\"Casts a tensor to type `int32`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int32`.\n \"\"\"\n return cast(x, types.int32, name=name)\n\n\ndef to_int64(x, name=\"ToInt64\"):\n \"\"\"Casts a tensor to type `int64`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int64`.\n \"\"\"\n return cast(x, types.int64, name=name)\n\n\ndef to_bfloat16(x, name=\"ToBFloat16\"):\n \"\"\"Casts a tensor to type `bfloat16`.\n\n Args:\n x: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `bfloat16`.\n \"\"\"\n return cast(x, types.bfloat16, name=name)\n\n\nops.Tensor._override_operator(\"__neg__\", neg)\nops.Tensor._override_operator(\"__abs__\", abs)\n# __invert__ corresponds to the ~ operator. Here we follow the numpy convention\n# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean\n# tensors and will throw a TypeError if used on nonboolean arrays\nops.Tensor._override_operator(\"__invert__\", logical_not)\n\n\ndef _OverrideBinaryOperatorHelper(func, op_name):\n \"\"\"Register operators with different tensor and scalar versions.\n\n Args:\n func: the operator\n op_name: name of the operator being overridden\n \"\"\"\n\n def binary_op_wrapper(x, y):\n with ops.op_scope([x, y], None, op_name) as name:\n assert isinstance(x, ops.Tensor)\n y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name=\"y\")\n return func(x, y, name=name)\n\n ops.Tensor._override_operator(\"__%s__\" % op_name, binary_op_wrapper)\n del binary_op_wrapper\n\n def r_binary_op_wrapper(y, x):\n with ops.op_scope([x, y], None, op_name) as name:\n assert isinstance(y, ops.Tensor)\n x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name=\"x\")\n return func(x, y, name=name)\n\n ops.Tensor._override_operator(\"__r%s__\" % op_name, r_binary_op_wrapper)\n del r_binary_op_wrapper\n\n\n# Conversion table for __truediv__. 
None entries mean no conversion required.\n_TRUEDIV_TABLE = {\n types.uint8: types.float32,\n types.int8: types.float32,\n types.int16: types.float32,\n types.int32: types.float64,\n types.int64: types.float64,\n types.float32: None,\n types.float64: None,\n types.complex64: None,\n}\n\n\ndef truediv(x, y, name=None):\n \"\"\"Divides x / y elementwise, always producing floating point results.\n\n The same as `tf.div` for floating point arguments, but casts integer arguments\n to floating point before dividing so that the result is always floating point.\n This op is generated by normal `x / y` division in Python 3 and in Python 2.7\n with `from __future__ import division`. If you want integer division that\n rounds down, use `x // y` or `tf.floordiv`.\n\n `x` and `y` must have the same numeric type. If the inputs are floating\n point, the output will have the same type. If the inputs are integral, the\n inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n and `int64` (matching the behavior of Numpy).\n\n Args:\n x: `Tensor` numerator of numeric type.\n y: `Tensor` denominator of numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` evaluated in floating point.\n\n Raises:\n TypeError: If `x` and `y` have different dtypes.\n \"\"\"\n with ops.op_scope([x, y], name, \"truediv\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n x = cast(x, dtype)\n y = cast(y, dtype)\n return div(x, y, name=name)\n\n\ndef floordiv(x, y, name=None):\n \"\"\"Divides `x / y` elementwise, rounding down for floating point.\n\n The same as `tf.div(x,y)`, but uses `tf.floor(tf.div(x,y))` for floating\n point arguments so that the result is always an integer (though possibly an\n integer represented as floating point). 
This op is generated by `x // y`\n floor division in Python 3 and in Python 2.7 with\n `from __future__ import division`.\n\n Note that for efficiency, __floordiv__ uses C semantics for negative numbers\n (unlike Python and Numpy).\n\n `x` and `y` must have the same type, and the result will have the same type\n as well.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` numerator of real numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` rounded down (except possibly for integers in C).\n\n Raises:\n TypeError: If the inputs are complex.\n \"\"\"\n with ops.op_scope([x, y], name, \"floordiv\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n dtype = x.dtype\n if dtype.is_floating:\n return floor(div(x, y), name=name)\n else:\n if not dtype.is_integer:\n raise TypeError(\"Expected floating point or integer, got %r\" % dtype)\n return div(x, y, name=name)\n\n\n_OverrideBinaryOperatorHelper(add, \"add\")\n_OverrideBinaryOperatorHelper(sub, \"sub\")\n_OverrideBinaryOperatorHelper(mul, \"mul\")\n_OverrideBinaryOperatorHelper(div, \"div\")\n_OverrideBinaryOperatorHelper(truediv, \"truediv\")\n_OverrideBinaryOperatorHelper(floordiv, \"floordiv\")\n_OverrideBinaryOperatorHelper(mod, \"mod\")\n\n\ndef logical_xor(x, y, name=\"LogicalXor\"):\n \"\"\"x ^ y = (x | y) & ~(x & y).\"\"\"\n # TODO(alemi) Make this a cwise op if people end up relying on it.\n return logical_and(logical_or(x, y), logical_not(logical_and(x, y)),\n name=name)\n\n_OverrideBinaryOperatorHelper(logical_and, \"and\")\n_OverrideBinaryOperatorHelper(logical_or, \"or\")\n_OverrideBinaryOperatorHelper(logical_xor, \"xor\")\n\nops.Tensor._override_operator(\"__lt__\", less)\nops.Tensor._override_operator(\"__le__\", less_equal)\nops.Tensor._override_operator(\"__gt__\", greater)\nops.Tensor._override_operator(\"__ge__\", greater_equal)\n\n\ndef range(start, limit, delta=1, name=\"range\"):\n \"\"\"Creates a sequence of integers.\n\n This operation creates a sequence of integers that begins at `start` and\n extends by increments of `delta` up to but not including `limit`.\n\n For example:\n\n ```\n # 'start' is 3\n # 'limit' is 18\n # 'delta' is 3\n tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]\n ```\n\n Args:\n start: A 0-D (scalar) of type `int32`. First entry in sequence.\n limit: A 0-D (scalar) of type `int32`. Upper limit of sequence,\n exclusive.\n delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. 
Default is 1.\n Number that increments `start`.\n name: A name for the operation (optional).\n\n Returns:\n An 1-D `int32` `Tensor`.\n \"\"\"\n return gen_math_ops._range(start, limit, delta, name=name)\n\n\[email protected](\"Range\")\ndef _RangeShape(op):\n start_value = tensor_util.ConstantValue(op.inputs[0])\n limit_value = tensor_util.ConstantValue(op.inputs[1])\n delta_value = tensor_util.ConstantValue(op.inputs[2])\n if start_value is None or limit_value is None or delta_value is None:\n return [tensor_shape.vector(None)]\n else:\n return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //\n delta_value)]\n\n\n# Reduction operations\ndef _ReductionDims(x, reduction_indices):\n \"\"\"Returns range(0, rank(x)) if reduction_indices is None.\"\"\"\n if reduction_indices is not None:\n return reduction_indices\n else:\n return range(0, array_ops.rank(x))\n\n\ndef reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the sum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[1, 1, 1]]\n # [1, 1, 1]]\n tf.reduce_sum(x) ==> 6\n tf.reduce_sum(x, 0) ==> [2, 2, 2]\n tf.reduce_sum(x, 1) ==> [3, 3]\n tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]\n tf.reduce_sum(x, [0, 1]) ==> 6\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[1., 1. ]]\n # [2., 2.]]\n tf.reduce_mean(x) ==> 1.5\n tf.reduce_mean(x, 0) ==> [1.5, 1.5]\n tf.reduce_mean(x, 1) ==> [1., 2.]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. 
If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the product of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_min(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the minimum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_max(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the maximum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n reduction_indices: The dimensions to reduce. 
If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_all(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the \"logical and\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[True, True]]\n # [False, False]]\n tf.reduce_all(x) ==> False\n tf.reduce_all(x, 0) ==> [False, False]\n tf.reduce_all(x, 1) ==> [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef reduce_any(input_tensor, reduction_indices=None, keep_dims=False,\n name=None):\n \"\"\"Computes the \"logical or\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `reduction_indices`.\n Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each\n entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions\n are retained with length 1.\n\n If `reduction_indices` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n # 'x' is [[True, True]]\n # [False, False]]\n tf.reduce_any(x) ==> True\n tf.reduce_any(x, 0) ==> [True, True]\n tf.reduce_any(x, 1) ==> [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n reduction_indices: The dimensions to reduce. If `None` (the defaut),\n reduces all dimensions.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,\n reduction_indices),\n keep_dims, name=name)\n\n\ndef matmul(a, b,\n transpose_a=False, transpose_b=False,\n a_is_sparse=False, b_is_sparse=False,\n name=None):\n \"\"\"Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n\n The inputs must be two-dimensional matrices, with matching inner dimensions,\n possibly after transposition.\n\n Both matrices must be of the same type. The supported types are:\n `float`, `double`, `int32`, `complex64`.\n\n Either matrix can be transposed on the fly by setting the corresponding flag\n to `True`. This is `False` by default.\n\n If one or both of the matrices contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n\n For example:\n\n ```python\n # 2-D tensor `a`\n a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]\n [4. 5. 
6.]]\n # 2-D tensor `b`\n b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]\n [9. 10.]\n [11. 12.]]\n c = tf.matmul(a, b) => [[58 64]\n [139 154]]\n ```\n\n Args:\n a: `Tensor` of type `float`, `double`, `int32` or `complex64`.\n b: `Tensor` with same type as `a`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix.\n name: Name for the operation (optional).\n\n Returns:\n A `Tensor` of the same type as `a`.\n \"\"\"\n with ops.op_scope([a, b], name, \"MatMul\") as name:\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n if a.dtype == types.float32 and (a_is_sparse or b_is_sparse):\n return sparse_matmul(a, b,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=a_is_sparse,\n b_is_sparse=b_is_sparse,\n name=name)\n else:\n return gen_math_ops._mat_mul(a, b,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n name=name)\n\nsparse_matmul = gen_math_ops._sparse_mat_mul\nbatch_matmul = gen_math_ops._batch_mat_mul\n\nops.RegisterShape(\"MatMul\")(common_shapes.matmul_shape)\nops.RegisterShape(\"SparseMatMul\")(common_shapes.matmul_shape)\n\n\ndef _as_indexed_slices(x):\n \"\"\"Convert 'x' to IndexedSlices.\n\n Convert a dense Tensor to a block-sparse IndexedSlices.\n\n Args:\n x: Either a Tensor object, or an IndexedSlices object.\n\n Returns:\n An IndexedSlices object.\n\n Raises:\n TypeError: If 'x' is not a Tensor or an IndexedSlices object.\n \"\"\"\n # TODO(touts): op_scope\n if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):\n raise TypeError(\"Not a Tensor or IndexedSlices: %s\" % type(x))\n if isinstance(x, ops.IndexedSlices):\n return x\n x_shape = array_ops.shape(x)\n return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)\n\n\ndef _as_indexed_slices_list(inputs):\n \"\"\"Convert all elements of 'inputs' to IndexedSlices.\n\n Additionally, homogenize the types of all the indices to\n either int32 or int64.\n\n Args:\n inputs: List containing either Tensor or IndexedSlices objects.\n\n Returns:\n A list of IndexedSlices objects.\n\n Raises:\n TypeError: If 'inputs' is not a list or a tuple.\n \"\"\"\n if not isinstance(inputs, (list, tuple)):\n raise TypeError(\"Expected a list or tuple, not a %s\" % type(inputs))\n outputs = [_as_indexed_slices(i) for i in inputs]\n with_int32_index = [o.indices for o in outputs\n if o.indices.dtype == types.int32]\n if not with_int32_index or len(with_int32_index) == len(outputs):\n return outputs\n casted_outputs = []\n for o in outputs:\n if o.indices.dtype == types.int32:\n casted_outputs.append(\n ops.IndexedSlices(o.values, cast(o.indices, types.int64),\n o.dense_shape))\n else:\n casted_outputs.append(o)\n return casted_outputs\n\n\ndef accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):\n \"\"\"Returns the element-wise sum of a list of tensors.\n\n Optionally, pass `shape` and `tensor_dtype` for shape and type checking,\n otherwise, these are inferred.\n\n For example:\n\n ```python\n # tensor 'a' is [[1, 2], [3, 4]\n # tensor `b` is [[5, 0], [0, 6]]\n tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]\n\n # Explicitly pass shape and type\n tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)\n ==> [[7, 4], [6, 14]]\n ```\n\n Args:\n inputs: A list of `Tensor` objects, each with same shape and type.\n shape: Shape of elements of `inputs`.\n 
tensor_dtype: The type of `inputs`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as the elements of `inputs`.\n\n Raises:\n ValueError: If `inputs` don't all have same shape and dtype or the shape\n cannot be inferred.\n \"\"\"\n if tensor_dtype is None:\n if not inputs or not isinstance(inputs, (list, tuple)):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)\n if not all(isinstance(x, ops.Tensor) for x in inputs):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n if not all(x.dtype == inputs[0].dtype for x in inputs):\n raise ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n tensor_dtype = inputs[0].dtype\n if shape is not None:\n shape = tensor_shape.as_shape(shape)\n else:\n shape = tensor_shape.unknown_shape()\n for input_tensor in inputs:\n if isinstance(input_tensor, ops.Tensor):\n shape = shape.merge_with(input_tensor.get_shape())\n if not shape.is_fully_defined():\n # TODO(pbar): Make a version of assign_add that accepts an uninitialized\n # lvalue, and takes its shape from that? This would allow accumulate_n to\n # work in all situations that add_n currently works.\n raise ValueError(\"Cannot infer the shape of the accumulator for \"\n \"accumulate_n. Pass the shape argument, or set the shape \"\n \"of at least one of the inputs.\")\n with ops.op_scope(inputs, name, \"AccumulateN\") as name:\n var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)\n var_name = var.op.name\n var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))\n update_ops = []\n for input_tensor in inputs:\n op = state_ops.assign_add(var, input_tensor, use_locking=True)\n update_ops.append(op)\n with ops.control_dependencies(update_ops):\n return gen_state_ops._destroy_temporary_variable(var,\n var_name=var_name,\n name=name)\n\n\[email protected](\"BatchMatMul\")\ndef _BatchMatMulShape(op):\n \"\"\"Shape function for BatchMatMul op.\"\"\"\n a_shape = op.inputs[0].get_shape()\n adj_a = op.get_attr(\"adj_x\")\n b_shape = op.inputs[1].get_shape()\n adj_b = op.get_attr(\"adj_y\")\n if not a_shape.is_fully_defined() or not b_shape.is_fully_defined():\n return [tensor_shape.unknown_shape()]\n batch_dims = a_shape[:-2].merge_with(b_shape[:-2])\n output_rows = a_shape[-1] if adj_a else a_shape[-2]\n output_cols = b_shape[-2] if adj_b else b_shape[-1]\n inner_a = a_shape[-2] if adj_a else a_shape[-1]\n inner_b = b_shape[-1] if adj_b else b_shape[-2]\n inner_a.assert_is_compatible_with(inner_b)\n return [batch_dims.concatenate([output_rows, output_cols])]\n\n\ndef sigmoid(x, name=None):\n \"\"\"Computes sigmoid of `x` element-wise.\n\n Specifically, `y = 1 / (1 + exp(-x))`.\n\n Args:\n x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,\n or `qint32`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x` if `x.dtype != qint32`\n otherwise the return type is `quint8`.\n \"\"\"\n with ops.op_scope([x], name, \"Sigmoid\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops._sigmoid(x, name=name)\n\n\ndef tanh(x, name=None):\n \"\"\"Computes hyperbolic tangent of `x` element-wise.\n\n Args:\n x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,\n or `qint32`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with 
the same type as `x` if `x.dtype != qint32` otherwise\n the return type is `quint8`.\n \"\"\"\n with ops.op_scope([x], name, \"Tanh\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops._tanh(x, name=name)\n\n\nops.RegisterShape(\"Abs\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Ceil\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Conj\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Cos\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Exp\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Floor\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Imag\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Inv\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"IsFinite\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"IsInf\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"IsNan\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Log\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"LogicalNot\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Neg\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Real\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Rsqrt\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Sign\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Sin\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Sqrt\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Square\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Sigmoid\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Tanh\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Cast\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"ComplexAbs\")(common_shapes.unchanged_shape)\n\n\[email protected](\"Add\")\[email protected](\"Complex\")\[email protected](\"Div\")\[email protected](\"Equal\")\[email protected](\"Greater\")\[email protected](\"GreaterEqual\")\[email protected](\"Less\")\[email protected](\"LessEqual\")\[email protected](\"LogicalAnd\")\[email protected](\"LogicalOr\")\[email protected](\"Maximum\")\[email protected](\"Minimum\")\[email protected](\"Mod\")\[email protected](\"Mul\")\[email protected](\"NotEqual\")\[email protected](\"Pow\")\[email protected](\"Sub\")\ndef _BroadcastShape(op):\n \"\"\"Common shape function for binary operators that broadcast their inputs.\"\"\"\n shape_x = op.inputs[0].get_shape()\n shape_y = op.inputs[1].get_shape()\n if shape_x.ndims is None or shape_y.ndims is None:\n return [tensor_shape.unknown_shape()]\n\n # To compute the broadcasted dimensions, we zip together shape_x and shape_y,\n # and pad with 1 to make them the same length.\n broadcasted_dims = reversed(list(six.moves.zip_longest(\n reversed(shape_x.dims),\n reversed(shape_y.dims),\n fillvalue=tensor_shape.Dimension(1))))\n # Next we combine the dimensions according to the numpy broadcasting rules.\n # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html\n return_dims = []\n for (dim_x, dim_y) in broadcasted_dims:\n if dim_x.value is None or dim_y.value is None:\n # One or both dimensions is unknown. 
If either dimension is greater than\n # 1, we assume that the program is correct, and the other dimension will\n # be broadcast to match it.\n # TODO(mrry): If we eliminate the shape checks in C++, we must still\n # assert that the unknown dim is either 1 or the same as the known dim.\n if dim_x.value is not None and dim_x.value > 1:\n return_dims.append(dim_x)\n elif dim_y.value is not None and dim_y.value > 1:\n return_dims.append(dim_y)\n else:\n return_dims.append(None)\n elif dim_x.value == 1:\n # We will broadcast dim_x to dim_y.\n return_dims.append(dim_y)\n elif dim_y.value == 1:\n # We will broadcast dim_y to dim_x.\n return_dims.append(dim_x)\n elif dim_x.value == dim_y.value:\n # The dimensions are compatible, so output is the same size in that\n # dimension.\n return_dims.append(dim_x.merge_with(dim_y))\n else:\n raise ValueError(\"Incompatible shapes for broadcasting: %s and %s\"\n % (shape_x, shape_y))\n return [tensor_shape.TensorShape(return_dims)]\n\n\[email protected](\"AddN\")\ndef _AddNShape(op):\n merged_shape = tensor_shape.unknown_shape()\n for input_ in op.inputs:\n merged_shape = merged_shape.merge_with(input_.get_shape())\n return [merged_shape]\n\n\[email protected](\"Select\")\ndef _SelectShape(op):\n # All three inputs must have the same shape.\n return [op.inputs[0].get_shape()\n .merge_with(op.inputs[1].get_shape())\n .merge_with(op.inputs[2].get_shape())]\n\n\[email protected](\"ArgMax\")\[email protected](\"ArgMin\")\ndef _ArgOpShape(op):\n \"\"\"Common shape function for arg-reduction ops.\"\"\"\n dimension_shape = op.inputs[1].get_shape()\n dimension_shape.assert_is_compatible_with(tensor_shape.scalar())\n input_shape = op.inputs[0].get_shape()\n if input_shape.ndims is None:\n return [tensor_shape.unknown_shape()]\n elif input_shape.ndims <= 1:\n return [tensor_shape.scalar()]\n\n dimension = tensor_util.ConstantValue(op.inputs[1])\n if dimension is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]\n elif 0 <= dimension and dimension < input_shape.ndims:\n returned_shape = []\n for i, dim in enumerate(input_shape.dims):\n if i != dimension:\n returned_shape.append(dim)\n return [tensor_shape.TensorShape(returned_shape)]\n else:\n raise ValueError(\n \"dimension (%d) must be in the range [0, %d), where %d is the number \"\n \"of dimensions in the input\"\n % (dimension, input_shape.ndims, input_shape.ndims))\n\n\[email protected](\"All\")\[email protected](\"Any\")\[email protected](\"Max\")\[email protected](\"Mean\")\[email protected](\"Min\")\[email protected](\"Prod\")\[email protected](\"Sum\")\ndef _ReductionShape(op):\n \"\"\"Common shape function for reduction ops.\"\"\"\n input_shape = op.inputs[0].get_shape()\n reduction_indices = tensor_util.ConstantValue(op.inputs[1])\n keep_dims = op.get_attr(\"keep_dims\")\n if reduction_indices is None or input_shape.ndims is None:\n if keep_dims:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n return [tensor_shape.unknown_shape()]\n\n # Turn reduction_indices from scalar to vector if necessary\n reduction_indices = np.ravel(reduction_indices)\n\n for reduction_index in reduction_indices:\n if reduction_index < 0 or reduction_index >= input_shape.ndims:\n raise ValueError(\"Invalid reduction dimension %d for input with %d \"\n \"dimensions\" % (reduction_index, input_shape.ndims))\n\n returned_dims = []\n if keep_dims:\n for i, dim in enumerate(input_shape.dims):\n if i in reduction_indices:\n returned_dims.append(1)\n else:\n returned_dims.append(dim)\n 
else:\n for i, dim in enumerate(input_shape.dims):\n if i not in reduction_indices:\n returned_dims.append(dim)\n return [tensor_shape.TensorShape(returned_dims)]\n\n\[email protected](\"SegmentMax\")\[email protected](\"SegmentMean\")\[email protected](\"SegmentMin\")\[email protected](\"SegmentProd\")\[email protected](\"SegmentSum\")\ndef _SegmentReductionShape(op):\n \"\"\"Common shape function for segment reduction ops.\"\"\"\n data_shape = op.inputs[0].get_shape()\n segment_ids_shape = op.inputs[1].get_shape()\n segment_ids_shape.assert_has_rank(1)\n return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]\n\n\[email protected](\"SparseSegmentMean\")\[email protected](\"SparseSegmentSum\")\ndef _SparseSegmentReductionShape(op):\n \"\"\"Common shape function for sparse segment reduction ops.\"\"\"\n data_shape = op.inputs[0].get_shape()\n indices_shape = op.inputs[1].get_shape()\n indices_shape.assert_has_rank(1)\n segment_ids_shape = op.inputs[2].get_shape()\n segment_ids_shape.assert_has_rank(1)\n indices_shape.assert_is_compatible_with(segment_ids_shape)\n return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]\n\n\[email protected](\"SparseSegmentMeanGrad\")\ndef _SparseSegmentMeanGradShape(op):\n \"\"\"Shape function for the SparseSegmentMeanGrad op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n indices_shape = op.inputs[1].get_shape().with_rank(1)\n unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)\n unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(\n tensor_shape.scalar())\n output_dim0 = tensor_util.ConstantValue(op.inputs[3])\n if output_dim0 is not None:\n dim0 = output_dim0[0]\n else:\n dim0 = None\n return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]\n\n\[email protected](\"UnsortedSegmentSum\")\ndef _UnsortedSegmentSumShape(op):\n \"\"\"Shape function for UnsortedSegmentSum.\"\"\"\n data_shape = op.inputs[0].get_shape()\n segment_ids_shape = op.inputs[1].get_shape()\n mid = segment_ids_shape.ndims\n if mid is None:\n return [tensor_shape.unknown_shape()]\n else:\n num_segments = tensor_util.ConstantValue(op.inputs[2])\n return [tensor_shape.TensorShape([num_segments]).concatenate(\n data_shape[mid:])]\n\n\[email protected](\"LinSpace\")\ndef _LinspaceShape(op):\n num = tensor_util.ConstantValue(op.inputs[2])\n return [tensor_shape.vector(num)]\n",
"\"\"\"Functional tests for ops used with embeddings.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport tensorflow.python.platform\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nfrom tensorflow.python.kernel_tests import gradient_checker as gc\n\n\ndef _AsLong(array):\n \"\"\"Casts arrays elements to long type. Used to convert from numpy tf.\"\"\"\n return [int(x) for x in array]\n\n\nclass ScatterAddSubTest(tf.test.TestCase):\n\n def _TestCase(self, shape, indices, scatter_op=tf.scatter_add):\n \"\"\"Run a random test case with the given shape and indices.\n\n Args:\n shape: Shape of the parameters array.\n indices: One-dimensional array of ints, the indices of the last dimension\n of the parameters to update.\n scatter_op: ScatterAdd or ScatterSub.\n \"\"\"\n super(ScatterAddSubTest, self).setUp()\n with self.test_session(use_gpu=False):\n # Create a random parameter array of given shape\n p_init = np.random.rand(*shape).astype(\"f\")\n # Create the shape of the update array. All dimensions except the last\n # match the parameter array, the last dimension equals the # of indices.\n vals_shape = [len(indices)] + shape[1:]\n vals_init = np.random.rand(*vals_shape).astype(\"f\")\n v_i = [float(x) for x in vals_init.ravel()]\n p = tf.Variable(p_init)\n vals = tf.constant(v_i, shape=vals_shape, name=\"vals\")\n ind = tf.constant(indices, dtype=tf.int32)\n p2 = scatter_op(p, ind, vals, name=\"updated_p\")\n # p = init\n tf.initialize_all_variables().run()\n # p += vals\n result = p2.eval()\n # Compute the expected 'p' using numpy operations.\n for i, ind in enumerate(indices):\n if scatter_op == tf.scatter_add:\n p_init.reshape(shape[0], -1)[ind, :] += (\n vals_init.reshape(vals_shape[0], -1)[i, :])\n else:\n p_init.reshape(shape[0], -1)[ind, :] -= (\n vals_init.reshape(vals_shape[0], -1)[i, :])\n self.assertTrue(all((p_init == result).ravel()))\n\n def testNoRepetitions(self):\n self._TestCase([2, 2], [1])\n self._TestCase([4, 4, 4], [2, 0])\n self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])\n\n def testWithRepetitions(self):\n self._TestCase([2, 2], [1, 1])\n self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])\n self._TestCase([32, 4, 4], [31] * 8)\n\n def testRandom(self):\n # Random shapes of rank 4, random indices\n for _ in range(5):\n shape = np.random.randint(1, 20, size=4)\n indices = np.random.randint(shape[0], size=2 * shape[0])\n self._TestCase(_AsLong(list(shape)), list(indices))\n\n def testSubRandom(self):\n # Random shapes of rank 4, random indices\n for _ in range(5):\n shape = np.random.randint(1, 20, size=4)\n indices = np.random.randint(shape[0], size=2 * shape[0])\n self._TestCase(_AsLong(list(shape)), list(indices),\n tf.scatter_sub)\n\n def testWrongShape(self):\n # Indices and values mismatch.\n var = tf.Variable(tf.zeros(shape=[1024, 64, 64], dtype=tf.float32))\n indices = tf.placeholder(tf.int32, shape=[32])\n values = tf.placeholder(tf.float32, shape=[33, 64, 64])\n with self.assertRaises(ValueError):\n tf.scatter_add(var, indices, values)\n\n # Var and values mismatch.\n values = tf.placeholder(tf.float32, shape=[32, 64, 63])\n with self.assertRaises(ValueError):\n tf.scatter_add(var, indices, values)\n\n\ndef _PName(param_id):\n return \"p\" + str(param_id)\n\n\ndef _EmbeddingParams(num_shards, vocab_size,\n dtype=tf.float32,\n shape=None):\n p = []\n params = {}\n feed_dict = {}\n 
if not shape: shape = [10]\n assert not vocab_size % num_shards\n shape = [vocab_size // num_shards] + shape\n for i in range(num_shards):\n param_name = _PName(i)\n constant_t = tf.constant(1.0, shape=shape, dtype=dtype,\n name=param_name)\n p.append(constant_t)\n np_type = \"f\" if dtype == tf.float32 else \"d\"\n val = (np.random.rand(*shape).astype(np_type)) + 1\n params[param_name + \":0\"] = val\n feed_dict[constant_t.name] = val\n return p, params, feed_dict\n\n\ndef _EmbeddingResult(params, id_vals, num_shards, weight_vals=None):\n if weight_vals is None:\n weight_vals = np.copy(id_vals)\n weight_vals.fill(1)\n values = []\n weights = []\n for ids, wts in zip(id_vals, weight_vals):\n val_aggr = None\n wt_aggr = None\n if isinstance(ids, int):\n ids = [ids]\n wts = [wts]\n for i, wt_val in zip(ids, wts):\n val = np.copy(params[_PName(i % num_shards) + \":0\"][\n i // num_shards, :]) * wt_val\n if val_aggr is None:\n assert wt_aggr is None\n val_aggr = val\n wt_aggr = wt_val\n else:\n assert wt_aggr is not None\n val_aggr += val\n wt_aggr += wt_val\n values.append(val_aggr)\n weights.append(wt_aggr)\n values = np.array(values).astype(np.float32)\n weights = np.array(weights).astype(np.float32)\n return values, weights\n\n\nclass EmbeddingLookupTest(tf.test.TestCase):\n\n # This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since\n # both the ids are in the first shard, one of the resulting lookup\n # vector is going to be empty. The subsequent DivOp fails because of that.\n # TODO(keveman): Disabling the test until the underlying problem is fixed.\n def testSimpleSharded(self):\n with self.test_session():\n num_shards = 2\n vocab_size = 4\n p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)\n\n id_vals = np.array([0, 0])\n ids = tf.constant(list(id_vals), dtype=tf.int32)\n print(\"Construct ids\", ids.get_shape())\n embedding = tf.nn.embedding_lookup(p, ids)\n\n tf_result = embedding.eval(feed_dict=feed_dict)\n np_result, _ = _EmbeddingResult(params, id_vals, num_shards)\n self.assertAllEqual(np_result, tf_result)\n self.assertShapeEqual(np_result, embedding)\n\n def testSharded(self):\n with self.test_session():\n num_shards = 5\n vocab_size = 25\n # Embedding dimensions is 10. The 10 x vocab_size embedding\n # parameters are spread in num_shards matrices, so each\n # matrix is 10 x (vocab_size / num_shards)\n p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)\n\n num_vals = 30\n # Fetch num_vals embeddings for random word ids. 
Since\n # num_vals > vocab_size, this ought to have repetitions, so\n # will test that aspect.\n id_vals = np.random.randint(vocab_size, size=num_vals)\n ids = tf.constant(list(id_vals), dtype=tf.int32)\n\n embedding = tf.nn.embedding_lookup(p, ids)\n tf_result = embedding.eval(feed_dict=feed_dict)\n np_result, _ = _EmbeddingResult(params, id_vals, num_shards)\n self.assertAllEqual(np_result, tf_result)\n self.assertShapeEqual(np_result, embedding)\n\n def testGradientsEmbeddingLookup(self):\n vocab_size = 9\n num_ids = 5\n id_vals = list(np.random.randint(vocab_size, size=num_ids))\n tf.logging.vlog(1, id_vals)\n for num_shards in [1, 3]:\n with self.test_session():\n ids = tf.constant(id_vals, dtype=tf.int32)\n x, params, _ = _EmbeddingParams(\n num_shards, vocab_size, shape=[2])\n y = tf.nn.embedding_lookup(x, ids)\n y_shape = [num_ids] + list(params[_PName(0) + \":0\"].shape[1:])\n x_name = [_PName(i) for i in range(num_shards)]\n x_init_value = [params[x_n + \":0\"] for x_n in x_name]\n x_shape = [i.shape for i in x_init_value]\n err = gc.ComputeGradientError(x, x_shape, y, y_shape,\n x_init_value=x_init_value)\n self.assertLess(err, 1e-4)\n\n def testGradientsEmbeddingLookupWithComputedParams(self):\n vocab_size = 9\n num_ids = 5\n id_vals = list(np.random.randint(vocab_size, size=num_ids))\n tf.logging.vlog(1, id_vals)\n for num_shards in [1, 3]:\n with self.test_session():\n ids = tf.constant(id_vals, dtype=tf.int32)\n x, params, _ = _EmbeddingParams(\n num_shards, vocab_size, shape=[2])\n # This will force a conversion from IndexedSlices to Tensor.\n x_squared = [tf.square(elem) for elem in x]\n y = tf.nn.embedding_lookup(x_squared, ids)\n y_shape = [num_ids] + list(params[_PName(0) + \":0\"].shape[1:])\n x_name = [_PName(i) for i in range(num_shards)]\n x_init_value = [params[x_n + \":0\"] for x_n in x_name]\n x_shape = [i.shape for i in x_init_value]\n err = gc.ComputeGradientError(x, x_shape, y, y_shape,\n x_init_value=x_init_value)\n self.assertLess(err, 1e-3)\n\n def testConstructionNonSharded(self):\n with tf.Graph().as_default():\n p = tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))\n ids = tf.constant([0, 1, 1, 7], dtype=tf.int32)\n tf.nn.embedding_lookup([p], ids)\n\n def testConstructionSharded(self):\n with tf.Graph().as_default():\n p = []\n for _ in range(2):\n p += [tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))]\n ids = tf.constant([0, 1, 1, 17], dtype=tf.int32)\n tf.nn.embedding_lookup(p, ids)\n\n def testHigherRank(self):\n np.random.seed(8)\n with self.test_session():\n for params_shape in (12,), (6, 3):\n params = np.random.randn(*params_shape)\n for ids_shape in (3, 2), (4, 3):\n ids = np.random.randint(params.shape[0],\n size=np.prod(ids_shape)).reshape(ids_shape)\n # Compare nonsharded to gather\n simple = tf.nn.embedding_lookup(params, ids).eval()\n self.assertAllEqual(simple, tf.gather(params, ids).eval())\n # Run a few random sharded versions\n for procs in 1, 2, 3:\n stride = procs * tf.range(0, params.shape[0] // procs)\n split_params = [tf.gather(params, stride + p)\n for p in xrange(procs)]\n sharded = tf.nn.embedding_lookup(split_params, ids).eval()\n self.assertAllEqual(simple, sharded)\n\n\nclass EmbeddingLookupSparseTest(tf.test.TestCase):\n\n def _RandomIdsAndWeights(self, batch_size, vocab_size):\n max_val_per_entry = 6\n vals_per_batch_entry = np.random.randint(\n 1, max_val_per_entry, size=batch_size)\n num_vals = np.sum(vals_per_batch_entry)\n\n ids = np.random.randint(vocab_size, size=num_vals)\n weights = 1 + 
np.random.rand(num_vals)\n\n indices = []\n for batch_entry, num_val in enumerate(vals_per_batch_entry):\n for val_index in range(num_val):\n indices.append([batch_entry, val_index])\n\n shape = [batch_size, max_val_per_entry]\n\n sp_ids = tf.SparseTensor(\n tf.constant(indices, tf.int64),\n tf.constant(ids, tf.int32),\n tf.constant(shape, tf.int64))\n sp_weights = tf.SparseTensor(\n tf.constant(indices, tf.int64),\n tf.constant(weights, tf.float32),\n tf.constant(shape, tf.int64))\n\n return sp_ids, sp_weights, ids, weights, vals_per_batch_entry\n\n def _GroupByBatchEntry(self, vals, vals_per_batch_entry):\n grouped_vals = []\n index = 0\n for num_val in vals_per_batch_entry:\n grouped_vals.append(list(vals[index: (index + num_val)]))\n index += num_val\n return grouped_vals\n\n def testEmbeddingLookupSparse(self):\n vocab_size = 25\n batch_size = 10\n param_shape = [2, 5]\n\n sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (\n self._RandomIdsAndWeights(batch_size, vocab_size))\n\n grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)\n grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)\n grouped_ignored_weights = self._GroupByBatchEntry(\n np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)\n\n for num_shards, combiner, dtype, ignore_weights in itertools.product(\n [1, 5],\n [\"sum\", \"mean\"],\n [tf.float32, tf.float64],\n [True, False]):\n\n with self.test_session():\n p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size,\n shape=param_shape,\n dtype=dtype)\n embedding_sum = tf.nn.embedding_lookup_sparse(\n p, sp_ids, None if ignore_weights else sp_weights,\n combiner=combiner)\n tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)\n\n np_embedding_sum, np_weight_sum = _EmbeddingResult(\n params, grouped_ids, num_shards,\n weight_vals=grouped_ignored_weights\n if ignore_weights else grouped_weights)\n if combiner == \"mean\":\n np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))\n self.assertAllClose(np_embedding_sum, tf_embedding_sum)\n\n def testGradientsEmbeddingLookupSparse(self):\n vocab_size = 12\n batch_size = 4\n param_shape = [2, 3]\n sp_ids, sp_weights, _, _, _ = (\n self._RandomIdsAndWeights(batch_size, vocab_size))\n\n for num_shards, combiner, dtype, ignore_weights in itertools.product(\n [1, 3],\n [\"sum\", \"mean\"],\n [tf.float32, tf.float64],\n [True, False]):\n with self.test_session():\n x, params, _ = _EmbeddingParams(num_shards, vocab_size,\n shape=param_shape,\n dtype=dtype)\n\n y = tf.nn.embedding_lookup_sparse(\n x, sp_ids, None if ignore_weights else sp_weights,\n combiner=combiner)\n x_name = [_PName(i) for i in range(num_shards)]\n x_init_value = [params[x_n + \":0\"] for x_n in x_name]\n x_shape = [i.shape for i in x_init_value]\n y_shape = [batch_size] + list(params[_PName(0) + \":0\"].shape[1:])\n err = gc.ComputeGradientError(x, x_shape, y, y_shape,\n x_init_value=x_init_value)\n self.assertLess(err, 1e-5 if dtype == tf.float64 else 2e-3)\n\n\nclass DynamicStitchOpTest(tf.test.TestCase):\n\n def testCint32Cpu(self):\n with self.test_session(use_gpu=False):\n indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]\n values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]\n self.assertAllEqual(\n tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])\n\n def testCint32Gpu(self):\n with self.test_session(use_gpu=True):\n indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]\n values = [tf.convert_to_tensor([12, 23, 
34]), tf.convert_to_tensor([1, 2])]\n self.assertAllEqual(\n tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])\n\n def testInt32Cpu(self):\n with self.test_session(use_gpu=False):\n indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]\n values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]\n self.assertAllEqual(\n tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])\n\n def testInt32Gpu(self):\n with self.test_session(use_gpu=True):\n indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]\n values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]\n self.assertAllEqual(\n tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])\n\n def testSumGradArgs(self):\n with self.test_session(use_gpu=False):\n indices = [tf.convert_to_tensor([0, 1, 2, 3]),\n tf.convert_to_tensor([2, 3])]\n values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]\n self.assertAllEqual(\n tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])\n\n # We expect that the values are merged in order.\n def testStitchOrder(self):\n with self.test_session():\n indices = []\n np_values = []\n values = []\n for _ in range(10):\n indices.extend([tf.convert_to_tensor(np.arange(100).astype(np.int32))])\n np_values.extend([np.random.uniform(size=100)])\n values.extend([tf.convert_to_tensor(np_values[-1])])\n stitched = tf.dynamic_stitch(indices, values).eval()\n self.assertAllEqual(np_values[-1], stitched)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.ops.gen_math_ops._range",
"tensorflow.python.ops.gen_math_ops._abs",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.ops.op_scope",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.gen_math_ops._sigmoid",
"tensorflow.python.ops.gen_math_ops._complex",
"tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices",
"tensorflow.python.ops.gen_math_ops._tanh",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.gen_state_ops._temporary_variable",
"tensorflow.python.ops.gen_state_ops._destroy_temporary_variable",
"numpy.ravel",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.framework.ops.SparseTensor",
"tensorflow.python.ops.gen_math_ops._mat_mul",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.framework.ops.Tensor._override_operator",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.gen_math_ops.complex_abs",
"tensorflow.python.framework.tensor_util.ConstantValue",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.ops.gen_math_ops._pow",
"tensorflow.python.ops.gen_math_ops.cast",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.framework.tensor_shape.vector",
"tensorflow.python.framework.tensor_shape.as_shape"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.zeros",
"tensorflow.scatter_add",
"numpy.random.randn",
"numpy.random.randint",
"tensorflow.Graph",
"tensorflow.Variable",
"numpy.reshape",
"numpy.arange",
"tensorflow.test.main",
"tensorflow.dynamic_stitch",
"numpy.copy",
"tensorflow.initialize_all_variables",
"tensorflow.gather",
"tensorflow.square",
"tensorflow.python.kernel_tests.gradient_checker.ComputeGradientError",
"tensorflow.placeholder",
"numpy.random.rand",
"tensorflow.nn.embedding_lookup_sparse",
"numpy.array",
"tensorflow.nn.embedding_lookup",
"numpy.sum",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.range",
"numpy.prod",
"tensorflow.logging.vlog",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |