Columns: repo_name (string, lengths 6–130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence), possible_versions (list)
XiaoZheng-YY/EEG-DL
[ "cafe35070b811045018d009aedf0ae164c52054b" ]
[ "Models/Network/BiRNN_with_Attention.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Import useful packages\nimport tensorflow as tf\n\n\ndef attention(inputs, attention_size, time_major=False, return_alphas=False):\n \"\"\"\n\n Attention mechanism layer which reduces RNN/Bi-RNN outputs with Attention vector.\n The idea was proposed in the article by Z. Yang et al., \"Hierarchical Attention Networks\n for Document Classification\", 2016: http://www.aclweb.org/anthology/N16-1174.\n Variables notation is also inherited from the article\n\n Args:\n inputs: The Attention inputs.\n Matches outputs of RNN/Bi-RNN layer (not final state):\n In case of RNN, this must be RNN outputs `Tensor`:\n If time_major == False (default), this must be a tensor of shape:\n `[batch_size, max_time, cell.output_size]`.\n If time_major == True, this must be a tensor of shape:\n `[max_time, batch_size, cell.output_size]`.\n In case of Bidirectional RNN, this must be a tuple (outputs_fw, outputs_bw) containing the forward and\n the backward RNN outputs `Tensor`.\n If time_major == False (default),\n outputs_fw is a `Tensor` shaped:\n `[batch_size, max_time, cell_fw.output_size]`\n and outputs_bw is a `Tensor` shaped:\n `[batch_size, max_time, cell_bw.output_size]`.\n If time_major == True,\n outputs_fw is a `Tensor` shaped:\n `[max_time, batch_size, cell_fw.output_size]`\n and outputs_bw is a `Tensor` shaped:\n `[max_time, batch_size, cell_bw.output_size]`.\n attention_size: Linear size of the Attention weights.\n time_major: The shape format of the `inputs` Tensors.\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\n Using `time_major = True` is a bit more efficient because it avoids\n transposes at the beginning and end of the RNN calculation. 
However,\n most TensorFlow data is batch-major, so by default this function\n accepts input and emits output in batch-major form.\n return_alphas: Whether to return attention coefficients variable along with layer's output.\n Used for visualization purpose.\n\n Returns:\n The Attention output `Tensor`.\n In case of RNN, this will be a `Tensor` shaped:\n `[batch_size, cell.output_size]`.\n In case of Bidirectional RNN, this will be a `Tensor` shaped:\n `[batch_size, cell_fw.output_size + cell_bw.output_size]`.\n\n \"\"\"\n\n if isinstance(inputs, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n inputs = tf.concat(inputs, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n inputs = tf.array_ops.transpose(inputs, [1, 0, 2])\n\n hidden_size = inputs.shape[2].value # D value - hidden size of the RNN layer\n\n # Trainable parameters\n w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))\n b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n\n with tf.name_scope('v'):\n # Applying fully connected layer with non-linear activation to each of the B*T timestamps;\n # the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size\n v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)\n\n # For each of the timestamps its vector of size A from `v` is reduced with `u` vector\n vu = tf.tensordot(v, u_omega, axes=1, name='vu') # (B,T) shape\n alphas = tf.nn.softmax(vu, name='alphas') # (B,T) shape\n\n # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape\n output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)\n\n if not return_alphas:\n return output\n else:\n return output, alphas\n\n\ndef BiRNN_with_Attention(Input, max_time, n_input, rnn_size, attention_size, keep_prob, weights_1, biases_1, weights_2, biases_2):\n '''\n\n Args:\n Input: The reshaped input EEG signals\n max_time: The unfolded time slice of BiRNN Model\n n_input: The input signal size at one time\n rnn_size: The number of RNN units inside the BiRNN Model\n keep_prob: The Keep probability of Dropout\n weights_1: The Weights of first fully-connected layer\n biases_1: The biases of first fully-connected layer\n weights_2: The Weights of second fully-connected layer\n biases_2: The biases of second fully-connected layer\n\n Returns:\n FC_2: Final prediction of BiRNN Model\n FC_1: Extracted features from the first fully connected layer\n alphas: Attention Weights - Studied Attention Weights\n\n '''\n\n # Input EEG signals\n Input = tf.reshape(Input, [-1, max_time, n_input])\n\n # Forward and Backward RNN models (BiRNN Models)\n rnn_fw_cell = tf.contrib.rnn.BasicRNNCell(num_units=rnn_size)\n rnn_bw_cell = tf.contrib.rnn.BasicRNNCell(num_units=rnn_size)\n\n # Dropout for the BiRNN Model\n rnn_fw_drop = tf.contrib.rnn.DropoutWrapper(cell=rnn_fw_cell, input_keep_prob=keep_prob)\n rnn_bw_drop = tf.contrib.rnn.DropoutWrapper(cell=rnn_bw_cell, input_keep_prob=keep_prob)\n\n # One layer Attention-based BiRNN Model\n outputs, _ = tf.compat.v1.nn.bidirectional_dynamic_rnn(rnn_fw_drop, rnn_bw_drop, Input, dtype=tf.float32)\n\n # Attention Mechanism\n attention_output, alphas = attention(inputs=outputs, attention_size=attention_size, return_alphas=True)\n attention_output_drop = tf.nn.dropout(attention_output, keep_prob)\n\n # First fully-connected layer\n FC_1 = tf.matmul(attention_output_drop, weights_1) + biases_1\n FC_1 = 
tf.layers.batch_normalization(FC_1, training=True)\n FC_1 = tf.nn.softplus(FC_1)\n FC_1 = tf.nn.dropout(FC_1, keep_prob)\n\n # Second fully-connected layer\n FC_2 = tf.matmul(FC_1, weights_2) + biases_2\n FC_2 = tf.nn.softmax(FC_2)\n\n return FC_2, FC_1, alphas\n" ]
[ [ "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.layers.batch_normalization", "tensorflow.concat", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.contrib.rnn.BasicRNNCell", "tensorflow.random_normal", "tensorflow.name_scope", "tensorflow.tensordot", "tensorflow.array_ops.transpose", "tensorflow.compat.v1.nn.bidirectional_dynamic_rnn", "tensorflow.nn.softplus", "tensorflow.nn.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
HarrieO/RankingComplexLayouts
[ "53e8fdca3b2d4efffc2506423997e257f01ba094" ]
[ "mdprank/main.py" ]
[ "import os, sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport argparse\nimport letorinput as letorin\nimport losses\nimport models\nimport numpy as np\nimport rewards\nimport tensorflow as tf\nimport gru.gru as gru\nimport mdprank as mdprank\n\nfrom tensorflow.contrib.training import wait_for_new_checkpoint\n\nparser = argparse.ArgumentParser(description='Baseline Run')\nparser.add_argument('--model_dir', type=str, default=None,\n help='Directory to store/load model.')\nparser.add_argument('--summary_dir', type=str, default=None,\n help='Directory to store/load summaries.')\nparser.add_argument('--input_dir', type=str, required=True,\n help='Directory where input is found '\n '(features.txt, [train, vali, test].tfrecord).')\nparser.add_argument('--steps', type=int, default=10000,\n help='')\nparser.add_argument('--eval', action='store_true',\n help='')\nparser.add_argument('--eval_steps', type=int, default=250,\n help='')\nparser.add_argument('--dataset', type=str, required=True,\n help='')\nparser.add_argument('--partition', type=str, default='train',\n help='')\n# parser.add_argument('--serp_len', type=int, default=10,\n# help='')\nparser.add_argument('--discount', type=str, default='ndcg',\n help='')\nparser.add_argument('--learning_rate', type=float, default=0.01)\nparser.add_argument('--epsilon_decay', type=float, default=0.9999)\nparser.add_argument('--steps_per_transfer', type=int, default=1000)\nparser.add_argument('--min_replay', type=int, default=1000)\nparser.add_argument('--update', type=str, default='dqn')\nparser.add_argument('--doc_rewards', action='store_true',\n help='')\n\n\nargs = parser.parse_args()\n\nparams = {\n 'hidden_units': [],\n 'hidden_state_size': 256,\n 'model': 'mdpdiv',\n 'model_name': 'mdpdiv',\n 'update': args.update,\n # 'serp_len': args.serp_len,\n 'serp_len': 10,\n 'evaluation': args.eval,\n 'partition': args.partition,\n 'steps': args.steps,\n 'eval_steps': args.eval_steps,\n 'doc_emb': [128],\n 'steps_per_transfer': args.steps_per_transfer,\n 'visible_dropout': 1.,\n 'hidden_dropout': 1.,\n 'l2_scale': 0.,\n 'learning_rate': args.learning_rate,\n 'epsilon_decay': args.epsilon_decay,\n 'discount': args.discount,\n 'context_input': False,\n 'all_discounts': {\n 'ndcg': list(range(10)),\n 'hill': [8,6,4,2,0,1,3,5,7,9],\n 'reverse': [9,8,7,6,5,4,3,2,1,0],\n },\n 'compact_gru': True,\n 'min_replay': args.min_replay,\n 'read_batch': 64,\n 'replay_batch': 1,\n 'doc_rewards': args.doc_rewards,\n}\n\n# 'ndcg': [1./(np.log2(i+2)) for i in range(10)],\n# 'wave': [1./(np.log2(i+2)) for i in [4,3,2,1,0,5,6,7,8,9]],\n# 'hill': [1./(np.log2(i+2)) for i in [8,5,3,1,0,2,4,6,7,9]],\n# 'even': [1./(np.log2(i+2)) for i in [0,5,1,6,2,7,3,8,4,9]],\n# 'uneven': [1./(np.log2(i+2)) for i in [5,0,6,1,7,2,8,3,9,4]],\n# 'reverse': [1./(np.log2(i+2)) for i in [9,8,7,6,5,4,3,2,1,0]],\n# 'skip': [1./(np.log2(i+2)) for i in [7,8,9,0,1,2,3,4,5,6]],\n# 'cup': [1./(np.log2(i+2)) for i in [0,2,4,6,8,9,7,5,3,1]],\n\nif args.dataset == 'NP2003':\n params['train_size'] = 90\n params['vali_size'] = 30\n params['test_size'] = 30\n params['max_docs'] = 1000\nelif args.dataset == 'MSLR30':\n params['train_size'] = 18919\n params['vali_size'] = 6306\n params['test_size'] = 6306\n params['max_docs'] = 1251\nelif args.dataset == 'WEBSCOPE':\n params['train_size'] = 19944\n params['vali_size'] = 2994\n params['test_size'] = 6983\n params['max_docs'] = 139\nelif args.dataset == 'istella':\n params['train_size'] = 23219\n params['vali_size'] = 9799 # vali set is actually empty use 
train\n params['test_size'] = 9799\n params['max_docs'] = 439\n\nif params['evaluation']:\n params['train_size'] = params['vali_size']\n\nfeat = letorin.get_features(params, args.input_dir)\n\nn_read_threads = 3\nn_policy_threads = 5\nif args.eval:\n params['visible_dropout'] = 1.\n params['hidden_dropout'] = 1.\n params['read_batch'] = 50\n n_policy_threads = 1\n\nif args.dataset == 'istella' and params['partition'] == 'vali':\n params['partition'] = 'train'\n examples, labels = letorin.get_letor_examples(params, args.input_dir,\n feat, num_threads=n_read_threads)\n params['partition'] = 'vali'\nelse:\n examples, labels = letorin.get_letor_examples(params, args.input_dir,\n feat, num_threads=n_read_threads)\n\nglobal_step = tf.Variable(0, trainable=False, name='global_step')\n\neval_update_ops = []\n\nepisode = mdprank.model(params, examples, labels)\n\n\n# Cutoff discounts\ndiscounts = rewards.get_DCG_discounts(params, episode['docs_per_query'],\n labels)\n\n# Ideal rewards\nideal_rewards = rewards.calculate_ideal_rewards(params, labels, discounts)\nfor name, v in ideal_rewards.items():\n episode['ideal_rewards/%s' % name] = v\n episode['discounts/%s' % name] = discounts[name]\n\nif params['evaluation']:\n # rewards = rewards.calculate_NDCG_reward(params,\n # to_enqueue, stats_ops=eval_update_ops)\n rewards, _ = rewards.calculate_custom_discount_reward(params,\n episode, stats_ops=eval_update_ops)\n mean_reward, mean_update = tf.metrics.mean(tf.reduce_mean(rewards))\n eval_update = tf.group(mean_update, *eval_update_ops)\nelse:\n\n # rewards = rewards.calculate_NDCG_reward(params, replay)\n rewards, doc_rewards = rewards.calculate_custom_discount_reward(params, episode)\n\n if params['doc_rewards']:\n loss = -(episode['probs']*tf.cumsum(doc_rewards, axis=1, reverse=True))\n else:\n # rewards = tf.Print(rewards, [tf.reduce_sum(episode['probs'], axis=1)], 'probs: ')\n loss = -(episode['probs']*rewards)\n loss = tf.reduce_mean(loss)\n\n\n\n opt = tf.train.AdamOptimizer(learning_rate=params['learning_rate'])\n # opt = tf.train.AdagradOptimizer(learning_rate=params['learning_rate'])\n # opt = tf.train.GradientDescentOptimizer(learning_rate=params['learning_rate'])\n gvs = opt.compute_gradients(loss)\n # for grad, var in gvs:\n # loss = tf.Print(loss, [grad], 'grad %s: ' % var)\n\n # opt_op = opt.minimize(loss, global_step=global_step)\n\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n opt_op = opt.apply_gradients(capped_gvs, global_step=global_step)\n\n\nmerged_summary = tf.summary.merge_all()\n\ninit = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\nsum_path = args.summary_dir + '/' + params['partition']\nif params['partition'] == 'train' and params['evaluation']:\n sum_path = args.summary_dir + '/overfit'\nwriter = tf.summary.FileWriter(sum_path)\nsaver = tf.train.Saver()\n\nif not params['evaluation']:\n\n # Train supervisor to handle starting sessions.\n sv = tf.train.Supervisor(logdir=args.model_dir,\n summary_writer=writer,\n save_summaries_secs=120,\n global_step=global_step,\n # manual saving since learning is fast\n save_model_secs=0)\n\n # At this point the model will be instantiated and actually ran.\n with sv.managed_session() as sess:\n\n # Continue from a previous saved checkpoint, if it exists.\n checkpoint = tf.train.latest_checkpoint(args.model_dir)\n if checkpoint:\n print('Loading checkpoint', checkpoint)\n saver.restore(sess, checkpoint)\n else:\n print('No existing checkpoint found.')\n sess.run(init)\n\n # 
Check the current state of the network.\n i = sess.run([global_step])[0]\n if i == 0:\n saver.save(sess, args.model_dir + '/model.ckpt',\n global_step=i)\n print('Running %d steps.' % (params['steps'] - i))\n while i < params['steps']:\n i, l_i = sess.run([global_step, loss, opt_op])[:2]\n # print(\"%d %f\" % (i, l_i))\n # Evaluation will be performed on saved checkpoints\n # only. Since learning goes very fast, we save often.\n if i % params['eval_steps'] == 0 or i == params['steps']:\n saver.save(sess, args.model_dir + '/model.ckpt', global_step=i)\nelse:\n print('Evaluating on %s' % params['partition'])\n # For each checkpoint the entire dataset is evaluated.\n steps_per_eval = params['%s_size' % params['partition']]\n checkpoint = None\n # Basic session since we will only manually save summaries.\n with tf.Session() as sess:\n coord = tf.train.Coordinator()\n # Queue runners will take care of reading data in seperate threads.\n threads = tf.train.start_queue_runners(coord=coord)\n while True:\n checkpoint = wait_for_new_checkpoint(args.model_dir,\n checkpoint,\n seconds_to_sleep=1,\n timeout=1200)\n if checkpoint is None:\n print('No checkpoint found for 20 min, exiting evaluation.')\n break\n # Init for variables that are not part of checkpoint,\n # in this case the ones used for metrics.\n sess.run(init)\n # Restore a checkpoint saved by the training run.\n saver.restore(sess, checkpoint)\n # Update the metrics for every element in the dataset.\n batch_steps = int(np.ceil(steps_per_eval/float(params['read_batch'])))\n for i in range(batch_steps):\n sess.run([eval_update])\n # Get the resulting metrics.\n cur_step, cur_reward, cur_summary = sess.run([global_step, mean_reward, merged_summary])\n # Pass the summary to the writer, which stores it for Tensorboard.\n writer.add_summary(cur_summary, global_step=cur_step)\n writer.flush()\n\n print('Step %d: %.02f' % (cur_step, cur_reward))\n if cur_step == params['steps']:\n break\n\n coord.request_stop()\n coord.join(threads)\n\n\n# init = tf.group(tf.global_variables_initializer(),\n# tf.local_variables_initializer())\n# with tf.Session() as sess:\n# coord = tf.train.Coordinator()\n# # Queue runners will take care of reading data in seperate threads.\n# threads = tf.train.start_queue_runners(coord=coord)\n \n# sess.run(init)\n \n\n# coord.request_stop()\n# coord.join(threads)\n# exit()\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.Variable", "tensorflow.reduce_mean", "tensorflow.train.latest_checkpoint", "tensorflow.train.start_queue_runners", "tensorflow.train.Coordinator", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.train.Supervisor", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.contrib.training.wait_for_new_checkpoint", "tensorflow.group", "tensorflow.cumsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jiangwenj02/MLC
[ "3cecdc669a9027f230d29e1565d16bab958809a2" ]
[ "CIFAR/data_helper_cifar.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom PIL import Image\nfrom utils import DataIterator\n\ndef prepare_data(gold_fraction, corruption_prob, corruption_type, args):\n if args.use_mwnet_loader:\n return prepare_data_mwnet(gold_fraction, corruption_prob, corruption_type, args)\n else:\n return prepare_data_mlc(gold_fraction, corruption_prob, corruption_type, args)\n\ndef prepare_data_mwnet(gold_fraction, corruption_prob, corruption_type, args):\n from load_corrupted_data_mlg import CIFAR10, CIFAR100 \n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n if True: # no augment as used by mwnet\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),\n (4, 4, 4, 4), mode='reflect').squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n else:\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n\n args.num_meta = int(50000 * gold_fraction)\n\n if args.dataset == 'cifar10':\n num_classes = 10\n \n train_data_meta = CIFAR10(\n root=args.data_path, train=True, meta=True, num_meta=args.num_meta, corruption_prob=corruption_prob,\n corruption_type=args.corruption_type, transform=train_transform, download=True)\n train_data = CIFAR10(\n root=args.data_path, train=True, meta=False, num_meta=args.num_meta, corruption_prob=corruption_prob,\n corruption_type=args.corruption_type, transform=train_transform, download=True, seed=args.seed)\n test_data = CIFAR10(root=args.data_path, train=False, transform=test_transform, download=True)\n\n valid_data = CIFAR10(\n root=args.data_path, train=True, meta=True, num_meta=args.num_meta, corruption_prob=corruption_prob,\n corruption_type=args.corruption_type, transform=train_transform, download=True)\n\n elif args.dataset == 'cifar100':\n num_classes = 100\n \n train_data_meta = CIFAR100(\n root=args.data_path, train=True, meta=True, num_meta=args.num_meta, corruption_prob=corruption_prob,\n corruption_type=args.corruption_type, transform=train_transform, download=True)\n train_data = CIFAR100(\n root=args.data_path, train=True, meta=False, num_meta=args.num_meta, corruption_prob=corruption_prob,\n corruption_type=args.corruption_type, transform=train_transform, download=True, seed=args.seed)\n test_data = CIFAR100(root=args.data_path, train=False, transform=test_transform, download=True)\n\n valid_data = CIFAR100(\n root=args.data_path, train=True, meta=True, num_meta=args.num_meta, corruption_prob=corruption_prob,\n corruption_type=args.corruption_type, transform=train_transform, download=True) \n\n train_gold_loader = DataIterator(torch.utils.data.DataLoader(train_data_meta, batch_size=args.bs, shuffle=True,\n num_workers=args.prefetch, pin_memory=True))\n train_silver_loader = torch.utils.data.DataLoader(train_data, batch_size=args.bs, shuffle=True,\n num_workers=args.prefetch, pin_memory=True)\n valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=args.bs, shuffle=True,\n num_workers=args.prefetch, pin_memory=True)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.bs, shuffle=False,\n num_workers=args.prefetch, 
pin_memory=True)\n\n return train_gold_loader, train_silver_loader, valid_loader, test_loader, num_classes\n\ndef prepare_data_mlc(gold_fraction, corruption_prob, corruption_type, args):\n from load_corrupted_data import CIFAR10, CIFAR100\n \n mean = [x / 255 for x in [125.3, 123.0, 113.9]]\n std = [x / 255 for x in [63.0, 62.1, 66.7]]\n\n train_transform = transforms.Compose(\n [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n test_transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize(mean, std)])\n\n # since cifar10 and cifar100 have no official validation split, use gold as valid also\n if args.dataset == 'cifar10':\n train_data_gold = CIFAR10(\n args.data_path, True, True, gold_fraction, corruption_prob, args.corruption_type,\n transform=train_transform, download=True, distinguish_gold=False, seed=args.seed)\n train_data_silver = CIFAR10(\n args.data_path, True, False, gold_fraction, corruption_prob, args.corruption_type,\n transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices, seed=args.seed, distinguish_gold=False, weaklabel=args.weaklabel) # note here for the change\n train_data_gold_deterministic = CIFAR10(\n args.data_path, True, True, gold_fraction, corruption_prob, args.corruption_type,\n transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices, distinguish_gold=False, seed=args.seed)\n test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True, distinguish_gold=False, seed=args.seed)\n\n # same as gold\n valid_data = CIFAR10(\n args.data_path, True, True, gold_fraction, corruption_prob, args.corruption_type,\n transform=train_transform, download=True, distinguish_gold=False, seed=args.seed)\n\n num_classes = 10\n\n elif args.dataset == 'cifar100':\n train_data_gold = CIFAR100(\n args.data_path, True, True, gold_fraction, corruption_prob, args.corruption_type,\n transform=train_transform, download=True, distinguish_gold=False, seed=args.seed)\n train_data_silver = CIFAR100(\n args.data_path, True, False, gold_fraction, corruption_prob, args.corruption_type,\n transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices, seed=args.seed, distinguish_gold=False,\n weaklabel=args.weaklabel) # note the weaklabel arg\n train_data_gold_deterministic = CIFAR100(\n args.data_path, True, True, gold_fraction, corruption_prob, args.corruption_type,\n transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices, distinguish_gold=False, seed=args.seed)\n test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True, distinguish_gold=False, seed=args.seed)\n\n # same as gold\n valid_data = CIFAR100(\n args.data_path, True, True, gold_fraction, corruption_prob, args.corruption_type,\n transform=train_transform, download=True, distinguish_gold=False, seed=args.seed)\n \n num_classes = 100\n\n\n gold_sampler = None\n silver_sampler = None\n valid_sampler = None\n test_sampler = None\n batch_size = args.bs\n \n train_gold_loader = DataIterator(torch.utils.data.DataLoader(\n train_data_gold, batch_size=batch_size, shuffle=(gold_sampler is None),\n num_workers=args.prefetch, pin_memory=True, sampler=gold_sampler))\n train_silver_loader =torch.utils.data.DataLoader(\n train_data_silver, batch_size=batch_size, shuffle=(silver_sampler is None),\n num_workers=args.prefetch, pin_memory=True, sampler=silver_sampler)\n 
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=(valid_sampler is None), num_workers=args.prefetch, pin_memory=True, sampler=valid_sampler)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=(test_sampler is None), num_workers=args.prefetch, pin_memory=True, sampler=test_sampler)\n\n return train_gold_loader, train_silver_loader, valid_loader, test_loader, num_classes\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
themilkyreygalaxy/astr-119-session_4
[ "0933cfd60149b748a66797732c4476d4ee5dac42" ]
[ "demo_numpy.py" ]
[ "import numpy as np\n\nx = 1.0\t\t\t#define a float\ny = 2.0\t\t\t#define another float\n\n#trigonometry\nprint(np.sin(x))\t\t#sin(x)\nprint(np.cos(x))\t\t#cos(x)\nprint(np.tan(x))\t\t#tan(x)\nprint(np.arcsin(x))\t\t#arcsin(x)\nprint(np.arccos(x))\t\t#arccos(x)\nprint(np.arctan(x))\t\t#arctan(x)\nprint(np.arctan2(x,y))\t#arctan(x/y)\nprint(np.rad2deg(x)) \t#convert rad to deg\n\n#hyperbolic functions\nprint(np.sinh(x))\t\t#sinh(x)\nprint(np.cosh(x))\t\t#cosh(x)\nprint(np.tanh(x))\t\t#tanh(x)\nprint(np.arcsinh(x))\t#arcsinh(x)\nprint(np.arccosh(x))\t#arccosh(x)\nprint(np.arctanh(x))\t#arctanh(x)" ]
[ [ "numpy.arctanh", "numpy.cosh", "numpy.arctan", "numpy.arcsin", "numpy.arccosh", "numpy.arccos", "numpy.cos", "numpy.sin", "numpy.tan", "numpy.arctan2", "numpy.rad2deg", "numpy.sinh", "numpy.tanh", "numpy.arcsinh" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mihailupu/classifier
[ "8f3e9017d4650c2927beebd6855426d00532cf8a" ]
[ "eval.py" ]
[ "#! /usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport data_helpers\nfrom tensorflow.contrib import learn\nimport csv\n\n# Parameters\n# ==================================================\n\n# Data Parameters\ntf.flags.DEFINE_string(\"positive_data_file\", \"/home/mlupu/wipoAbstracts2Test.txt\", \"Data source for the wipo data.\")\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/rt-polaritydata/rt-polarity.neg\", \"Data source for the positive data.\")\n\n# Eval Parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_string(\"checkpoint_dir\", \".\", \"Checkpoint directory from training run\")\ntf.flags.DEFINE_boolean(\"eval_train\", True, \"Evaluate on all training data\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\ntf.flags.DEFINE_string('gpudev','1','Select a GPU Device.')\n\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpudev\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\n# CHANGE THIS: Load data. Load your own data here\nif FLAGS.eval_train:\n # x_raw, y_test = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)\n x_raw, y_test = data_helpers.load_wipo_data_and_labels(FLAGS.positive_data_file)\n # y_test = np.argmax(y_test, axis=1)\nelse:\n x_raw = [\"a masterpiece four years in the making\", \"everything is off.\"]\n y_test = [1, 0]\n\n# Map data into vocabulary\nvocab_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"vocab\")\nvocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\nx_test = np.array(list(vocab_processor.transform(x_raw)))\n\nprint(\"\\nEvaluating...\\n\")\n\n# Evaluation\n# ==================================================\ncheckpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\ngraph = tf.Graph()\nwith graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n # Get the placeholders from the graph by name\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n # input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n\n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n scores = graph.get_operation_by_name(\"output/scores\").outputs[0]\n\n # Generate batches for one epoch\n batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)\n\n # Collect the predictions here\n all_predictions = []\n all_scores = []\n all_top3 = []\n first=True\n\n\n for x_test_batch in batches:\n batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})\n batch_scores = sess.run(scores, {input_x: x_test_batch,dropout_keep_prob: 1.0})\n batch_top3 = (-batch_scores).argsort(1)[:,:3]\n all_predictions = np.concatenate([all_predictions, batch_predictions])\n #all_scores = 
np.concatenate([all_scores, batch_scores])\n if first:\n all_top3 = batch_top3\n first = False\n else:\n all_top3 = np.concatenate([all_top3, batch_top3])\n\n# Print accuracy if y_test is defined\nif y_test is not None:\n # this is not correct, because our y_test may have multiple classes. We should instead check if the predicted one\n # is among them.\n #correct_predictions = float(sum(all_predictions == y_test))\n correct_predictions=0\n correct_top3=0\n for i in range(len(y_test)):\n correct_predictions = correct_predictions + y_test[i][int(all_predictions[i])]\n correct_top3 = correct_top3 + np.max(y_test[i][all_top3[i]])\n print(\"Total number of test examples: {}\".format(len(y_test)))\n print(\"Accuracy: {:g}\".format(correct_predictions/float(len(y_test))))\n print(\"Accuracy top3: {:g}\".format(correct_top3/float(len(y_test))))\n\n# Save the evaluation to a csv\npredictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))\nout_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"prediction.csv\")\nprint(\"Saving evaluation to {0}\".format(out_path))\nwith open(out_path, 'w',encoding=\"utf-8\") as f:\n csv.writer(f).writerows(predictions_human_readable)" ]
[ [ "tensorflow.flags.DEFINE_boolean", "tensorflow.Graph", "tensorflow.train.latest_checkpoint", "tensorflow.flags.DEFINE_string", "tensorflow.ConfigProto", "numpy.concatenate", "numpy.max", "tensorflow.Session", "numpy.array", "tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore", "tensorflow.flags.DEFINE_integer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pawel-ta/ranmath
[ "f52a15b10bdb5830a50c43da11fed5f182026587" ]
[ "Ranmath/MatrixGenerators/MultivariateGaussianGenerator.py" ]
[ "\nfrom .AbstractGenerator import AbstractGenerator\nimport numpy as np\nimport scipy.linalg as la\n\n\nclass MultivariateGaussianGenerator(AbstractGenerator):\n\n def __init__(self, C: np.ndarray, A: np.ndarray, number_of_iterations):\n super().__init__()\n self.__number_of_iterations = number_of_iterations\n self.__last_C = C\n self.__last_A = A\n\n @property\n def last_C(self):\n return self.__last_C\n\n @property\n def last_A(self):\n return self.__last_A\n\n def generate(self, verbose=False):\n\n if verbose:\n print(\"Generating using Multivariate Gaussian\")\n\n N, T = self.__last_C.shape, self.__last_A.shape\n if N[0] != N[1] or T[0] != T[1]:\n raise ValueError('C and A should be square matrices')\n N, T = N[0], T[0]\n\n C_root = la.sqrtm(self.__last_C).real\n A_root = la.sqrtm(self.__last_A).real\n\n array = []\n\n for iteration in range(self.__number_of_iterations):\n random_matrix = np.random.normal(size=(N, T))\n array.append(C_root @ random_matrix @ A_root)\n\n return np.array(array)\n" ]
[ [ "scipy.linalg.sqrtm", "numpy.random.normal", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
paxtonfitzpatrick/timecorr
[ "fd6b797304c1c002a31f99f858cb7e51ed4a5de3" ]
[ "timecorr/timecorr.py" ]
[ "# coding: utf-8\n\nimport numpy as np\nfrom .helpers import isfc, gaussian_weights, format_data, null_combine, reduce, smooth\n\ndef timecorr(data, weights_function=gaussian_weights,\n weights_params=None, include_timepoints='all', exclude_timepoints=None,\n combine=null_combine, cfun=isfc, rfun=None):\n \"\"\"\n Computes dynamics correlations in single-subject or multi-subject data.\n\n Parameters\n ----------\n data: numpy array, pandas dataframe, or a list of numpy arrays/dataframes\n Each numpy array (or dataframe) should have size timepoints by features.\n If a list of arrays are passed, there should be one array per subject.\n\n weights_function: a function of the form func(T, params) where\n T is a non-negative integer specifying the number of timepoints to consider.\n\n The function should return a T by T array containing the timepoint-specific\n weights for each consecutive time point from 0 to T (not including T).\n\n Default: gaussian_weights; options: laplace_weights, gaussian_weights,\n t_weights, eye_weights, mexican_hat_weights\n\n weights_params: used to pass parameters to the weights_params function. This\n can be specified in any format (e.g. a scalar, list, object, dictionary,\n etc.).\n\n Default: None (use default parameters for the given weights function).\n Options: gaussian_params, laplace_params, t_params, eye_params,\n mexican_hat_params.\n\n include_timepoints: determines which timepoints are used to estimate the correlations\n at each timepoint. This is applied after the weights function to further constrain\n which timepoints may be considered in computing the correlations at each timepoint.\n\n Options: 'all' (default; include all timepoints), 'pre' (only include timepoints *before*\n the given timepoint), 'post' (only include timepoints *after* the given timepoint).\n\n exclude_timepoints: additional option, used to filter out any timepoints less than x units\n of the timepoint whose correlations are being estimated. For example, passing\n exclude_timepoints=3 will exclude any timepoints 3 or more samples from the given timepoint.\n When exclude timepoints is negative, it works inversely-- e.g. exclude_timepoints=-5 will\n exclude any timepoints within 5 or fewer samples of the given timepoint. Real-valued scalars\n are supported but are rounded to the nearest Integer.\n\n Default: None (no filtering).\n\n combine: a function applied to either a single matrix of vectorized correlation\n matrices, or a list of such matrices. The function should return either\n a numpy array or a list of numpy arrays.\n\n Default: helpers.null_combine (a function that returns its input). Other\n useful functions:\n\n helpers.corrmean_combine: take the element-wise average correlations across matrices\n helpers.tstat_combine: return element-wise t-statistics across matrices\n\n cfun: function to apply to the data array(s)\n This function should be of the form\n func(data, weights)\n\n The function should support data as a numpy array or list of numpy\n arrays. When a list of numpy arrays is passed, the function should\n apply the \"across subjects\" version of the analysis. 
When a single\n numpy array is passed, the function should apply the \"within subjects\"\n version of the analysis.\n\n weights is a numpy array with per-timepoint weights\n\n The function should return a single numpy array with 1 row and an\n arbitrary number of columns (the number of columns may be determined by\n the function).\n\n Default: A continuous verison of Inter-Subject Functional Connectivity\n (Simony et al. 2017). If only one data array is passed (rather than a\n list), the default cfun returns the moment-by-moment correlations for\n that array. (Reference: http://www.nature.com/articles/ncomms12141)\n\n rfun: function to use for dimensionality reduction.\n\n All hypertools and scikit-learn functions are supported: PCA, IncrementalPCA, SparsePCA,\n MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD,\n DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap,\n SpectralEmbedding, LocallyLinearEmbedding, MDS, and UMAP.\n\n Can be passed as a string, but for finer control of the model\n parameters, pass as a dictionary, e.g.\n reduction={‘model’ : ‘PCA’, ‘params’ : {‘whiten’ : True}}.\n\n See scikit-learn specific model docs for details on parameters supported\n for each model.\n\n Another option is to use graph theoretic measures computed for each node.\n The following measures are supported (via the brainconn toolbox):\n eigenvector_centrality, pagerank_centrality, and strength. (Each\n of these must be specified as a string; dictionaries not supported.)\n\n Default: None (no dimensionality reduction)\n\n Returns\n ----------\n corrmats: moment-by-moment correlations\n \"\"\"\n\n def temporal_filter(T, k):\n k = np.round(k)\n filt = np.eye(T)\n for i in np.arange(1, np.min([np.abs(k) + 1, T])):\n filt = filt + np.eye(T, k=np.abs(i)) + np.eye(T, k=-np.abs(i))\n if k < 0:\n return 1 - filt\n else:\n return filt\n\n if type(data) == list:\n T = data[0].shape[0]\n return_list = True\n else:\n T = data.shape[0]\n data = [data]\n return_list = False\n\n data = format_data(data)\n\n weights = weights_function(T, weights_params)\n\n include_timepoints = include_timepoints.lower()\n if include_timepoints == 'all':\n pass\n elif include_timepoints == 'pre':\n weights = np.tril(weights)\n elif include_timepoints == 'post':\n weights = np.triu(weights)\n else:\n raise Exception(f'Invalid option for include_timepoints: \\'{include_timepoints}\\'. Must be one of: \\'all\\', \\'pre\\', or \\'post\\'.')\n\n if not (exclude_timepoints is None):\n weights = np.multiply(temporal_filter(T, exclude_timepoints), weights)\n\n if cfun:\n corrs = reduce(combine(cfun(data, weights)), rfun=rfun)\n\n if not (cfun is None):\n return_list = False\n\n else:\n corrs = combine(smooth(data, kernel_fun=weights_function, kernel_params=weights_params)).tolist()\n\n if return_list and (not (type(corrs) == list)):\n return [corrs]\n elif (not return_list) and (type(corrs) == list) and (len(corrs) == 1):\n return corrs[0]\n else:\n return corrs\n\n\n" ]
[ [ "numpy.abs", "numpy.eye", "numpy.round", "numpy.triu", "numpy.tril" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
forgi86/RNN-adaptation
[ "d32e8185c6a746060dd726a0f5080231e0c9439b" ]
[ "examples/WH/01_WH2009_train.py" ]
[ "import os\nimport torch\nimport pandas as pd\nimport numpy as np\nfrom models import WHNet3\nimport matplotlib.pyplot as plt\nimport time\nimport torchid.metrics\n\n\n# In[Main]\nif __name__ == '__main__':\n\n # In[Set seed for reproducibility]\n np.random.seed(0)\n torch.manual_seed(0)\n\n # In[Settings]\n lr_ADAM = 2e-4\n lr_BFGS = 1e0\n num_iter_ADAM = 40000 # ADAM iterations 20000\n num_iter_BFGS = 0 # final BFGS iterations\n msg_freq = 100\n n_skip = 5000\n n_fit = 20000\n decimate = 1\n n_batch = 1\n n_b = 8\n n_a = 8\n model_name = \"model_WH3\"\n\n num_iter = num_iter_ADAM + num_iter_BFGS\n\n # In[Column names in the dataset]\n COL_F = ['fs']\n COL_U = ['uBenchMark']\n COL_Y = ['yBenchMark']\n\n # In[Load dataset]\n df_X = pd.read_csv(os.path.join(\"data\", \"WH2009\", \"WienerHammerBenchmark.csv\"))\n\n # Extract data\n y = np.array(df_X[COL_Y], dtype=np.float32) # batch, time, channel\n u = np.array(df_X[COL_U], dtype=np.float32)\n fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)\n N = y.size\n ts = 1/fs\n t = np.arange(N)*ts\n\n # In[Fit data]\n y_fit = y[0:n_fit:decimate]\n u_fit = u[0:n_fit:decimate]\n t_fit = t[0:n_fit:decimate]\n\n # In[Prepare training tensors]\n u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)\n y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)\n\n # In[Prepare model]\n model = WHNet3()\n\n # In[Setup optimizer]\n optimizer_ADAM = torch.optim.Adam([\n {'params': model.parameters(), 'lr': lr_ADAM},\n ], lr=lr_ADAM)\n\n optimizer_LBFGS = torch.optim.LBFGS(list(model.parameters()), lr=lr_BFGS)\n\n\n def closure():\n optimizer_LBFGS.zero_grad()\n\n # Simulate\n y_hat = model(u_fit_torch)\n\n # Compute fit loss\n err_fit = y_fit_torch[:, n_skip:, :] - y_hat[:, n_skip:, :]\n loss = torch.mean(err_fit**2)*1000\n\n # Backward pas\n loss.backward()\n return loss\n\n\n # In[Train]\n LOSS = []\n start_time = time.time()\n for itr in range(0, num_iter):\n\n if itr < num_iter_ADAM:\n msg_freq = 10\n loss_train = optimizer_ADAM.step(closure)\n else:\n msg_freq = 10\n loss_train = optimizer_LBFGS.step(closure)\n\n LOSS.append(loss_train.item())\n if itr % msg_freq == 0:\n with torch.no_grad():\n RMSE = torch.sqrt(loss_train)\n print(f'Iter {itr} | Fit Loss {loss_train:.6f} | RMSE:{RMSE:.4f}')\n\n train_time = time.time() - start_time\n print(f\"\\nTrain time: {train_time:.2f}\")\n\n # In[Save model]\n model_folder = os.path.join(\"models\", model_name)\n if not os.path.exists(model_folder):\n os.makedirs(model_folder)\n\n torch.save(model.state_dict(), os.path.join(model_folder, \"model.pt\"))\n\n\n # In[Simulate one more time]\n with torch.no_grad():\n y_hat = model(u_fit_torch)\n\n # In[Detach]\n y_hat = y_hat.detach().numpy()[0, :, :]\n\n # In[Plot]\n plt.figure()\n plt.plot(t_fit, y_fit, 'k', label=\"$y$\")\n plt.plot(t_fit, y_hat, 'b', label=\"$\\hat y$\")\n plt.legend()\n\n # In[Plot loss]\n plt.figure()\n plt.plot(LOSS)\n plt.grid(True)\n\n # In[Plot]\n e_rms = torchid.metrics.error_rmse(y_hat, y_fit)[0]\n print(f\"RMSE: {e_rms:.2f}\") # target: 1mv\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "torch.mean", "numpy.random.seed", "torch.sqrt", "torch.manual_seed", "numpy.arange", "torch.tensor", "matplotlib.pyplot.plot", "torch.no_grad", "matplotlib.pyplot.grid", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Peltarion/scaling_fl
[ "011f845a6472e9a3df338351b8970b0fc70cf242" ]
[ "scripts/spooky_author/split_data.py" ]
[ "import argparse\n\nimport pandas\nfrom sklearn.model_selection import train_test_split\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Split the spooky author dataset into two files, train and test.')\n parser.add_argument(\n '--full-path', type=str, help='Path of full dataset', required=True)\n parser.add_argument(\n '--train-path', type=str, help='Path to write training dataset', required=True)\n parser.add_argument(\n '--test-path', type=str, help='Path to write test dataset', required=True)\n args = parser.parse_args()\n\n data = pandas.read_csv(args.full_path, index_col='id')\n train_data, test_data = train_test_split(\n data, train_size=0.75, shuffle=True, random_state=1)\n\n # Store splits\n train_data.to_csv(args.train_path, index=True)\n print('Training data written to:', args.train_path)\n\n test_data.to_csv(args.test_path, index=True)\n print('Test data written to:', args.test_path)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
msobrevillac/Multilingual-RDF-Verbalizer
[ "ba396693f65eaf74d1f60eb9aed3e78ab9593b22" ]
[ "hierarchical-decoding/utils/loss_new.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef linear_combination(x, y, epsilon): \n return epsilon*x + (1-epsilon)*y\n\ndef reduce_loss(loss, reduction='mean'):\n return loss.mean() if reduction=='mean' else loss.sum() if reduction=='sum' else loss\n\n# Implementation found at \n#https://medium.com/towards-artificial-intelligence/how-to-use-label-smoothing-for-regularization-aa349f7f1dbb\nclass LabelSmoothing(nn.Module):\n def __init__(self, size, padding_idx, smoothing=0.0, reduction='sum'):\n super().__init__()\n self.smoothing = smoothing\n self.reduction = reduction\n self.padding_idx = padding_idx\n \n def forward(self, x, target):\n n = x.size()[-1]\n pred = x.clone()\n for i, tgt in enumerate(target):\n if tgt.data == self.padding_idx:\n pred[i] = 0\n loss = reduce_loss(-pred.sum(dim=-1), self.reduction)\n nll = F.nll_loss(x, target, ignore_index=self.padding_idx, reduction=self.reduction)\n return linear_combination(loss/n, nll, self.smoothing)\n\n\nclass LossCompute:\n \"A loss compute and train function.\"\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n \n def __call__(self, x, y, norm=1):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)), \n y.contiguous().view(-1)) / norm\n if self.opt is not None:\n loss.backward()\n self.opt.step()\n self.opt.zero_grad()\n #self.opt.optimizer.zero_grad()\n return loss.item() * norm\n" ]
[ [ "torch.nn.functional.nll_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pps-lab/fl-analysis
[ "798fc0292d0611ec8900ebdb090b9e282d0df457" ]
[ "src/subspace/keras_ext/rproj_layers.py" ]
[ "# Copyright (c) 2018 Uber Technologies, Inc.\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer, InputSpec\nimport tensorflow.keras.backend as K\nfrom tensorflow.python.keras.utils import conv_utils\nfrom src.subspace.keras_ext.rproj_layers_util import _convert_string_dtype\n# from keras.backend.tensorflow_backend import _convert_string_dtype\nfrom tensorflow.keras import regularizers, constraints, initializers, activations\n\n###########\n#\n# Low Rank Basis Layers\n#\n# These layers are modified versions of standard Keras layers that\n# accept an OffsetCreator*Proj to create offsets from a weight basis\n# in a Dense/Sparse/Fastfood agnostic manner.\n#\n###########\n\nclass LowRankBasisLayer(Layer):\n '''Smarter version of Layer...'''\n\n def __init__(self, offset_creator_class, weight_basis, *args, **kwargs):\n super(LowRankBasisLayer, self).__init__(*args, **kwargs)\n # offset_creator is an object that creates theta offsets\n self.offset_creator = offset_creator_class()\n self.weight_basis = weight_basis\n\n # These may or may not be used by subclasses\n #self._basis_matrices = []\n #self._basis_matrix_normalizers = []\n\n # TODO check for use of basis_matrices\n\n @property\n def basis_matrices(self):\n print('USED HERE basis_matrices')\n return self._basis_matrices\n\n # TODO check for use of basis_matrix_normalizers\n @property\n def basis_matrix_normalizers(self):\n print('USED HERE basis_matrix_normalizers')\n return self._basis_matrix_normalizers\n \n def add_weight(self,\n name,\n shape,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=True,\n constraint=None):\n '''Version of add_weight that creates a weight theta by instantiating\n theta_0 and then adding to it an offset from the member\n offset_creator.\n '''\n initializer = initializers.get(initializer)\n if dtype is None:\n dtype = K.floatx()\n\n # Create Theta_0\n value_0 = initializer(shape)\n theta_0 = tf.Variable(value_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_theta0' % name)\n if isinstance(value_0, np.ndarray):\n theta_0._keras_shape = value_0.shape\n elif hasattr(value_0, 'get_shape'):\n theta_0._keras_shape = tuple(map(int, value_0.get_shape()))\n theta_0._uses_learning_phase = False\n\n # Call offset creator\n exec = self.offset_creator.create_theta_offset(self.weight_basis,\n theta_0.get_shape(),\n dtype=dtype,\n name=name)\n non_trainable_weights = exec.ww\n\n\n # if 
regularizer is not None:\n # self.add_loss(regularizer(theta))\n # if constraint is not None:\n # self.constraints[theta] = constraint\n #self._base_thetas.append(theta_0)\n #self._basis_matrices.append(ww)\n #self._non_trainable_weights.extend([theta_0, ww])\n self._non_trainable_weights.extend([theta_0] + [non_trainable_weights])\n return theta_0, exec\n\n def add_non_trainable_weight(self,\n name,\n shape,\n dtype=None,\n initializer=None,\n regularizer=None,\n constraint=None):\n '''Adds a weight variable to the layer.\n # Arguments\n name: String, the name for the weight variable.\n shape: The shape tuple of the weight.\n dtype: The dtype of the weight.\n initializer: An Initializer instance (callable).\n regularizer: An optional Regularizer instance.\n trainable: A boolean, whether the weight should\n be trained via backprop or not (assuming\n that the layer itself is also trainable).\n constraint: An optional Constraint instance.\n # Returns\n The created weight variable.\n '''\n initializer = initializers.get(initializer)\n if dtype is None:\n dtype = K.floatx()\n weight = K.variable(initializer(shape), dtype=dtype, name=name)\n if regularizer is not None:\n self.add_loss(regularizer(weight))\n if constraint is not None:\n self.constraints[weight] = constraint\n self._non_trainable_weights.append(weight)\n return weight\n\n\nclass RProjDense(LowRankBasisLayer):\n '''RProj version of Dense.'''\n\n def __init__(self, offset_creator_class, weight_basis,\n units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(RProjDense, self).__init__(offset_creator_class, weight_basis, **kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(min_ndim=2)\n self.supports_masking = True\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs, **kwargs):\n\n kt0, eproj = self.kernel\n k = tf.add(kt0, eproj())\n\n bt0, eprojb = self.bias\n b = tf.add(bt0, eprojb())\n\n # Normal dense functionality\n output = K.dot(inputs, k)\n if self.use_bias:\n output = K.bias_add(output, b)\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n def compute_output_shape(self, input_shape):\n assert 
input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)\n\n\n\nclass _RProjConv(LowRankBasisLayer):\n '''Abstract nD convolution layer (private, used as implementation base).\n\n Only the intrinsic parameters (RProj) are Trainable.'''\n\n def __init__(self, offset_creator_class, weight_basis,\n rank,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(_RProjConv, self).__init__(offset_creator_class, weight_basis, **kwargs)\n self.rank = rank\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')\n self.padding = conv_utils.normalize_padding(padding)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=self.rank + 2)\n\n def build(self, input_shape):\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = input_shape[channel_axis]\n kernel_shape = self.kernel_size + (input_dim, self.filters)\n\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n # Set input spec.\n self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_dim})\n self.built = True\n\n def call(self, inputs):\n if self.rank == 1:\n outputs = K.conv1d(\n inputs,\n self.kernel,\n strides=self.strides[0],\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate[0])\n if self.rank == 2:\n outputs = K.conv2d(\n inputs,\n self.kernel,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n if self.rank == 3:\n outputs = K.conv3d(\n inputs,\n self.kernel,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n self.bias,\n data_format=self.data_format)\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_last':\n space = input_shape[1:-1]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_utils.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i])\n new_space.append(new_dim)\n return (input_shape[0],) + tuple(new_space) + (self.filters,)\n if self.data_format == 'channels_first':\n space = input_shape[2:]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_utils.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i])\n new_space.append(new_dim)\n return (input_shape[0], self.filters) + tuple(new_space)\n\n\nclass RProjConv2D(_RProjConv):\n '''Low Rank Basis Conv2D\n Filters if number of filters, output dimension is filters\n TODO: Documentation / unit tests\n '''\n\n def __init__(self, offset_creator_class, weight_basis,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(RProjConv2D, self).__init__(\n offset_creator_class=offset_creator_class,\n weight_basis=weight_basis,\n rank=2,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n **kwargs)\n self.input_spec = InputSpec(ndim=4)\n\n def build(self, input_shape):\n assert self.data_format != 'channels_first','only b01c supported'\n channel_axis = -1\n if 
input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. Found `None`.')\n\n input_dim = input_shape[-1]\n\n self.units = self.filters\n\n kernel_shape = self.kernel_size + (input_dim, self.filters)\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n # Set input spec.\n self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_dim})\n self.built = True\n\n def call(self, inputs):\n assert self.rank == 2, 'only conv2d supported for now...'\n\n kt0, eproj = self.kernel\n k = tf.add(kt0, eproj())\n\n bt0, eprojb = self.bias\n b = tf.add(bt0, eprojb())\n\n if self.rank == 2:\n outputs = K.conv2d(\n inputs,\n k,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n b,\n data_format=self.data_format)\n\n\n #if self.activation is not None:\n # assert False,'activation functions not supported'\n # return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_last':\n space = input_shape[1:-1]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_utils.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i])\n new_space.append(new_dim)\n #self.filters*2 to accomodate LU representation\n return (input_shape[0],) + tuple(new_space) + (self.filters,)\n\n\nclass RProjBatchNormalization(LowRankBasisLayer):\n '''RProj version of BatchNormalization.'''\n\n def __init__(self, offset_creator_class, weight_basis,\n axis=-1,\n momentum=0.99,\n epsilon=1e-3,\n center=True,\n scale=True,\n beta_initializer='zeros',\n gamma_initializer='ones',\n moving_mean_initializer='zeros',\n moving_variance_initializer='ones',\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n **kwargs):\n super(RProjBatchNormalization, self).__init__(offset_creator_class, weight_basis, **kwargs)\n self.supports_masking = True\n self.axis = axis\n self.momentum = momentum\n self.epsilon = epsilon\n self.center = center\n self.scale = scale\n self.beta_initializer = initializers.get(beta_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.moving_mean_initializer = initializers.get(moving_mean_initializer)\n self.moving_variance_initializer = initializers.get(moving_variance_initializer)\n self.beta_regularizer = regularizers.get(beta_regularizer)\n self.gamma_regularizer = regularizers.get(gamma_regularizer)\n self.beta_constraint = constraints.get(beta_constraint)\n self.gamma_constraint = constraints.get(gamma_constraint)\n\n def build(self, input_shape):\n dim = input_shape[self.axis]\n if dim is None:\n raise ValueError('Axis ' + str(self.axis) + ' of '\n 'input tensor should have a defined dimension '\n 'but the layer received an input with shape ' +\n str(input_shape) + '.')\n self.input_spec = InputSpec(ndim=len(input_shape),\n axes={self.axis: dim})\n shape = (dim,)\n\n if self.scale:\n self.gamma = self.add_weight(shape=shape,\n name='gamma',\n 
initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint)\n else:\n self.gamma = None\n if self.center:\n self.beta = self.add_weight(shape=shape,\n name='beta',\n initializer=self.beta_initializer,\n regularizer=self.beta_regularizer,\n constraint=self.beta_constraint)\n else:\n self.beta = None\n self.moving_mean = self.add_non_trainable_weight(\n shape=shape,\n name='moving_mean',\n initializer=self.moving_mean_initializer)\n self.moving_variance = self.add_non_trainable_weight(\n shape=shape,\n name='moving_variance',\n initializer=self.moving_variance_initializer)\n self.built = True\n\n def call(self, inputs, training=None):\n input_shape = K.int_shape(inputs)\n # Prepare broadcasting shape.\n ndim = len(input_shape)\n reduction_axes = list(range(len(input_shape)))\n del reduction_axes[self.axis]\n broadcast_shape = [1] * len(input_shape)\n broadcast_shape[self.axis] = input_shape[self.axis]\n\n # Determines whether broadcasting is needed.\n needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])\n\n def normalize_inference():\n if needs_broadcasting:\n # In this case we must explicitly broadcast all parameters.\n broadcast_moving_mean = K.reshape(self.moving_mean,\n broadcast_shape)\n broadcast_moving_variance = K.reshape(self.moving_variance,\n broadcast_shape)\n if self.center:\n broadcast_beta = K.reshape(self.beta, broadcast_shape)\n else:\n broadcast_beta = None\n if self.scale:\n broadcast_gamma = K.reshape(self.gamma,\n broadcast_shape)\n else:\n broadcast_gamma = None\n return K.batch_normalization(\n inputs,\n broadcast_moving_mean,\n broadcast_moving_variance,\n broadcast_beta,\n broadcast_gamma,\n epsilon=self.epsilon)\n else:\n return K.batch_normalization(\n inputs,\n self.moving_mean,\n self.moving_variance,\n self.beta,\n self.gamma,\n epsilon=self.epsilon)\n\n # If the learning phase is *static* and set to inference:\n if training in {0, False}:\n return normalize_inference()\n\n # If the learning is either dynamic, or set to training:\n normed_training, mean, variance = K.normalize_batch_in_training(\n inputs, self.gamma, self.beta, reduction_axes,\n epsilon=self.epsilon)\n\n self.add_update([K.moving_average_update(self.moving_mean,\n mean,\n self.momentum),\n K.moving_average_update(self.moving_variance,\n variance,\n self.momentum)],\n inputs)\n\n # Pick the normalized form corresponding to the training phase.\n return K.in_train_phase(normed_training,\n normalize_inference,\n training=training)\n\n\n\nclass RProjLocallyConnected2D(LowRankBasisLayer):\n \"\"\"Locally-connected layer for 2D inputs.\n The `LocallyConnected2D` layer works similarly\n to the `Conv2D` layer, except that weights are unshared,\n that is, a different set of filters is applied at each\n different patch of the input.\n \"\"\"\n\n # @interfaces.legacy_conv2d_support\n def __init__(self, offset_creator_class, weight_basis,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(RProjLocallyConnected2D, self).__init__(offset_creator_class, weight_basis, **kwargs)\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')\n self.padding = 
conv_utils.normalize_padding(padding)\n if self.padding != 'valid':\n raise ValueError('Invalid border mode for LocallyConnected2D '\n '(only \"valid\" is supported): ' + padding)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=4)\n\n def build(self, input_shape):\n if self.data_format == 'channels_last':\n input_row, input_col = input_shape[1:-1]\n input_filter = input_shape[3]\n else:\n input_row, input_col = input_shape[2:]\n input_filter = input_shape[1]\n if input_row is None or input_col is None:\n raise ValueError('The spatial dimensions of the inputs to '\n ' a LocallyConnected2D layer '\n 'should be fully-defined, but layer received '\n 'the inputs shape ' + str(input_shape))\n output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],\n self.padding, self.strides[0])\n output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],\n self.padding, self.strides[1])\n self.output_row = output_row\n self.output_col = output_col\n self.kernel_shape = (output_row * output_col,\n self.kernel_size[0] * self.kernel_size[1] * input_filter,\n self.filters)\n self.kernel = self.add_weight(shape=self.kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(output_row, output_col, self.filters),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n if self.data_format == 'channels_first':\n self.input_spec = InputSpec(ndim=4, axes={1: input_filter})\n else:\n self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})\n self.built = True\n\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_first':\n rows = input_shape[2]\n cols = input_shape[3]\n elif self.data_format == 'channels_last':\n rows = input_shape[1]\n cols = input_shape[2]\n\n rows = conv_utils.conv_output_length(rows, self.kernel_size[0],\n self.padding, self.strides[0])\n cols = conv_utils.conv_output_length(cols, self.kernel_size[1],\n self.padding, self.strides[1])\n\n if self.data_format == 'channels_first':\n return (input_shape[0], self.filters, rows, cols)\n elif self.data_format == 'channels_last':\n return (input_shape[0], rows, cols, self.filters)\n\n def call(self, inputs):\n _, _, filters = self.kernel_shape\n\n output = K.local_conv2d(inputs,\n self.kernel,\n self.kernel_size,\n self.strides,\n (self.output_row, self.output_col),\n self.data_format)\n\n if self.use_bias:\n if self.data_format == 'channels_first' or self.data_format == 'channels_last':\n output = K.bias_add(output, self.bias, data_format=self.data_format)\n\n output = self.activation(output)\n return output\n\n def get_config(self):\n config = {\n 'filters': self.filters,\n 'kernel_size': self.kernel_size,\n 'strides': self.strides,\n 'padding': self.padding,\n 'data_format': 
self.data_format,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(RProjLocallyConnected2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n" ]
[ [ "tensorflow.keras.backend.floatx", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.serialize", "tensorflow.keras.backend.moving_average_update", "tensorflow.keras.backend.batch_normalization", "tensorflow.python.keras.utils.conv_utils.conv_output_length", "tensorflow.keras.backend.normalize_batch_in_training", "tensorflow.keras.backend.local_conv2d", "tensorflow.keras.backend.in_train_phase", "tensorflow.keras.backend.int_shape", "tensorflow.keras.backend.reshape", "tensorflow.keras.layers.InputSpec", "tensorflow.keras.initializers.get", "tensorflow.keras.backend.bias_add", "tensorflow.keras.backend.conv3d", "tensorflow.keras.backend.dot", "tensorflow.keras.initializers.serialize", "tensorflow.python.keras.utils.conv_utils.normalize_tuple", "tensorflow.keras.backend.conv2d", "tensorflow.keras.constraints.get", "tensorflow.keras.activations.serialize", "tensorflow.keras.regularizers.get", "tensorflow.keras.backend.conv1d", "tensorflow.python.keras.utils.conv_utils.normalize_data_format", "tensorflow.keras.activations.get", "tensorflow.python.keras.utils.conv_utils.normalize_padding" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
esztermarton/tf-quant-finance
[ "18afe2e56e657b4eaca72bd67ee1428891ebea46" ]
[ "tf_quant_finance/black_scholes/__init__.py" ]
[ "# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TensorFlow Quantitative Finance volatility surfaces and vanilla options.\"\"\"\n\nfrom tf_quant_finance.black_scholes import crr_binomial_tree\nfrom tf_quant_finance.black_scholes import vanilla_prices\nfrom tf_quant_finance.black_scholes import approximations\nfrom tf_quant_finance.black_scholes.implied_vol_approximation import implied_vol as implied_vol_approx\nfrom tf_quant_finance.black_scholes.implied_vol_lib import implied_vol\nfrom tf_quant_finance.black_scholes.implied_vol_lib import ImpliedVolMethod\nfrom tf_quant_finance.black_scholes.implied_vol_newton_root import implied_vol as implied_vol_newton\n\nfrom tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import\n\nbinary_price = vanilla_prices.binary_price\noption_price = vanilla_prices.option_price\noption_price_binomial = crr_binomial_tree.option_price_binomial\n\n_allowed_symbols = [\n 'approximations',\n 'binary_price',\n 'implied_vol',\n 'implied_vol_approx',\n 'implied_vol_newton',\n 'option_price',\n 'option_price_binomial',\n 'ImpliedVolMethod',\n]\n\nremove_undocumented(__name__, _allowed_symbols)\n" ]
[ [ "tensorflow.python.util.all_util.remove_undocumented" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
psli01/DISTRE
[ "ba5115c1c2bea96ecccd91be781a6fcdaa3df6fb" ]
[ "experiments/utils/pr_curve_and_predictions.py" ]
[ "from typing import Optional\n\nimport sys\nsys.path.append('./')\n\nimport logging\nimport json\nimport pickle\nimport pathlib\nimport random\nfrom os.path import join\nfrom itertools import groupby\nfrom collections import namedtuple\nfrom operator import attrgetter\n\nimport fire\nimport numpy as np\nimport tre\nfrom tqdm import tqdm\nfrom sklearn.metrics import auc\nfrom allennlp.predictors import Predictor\nfrom allennlp.models.archival import load_archive\n \n\nFORMATTER = logging.Formatter(\"%(asctime)s — %(name)s — %(levelname)s — %(message)s\")\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nhandler.setFormatter(FORMATTER)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\n\nPrediction = namedtuple('Prediction', ['score', 'is_correct', 'bag_id', 'bag_labels', 'predicted_label'])\n \n\ndef instance_id(mention):\n head = mention['head']['word']\n tail = mention['tail']['word']\n return (head.lower(), tail.lower())\n\n\ndef load_nyt_test_bags(file):\n test_bags = {}\n \n with open(file, 'r') as f:\n nyt_dataset = json.load(f)\n \n for instance, mentions in groupby(sorted(nyt_dataset, key=lambda mention: instance_id(mention)), key=lambda mention: instance_id(mention)):\n head, tail = instance\n mention_instances = []\n \n for mention in mentions:\n mention_instances.append(dict(sentence=mention['sentence'], head=head, tail=tail, relation=mention['relation']))\n\n test_bags[instance] = mention_instances\n return test_bags\n\n\ndef compute_pr_curve_and_predictions(model_dir: str, test_file: str, archive_filename: str='model.tar.gz',\n eval_mode: Optional[str]=None, weights_file: Optional[str]=None,\n output_dir: Optional[str]=None, cuda_device: int=0,\n predictor_name='tre-classifier', max_instances: Optional[int]=None):\n if eval_mode is not None and eval_mode not in ['one', 'two', 'all']:\n raise ValueError(f\"Eval mode '{eval_mode}' not supported.\")\n\n logger.info(f\"Loading test file: '{test_file}'\")\n test_bags = load_nyt_test_bags(test_file)\n logger.info(f\"Test file: '{test_file}' contains {len(test_bags)} bags.\")\n\n archive_path = join(model_dir, archive_filename)\n logger.info(f\"Loading model archive: '{archive_path}'\")\n if weights_file is not None:\n weights_file = join(model_dir, weights_file)\n logger.info(f\"Loading weights file: '{weights_file}'\")\n predictor = Predictor.from_archive(load_archive(archive_path,\n cuda_device=cuda_device,\n weights_file=weights_file),\n predictor_name=predictor_name)\n\n id2label = predictor._model.vocab.get_index_to_token_vocabulary(namespace='labels')\n relation_at_index = list(id2label.values())\n \n num_relation_facts = 0\n \n n_pos = 0\n\n logger.info(f\"Using eval mode '{eval_mode}'.\")\n \n predictions = []\n for instance, bag_mentions in tqdm(list(test_bags.items())[:max_instances]):\n if eval_mode is not None:\n if len(bag_mentions) < 2:\n continue\n\n random.shuffle(bag_mentions)\n\n if eval_mode == 'one':\n bag_mentions = bag_mentions[:1]\n elif eval_mode == 'two':\n bag_mentions = bag_mentions[:2]\n\n bag_labels = set([mention['relation'] for mention in bag_mentions])\n bag_labels.discard('NA')\n \n result = predictor.predict_batch_json(bag_mentions)\n \n assert len(result['logits']) == 1\n \n if bag_labels:\n n_pos += 1\n \n num_relation_facts += len(bag_labels)\n\n # For each bag and positive relation create a prediction\n logits = result['logits'][0]\n for idx, logit in enumerate(logits):\n if relation_at_index[idx] == 'NA':\n continue\n\n is_correct = relation_at_index[idx] in 
bag_labels\n predictions.append(Prediction(score=logit,\n is_correct=is_correct,\n bag_id=instance,\n predicted_label=id2label[idx],\n bag_labels=bag_labels))\n\n print(num_relation_facts)\n \n predictions = sorted(predictions, key=attrgetter('score'), reverse=True)\n \n correct = 0\n precision_values = []\n recall_values = []\n for idx, prediction in enumerate(predictions):\n if prediction.is_correct:\n correct += 1\n precision_values.append(correct / (idx+1))\n recall_values.append(correct / num_relation_facts)\n\n def precision_at(n):\n return (sum([prediction.is_correct for prediction in predictions[:n]]) / n) * 100\n\n pr_metrics = {\n 'P/R AUC': auc(x=recall_values, y=precision_values),\n 'Precision@100': precision_at(100),\n 'Precision@200': precision_at(200),\n 'Precision@300': precision_at(300),\n 'Mean': np.mean([precision_at(i) for i in [100, 200, 300]])\n }\n\n logger.info(f'PR Metrics: {pr_metrics}')\n\n output_dir = output_dir or model_dir\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n if eval_mode is None:\n with open(join(output_dir, 'pr_metrics.json'), 'w') as pr_metrics_f:\n json.dump(pr_metrics, pr_metrics_f)\n\n with open(join(output_dir, 'predictions.pkl'), 'wb') as predictions_f:\n pickle.dump(predictions, predictions_f)\n\n np.save(join(output_dir, 'precision.npy'), precision_values)\n np.save(join(output_dir, 'recall.npy'), recall_values)\n else:\n with open(join(output_dir, f'pr_metrics_{eval_mode}.json'), 'w') as pr_metrics_f:\n json.dump(pr_metrics, pr_metrics_f) \n \n\nif __name__ == \"__main__\":\n fire.Fire(compute_pr_curve_and_predictions)\n" ]
[ [ "sklearn.metrics.auc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
team1236/Tensorflow-2.0-Computer-Vision-Cookbook
[ "92ea6713f664cff9eccaaccea8ac756f808e2066" ]
[ "ch2/recipe7/data_aug_keras.py" ]
[ "import os\nimport pathlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow_docs as tfdocs\nimport tensorflow_docs.plots\nfrom glob import glob\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.preprocessing.image import *\n\n\ndef load_images_and_labels(image_paths, target_size=(64, 64)):\n images = []\n labels = []\n\n for image_path in image_paths:\n image = load_img(image_path, target_size=target_size)\n image = img_to_array(image)\n\n label = image_path.split(os.path.sep)[-2]\n\n images.append(image)\n labels.append(label)\n\n return np.array(images), np.array(labels)\n\n\ndef build_network(width, height, depth, classes):\n input_layer = Input(shape=(width, height, depth))\n\n x = Conv2D(filters=32,\n kernel_size=(3, 3),\n padding='same')(input_layer)\n x = ReLU()(x)\n x = BatchNormalization(axis=-1)(x)\n x = Conv2D(filters=32,\n kernel_size=(3, 3),\n padding='same')(x)\n x = ReLU()(x)\n x = BatchNormalization(axis=-1)(x)\n x = MaxPooling2D(pool_size=(2, 2))(x)\n x = Dropout(rate=0.25)(x)\n\n x = Conv2D(filters=64,\n kernel_size=(3, 3),\n padding='same')(x)\n x = ReLU()(x)\n x = BatchNormalization(axis=-1)(x)\n x = Conv2D(filters=64,\n kernel_size=(3, 3),\n padding='same')(x)\n x = ReLU()(x)\n x = BatchNormalization(axis=-1)(x)\n x = MaxPooling2D(pool_size=(2, 2))(x)\n x = Dropout(rate=0.25)(x)\n\n x = Flatten()(x)\n x = Dense(units=512)(x)\n x = ReLU()(x)\n x = BatchNormalization(axis=-1)(x)\n x = Dropout(rate=0.25)(x)\n\n x = Dense(units=classes)(x)\n output = Softmax()(x)\n\n return Model(input_layer, output)\n\n\ndef plot_model_history(model_history, metric, plot_name):\n plt.style.use('seaborn-darkgrid')\n plotter = tfdocs.plots.HistoryPlotter()\n plotter.plot({'Model': model_history}, metric=metric)\n\n plt.title(f'{metric.upper()}')\n plt.ylim([0, 1])\n\n plt.savefig(f'{plot_name}.png')\n plt.close()\n\n\nSEED = 999\nnp.random.seed(SEED)\n\nbase_path = (pathlib.Path.home() / '.keras' / 'datasets' /\n '101_ObjectCategories')\nimages_pattern = str(base_path / '*' / '*.jpg')\nimage_paths = [*glob(images_pattern)]\nimage_paths = [p for p in image_paths if\n p.split(os.path.sep)[-2] != 'BACKGROUND_Google']\nclasses = {p.split(os.path.sep)[-2] for p in image_paths}\n\nX, y = load_images_and_labels(image_paths)\nX = X.astype('float') / 255.0\ny = LabelBinarizer().fit_transform(y)\n\n(X_train, X_test,\n y_train, y_test) = train_test_split(X, y,\n test_size=0.2,\n random_state=SEED)\n\nEPOCHS = 40\nBATCH_SIZE = 64\nmodel = build_network(64, 64, 3, len(classes))\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\nhistory = model.fit(X_train, y_train,\n validation_data=(X_test, y_test),\n epochs=EPOCHS,\n batch_size=BATCH_SIZE)\nresult = model.evaluate(X_test, y_test)\nprint(f'Test accuracy: {result[1]}')\nplot_model_history(history, 'accuracy', 'normal')\n\n#######################################\n\nmodel = build_network(64, 64, 3, len(classes))\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\naugmenter = ImageDataGenerator(horizontal_flip=True,\n rotation_range=30,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.2,\n zoom_range=0.2,\n fill_mode='nearest')\ntrain_generator = augmenter.flow(X_train, y_train, BATCH_SIZE)\nhistory = model.fit(train_generator,\n steps_per_epoch=len(X_train) // 
BATCH_SIZE,\n validation_data=(X_test, y_test),\n epochs=EPOCHS)\n\nresult = model.evaluate(X_test, y_test)\nprint(f'Test accuracy: {result[1]}')\nplot_model_history(history, 'accuracy', 'augmented')\n" ]
[ [ "numpy.random.seed", "tensorflow.keras.models.Model", "matplotlib.pyplot.ylim", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "sklearn.preprocessing.LabelBinarizer", "numpy.array", "matplotlib.pyplot.style.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
akssri/rlax
[ "dce52b43c57be7b56d7632b39c0446164526a668" ]
[ "rlax/_src/distributions_test.py" ]
[ "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Unit tests for `distributions.py`.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport chex\nimport jax\nimport numpy as np\nfrom rlax._src import distributions\n\n\nclass CategoricalSampleTest(parameterized.TestCase):\n\n @chex.all_variants()\n def test_categorical_sample(self):\n key = np.array([1, 2], dtype=np.uint32)\n probs = np.array([0.2, 0.3, 0.5])\n sample = self.variant(distributions.categorical_sample)(key, probs)\n self.assertEqual(sample, 0)\n\n @chex.all_variants()\n @parameterized.parameters(\n ((-1., 10., -1.),),\n ((0., 0., 0.),),\n ((1., np.inf, 3.),),\n ((1., 2., -np.inf),),\n ((1., 2., np.nan),),\n )\n def test_categorical_sample_on_invalid_distributions(self, probs):\n key = np.array([1, 2], dtype=np.uint32)\n probs = np.asarray(probs)\n sample = self.variant(distributions.categorical_sample)(key, probs)\n self.assertEqual(sample, -1)\n\n\nclass SoftmaxTest(parameterized.TestCase):\n\n def setUp(self):\n super(SoftmaxTest, self).setUp()\n\n self.logits = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)\n self.samples = np.array([0, 1], dtype=np.int32)\n\n self.expected_probs = np.array( # softmax with temperature=10\n [[0.34425336, 0.34425336, 0.31149334],\n [0.332225, 0.3671654, 0.3006096]],\n dtype=np.float32)\n probs = np.array( # softmax with temperature=1\n [[0.42231882, 0.42231882, 0.15536241],\n [0.24472848, 0.66524094, 0.09003057]],\n dtype=np.float32)\n logprobs = np.log(probs)\n self.expected_logprobs = np.array(\n [logprobs[0][self.samples[0]], logprobs[1][self.samples[1]]])\n self.expected_entropy = -np.sum(probs * logprobs, axis=-1)\n\n @chex.all_variants()\n def test_softmax_probs(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.softmax(temperature=10.)\n softmax = self.variant(distrib.probs)\n # For each element in the batch.\n for logits, expected in zip(self.logits, self.expected_probs):\n # Test outputs.\n actual = softmax(logits)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_softmax_probs_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.softmax(temperature=10.)\n softmax = self.variant(distrib.probs)\n # Test softmax output in batch.\n actual = softmax(self.logits)\n np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_softmax_logprob(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.softmax()\n logprob_fn = self.variant(distrib.logprob)\n # For each element in the batch.\n for logits, samples, expected in zip(\n self.logits, self.samples, self.expected_logprobs):\n # Test output.\n actual = logprob_fn(samples, logits)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def 
test_softmax_logprob_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.softmax()\n logprob_fn = self.variant(distrib.logprob)\n # Test softmax output in batch.\n actual = logprob_fn(self.samples, self.logits)\n np.testing.assert_allclose(self.expected_logprobs, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_softmax_entropy(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.softmax()\n entropy_fn = self.variant(distrib.entropy)\n # For each element in the batch.\n for logits, expected in zip(self.logits, self.expected_entropy):\n # Test outputs.\n actual = entropy_fn(logits)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_softmax_entropy_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.softmax()\n entropy_fn = self.variant(distrib.entropy)\n # Test softmax output in batch.\n actual = entropy_fn(self.logits)\n np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)\n\n\nclass EpsilonSoftmaxTest(parameterized.TestCase):\n\n def setUp(self):\n super(EpsilonSoftmaxTest, self).setUp()\n\n self.logits = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)\n self.samples = np.array([0, 1], dtype=np.int32)\n\n self.expected_probs = np.array( # softmax with temperature=10\n [[0.34316134, 0.34316134, 0.3136773],\n [0.3323358, 0.36378217, 0.30388197]],\n dtype=np.float32)\n probs = np.array( # softmax with temperature=10\n [[0.34316134, 0.34316134, 0.3136773],\n [0.3323358, 0.36378217, 0.30388197]],\n dtype=np.float32)\n probs = distributions._mix_with_uniform(probs, epsilon=0.1)\n logprobs = np.log(probs)\n self.expected_logprobs = np.array(\n [logprobs[0][self.samples[0]], logprobs[1][self.samples[1]]])\n self.expected_entropy = -np.sum(probs * logprobs, axis=-1)\n\n @chex.all_variants()\n def test_softmax_probs(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.epsilon_softmax(epsilon=0.1,\n temperature=10.)\n softmax = self.variant(distrib.probs)\n # For each element in the batch.\n for logits, expected in zip(self.logits, self.expected_probs):\n # Test outputs.\n actual = softmax(logits)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_softmax_probs_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.epsilon_softmax(epsilon=0.1,\n temperature=10.)\n softmax = self.variant(distrib.probs)\n # Test softmax output in batch.\n actual = softmax(self.logits)\n np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_safe_epsilon_softmax_equivalence(self):\n distrib = distributions.safe_epsilon_softmax(epsilon=0.1,\n temperature=10.)\n softmax = self.variant(distrib.probs)\n # Test softmax output in batch.\n actual = softmax(self.logits)\n np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)\n\n\nclass GreedyTest(parameterized.TestCase):\n\n def setUp(self):\n super(GreedyTest, self).setUp()\n\n self.preferences = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)\n self.samples = np.array([0, 1], dtype=np.int32)\n\n self.expected_probs = np.array(\n [[0.5, 0.5, 0.], [0., 1., 0.]], dtype=np.float32)\n self.expected_logprob = np.array(\n [-0.6931472, 0.], dtype=np.float32)\n self.expected_entropy = np.array(\n [0.6931472, 0.], dtype=np.float32)\n\n @chex.all_variants()\n def test_greedy_probs(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.greedy()\n greedy = 
self.variant(distrib.probs)\n # For each element in the batch.\n for preferences, expected in zip(self.preferences, self.expected_probs):\n # Test outputs.\n actual = greedy(preferences)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_probs_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.greedy()\n greedy = self.variant(distrib.probs)\n # Test greedy output in batch.\n actual = greedy(self.preferences)\n np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_logprob(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.greedy()\n logprob_fn = self.variant(distrib.logprob)\n # For each element in the batch.\n for preferences, samples, expected in zip(\n self.preferences, self.samples, self.expected_logprob):\n # Test output.\n actual = logprob_fn(samples, preferences)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_logprob_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.greedy()\n logprob_fn = self.variant(distrib.logprob)\n # Test greedy output in batch.\n actual = logprob_fn(self.samples, self.preferences)\n np.testing.assert_allclose(self.expected_logprob, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_entropy(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.greedy()\n entropy_fn = self.variant(distrib.entropy)\n # For each element in the batch.\n for preferences, expected in zip(self.preferences, self.expected_entropy):\n # Test outputs.\n actual = entropy_fn(preferences)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_entropy_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.greedy()\n entropy_fn = self.variant(distrib.entropy)\n # Test greedy output in batch.\n actual = entropy_fn(self.preferences)\n np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)\n\n\nclass EpsilonGreedyTest(parameterized.TestCase):\n\n def setUp(self):\n super(EpsilonGreedyTest, self).setUp()\n self.epsilon = 0.2\n\n self.preferences = np.array([[1, 1, 0, 0], [1, 2, 0, 0]], dtype=np.float32)\n self.samples = np.array([0, 1], dtype=np.int32)\n\n self.expected_probs = np.array(\n [[0.45, 0.45, 0.05, 0.05], [0.05, 0.85, 0.05, 0.05]], dtype=np.float32)\n self.expected_logprob = np.array(\n [-0.7985077, -0.1625189], dtype=np.float32)\n self.expected_entropy = np.array(\n [1.01823008, 0.58750093], dtype=np.float32)\n\n @chex.all_variants()\n def test_greedy_probs(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.epsilon_greedy(self.epsilon)\n probs_fn = self.variant(distrib.probs)\n # For each element in the batch.\n for preferences, expected in zip(self.preferences, self.expected_probs):\n # Test outputs.\n actual = probs_fn(preferences)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_probs_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.epsilon_greedy(self.epsilon)\n probs_fn = self.variant(distrib.probs)\n # Test greedy output in batch.\n actual = probs_fn(self.preferences)\n np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_logprob(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.epsilon_greedy(self.epsilon)\n logprob_fn = self.variant(distrib.logprob)\n 
# For each element in the batch.\n for preferences, samples, expected in zip(\n self.preferences, self.samples, self.expected_logprob):\n # Test output.\n actual = logprob_fn(samples, preferences)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_logprob_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.epsilon_greedy(self.epsilon)\n logprob_fn = self.variant(distrib.logprob)\n # Test greedy output in batch.\n actual = logprob_fn(self.samples, self.preferences)\n np.testing.assert_allclose(self.expected_logprob, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_entropy(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.epsilon_greedy(self.epsilon)\n entropy_fn = self.variant(distrib.entropy)\n # For each element in the batch.\n for preferences, expected in zip(self.preferences, self.expected_entropy):\n # Test outputs.\n actual = entropy_fn(preferences)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_greedy_entropy_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.epsilon_greedy(self.epsilon)\n entropy_fn = self.variant(distrib.entropy)\n # Test greedy output in batch.\n actual = entropy_fn(self.preferences)\n np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_safe_epsilon_softmax_equivalence(self):\n distrib = distributions.safe_epsilon_softmax(epsilon=self.epsilon,\n temperature=0)\n probs_fn = self.variant(distrib.probs)\n # Test greedy output in batch.\n actual = probs_fn(self.preferences)\n np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)\n\n logprob_fn = self.variant(distrib.logprob)\n # Test greedy output in batch.\n actual = logprob_fn(self.samples, self.preferences)\n np.testing.assert_allclose(self.expected_logprob, actual, atol=1e-4)\n\n sample_fn = self.variant(distrib.sample)\n # Optionally convert to device array.\n key = np.array([1, 2], dtype=np.uint32)\n actions = sample_fn(key, self.preferences)\n # test just the shape\n self.assertEqual(actions.shape, (2,))\n\n\nclass GaussianDiagonalTest(parameterized.TestCase):\n\n def setUp(self):\n super(GaussianDiagonalTest, self).setUp()\n\n self.mu = np.array([[1., -1], [0.1, -0.1]], dtype=np.float32)\n self.sigma = np.array([[0.1, 0.1], [0.2, 0.3]], dtype=np.float32)\n self.sample = np.array([[1.2, -1.1], [-0.1, 0.]], dtype=np.float32)\n\n # Expected values for the distribution's function were computed using\n # tfd.MultivariateNormalDiag (from the tensorflow_probability package).\n self.expected_prob_a = np.array(\n [1.3064219, 1.5219283], dtype=np.float32)\n self.expected_logprob_a = np.array(\n [0.26729202, 0.41997814], dtype=np.float32)\n self.expected_entropy = np.array(\n [-1.7672932, 0.02446628], dtype=np.float32)\n\n @chex.all_variants()\n def test_gaussian_prob(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.gaussian_diagonal()\n prob_fn = self.variant(distrib.prob)\n # For each element in the batch.\n for mu, sigma, sample, expected in zip(\n self.mu, self.sigma, self.sample, self.expected_prob_a):\n # Test outputs.\n actual = prob_fn(sample, mu, sigma)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_gaussian_prob_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.gaussian_diagonal()\n prob_fn = self.variant(distrib.prob)\n # Test greedy output in batch.\n actual = 
prob_fn(self.sample, self.mu, self.sigma)\n np.testing.assert_allclose(self.expected_prob_a, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_gaussian_logprob(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.gaussian_diagonal()\n logprob_fn = self.variant(distrib.logprob)\n # For each element in the batch.\n for mu, sigma, sample, expected in zip(\n self.mu, self.sigma, self.sample, self.expected_logprob_a):\n # Test output.\n actual = logprob_fn(sample, mu, sigma)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_gaussian_logprob_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.gaussian_diagonal()\n logprob_fn = self.variant(distrib.logprob)\n # Test greedy output in batch.\n actual = logprob_fn(self.sample, self.mu, self.sigma)\n np.testing.assert_allclose(self.expected_logprob_a, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_gaussian_entropy(self):\n \"\"\"Tests for a single element.\"\"\"\n distrib = distributions.gaussian_diagonal()\n entropy_fn = self.variant(distrib.entropy)\n # For each element in the batch.\n for mu, sigma, expected in zip(\n self.mu, self.sigma, self.expected_entropy):\n # Test outputs.\n actual = entropy_fn(mu, sigma)\n np.testing.assert_allclose(expected, actual, atol=1e-4)\n\n @chex.all_variants()\n def test_gaussian_entropy_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n distrib = distributions.gaussian_diagonal()\n entropy_fn = self.variant(distrib.entropy)\n # Test greedy output in batch.\n actual = entropy_fn(self.mu, self.sigma)\n np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)\n\n\nclass ImportanceSamplingTest(parameterized.TestCase):\n\n def setUp(self):\n super(ImportanceSamplingTest, self).setUp()\n\n self.pi_logits = np.array([[0.2, 0.8], [0.6, 0.4]], dtype=np.float32)\n self.mu_logits = np.array([[0.8, 0.2], [0.6, 0.4]], dtype=np.float32)\n self.actions = np.array([1, 0], dtype=np.int32)\n\n pi = jax.nn.softmax(self.pi_logits)\n mu = jax.nn.softmax(self.mu_logits)\n self.expected_rhos = np.array(\n [pi[0][1] / mu[0][1], pi[1][0] / mu[1][0]], dtype=np.float32)\n\n @chex.all_variants()\n def test_importance_sampling_ratios_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n ratios_fn = self.variant(\n distributions.categorical_importance_sampling_ratios)\n # Test softmax output in batch.\n actual = ratios_fn(self.pi_logits, self.mu_logits, self.actions)\n np.testing.assert_allclose(self.expected_rhos, actual, atol=1e-4)\n\n\nclass CategoricalKLTest(parameterized.TestCase):\n\n def setUp(self):\n super(CategoricalKLTest, self).setUp()\n self.p_logits = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)\n p_probs = np.array([[0.42231882, 0.42231882, 0.15536241],\n [0.24472848, 0.66524094, 0.09003057]],\n dtype=np.float32)\n p_logprobs = np.log(p_probs)\n self.q_logits = np.array([[1, 2, 0], [1, 1, 0]], dtype=np.float32)\n q_probs = np.array([[0.24472848, 0.66524094, 0.09003057],\n [0.42231882, 0.42231882, 0.15536241]],\n dtype=np.float32)\n q_logprobs = np.log(q_probs)\n\n self.expected_kl = np.sum(p_probs * (p_logprobs - q_logprobs), axis=-1)\n\n @chex.all_variants()\n def test_categorical_kl_divergence_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n kl_fn = self.variant(distributions.categorical_kl_divergence)\n # Test softmax output in batch.\n actual = kl_fn(self.p_logits, self.q_logits)\n np.testing.assert_allclose(self.expected_kl, actual, atol=1e-4)\n\n\nclass 
CategoricalCrossEntropyTest(parameterized.TestCase):\n\n def setUp(self):\n super(CategoricalCrossEntropyTest, self).setUp()\n\n self.labels = np.array([[0., 1., 0.], [1., 0., 0.]], dtype=np.float32)\n self.logits = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32)\n\n self.expected = np.array([9.00013, 3.0696733], dtype=np.float32)\n\n @chex.all_variants()\n def test_categorical_cross_entropy_batch(self):\n \"\"\"Tests for a full batch.\"\"\"\n cross_entropy = self.variant(jax.vmap(\n distributions.categorical_cross_entropy))\n # Test outputs.\n actual = cross_entropy(self.labels, self.logits)\n np.testing.assert_allclose(self.expected, actual, atol=1e-4)\n\n\nif __name__ == '__main__':\n jax.config.update('jax_numpy_rank_promotion', 'raise')\n absltest.main()\n" ]
[ [ "numpy.log", "numpy.asarray", "numpy.testing.assert_allclose", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ptelang/opencv_contrib
[ "dd68e396c76f1db4d82e5aa7a6545580939f9b9d" ]
[ "modules/rgbd/misc/python/test/test_rgbd.py" ]
[ "#!/usr/bin/env python\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\n\nimport os, numpy\n\nimport cv2 as cv\n\nfrom tests_common import NewOpenCVTests\n\nclass rgbd_test(NewOpenCVTests):\n\n def test_computeRgbdPlane(self):\n\n depth_image = self.get_sample('/cv/rgbd/depth.png', cv.IMREAD_ANYDEPTH)\n if depth_image is None:\n raise unittest.SkipTest(\"Missing files with test data\")\n\n K = numpy.array([[525, 0, 320.5], [0, 525, 240.5], [0, 0, 1]])\n points3d = cv.rgbd.depthTo3d(depth_image, K)\n normals_computer = normals_computer = cv.rgbd.RgbdNormals_create(480, 640, 5, K)\n normals = normals_computer.apply(points3d)\n rgbd_plane = cv.rgbd.RgbdPlane_create(cv.rgbd.RgbdPlane_RGBD_PLANE_METHOD_DEFAULT, 40, 1600, 0.01, 0, 0, 0)\n _, planes_coeff = rgbd_plane.apply(points3d, normals)\n\n planes_coeff_expected = \\\n numpy.asarray([[[-0.02447728, -0.8678335 , -0.49625182, 4.02800846]],\n [[-0.05055107, -0.86144137, -0.50533485, 3.95456314]],\n [[-0.03294908, -0.86964548, -0.49257591, 3.97052431]],\n [[-0.02886586, -0.87153459, -0.48948362, 7.77550507]],\n [[-0.04455929, -0.87659335, -0.47916424, 3.93200684]],\n [[-0.21514639, 0.18835169, -0.95824611, 7.59479475]],\n [[-0.01006953, -0.86679155, -0.49856904, 4.01355648]],\n [[-0.00876531, -0.87571168, -0.48275498, 3.96768975]],\n [[-0.06395926, -0.86951321, -0.48975089, 4.08618736]],\n [[-0.01403128, -0.87593341, -0.48222789, 7.74559402]],\n [[-0.01143177, -0.87495202, -0.4840748 , 7.75355816]]],\n dtype=numpy.float32)\n\n eps = 0.05\n self.assertLessEqual(cv.norm(planes_coeff, planes_coeff_expected, cv.NORM_L2), eps)\n\nif __name__ == '__main__':\n NewOpenCVTests.bootstrap()\n" ]
[ [ "numpy.asarray", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bagustris/ravdess_song_speech
[ "05cf32d2530bc26f309c2e068817afcb6847edfc" ]
[ "code/song_gemaps_hsf_cv.py" ]
[ "#!/usr/bin/env python3 \n\n# load needed modules\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\n\nfrom keras.models import Sequential \nfrom keras.layers import Dense, Activation, Flatten, CuDNNLSTM, Flatten \nfrom keras.layers import Dropout, BatchNormalization, Bidirectional\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.preprocessing import sequence\n\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import confusion_matrix \nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\n\nimport random as rn\nimport tensorflow as tf\n\n#np.random.seed(123)\n#rn.seed(123)\n#tf.set_random_seed(123)\n\n# load feature data\nX = np.load('song_gemaps_hfs.npy') \ny = np.load('label_gemaps.npy')\n\nX = X.reshape(X.shape[0], 1, X.shape[1])\n\n# invert labels to 1D label\nlabel_encoder = LabelEncoder()\ny = label_encoder.fit_transform(np.argmax(y, axis=1))\n\n# function to define model\ndef create_model(): \n model = Sequential()\n model.add(BatchNormalization(axis=-1, input_shape=(X.shape[1], X.shape[2])))\n model.add(CuDNNLSTM(256, return_sequences=True)) \n model.add(CuDNNLSTM(256, return_sequences=True))\n model.add(CuDNNLSTM(256, return_sequences=True))\n model.add(Flatten())\n model.add(Dropout(0.4))\n model.add(Dense(8, activation='softmax')) #unit must match n classes\n \n # model compilation \n model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) \n return model\n\n\n## create model\n#model = KerasClassifier(build_fn=create_model, epochs=200, batch_size=16, verbose=1)\n\n## evaluate using 5-fold cross validation\n#kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=123)\n#results = cross_val_score(model, X, y, cv=kfold)\n#print(results.mean())\n\n\n## Without cross-validation\n# create the model \nmodel = create_model() \nprint(model.summary())\n\n# train the model\ntrain_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.1)\nhist = model.fit(train_x, train_y, epochs=200, batch_size=16)\n\n## evaluate model, test data may differ from validation data\n#evaluate = model.evaluate(test_x, test_y, batch_size=16)\n#print(evaluate)\n\n# make prediction for confusion_matrix\nimport os\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns \npredict = model.predict(test_x, batch_size=16)\nemotions=['neutral', 'calm', 'happy', 'sad', 'angry', 'fearful', 'disgust', 'surprised'] \n\n# predicted emotions from the test set \ny_pred = np.argmax(predict, 1) \npredicted_emo = [] \nfor i in range(0,test_y.shape[0]): \n emo = emotions[y_pred[i]] \n predicted_emo.append(emo)\n\n# get actual emotion\nactual_emo = [] \n#y_true = np.argmax(test_y, 1) \ny_true = test_y\nfor i in range(0,test_y.shape[0]):\n emo = emotions[y_true[i]] \n actual_emo.append(emo)\n\n# generate the confusion matrix \ncm = confusion_matrix(actual_emo, predicted_emo)\ncm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n#index = ['angry', 'calm', 'fearful', 'happy', 'neutral', 'sad'] \n#columns = ['angry', 'calm', 'fearful', 'happy', 'neutral', 'sad'] \n#cm_df = pd.DataFrame(cm, index, columns)\n#plt.figure(figsize=(10, 6)) \n#sns.heatmap(cm_df, annot=True)\n#filename = os.path.basename(__file__)[:-3] + '.svg'\n#plt.savefig(filename)\nprint(\"UAR: \", cm.trace()/cm.shape[0])\n" ]
[ [ "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "numpy.argmax", "numpy.load", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vikramborana/face_recognition
[ "6afb54ca3fcea2e37dcebcb627d116c271ff21d4" ]
[ "Hritik/labelface.py" ]
[ "import face_recognition\nfrom PIL import Image, ImageDraw\nimport numpy as np\n\n# This is an example of running face recognition on a single image\n# and drawing a box around each person that was identified.\n\n# Load a sample picture and learn how to recognize it.\nhritik_image = face_recognition.load_image_file(\"./know/hritik.jpg\")\nhritik_face_encoding = face_recognition.face_encodings(hritik_image)[0]\n\n# Load a second sample picture and learn how to recognize it.\nvishal_image = face_recognition.load_image_file(\"./know/vishal.jpg\")\nvishal_face_encoding = face_recognition.face_encodings(vishal_image)[0]\n\nvikram_image = face_recognition.load_image_file(\"./know/vikram.jpg\")\nvikram_face_encoding = face_recognition.face_encodings(vikram_image)[0]\n\nashu_image = face_recognition.load_image_file(\"./know/ashutosh.jpg\")\nashu_face_encoding = face_recognition.face_encodings(ashu_image)[0]\n\n# Create arrays of known face encodings and their names\nknown_face_encodings = [\n hritik_face_encoding,\n vishal_face_encoding,\n vikram_face_encoding,\n ashu_face_encoding\n]\nknown_face_names = [\n \"hritik\",\n \"vishal\",\n \"vikram\",\n \"ashutosh\"\n]\n\n# Load an image with an unknown face\nunknown_image = face_recognition.load_image_file(\"14.jpeg\")\n\n# Find all the faces and face encodings in the unknown image\nface_locations = face_recognition.face_locations(unknown_image)\nface_encodings = face_recognition.face_encodings(unknown_image, face_locations)\n\n# Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library\n# See http://pillow.readthedocs.io/ for more about PIL/Pillow\npil_image = Image.fromarray(unknown_image)\n# Create a Pillow ImageDraw Draw instance to draw with\ndraw = ImageDraw.Draw(pil_image)\n\n# Loop through each face found in the unknown image\nfor (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding , tolerance=0.5)\n\n name = \"Unknown\"\n\n # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # Or instead, use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n # Draw a box around the face using the Pillow module\n draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))\n\n # Draw a label with a name below the face\n text_width, text_height = draw.textsize(name)\n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))\n draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))\n\n\n# Remove the drawing library from memory as per the Pillow docs\ndel draw\n\n# Display the resulting image\npil_image.show()\n\n# You can also save a copy of the new image to disk if you want by uncommenting this line\n# pil_image.save(\"image_with_boxes.jpg\")\n" ]
[ [ "numpy.argmin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pints-team/performance-testing
[ "30ee6e5ab7eff48e47bea505e52e20a9d3c537f7" ]
[ "pmatrix/_tasks.py" ]
[ "import numpy as np\nimport multiprocessing\nfrom itertools import repeat\nfrom GPyOpt.methods import BayesianOptimization\nimport pickle\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport os\n\nimport pmatrix\nimport pints\n\n\ndef to_filename(noise_level, model, hyper_method):\n noise_str = str(noise_level).replace('.', '_')\n model_str = model.__name__\n hyper_method_str = hyper_method.method_name\n return pmatrix.DIR_RESULT + '/' + 'results-%s-%s-%s.pickle' % (model_str, hyper_method_str, noise_str)\n\n\ndef run_single(noise_level, model, hyper_method, max_tuning_runs, num_samples, only_if_not_exist=False):\n fname = to_filename(noise_level, model, hyper_method)\n\n # dont run if flag set and file already exists\n if only_if_not_exist and os.path.isfile(fname):\n print('WARNING: skipping since results file', fname, 'already exists')\n return\n\n parameters = model().suggested_parameters()\n lower = np.asarray(parameters) / 10.0\n upper = np.asarray(parameters) * 10.0\n times = model().suggested_times()\n hmethod = hyper_method(model,\n noise_level,\n times,\n parameters, lower, upper)\n\n if issubclass(hyper_method, pmatrix.HyperSampler):\n if hmethod.uses_gradients() and not issubclass(model, pints.ForwardModelS1):\n print(\"WARNING: not running combination of \",\n hyper_method.method_name, \"and\", model, \"as latter has no gradients\")\n return\n output = mcmc_sampler(num_samples,\n max_tuning_runs,\n hmethod)\n elif issubclass(hyper_method, pmatrix.HyperOptimiser):\n output = optimise_sampler(num_samples,\n max_tuning_runs,\n hmethod\n )\n elif issubclass(hyper_method, pmatrix.HyperNestedSampler):\n output = mcmc_sampler(num_samples,\n max_tuning_runs,\n hmethod\n )\n\n else:\n raise TypeError(\n \"hyper_method must be an instance of HyperSampler \"\n \"or HyperOptimiser or HyperNestedSampler\"\n )\n\n print('writing ' + fname)\n pickle.dump(output, open(fname, 'wb'))\n\n\ndef plot_matrix(noise_levels, models, hyper_optimisers, hyper_mcmcs, hyper_nested, max_tuning_runs, num_samples):\n f = plt.figure()\n y = range(len(models))\n y_labels = [m.__name__ for m in models]\n x = range(len(hyper_optimisers))\n x_labels = [o.method_name for o in hyper_optimisers]\n x_mcmc = range(len(hyper_mcmcs))\n x_mcmc_labels = [m.method_name for m in hyper_mcmcs]\n for ni, noise in enumerate(noise_levels):\n score = np.zeros((len(models), len(hyper_optimisers), num_samples))\n time = np.zeros((len(models), len(hyper_optimisers), num_samples))\n rhat = np.zeros((len(models), len(hyper_mcmcs), num_samples))\n ess = np.zeros((len(models), len(hyper_mcmcs), num_samples))\n time_mcmc = np.zeros((len(models), len(hyper_mcmcs), num_samples))\n for nm, model in enumerate(models):\n for no, optimiser in enumerate(hyper_optimisers):\n fname = to_filename(noise, model, optimiser)\n if os.path.exists(fname):\n print('reading results for (', model.__name__,\n ',', optimiser.method_name, ',', noise, ')')\n output = pickle.load(open(fname, 'rb'))\n assert(len(output[:, 1]) == num_samples)\n score[nm, no, :] = output[:, 1]\n time[nm, no, :] = output[:, 2]\n else:\n print('WARNING: no results for (', model.__name__,\n ',', optimiser.method_name, ',', noise, ')')\n score[nm, no, :] = float('nan')\n time[nm, no, :] = float('nan')\n for no, mcmc in enumerate(hyper_mcmcs):\n fname = to_filename(noise, model, mcmc)\n if os.path.exists(fname):\n print('reading ' + fname)\n output = pickle.load(open(fname, 'rb'))\n assert(len(output[:, 1] == num_samples))\n rhat[nm, 
no, :] = output[:, 0]\n ess[nm, no, :] = output[:, 1]\n time_mcmc[nm, no, :] = output[:, 2]\n else:\n print('WARNING: no results for (', model.__name__,\n ',', mcmc.method_name, ',', noise, ')')\n rhat[nm, no, :] = float('nan')\n ess[nm, no, :] = float('nan')\n time_mcmc[nm, no, :] = float('nan')\n for no, nested in enumerate(hyper_nested):\n fname = to_filename(noise, model, nested)\n if os.path.exists(fname):\n print('reading ' + fname)\n output = pickle.load(open(fname, 'rb'))\n assert(len(output[:, 1] == num_samples))\n ess_nested[nm, no, :] = output[:, 0]\n time_nested[nm, no, :] = output[:, 1]\n else:\n print('WARNING: no results for (', model.__name__,\n ',', mcmc.method_name, ',', noise, ')')\n ess_nested[nm, no, :] = float('nan')\n time_nested[nm, no, :] = float('nan')\n\n normalise = False\n if normalise:\n for nm, model in enumerate(models):\n min_score = np.min(score[nm, :, :], axis=(0, 1))\n max_score = np.max(score[nm, :, :], axis=(0, 1))\n score[nm, :, :] = (score[nm, :, :] -\n min_score) / (max_score - min_score)\n min_time = np.min(time[nm, :, :], axis=(0, 1))\n max_time = np.max(time[nm, :, :], axis=(0, 1))\n time[nm, :, :] = (time[nm, :, :] -\n min_time) / (max_time - min_time)\n min_rhat = np.min(rhat[nm, :, :], axis=(0, 1))\n max_rhat = np.max(rhat[nm, :, :], axis=(0, 1))\n rhat[nm, :, :] = (rhat[nm, :, :] -\n min_rhat) / (max_rhat - min_rhat)\n min_ess = np.min(ess[nm, :, :], axis=(0, 1))\n max_ess = np.max(ess[nm, :, :], axis=(0, 1))\n ess[nm, :, :] = (ess[nm, :, :] -\n min_ess) / (max_ess - min_ess)\n min_time_mcmc = np.min(time_mcmc[nm, :, :], axis=(0, 1))\n max_time_mcmc = np.max(time_mcmc[nm, :, :], axis=(0, 1))\n time_mcmc[nm, :, :] = (time_mcmc[nm, :, :] -\n min_time_mcmc) / (max_time_mcmc - min_time_mcmc)\n\n plt.clf()\n imshow = plt.imshow(np.mean(score, axis=2), cmap='RdYlBu_r',\n interpolation='nearest')\n plt.xticks(x, x_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='score (mean)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'score_mean_with_noise_%d.pdf' % ni)\n plt.clf()\n imshow = plt.imshow(np.min(score, axis=2), cmap='RdYlBu_r',\n interpolation='nearest')\n plt.xticks(x, x_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='score (min)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'score_min_with_noise_%d.pdf' % ni)\n plt.clf()\n plt.imshow(np.mean(time, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x, x_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='time (mean)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'time_mean_with_noise_%d.pdf' % ni)\n plt.clf()\n plt.imshow(np.min(time, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x, x_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='time (min)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'time_min_with_noise_%d.pdf' % ni)\n\n plt.clf()\n imshow = plt.imshow(np.mean(rhat, axis=2), cmap='RdYlBu_r',\n interpolation='nearest')\n plt.xticks(x_mcmc, x_mcmc_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='rhat (mean)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'rhat_mean_with_noise_%d.pdf' % ni)\n plt.clf()\n imshow = plt.imshow(np.min(rhat, axis=2), cmap='RdYlBu_r',\n interpolation='nearest')\n plt.xticks(x_mcmc, x_mcmc_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='rhat (min)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'rhat_min_with_noise_%d.pdf' % ni)\n\n plt.clf()\n 
plt.imshow(np.mean(ess, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_mcmc, x_mcmc_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='ess (mean)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'ess_mean_with_noise_%d.pdf' % ni)\n plt.clf()\n plt.imshow(np.min(ess, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_mcmc, x_mcmc_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='ess (min)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'ess_min_with_noise_%d.pdf' % ni)\n\n plt.clf()\n plt.imshow(np.mean(time_mcmc, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_mcmc, x_mcmc_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='time_mcmc (mean)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'time_mcmc_mean_with_noise_%d.pdf' % ni)\n plt.clf()\n plt.imshow(np.min(time_mcmc, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_mcmc, x_mcmc_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='time_mcmc (min)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'time_mcmc_min_with_noise_%d.pdf' % ni)\n\n plt.clf()\n plt.imshow(np.mean(ess_nested, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_nested, x_nested_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='ess (mean)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'ess_nested_mean_with_noise_%d.pdf' % ni)\n plt.clf()\n plt.imshow(np.min(ess_nested, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_nested, x_nested_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='ess (min)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'ess_nested_min_with_noise_%d.pdf' % ni)\n\n plt.clf()\n plt.imshow(np.mean(time_nested, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_nested, x_nested_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='time_nested (mean)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'time_nested_mean_with_noise_%d.pdf' % ni)\n plt.clf()\n plt.imshow(np.min(time_nested, axis=2),\n cmap='RdYlBu_r', interpolation='nearest')\n plt.xticks(x_nested, x_nested_labels, rotation=45)\n plt.yticks(y, y_labels)\n plt.colorbar(label='time_nested (min)')\n plt.tight_layout()\n plt.savefig(pmatrix.DIR_PLOT+'/'+'time_nested_min_with_noise_%d.pdf' % ni)\n\n\ndef optimise(sample_num, hyper, x):\n print('optimise for sample', sample_num)\n return hyper.optimise(x)\n\n\ndef optimise_sampler(num_samples, max_tuning_runs, hyper):\n # tune hyper\n print(\"TUNING HYPER-PARAMETERS for hyper=\", hyper)\n if (hyper.n_parameters() > 0):\n # myBopt = BayesianOptimization(f=hyper, domain=hyper.bounds(), num_cores=os.environ['OMP_NUM_THREADS'])\n myBopt = BayesianOptimization(f=hyper, domain=hyper.bounds())\n myBopt.run_optimization(max_iter=max_tuning_runs)\n x_opt = myBopt.x_opt\n else:\n x_opt = []\n\n # take samples\n print(\"TAKING SAMPLES\")\n p = multiprocessing.Pool(int(os.environ['OMP_NUM_THREADS']))\n args = zip(range(num_samples), repeat(hyper), repeat(x_opt))\n results = p.starmap(optimise, args)\n return np.array(results)\n\n\ndef sample(sample_num, hyper, x):\n print('sampling for sample', sample_num)\n return hyper.sample(x)\n\n\ndef mcmc_sampler(num_samples, max_tuning_runs, hyper):\n # tune hyper\n print(\"TUNING HYPER-PARAMETERS for hyper=\", hyper)\n if (hyper.n_parameters() > 0):\n # myBopt = BayesianOptimization(f=hyper, 
domain=hyper.bounds(), num_cores=os.environ['OMP_NUM_THREADS'])\n myBopt = BayesianOptimization(f=hyper,\n domain=hyper.bounds(),\n constraints=hyper.constraints())\n myBopt.run_optimization(max_iter=max_tuning_runs)\n x_opt = myBopt.x_opt\n else:\n x_opt = []\n\n print(\"TAKING SAMPLES\")\n p = multiprocessing.Pool(int(os.environ['OMP_NUM_THREADS']))\n args = zip(range(num_samples), repeat(hyper), repeat(x_opt))\n results = p.starmap(sample, args)\n return np.array(results)\n" ]
[ [ "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "numpy.min", "numpy.asarray", "matplotlib.use", "matplotlib.pyplot.savefig", "matplotlib.pyplot.colorbar", "numpy.max", "matplotlib.pyplot.clf", "numpy.mean", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Bertinus/IRM-games
[ "e8a94e9647d1ea7211236bbd3f4ed16b1e8207b6" ]
[ "IRM_methods.py" ]
[ "import tensorflow as tf\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nfrom tqdm import tqdm_notebook as tqdm\n\ntf.compat.v1.enable_eager_execution()\n\n\nclass AbstractIrmGame:\n \"\"\" Abstract class for IRM games. \"\"\"\n\n def __init__(self, models, optimizers, extra_grad, variable_phi, n_epochs, batch_size, termination_acc, warm_start):\n self.models = models # List of models for all the environments\n self.optimizers = optimizers # List of optimizers for all the environments\n self.extra_grad = extra_grad\n self.variable_phi = variable_phi\n\n self.n_epochs = n_epochs # Number of epochs\n self.batch_size = batch_size # Batch size for each gradient update\n self.termination_acc = termination_acc # Threshold on accuracy below which we terminating\n self.warm_start = warm_start # minimum number of steps we have to train before terminating\n\n self.n_env = len(models) if not variable_phi else len(models) - 1\n\n self.keras_criterion = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n self.torch_criterion = torch.nn.CrossEntropyLoss()\n\n self.grads = []\n self.grad_norms = [[] for _ in range(self.n_env)]\n self.losses = [[] for _ in range(self.n_env)]\n self.train_accs = []\n self.test_accs = []\n self.env_train_accs = [[] for _ in range(self.n_env)]\n\n @staticmethod\n def to_array(data):\n raise NotImplementedError\n\n @staticmethod\n def to_tensor(data):\n raise NotImplementedError\n\n @staticmethod\n def zeros(shape):\n raise NotImplementedError\n\n def predict(self, x, shape, keep_grad_idx, as_array):\n raise NotImplementedError\n\n def loss(self, x, y, i_env):\n raise NotImplementedError\n\n def zero_grad(self):\n raise NotImplementedError\n\n def update_optimizer(self, i_env):\n raise NotImplementedError\n\n def evaluate(self, x, y):\n accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n\n y_, env_preds = self.predict(x=x, shape=(y.shape[0], 2), keep_grad_idx=None, as_array=True)\n\n accuracy.update_state(y_true=y, y_pred=y_)\n acc = accuracy.result().numpy()\n\n env_accs = []\n for i_env in range(self.n_env):\n accuracy.reset_states()\n accuracy.update_state(y_true=y, y_pred=env_preds[i_env])\n env_accs.append(accuracy.result().numpy())\n\n return acc, env_accs\n\n def concatenate_train_data(self, data_tuple):\n x = data_tuple[0][0] # Combined data from environments\n for i in range(1, self.n_env):\n x_c = data_tuple[i][0]\n x = np.concatenate((x, x_c), axis=0)\n\n y = data_tuple[0][1] # Combined labels from environments\n for i in range(1, self.n_env):\n y_c = data_tuple[i][1]\n y = np.concatenate((y, y_c), axis=0)\n\n return x, y\n\n def fit(self, data_tuple_train, data_tuple_test, env_wise=False):\n x_train_all, y_train_all = self.concatenate_train_data(data_tuple_train)\n x_test_all, y_test_all = data_tuple_test[0], data_tuple_test[1]\n\n flag = False\n n_examples = data_tuple_train[0][0].shape[0]\n steps = 0\n\n for i_epoch in range(self.n_epochs):\n print(\"Epoch %i/%i...\" % (i_epoch + 1, self.n_epochs))\n\n epoch_data = []\n for env in range(self.n_env):\n x_env = data_tuple_train[env][0]\n y_env = data_tuple_train[env][1]\n epoch_data.append(shuffle(x_env, y_env))\n\n count = 0\n for offset in tqdm(range(0, n_examples, self.batch_size)):\n end = offset + self.batch_size\n x_batches = [] # list to store batches for each environment\n y_batches = [] # list to store batches of labels for each environment\n self.grads = [] # list to store gradients\n countp = count % self.n_env # countp decides 
the index of the model which trains in the current step\n\n self.zero_grad()\n for i_env in range(self.n_env):\n x_batches.append(epoch_data[i_env][0][offset:end, :])\n y_batches.append(epoch_data[i_env][1][offset:end, :])\n\n grad, loss_value = self.loss(i_env=i_env,\n x=x_batches[i_env],\n y=y_batches[i_env])\n\n self.grads.append(grad)\n self.losses[i_env].append(loss_value)\n\n ###\n # Old\n # Update the environment whose turn it is to learn\n # self.update_optimizer(i_env=countp)\n ##\n # New\n for i_env in range(self.n_env):\n self.update_optimizer(i_env)\n ###\n\n # Compute training accuracy\n train_acc, env_train_accs = self.evaluate(x=x_train_all, y=y_train_all)\n self.train_accs.append(train_acc)\n for i_env in range(self.n_env):\n self.env_train_accs[i_env].append(env_train_accs[i_env])\n\n # Compute test accuracy\n test_acc, env_test_accs = self.evaluate(x=x_test_all, y=y_test_all)\n self.test_accs.append(test_acc)\n\n # for i_env in range(self.n_env):\n # self.grad_norms[i_env].append(\n # tf.linalg.global_norm(self.grads[i_env])\n # )\n\n if steps >= self.warm_start and train_acc < self.termination_acc:\n # Terminate after warm start and train acc touches threshold we dont want it to fall below\n # flag = True\n # print(\"Early termination.\")\n # break\n pass\n\n count = count + 1\n steps = steps + 1\n\n self.plot(env_wise)\n\n if flag:\n break\n\n # print train and test accuracy\n print(\"Training accuracy: %.4f\" % self.train_accs[-1])\n print(\"Testing accuracy: %.4f\" % self.test_accs[-1])\n\n def plot(self, env_wise):\n fig, ax1 = plt.subplots(figsize=(10, 6))\n ax1.set_xlabel(\"Training steps\")\n ax1.set_ylabel(\"Accuracy\")\n ax1.plot(self.train_accs, label=\"train acc\")\n ax1.plot(self.test_accs, label=\"test acc\")\n\n if env_wise:\n for i_env in range(self.n_env):\n ax1.plot(self.env_train_accs[i_env], label=\"train acc - env %i\" % (i_env + 1))\n\n plt.legend()\n plt.show()\n\n fig, ax1 = plt.subplots(figsize=(10, 6))\n ax1.set_xlabel(\"Training steps\")\n ax1.set_ylabel(\"Loss\")\n\n if env_wise:\n for i_env in range(self.n_env):\n ax1.plot(self.losses[i_env], label=\"loss - env %i\" % (i_env + 1))\n\n plt.legend()\n plt.show()\n\n fig, ax1 = plt.subplots(figsize=(10, 6))\n ax1.set_xlabel(\"Training steps\")\n ax1.set_ylabel(\"Gradient norms\")\n\n if env_wise:\n for i_env in range(self.n_env):\n ax1.plot(self.grad_norms[i_env], label=\"gradient norm - env %i\" % (i_env + 1))\n\n plt.legend()\n plt.show()\n\n def mean_plot(self, env_wise):\n n = len(self.train_accs)\n train_accs = [np.mean([self.train_accs[2*i], self.train_accs[2*i+1]]) for i in range(n//2)]\n test_accs = [np.mean([self.test_accs[2*i], self.test_accs[2*i+1]]) for i in range(n//2)]\n env_train_accs = [[np.mean([self.env_train_accs[i_env][2*i], self.env_train_accs[i_env][2*i + 1]])\n for i in range(n // 2)] for i_env in range(self.n_env)]\n\n plt.figure(figsize=(10, 6))\n plt.xlabel(\"Training steps pairs\")\n plt.ylabel(\"Mean accuracy\")\n\n plt.plot(train_accs, label=\"mean train acc\")\n plt.plot(test_accs, label=\"mean test acc\")\n\n if env_wise:\n for i_env in range(self.n_env):\n plt.plot(env_train_accs[i_env], label=\"train - env %i\" % (i_env + 1))\n\n plt.legend()\n plt.show()\n\n\nclass TensorflowIrmGame(AbstractIrmGame):\n @staticmethod\n def to_array(data):\n return data\n\n @staticmethod\n def to_tensor(data):\n return data\n\n @staticmethod\n def zeros(shape):\n return tf.zeros(shape, dtype=tf.float32)\n\n def predict(self, x, shape, keep_grad_idx, as_array):\n x = 
self.to_tensor(x)\n y = self.zeros(shape)\n env_preds = []\n\n for i_env in range(self.n_env):\n env_pred = self.models[i_env](x)\n y = y + (1./self.n_env) * env_pred\n env_preds.append(env_pred)\n\n if as_array:\n y = self.to_array(y)\n for i_env in range(self.n_env):\n env_preds[i_env] = self.to_array(env_preds[i_env])\n\n return y, env_preds\n\n def loss(self, x, y, i_env):\n with tf.GradientTape() as tape:\n y_, _ = self.predict(x=x, shape=(y.shape[0], 2), keep_grad_idx=None, as_array=False)\n loss_value = self.keras_criterion(y_true=y, y_pred=y_)\n\n return tape.gradient(loss_value, self.models[i_env].trainable_variables), loss_value\n\n def zero_grad(self):\n pass\n\n def update_optimizer(self, i_env):\n self.optimizers[i_env].apply_gradients(zip(self.grads[i_env], self.models[i_env].trainable_variables))\n\n\nclass PytorchIrmGame(AbstractIrmGame):\n @staticmethod\n def to_array(data):\n return data.data.numpy()\n\n @staticmethod\n def to_tensor(data):\n return torch.tensor(data, dtype=torch.float32)\n\n @staticmethod\n def zeros(shape):\n return torch.zeros(shape, dtype=torch.float32)\n\n def predict(self, x, shape, keep_grad_idx, as_array):\n x = self.to_tensor(x)\n y = self.zeros(shape)\n env_preds = []\n\n if not as_array:\n for i_env in range(self.n_env):\n if i_env == keep_grad_idx:\n env_pred = self.models[i_env](x)\n else:\n with torch.no_grad():\n env_pred = self.models[i_env](x)\n\n y = y + (1. / self.n_env) * env_pred\n env_preds.append(env_pred.detach())\n\n else:\n with torch.no_grad():\n for i_env in range(self.n_env):\n env_pred = self.models[i_env](x)\n\n y = y + (1. / self.n_env) * env_pred\n env_preds.append(env_pred)\n\n if as_array:\n y = self.to_array(y)\n for i_env in range(self.n_env):\n env_preds[i_env] = self.to_array(env_preds[i_env])\n\n return y, env_preds\n\n def loss(self, x, y, i_env):\n pred, _ = self.predict(x=x, shape=(y.shape[0], 2), keep_grad_idx=i_env, as_array=False)\n y = torch.tensor(y).squeeze()\n\n loss = self.torch_criterion(target=y, input=pred)\n loss.backward()\n\n return None, loss.item()\n\n def zero_grad(self):\n for i_env in range(self.n_env):\n self.optimizers[i_env].zero_grad()\n\n def update_optimizer(self, i_env):\n self.optimizers[i_env].step()\n\n\nclass fixed_irm_game_model:\n def __init__(self, model_list, learning_rate, num_epochs, batch_size, termination_acc, warm_start, beta_1=0.9):\n self.model_list = model_list # list of models for all the environments\n self.num_epochs = num_epochs # number of epochs\n self.batch_size = batch_size # batch size for each gradient update\n self.termination_acc = termination_acc # threshold on accuracy below which we terminating\n self.warm_start = warm_start # minimum number of steps we have to train before terminating due to accuracy\n # falling below threshold\n self.learning_rate = learning_rate # learning rate in adam\n self.beta_1 = beta_1\n\n def fit(self, data_tuple_list):\n n_e = len(data_tuple_list) # number of environments\n # combine the data from the different environments x_in: combined data from environments, y_in: combined\n # labels from environments, e_in: combined environment indices from environments\n x_in = data_tuple_list[0][0]\n for i in range(1, n_e):\n x_c = data_tuple_list[i][0]\n x_in = np.concatenate((x_in, x_c), axis=0)\n y_in = data_tuple_list[0][1]\n for i in range(1, n_e):\n y_c = data_tuple_list[i][1]\n y_in = np.concatenate((y_in, y_c), axis=0)\n e_in = data_tuple_list[0][2]\n for i in range(1, n_e):\n e_c = data_tuple_list[i][2]\n e_in = 
np.concatenate((e_in, e_c), axis=0)\n\n # cross entropy loss\n\n def loss_comb(model_list, x, y):\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n n_e = len(model_list)\n y_ = tf.zeros_like(y, dtype=tf.float32)\n # predict the model output from the ensemble\n for i in range(n_e):\n model_i = model_list[i]\n y_ = y_ + 0.5 * model_i(x)\n\n return loss_object(y_true=y, y_pred=y_)\n\n # gradient of cross entropy loss for environment e\n def grad_comb(model_list, inputs, targets, e):\n with tf.GradientTape() as tape:\n loss_value = loss_comb(model_list, inputs, targets)\n return loss_value, tape.gradient(loss_value, model_list[e].trainable_variables)\n\n model_list = self.model_list\n learning_rate = self.learning_rate\n beta_1 = self.beta_1\n\n # initialize optimizer for all the environments and representation learner and store it in a list\n optimizer_list = []\n for e in range(n_e):\n optimizer_list.append(tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=beta_1))\n\n ####### train\n train_accuracy_results_0 = [] # list to store training accuracy\n\n flag = False\n num_epochs = self.num_epochs\n batch_size = self.batch_size\n num_examples = data_tuple_list[0][0].shape[0]\n period = n_e\n termination_acc = self.termination_acc\n warm_start = self.warm_start\n steps = 0\n for epoch in range(num_epochs):\n print(\"Epoch: \" + str(epoch))\n datat_list = []\n for e in range(n_e):\n x_e = data_tuple_list[e][0]\n y_e = data_tuple_list[e][1]\n datat_list.append(shuffle(x_e, y_e))\n count = 0\n for offset in tqdm(range(0, num_examples, batch_size)):\n end = offset + batch_size\n batch_x_list = [] # list to store batches for each environment\n batch_y_list = [] # list to store batches of labels for each environment\n loss_value_list = [] # list to store loss values\n grads_list = [] # list to store gradients\n countp = count % period # countp decides the index of the model which trains in the current step\n for e in range(n_e):\n batch_x_list.append(datat_list[e][0][offset:end, :])\n batch_y_list.append(datat_list[e][1][offset:end, :])\n loss_value, grads = grad_comb(model_list, batch_x_list[e], batch_y_list[e], e)\n grads_list.append(grads)\n # update the environment whose turn it is to learn\n optimizer_list[countp].apply_gradients(zip(grads_list[countp], model_list[countp].trainable_variables))\n\n # computing training accuracy\n y_ = tf.zeros_like(y_in, dtype=tf.float32)\n for e in range(n_e):\n y_ = y_ + model_list[e](x_in)\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n acc_train = np.float(epoch_accuracy(y_in, y_))\n train_accuracy_results_0.append(acc_train)\n\n if steps >= warm_start and acc_train < termination_acc: ## Terminate after warm start and train\n # acc touches threshold we dont want it to fall below\n flag = True\n print(\"Early termination.\")\n break\n\n count = count + 1\n steps = steps + 1\n self.train_accuracy_results = train_accuracy_results_0\n\n if flag:\n break\n\n self.model_list = model_list\n\n self.x_in = x_in\n self.y_in = y_in\n\n def evaluate(self, data_tuple_test):\n ##### evaluations jmtd\n x_test = data_tuple_test[0]\n y_test = data_tuple_test[1]\n x_in = self.x_in\n y_in = self.y_in\n\n model_list = self.model_list\n n_e = len(model_list)\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n\n ytr_ = tf.zeros_like(y_in, dtype=tf.float32)\n for e in range(n_e):\n ytr_ = ytr_ + model_list[e](x_in)\n train_acc = 
np.float(train_accuracy(y_in, ytr_))\n\n yts_ = tf.zeros_like(y_test, dtype=tf.float32)\n for e in range(n_e):\n yts_ = yts_ + model_list[e](x_test)\n\n test_acc = np.float(test_accuracy(y_test, yts_))\n\n self.train_acc = train_acc\n self.test_acc = test_acc\n\n\nclass no_oscillation_irm_game_model:\n def __init__(self, model_list, learning_rate, num_epochs, batch_size, termination_acc, warm_start):\n\n self.model_list = model_list # list of models for all the environments\n self.num_epochs = num_epochs # number of epochs\n self.batch_size = batch_size # batch size for each gradient update\n self.termination_acc = termination_acc # threshold on accuracy below which we terminating\n self.warm_start = warm_start # minimum number of steps we have to train before terminating due to accuracy\n # falling below threshold\n self.learning_rate = learning_rate # learning rate in adam\n\n def fit(self, data_tuple_list):\n n_e = len(data_tuple_list) # number of environments\n # combine the data from the different environments x_in: combined data from environments, y_in: combined\n # labels from environments, e_in: combined environment indices from environments\n x_in = data_tuple_list[0][0]\n for i in range(1, n_e):\n x_c = data_tuple_list[i][0]\n x_in = np.concatenate((x_in, x_c), axis=0)\n y_in = data_tuple_list[0][1]\n for i in range(1, n_e):\n y_c = data_tuple_list[i][1]\n y_in = np.concatenate((y_in, y_c), axis=0)\n e_in = data_tuple_list[0][2]\n for i in range(1, n_e):\n e_c = data_tuple_list[i][2]\n e_in = np.concatenate((e_in, e_c), axis=0)\n\n # cross entropy loss\n\n def loss_comb(model_list, x, y):\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n n_e = len(model_list)\n y_ = tf.zeros_like(y, dtype=tf.float32)\n # predict the model output from the ensemble\n for i in range(n_e):\n model_i = model_list[i]\n y_ = y_ + 0.5 * model_i(x)\n\n return loss_object(y_true=y, y_pred=y_)\n\n # gradient of cross entropy loss for environment e\n def grad_comb(model_list, inputs, targets, e):\n with tf.GradientTape() as tape:\n loss_value = loss_comb(model_list, inputs, targets)\n return loss_value, tape.gradient(loss_value, model_list[e].trainable_variables)\n\n model_list = self.model_list\n learning_rate = self.learning_rate\n\n # initialize optimizer for all the environments and representation learner and store it in a list\n optimizer_list = []\n for e in range(n_e):\n optimizer_list.append(tf.keras.optimizers.Adam(learning_rate=learning_rate))\n\n ####### train\n\n train_accuracy_results_0 = [] # list to store training accuracy\n\n flag = 'false'\n num_epochs = self.num_epochs\n batch_size = self.batch_size\n num_examples = data_tuple_list[0][0].shape[0]\n period = n_e\n termination_acc = self.termination_acc\n warm_start = self.warm_start\n steps = 0\n for epoch in range(num_epochs):\n print(\"Epoch: \" + str(epoch))\n datat_list = []\n for e in range(n_e):\n x_e = data_tuple_list[e][0]\n y_e = data_tuple_list[e][1]\n datat_list.append(shuffle(x_e, y_e))\n count = 0\n for offset in range(0, num_examples, batch_size):\n end = offset + batch_size\n batch_x_list = [] # list to store batches for each environment\n batch_y_list = [] # list to store batches of labels for each environment\n loss_value_list = [] # list to store loss values\n grads_list = [] # list to store gradients\n countp = count % period # countp decides the index of the model which trains in the current step\n for e in range(n_e):\n batch_x_list.append(datat_list[e][0][offset:end, :])\n 
batch_y_list.append(datat_list[e][1][offset:end, :])\n loss_value, grads = grad_comb(model_list, batch_x_list[e], batch_y_list[e], e)\n grads_list.append(grads)\n # update the environment whose turn it is to learn\n optimizer_list[countp].apply_gradients(zip(grads_list[countp], model_list[countp].trainable_variables))\n\n # computing training accuracy\n y_ = tf.zeros_like(y_in, dtype=tf.float32)\n for e in range(n_e):\n y_ = y_ + model_list[e](x_in)\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n acc_train = np.float(epoch_accuracy(y_in, y_))\n train_accuracy_results_0.append(acc_train)\n\n if (\n steps >= warm_start and acc_train < termination_acc): ## Terminate after warm start and train\n # acc touches threshold we dont want it to fall below\n flag = 'true'\n break\n\n count = count + 1\n steps = steps + 1\n self.train_accuracy_results = train_accuracy_results_0\n if (flag == 'true'):\n break\n self.model_list = model_list\n\n self.x_in = x_in\n self.y_in = y_in\n\n def evaluate(self, data_tuple_test):\n ##### evaluations jmtd\n x_test = data_tuple_test[0]\n y_test = data_tuple_test[1]\n x_in = self.x_in\n y_in = self.y_in\n\n model_list = self.model_list\n n_e = len(model_list)\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n\n ytr_ = tf.zeros_like(y_in, dtype=tf.float32)\n for e in range(n_e):\n ytr_ = ytr_ + model_list[e](x_in)\n train_acc = np.float(train_accuracy(y_in, ytr_))\n\n yts_ = tf.zeros_like(y_test, dtype=tf.float32)\n for e in range(n_e):\n yts_ = yts_ + model_list[e](x_test)\n\n test_acc = np.float(test_accuracy(y_test, yts_))\n\n self.train_acc = train_acc\n self.test_acc = test_acc\n\n\nclass variable_irm_game_model:\n def __init__(self, model_list, learning_rate, num_epochs, batch_size, termination_acc, warm_start):\n self.model_list = model_list # list of models for the environments and representation learner\n self.num_epochs = num_epochs # number of epochs\n self.batch_size = batch_size # batch size for each gradient update\n self.termination_acc = termination_acc # threshold on accuracy below which we terminate\n self.warm_start = warm_start # minimum number of steps before terminating\n self.learning_rate = learning_rate # learning rate for Adam optimizer\n\n def fit(self, data_tuple_list):\n n_e = len(data_tuple_list) # number of environments\n # combine the data from the different environments x_in: combined data (features) from environments, y_in:\n # combined labels from environments, e_in: combined environment indices from environments\n x_in = data_tuple_list[0][0]\n for i in range(1, n_e):\n x_c = data_tuple_list[i][0]\n x_in = np.concatenate((x_in, x_c), axis=0)\n y_in = data_tuple_list[0][1]\n for i in range(1, n_e):\n y_c = data_tuple_list[i][1]\n y_in = np.concatenate((y_in, y_c), axis=0)\n e_in = data_tuple_list[0][2]\n for i in range(1, n_e):\n e_c = data_tuple_list[i][2]\n e_in = np.concatenate((e_in, e_c), axis=0)\n\n # cross entropy loss\n\n def loss_comb(model_list, x, y):\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n n_e = len(model_list) - 1\n y_ = tf.zeros_like(y, dtype=tf.float32)\n # pass the data from the representation learner\n z = model_list[n_e](x)\n # pass the output from the representation learner into the environments and aggregate them \n for i in range(n_e):\n model_i = model_list[i]\n y_ = y_ + 0.5 * model_i(z)\n\n return loss_object(y_true=y, y_pred=y_)\n\n # gradient of cross entropy loss for 
environment e\n def grad_comb(model_list, inputs, targets, e):\n with tf.GradientTape() as tape:\n loss_value = loss_comb(model_list, inputs, targets)\n return loss_value, tape.gradient(loss_value, model_list[e].trainable_variables)\n\n model_list = self.model_list\n learning_rate = self.learning_rate\n\n # initialize optimizer for all the environments and representation learner and store it in a list\n optimizer_list = []\n for e in range(n_e + 1):\n if (e <= n_e - 1):\n optimizer_list.append(tf.keras.optimizers.Adam(learning_rate=learning_rate))\n if (e == n_e):\n optimizer_list.append(tf.keras.optimizers.Adam(learning_rate=learning_rate * 0.1))\n\n ####### train\n\n train_accuracy_results_0 = [] # list to store training accuracy\n\n flag = 'false'\n num_epochs = self.num_epochs\n batch_size = self.batch_size\n num_examples = data_tuple_list[0][0].shape[0]\n period = n_e + 1 #\n termination_acc = self.termination_acc\n warm_start = self.warm_start\n steps = 0\n for epoch in range(num_epochs):\n print(\"Epoch: \" + str(epoch))\n datat_list = []\n for e in range(n_e + 1):\n if (e <= n_e - 1):\n x_e = data_tuple_list[e][0]\n y_e = data_tuple_list[e][1]\n datat_list.append(shuffle(x_e, y_e))\n if (e == n_e):\n datat_list.append(shuffle(x_in, y_in))\n count = 0\n for offset in range(0, num_examples, batch_size):\n end = offset + batch_size\n batch_x_list = [] # list to store batches for each environment\n batch_y_list = [] # list to store batches of labels for each environment\n loss_value_list = [] # list to store loss values\n grads_list = [] # list to store gradients\n countp = period - 1 - (\n count % period) # countp decides the index of the model which trains in the current step\n for e in range(n_e + 1):\n batch_x_list.append(datat_list[e][0][offset:end, :])\n batch_y_list.append(datat_list[e][1][offset:end, :])\n loss_value, grads = grad_comb(model_list, batch_x_list[e], batch_y_list[e], e)\n grads_list.append(grads)\n\n # update either a representation learner or an environment model \n optimizer_list[countp].apply_gradients(zip(grads_list[countp], model_list[countp].trainable_variables))\n\n # computing training accuracy\n y_ = tf.zeros_like(y_in, dtype=tf.float32)\n z_in = model_list[n_e](x_in)\n for e in range(n_e):\n y_ = y_ + model_list[e](z_in)\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n acc_train = np.float(epoch_accuracy(y_in, y_))\n train_accuracy_results_0.append(acc_train)\n\n if (steps >= warm_start and acc_train < termination_acc): # Terminate after warm start and train\n # accuracy touches threshold we dont want it to fall below\n flag = 'true'\n break\n\n count = count + 1\n steps = steps + 1\n self.train_accuracy_results = train_accuracy_results_0\n\n if (flag == 'true'):\n break\n\n self.model_list = model_list\n\n self.x_in = x_in\n self.y_in = y_in\n\n def evaluate(self, data_tuple_test):\n ##### evaluations jmtd\n x_test = data_tuple_test[0]\n y_test = data_tuple_test[1]\n x_in = self.x_in\n y_in = self.y_in\n\n model_list = self.model_list\n n_e = len(model_list) - 1\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n\n # compute training accuracy\n ytr_ = tf.zeros_like(y_in, dtype=tf.float32)\n z_in = model_list[n_e](x_in)\n for e in range(n_e):\n ytr_ = ytr_ + model_list[e](z_in)\n train_acc = np.float(train_accuracy(y_in, ytr_))\n\n # compute testing accuracy\n z_test = model_list[n_e](x_test)\n yts_ = tf.zeros_like(y_test, dtype=tf.float32)\n for e in 
range(n_e):\n yts_ = yts_ + model_list[e](z_test)\n\n test_acc = np.float(test_accuracy(y_test, yts_))\n\n self.train_acc = train_acc\n self.test_acc = test_acc\n\n\nclass standard_erm_model:\n def __init__(self, model, num_epochs, batch_size, learning_rate):\n\n self.model = model\n self.num_epochs = num_epochs\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n\n def fit(self, data_tuple_list):\n learning_rate = self.learning_rate\n num_epochs = self.num_epochs\n n_e = len(data_tuple_list)\n x_in = data_tuple_list[0][0]\n for i in range(1, n_e):\n x_c = data_tuple_list[i][0]\n x_in = np.concatenate((x_in, x_c), axis=0)\n y_in = data_tuple_list[0][1]\n for i in range(1, n_e):\n y_c = data_tuple_list[i][1]\n y_in = np.concatenate((y_in, y_c), axis=0)\n e_in = data_tuple_list[0][2]\n for i in range(1, n_e):\n e_c = data_tuple_list[i][2]\n e_in = np.concatenate((e_in, e_c), axis=0)\n\n ### fit the model\n model = self.model\n batch_size = self.batch_size\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(x_in, y_in, epochs=num_epochs, batch_size=batch_size)\n\n self.x_in = x_in\n self.y_in = y_in\n\n def evaluate(self, data_tuple_test):\n ##### evaluations jmtd\n x_test = data_tuple_test[0]\n y_test = data_tuple_test[1]\n x_in = self.x_in\n y_in = self.y_in\n\n model = self.model\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n\n ytr_ = model.predict(x_in)\n train_acc = np.float(train_accuracy(y_in, ytr_))\n\n yts_ = model.predict(x_test)\n\n test_acc = np.float(test_accuracy(y_test, yts_))\n\n self.train_acc = train_acc\n self.test_acc = test_acc\n\n\nclass irm_model:\n def __init__(self, model, learning_rate, batch_size, steps_max, steps_threshold, gamma_new):\n self.model = model # initialized model passed\n self.learning_rate = learning_rate # learning rate for Adam optimizer\n self.batch_size = batch_size # batch size per gradient update\n self.steps_max = steps_max # maximum number of gradient steps\n self.steps_threshold = steps_threshold # threshold on the number of steps after which we use penalty gamma_new\n self.gamma_new = gamma_new # penalty value; note penalty is set to 1 initially and gamma_new only kicks in\n # after steps exceeed steps_threshold\n\n def fit(self, data_tuple_list):\n\n n_e = len(data_tuple_list) # number of environments\n # combine the data from the different environments, x_in: combined data (features) from different environments\n x_in = data_tuple_list[0][0]\n for i in range(1, n_e):\n x_c = data_tuple_list[i][0]\n x_in = np.concatenate((x_in, x_c), axis=0)\n y_in = data_tuple_list[0][1]\n for i in range(1, n_e):\n y_c = data_tuple_list[i][1]\n y_in = np.concatenate((y_in, y_c), axis=0)\n e_in = data_tuple_list[0][2]\n for i in range(1, n_e):\n e_c = data_tuple_list[i][2]\n e_in = np.concatenate((e_in, e_c), axis=0)\n\n self.x_in = x_in\n self.y_in = y_in\n\n # cross entropy (we do not use the cross entropy from keras because there are issues when computing gradient\n # of the gradient)\n def cross_entropy_manual(y, y_pred):\n y_p = tf.math.log(tf.nn.softmax(y_pred))\n n_p = np.float(tf.shape(y_p)[0])\n ind_0 = tf.where(y == 0)[:, 0]\n ind_1 = tf.where(y == 1)[:, 0]\n y_p0 = tf.gather(y_p, ind_0)[:, 0]\n y_p1 = tf.gather(y_p, ind_1)[:, 1]\n ent_0 = tf.reduce_sum(y_p0)\n ent_1 = tf.reduce_sum(y_p1)\n total = -(ent_0 + ent_1) / n_p\n return total\n\n # 
cross entropy loss for environment e\n def loss_n(model, x, e, y, w, k):\n index = np.where(e == k)\n y1_ = model(x[index[0]]) * w\n y1 = y[index[0]]\n\n return cross_entropy_manual(y1, y1_)\n\n # gradient of cross entropy loss w.r.t w for environment e\n\n def grad_norm_n(model, x, e, y, w, k):\n with tf.GradientTape() as g:\n g.watch(w)\n loss_value = loss_n(model, x, e, y, w, k)\n return g.gradient(loss_value, w) ** 2\n\n # total cross entropy loss across all environments \n def loss_0(model, x, e, y, w):\n y_ = model(x)\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return loss_object(y_true=y, y_pred=y_)\n\n # sum of cross entropy loss and penalty \n def loss_total(model, x, e, y, w, gamma, n_e):\n loss0 = loss_0(model, x, e, y, w)\n loss_penalty = 0.0\n for k in range(n_e):\n loss_penalty += gamma * grad_norm_n(model, x, e, y, w, k)\n\n return (loss0 + loss_penalty) * (1 / gamma)\n\n # gradient of sum of cross entropy loss and penalty w.r.t model parameters\n\n def grad_total_n(model, x, e, y, w, gamma, n_e):\n with tf.GradientTape() as tape:\n loss_value = loss_total(model, x, e, y, w, gamma, n_e)\n return loss_value, tape.gradient(loss_value, model.trainable_variables)\n\n model = self.model\n learning_rate = self.learning_rate\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n ## train \n train_loss_results = []\n train_accuracy_results = []\n flag = 'false'\n batch_size = self.batch_size\n num_examples = x_in.shape[0]\n gamma = 1.0\n w = tf.constant(1.0)\n steps = 0\n steps_max = self.steps_max\n steps_threshold = self.steps_threshold\n gamma_new = self.gamma_new\n while (steps <= steps_max):\n (xt, yt, et) = shuffle(x_in, y_in, e_in)\n epoch_loss_avg = tf.keras.metrics.Mean()\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n count = 0\n if (steps >= steps_threshold):\n gamma = gamma_new\n for offset in range(0, num_examples, batch_size):\n end = offset + batch_size\n batch_x, batch_y, batch_e = xt[offset:end, :], yt[offset:end, :], et[offset:end, :]\n loss_values, grads = grad_total_n(model, batch_x, batch_e, batch_y, w, gamma, n_e)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n epoch_loss_avg(loss_values)\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n acc_train = np.float(epoch_accuracy(y_in, model(x_in)))\n train_loss_results.append(epoch_loss_avg.result())\n train_accuracy_results.append(epoch_accuracy.result())\n count = count + 1\n steps = steps + 1\n\n def evaluate(self, data_tuple_test):\n x_test = data_tuple_test[0]\n y_test = data_tuple_test[1]\n x_in = self.x_in\n y_in = self.y_in\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n model = self.model\n ytr_ = model.predict(x_in)\n train_acc = np.float(train_accuracy(y_in, ytr_))\n\n yts_ = model.predict(x_test)\n test_acc = np.float(test_accuracy(y_test, yts_))\n\n self.train_acc = train_acc\n self.test_acc = test_acc\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.zeros", "torch.zeros", "tensorflow.reduce_sum", "tensorflow.compat.v1.enable_eager_execution", "matplotlib.pyplot.plot", "numpy.concatenate", "numpy.mean", "torch.no_grad", "tensorflow.where", "numpy.where", "torch.nn.CrossEntropyLoss", "torch.tensor", "tensorflow.gather", "tensorflow.keras.metrics.Mean", "matplotlib.pyplot.figure", "tensorflow.shape", "tensorflow.zeros_like", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "tensorflow.GradientTape", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "sklearn.utils.shuffle", "matplotlib.pyplot.subplots", "tensorflow.keras.optimizers.Adam", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
gilangsamudra/Data_Mining_HousePrices
[ "41355631db1ce680e3cc7f85cbe888cca915c5ab" ]
[ "houseprice-regresion.py" ]
[ "# Bismillah\n# Import all necessary libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom statsmodels.formula.api import ols\n\n# Load dataset\ndf = pd.read_csv('D:/Phyton Code/Data_Mining_HousePrices/kc_house_data.csv')\n\n# # Check data quality\n# df.info()\n# df.head()\n# df.tail()\n# df.describe()\n# df.isnull().any()\n# df.dtypes\n\n# Create a figure object\nfig = plt.figure(figsize=(12, 6))\nsqft = fig.add_subplot(121)\ncost = fig.add_subplot(122)\n\nsqft.hist(df.sqft_living, bins=80)\nsqft.set_xlabel('Ft^2')\nsqft.set_title('Histogram of Housing Prices')\n\ncost.hist(df.price, bins=80)\ncost.set_xlabel('Price ($)')\ncost.set_title(\"Histogram of Housing Prices\")\n\n# Do a simple single variable regresion calculation\nm = ols('price ~ sqft_living', df).fit()\nprint(m.summary())\n\n# Do a multivariate regresion calculation\nmm = ols('price ~ sqft_living + bedrooms + condition + grade', df).fit()\nprint(mm.summary())\n\nsns.jointplot(x='sqft_living', y='price', data=df, kind='reg', fit_reg=True,\n height=7)\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
aliddell/spiketag
[ "f5600126c2c6c9be319e8b808d51ea33be843909" ]
[ "spiketag/view/raster_view.py" ]
[ "import numpy as np\r\nfrom ..base.CLU import CLU\r\nfrom .color_scheme import palette\r\nfrom .scatter_2d_view import scatter_2d_view\r\nfrom vispy import scene, app, visuals\r\nfrom numba import njit, prange\r\n\r\n\r\n@njit(cache=True, parallel=True, fastmath=True)\r\ndef get_population_firing_count(spike_times, fs, t_window=5e-3):\r\n '''\r\n calculate population_firing_rate\r\n '''\r\n _spk_times = spike_times/fs\r\n ts = np.arange(_spk_times[0]+t_window/2, _spk_times[-1]-t_window/2, t_window) \r\n firing_count = np.zeros_like(ts)\r\n for i in prange(ts.shape[0]):\r\n firing_count[i] = np.sum(np.logical_and(_spk_times >= ts[i]-t_window/2,\r\n _spk_times < ts[i]+t_window/2))\r\n pfr = np.zeros((ts.shape[0], 2), np.float32)\r\n pfr[:, 0] = ts\r\n pfr[:, 1] = firing_count\r\n return pfr \r\n\r\n\r\nclass raster_view(scatter_2d_view):\r\n\r\n def __init__(self, fs=25e3, n_units=None, time_tick=1, population_firing_count_ON=True, t_window=5e-3, view_window=10):\r\n super(raster_view, self).__init__(symbol='|', marker_size=6., edge_width=1e-3, second_view=population_firing_count_ON)\r\n super(raster_view, self).attach_xaxis()\r\n self._time_tick = time_tick \r\n self._fs = fs\r\n self._n_units = n_units\r\n self._view_window = view_window\r\n self._second_view = population_firing_count_ON\r\n if self._second_view:\r\n self.attach_yaxis()\r\n self._t_window = t_window\r\n\r\n ### ----------------------------------------------\r\n ### public method \r\n ### ----------------------------------------------\r\n\r\n def set_data(self, spkid_matrix):\r\n '''\r\n spkid_matrix: n*2 matrix, n spikes, first column is #sample, second column is the spike id\r\n '''\r\n self._spike_time = spkid_matrix[:,0] \r\n self._spike_id = spkid_matrix[:,1]\r\n if self._n_units is None: # if not given from user (user can read from fpga.n_units), will use what's there in the data\r\n self._n_units = len(np.unique(self._spike_id)) + 1\r\n print('load {} units'.format(self._n_units))\r\n # self._clu = CLU(spkid_matrix[:,1].astype(np.int64))\r\n\r\n if self._second_view:\r\n self._pfr = get_population_firing_count(self._spike_time, self._fs, self._t_window)\r\n self._draw(self._pfr)\r\n else:\r\n self._draw()\r\n\r\n\r\n def attach_yaxis(self, axis_color=(0,1,1,0.8)):\r\n '''\r\n Provide y axis for the population rate\r\n '''\r\n fg = axis_color\r\n # text show amplitude\r\n self.amp_text = scene.Text(\"\", pos=(0, 0), italic=False, bold=False, anchor_x='left', anchor_y='center',\r\n color=axis_color, font_size=10, parent=self._view2)\r\n self.amp_text.pos = (0, 20)\r\n\r\n # x axis shows time and can be moved horizontally for clipping\r\n self._yaxis = scene.AxisWidget(orientation='left', text_color=fg, axis_color=fg, tick_color=fg)\r\n self._yaxis.stretch = (0, 1)\r\n self._grid.add_widget(self._yaxis, row=10, col=0, row_span=3)\r\n self._yaxis.link_view(self._view2)\r\n\r\n\r\n @property\r\n def binsize(self):\r\n return int(self._fs * self._time_tick)\r\n\r\n def highlight(self, global_idx):\r\n ''' Transform the global idx to the view idx:\r\n Listen the select event from other view, and find the intersect spikes in current clus which selected to display within amplitude view. 
\r\n '''\r\n # find the intersect cluster between other view and amplitude view\r\n local_idx = self._clu.global2local(global_idx)\r\n current_clus = self._clu.select_clus\r\n common_clus = np.intersect1d(current_clus, np.array(local_idx.keys()))\r\n \r\n # the spike idx in parent-class is |cluster1|cluster2|cluster3|....|,\r\n # so the local idx in cluster2 is need to plus len(cluster1)\r\n view_idx = np.array([],dtype='int64')\r\n if len(common_clus) > 0:\r\n for clu in common_clus:\r\n before = current_clus[np.where(current_clus < clu)]\r\n for b in before:\r\n local_idx[clu] += self._clu.index_count[b]\r\n view_idx = np.hstack((view_idx, local_idx[clu]))\r\n \r\n super(raster_view, self)._highlight(view_idx)\r\n\r\n def select(self, view_idx):\r\n ''' \r\n Transfrom the view idx to the global idx.\r\n '''\r\n # all clusters within the view currently\r\n current_clus = self._clu.select_clus\r\n local_idx = {}\r\n \r\n # assign idx to different range |cluster1|cluser2|cluster3|....|\r\n # according the length of cluster\r\n left = 0\r\n for clu in current_clus:\r\n right = left + self._clu.index_count[clu]\r\n index = view_idx[(view_idx>=left)&(view_idx<right)]\r\n if len(index) > 0:\r\n local_idx[clu] = index - left\r\n left = right\r\n global_idx = self._clu.local2global(local_idx)\r\n self._clu.select(global_idx, caller=self.__module__)\r\n\r\n \r\n ### ----------------------------------------------\r\n ### private method \r\n ### ----------------------------------------------\r\n\r\n def _draw(self, pfr=None, delimit=True):\r\n \r\n poses = None\r\n colors = None\r\n self._y_bound = (0., 10.)\r\n span = self._y_bound[1] / self._n_units #len(self._clu.index_id)\r\n\r\n for spk_id in range(self._n_units):\r\n times = self._spike_time[self._spike_id==spk_id]\r\n x, y = times / self.binsize, np.full(times.shape, spk_id * span)\r\n pos = np.column_stack((x,y))\r\n color = np.tile(np.hstack((palette[spk_id],1)),(pos.shape[0],1))\r\n\r\n if poses is None and colors is None:\r\n poses = pos\r\n colors = color\r\n else:\r\n poses = np.concatenate((poses, pos))\r\n colors = np.concatenate((colors, color))\r\n \r\n super(raster_view, self).set_data(pos=poses, colors=colors, delimit=delimit)\r\n\r\n if self._second_view:\r\n # pfr is population firing rate\r\n self._line.set_data(pfr, symbol='o', color='w', edge_color='w',\r\n marker_size=5, face_color=(0.2, 0.2, 1))\r\n # self._view2.camera.set_range()\r\n\r\n\r\n def on_key_press(self, e):\r\n '''\r\n r: reset the camera\r\n '''\r\n if e.text == 'r':\r\n self.set_range()\r\n\r\n def set_range(self):\r\n self._view.camera.set_range()\r\n if self._second_view:\r\n self._view2.camera.set_range()\r\n\r\n\r\n def fromfile(self, filename='./fet.bin'):\r\n '''\r\n load and interact with spike rasters\r\n filename: the file that contains BMI feature-spike packet\r\n '''\r\n fet_packet = np.memmap(filename, dtype=np.int32).reshape(-1,7)\r\n spkid_packet = fet_packet[:, [0,-1]]\r\n spkid_packet = np.delete(spkid_packet, np.where(spkid_packet[:,1]==0), axis=0)\r\n self.set_data(spkid_packet)\r\n self.set_range()\r\n\r\n\r\n def update_fromfile(self, filename='./fet.bin', last_N=8000):\r\n '''\r\n filename: the file that contains BMI feature-spike packet\r\n last_N: only set_data for the last_N spikes in the file\r\n view_window: x second for visualization\r\n '''\r\n try:\r\n fet_packet = np.memmap(filename, dtype=np.int32).reshape(-1,7)\r\n # print(fet_packet.shape)\r\n N = last_N\r\n if fet_packet.shape[0]>N:\r\n spkid_packet = fet_packet[-N:, 
[0,-1]]\r\n spkid_packet = np.delete(spkid_packet, np.where(spkid_packet[:,1]==0), axis=0) \r\n else:\r\n spkid_packet = fet_packet[:, [0,-1]]\r\n spkid_packet = np.delete(spkid_packet, np.where(spkid_packet[:,1]==0), axis=0) \r\n self.set_data(spkid_packet)\r\n xmin = (spkid_packet[-1, 0]-self._view_window*self._fs)/self._fs\r\n xmax = spkid_packet[-1, 0]/self._fs\r\n self._view.camera.set_range(x=(xmin, xmax), y=self._y_bound)\r\n self._view2.camera.set_range(x=(xmin, xmax), y=(0, 3+int(self._pfr[:,1].max())))\r\n except:\r\n pass\r\n" ]
[ [ "numpy.hstack", "numpy.logical_and", "numpy.unique", "numpy.arange", "numpy.memmap", "numpy.full", "numpy.concatenate", "numpy.zeros_like", "numpy.column_stack", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dburkhardt/neurips2021_multimodal_viash
[ "e3449af07749bac6faf32613f91fd149a23250a6", "e3449af07749bac6faf32613f91fd149a23250a6" ]
[ "src/predict_modality/starter_kit/starter_kit_python/script.py", "src/match_modality/methods/dummy_random/script.py" ]
[ "# Dependencies:\n# pip: scikit-learn, anndata, scanpy\n#\n# Python starter kit for the NeurIPS 2021 Single-Cell Competition.\n# Parts with `TODO` are supposed to be changed by you.\n#\n# More documentation:\n#\n# https://viash.io/docs/creating_components/python/\n\nimport logging\nimport anndata as ad\n\nfrom scipy.sparse import csc_matrix\n\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.linear_model import LinearRegression\n\nlogging.basicConfig(level=logging.INFO)\n\n## VIASH START\n# Anything within this block will be removed by `viash` and will be\n# replaced with the parameters as specified in your config.vsh.yaml.\npar = {\n 'input_train_mod1': 'sample_data/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod1.h5ad',\n 'input_train_mod2': 'sample_data/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod2.h5ad',\n 'input_test_mod1': 'sample_data/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod1.h5ad',\n 'distance_method': 'minkowski',\n 'output': 'output.h5ad',\n 'n_pcs': 50,\n}\n## VIASH END\n\n# TODO: change this to the name of your method\nmethod_id = \"python_starter_kit\"\n\nlogging.info('Reading `h5ad` files...')\ninput_train_mod1 = ad.read_h5ad(par['input_train_mod1'])\ninput_train_mod2 = ad.read_h5ad(par['input_train_mod2'])\ninput_test_mod1 = ad.read_h5ad(par['input_test_mod1'])\n\ninput_train = ad.concat(\n {\"train\": input_train_mod1, \"test\": input_test_mod1},\n axis=0,\n join=\"outer\",\n label=\"group\",\n fill_value=0,\n index_unique=\"-\"\n)\n\n# TODO: implement own method\n\n# Do PCA on the input data\nlogging.info('Performing dimensionality reduction on modality 1 values...')\nembedder_mod1 = TruncatedSVD(n_components=50)\nmod1_pca = embedder_mod1.fit_transform(input_train.X)\n\nlogging.info('Performing dimensionality reduction on modality 2 values...')\nembedder_mod2 = TruncatedSVD(n_components=50)\nmod2_pca = embedder_mod2.fit_transform(input_train_mod2.X)\n\n# split dimred back up\nX_train = mod1_pca[input_train.obs['group'] == 'train']\nX_test = mod1_pca[input_train.obs['group'] == 'test']\ny_train = mod2_pca\n\nassert len(X_train) + len(X_test) == len(mod1_pca)\n\n# Get all responses of the training data set to fit the\n# KNN regressor later on.\n#\n# Make sure to use `toarray()` because the output might\n# be sparse and `KNeighborsRegressor` cannot handle it.\n\nlogging.info('Running Linear regression...')\n\nreg = LinearRegression()\n\n# Train the model on the PCA reduced modality 1 and 2 data\nreg.fit(X_train, y_train)\ny_pred = reg.predict(X_test)\n\n# Project the predictions back to the modality 2 feature space\ny_pred = y_pred @ embedder_mod2.components_\n\n# Store as sparse matrix to be efficient. Note that this might require\n# different classifiers/embedders before-hand. 
Not every class is able\n# to support such data structures.\ny_pred = csc_matrix(y_pred)\n\nadata = ad.AnnData(\n X=y_pred,\n obs=input_test_mod1.obs,\n var=input_train_mod2.var,\n uns={\n 'dataset_id': input_train_mod1.uns['dataset_id'],\n 'method_id': method_id,\n },\n)\n\nlogging.info('Storing annotated data...')\nadata.write_h5ad(par['output'], compression = \"gzip\")\n", "import anndata as ad\nimport numpy as np\nimport scipy.sparse\nfrom sklearn.preprocessing import normalize\n\n# VIASH START\npar = {\n \"input_train_mod1\": \"resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod1.h5ad\",\n \"input_train_mod2\": \"resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod2.h5ad\",\n \"input_train_sol\": \"resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_sol.h5ad\",\n \"input_test_mod1\": \"resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod1.h5ad\",\n \"input_test_mod2\": \"resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod2.h5ad\",\n \"output\": \"resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.prediction.h5ad\",\n}\n# VIASH END\n\nprint(\"Load datasets\")\ninput_test_mod1 = ad.read_h5ad(par[\"input_test_mod1\"])\ninput_test_mod2 = ad.read_h5ad(par[\"input_test_mod2\"])\n\n# determine number of values in array\nnum_values = min(1000, input_test_mod1.n_obs) * input_test_mod1.n_obs\nindices = np.random.randint(input_test_mod1.n_obs**2, size=num_values)\n\nmat_x = np.random.rand(num_values)\nmat_i = indices % input_test_mod1.n_obs\nmat_j = (indices / input_test_mod1.n_obs).astype(int)\npairing_matrix = scipy.sparse.csr_matrix(\n (mat_x, (mat_i, mat_j)),\n shape=(input_test_mod1.n_obs, input_test_mod2.n_obs)\n)\n\n# row normalise\nprob_matrix = normalize(pairing_matrix, norm=\"l1\")\n\n# Write out prediction\nprediction = ad.AnnData(\n X=prob_matrix,\n uns={\n \"method_id\": \"dummy_random\",\n \"dataset_id\": input_test_mod1.uns[\"dataset_id\"]\n }\n)\nprediction.write_h5ad(par[\"output\"])\n" ]
[ [ "sklearn.decomposition.TruncatedSVD", "scipy.sparse.csc_matrix", "sklearn.linear_model.LinearRegression" ], [ "sklearn.preprocessing.normalize", "numpy.random.rand", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
posterrieri/mllib
[ "809265573eb5af5c68f92537ed90390795008e40" ]
[ "mllib/supervised/parametric.py" ]
[ "#!/usr/bin/env python3\nimport numpy as np\n\n\nclass LinearRegression:\n \"\"\"Linear regression algorithm\"\"\"\n def fit(self, X, y, lamb=0, add_intercept=True, iters=1000, lr=0.006):\n \"\"\"Fits the training data using normal equation\"\"\"\n if add_intercept:\n X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))\n n, p = X.shape\n self.X = X\n self.y = y\n self.Theta = np.random.randn(n + 1, 1)\n # self.Theta = np.linalg.inv(X.T @ X) @ X.T @ y\n loss_prime = lambda x, y, theta: (x @ theta - y).T @ x\n self._gradient_descent(iters=iters, loss_prime=loss_prime, lr=lr)\n\n def predict(self, X, add_intercept=True):\n \"\"\"Makes predictions on the given data\"\"\"\n if add_intercept:\n X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))\n return X @ self.Theta\n\n def _gradient_descent(self, iters, loss_prime, lr):\n \"\"\"Gradient descent algorithm\"\"\"\n for _ in range(iters):\n grad = loss_prime(self.X, self.y, self.Theta)\n self.Theta -= lr * grad.T\n\n\nclass LogReg:\n def __init__(self, r=0.5):\n self.r = r\n\n def loss(self, x, y):\n expo = np.exp(self.Theta @ x)\n if y == 1:\n return expo/(1+expo)\n else:\n return 1/(1+expo)\n\nclass LogisticRegressionClassifier:\n \"\"\"Logistic regression\"\"\"\n def __init__(self, r=0.5):\n self.r = r\n\n def fit(self, X, y, iters=500, lr=0.01):\n \"\"\"Fits the training data\"\"\"\n self.y = y\n self.X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))\n self.m, self.n = X.shape\n self.Theta = np.random.randn(self.n + 1, 1)\n loss_prime = lambda x, y, theta: 1 / self.m * (self._sigmoid(x @ theta)\n - y).T @ x\n self._gradient_descent(iters=iters, loss_prime=loss_prime, lr=lr)\n\n def predict(self, X):\n \"\"\"Makes prediction\"\"\"\n X = np.column_stack((np.ones((X.shape[0], 1), dtype=X.dtype), X))\n return np.array([[1 if res > self.r else -1]\n for res in self._sigmoid(X @ self.Theta)])\n\n def _sigmoid(self, Z):\n \"\"\"Sigmoid function\"\"\"\n return np.exp(Z) / (1 + np.exp(Z))\n\n def _gradient_descent(self, iters, loss_prime, lr):\n \"\"\"Gradient descent algorithm\"\"\"\n for _ in range(iters):\n grad = loss_prime(self.X, self.y, self.Theta)\n self.Theta -= lr * grad.T\n" ]
[ [ "numpy.exp", "numpy.random.randn", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hyounghk/ArraMon
[ "c8366b01420ac1a32871b898129ccf1e9c0fe6de" ]
[ "src/main_nosim.py" ]
[ "import random\nimport numpy as np\n\nimport torch\nfrom torch import nn, optim\nimport torchvision.transforms as transforms\nfrom data import NADataset, TorchDataset\nimport argparse\nfrom tqdm import tqdm\nfrom decoders_nosim import ActionDecoder\nfrom metric_dtw import DTW\nfrom sim_mul import simulator\nimport os\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='NA Task')\nparser.add_argument('--workers', default=4, type=int, help='num of workers')\nparser.add_argument('--batch_size', default=8, type=int, help='batch size')\nparser.add_argument('--batch_size_val', default=8, type=int, help='batch size')\nparser.add_argument('--num_epochs', default=300, type=int, help='num of epochs')\nparser.add_argument('--max_input', default=150, type=int, help='max input size')\nparser.add_argument('--seed', default=1234, type=int, help='seeds')\nparser.add_argument('--hsz', default=128, type=int, help='hidden size')\nparser.add_argument('--lr', default=0.001, type=float, help='hidden size')\nparser.add_argument('--port', default=1111, type=int, help='port number')\nparser.add_argument('--sim_num', default=4, type=int, help='the num of sims')\n\n\ns_turn = 0\ne_turn = 2\n\ndef get_tuple(args, split, batch_size, shuffle=True, drop_last=True, max_length=100):\n dataset = NADataset(split)\n torch_ds = TorchDataset(dataset, max_length=max_length)\n\n print(\"The size of data split %s is %d\" % (split, len(torch_ds)))\n loader = torch.utils.data.DataLoader(torch_ds,\n batch_size=batch_size, shuffle=shuffle,\n num_workers=args.workers, pin_memory=True,\n drop_last=drop_last)\n\n return dataset, torch_ds, loader\n\ndef calLossAvg(loss):\n seq_len = (loss != 0.0).float().sum(-1)\n loss_avg = loss.sum(-1) / (seq_len + 0.000001)\n loss_avg = loss_avg.mean()\n return loss_avg\n\ndef train(args, sim, model_navi, model_assem, optimizer, train_tuple, valid_seen_tuple):\n \n sim.resnet_extractor.eval()\n train_ds, train_tds, train_loader = train_tuple\n\n loss_func = torch.nn.CrossEntropyLoss(ignore_index=0, reduction='none')\n\n best_dtw_score = 0.0\n\n for epoch in range(args.num_epochs):\n\n iterator = tqdm(enumerate(train_loader), total=len(train_tds)//args.batch_size, unit=\"batch\")\n\n pos_error = 0.0\n total_cnt = 0.0\n tot_init_dist = 0.0\n\n for k, (mapids, insts, actions, paths, rots, objPos) in iterator:\n\n res = sim.mapIDset(mapids, train=True, epoch=epoch)\n\n inst_navi, inst_assem, leng_navi, leng_assem = insts\n\n inst_navi, inst_assem, leng_navi, leng_assem = \\\n inst_navi.cuda(), inst_assem.cuda(), leng_navi.cuda(), leng_assem.cuda() \n\n acts_navi, acts_assem, leng_acts_navi, leng_acts_assem = actions\n acts_navi, acts_assem, leng_acts_navi, leng_acts_assem = \\\n acts_navi.cuda(), acts_assem.cuda(), leng_acts_navi.cuda(), leng_acts_assem.cuda()\n\n path_navi, path_assem, path_leng_navi, path_leng_assem = paths\n rot_navi, rot_assem, rot_leng_navi, rot_leng_assem = rots\n\n pos_navi, pos_assem = objPos\n pos_navi, pos_assem = pos_navi.cuda(), pos_assem.cuda()\n\n bsz = pos_navi.size(0)\n losses_navi = []\n losses_assem = []\n object_pos = torch.zeros(bsz).cuda()\n init_dist = torch.zeros(bsz).cuda()\n\n global s_turn, e_turn\n for i in range(s_turn, e_turn):\n\n optimizer.zero_grad()\n\n A, logit_navi, B, C, D, pos_seq_gt, obj_ids, E, agent_pos = model_navi(epoch, mapids, i, inst_navi[:,i,:], leng_navi[:,i], acts_navi[:,i,:], path_navi[:,i,:], rot_navi[:,i,:], phase='navi')\n\n res = sim.shift(train=True, 
epoch=epoch)\n\n                diff_pow = (pos_navi[:,i,:] - agent_pos.cuda())**2\n                innersum = torch.sum(diff_pow, dim=-1)\n                object_pos += torch.sqrt(innersum)\n\n                diff_pow = (pos_navi[:,i,:] - path_navi[:,i,0,:].cuda())**2\n                innersum = torch.sum(diff_pow, dim=-1)\n                init_dist += torch.sqrt(innersum)\n\n\n                loss_navi = loss_func(logit_navi.contiguous().view(-1, 5), acts_navi[:,i,1:].contiguous().view(-1))\n\n                if True:\n                    bsz = logit_navi.size(0)\n                    loss_navi = loss_navi.view(bsz, -1)\n                    loss_navi = calLossAvg(loss_navi)\n\n                loss_navi.backward()\n                nn.utils.clip_grad_norm_(model_navi.parameters(), 5.)\n                optimizer.step()\n\n\n                optimizer.zero_grad()\n                _, logit_assem, _, _, _, pos_seq_gt, _, obj_place, agent_pos = model_assem(epoch, mapids, i, inst_assem[:,i,:], leng_assem[:,i], acts_assem[:,i,:], path_assem[:,i,:], rot_assem[:,i,:], phase='assembly')\n\n                res = sim.shift(train=True, epoch=epoch)\n\n                loss_assem = loss_func(logit_assem.contiguous().view(-1, 5), acts_assem[:,i,1:].contiguous().view(-1))\n\n                if True:\n                    loss_assem = loss_assem.view(bsz, -1)\n                    loss_assem = calLossAvg(loss_assem)\n                loss_assem.backward()\n                nn.utils.clip_grad_norm_(model_assem.parameters(), 5.)\n                optimizer.step()\n\n                losses_navi.append(loss_navi)\n                losses_assem.append(loss_assem)\n\n            total_cnt += bsz\n            pos_error += object_pos / 3\n            loss_total = sum(losses_navi) + sum(losses_assem)\n            iterator.set_postfix(loss=sum(losses_navi).item())\n\n        dtw_score = evaluation(args, epoch, sim, model_navi, model_assem, valid_seen_tuple)\n\n        if dtw_score > best_dtw_score:\n            # update the best validation score before saving the checkpoint\n            best_dtw_score = dtw_score\n            save(model_navi, model_assem, \"best_model\", epoch)\n\ndef save(model_navi, model_assem, name, epoch):\n    model_navi_path = os.path.join(\"best_models\", '%s_model_navi_%s.pth' % (name, str(epoch)))\n    model_assem_path = os.path.join(\"best_models\", '%s_model_assem_%s.pth' % (name, str(epoch)))\n    torch.save(model_navi.state_dict(), model_navi_path)\n    torch.save(model_assem.state_dict(), model_assem_path)\n\ndef load(model_navi, model_assem, name, epoch):\n    model_navi_path = os.path.join(\"best_models\", '%s_model_navi_%s.pth' % (name, str(epoch)))\n    model_assem_path = os.path.join(\"best_models\", '%s_model_assem_%s.pth' % (name, str(epoch)))\n    model_navi_state_dict = torch.load(model_navi_path)\n    model_assem_state_dict = torch.load(model_assem_path)\n    model_navi.load_state_dict(model_navi_state_dict)\n    model_assem.load_state_dict(model_assem_state_dict)\n\n\ndef evaluation(args, epoch, sim, model_navi, model_assem, valid_tuple, log_name=\"scores.txt\"):\n    with torch.no_grad():\n        valid_ds, valid_tds, valid_loader = valid_tuple\n\n        model_navi.eval()\n        model_assem.eval()\n        sim.resnet_extractor.eval()\n        dtw = DTW()\n\n        total_outter_score = 0.0\n        total_cnt = 0.0\n        pos_error = 0.0\n        pos_error_each = 0.0\n        coc_3_total = 0.0\n        coc_5_total = 0.0\n        coc_7_total = 0.0\n        tot_init_dist = 0.0\n        placement_dist = 0.0\n        placement_error = 0.0\n        placement_error_0 = 0.0\n        placement_error_3 = 0.0\n        placement_error_5 = 0.0\n        placement_error_7 = 0.0\n        placement_success = 0.0\n        placement_success_0 = 0.0\n        placement_success_3 = 0.0\n        placement_success_5 = 0.0\n        placement_success_7 = 0.0\n        pick_score_turn1 = 0.0\n        pick_score_turn2 = 0.0\n\n        dtw_score_each = torch.zeros(3)\n        dtw_score_each_tot = 0.0\n\n        map_path = {}\n\n        iterator = tqdm(enumerate(valid_loader), total=len(valid_tds)//args.batch_size, unit=\"batch\")\n\n        for k, (mapids, insts, actions, paths, rots, objPos) in iterator:\n\n            res = sim.mapIDset(mapids, epoch=epoch)\n\n            inst_navi, inst_assem, leng_navi, leng_assem = 
insts\n inst_navi, inst_assem, leng_navi, leng_assem = \\\n inst_navi.cuda(), inst_assem.cuda(), leng_navi.cuda(), leng_assem.cuda()\n\n acts_navi, acts_assem, leng_acts_navi, leng_acts_assem = actions\n acts_navi, acts_assem, leng_acts_navi, leng_acts_assem = \\\n acts_navi.cuda(), acts_assem.cuda(), leng_acts_navi, leng_acts_assem \n\n path_navi, path_assem, path_leng_navi, path_leng_assem = paths\n rot_navi, rot_assem, rot_leng_navi, rot_leng_assem = rots\n\n\n pos_navi, pos_assem = objPos\n pos_navi, pos_assem = pos_navi.cuda(), pos_assem.cuda()\n\n\n\n pos_seq_navi_list = []\n pos_len_navi_list = []\n pos_seq_navi_list_gt = []\n pos_len_navi_list_gt = []\n\n pos_seq_assem_list = []\n pos_len_assem_list = []\n pos_seq_assem_list_gt = []\n pos_len_assem_list_gt = []\n\n collected_object = [[], []]\n\n bsz = pos_navi.size(0)\n init_dist = torch.zeros(bsz).cuda()\n object_pos = torch.zeros(bsz).cuda()\n object_pos_each = torch.zeros(bsz, 3).cuda()\n coc_0 = torch.zeros(bsz, 2).cuda()\n coc_3 = torch.zeros(bsz, 2).cuda()\n coc_5 = torch.zeros(bsz, 2).cuda()\n coc_7 = torch.zeros(bsz, 2).cuda()\n placement = torch.zeros(bsz, 2, 3).cuda()\n object_dist = torch.zeros(bsz, 2).cuda()\n object_place = torch.zeros(bsz, 2).cuda()\n object_place_0 = torch.zeros(bsz, 2).cuda()\n object_place_3 = torch.zeros(bsz, 2).cuda()\n object_place_5 = torch.zeros(bsz, 2).cuda()\n object_place_7 = torch.zeros(bsz, 2).cuda()\n object_success = torch.zeros(bsz, 2).cuda()\n object_success_0 = torch.zeros(bsz, 2).cuda()\n object_success_3 = torch.zeros(bsz, 2).cuda()\n object_success_5 = torch.zeros(bsz, 2).cuda()\n object_success_7 = torch.zeros(bsz, 2).cuda()\n\n global s_turn, e_turn\n for i in range(s_turn, e_turn):\n _, logit_navi, _, pos_seq_navi, pos_len_navi, pos_seq_gt, obj_ids, obj_place, agent_pos = model_navi(-1, mapids, i, inst_navi[:,i,:], leng_navi[:,i], acts_navi[:,i,:], path_navi[:,i,:], rot_navi[:,i,:], phase='navi')\n\n pos_seq_navi_list.append(pos_seq_navi.cpu())\n pos_len_navi_list.append(pos_len_navi.cpu())\n pos_seq_navi_list_gt.append(pos_seq_gt.cpu())\n collected_object[i].append(sum((obj_ids==i).float()))\n\n\n diff_pow = (pos_navi[:,i,:] - agent_pos.cuda())**2\n innersum = torch.sum(diff_pow, dim=-1)\n object_pos += torch.sqrt(innersum)\n object_pos_each[:, i] = torch.sqrt(innersum)\n\n coc_0[:,i] = (obj_ids==i).float()\n coc_3[:,i] = (torch.sqrt(innersum) < 3.0).float()\n coc_5[:,i] = (torch.sqrt(innersum) < 5.0).float()\n coc_7[:,i] = (torch.sqrt(innersum) < 7.0).float()\n\n\n diff_pow = (pos_navi[:,i,:] - path_navi[:,i,0,:].cuda())**2\n innersum = torch.sum(diff_pow, dim=-1)\n init_dist += torch.sqrt(innersum)\n\n res = sim.shift(epoch=epoch)\n\n _, logit_assem, _, pos_seq_assem, pos_len_assem, pos_seq_gt, _, obj_place, agent_pos = model_assem(-1, mapids, i, inst_assem[:,i,:], leng_assem[:,i], acts_assem[:,i,:], path_assem[:,i,:], rot_assem[:,i,:], phase='assembly')\n\n pos_seq_assem_list.append(pos_seq_assem.cpu())\n pos_len_assem_list.append(pos_len_assem.cpu())\n pos_seq_assem_list_gt.append(pos_seq_gt.cpu())\n\n obj_place[obj_place==-1] = 100\n\n placement[:,i,:] = obj_place.cuda()\n manhattanD = torch.abs(pos_assem[:,i,:] - obj_place.cuda())\n innersum = torch.sum(manhattanD, dim=-1)\n\n object_dist[:,i] += innersum\n object_place[:,i] += 1/(1 + innersum ** 2)\n object_place_0[:,i] += 1/(1 + innersum ** 2)\n object_place_3[:,i] += 1/(1 + innersum ** 2)\n object_place_5[:,i] += 1/(1 + innersum ** 2)\n object_place_7[:,i] += 1/(1 + innersum ** 2)\n object_success[:,i] += (innersum 
== 0).float()\n object_success_0[:,i] += (innersum == 0).float()\n object_success_3[:,i] += (innersum == 0).float()\n object_success_5[:,i] += (innersum == 0).float()\n object_success_7[:,i] += (innersum == 0).float()\n\n\n if True:\n object_place_0[:,i][coc_0[:,i]!=1] = 0\n object_success_0[:,i] *= coc_0[:,i] \n\n object_place_3[:,i][coc_3[:,i]!=1] = 0\n object_success_3[:,i] *= coc_3[:,i] \n\n object_place_5[:,i][coc_5[:,i]!=1] = 0\n object_success_5[:,i] *= coc_5[:,i] \n\n object_place_7[:,i][coc_7[:,i]!=1] = 0\n object_success_7[:,i] *= coc_7[:,i] \n\n\n res = sim.shift(epoch=epoch)\n\n tot_init_dist += init_dist / 2\n pos_error += object_pos / 2\n pos_error_each += object_pos_each\n coc_3_total += coc_3\n coc_5_total += coc_5\n coc_7_total += coc_7\n placement_dist += object_dist \n placement_error += object_place \n placement_error_0 += object_place_0 \n placement_error_3 += object_place_3 \n placement_error_5 += object_place_5 \n placement_error_7 += object_place_7 \n placement_success += object_success\n placement_success_0 += object_success_0\n placement_success_3 += object_success_3\n placement_success_5 += object_success_5\n placement_success_7 += object_success_7\n pick_score_turn1 += sum(collected_object[0])\n pick_score_turn2 += sum(collected_object[1]) \n bsz = path_navi.size(0)\n total_cnt += bsz\n\n for idx in range(bsz):\n\n total_inner_score = 0.0\n\n mapid = mapids[idx].item()\n\n map_path[mapid] = {\"path_gen\":[], \"path_gt\":[], \"dtw\":[], \"path_assem_gen\":[], \"path_assem_gt\":[], \"ptc\":[], \"ctc0\":[], \"ctc3\":[], \"ctc5\":[], \"ctc7\":[], \"placement\":[]}\n\n for j in range(s_turn, e_turn):\n\n dtw_score = dtw(pos_seq_navi_list[j][idx], pos_seq_navi_list_gt[j][idx],\n pos_len_navi_list[j][idx], path_leng_navi[idx][j], metric='ndtw')\n\n path_gen = pos_seq_navi_list[j][idx][:pos_len_navi_list[j][idx]].tolist()\n path_gt = pos_seq_navi_list_gt[j][idx][:path_leng_navi[idx][j]].tolist()\n\n\n path_assem_gen = pos_seq_assem_list[j][idx][:pos_len_assem_list[j][idx]].tolist()\n path_assem_gt = pos_seq_assem_list_gt[j][idx][:path_leng_assem[idx][j]].tolist()\n\n map_path[mapid][\"path_gen\"].append(path_gen)\n map_path[mapid][\"path_gt\"].append(path_gt)\n map_path[mapid][\"path_assem_gen\"].append(path_assem_gen)\n map_path[mapid][\"path_assem_gt\"].append(path_assem_gt)\n map_path[mapid][\"dtw\"].append(dtw_score.item())\n map_path[mapid][\"ptc\"].append(object_success[idx,j].item())\n map_path[mapid][\"ctc0\"].append(coc_0[idx,j].item())\n map_path[mapid][\"ctc3\"].append(coc_3[idx,j].item())\n map_path[mapid][\"ctc5\"].append(coc_5[idx,j].item())\n map_path[mapid][\"ctc7\"].append(coc_7[idx,j].item())\n map_path[mapid][\"placement\"].append(placement[idx,j].tolist())\n\n total_inner_score += dtw_score\n dtw_score_each[j] += dtw_score\n\n total_inner_score /= 2\n total_outter_score += total_inner_score\n\n\n dtw_avg = total_outter_score / total_cnt\n pick_score_avg = (pick_score_turn1 + pick_score_turn2)/2/ total_cnt\n with open(\"evalScores/\" + log_name, 'a') as f:\n print(\"epoch\", epoch, file=f)\n print(\"eval target-agent init dist\", torch.sum(tot_init_dist)/total_cnt, file=f)\n print(\"eval target-agent dist\", torch.sum(pos_error)/total_cnt, file=f)\n print(file=f)\n print(\"eval CTC-3 1 turn\", torch.sum(coc_3_total[:,0])/total_cnt, file=f)\n print(\"eval CTC-3 2 turn\", torch.sum(coc_3_total[:,1])/total_cnt, file=f)\n print(\"eval CTC-3 total\", torch.sum(coc_3_total)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval CTC-5 1 turn\", 
torch.sum(coc_5_total[:,0])/total_cnt, file=f)\n print(\"eval CTC-5 2 turn\", torch.sum(coc_5_total[:,1])/total_cnt, file=f)\n print(\"eval CTC-5 total\", torch.sum(coc_5_total)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval CTC-7 1 turn\", torch.sum(coc_7_total[:,0])/total_cnt, file=f)\n print(\"eval CTC-7 2 turn\", torch.sum(coc_7_total[:,1])/total_cnt, file=f) \n print(\"eval CTC-7 total\", torch.sum(coc_7_total)/2/total_cnt, file=f) \n print(file=f)\n print(\"eval target-agent dist 1 turn\", torch.sum(pos_error_each[:,0])/total_cnt, file=f)\n print(\"eval target-agent dist 2 turn\", torch.sum(pos_error_each[:,1])/total_cnt, file=f)\n print(file=f)\n\n print(\"eval placement dist 1 turn\", torch.sum(placement_dist[:,0])/total_cnt, file=f)\n print(\"eval placement dist 2 turn\", torch.sum(placement_dist[:,1])/total_cnt, file=f)\n print(\"eval placement dist total\", torch.sum(placement_dist)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval rPOD 1 turn\", torch.sum(placement_error[:,0])/total_cnt, file=f)\n print(\"eval rPOD 2 turn\", torch.sum(placement_error[:,1])/total_cnt, file=f)\n print(\"eval rPOD total\", torch.sum(placement_error)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval rPOD_0 1 turn\", torch.sum(placement_error_0[:,0])/total_cnt, file=f)\n print(\"eval rPOD_0 2 turn\", torch.sum(placement_error_0[:,1])/total_cnt, file=f)\n print(\"eval rPOD_0 total\", torch.sum(placement_error_0)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval rPOD_3 1 turn\", torch.sum(placement_error_3[:,0])/total_cnt, file=f)\n print(\"eval rPOD_3 2 turn\", torch.sum(placement_error_3[:,1])/total_cnt, file=f)\n print(\"eval rPOD_3 total\", torch.sum(placement_error_3)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval rPOD_5 1 turn\", torch.sum(placement_error_5[:,0])/total_cnt, file=f)\n print(\"eval rPOD_5 2 turn\", torch.sum(placement_error_5[:,1])/total_cnt, file=f)\n print(\"eval rPOD_5 total\", torch.sum(placement_error_5)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval rPOD_7 1 turn\", torch.sum(placement_error_7[:,0])/total_cnt, file=f)\n print(\"eval rPOD_7 2 turn\", torch.sum(placement_error_7[:,1])/total_cnt, file=f)\n print(\"eval rPOD_7 total\", torch.sum(placement_error_7)/2/total_cnt, file=f)\n print(file=f)\n \n\n print(\"eval PTC 1 turn\", torch.sum(placement_success[:,0])/total_cnt, file=f)\n print(\"eval PTC 2 turn\", torch.sum(placement_success[:,1])/total_cnt, file=f)\n print(\"eval PTC total\", torch.sum(placement_success)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval PTC_0 1 turn\", torch.sum(placement_success_0[:,0])/total_cnt, file=f)\n print(\"eval PTC_0 2 turn\", torch.sum(placement_success_0[:,1])/total_cnt, file=f)\n print(\"eval PTC_0 total\", torch.sum(placement_success_0)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval PTC_3 1 turn\", torch.sum(placement_success_3[:,0])/total_cnt, file=f)\n print(\"eval PTC_3 2 turn\", torch.sum(placement_success_3[:,1])/total_cnt, file=f)\n print(\"eval PTC_3 total\", torch.sum(placement_success_3)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval PTC_5 1 turn\", torch.sum(placement_success_5[:,0])/total_cnt, file=f)\n print(\"eval PTC_5 2 turn\", torch.sum(placement_success_5[:,1])/total_cnt, file=f)\n print(\"eval PTC_5 total\", torch.sum(placement_success_5)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval PTC_7 1 turn\", torch.sum(placement_success_7[:,0])/total_cnt, file=f)\n print(\"eval PTC_7 2 turn\", torch.sum(placement_success_7[:,1])/total_cnt, file=f)\n print(\"eval PTC_7 total\", 
torch.sum(placement_success_7)/2/total_cnt, file=f)\n print(file=f)\n print(\"eval DTW 1 turn\", dtw_score_each[0]/total_cnt, file=f)\n print(\"eval DTW 2 turn\", dtw_score_each[1]/total_cnt, file=f)\n print(\"eval DTW total\", (dtw_score_each[0] + dtw_score_each[1])/2/total_cnt, file=f)\n # print(\"dtw_avg\", dtw_avg, file=f)\n print(\"pick_score 1 turn\", pick_score_turn1/total_cnt, file=f)\n print(\"pick_score 2 turn\", pick_score_turn2/total_cnt, file=f)\n print(\"pick_score_avg\", pick_score_avg, file=f)\n\n\n model_navi.train()\n model_assem.train()\n sim.resnet_extractor.eval()\n return dtw_avg\n\nif __name__ == '__main__':\n args = parser.parse_args()\n print(args)\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n train_tuple = get_tuple(args, 'train', args.batch_size, shuffle=True, drop_last=True, max_length=args.max_input)\n val_seen_tuple = get_tuple(args, 'valid_seen', args.batch_size_val, shuffle=False, drop_last=True, max_length=args.max_input)\n args.ntoken = train_tuple[0].tok.vocab_size\n sim = simulator([args.port], args.sim_num)\n res = sim.reset()\n assert 'd' in res\n\n model_navi = ActionDecoder(sim, args.hsz, args.ntoken).cuda()\n model_assem = ActionDecoder(sim, args.hsz, args.ntoken).cuda()\n\n optimizer = optim.Adam(list(model_navi.parameters()) + list(model_assem.parameters()),\\\n lr=args.lr)\n\n model_navi = nn.DataParallel(model_navi)\n model_assem = nn.DataParallel(model_assem)\n\n train(args, sim, model_navi, model_assem, optimizer, train_tuple, val_seen_tuple)\n\n\n\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "numpy.random.seed", "torch.load", "torch.cuda.manual_seed", "matplotlib.use", "torch.manual_seed", "torch.zeros", "torch.utils.data.DataLoader", "torch.sum", "torch.sqrt", "torch.no_grad", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bantnd/pytorch-vsumm-reinforce
[ "d042203d5dd03086d53ef2ff7dde9999cb81e22f" ]
[ "utils/generate_dataset.py" ]
[ "\"\"\"\n Generate Dataset\n\n 1. Converting video to frames\n 2. Extracting features\n 3. Getting change points\n 4. User Summary ( for evaluation )\n\n\"\"\"\nimport sys\nsys.path.append('.')\nsys.path.append('../networks')\nfrom networks.CNN import ResNet\nfrom utils.KTS.cpd_auto import cpd_auto\nfrom tqdm import tqdm\nimport math\nimport cv2\nimport numpy as np\nimport h5py\nimport argparse\nfrom pathlib import Path\nimport shutil\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-p', '--path', type=str, default='data/original', help=\"path of video file.\")\nparser.add_argument('-f', '--frame', type=str, default='data/frames', help=\"path of frame, where will extract from videos\")\nparser.add_argument('--h5-gen', type=str, default='data/eccv16_dataset_tiktok2_google_pool5.h5', help=\"path to h5 generated file\")\nargs = parser.parse_args()\n\n#frame options\nWIDTH=224\nHIGH=224\nT_FPS=5 #fps after extracted video\n\nclass Generate_Dataset:\n def __init__(self, video_path,frame_path, save_path):\n self.resnet = ResNet()\n self.dataset = {}\n self.video_list = []\n self.video_path = ''\n #self.frame_root_path = './frames'\n self.frame_root_path = Path(__file__).resolve().parent.parent/Path(frame_path)\n self.h5_file = h5py.File(save_path, 'w')\n\n self._set_video_list(Path(__file__).resolve().parent.parent/Path(video_path))\n print('Video path : {} H5 autogen path : {}'.format(video_path, save_path))\n\n def _set_video_list(self, video_path):\n if Path.is_dir(video_path):\n self.video_path = video_path\n self.video_list = sorted(Path.iterdir(video_path))\n #self.video_list.sort()\n else:\n self.video_path = ''\n self.video_list.append(video_path)\n\n for idx, file_name in enumerate(self.video_list):\n self.dataset['video_{}'.format(idx+1)] = {}\n self.h5_file.create_group('video_{}'.format(idx+1))\n\n\n def _extract_feature(self, frame):\n res_pool5 = self.resnet(frame)\n frame_feat = res_pool5.cpu().data.numpy().flatten()\n\n return frame_feat\n\n def _get_change_points(self, video_feat, n_frame, fps):\n print('n_frame {} fps {}'.format(n_frame, fps))\n n = n_frame / math.ceil(fps)\n m = int(math.ceil(n/2.0))\n K = np.dot(video_feat, video_feat.T)\n change_points, _ = cpd_auto(K, m, 1)\n change_points = np.concatenate(([0], change_points, [n_frame-1]))\n\n temp_change_points = []\n for idx in range(len(change_points)-1):\n segment = [change_points[idx], change_points[idx+1]-1]\n if idx == len(change_points)-2:\n segment = [change_points[idx], change_points[idx+1]]\n\n temp_change_points.append(segment)\n change_points = np.array(list(temp_change_points))\n\n temp_n_frame_per_seg = []\n for change_points_idx in range(len(change_points)):\n n_frame = change_points[change_points_idx][1] - change_points[change_points_idx][0]\n temp_n_frame_per_seg.append(n_frame)\n n_frame_per_seg = np.array(list(temp_n_frame_per_seg))\n\n return change_points, n_frame_per_seg\n\n # TODO : save dataset\n def _save_dataset(self):\n pass\n\n def generate_dataset(self):\n for video_idx, video_filename in enumerate(tqdm(self.video_list)):\n #for video_idx, video_filename in enumerate(self.video_list):\n video_path = video_filename\n #if Path.is_dir(self.video_path):\n # video_path = os.path.join(self.video_path, video_filename)\n\n #video_basename = os.path.basename(video_path).split('.')[0]\n video_basename = Path(video_path).stem\n\n if not Path.exists(self.frame_root_path/Path(video_basename)):\n Path.mkdir(self.frame_root_path/Path(video_basename),parents=True)\n else:\n 
shutil.rmtree(self.frame_root_path/Path(video_basename))\n                Path.mkdir(self.frame_root_path/Path(video_basename),parents=True)\n\n            video_capture = cv2.VideoCapture(str(video_path))\n\n            fps = video_capture.get(cv2.CAP_PROP_FPS)\n            n_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\n            #frame_list = []\n            picks = []\n            video_feat = None\n            video_feat_for_train = None\n            for frame_idx in tqdm(range(n_frames-1)):\n                success, frame = video_capture.read()\n                if success:\n                    # only convert/resize the frame after a successful read\n                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n                    frame = cv2.resize(frame, (WIDTH, HIGH))\n                    frame_feat = self._extract_feature(frame)\n\n                    if frame_idx % round(fps/T_FPS) == 0:\n                        picks.append(frame_idx)\n\n                        if video_feat_for_train is None:\n                            video_feat_for_train = frame_feat\n                        else:\n                            video_feat_for_train = np.vstack((video_feat_for_train, frame_feat))\n\n                        img_filename = \"{}.jpg\".format(str(frame_idx).zfill(6))\n                        cv2.imwrite(str(self.frame_root_path/ video_basename/ img_filename), frame)\n\n                    if video_feat is None:\n                        video_feat = frame_feat\n                    else:\n                        video_feat = np.vstack((video_feat, frame_feat))\n\n                else:\n                    break\n\n            video_capture.release()\n\n            change_points, n_frame_per_seg = self._get_change_points(video_feat, n_frames, fps)\n\n            # self.dataset['video_{}'.format(video_idx+1)]['frames'] = list(frame_list)\n            # self.dataset['video_{}'.format(video_idx+1)]['features'] = list(video_feat)\n            # self.dataset['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))\n            # self.dataset['video_{}'.format(video_idx+1)]['n_frames'] = n_frames\n            # self.dataset['video_{}'.format(video_idx+1)]['fps'] = fps\n            # self.dataset['video_{}'.format(video_idx+1)]['change_points'] = change_points\n            # self.dataset['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg\n\n            self.h5_file['video_{}'.format(video_idx+1)]['features'] = list(video_feat_for_train)\n            self.h5_file['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))\n            self.h5_file['video_{}'.format(video_idx+1)]['n_frames'] = n_frames\n            self.h5_file['video_{}'.format(video_idx+1)]['fps'] = fps\n            self.h5_file['video_{}'.format(video_idx+1)]['change_points'] = change_points\n            self.h5_file['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg\n\nif __name__ == \"__main__\":\n    gen = Generate_Dataset(args.path,args.frame, args.h5_gen)\n    gen.generate_dataset()\n    gen.h5_file.close()\n" ]
[ [ "numpy.concatenate", "numpy.dot", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
saobangmath/CZ4034-Aspect-Classification-Model
[ "2a5e1d1e26bb6ec524bd13f3adc03bcc57ab74b9" ]
[ "utils/utils.py" ]
[ "import time\nimport inspect\n\nimport torch\nfrom loguru import logger\n\n\ndef to_device(x, device):\n if not isinstance(x, dict):\n return x\n\n new_x = {}\n\n for k, v in x.items():\n if isinstance(v, torch.Tensor):\n new_v = v.to(device)\n elif isinstance(v, (tuple, list)) and len(v) > 0 and isinstance(v[0], torch.Tensor):\n new_v = [i.to(device) for i in v]\n else:\n new_v = v\n\n new_x[k] = new_v\n\n return new_x\n\n\ndef aggregate_dict(x):\n \"\"\"Aggregate a list of dict to form a new dict\"\"\"\n agg_x = {}\n\n for ele in x:\n assert isinstance(ele, dict)\n\n for k, v in ele.items():\n if k not in agg_x:\n agg_x[k] = []\n\n if isinstance(v, (tuple, list)):\n agg_x[k].extend(list(v))\n else:\n agg_x[k].append(v)\n\n # Stack if possible\n new_agg_x = {}\n for k, v in agg_x.items():\n try:\n v = torch.cat(v, dim=0)\n except Exception:\n pass\n new_agg_x[k] = v\n\n return new_agg_x\n\n\ndef raise_or_warn(action, msg):\n if action == \"raise\":\n raise ValueError(msg)\n else:\n logger.warning(msg)\n\n\nclass ConfigComparer:\n \"\"\"Compare two config dictionaries. Useful for checking when resuming from\n previous session.\"\"\"\n\n _to_raise_error = [\n \"model->model_name_or_path\"\n ]\n _to_warn = [\n \"model->config_name\", \"model->tokenizer_name\", \"model->cache_dir\", \"model->freeze_base_model\", \"model->fusion\",\n \"model->lambdas\"\n ]\n\n def __init__(self, cfg_1, cfg_2):\n self.cfg_1 = cfg_1\n self.cfg_2 = cfg_2\n\n def compare(self):\n for components, action in \\\n [(self._to_raise_error, \"raise\"), (self._to_warn, \"warn\")]:\n for component in components:\n curr_scfg_1, curr_scfg_2 = self.cfg_1, self.cfg_2 # subconfigs\n for key in component.split(\"->\"):\n if key not in curr_scfg_1 or key not in curr_scfg_2:\n raise ValueError(\n f\"Component {component} not found in config file.\")\n curr_scfg_1 = curr_scfg_1[key]\n curr_scfg_2 = curr_scfg_2[key]\n if curr_scfg_1 != curr_scfg_2:\n msg = (f\"Component {component} is different between \"\n f\"two config files\\nConfig 1: {curr_scfg_1}\\n\"\n f\"Config 2: {curr_scfg_2}.\")\n raise_or_warn(action, msg)\n return True\n\n\ndef collect(config, args, collected):\n \"\"\"Recursively collect each argument in `args` from `config` and write to\n `collected`.\"\"\"\n if not isinstance(config, dict):\n return\n\n keys = list(config.keys())\n for arg in args:\n if arg in keys:\n if arg in collected: # already collected\n raise RuntimeError(f\"Found repeated argument: {arg}\")\n collected[arg] = config[arg]\n\n for key, sub_config in config.items():\n collect(sub_config, args, collected)\n\n\ndef from_config(main_args=None, requires_all=False):\n \"\"\"Wrapper for all classes, which wraps `__init__` function to take in only\n a `config` dict, and automatically collect all arguments from it. An error\n is raised when duplication is found. Note that keyword arguments are still\n allowed, in which case they won't be collected from `config`.\n\n Parameters\n ----------\n main_args : str\n If specified (with \"a->b\" format), arguments will first be collected\n from this subconfig. If there are any arguments left, recursively find\n them in the entire config. 
Multiple main args are to be separated by\n \",\".\n requires_all : bool\n Whether all function arguments must be found in the config.\n \"\"\"\n global_main_args = main_args\n if global_main_args is not None:\n global_main_args = global_main_args.split(\",\")\n global_main_args = [args.split(\"->\") for args in global_main_args]\n\n def decorator(init):\n init_args = inspect.getfullargspec(init)[0][1:] # excluding self\n\n def wrapper(self, config=None, main_args=None, **kwargs):\n # Add config to self\n if config is not None:\n self.config = config\n\n # Get config from self\n elif getattr(self, \"config\", None) is not None:\n config = self.config\n\n if main_args is None:\n main_args = global_main_args\n else:\n # Overwrite global_main_args\n main_args = main_args.split(\",\")\n main_args = [args.split(\"->\") for args in main_args]\n\n collected = kwargs # contains keyword arguments\n not_collected = [arg for arg in init_args if arg not in collected]\n # Collect from main args\n if config is not None and main_args is not None \\\n and len(not_collected) > 0:\n for main_arg in main_args:\n sub_config = config\n for arg in main_arg:\n if arg not in sub_config:\n break # break when `main_args` is invalid\n sub_config = sub_config[arg]\n else:\n collect(sub_config, not_collected, collected)\n not_collected = [arg for arg in init_args\n if arg not in collected]\n if len(not_collected) == 0:\n break\n # Collect from the rest\n not_collected = [arg for arg in init_args if arg not in collected]\n if config is not None and len(not_collected) > 0:\n collect(config, not_collected, collected)\n # Validate\n if requires_all and (len(collected) < len(init_args)):\n not_collected = [arg for arg in init_args\n if arg not in collected]\n raise RuntimeError(\n f\"Found missing argument(s) when initializing \"\n f\"{self.__class__.__name__} class: {not_collected}.\")\n # Call function\n return init(self, **collected)\n return wrapper\n return decorator\n\n\nclass Timer:\n def __init__(self):\n self.global_start_time = time.time()\n self.start_time = None\n self.last_interval = None\n self.accumulated_interval = None\n\n def start(self):\n assert self.start_time is None\n self.start_time = time.time()\n\n def end(self):\n assert self.start_time is not None\n self.last_interval = time.time() - self.start_time\n self.start_time = None\n\n # Update accumulated interval\n if self.accumulated_interval is None:\n self.accumulated_interval = self.last_interval\n else:\n self.accumulated_interval = (\n 0.9 * self.accumulated_interval + 0.1 * self.last_interval)\n\n def get_last_interval(self):\n return self.last_interval\n\n def get_accumulated_interval(self):\n return self.accumulated_interval\n\n def get_total_time(self):\n return time.time() - self.global_start_time\n\ndef compute_metrics_from_inputs_and_outputs(inputs, outputs, confidence_threshold=0.5, show_progress=False,\n output_acc=True):\n if isinstance(inputs, dict):\n inputs = [inputs]\n if isinstance(outputs, dict):\n outputs = [outputs]\n\n input_ids_all = []\n\n food_score_preds_all, food_existence_preds_all = [], []\n service_score_preds_all, service_existence_preds_all = [], []\n price_score_preds_all, price_existence_preds_all = [], []\n if output_acc:\n food_score_label_all, service_score_label_all, price_score_label_all = [], [], []\n\n if show_progress:\n from tqdm import tqdm\n else:\n tqdm = lambda x, **kwargs: x\n\n for inputs_i, outputs_i in tqdm(zip(inputs, outputs), desc=\"Processing predictions\"): # by batch\n input_ids = 
inputs_i[\"input_ids\"]\n input_ids_all.append(input_ids)\n\n # Groundtruths\n if output_acc:\n food_score_label = inputs_i[\"food_score_label\"]\n service_score_label = inputs_i[\"service_score_label\"]\n price_score_label = inputs_i[\"price_score_label\"]\n\n # Predictions\n food_score_preds, food_existence_preds = outputs_i[\"food_score_preds\"], outputs_i[\"food_existence_preds\"]\n service_score_preds, service_existence_preds = outputs_i[\"service_score_preds\"], outputs_i[\"service_existence_preds\"]\n price_score_preds, price_existence_preds = outputs_i[\"price_score_preds\"], outputs_i[\"price_existence_preds\"]\n\n # Aggregate\n food_score_preds_all.append(food_score_preds)\n food_existence_preds_all.append(food_existence_preds)\n service_score_preds_all.append(service_score_preds)\n service_existence_preds_all.append(service_existence_preds)\n price_score_preds_all.append(price_score_preds)\n price_existence_preds_all.append(price_existence_preds)\n\n if output_acc:\n food_score_label_all.append(food_score_label)\n service_score_label_all.append(service_score_label)\n price_score_label_all.append(price_score_label)\n\n # Combine results\n food_score_preds_all = torch.cat(food_score_preds_all, dim=0)\n food_existence_preds_all = torch.cat(food_existence_preds_all, dim=0)\n service_score_preds_all = torch.cat(service_score_preds_all, dim=0)\n service_existence_preds_all = torch.cat(service_existence_preds_all, dim=0)\n price_score_preds_all = torch.cat(price_score_preds_all, dim=0)\n price_existence_preds_all = torch.cat(price_existence_preds_all, dim=0)\n if output_acc:\n food_score_label_all = torch.cat(food_score_label_all, dim=0)\n service_score_label_all = torch.cat(service_score_label_all, dim=0)\n price_score_label_all = torch.cat(price_score_label_all, dim=0)\n\n # Calculate accuracy\n if output_acc:\n # Get predictions\n # food\n food_score_preds_all = food_score_preds_all.int()\n food_existence_mask = (food_existence_preds_all > confidence_threshold)\n food_score_preds_all[~food_existence_mask] = 0\n food_score_correct_all = (food_score_preds_all == food_score_label_all)\n food_acc = food_score_correct_all.sum() / float(len(food_score_correct_all)) # scalar\n # service\n service_score_preds_all = service_score_preds_all.int()\n service_existence_mask = (service_existence_preds_all > confidence_threshold)\n service_score_preds_all[~service_existence_mask] = 0\n service_score_correct_all = (service_score_preds_all == service_score_label_all)\n service_acc = service_score_correct_all.sum() / float(len(service_score_correct_all)) # scalar\n # score\n price_score_preds_all = price_score_preds_all.int()\n price_existence_mask = (price_existence_preds_all > confidence_threshold)\n price_score_preds_all[~price_existence_mask] = 0\n price_score_correct_all = (price_score_preds_all == price_score_label_all)\n price_acc = price_score_correct_all.sum() / float(len(price_score_correct_all)) # scalar\n # total accuracy\n total_acc = (food_acc + service_acc + price_acc) / 3\n\n acc = {\"total_acc\": total_acc, \"food_acc\": food_acc, \"service_acc\": service_acc, \"price_acc\": price_acc}\n return acc\n" ]
[ [ "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Loliver1224/Creative-Experiment
[ "14857078995afca729c9f2935c7e741a8be42edc" ]
[ "main.py" ]
[ "import pandas as pd\nimport numpy as np\nimport pyper\nfrom graphviz import Source\nimport webbrowser\nimport tempfile\nfrom os import getcwd\nfrom shutil import copy2 as copyfile\nimport sys\n\nfrom SAM import SAM\nfrom testdata import make_test_data\n\nnp.set_printoptions(precision=2, floatmode='fixed', suppress=True)\nnp.random.seed(0)\n\n\"\"\" --- make pyper instance ---\n\"\"\"\nr = pyper.R(use_pandas='True')\n\n\"\"\" --- import csv file ---\n\"\"\"\nprint(\"current: \" + getcwd())\n# filename = input(\"csv file name? >> \")\n# df = pd.read_csv(filename)\ndf = make_test_data(size=2000)\n\ncols = list(df.columns)\n\n\"\"\" --- data pre-processing ---\n\"\"\"\n\n\"\"\" --- run SAM ---\n\"\"\"\n# セルフ推論\n# causal_discovered_data = np.array([[0, 0, 0, 1],\n# [0, 0, 0, 1],\n# [1, 1, 0, 1],\n# [0, 0, 0, 0]])\n\ncausal_discovered_data = SAM(train_epochs=10000, test_epochs=1000).predict(in_data=df, run_times=16)\n# テスト用\n# causal_discovered_data = np.array([[0, 0, 0, 0],\n# [1, 0, 0, 0],\n# [1, 0, 0, 0],\n# [1, 1, 1, 0]])\n# 転置(すなわち因果関係を逆方向に)したら適合度上がることも\n# causal_discovered_data = causal_discovered_data.transpose()\n\nprint(pd.DataFrame(data=causal_discovered_data, index=cols, columns=cols))\nthreshold = float(input(\"threshold? >> \"))\ncausal_discovered_data = causal_discovered_data >= threshold\n\n\"\"\" --- forward dataframe to R as \"data\" ---\n\"\"\"\nr.assign(\"data\", df)\n\n\"\"\" --- calc corr ---\n\"\"\"\nr(\"corr <- cor(data)\")\n\n\"\"\" --- import libraries used in R ---\n\"\"\"\nr(\"library(sem)\")\nr(\"library(DiagrammeR)\")\n\n\"\"\" --- make SEM model ---\n\"\"\"\nmodel_text = \"model <- specifyModel(text=\\\"\\n\"\ncausal_index = list(zip(*np.where(causal_discovered_data)))\ncounter = 1\nfor a, b in causal_index:\n if causal_discovered_data[b, a]:\n if a < b:\n model_text += f\"{cols[a]} <-> {cols[b]},b{counter},NA\\n\"\n else:\n model_text += f\"{cols[a]} -> {cols[b]},b{counter},NA\\n\"\n counter += 1\ncounter = 1\nfor val in cols:\n model_text += f\"{val} <-> {val},e{counter},NA\\n\"\n counter += 1\nmodel_text += \"\\\")\"\n\nprint(model_text)\nr(model_text)\n\nr(\"ans <- sem(model, corr, nrow(data))\")\n# print(r('stdCoef(ans)'))\nprint(r('summary(ans,fit.indices = c(\"GFI\",\"AGFI\",\"SRMR\",\"RMSEA\"))'))\n\n\"\"\" --- rendering ---\n\"\"\"\nwith tempfile.TemporaryDirectory() as tempdir:\n # 作業ディレクトリを一時ディレクトリに設定\n tempdir = tempdir.replace('\\\\', '/')\n r('setwd(\\\"' + tempdir + '\\\")')\n r('pathDiagram(ans, \"output_diagram\", output.type = \"graphics\",\\\n ignore.double = FALSE, edge.labels = \"values\", digits = 3)')\n\n output_format = input(\"output format? (png / svg / pdf) >> \")\n output_filename = input(\"output file name? >> \") + '.' + output_format\n try:\n Source.from_file(filename=tempdir + \"/output_diagram.dot\", format=output_format, engine='dot').render()\n copyfile(tempdir + \"/output_diagram.dot.\" + output_format, output_filename)\n except Exception as e:\n print(\"RenderingError: パス図の出力において例外が発生しました\")\n print(e)\n sys.exit(1)\n finally:\n r('setwd(\\\"' + getcwd() + '\\\"')\n print(\"Rendering Successed.\")\n\n\"\"\" --- show rendered Diagram in web browser ---\n\"\"\"\n# Chrome指定\nbrowser = webbrowser.get(\n '\"C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\" %s')\nbrowser.open(\"file:///\" + getcwd() + \"/\" + output_filename)\n" ]
[ [ "numpy.set_printoptions", "numpy.where", "numpy.random.seed", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
j20232/moco_image_pipeline
[ "997ae76e795548e75f95e862284c1fc0a3c7541a" ]
[ "mcp/augmentation/album.py" ]
[ "import numpy as np\nfrom PIL import Image, ImageOps, ImageEnhance\nimport albumentations as A\n\n# ndarray: H x W x C\n\n\ndef apply_aug(aug, image):\n return aug(image=image)[\"image\"]\n\n\n# ----------------------------------- Blur -------------------------------------------\nclass RandomBlur():\n def __init__(self, prob, blur_limit=9):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.blur_limit = blur_limit\n\n def __call__(self, img):\n if np.random.uniform() < self.prob:\n r = np.random.uniform()\n if r < 0.4:\n img = apply_aug(A.Blur(blur_limit=self.blur_limit, always_apply=True), img)\n elif r < 0.6:\n img = apply_aug(A.GaussianBlur(blur_limit=self.blur_limit, always_apply=True), img)\n else:\n img = apply_aug(A.MotionBlur(blur_limit=self.blur_limit, always_apply=True), img)\n return img\n\n\n# ----------------------------------- Noise -------------------------------------------\n\nclass GaussNoise():\n def __init__(self, prob, var_limit=(0.0, 0.07)):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.var_limit = var_limit\n\n def __call__(self, img):\n return apply_aug(A.GaussNoise(var_limit=self.var_limit, p=self.prob), img)\n\n\nclass MultiplicativeNoise():\n def __init__(self, prob, var_limit=(0.6, 1.1)):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.var_limit = var_limit\n\n def __call__(self, img):\n return apply_aug(A.MultiplicativeNoise(multiplier=self.var_limit, p=self.prob), img)\n\n\n# ---------------------------------- Distortion ---------------------------------------\n\nclass GridDistortion():\n def __init__(self, prob, num_steps=10, distort_limit=0.7):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.num_steps = num_steps\n self.distort_limit = distort_limit\n\n def __call__(self, img):\n return apply_aug(A.GridDistortion(p=self.prob, num_steps=self.num_steps,\n distort_limit=self.distort_limit), img)\n\n\nclass ElasticTransform():\n def __init__(self, prob, sigma=40, alpha=1, alpha_affine=15):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.sigma = sigma\n self.alpha = alpha\n self.alpha_affine = alpha_affine\n\n def __call__(self, img):\n return apply_aug(A.ElasticTransform(p=self.prob, sigma=self.sigma,\n alpha=self.alpha, alpha_affine=self.alpha_affine), img)\n\n\nclass ShiftScaleRotate():\n def __init__(self, prob, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20):\n self.prob = prob\n self.shift_limit = shift_limit\n self.scale_limit = scale_limit\n self.rotate_limit = rotate_limit\n\n def __call__(self, img):\n return apply_aug(A.ShiftScaleRotate(p=self.prob, shift_limit=self.shift_limit,\n scale_limit=self.scale_limit,\n rotate_limit=self.rotate_limit), img)\n\n# ----------------------------------- Histogram ----------------------------------------\n\n\nclass HueSaturationValue():\n def __init__(self, prob, hue_shift_limit=20, sat_shift_limit=40, val_shift_limit=100):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.hue_shift_limit = hue_shift_limit\n self.sat_shift_limit = sat_shift_limit\n self.val_shift_limit = val_shift_limit\n\n def __call__(self, img):\n out = img if img.dtype == \"uint8\" else (img * 255).astype(np.uint8)\n\n out = apply_aug(A.HueSaturationValue(p=self.prob, hue_shift_limit=self.hue_shift_limit,\n sat_shift_limit=self.sat_shift_limit,\n val_shift_limit=self.val_shift_limit), out)\n return out if img.dtype == \"uint8\" else (out / 255).astype(np.float64)\n\n\nclass RandomBrightnessContrast():\n def __init__(self, prob, brightness_limit=2.0, contrast_limit=0.6):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.brightness_limit = brightness_limit\n 
self.contrast_limit = contrast_limit\n\n def __call__(self, img):\n return apply_aug(A.RandomBrightnessContrast(p=self.prob,\n brightness_limit=self.brightness_limit,\n contrast_limit=self.contrast_limit,\n brightness_by_max=False,\n ), img)\n\n\nclass RandomCLAHE():\n def __init__(self, prob, clip_limit=40.0, tile_grid_size=(16, 16)):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.clip_limit = clip_limit\n self.tile_grid_size = tile_grid_size\n\n def __call__(self, img):\n out = img if img.dtype == \"uint8\" else (img * 255).astype(np.uint8)\n out = apply_aug(A.CLAHE(p=self.prob, clip_limit=self.clip_limit,\n tile_grid_size=self.tile_grid_size), out)\n return out if img.dtype == \"uint8\" else (out / 255).astype(np.float64)\n\n\n# ------------------------------------- Removal ------------------------------------------\n\nclass CoarseDropout():\n def __init__(self, prob, max_holes=10, max_height=12, max_width=12):\n self.prob = np.clip(prob, 0.0, 1.0)\n self.max_holes = max_holes\n self.max_height = max_height\n self.max_width = max_width\n\n def __call__(self, img):\n return apply_aug(A.CoarseDropout(p=self.prob, max_holes=self.max_holes,\n max_height=self.max_height, max_width=self.max_width,\n fill_value=np.median(img)), img)\n\n\n# ------------------------------------------- Augmix -------------------------------------------\n# Reference: https://www.kaggle.com/haqishen/augmix-based-on-albumentations\n\n\ndef int_parameter(level, maxval):\n \"\"\"Helper function to scale `val` between 0 and maxval .\n Args:\n level: Level of the operation that will be between [0, `PARAMETER_MAX`].\n maxval: Maximum value that the operation can have. This will be scaled to\n level/PARAMETER_MAX.\n Returns:\n An int that results from scaling `maxval` according to `level`.\n \"\"\"\n return int(level * maxval / 10)\n\n\ndef float_parameter(level, maxval):\n \"\"\"Helper function to scale `val` between 0 and maxval.\n Args:\n level: Level of the operation that will be between [0, `PARAMETER_MAX`].\n maxval: Maximum value that the operation can have. 
This will be scaled to\n level/PARAMETER_MAX.\n Returns:\n A float that results from scaling `maxval` according to `level`.\n \"\"\"\n return float(level) * maxval / 10.\n\n\ndef sample_level(n):\n return np.random.uniform(low=0.1, high=n)\n\n\ndef autocontrast(pil_img, _):\n return ImageOps.autocontrast(pil_img)\n\n\ndef equalize(pil_img, _):\n return ImageOps.equalize(pil_img)\n\n\ndef posterize(pil_img, level):\n level = int_parameter(sample_level(level), 4)\n return ImageOps.posterize(pil_img, 4 - level)\n\n\ndef rotate(pil_img, level):\n degrees = int_parameter(sample_level(level), 30)\n if np.random.uniform() > 0.5:\n degrees = -degrees\n return pil_img.rotate(degrees, resample=Image.BILINEAR)\n\n\ndef solarize(pil_img, level):\n level = int_parameter(sample_level(level), 256)\n return ImageOps.solarize(pil_img, 256 - level)\n\n\ndef shear_x(pil_img, level):\n level = float_parameter(sample_level(level), 0.3)\n if np.random.uniform() > 0.5:\n level = -level\n return pil_img.transform(pil_img.size,\n Image.AFFINE, (1, level, 0, 0, 1, 0),\n resample=Image.BILINEAR)\n\n\ndef shear_y(pil_img, level):\n level = float_parameter(sample_level(level), 0.3)\n if np.random.uniform() > 0.5:\n level = -level\n return pil_img.transform(pil_img.size,\n Image.AFFINE, (1, 0, 0, level, 1, 0),\n resample=Image.BILINEAR)\n\n\ndef translate_x(pil_img, level):\n level = int_parameter(sample_level(level), pil_img.size[0] / 3)\n if np.random.random() > 0.5:\n level = -level\n return pil_img.transform(pil_img.size,\n Image.AFFINE, (1, 0, level, 0, 1, 0),\n resample=Image.BILINEAR)\n\n\ndef translate_y(pil_img, level):\n level = int_parameter(sample_level(level), pil_img.size[0] / 3)\n if np.random.random() > 0.5:\n level = -level\n return pil_img.transform(pil_img.size,\n Image.AFFINE, (1, 0, 0, 0, 1, level),\n resample=Image.BILINEAR)\n\n\n# operation that overlaps with ImageNet-C's test set\ndef color(pil_img, level):\n level = float_parameter(sample_level(level), 1.8) + 0.1\n return ImageEnhance.Color(pil_img).enhance(level)\n\n\n# operation that overlaps with ImageNet-C's test set\ndef contrast(pil_img, level):\n level = float_parameter(sample_level(level), 1.8) + 0.1\n return ImageEnhance.Contrast(pil_img).enhance(level)\n\n\n# operation that overlaps with ImageNet-C's test set\ndef brightness(pil_img, level):\n level = float_parameter(sample_level(level), 1.8) + 0.1\n return ImageEnhance.Brightness(pil_img).enhance(level)\n\n\n# operation that overlaps with ImageNet-C's test set\ndef sharpness(pil_img, level):\n level = float_parameter(sample_level(level), 1.8) + 0.1\n return ImageEnhance.Sharpness(pil_img).enhance(level)\n\n\ndef normalize(image):\n \"\"\"Normalize input image channel-wise to zero mean and unit variance.\"\"\"\n return image - 127\n\n\ndef apply_op(image, op, severity):\n # image = np.clip(image, 0, 255)\n pil_img = Image.fromarray(image) # Convert to PIL.Image\n pil_img = op(pil_img, severity)\n return np.asarray(pil_img)\n\n\ndef augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):\n \"\"\"Perform AugMix augmentations and compute mixture.\n Args:\n image: Raw input image as float32 np.ndarray of shape (h, w, c)\n severity: Severity of underlying augmentation operators (between 1 to 10).\n width: Width of augmentation chain\n depth: Depth of augmentation chain. 
-1 enables stochastic depth uniformly\n from [1, 3]\n alpha: Probability coefficient for Beta and Dirichlet distributions.\n Returns:\n mixed: Augmented and mixed image.\n \"\"\"\n\n augmentations = [\n autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,\n translate_x, translate_y\n ]\n\n ws = np.float32(np.random.dirichlet([alpha] * width))\n m = np.float32(np.random.beta(alpha, alpha))\n\n mix = np.zeros_like(image).astype(np.float32)\n for i in range(width):\n image_aug = image.copy()\n depth = depth if depth > 0 else np.random.randint(1, 4)\n for _ in range(depth):\n op = np.random.choice(augmentations)\n image_aug = apply_op(image_aug, op, severity)\n # Preprocessing commutes since all coefficients are convex\n mix += ws[i] * image_aug\n # mix += ws[i] * normalize(image_aug)\n\n mixed = (1 - m) * image + m * mix\n # mixed = (1 - m) * normalize(image) + m * mix\n return mixed\n\n\nclass RandomAugMix():\n\n def __init__(self, prob=0.1, severity=2, width=3, depth=2, alpha=1.):\n self.prob = prob\n self.severity = severity\n self.width = width\n self.depth = depth\n self.alpha = alpha\n\n def __call__(self, img):\n if np.random.uniform() > self.prob:\n return img\n tmp = (img * 255).astype(np.uint8) if img.dtype != \"uint8\" else img\n out = augment_and_mix(tmp, self.severity, self.width, self.depth, self.alpha)\n if type(img) is np.ndarray:\n if img.dtype != \"uint8\":\n out = (out / 255).astype(np.float64)\n return out\n" ]
[ [ "numpy.random.random", "numpy.random.beta", "numpy.clip", "numpy.asarray", "numpy.random.choice", "numpy.median", "numpy.zeros_like", "numpy.random.uniform", "numpy.random.dirichlet", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mokadyr/structural-analogy
[ "70bbfa183b00a9bf103e493019486a17285e105a" ]
[ "train.py" ]
[ "\nimport models\nimport os\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nimport math\n\nimport sys\nfrom PIL import Image\nimport torchvision\nimport argparse\nimport random\nfrom utils import adjust_scales2image, generate_noise2, calc_gradient_penalty\nfrom imresize import imresize2\nimport os.path as osp\nimport torchvision.utils as vutils\n\n\ndef draw_concat(Gs,reals, NoiseAmp, in_s, mode, opt):\n if len(Gs) > 0:\n if mode == 'rand':\n count = 0\n for G,real_curr,real_next,noise_amp in zip(Gs,reals,reals[1:],NoiseAmp):\n G = G.cuda()\n if count == 0:\n z = generate_noise2([1, 3, real_curr.shape[2], real_curr.shape[3]], device=opt.device)\n G_z = in_s\n else:\n z = generate_noise2([1, opt.nc_z,real_curr.shape[2], real_curr.shape[3]], device=opt.device)\n\n G_z = G_z[:,:,0:real_curr.shape[2],0:real_curr.shape[3]]\n z_in = noise_amp*z+G_z\n if count > opt.switch_scale:\n G_z = G(z_in.detach())\n else:\n G_z = G(z_in.detach(), G_z)\n G_z = imresize2(G_z.detach(),1/opt.scale_factor,opt)\n G_z = G_z[:,:,0:real_next.shape[2],0:real_next.shape[3]]\n count += 1\n\n if mode == 'rec':\n count = 0\n for G,real_curr,real_next,noise_amp in zip(Gs,reals,reals[1:],NoiseAmp):\n G = G.cuda()\n if count == 0:\n size = list(real_curr.size())\n #print(size)\n G_z = generate_noise2(size, device=opt.device)\n G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]\n if count > opt.switch_scale:\n G_z = G(G_z)\n else:\n G_z = G(G_z, G_z)\n G_z = imresize2(G_z.detach(), 1/opt.scale_factor,opt)\n G_z = G_z[:,:,0:real_next.shape[2],0:real_next.shape[3]]\n count += 1\n return G_z\n\n\ndef init_models(opt):\n\n #generator initialization:\n netG = models.Generator_no_res(opt).to(opt.device)\n netG.apply(models.weights_init)\n\n #discriminator initialization:\n netD = models.WDiscriminator(opt).to(opt.device)\n netD.apply(models.weights_init)\n\n return netD, netG\n\n\ndef init_models_res(opt):\n # generator initialization:\n netG = models.Generator(opt).to(opt.device)\n netG.apply(models.weights_init)\n\n # discriminator initialization:\n netD = models.WDiscriminator(opt).to(opt.device)\n netD.apply(models.weights_init)\n\n return netD, netG\n\n\ndef transform_input(img_path, opt):\n\n res = []\n image = Image.open(img_path).convert('RGB')\n for ii in range(0, opt.stop_scale + 1, 1):\n scale = math.pow(opt.scale_factor, opt.stop_scale - ii)\n\n s_size = math.ceil(scale * opt.img_size)\n\n transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize((s_size, s_size)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n\n sample = transform(image)\n res.append(sample.unsqueeze(0))\n\n return res\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu_id', default=0, type=int, help='gpu id, if the value is -1, the cpu is used')\n parser.add_argument('--not_cuda', action='store_true', help='disables cuda', default=0)\n\n # load, input, save configurations:\n parser.add_argument('--load', default='', help=\"path to continue training\")\n parser.add_argument('--manualSeed', type=int, help='manual seed')\n parser.add_argument('--nc_z', type=int, help='noise # channels', default=3)\n parser.add_argument('--nc_im', type=int, help='image # channels', default=3)\n\n # networks hyper parameters:\n parser.add_argument('--nfc', type=int, default=32)\n parser.add_argument('--min_nfc', type=int, default=32)\n parser.add_argument('--ker_size', type=int, help='kernel size', 
default=3)\n parser.add_argument('--num_layer', type=int, help='number of layers', default=5)\n parser.add_argument('--stride', help='stride', default=1)\n parser.add_argument('--padd_size', type=int, help='net pad size', default=0)\n\n # pyramid parameters:\n parser.add_argument('--scale_factor', type=float, help='pyramid scale factor', default=0.75)\n parser.add_argument('--noise_amp_a', type=float, help='addative noise cont weight', default=0.1)\n parser.add_argument('--noise_amp_b', type=float, help='addative noise cont weight', default=0.1)\n parser.add_argument('--min_size', type=int, help='image minimal size at the coarser scale', default=18)\n parser.add_argument('--max_size', type=int, help='image minimal size at the coarser scale', default=250)\n\n # optimization hyper parameters:\n parser.add_argument('--niter', type=int, default=20000, help='number of epochs to train per scale')\n parser.add_argument('--lr_g', type=float, default=0.0005, help='learning rate, default=0.0005')\n parser.add_argument('--lr_d', type=float, default=0.0001, help='learning rate, default=0.0005')\n parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\n parser.add_argument('--lambda_grad', type=float, help='gradient penelty weight', default=0.1)\n parser.add_argument('--alpha', type=float, help='reconstruction loss weight', default=1.0)\n parser.add_argument('--beta', type=float, help='cycle loss weight', default=1.0)\n parser.add_argument('--lambda_g', type=float, default=1.0, help='change ratio between gan loss, multiply by the gan loss of image B')\n\n #main arguments\n parser.add_argument('--input_a', help='input image path', required=True)\n parser.add_argument('--input_b', help='input image path', required=True)\n parser.add_argument('--switch_res', type=int, default=2, help='how many levels will not be residual')\n parser.add_argument('--img_size', type=int, default=220, help='image size of the output')\n parser.add_argument('--out', required=True)\n parser.add_argument('--print_interval', type=int, default=1000)\n opt = parser.parse_args()\n\n if not os.path.exists(opt.out):\n os.makedirs(opt.out)\n\n torch.cuda.set_device(opt.gpu_id)\n\n opt.device = \"cuda:%s\" % opt.gpu_id\n opt.niter_init = opt.niter\n opt.noise_amp_init = opt.noise_amp_a\n opt.nfc_init = opt.nfc\n opt.min_nfc_init = opt.min_nfc\n opt.scale_factor_init = opt.scale_factor\n\n adjust_scales2image(opt.img_size, opt)\n\n if opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\n print(\"Random Seed: \", opt.manualSeed)\n random.seed(opt.manualSeed)\n torch.manual_seed(opt.manualSeed)\n if torch.cuda.is_available() and opt.gpu_id == -1:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n Gs_a = []\n reals_a = []\n NoiseAmp_a = []\n \n Gs_b = []\n reals_b = []\n NoiseAmp_b = []\n \n nfc_prev = 0\n scale_num = 0\n\n r_loss = nn.MSELoss()\n\n data_a = transform_input(opt.input_a, opt)\n data_b = transform_input(opt.input_b, opt)\n\n size_arr = []\n for ii in range(0, opt.stop_scale + 1, 1):\n scale = math.pow(opt.scale_factor, opt.stop_scale - ii)\n size_arr.append(math.ceil(scale * opt.img_size))\n\n opt.switch_scale = opt.stop_scale - opt.switch_res\n\n opt.nzx = size_arr[0]\n opt.nzy = size_arr[0]\n in_s = torch.full([1, opt.nc_z,opt.nzx,opt.nzy], 0, device=opt.device)\n\n if opt.load != '':\n Gs_a = torch.load('%s/Gs_a.pth' % opt.load)\n Gs_b = torch.load('%s/Gs_b.pth' % opt.load)\n NoiseAmp_a = torch.load('%s/NoiseAmp_a.pth' % opt.load)\n 
NoiseAmp_b = torch.load('%s/NoiseAmp_b.pth' % opt.load)\n scale_num = len(Gs_a)\n opt.noise_amp_a = NoiseAmp_a[-1]\n opt.noise_amp_b = NoiseAmp_b[-1]\n print(\"Loading until scale \" + str(scale_num))\n nfc_prev = min(opt.nfc_init * pow(2, math.floor((scale_num-1) / 4)), 128)\n else:\n opt.load = opt.out\n\n while scale_num < opt.stop_scale + 1:\n\n opt.nfc = min(opt.nfc_init * pow(2, math.floor(scale_num / 4)), 128)\n opt.min_nfc = min(opt.min_nfc_init * pow(2, math.floor(scale_num / 4)), 128)\n\n if scale_num > opt.switch_scale:\n D_a, G_a = init_models(opt)\n D_b, G_b = init_models(opt)\n print(\"No Residual layer\")\n else:\n D_a, G_a = init_models_res(opt)\n D_b, G_b = init_models_res(opt)\n print(\"Residual layer\")\n\n if nfc_prev == opt.nfc:\n print(\"Load weights of last layer \" + str(scale_num-1))\n G_a.load_state_dict(torch.load('%s/netG_a_%d.pth' % (opt.load, scale_num-1)))\n D_a.load_state_dict(torch.load('%s/netD_a_%d.pth' % (opt.load, scale_num-1)))\n G_b.load_state_dict(torch.load('%s/netG_b_%d.pth' % (opt.load, scale_num-1)))\n D_b.load_state_dict(torch.load('%s/netD_b_%d.pth' % (opt.load, scale_num-1)))\n opt.load = opt.out\n\n optimizerD = optim.Adam(list(D_a.parameters()) + list(D_b.parameters()), lr=opt.lr_d, betas=(opt.beta1, 0.999))\n optimizerG = optim.Adam(list(G_a.parameters()) + list(G_b.parameters()), lr=opt.lr_g, betas=(opt.beta1, 0.999))\n n_iters = opt.niter\n\n opt.nzx = size_arr[len(Gs_a)]\n opt.nzy = size_arr[len(Gs_a)]\n\n noise_amount_a = 0\n noise_cnt_a = 0\n\n noise_amount_b = 0\n noise_cnt_b = 0\n\n i = 0\n\n for epoch in range(n_iters):\n\n real_a = data_a[len(Gs_a)].cuda()\n\n real_b = data_b[len(Gs_b)].cuda()\n\n noise_ = generate_noise2([1, opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n\n if Gs_a == []:\n noise_a = noise_\n prev_a = torch.full([1, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device)\n else:\n prev_a = draw_concat(Gs_a,list(data_a), NoiseAmp_a, in_s, 'rand', opt)\n noise_a = opt.noise_amp_a * noise_ + prev_a\n\n noise_ = generate_noise2([1, opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n\n if Gs_b == []:\n noise_b = noise_\n prev_b = torch.full([1, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device)\n else:\n prev_b = draw_concat(Gs_b,list(data_b), NoiseAmp_b, in_s, 'rand', opt)\n noise_b = opt.noise_amp_b * noise_ + prev_b\n\n if scale_num > opt.switch_scale:\n fake_a = G_a(noise_a.detach())\n fake_b = G_b(noise_b.detach())\n else:\n fake_a = G_a(noise_a.detach(), prev_a.detach())\n fake_b = G_b(noise_b.detach(), prev_b.detach())\n\n if Gs_a == []:\n z_prev_a = generate_noise2([1, opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n else:\n z_prev_a = draw_concat(Gs_a,list(data_a), NoiseAmp_a, in_s, 'rec', opt)\n\n if epoch == 0 and i == 0:\n if Gs_a == []:\n opt.noise_amp_a = opt.noise_amp_init\n else:\n criterion = nn.MSELoss()\n RMSE = torch.sqrt(criterion(real_a, z_prev_a))\n opt.noise_amp_a = opt.noise_amp_init * RMSE\n\n if Gs_b == []:\n z_prev_b = generate_noise2([1, opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n else:\n z_prev_b = draw_concat(Gs_b,list(data_b), NoiseAmp_b, in_s, 'rec', opt)\n\n if epoch == 0 and i == 0:\n if Gs_b == []:\n opt.noise_amp_b = opt.noise_amp_init\n else:\n criterion = nn.MSELoss()\n RMSE = torch.sqrt(criterion(real_b, z_prev_b))\n opt.noise_amp_b = opt.noise_amp_init * RMSE\n\n i += 1\n\n if scale_num > opt.switch_scale:\n generated_a = G_a(z_prev_a.detach())\n generated_b = G_b(z_prev_b.detach())\n else:\n generated_a = G_a(z_prev_a.detach(), z_prev_a.detach())\n generated_b = 
G_b(z_prev_b.detach(), z_prev_b.detach())\n\n if scale_num > opt.switch_scale:\n mix_g_a = G_a(fake_b)\n mix_g_b = G_b(fake_a)\n else:\n mix_g_a = G_a(fake_b, fake_b)\n mix_g_b = G_b(fake_a, fake_a)\n\n other_noise_a = generate_noise2([1, opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n other_noise_b = generate_noise2([1, opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n\n noisy_real_b = opt.noise_amp_a * other_noise_a + real_b\n noisy_real_a = opt.noise_amp_b * other_noise_b + real_a\n\n\n #############################\n #### Train D_a ####\n #############################\n\n D_a.zero_grad()\n\n output = D_a(real_a).to(opt.device)\n errD_real = -2 * output.mean() # -a\n errD_real.backward(retain_graph=True)\n\n output_a = D_a(mix_g_a.detach())\n output_a2 = D_a(fake_a.detach())\n errD_fake_a = output_a.mean() + output_a2.mean()\n errD_fake_a.backward(retain_graph=True)\n\n gradient_penalty_a = calc_gradient_penalty(D_a, real_a, mix_g_a, opt.lambda_grad, opt.device)\n gradient_penalty_a += calc_gradient_penalty(D_a, real_a, fake_a, opt.lambda_grad, opt.device)\n gradient_penalty_a.backward(retain_graph=True)\n\n #############################\n #### Train D_b ####\n #############################\n\n D_b.zero_grad()\n\n output = D_b(real_b).to(opt.device)\n errD_real = -2 * output.mean() # -a\n errD_real.backward(retain_graph=True)\n\n output_b = D_b(mix_g_b.detach())\n output_b2 = D_b(fake_b.detach())\n errD_fake_b = output_b.mean() + output_b2.mean()\n errD_fake_b.backward(retain_graph=True)\n\n gradient_penalty_b = calc_gradient_penalty(D_b, real_b, mix_g_b, opt.lambda_grad, opt.device)\n gradient_penalty_b += calc_gradient_penalty(D_b, real_b, fake_b, opt.lambda_grad, opt.device)\n gradient_penalty_b.backward(retain_graph=True)\n\n optimizerD.step()\n\n #############################\n #### Train G ####\n #############################\n\n G_a.zero_grad()\n G_b.zero_grad()\n\n output_a = D_a(mix_g_a)\n output_a2 = D_a(fake_a)\n errG_a = -output_a.mean() -output_a2.mean()\n errG_a.backward(retain_graph=True)\n\n output_b = D_b(mix_g_b)\n output_b2 = D_b(fake_b)\n errG_b = opt.lambda_g * (-output_b.mean() -output_b2.mean())\n errG_b.backward(retain_graph=True)\n\n if opt.alpha > 0:\n rec_loss_a = opt.alpha * r_loss(generated_a, real_a)\n rec_loss_a.backward(retain_graph=True)\n\n rec_loss_b = opt.alpha * r_loss(generated_b, real_b)\n rec_loss_b.backward(retain_graph=True)\n\n if opt.beta > 0:\n if scale_num > opt.switch_scale:\n cycle_a = G_a(mix_g_b)\n else:\n cycle_a = G_a(mix_g_b, mix_g_b)\n\n cycle_loss_a = opt.beta * r_loss(cycle_a, fake_a)\n cycle_loss_a.backward(retain_graph=True)\n\n if opt.beta > 0:\n if scale_num > opt.switch_scale:\n cycle_b = G_b(mix_g_a)\n else:\n cycle_b = G_b(mix_g_a, mix_g_a)\n\n cycle_loss_b = opt.beta * r_loss(cycle_b, fake_b)\n cycle_loss_b.backward(retain_graph=True)\n\n optimizerG.step()\n\n if (epoch+1) % opt.print_interval == 0:\n vutils.save_image(fake_a.clone(), osp.join(opt.out, str(scale_num) + \"_fake_a_\" + str(epoch) + \".png\"), normalize=True)\n vutils.save_image(mix_g_a.clone(), osp.join(opt.out, str(scale_num) + \"_b2a_\" + str(epoch) + \".png\"),\n normalize=True)\n\n if epoch == 0:\n vutils.save_image(real_a.clone(), osp.join(opt.out, str(scale_num) + \"_real_a_\" + str(epoch) + \".png\"), normalize=True)\n\n vutils.save_image(fake_b.clone(), osp.join(opt.out, str(scale_num) + \"_fake_b_\" + str(epoch) + \".png\"),\n normalize=True)\n vutils.save_image(mix_g_b.clone(), osp.join(opt.out, str(scale_num) + \"_a2b_\" + str(epoch) + 
\".png\"),\n normalize=True)\n if epoch == 0:\n vutils.save_image(real_b.clone(), osp.join(opt.out, str(scale_num) + \"_real_b_\" + str(epoch) + \".png\"), normalize=True)\n\n print(\"debug imgs saved, scale_num=%0d, epoch=%0d \" % (scale_num, epoch))\n sys.stdout.flush()\n\n if scale_num == opt.stop_scale:\n vutils.save_image(fake_a.clone(), osp.join(opt.out, \"final_fake_a_\" + str(epoch) + \".png\"),\n normalize=True)\n vutils.save_image(mix_g_a.clone(), osp.join(opt.out, \"final_b2a_\" + str(epoch) + \".png\"),\n normalize=True)\n\n vutils.save_image(fake_b.clone(), osp.join(opt.out, \"final_fake_b_\" + str(epoch) + \".png\"),\n normalize=True)\n vutils.save_image(mix_g_b.clone(), osp.join(opt.out, \"final_a2b_\" + str(epoch) + \".png\"),\n normalize=True)\n\n Gs_a.append(G_a)\n NoiseAmp_a.append(opt.noise_amp_a)\n\n torch.save(Gs_a, '%s/Gs_a.pth' % (opt.out))\n torch.save(reals_a, '%s/reals_a.pth' % (opt.out))\n torch.save(NoiseAmp_a, '%s/NoiseAmp_a.pth' % (opt.out))\n\n torch.save(G_a.state_dict(), '%s/netG_a_%d.pth' % (opt.out, scale_num))\n torch.save(D_a.state_dict(), '%s/netD_a_%d.pth' % (opt.out, scale_num))\n\n Gs_b.append(G_b)\n NoiseAmp_b.append(opt.noise_amp_b)\n\n torch.save(Gs_b, '%s/Gs_b.pth' % (opt.out))\n torch.save(reals_b, '%s/reals_b.pth' % (opt.out))\n torch.save(NoiseAmp_b, '%s/NoiseAmp_b.pth' % (opt.out))\n\n torch.save(G_b.state_dict(), '%s/netG_b_%d.pth' % (opt.out, scale_num))\n torch.save(D_b.state_dict(), '%s/netD_b_%d.pth' % (opt.out, scale_num))\n\n print(\"Layer weights saved successfully\")\n\n scale_num += 1\n nfc_prev = opt.nfc\n del D_a, G_a\n del D_b, G_b\n\n\n\n\n\n" ]
[ [ "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
grahamdelafield/DeepRTplus
[ "e4fe740e28af7ba427266fc039a7b5c3845671fd" ]
[ "prediction_emb_cpu.py" ]
[ "import torch\nimport numpy as np\nfrom torch.autograd import Variable\nfrom capsule_network_emb_cpu import *\nimport pickle\nfrom sys import argv\n\ndef pred_from_model(conv1_kernel,\n conv2_kernel,\n param_path, \n RTdata,\n PRED_BATCH):\n '''\n write extracted features as np.array to pkl\n ''' \n model = CapsuleNet(conv1_kernel,conv2_kernel)\n model.load_state_dict(torch.load(param_path))\n if True == CUDA:\n model.cuda()\n \n print('>> note: predicting using the model:',param_path)\n \n pred = np.array([])\n \n # TODO: handle int\n # TODO: if Batch == 16, peptide number cannot be: 16X+1\n pred_batch_number = int(RTdata.test.shape[0] / PRED_BATCH)+1\n for bi in range(pred_batch_number):\n test_batch = Variable(RTdata.test[bi*PRED_BATCH:(bi+1)*PRED_BATCH,:])\n if True == CUDA:\n test_batch = test_batch.cuda()\n pred_batch = model(test_batch)\n pred = np.append(pred, pred_batch[0].data.cpu().numpy().flatten()) \n if False == CUDA:\n pred_batch = model(test_batch)\n pred = np.append(pred, pred_batch[0].data.numpy().flatten()) \n return RTdata.test_label.numpy().flatten(), pred\n\nimport copy\ndef ensemble(obse,pred_list):\n pred_ensemble = copy.deepcopy(pred_list[0])\n for i in range(len(pred_list)-1):\n pred_ensemble += pred_list[i+1]\n pred_ensemble = pred_ensemble/len(pred_list)\n print('[ensemble %d] %.5f %.5f' %(len(pred_list),Pearson(obse,pred_ensemble),Delta_t95(obse,pred_ensemble)))\n return pred_ensemble \n\ndef ensemble1round(job_seed_round,conv1,conv2,S):\n obse,pred1=pred_from_model(conv1,conv2,job_seed_round+'epoch_10.pt',RTtest,1000)\n _,pred2=pred_from_model(conv1,conv2,job_seed_round+'epoch_12.pt',RTtest,1000)\n _,pred3=pred_from_model(conv1,conv2,job_seed_round+'epoch_14.pt',RTtest,1000)\n _,pred4=pred_from_model(conv1,conv2,job_seed_round+'epoch_16.pt',RTtest,1000)\n _,pred5=pred_from_model(conv1,conv2,job_seed_round+'epoch_18.pt',RTtest,1000)\n obse,pred1,pred2,pred3,pred4,pred5=obse*S,pred1*S,pred2*S,pred3*S,pred4*S,pred5*S\n pred_ensemble=ensemble(obse,[pred1,pred2,pred3,pred4,pred5])\n return obse, pred_ensemble\n\nscale = int(argv[1])\nround1model = argv[2] # 'work/dia/59/1/'\nconv1 = int(argv[3])\n# round2dir = argv[4] # 'work/dia/59/2/'\n# conv2 = int(argv[5])\n# round3dir = argv[6] # 'work/dia/59/3/'\n# conv3 = int(argv[7])\n# result_ensemble = argv[8]\n# print(argv)\ntest_path = argv[4]\n\n# test_path = 'data/dia_test_59.txt' # same as in capsule_network.py\n# RTtest = RTdata(dictionary, max_length, test_path)\n# desparse(RTtest)\ncorpus = Corpus(dictionary, # format: Corpus(dictionary, train_path, val_path='', test_path='', pad_length=0)\n train_path,\n test_path=test_path,\n pad_length=max_length) \nRTtest = corpus\n\n# obse, pred_r1 = ensemble1round(round1dir,conv1,conv1,scale)\n# _, pred_r2 = ensemble1round(round2dir,conv2,conv2,scale)\n# _, pred_r3 = ensemble1round(round3dir,conv3,conv3,scale)\n# pred_ensemble = ensemble(obse,[pred_r1,pred_r2,pred_r3])\n\nobse,pred1=pred_from_model(conv1,conv1,round1model,RTtest,15)\npred_ensemble = pred1*scale\nobse = obse*scale\nwith open(test_path+'.pred', 'w') as fo:\n fo.write('observed\\tpredicted\\n')\n for i in range(len(obse)):\n fo.write('%.5f\\t%.5f\\n' % (obse[i],pred_ensemble[i]))\n\nprint(\">> note: prediction done!\")\n# usage: python prediction.py 46 'work/dia/10/1/epoch_20.pt' 10 'data/SCX.txt'\n" ]
[ [ "torch.autograd.Variable", "numpy.array", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
prjemian/punx
[ "75477ab63518d8c134ebc02bf839c60042a0461e" ]
[ "punx/ignore_now/validate.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#-----------------------------------------------------------------------------\n# :author: Pete R. Jemian\n# :email: [email protected]\n# :copyright: (c) 2016, Pete R. Jemian\n#\n# Distributed under the terms of the Creative Commons Attribution 4.0 International Public License.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n'''\nvalidate NeXus NXDL and HDF5 data files\n\n.. autosummary::\n \n ~validate_xml\n ~NxdlPattern\n ~CustomNxdlPattern\n ~Data_File_Validator\n\n.. rubric:: CHECKLIST\n\nThese are considerata for the validation of NeXus HDF5 data files.\nCompare these validation steps with rules and documentation\nin the NeXus manual and the XML Schema files (``nxdl.xsd`` and ``nxdlTypes.xsd``).\nCheckboxes indicate which steps have been implemented in code below.\n\n* [x] make a list of all address nodes in the file to be evaluated\n* [x] attributes are also in this list\n* [x] use a structure to hold results for each node\n\n.. rubric:: File\n\n#. [x] verify attributes\n#. [x] verify file level as group using NX_class = NXroot\n#. [ ] identify any objects at root level that are not in NXroot (which is OK)\n#. [x] verify default plot identified\n\n #. [x] version 1\n #. [x] version 2\n #. [x] version 3\n #. [x] version 3+niac2014\n\n.. rubric:: Groups\n\n#. [x] compare name with pattern *validItemName*\n#. [x] determine NX_class, if any\n#. [x] verify NX_class in nxdl_dict\n#. [ ] is name flexible?\n#. [ ] What to do with NXDL symbol tables?\n#. [x] observe attribute: minOccurs\n#. [ ] observe attribute: maxOccurs\n#. [ ] check for items defined by NX_class\n#. [ ] check for items required by NX_class\n#. [ ] check for items not defined by NX_class\n#. [x] observe NXDL specification: ignoreExtraGroups\n#. [x] observe NXDL specification: ignoreExtraFields\n#. [x] observe NXDL specification: ignoreExtraAttributes\n#. [x] validate any attributes\n#. [x] validate any links\n#. [x] validate any fields\n\n.. rubric:: Links\n\n#. [x] compare name with pattern *validItemName*\n#. [ ] is name flexible?\n#. [x] is target attribute defined?\n#. [x] is target address absolute?\n#. [x] does target address exist?\n#. [ ] construct NX classpath from target\n#. [ ] compare NX classpath with NXDL specification\n\n.. rubric:: Fields\n\n#. [x] compare name with pattern\n#. [x] is name flexible?\n#. [x] observe attribute: minOccurs\n#. [x] is units attribute defined?\n#. [x] check units are consistent against NXDL\n#. [x] check data shape against NXDL\n#. [x] check data type against NXDL\n#. [x] check for attributes defined by NXDL\n#. [x] check AXISNAME_indices are each within signal data rank\n\n.. rubric:: Attributes\n\n#. [x] compare name with pattern\n#. [ ] check data type against NXDL\n#. 
[ ] check nxdl.xsd for how to handle these attributes regarding finding.WARN\n'''\n\nimport collections\nimport h5py\nimport lxml.etree\nimport numpy\nimport os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nimport punx\nfrom punx import finding\nfrom punx import h5structure\n\n# TODO: issue #14: http://download.nexusformat.org/doc/html/search.html?q=warning&check_keywords=yes&area=default\n\n__url__ = 'http://punx.readthedocs.org/en/latest/validate.html'\n\n\ndef validate_xml(xml_file_name):\n '''\n validate an NXDL XML file against an XML Schema file\n\n :param str xml_file_name: name of XML file\n '''\n from punx import cache\n xml_tree = lxml.etree.parse(xml_file_name)\n xsd = cache.get_XML_Schema()\n try:\n result = xsd.assertValid(xml_tree)\n except lxml.etree.DocumentInvalid as exc:\n msg = 'DocumentInvalid:\\n'\n msg += 'file: ' + xml_file_name + '\\n'\n msg += str(exc)\n raise punx.InvalidNxdlFile(msg)\n return result\n\n\nclass NxdlPattern(object):\n '''\n common regular expression pattern for validation\n \n :param obj parent: instance of :class:`Data_File_Validator`\n :param str pname: pattern identifying name\n :param str xpath_str: XPath search string, expect list of length = 1\n '''\n \n def __init__(self, parent, pname, xpath_str):\n from punx import cache\n self.name = pname\n self.xpath_str = xpath_str\n rules = cache.get_nxdl_xsd()\n\n r = rules.xpath(xpath_str, namespaces=parent.ns)\n\n if r is None or len(r) != 1:\n msg = 'could not read *' + pname + '* from *nxdl.xsd*'\n raise ValueError(msg)\n\n self.regexp_pattern_str = r[0].attrib.get('value', None)\n self.re_obj = re.compile('^' + self.regexp_pattern_str + '$')\n \n def match(self, text):\n '''regular expression search'''\n return self.re_obj.match(text)\n\n\nclass CustomNxdlPattern(NxdlPattern): # lgtm [py/missing-call-to-init] \n '''\n custom regular expression pattern for validation\n \n :param obj parent: instance of :class:`Data_File_Validator`\n :param str pname: pattern identifying name\n :param str regexp_pattern_str: regular expression to match\n '''\n \n def __init__(self, parent, pname, regexp_pattern_str):\n self.name = pname\n self.xpath_str = None\n\n self.regexp_pattern_str = regexp_pattern_str\n self.re_obj = re.compile('^' + self.regexp_pattern_str + '$')\n \n def match(self, text):\n '''regular expression search'''\n return self.re_obj.match(text)\n\n\nclass Data_File_Validator(object):\n '''\n manage the validation of a NeXus HDF5 data file\n '''\n \n def __init__(self, fname):\n from punx import cache\n from punx import nxdlstructure\n if not os.path.exists(fname):\n raise punx.FileNotFound(fname)\n self.fname = fname\n\n self.findings = [] # list of Finding() instances\n self.addresses = collections.OrderedDict() # dictionary of all HDF5 address nodes in the data file\n\n self.ns = cache.NX_DICT\n self.nxdl_rules = nxdlstructure.get_nxdl_rules()\n self.get_data_types()\n self.nxdl_dict = nxdlstructure.get_NXDL_specifications()\n\n try:\n self.h5 = h5py.File(fname, 'r')\n except IOError:\n raise punx.HDF5_Open_Error(fname)\n self._init_patterns()\n \n def _init_patterns(self):\n self.patterns = {}\n for item in ('validItemName', 'validNXClassName', \n 'validTargetName'):\n xps = '//*[@name=\"' # XPath String query\n xps += item\n xps += '\"]/xs:restriction/xs:pattern'\n self.patterns[item] = NxdlPattern(self, item, xps)\n\n # strict match: [a-z_][a-z\\d_]*\n # flexible match: [A-Za-z_][\\w_]* but gets finding.WARN per manual\n # 
advisory changed to finding.NOTE\n p = CustomNxdlPattern(self, 'validItemName-strict', r'[a-z_][a-z0-9_]*')\n self.patterns[p.name] = p\n self.__unique_findings__ = {}\n \n \n def close(self):\n if self.h5 is not None:\n self.h5.close()\n self.h5 = None\n \n def get_data_types(self):\n '''\n generate dictionary of acceptable Python data types, based on NeXus data type keys\n '''\n # Is there a better way to define these? Using nxdlTypes.xsd?\n # TODO: #21 : augment from self.nxdl_rules.nxdlTypes\n \n self.data_types = {\n 'NX_CHAR': [str, numpy.string_, numpy.ndarray],\n 'NX_UINT': (numpy.uint, numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64),\n 'NX_INT': (int, numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64),\n 'NX_FLOAT': (float, numpy.float, numpy.float16, numpy.float32, numpy.float64),\n 'NX_BINARY': (None, ), # #FIXME:\n 'NX_BOOLEAN': (None, ), # FIXME:\n }\n if sys.version_info.major == 2: # python2 only\n self.data_types['NX_CHAR'].append(unicode) # not in python3\n # definitions dependent on other definitions\n # (can add the lists together as needed)\n self.data_types['NX_INT'] += self.data_types['NX_UINT']\n self.data_types['NX_POSINT'] = self.data_types['NX_INT'] # need to restrict this\n self.data_types['NX_NUMBER'] = self.data_types['NX_INT'] + self.data_types['NX_FLOAT']\n self.data_types['ISO8601'] = self.data_types['NX_CHAR']\n self.data_types['NX_DATE_TIME'] = self.data_types['NX_CHAR']\n \n def validate(self):\n '''\n start the validation process from the file root\n '''\n self.validate_HDF5_group(self.h5)\n t = self.validate_default_plot()\n f = finding.TF_RESULT[t]\n title = \"* valid NeXus data file\"\n msg = 'This file is '\n if not t:\n msg += 'not '\n title = \"!\" + title[1:]\n msg += 'valid by the NeXus standard.'\n self.new_finding(title, \"/\", f, msg)\n \n def validate_HDF5_group(self, group):\n '''\n review the HDF5 group: group\n\n :param obj group: instance of h5py.File of h5py.Group\n \n Verify that items presented in data file are valid.\n '''\n nx_class_name = self.get_hdf5_attribute(group, 'NX_class', report=True)\n if nx_class_name is None:\n if isinstance(group, h5py.File):\n nx_class_name = 'NXroot'\n msg = 'file root (assumed): NXroot'\n self.new_finding('@NX_class', group.name, finding.OK, msg)\n else:\n self.validate_item_name(group.name, 'validNXClassName')\n msg = 'no @NX_class attribute, not a NeXus group'\n self.new_finding('@NX_class', group.name, finding.NOTE, msg)\n return # evaluate any further?\n else:\n self.validate_item_name(group.name)\n\n aname = group.name + '@NX_class'\n t = nx_class_name in self.nxdl_dict\n f = finding.TF_RESULT[t]\n msg = {True: 'known: ', False: 'unknown: '}[t] + str(nx_class_name)\n self.new_finding('@NX_class', aname, f, msg)\n \n nx_class_object = self.nxdl_dict.get(nx_class_name)\n if nx_class_object is None:\n msg = 'ignoring content in group not defined by NeXus'\n self.new_finding('not NeXus group', group.name, finding.UNUSED, msg)\n return # ignore content in HDF5 groups that are not NeXus classes\n\n nx_class_defaults = nx_class_object.attributes['defaults']\n \n for k in group.attrs.keys(): # review the group's attributes\n k = punx.h5structure.decode_byte_string(k)\n if k in ('NX_class', ):\n pass # handled elsewhere\n else:\n aname = group.name + '@' + k\n if not nx_class_defaults['ignoreExtraAttributes']:\n # TODO: don't validate attribute names defined in NXDL rules!\n self.validate_item_name(k, parent=group)\n if k == 'default' and nx_class_name in ('NXroot', 'NXentry', 
'NXsubentry'):\n target = self.get_hdf5_attribute(group, 'default')\n t = target in group\n f = {True: finding.OK, False: finding.ERROR}[t]\n msg = {True: 'exists: ', False: 'does not exist: '}[t] + target\n self.new_finding('default plot group', aname, f, msg)\n\n for child_name in group: # review the group's children\n try:\n child = group[child_name]\n except KeyError as _exc:\n if True:\n filename = self.missing_file_link(str(_exc))\n if filename is not None:\n title = 'external file link'\n msg = 'missing file: ' + filename\n self.new_finding(title, group.name+'/'+child_name, finding.ERROR, msg)\n continue\n if h5structure.isNeXusLink(child):\n if nx_class_defaults['ignoreExtraGroups']: # TODO: Is this proper for links?\n title = nx_class_name+'@ignoreExtraGroups'\n msg = 'link ignored per NXDL specification'\n self.new_finding(title, child.name, finding.UNUSED, msg)\n else:\n self.validate_NeXus_link(child, group)\n elif h5structure.isHdf5Group(child):\n if nx_class_defaults['ignoreExtraGroups']:\n title = nx_class_name+'@ignoreExtraGroups'\n msg = 'subgroup ignored per NXDL specification'\n self.new_finding(title, child.name, finding.UNUSED, msg)\n else:\n self.validate_HDF5_group(child)\n elif h5structure.isHdf5Dataset(child):\n if nx_class_defaults['ignoreExtraFields']:\n title = nx_class_name+'@ignoreExtraFields'\n msg = 'field ignored per NXDL specification'\n self.new_finding(title, child.name, finding.UNUSED, msg)\n else:\n self.validate_HDF5_dataset(child, group)\n else:\n msg = str(_exc) + '\\n' + 'unexpected: ' + child.name\n raise ValueError(msg)\n \n self.validate_NXDL_specification(group, nx_class_name)\n # FIXME: need special handling for application definitions\n# if nx_class_name in ('NXsubentry', 'NXentry') and 'definition' in group:\n# # application definition masquerading as NXentry or NXsubentry\n# app_def_name = group['definition'][0]\n# self.validate_NXDL_specification(group, app_def_name)\n \n def validate_HDF5_dataset(self, dataset, group):\n '''\n review the HDF5 dataset: dataset\n\n :param obj dataset: instance of h5py.Dataset\n :param obj group: instance of h5py.Group or h5py.File, needed to check against NXDL\n '''\n self.validate_item_name(dataset.name)\n field_rules = self.nxdl_rules.nxdl.children['field']\n nx_class_name = self.get_hdf5_attribute(group, 'NX_class')\n nx_class_object = self.nxdl_dict.get(nx_class_name)\n\n for k in dataset.attrs.keys(): # review the dataset's attributes\n k = punx.h5structure.decode_byte_string(k)\n aname = dataset.name + '@' + k\n self.validate_item_name(k, parent=dataset)\n v = self.get_hdf5_attribute(dataset, k, report=True)\n if k in field_rules.attrs:\n rules = field_rules.attrs[k]\n if len(rules.enum) > 0:\n t = v in rules.enum\n f = {True: finding.OK, False: finding.WARN}[t]\n msg = 'value=' + v\n if t:\n msg += ' :recognized'\n else:\n msg += ' : expected one of these: ' + '|'.join(rules.enum)\n self.new_finding('enumeration: @' + k, aname, f, msg)\n else:\n if k not in ('target',): # link target attribute checked elsewhere\n if nx_class_object is not None: # only check if NXDL exists\n if nx_class_object.attributes['defaults']['ignoreExtraAttributes']:\n msg = 'attribute ignored per NXDL specification'\n self.new_finding(nx_class_name + '@ignoreExtraAttributes', aname, finding.NOTE, msg)\n else:\n msg = 'attribute not defined in NXDL'\n self.new_finding(nx_class_name + '@' + k, aname, finding.NOTE, msg)\n \n self.validate_numerical_dataset(dataset, group)\n\n # check the type of this field\n # 
https://github.com/prjemian/punx/blob/b595fdf9910dbab113cfe8febbb37e6c5b48d74f/src/punx/validate.py#L761\n\n # review the dataset's content\n nx_class_name = self.get_hdf5_attribute(group, 'NX_class')\n if nx_class_name in self.nxdl_dict:\n nx_class = self.nxdl_dict[nx_class_name]\n rules = nx_class.fields.get(dataset.name.split('/')[-1])\n if rules is not None:\n if len(rules.enum) > 0:\n pass # TODO:\n defaults = rules.attributes['defaults']\n nx_type = defaults['type'] # TODO: check for this\n minO = defaults['minOccurs'] # TODO: check for this\n # in either case, validation of maxOccurs for datasets is not informative\n # HDF5 will not allow more than one instance of a name within a group\n # maxOccurs: is either 1 or, if name is flexible, unbounded\n\n isSpecifiedName = defaults['nameType'] == 'specified'\n f = finding.OK\n msg = 'name is ' + {True: 'specified', False: 'flexible'}[isSpecifiedName]\n self.new_finding('@nameType', dataset.name, f, msg)\n \n def validate_NeXus_link(self, link, group):\n '''\n review the NeXus link: link\n \n :param obj link: instance of h5py.Group or h5py.Dataset\n :param obj group: instance of h5py.Group, needed to check against NXDL\n '''\n self.validate_item_name(link.name, 'validTargetName')\n\n target = self.get_hdf5_attribute(link, 'target', report=True)\n if target is not None:\n aname = link.name + '@target'\n target_exists = target in self.h5\n f = finding.TF_RESULT[target_exists]\n msg = {True: target, False: 'does not exist'}[target_exists]\n self.new_finding('link target exists', aname, f, msg)\n else:\n self.new_finding('link', link.name, finding.ERROR, 'no target')\n \n def validate_NXDL_specification(self, group, nx_class_name):\n '''\n validate the group with the NXDL specification\n\n :param obj group: instance of h5py.Group or h5py.File\n :param str nx_class_name: name of a NeXus NXDL class\n \n Verify that items specified in NXDL file are present in the data file.\n '''\n nx_class_object = self.nxdl_dict.get(nx_class_name)\n if nx_class_object is None:\n return\n\n msg = 'validate with ' + nx_class_name + ' specification (incomplete)'\n self.new_finding('NXDL review', group.name, finding.TODO, msg)\n \n # specified group attributes are handled elsewhere\n # group_defaults = nx_class_object.attributes['defaults']\n\n # validate provided, required, and optional fields\n for field_name, rules in sorted(nx_class_object.fields.items()):\n self.validate_NXDL_field_specification(field_name, group, rules)\n\n # validate provided, required, and optional groups (recursive as directed)\n for subgroup_name, rules in nx_class_object.groups.items():\n self.validate_NXDL_group_specification(subgroup_name, group, rules)\n \n def validate_NXDL_group_specification(self, subgroup_name, group, rules):\n '''\n validate the group/subgroup with the NXDL specification\n\n :param str subgroup_name: name of subgroup in group\n :param obj group: instance of h5py.Group or h5py.File\n :param obj rules: instance of nxdlstructure.NX_group\n \n Verify this HDF5 group conforms to the NXDL specification\n '''\n #nx_class_name = self.get_hdf5_attribute(group, 'NX_class')\n defaults = rules.attributes['defaults']\n target_exists = subgroup_name in group\n\n deprecated = defaults['deprecated']\n if deprecated is not None:\n if target_exists:\n obj = group[subgroup_name]\n nm = '/'.join(rules.NX_class, subgroup_name) + '@deprecated'\n self.new_finding(nm, obj.name, finding.NOTE, deprecated)\n\n minO = defaults['minOccurs']\n maxO = defaults['maxOccurs']\n if int(minO) > 
0:\n if defaults['name'] is None:\n matches = [node for node in group.values() if h5structure.isNeXusGroup(node, rules.NX_class)]\n if len(matches) < int(minO):\n nm = group.name\n m = 'must have at least ' + str(minO) + ' group: ' + rules.NX_class \n f = finding.WARN\n self.new_finding(rules.NX_class+' required group', nm, f, m)\n else:\n nm = group.name + '/' + subgroup_name\n f = {True: finding.OK, False: finding.WARN}[target_exists]\n m = rules.NX_class + {True: ' found', False: ' not found'}[target_exists]\n self.new_finding(rules.NX_class+' required group', nm, f, m)\n if maxO == 'unbounded':\n pass\n # TODO: what else?\n \n def validate_NXDL_field_specification(self, field_name, group, rules):\n '''\n validate the group/field with the NXDL specification\n\n :param str field_name: name of field in group\n :param obj group: instance of h5py.Group or h5py.File\n :param obj rules: instance of nxdlstructure.NX_field\n \n Verify this HDF5 field conforms to the NXDL specification\n '''\n nx_class_name = self.get_hdf5_attribute(group, 'NX_class')\n defaults = rules.attributes['defaults']\n nx_type = self.data_types[defaults['type']]\n target_exists = field_name in group\n if target_exists:\n try:\n dataset = group[field_name]\n except KeyError as _exc:\n filename = self.missing_file_link(str(_exc))\n if filename is not None:\n title = 'external file link'\n msg = 'missing file: ' + filename\n self.new_finding(title, group.name+'/'+field_name, finding.ERROR, msg)\n return\n else:\n dataset = None\n nm = '/'.join((nx_class_name, field_name))\n\n # check the attributes specified in NXDL rules\n for k, attr in rules.attributes['nxdl.xsd'].items():\n if k in ('minOccurs maxOccurs name nameType type units'.split()):\n pass\n elif target_exists:\n if k == 'deprecated':\n m = defaults['deprecated']\n if m is not None:\n self.new_finding(nm+'@deprecated', dataset.name, finding.NOTE, m)\n elif k in dataset.attrs:\n aname = dataset.name + '@' + k\n ttl = 'NXDL attribute type: '\n ttl += '/'.join((nx_class_name, field_name))\n ttl += '@' + k\n v = self.get_hdf5_attribute(dataset, k)\n \n # check type against NXDL\n attr_type = attr.type.split(':')[-1] # strip off XML namespace prefix, if found\n if attr_type not in self.data_types:\n if attr_type == 'str':\n attr_type = 'NX_CHAR'\n else:\n msg = 'type(' + aname\n msg += ') = \"' + attr_type\n msg += '\" not in known data types'\n raise KeyError(msg)\n t = type(v) in self.data_types[attr_type]\n f = {True: finding.OK, False: finding.WARN}[t]\n if isinstance(v, numpy.ndarray) and isinstance(v[0], numpy.bytes_):\n m = 'byte-string'\n elif isinstance(v, numpy.ndarray):\n m = type(v[0]).__name__\n else:\n m = type(v).__name__\n m += ' : ' + attr_type\n self.new_finding(ttl, aname, f, m)\n \n # check if value matches enumeration\n if len(attr.enum) > 0:\n t = v in attr.enum\n f = {True: finding.OK, False: finding.WARN}[t]\n m = str(v)\n if t:\n m += ': expected'\n else:\n m += ': not in list: ' + ','.join(attr.enum)\n ttl = 'NXDL attribute enum: '\n ttl += '/'.join((nx_class_name, field_name))\n ttl += '@' + k\n self.new_finding(ttl, aname, f, m)\n elif attr.required:\n if k not in ('name', 'minOccurs', 'maxOccurs',):\n nm = 'NXDL attribute: ' + '/'.join((nx_class_name, field_name))\n nm += '@' + k\n m = 'required attribute not found'\n self.new_finding(nm, dataset.name, finding.WARN, m)\n\n minO = defaults['minOccurs']\n maxO = defaults['maxOccurs']\n required_name = defaults['nameType'] == 'specified'\n if int(minO) > 0 and required_name:\n f = {True: 
finding.OK, False: finding.WARN}[target_exists]\n m = {True: '', False: ' not'}[target_exists] + ' found'\n nm = group.name + '/' + field_name\n self.new_finding(nx_class_name+' required field', nm, f, m)\n\n t = len(dataset.shape) == len(rules.dims) # check rank against specification\n f = {True: finding.OK, False: finding.WARN}[t] # TODO: ? change WARN to NOTE ?\n m = {True: 'matches', False: 'does not match'}[target_exists] + ' NXDL specification'\n self.new_finding(nx_class_name+' field rank', nm, f, m)\n\n if target_exists:\n if str(dataset.dtype).startswith('|S'):\n t = type('numpy string array') in nx_type\n m = 'str'\n elif str(dataset.dtype).startswith('|O'):\n t = type(dataset[0]) in nx_type\n m = 'str'\n else:\n t = dataset.dtype in nx_type\n m = str(dataset.dtype)\n if 'object' == m:\n if dataset.ndim == 0:\n m = type(dataset.value).__name__\n else:\n m = type(dataset[0]).__name__\n if 'unicode' == m:\n m = 'str'\n f = {True: finding.OK, False: finding.WARN}[t]\n m += {True: ' : ', False: ' not '}[t] + defaults['type']\n nm = group.name + '/' + field_name\n ttl = '/'.join((nx_class_name, field_name))\n self.new_finding('NXDL data type: '+ttl, nm, f, m)\n \n # TODO: #16 check if unknown names are allowed to be flexible \n\n def validate_item_name(self, h5_addr, key=None, parent=None):\n '''\n validate *h5_addr* using *validItemName* regular expression\n \n This is used for the names of groups, fields, links, and attributes.\n \n :param str h5_addr: full HDF5 address of item, for reference only,\n for attributes, use an @ symbol, such as these examples:\n \n ============================= ============\n *h5_addr* *short_name*\n ============================= ============\n ``/entry/user`` ``user``\n ``/entry/data01/data`` ``data``\n ``/entry/data01/data@signal`` ``signal``\n ============================= ============\n\n :param str key: named key to search, default: None (``validItemName``)\n :param obj parent: HDF5 parent object, default: None\n\n This method will separate out the last part of the name for validation. \n Then, it is tested against the strict or relaxed regular expressions for \n a valid item name. 
The finding for each name is classified by the\n next table:\n \n ===== ======= ======= ================================================================\n order finding match description\n ===== ======= ======= ================================================================\n 1 OK strict matches most stringent NeXus specification\n 2 NOTE relaxed matches NeXus specification that is most generally accepted\n 3 ERROR UTF8 specific to strings with UnicodeDecodeError (see issue #37)\n 4 WARN HDF5 acceptable to HDF5 but not NeXus\n ===== ======= ======= ================================================================\n \n :see: http://download.nexusformat.org/doc/html/datarules.html?highlight=regular%20expression\n '''\n full_name = h5_addr\n if key is None:\n key_relaxed = 'validItemName'\n key_strict = 'validItemName-strict'\n\n short_name = h5_addr.split('/')[-1]\n if parent is not None:\n full_name = parent.name + '@' + h5_addr\n\n if short_name == 'NX_class':\n # special case\n self.new_finding('NeXus internal attribute', \n full_name, \n finding.OK, \n 'marks this HDF5 group as NeXus group')\n return\n \n # strict match: [a-z_][a-z\\d_]*\n # flexible match: [A-Za-z_][\\w_]* but gets finding.WARN per manual\n \n p = self.patterns[key_strict]\n m = p.match(short_name)\n if m is not None and m.string == short_name:\n f = finding.OK\n key = key_strict\n msg = 'strict re: ' + p.regexp_pattern_str\n else:\n p = self.patterns[key_relaxed]\n m = p.match(short_name)\n if m is not None and m.string == short_name:\n f = finding.NOTE\n key = key_relaxed\n msg = 'relaxed re: ' + p.regexp_pattern_str\n else:\n # test if string rendering raises UnicodeDecodeError\n key = 'validItemName'\n if parent is None:\n msg = 'valid HDF5 item name, not valid with NeXus'\n else:\n msg = 'valid HDF5 attribute name, not valid with NeXus'\n try: # to raise the exception\n _test = '%s' % str(m)\n f = finding.WARN\n except UnicodeDecodeError as _exc:\n f = finding.ERROR\n msg += ', UnicodeDecodeError'\n else:\n # TODO: validate full_name against other keys\n # validNXClassName\n # validTargetName\n f = finding.TODO\n msg = 'TODO: validate full_name against ' + key\n pass\n\n self.new_finding(key, full_name, f, msg)\n \n def validate_default_plot(self):\n '''\n check that data file defines the default plottable data\n \n :see: http://download.nexusformat.org/doc/html/datarules.html#find-the-plottable-data\n '''\n classpath_dict = collections.OrderedDict()\n for results in self.addresses.values():\n cp = results.classpath\n if cp not in classpath_dict:\n classpath_dict[cp] = []\n classpath_dict[cp].append(results.h5_address)\n candidates = self.identify_default_plot_candidates()\n \n if self.default_plot_addr_v3(candidates['v3'], classpath_dict) is not None:\n return True\n elif self.default_plot_addr_v2(candidates['v2']) is not None:\n return True\n elif self.default_plot_addr_v1(candidates['v1']) is not None:\n return True\n elif self.no_NXdata_children_of_NXentry(candidates['niac2016']):\n return True\n \n k = '/NXentry/NXdata/field'\n if k in classpath_dict and len(classpath_dict[k]) == 1:\n m = 'only one /NXentry/NXdata/field exists but no signal indicated'\n else:\n m = '/NXentry/NXdata/field exists but no signal indicated'\n f = finding.WARN\n self.new_finding('NeXus default plot', k, f, m)\n\n f = finding.TF_RESULT['/NXentry/NXdata/field' in classpath_dict]\n return f != finding.ERROR\n \n def identify_default_plot_candidates(self):\n '''\n find the HDF5 addresses that might provide the default plottable data\n 
\n :see: http://download.nexusformat.org/doc/html/datarules.html#find-the-plottable-data\n :see: http://download.nexusformat.org/doc/html/preface.html?highlight=class%20path#class-path-specification\n \n There are different methods to identify the default data to be plotted.\n These can be distinguished by differences in the NeXus class path\n (the sequence of NeXus classes and other elements that describe an object in\n a NeXus HDF5 data file). As used here, the text ``field`` is used\n instead of the name of the field (as shown in the NeXus manual) but the name of the\n attribute is given.\n \n =========== =======================================\n version NeXus classpath signature\n =========== =======================================\n niac2016 /NXentry (no NXdata group)\n v3 /NXentry/NXdata@signal\n v3+niac2014 /@default/NXentry@default/NXdata@signal\n v2 /NXentry/NXdata/field@signal\n v1 /NXentry/NXdata/field@signal\n =========== =======================================\n \n Versions *v1* and *v2* differ in their use of other attributes\n such as *axes* (v2) versus *axis* (v1) and *primary* (v1).\n with other attributes such as */NXentry/NXdata/field2@primary*.\n Since these other attributes are not always present, or\n might be used to indicate alternatives, a test for *v1*\n can fail due to both false negatives and false positives.\n '''\n # prepare dictionaries of candidates for the default plot\n candidates = dict(v1 = {}, v2 = {}, v3 = {}, niac2016 = {})\n for node_name in self.h5:\n node = self.h5[node_name]\n if h5structure.isNeXusGroup(node, 'NXentry'):\n candidates['niac2016'][node.name] = '/NXentry'\n for subnode_name in node:\n subnode = node[subnode_name]\n if h5structure.isNeXusGroup(subnode, 'NXdata'):\n if node.name in candidates['niac2016']:\n # reject this node from niac2016 since it has NXdata group\n del candidates['niac2016'][node.name]\n\n signal = self.get_hdf5_attribute(subnode, 'signal')\n if isinstance(signal, (bytes, numpy.bytes_)):\n signal = signal.decode()\n if signal is not None:\n k = subnode.name + '@signal'\n candidates['v3'][k] = '/NXentry/NXdata@signal'\n for ss_node_name in subnode:\n try:\n ss_node = subnode[ss_node_name]\n except KeyError:\n continue\n if not h5structure.isNeXusDataset(ss_node):\n continue\n if self.get_hdf5_attribute(ss_node, 'signal') is not None:\n k = ss_node.name + '@signal'\n # TODO: verify the value is a number (either as float, int, or str of some sort)\n candidates['v2'][k] = '/NXentry/NXdata/field@signal'\n candidates['v1'][k] = '/NXentry/NXdata/field@signal'\n return candidates\n\n def default_plot_addr_v1(self, group_dict):\n '''\n return the HDF5 address of the v1 default plottable data or None\n \n :see: http://download.nexusformat.org/doc/html/datarules.html#version-1\n '''\n default_plot_addr = []\n for primary_field_addr, nx_classpath in group_dict.items():\n title = 'NXdata group default plot v1'\n # need the NXdata group of this field\n primary = self.h5[primary_field_addr.split('@')[0]]\n nxdata_addr = '/'.join(primary.name.split('/')[:-1])\n nxdata = self.h5[nxdata_addr]\n signal_field_list = []\n for field_name in nxdata:\n field = nxdata[field_name]\n if h5structure.isNeXusDataset(field):\n signal = self.get_hdf5_attribute(field, 'signal', report=True)\n if signal is None:\n continue\n elif signal in (1, '1'):\n signal_field_list.append(field)\n else:\n m = 'expected @signal=1, found: ' + signal\n addr = field.name + '@signal'\n self.new_finding(title, addr, finding.ERROR, m)\n continue\n # TODO: @axis, 
@primary, and dimension scales\n # TODO: signal and dimension scales data shape\n\n if len(signal_field_list) == 1:\n m = 'NXdata group default plot using v1'\n self.new_finding(title, signal_field_list[0], finding.OK, m)\n default_plot_addr.append(signal_field_list[0])\n elif len(signal_field_list) == 0:\n m = 'NXdata group does not define a default plot using v1'\n self.new_finding(title, nxdata_addr, finding.WARN, m)\n else:\n m = 'NXdata group defines more than one default plot using v1'\n self.new_finding(title, nxdata_addr, finding.NOTE, m)\n \n cp = '/NXentry/NXdata/field@signal'\n title = 'NeXus default plot v1'\n if len(default_plot_addr) == 1:\n m = 'NeXus data file default plot defined'\n self.new_finding(title, default_plot_addr[0], finding.OK, m)\n return default_plot_addr[0]\n elif len(default_plot_addr) == 0:\n m = 'NeXus data file does not define a default plot using v1'\n #self.new_finding(title, cp, finding.WARN, m)\n else:\n m = 'NeXus data file defines more than one default plot using v1'\n self.new_finding(title, cp, finding.WARN, m)\n return default_plot_addr\n \n def default_plot_addr_v2(self, group_dict):\n '''\n return the HDF5 address of the v2 default plottable data or None\n \n :see: http://download.nexusformat.org/doc/html/datarules.html#version-2\n '''\n default_plot_addr = []\n for h5_addr, nx_classpath in group_dict.items():\n title = 'NeXus default plot v2'\n try:\n field = self.h5[h5_addr.split('@')[0]]\n except KeyError:\n continue\n signal = self.get_hdf5_attribute(field, 'signal', report=True)\n if signal in (1, '1'):\n m = nx_classpath + ' = 1'\n self.new_finding(title, field.name, finding.OK, m)\n default_plot_addr.append(field.name)\n else:\n m = 'expected @signal=1, found: ' + signal\n self.new_finding(title, h5_addr, finding.ERROR, m)\n # TODO: @axes and dimension scales (see issue #41)\n # TODO: signal and dimension scales data shape\n\n cp = '/NXentry/NXdata/field@signal'\n title = 'NeXus default plot v2'\n if len(default_plot_addr) == 1:\n m = 'NeXus data file default plot defined using v2'\n self.new_finding(title, default_plot_addr[0], finding.OK, m)\n return default_plot_addr[0]\n elif len(default_plot_addr) == 0:\n m = 'NeXus data file does not define a default plot using v2'\n #self.new_finding(title, cp, finding.WARN, m)\n else:\n m = 'NeXus data file defines more than one default plot using v2'\n self.new_finding(title, cp, finding.NOTE, m)\n return default_plot_addr\n \n def default_plot_addr_v3(self, group_dict, classpath_dict):\n '''\n return the HDF5 address of the v3 default plottable data or None\n \n :see: http://download.nexusformat.org/doc/html/datarules.html#version-3\n '''\n # TODO: this change will be disruptive, better in a branch\n# if '/NXentry/NXdata/field@signal' in classpath_dict:\n# pass\n# elif '/NXentry/NXdata/field' in classpath_dict:\n# field_list = classpath_dict['/NXentry/NXdata/field']\n# for entry in classpath_dict['/NXentry']:\n# count = len([i for i in field_list if i.startswith(entry)])\n# pass\n \n default_plot_addr = []\n for h5_addr, nx_classpath in group_dict.items():\n dimension_scales = []\n dimension_scales_ok = True # assume until proven otherwise\n title = 'NXdata group default plot v3'\n nxdata = self.h5[h5_addr.split('@')[0]]\n signal_name = self.get_hdf5_attribute(nxdata, 'signal', report=True)\n if signal_name not in nxdata:\n m = nx_classpath + ' field not found: ' + signal_name\n self.new_finding(title, nxdata.name + '@signal', finding.ERROR, m)\n continue\n else:\n signal_data = 
nxdata[signal_name]\n m = 'NXdata@signal = ' + signal_name\n addr = nxdata.name\n self.new_finding(title, addr+'@signal', finding.OK, m)\n\n axes_names = self.get_hdf5_attribute(nxdata, 'axes', report=True)\n if axes_names is None: # no axes attribute: use array indices as dimension scales\n for dim in signal_data.shape:\n dimension_scales.append(numpy.ndarray(dim))\n else:\n if isinstance(axes_names, str):\n for delim in (':', ' '):\n # replace alternate delimiters (\":\", \" \") with \",\"\n axes_names = axes_names.replace(delim, ',')\n axes_names = axes_names.split(',')\n for axis_name in axes_names:\n ttl = 'NXdata@axes'\n if axis_name == '.':\n pass\n elif axis_name in nxdata: # does axis exist?\n m = 'axes dataset found: ' + axis_name\n f = finding.OK\n self.new_finding(ttl+'='+axis_name, addr+'@axes', f, m)\n # check @AXISNAME_indices holds index of dimension scale data to use\n # dimension scale = index 'indices' of nxdata[axis_name]\n axis_data = nxdata[axis_name]\n indices = self.get_hdf5_attribute(nxdata, axis_name+'_indices')\n if indices is None:\n if len(axis_data.shape) == 1:\n m = 'not provided, assume = 0'\n self.new_finding('NXdata@'+axis_name+'_indices', \n nxdata.name+'@'+axis_name+'_indices', \n finding.NOTE, \n m)\n else:\n m = 'not provided, uncertain how to use'\n self.new_finding('NXdata@'+axis_name+'_indices', \n nxdata.name+'@'+axis_name+'_indices', \n finding.WARN, \n m)\n dimension_scales_ok = False\n else:\n if not isinstance(indices, (tuple, numpy.ndarray)):\n indices = [indices,]\n indices = numpy.array([int(v) for v in indices], dtype=int)\n t = numpy.all(indices < len(signal_data.shape))\n if len(indices) == 1:\n indices = indices[0]\n f = finding.TF_RESULT[t]\n m = 'value = ' + str(indices)\n m += {True:': ok', False:': invalid'}[t]\n self.new_finding('NXdata@'+axis_name+'_indices', \n nxdata.name+'@'+axis_name+'_indices', \n f, \n m)\n if not t:\n dimension_scales_ok = False\n\n if len(axis_data.shape) == 1:\n dimension_scales.append(axis_data)\n elif len(axis_data.shape) == 2:\n if not isinstance(indices, numpy.ndarray):\n dimension_scales.append(axis_data[indices])\n else:\n for indx in indices:\n dimension_scales.append(axis_data[indx])\n else:\n m = axis_data.name + '@axes, axis=' + axis_name\n m += ' has rank=' + str(len(axis_data.shape))\n m += '\\n This needs special handling. 
Send email to the developer.'\n raise ValueError(m)\n else:\n m = 'axes dataset not found: ' + axis_name\n f = finding.WARN\n self.new_finding(ttl+'='+axis_name, addr+'@axes', f, m)\n dimension_scales_ok = False\n\n if len(dimension_scales) == len(signal_data.shape):\n if len(dimension_scales) == 1:\n length_ok = dimension_scales[0].shape[0] - signal_data.shape[0] in (0, 1)\n # 0 : dimension scale values are bin centers\n # 1 : dimension scale values are bin edges\n if not length_ok:\n ttl = 'dimension scale for NXdata@signal'\n m = 'array lengths are not the same'\n self.new_finding(ttl, signal_data.name, finding.WARN, m)\n dimension_scales_ok = False\n else:\n for i, dscale in enumerate(dimension_scales):\n length_ok = dscale.shape[0] - signal_data.shape[i] in (0, 1)\n if not length_ok:\n ttl = 'dimension scale for NXdata@signal[' + str(i) + ']'\n m = 'array lengths are not the same'\n self.new_finding(ttl, signal_data.name, finding.WARN, m)\n dimension_scales_ok = False\n else:\n m = 'rank(' + signal_name + ') != number of dimension scales'\n self.new_finding('NXdata@signal rank', signal_data.name, finding.WARN, m)\n default_plot_addr.append(addr)\n if dimension_scales_ok:\n m = 'dimension scale(s) verified'\n self.new_finding('NXdata dimension scale(s)', nxdata.name, finding.OK, m)\n\n title = 'NeXus default plot v3'\n # TODO: report default plot in each NXdata group\n # TODO: report if file has one clearly designated default plot\n # TODO: report if file has one default plot\n if len(default_plot_addr) == 1:\n m = 'NeXus data file default plot: /NXentry/NXdata@signal'\n cp = nx_classpath + '='\n cp += self.get_hdf5_attribute(self.h5[default_plot_addr[0]], 'signal')\n self.new_finding(cp, default_plot_addr[0], finding.OK, title)\n return default_plot_addr[0]\n elif len(default_plot_addr) == 0:\n m = 'NeXus data file does not define a default plot using v3'\n # self.new_finding(title, cp, finding.WARN, m)\n else:\n # use NIAC2014 terms to find unique address\n unique_list = self.default_plot_addr_v3_niac2014(default_plot_addr)\n m = title + '+niac2014'\n if len(unique_list) == 1:\n self.new_finding(nx_classpath, unique_list[0], finding.OK, m)\n return unique_list[0]\n else:\n for _addr in default_plot_addr:\n cp = nx_classpath + '='\n cp += self.get_hdf5_attribute(self.h5[_addr], 'signal')\n self.new_finding(cp, _addr, finding.NOTE, title)\n return default_plot_addr\n \n def default_plot_addr_v3_niac2014(self, address_list):\n '''\n return a list of default plottable data as directed by @default attributes\n \n :param [str] address_list: list of absolute HDF5 addresses with v3 default plottable data\n \n Each address fits this NeXus class path: /NXentry/NXdata/field \n '''\n unique_list = []\n for k in address_list:\n nxentry_name = k.split('/')[1]\n root_default = self.get_hdf5_attribute(self.h5, 'default', nxentry_name)\n if root_default == nxentry_name:\n nxentry = self.h5[nxentry_name]\n nxdata_name = k.split('/')[2]\n nxentry_default = self.get_hdf5_attribute(nxentry, 'default', nxdata_name)\n if nxentry_default == nxdata_name:\n unique_list.append(k)\n return unique_list\n \n def no_NXdata_children_of_NXentry(self, group_dict):\n '''\n As of NIAC2016, it is not required that there be any NXdata as a child of NXentry\n '''\n title = 'NXdata optional per NIAC2016'\n cp = '/NXentry'\n m = 'NeXus allows NXentry without NXdata subgroup'\n if len(group_dict) == 1:\n for h5_addr, nx_classpath in group_dict.items():\n t = h5_addr in self.h5\n f = finding.TF_RESULT[t]\n 
self.new_finding(title, cp, finding.NOTE, m)\n return t\n \n return False\n \n def validate_numerical_dataset(self, dataset, group):\n '''\n review the units attribute of an HDF5 dataset\n\n :param obj dataset: instance of h5py.Dataset\n :param obj group: instance of h5py.Group or h5py.File, needed to check against NXDL\n '''\n if dataset.dtype not in self.data_types['NX_NUMBER']:\n return\n\n # check the units of numerical fields\n title = 'field@units'\n units = self.get_hdf5_attribute(dataset, 'units', report=True)\n t = units is not None\n f = {True: finding.OK, False: finding.NOTE}[t]\n msg = {True: 'exists', False: 'does not exist'}[t]\n if t:\n t = len(units) > 0\n f = {True: finding.OK, False: finding.NOTE}[t]\n msg = {True: 'value: ' + units, False: 'has no value'}[t]\n self.new_finding(title, dataset.name + '@units', f, msg)\n # TODO: compare value of dataset@units with NXDL@units specification\n # this could easily require a deep analysis\n \n # TODO: issue #13: check field dimensions against \"rank\" : len(shape) == len(NXDL/dims) \n shape = dataset.shape\n if shape != (1,): # ignore scalars\n __ = None # used as a NOP breakpoint after previous definition\n\n def get_hdf5_attribute(self, obj, attribute, default=None, report=False):\n '''\n HDF5 attribute strings might be coded in several ways\n \n :param obj obj: instance of h5py.File, h5py.Group, or h5py.Dataset\n :param str attribute: name of requested attribute\n :param obj default: value if attribute not found (usually str)\n :param bool report: check & report if value is an ndarray of variable length string\n '''\n a = obj.attrs.get(attribute, default)\n if isinstance(a, numpy.ndarray):\n if len(a) > 0:\n if isinstance(a[0], (bytes, numpy.bytes_)):\n a = [str(v.decode()) for v in a]\n if report:\n gname = obj.name + '@' + attribute\n msg = 'variable length string'\n if len(a) > 1:\n msg += ' array'\n msg += ': ' + str(a)\n self.new_finding('attribute data type', gname, finding.NOTE, msg)\n if len(a) == 1:\n a = a[0]\n elif isinstance(a, (int, numpy.int16, numpy.int32, numpy.int64)):\n a = str(a)\n if sys.version_info.major == 3:\n if isinstance(a, bytes):\n a = str(a.decode())\n return a\n\n def reconstruct_classpath(self, h5_address, *args, **kwargs):\n '''\n build the classpath from the h5_address\n '''\n path = h5_address.lstrip('/').split('@')[0]\n if len(path) == 0:\n return\n \n # reconstruct the NeXus classpath\n cp = '' # classpath to be built\n hp = '' # HDF5 address to be built\n for item in path.split('/'):\n hp += '/' + item\n if hp in self.h5:\n try:\n item = self.h5[hp]\n except KeyError as _exc:\n cp += '/missing_external_file_link'\n continue\n if h5structure.isHdf5Dataset(item):\n cp += '/field'\n else:\n obj = self.h5[hp]\n nx_class = self.get_hdf5_attribute(obj, 'NX_class', '-')\n cp += '/' + str(nx_class)\n if '@' in h5_address:\n cp += '@' + h5_address.split('@')[-1]\n \n return cp\n\n def new_finding(self, test_name, h5_address, status, comment):\n '''\n accumulate a list of findings\n \n :param str test_name: brief name of this test\n :param str h5_address: HDF5 address\n :param obj status: instance of finding.ValidationResultStatus,\n should be the same text as other instances of this test\n :param str comment: free-form explanation\n '''\n\n addr = str(h5_address)\n unique_key = addr + ':' + test_name\n if unique_key in self.__unique_findings__:\n # ensure that each test is only recorded once\n return\n f = finding.Finding(test_name, addr, status, comment)\n self.findings.append(f)\n 
self.__unique_findings__[unique_key] = f\n if addr not in self.addresses:\n # accumulate a dictionary of HDF5 object addresses\n self.addresses[addr] = finding.CheckupResults(addr)\n self.addresses[addr].classpath = self.reconstruct_classpath(addr)\n self.addresses[addr].findings.append(f)\n \n def report_findings(self, statuses=()):\n '''\n make a table of the validation findings\n \n :param statuses: List (or tuple) of finding statuses to be shown.\n Use the `finding.VALID_STATUS_LIST` (as shown below)\n or create your own list:\n\n :data:`finding.VALID_STATUS_LIST` ``(OK, NOTE, WARN, ERROR, TODO, UNUSED, COMMENT)``\n \n See :mod:`finding` for details.\n\n :returns str: table of results or `None` if no results match.\n '''\n import pyRestTable\n\n t = pyRestTable.Table()\n t.labels = 'address validation status comment(s)'.split()\n if isinstance(statuses, finding.ValidationResultStatus):\n statuses = [statuses,]\n for f in sorted(self.findings, key=self.findings_comparator):\n if f.status in statuses:\n t.rows.append((f.h5_address, f.test_name, f.status, f.comment))\n if len(t.rows) == 0:\n return 'None'\n return t.reST()\n \n def findings_comparator(self, finding):\n '''\n custom sorting key for all HDF5 addresses\n '''\n if finding.h5_address.find('@') >= 0:\n address, attribute = finding.h5_address.split('@')\n else:\n address = finding.h5_address\n attribute = None\n try:\n if attribute is not None:\n k = '!_4_'\n elif isinstance(self.h5[address], h5py.Dataset):\n k = '!_3_'\n elif isinstance(self.h5[address], h5py.Group):\n k = '!_1_'\n elif isinstance(self.h5[address], h5py.File):\n k = '!_0_'\n else:\n k = '!_5_'\n except KeyError as exc:\n k = '!_6_'\n key = address + k\n if attribute is not None:\n key += '@' + attribute\n key += '!__status_' + finding.status.key\n key += '!__title_' + finding.test_name\n return key\n\n def report_findings_summary(self):\n '''\n make a summary table of the validation findings (count how many of each status)\n '''\n import pyRestTable\n\n # count each category\n summary = collections.OrderedDict()\n for k in finding.VALID_STATUS_LIST:\n summary[str(k.key)] = 0\n xref = {str(k): k for k in finding.VALID_STATUS_LIST}\n for f in self.findings:\n summary[str(f.status)] += 1\n\n t = pyRestTable.Table()\n t.labels = 'status count description'.split()\n for k, v in summary.items():\n t.rows.append((k, v, xref[k].description))\n t.rows.append(('--', '--', '--'))\n t.rows.append(('TOTAL', len(self.findings), '--'))\n return t.reST()\n \n def report_classpath(self):\n '''\n make a table of the known NeXus class paths\n '''\n import pyRestTable\n t = pyRestTable.Table()\n t.labels = 'HDF5-address NeXus-classpath'.split()\n for k, v in self.addresses.items():\n t.rows.append((k, v.classpath))\n return t.reST()\n \n def missing_file_link(self, text):\n '''\n Return file name if error message from KeyError due to missing external file link, else None\n \n Such as::\n \n Unable to open object (Unable to open file: name = 'data\\\\../nt15698-1/processing/waxs_mask.nxs', errno = 2, error message = 'no such file or directory', flags = 0, o_flags = 0)\n \n Returns::\n \n data\\\\../nt15698-1/processing/waxs_mask.nxs\n \n '''\n filename = None\n m1 = 'Unable to open object (Unable to open file: name = '\n p1 = text.find(m1)\n if p1 >= 0:\n p2 = text.find(', errno =')\n filename = text[p1 + len(m1) : p2].strip(\"'\")\n return filename\n\nif __name__ == '__main__':\n print(\"Start this module using: python main.py validate ...\")\n exit(0)\n" ]
[ [ "numpy.ndarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
557mp/pk_story
[ "adcee7cfcc3b5d95601565066a6e8e7587974059" ]
[ "3_conditional_seqgan/sequence_gan_load_test.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport random\nfrom dataloader import Gen_Data_loader, Dis_dataloader\nfrom generator import Generator\nfrom discriminator_ import Discriminator\nfrom rollout import ROLLOUT\nimport pickle\n\n#########################################################################################\n# Generator Hyper-parameters\n######################################################################################\nEMB_DIM = 200 # embedding dimension (pretrained: 200, pk: 30)\nHIDDEN_DIM = 300 # hidden state dimension of lstm cell\nSEQ_LENGTH = 30 # sequence length\nSTART_TOKEN = 0\nPRE_EPOCH_NUM = 120 # supervise (maximum likelihood estimation) epochs\nSEED = 88\nBATCH_SIZE = 64\nTYPE_SIZE = 18 # conditional type size\n\n#########################################################################################\n# Discriminator Hyper-parameters\n#########################################################################################\ndis_embedding_dim = EMB_DIM\ndis_filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 30]\ndis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]\ndis_dropout_keep_prob = 0.75\ndis_l2_reg_lambda = 0.2\n\n#########################################################################################\n# Basic Training Parameters\n#########################################################################################\nTOTAL_BATCH = 200\ngenerated_num = 1000\nsample_num = 10\n\n# original seqgan parameter\n# HIDDEN_DIM = 32\n# PRE_EPOCH_NUM = 120\n# TOTAL_BATCH = 200\n# generated_num = 10000\n# dis_filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]\n# dis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]\n\n# original seqgan parameter\n# HIDDEN_DIM = 32\n# PRE_EPOCH_NUM = 120\n# TOTAL_BATCH = 200\n# generated_num = 10000\n\npositive_file = './data/3_pk_type_data_index.txt'\nnegative_file = 'save/negative_sample.txt'\neval_file = 'save/eval_file.txt'\n# \"pretrain\" or \"poke\"\nembed_flag = \"pretrain\"\n\na = open('./data/3_pk_type_data_index.pkl', 'rb')\nreal_data = pickle.load(a)\n\na = open('./data/pk_pos2idx.pkl', 'rb')\nvocab_to_int = pickle.load(a)\n\na = open('./data/pk_idx2pos.pkl', 'rb')\nint_to_vocab = pickle.load(a)\n\na = open('./data/pk_type2idx.pkl', 'rb')\ntype2idx = pickle.load(a)\n\na = open('./data/pk_idx2type.pkl', 'rb')\nidx2type = pickle.load(a)\n\nif embed_flag == \"pretrain\":\n a = open('./data/pretrain_embedding_vec.pkl', 'rb')\nelif embed_flag == \"poke\":\n a = open('./data/pk_embedding_vec.pkl', 'rb')\nword_embedding_matrix = pickle.load(a)\nword_embedding_matrix = word_embedding_matrix.astype(np.float32)\n\n# a = open('./data/word_dict.pickle', 'rb')\n# word_dict = pickle.load(a)\n\nreal_data_vocab = [[int_to_vocab[i] for i in sample if int_to_vocab[i] != 'UNK']\n for type_story in real_data.values() for sample in type_story]\nreal_data_vocab = [' '.join(sample) for sample in real_data_vocab]\nprint(len(real_data_vocab))\n\n\ndef generate_samples(sess, trainable_model, batch_size, generated_num, output_file, word_embedding_matrix, type_idx):\n # Generate Samples\n generated_samples = []\n generating_types = []\n for _ in range(int(generated_num / batch_size)):\n sample = trainable_model.generate(sess, word_embedding_matrix, type_idx)\n generated_samples.extend(sample)\n generating_types.extend(type_idx)\n\n with open(output_file, 'w') as fout:\n for i in range(len(generated_samples)):\n buffer = str(generating_types[i])\n buffer2 = ' '.join([str(x) for x in 
generated_samples[i]]) + '\\n'\n fout.write(buffer + ' ' + buffer2)\n\n\ndef pre_train_epoch(sess, trainable_model, data_loader, word_embedding_matrix):\n # Pre-train the generator using MLE for one epoch\n supervised_g_losses = []\n data_loader.reset_pointer()\n\n for it in range(data_loader.num_batch): # 빨리 돌리려면 여기를 1로\n seq, type = data_loader.next_batch()\n _, g_loss = trainable_model.pretrain_step(sess, seq, word_embedding_matrix, type)\n supervised_g_losses.append(g_loss)\n\n return np.mean(supervised_g_losses)\n\n\ndef make_sample(eval_file, int_to_vocab, sample_num):\n samples = []\n types = []\n with open(eval_file, 'r') as f:\n for line in f:\n line = line.strip()\n line = line.split()\n parse_line = [int(x) for x in line]\n types.append(parse_line[0])\n samples.append(parse_line[1:])\n\n type_int = types[:sample_num]\n sample_int = samples[:sample_num]\n type_str = [idx2type[i] for i in type_int]\n sample_vocab = [[int_to_vocab[i] for i in sample] for sample in sample_int]\n sample_result = []\n for i in range(len(sample_vocab)):\n sample_result.append(type_str[i] + ' ' + ' '.join(sample_vocab[i]))\n return sample_result\n\n################################## main() #########################################\n\n# load model path (./chekckpoint)\nload_model_path = './checkpoint/seqGAN_ours'\n\ntf.reset_default_graph()\n\nrandom.seed(SEED)\nnp.random.seed(SEED)\n\ngen_data_loader = Gen_Data_loader(BATCH_SIZE, SEQ_LENGTH)\nvocab_size = len(vocab_to_int) # 6447\nprint(vocab_size)\ndis_data_loader = Dis_dataloader(BATCH_SIZE, SEQ_LENGTH)\n\ngenerator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN, TYPE_SIZE)\ndiscriminator = Discriminator(sequence_length=SEQ_LENGTH, batch_size=BATCH_SIZE, num_classes=2,\n word_embedding_matrix=word_embedding_matrix,\n embedding_size=dis_embedding_dim, filter_sizes=dis_filter_sizes,\n num_filters=dis_num_filters, type_size=TYPE_SIZE, l2_reg_lambda=dis_l2_reg_lambda)\nrollout = ROLLOUT(generator, 0.8, word_embedding_matrix)\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\nsess = tf.Session(config=config)\nsaver = tf.train.Saver()\nsess.run(tf.global_variables_initializer())\n\nprint('#########################################################################')\nprint('Restore Trained Seqgan parameters...')\nsaver.restore(sess, load_model_path)\nprint(\"Model restored.\")\n\n# Generate samples using Trained Model\nrandom_type = np.random.randint(0, TYPE_SIZE, BATCH_SIZE)\ngenerate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file, word_embedding_matrix, random_type)\n\nsamples = make_sample(eval_file, int_to_vocab, generated_num)\nsamples = [[word for word in sample.split() if word != 'UNK'] for sample in samples]\nsamples = [' '.join(sample) for sample in samples]\n\nf = open('./save/eval_seqgan_vocab.txt', 'w')\nfor token in samples:\n token = token + '\\n'\n f.write(token)\nf.close()\n" ]
[ [ "numpy.random.seed", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.reset_default_graph", "numpy.mean", "tensorflow.Session", "tensorflow.train.Saver", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
fusion-flap/flap_nstx_gpi
[ "cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759" ]
[ "publications/plot_results_for_rsi_2021.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 6 14:35:54 2020\n\n@author: mlampert\n\"\"\"\nimport os\nimport copy\nimport pickle\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib.gridspec import GridSpec\n\nfrom flap_nstx.analysis import calculate_nstx_gpi_frame_by_frame_velocity, flap_nstx_thomson_data\nfrom flap_nstx.analysis import calculate_radial_acceleration_diagram, plot_nstx_gpi_velocity_distribution\nfrom flap_nstx.analysis import nstx_gpi_generate_synthetic_data, test_spatial_displacement_estimation, show_nstx_gpi_video_frames\n\nimport flap\nimport flap_nstx\n\nthisdir = os.path.dirname(os.path.realpath(__file__))\nfn = os.path.join(thisdir,\"../flap_nstx.cfg\")\nflap.config.read(file_name=fn)\nflap_nstx.register()\nstyled=True\n\nif styled:\n plt.rc('font', family='serif', serif='Helvetica')\n labelsize=9.\n linewidth=0.5\n major_ticksize=2.\n plt.rc('text', usetex=False)\n plt.rcParams['pdf.fonttype'] = 42\n plt.rcParams['ps.fonttype'] = 42\n plt.rcParams['lines.linewidth'] = linewidth\n plt.rcParams['axes.linewidth'] = linewidth\n plt.rcParams['axes.labelsize'] = labelsize\n plt.rcParams['axes.titlesize'] = labelsize\n \n plt.rcParams['xtick.labelsize'] = labelsize\n plt.rcParams['xtick.major.size'] = major_ticksize\n plt.rcParams['xtick.major.width'] = linewidth\n plt.rcParams['xtick.minor.width'] = linewidth/2\n plt.rcParams['xtick.minor.size'] = major_ticksize/2\n \n plt.rcParams['ytick.labelsize'] = labelsize\n plt.rcParams['ytick.major.width'] = linewidth\n plt.rcParams['ytick.major.size'] = major_ticksize\n plt.rcParams['ytick.minor.width'] = linewidth/2\n plt.rcParams['ytick.minor.size'] = major_ticksize/2\n plt.rcParams['legend.fontsize'] = labelsize\nelse:\n import matplotlib.style as pltstyle\n pltstyle.use('default')\n \n \ndef plot_results_for_rsi_2021_paper(plot_figure=1, \n save_data_into_txt=False):\n \n \n wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']\n if plot_figure == 1 or plot_figure == 2:\n pickle_file=wd+'/processed_data/2021_rsi_fig12.pickle'\n try:\n d1,d2,d3,d4=pickle.load(open(pickle_file,'rb'))\n flap.add_data_object(d1, 'GPI_SLICED_FULL')\n flap.add_data_object(d2, 'GPI_GAS_CLOUD')\n flap.add_data_object(d3, 'GPI_SLICED_DENORM_CCF_VEL')\n flap.add_data_object(d4, 'GPI_CCF_F_BY_F')\n except:\n calculate_nstx_gpi_frame_by_frame_velocity(exp_id=141319, \n time_range=[0.552497-500e-6,0.552497+500e-6], \n plot=False,\n subtraction_order_for_velocity=4,\n skip_structure_calculation=False,\n correlation_threshold=0.,\n pdf=False, \n nlevel=51, \n nocalc=False, \n filter_level=3, \n normalize_for_size=True,\n normalize_for_velocity=True,\n threshold_coeff=1.,\n normalize_f_high=1e3, \n \n normalize='roundtrip', \n velocity_base='cog', \n return_results=False, \n plot_gas=True)\n \n pickle.dump((flap.get_data_object('GPI_SLICED_FULL'),\n flap.get_data_object('GPI_GAS_CLOUD'),\n flap.get_data_object('GPI_SLICED_DENORM_CCF_VEL'),\n flap.get_data_object('GPI_CCF_F_BY_F')), open(pickle_file, 'wb'))\n \n if plot_figure == 1:\n flap.get_data('NSTX_GPI',\n exp_id=141319,\n name='',\n object_name='GPI')\n flap.slice_data('GPI', slicing={'Time':flap.Intervals(0.552497-500e-6,0.552497+500e-6)}, output_name='GPI_SLICED_FULL')\n data_object_name='GPI_SLICED_DENORM_CCF_VEL'\n detrended=flap_nstx.analysis.detrend_multidim(data_object_name,\n exp_id=141319,\n order=1, \n coordinates=['Image x', 'Image y'], \n 
output_name='GPI_DETREND_VEL')\n \n d=copy.deepcopy(flap.get_data_object(data_object_name))\n \n d.data=d.data-detrended.data\n flap.add_data_object(d,'GPI_TREND')\n \n signals=[data_object_name,\n 'GPI_TREND',\n 'GPI_DETREND_VEL']\n \n pdf=PdfPages(wd+'/plots/2021_rsi/figure_1_trend_subtraction.pdf')\n temp_vec=[flap.slice_data(signals[i],slicing={'Sample':20876}).data for i in range(3)]\n z_range=[np.min(temp_vec),\n np.max(temp_vec)]\n gs=GridSpec(1,3)\n plt.figure() \n ax,fig=plt.subplots(figsize=(8.5/2.54,2))\n for index_grid_x in range(3):\n plt.subplot(gs[0,index_grid_x])\n visibility=[True,True]\n if index_grid_x != 0:\n visibility[1]=False\n\n flap.plot(signals[index_grid_x], \n plot_type='contour', \n slicing={'Sample':20876},\n axes=['Image x', 'Image y'],\n options={'Z range':z_range,\n 'Interpolation': 'Closest value',\n 'Clear':False,\n 'Equal axes':True,\n 'Axes visibility':visibility,\n 'Colorbar':True,\n },\n plot_options={'levels':51},\n )\n if save_data_into_txt:\n data=flap.get_data_object(signals[index_grid_x]).slice_data(slicing={'Sample':20876}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_1_1_'+signals[index_grid_x]+'.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n \n # for index_grid_x in range(3):\n # plt.subplot(gs[1,index_grid_x])\n # visibility=[True,True]\n # flap.plot(signals[index_grid_x], \n # plot_type='xy', \n # slicing={'Time':0.3249560, 'Image y':40},\n # axes=['Image x'],\n # options={'Interpolation': 'Closest value',\n # 'Clear':False,\n # 'Axes visibility':visibility,\n # }\n # )\n # if index_grid_x == 0:\n # print(np.sum(flap.slice_data(signals[index_grid_x], slicing={'Time':0.3249560}).data))\n \n # if save_data_into_txt:\n # data=flap.get_data_object(signals[index_grid_x]).slice_data(slicing={'Time':0.3249560}).data\n # filename=wd+'/data_accessibility/2021_rsi/figure_1_2_'+signals[index_grid_x]+'.txt'\n # file1=open(filename, 'w+')\n # for i in range(len(data[0,:])):\n # string=''\n # for j in range(len(data[:,0])):\n # string+=str(data[j,i])+'\\t'\n # string+='\\n'\n # file1.write(string)\n pdf.savefig()\n pdf.close()\n \n if plot_figure == 2:\n pdf=PdfPages(wd+'/plots/2021_rsi/figure_frame_by_frame.pdf')\n gs=GridSpec(1,3)\n plt.figure() \n ax,fig=plt.subplots(figsize=(8.5/2.54,2))\n plt.subplot(gs[0])\n flap.plot('GPI_SLICED_FULL', \n plot_type='contour', \n slicing={'Sample':20876}, \n axes=['Image x', 'Image y'],\n options={'Z range':[0,4096],\n 'Interpolation': 'Closest value',\n 'Clear':False,\n 'Equal axes':True,\n 'Axes visibility':[True,True],\n 'Colorbar':True,\n },\n plot_options={'levels':51},\n )\n plt.title(\"552.497ms\")\n if save_data_into_txt:\n data=flap.get_data_object('GPI_SLICED_FULL').slice_data(slicing={'Sample':20876}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_2a.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close()\n plt.subplot(gs[1])\n flap.plot('GPI_SLICED_FULL', \n plot_type='contour', \n slicing={'Sample':20877}, \n axes=['Image x', 'Image y'],\n options={'Z range':[0,4096],\n 'Interpolation': 'Closest value',\n 'Clear':False,\n 'Equal axes':True,\n 'Axes visibility':[True,False],\n 'Colorbar':True,\n },\n plot_options={'levels':51},\n )\n plt.title(\"552.500ms\")\n if save_data_into_txt:\n 
data=flap.get_data_object('GPI_SLICED_FULL').slice_data(slicing={'Sample':20877}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_2b.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close()\n \n plt.subplot(gs[2])\n flap.plot('GPI_CCF_F_BY_F', \n plot_type='contour', \n slicing={'Sample':20877,\n 'Image x':flap.Intervals(-32,32), #, 'Image x':flap.Intervals(-10,10),'Image y':flap.Intervals(-10,10)}, \n 'Image y':flap.Intervals(-40,40),},\n axes=['Image x', 'Image y'],\n options={\n #'Z range':[0,2048],\n 'Interpolation': 'Closest value',\n 'Clear':False,\n 'Equal axes':True,\n 'Axes visibility':[True,True],\n #'Colormap':colormap,\n 'Colorbar':True,\n #'Overplot options':oplot_options,\n },\n plot_options={'levels':51},\n )\n plt.title(\"CCF\")\n \n if save_data_into_txt:\n data=flap.get_data_object('GPI_CCF_F_BY_F').slice_data(slicing={'Sample':20877}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_2c.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close()\n \n \n pdf.savefig()\n pdf.close()\n \n if plot_figure == 3:\n time_range=[0.34725,0.34775]\n #calculate_nstx_gpi_avg_frame_velocity(exp_id=141319,\n # time_range=[0.552,0.553], \n calculate_nstx_gpi_frame_by_frame_velocity(exp_id=139901,\n time_range=time_range, \n normalize='roundtrip', \n normalize_for_size=True, \n skip_structure_calculation=True, \n plot=False, \n pdf=False,\n nocalc=True,\n plot_scatter=False,\n plot_for_publication=True,\n correlation_threshold=0.0,\n return_results=True,\n subtraction_order_for_velocity=4)\n corr_thres=np.arange(11)/10\n for i_corr in range(11):\n results=calculate_nstx_gpi_frame_by_frame_velocity(exp_id=139901,\n time_range=time_range, \n normalize='roundtrip', \n normalize_for_size=True, \n skip_structure_calculation=False, \n plot=False, \n pdf=False,\n nocalc=True,\n plot_scatter=False,\n plot_for_publication=True,\n correlation_threshold=corr_thres[i_corr],\n return_results=True,\n subtraction_order_for_velocity=4)\n print(results['Velocity ccf'][:,0].shape)\n time=results['Time']\n if i_corr==0:\n pol_vel=np.zeros([len(results['Velocity ccf'][:,0]),11])\n rad_vel=np.zeros([len(results['Velocity ccf'][:,0]),11])\n pol_vel[:,i_corr]=results['Velocity ccf'][:,0]\n rad_vel[:,i_corr]=results['Velocity ccf'][:,1]\n \n pdf=PdfPages(wd+'/plots/2021_rsi/figure_vel_vs_corr_thres.pdf')\n \n styled=True\n if styled:\n plt.rc('font', family='serif', serif='Helvetica')\n labelsize=9.\n linewidth=0.5\n major_ticksize=2.\n plt.rc('text', usetex=False)\n plt.rcParams['pdf.fonttype'] = 42\n plt.rcParams['ps.fonttype'] = 42\n plt.rcParams['lines.linewidth'] = linewidth\n plt.rcParams['axes.linewidth'] = linewidth\n plt.rcParams['axes.labelsize'] = labelsize\n plt.rcParams['axes.titlesize'] = labelsize\n \n plt.rcParams['xtick.labelsize'] = labelsize\n plt.rcParams['xtick.major.size'] = major_ticksize\n plt.rcParams['xtick.major.width'] = linewidth\n plt.rcParams['xtick.minor.width'] = linewidth/2\n plt.rcParams['xtick.minor.size'] = major_ticksize/2\n \n plt.rcParams['ytick.labelsize'] = labelsize\n plt.rcParams['ytick.major.width'] = linewidth\n plt.rcParams['ytick.major.size'] = major_ticksize\n plt.rcParams['ytick.minor.width'] = linewidth/2\n plt.rcParams['ytick.minor.size'] = major_ticksize/2\n 
plt.rcParams['legend.fontsize'] = labelsize\n else:\n import matplotlib.style as pltstyle\n pltstyle.use('default')\n plt.figure() \n ax,fig=plt.subplots(figsize=(8.5/2.54,2))\n for i in range(0,7,1):\n plt.plot(time, pol_vel[:,(10-i)]/1e3-i*10, label=str((10-i)/10))\n \n plt.title('Poloidal velocity vs. \\n correlation threshold')\n plt.xlabel('Time [s]')\n plt.ylabel('vpol [m/s]')\n #plt.legend()\n \n pdf.savefig()\n pdf.close()\n \n if save_data_into_txt:\n filename=wd+'/data_accessibility/2021_rsi/figure_3.txt'\n file1=open(filename, 'w+')\n string='Time [s]'\n for i in range(1,7,1):\n string+='\\t rho='+str((10-i)/10)\n string+='\\n'\n file1.write(string)\n for j in range(len(time)):\n string=str(time[j])\n for i in range(1,7,1):\n string+='\\t'+str(pol_vel[j,(10-i)]/1e3-i*10)\n string+='\\n'\n file1.write(string)\n file1.close()\n \n \n if plot_figure == 4:\n test_spatial_displacement_estimation(plot_sample_gaussian=True, \n pdf=True,\n save_data_into_txt=save_data_into_txt)\n \n if save_data_into_txt:\n data=flap.get_data_object('gaussian').slice_data(slicing={'Sample':0}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_4a.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close()\n \n data=flap.get_data_object('gaussian').slice_data(slicing={'Sample':1}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_4b.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close() \n \n data=flap.get_data_object('GPI_FRAME_12_CCF').slice_data(slicing={'Sample':0}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_4c.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close() \n \n if plot_figure == 5:\n \n test_spatial_displacement_estimation(gaussian_frame_vs_structure_size=True, \n gaussian_frame_size=True, \n gaussian=True, \n interpolation='parabola', \n pdf=True, \n nocalc=True, \n frame_size_range=[8,200], \n frame_size_step=8,\n save_data_into_txt=save_data_into_txt)\n if plot_figure == 6:\n test_spatial_displacement_estimation(plot_sample_random=True, \n pdf=True,\n save_data_into_txt=save_data_into_txt)\n \n if save_data_into_txt:\n data=flap.get_data_object('random').slice_data(slicing={'Sample':0}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_6a.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close()\n \n data=flap.get_data_object('random').slice_data(slicing={'Sample':1}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_6b.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close() \n \n data=flap.get_data_object('GPI_FRAME_12_CCF').slice_data(slicing={'Sample':0}).data\n filename=wd+'/data_accessibility/2021_rsi/figure_6c.txt'\n file1=open(filename, 'w+')\n for i in range(len(data[0,:])):\n string=''\n for j in range(len(data[:,0])):\n string+=str(data[j,i])+'\\t'\n string+='\\n'\n file1.write(string)\n file1.close() \n \n \n if plot_figure == 7:\n 
test_spatial_displacement_estimation(random=True, \n pdf=True, \n nocalc=False,\n save_data_into_txt=save_data_into_txt)\n \n if plot_figure == 8:\n plt.figure()\n ax,fig=plt.subplots(figsize=(3.35*2,5.5))\n pdf=PdfPages(wd+'/plots/2021_rsi/figure_141319_0.552497_9_frame.pdf')\n \n show_nstx_gpi_video_frames(exp_id=141319, \n start_time=0.552497-5*2.5e-6,\n n_frame=9,\n logz=False,\n z_range=[0,3900],\n plot_filtered=False, \n normalize=False,\n cache_data=False, \n plot_flux=False, \n plot_separatrix=True, \n flux_coordinates=False,\n device_coordinates=True,\n new_plot=False,\n save_pdf=True,\n colormap='gist_ncar',\n save_for_paraview=False,\n colorbar_visibility=True,\n save_data_for_publication=save_data_into_txt\n )\n pdf.savefig()\n pdf.close()\n \n if plot_figure == 9:\n results=calculate_nstx_gpi_frame_by_frame_velocity(exp_id=141319,\n time_range=[0.552,0.553], \n normalize='roundtrip', \n normalize_for_size=True, \n normalize_for_velocity=False,\n skip_structure_calculation=False, \n plot=True, \n pdf=True,\n nocalc=False,\n plot_scatter=False,\n plot_for_publication=True,\n remove_interlaced_structures=True,\n return_results=True,\n subtraction_order_for_velocity=4)\n\n if save_data_into_txt:\n time=results['Time']\n filename=wd+'/data_accessibility/2021_rsi/figure_9.txt'\n file1=open(filename, 'w+')\n string='Time [s] \\t v_rad_ccf \\t v_rad_str \\t v_pol_ccf \\t v_pol_str \\n'\n file1.write(string)\n for i in range(len(time)):\n string=str(time[i])+'\\t'+\\\n str(results['Velocity ccf'][i,0])+'\\t'+\\\n str(results['Velocity str max'][i,0])+'\\t'+\\\n str(results['Velocity ccf'][i,1])+'\\t'+\\\n str(results['Velocity str max'][i,1])+'\\n'\n file1.write(string)\n file1.close()" ]
[ [ "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.title", "matplotlib.style.use", "numpy.min", "numpy.arange", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "numpy.max", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vkuznet/CMSMonitoring
[ "af85d41846a19f68ed2fa1a1761a536fcbd16eb7" ]
[ "src/python/CMSMonitoring/eos_path_size.py" ]
[ "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Ceyhun Uzunoglu <ceyhunuzngl AT gmail [DOT] com>\n# Create html table for EOS paths' size as a result of xrdcp command\n#\n# acronjob:\n# - $HOME/CMSMonitoring/scripts/eos_path_size.sh\n# How it works:\n# - Gets only paths from XRDCP command\n# - All used values are results of EOS command\n# - This script can use direct results of `eos` command or a file that includes the result of `eos` command\n# Parameters:\n# - --output_file: Sets the output html\n# - --input_eos_file: If provided, the file that contains EOS command results will be used.\n# This file is updated every 10 minutes by VOCMS team and they are managing the cron job\n# If not provided, make sure that you are a quota admin to run `eos` command\n\n\nimport json\nimport os\nimport sys\nfrom datetime import datetime\n\nimport click\nimport numpy as np\nimport pandas as pd\nfrom schema import Schema, Use, SchemaError\n\npd.options.display.float_format = \"{:,.2f}\".format\npd.set_option(\"display.max_colwidth\", -1)\n\nEXCLUDED_PATHS = [\"/eos/cms/store/cmst3\", \"/eos/recovered\", \"/eos/totem\"]\nRECYCLE = \"/eos/cms/proc/recycle/\"\nTB_DENOMINATOR = 10 ** 12\n\n\ndef get_paths_from_xrdcp(json_data):\n \"\"\"Get paths from XRDCP command and filter our cmst3 paths \"\"\"\n data = json.load(json_data)\n paths = [elem[\"path\"][0] for elem in data['storageservice']['storageshares']]\n # Filter out cmst3/*\n paths = [path for path in paths if all((exc not in path) for exc in EXCLUDED_PATHS)]\n return paths\n\n\n# EOS operation\ndef get_validated_eos_results(eos_lines_list):\n \"\"\"Validate, convert types and filter\n\n Example line: \"quota=node uid=akhukhun space=/eos/cms/store/ usedbytes=0 ...\"\n Filter: \"gid=ALL\" or \"gid=project\" filter is applied\n \"\"\"\n schema = Schema(\n [\n {\n 'quota': str,\n 'gid': Use(str),\n 'space': Use(str),\n 'usedbytes': Use(float),\n 'usedlogicalbytes': Use(float),\n 'usedfiles': Use(float),\n 'maxbytes': Use(float),\n 'maxlogicalbytes': Use(float),\n 'maxfiles': Use(float),\n 'percentageusedbytes': Use(float),\n 'statusbytes': str,\n 'statusfiles': str,\n # No 'uid'\n }\n ]\n )\n # Get only rows that contain \"gid\" and it's value should be \"ALL\" or \"project\"\n dict_array = list(\n map(\n lambda line: dict(tuple(s.split(\"=\")) for s in line.strip().split(' ')),\n [row for row in eos_lines_list if ((\"gid=ALL\" in row) or (\"gid=project\" in row))]\n )\n )\n try:\n # Validate and convert types\n return schema.validate(dict_array)\n except SchemaError as e:\n print(tstamp(), \"Data is not valid:\", str(e))\n sys.exit(1)\n\n\ndef get_eos(file_path=None):\n \"\"\"Get EOS output raw results from either eos command or from a file that contain those results\"\"\"\n if file_path:\n print(tstamp(), \"EOS file path is provided, reading from file\")\n # Read eos result from file\n with open(file_path) as file:\n return get_validated_eos_results(file.readlines())\n else:\n print(tstamp(), \"EOS file path is NOT provided\")\n print(tstamp(), \"Running eos quota ls command\")\n # For VOC team: run eos command and get output\n try:\n # export EOSHOME=\"\" is needed to avoid warning messages\n return get_validated_eos_results(\n str(os.system('export EOSHOME=\"\" && eos -r 103074 1399 quota ls -m')).split(\"\\n\"))\n except OSError as e:\n print(tstamp(), 'ERROR: Cannot get the eos quota ls output from EOS:', str(e))\n sys.exit(1)\n\n\ndef create_eos_df(file_path):\n \"\"\"Create dataframe from EOS output lines\"\"\"\n df = 
pd.DataFrame(get_eos(file_path)).rename(columns={'space': 'path'})\n # Re-order and drop unwanted columns: 'usedfiles', 'statusbytes', 'statusfiles', 'quota', 'gid'\n return df[['path', 'usedlogicalbytes', 'maxlogicalbytes', 'usedbytes', 'maxbytes', 'percentageusedbytes']]\n\n\ndef tstamp():\n \"\"\"Return timestamp for logging\"\"\"\n return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n\n\ndef get_update_time(input_eos_file):\n \"\"\"Create update time depending on reading EOS results from file or directly from command\"\"\"\n if input_eos_file:\n # Set update time to eos file modification time if file input used\n try:\n return datetime.utcfromtimestamp(os.path.getmtime(input_eos_file)).strftime('%Y-%m-%dT%H:%M:%SZ')\n except OSError as e:\n print(tstamp(), \"ERROR: coul not get last modification time of file:\", str(e))\n else:\n return datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n\ndef get_html_template(base_html_directory=None):\n \"\"\" Reads partial html file and return it as strings\n \"\"\"\n if base_html_directory is None:\n base_html_directory = os.getcwd()\n with open(os.path.join(base_html_directory, \"main.html\")) as f:\n main_html = f.read()\n return main_html\n\n\ndef create_html(df, update_time, total_row, base_html_directory):\n \"\"\"Create html page with given dataframe\n \"\"\"\n # Get main html\n main_html = get_html_template(base_html_directory=base_html_directory)\n main_html = main_html.replace(\"__UPDATE_TIME__\", update_time)\n main_html = main_html.replace(\"__EXCLUDED_PATHS__\", \"*,\".join(EXCLUDED_PATHS) + \"*\")\n # Total row placeholder\n total_on_header = \"\"\"\n <tr >\n <th>{Path}</th>\n <th>{logical used(TB)}</th>\n <th>{logical quota(TB)}</th>\n <th>{raw used(TB)}</th>\n <th>{raw quota(TB)}</th>\n <th>{used/quota}</th>\n <th>{logical/raw (quotas)}</th>\n </tr>\n </thead>\n \"\"\".format(**total_row)\n main_column = df[\"Path\"].copy()\n df[\"Path\"] = (\n '<a class=\"Path\">'\n + main_column\n + '</a><br>'\n )\n # Pandas df to html\n html = df.to_html(escape=False, index=False)\n # Add total ro to header. Be careful there should be only one <thead>...</thead>!\n html = html.replace(\" </thead>\", total_on_header)\n # cleanup of the default dump\n html = html.replace(\n 'table border=\"1\" class=\"dataframe\"',\n 'table id=\"dataframe\" class=\"display compact\" style=\"width:100%;\"',\n )\n html = html.replace('style=\"text-align: right;\"', \"\")\n\n # Add pandas dataframe html to main body\n main_html = main_html.replace(\"____MAIN_BLOCK____\", html)\n return main_html\n\n\[email protected]()\[email protected](\"--output_file\", default=None, required=True, help=\"For example: /eos/.../www/test/test.html\")\[email protected](\"--input_eos_file\", default=None, required=False,\n help=\"Result of 'eos -r 103074 1399 quota ls -m', i.e.: /eos/cms/store/accounting/eos_quota_ls.txt\")\[email protected](\"--static_html_dir\", default=None, required=True,\n help=\"Html directory for main html template. 
For example: ~/CMSMonitoring/src/html/eos_path_size\")\ndef main(output_file=None, input_eos_file=None, static_html_dir=None):\n \"\"\"\n Main function combines xrdcp and EOS results then creates HTML page\n \"\"\"\n # Get xrdcp command output as input to python script.\n xrdcp_paths = get_paths_from_xrdcp(sys.stdin)\n\n # Get EOS values as pandas dataframe either from file or directly from EOS command\n df = create_eos_df(input_eos_file)\n\n # Filter dataframe to only include paths from xrdcp\n df = df[df['path'].isin(xrdcp_paths)]\n\n # RECYCLE: divide these columns to 2\n cols_to_divide = [\"usedlogicalbytes\", \"maxlogicalbytes\", \"usedbytes\", \"maxbytes\"]\n\n # Resetting index sets index number to 0, so get nested value and divide to 2\n recycle_dict = {k: v[0] / 2 for k, v in\n df.loc[df['path'] == RECYCLE, cols_to_divide].reset_index(drop=True).to_dict().items()}\n df.loc[df['path'] == RECYCLE, cols_to_divide] = [recycle_dict[x] for x in cols_to_divide] # Guarantees the order\n\n # Calculate totals, after exclusions!\n total = df[[\"usedlogicalbytes\", \"maxlogicalbytes\", \"usedbytes\", \"maxbytes\"]].sum()\n total_row = {\n 'Path': 'total',\n 'logical used(TB)': \"{:,.2f}\".format(total[\"usedlogicalbytes\"] / TB_DENOMINATOR),\n 'logical quota(TB)': \"{:,.2f}\".format(total[\"maxlogicalbytes\"] / TB_DENOMINATOR),\n 'raw used(TB)': \"{:,.2f}\".format(total[\"usedbytes\"] / TB_DENOMINATOR),\n 'raw quota(TB)': \"{:,.2f}\".format(total[\"maxbytes\"] / TB_DENOMINATOR),\n 'used/quota': \"{:,.2f}%\".format((total[\"usedlogicalbytes\"] / total[\"maxlogicalbytes\"]) * 100),\n 'logical/raw (quotas)': \"{:,.1f}%\".format((total[\"maxlogicalbytes\"] / total[\"maxbytes\"]) * 100),\n }\n\n # Clear inf and nan, also arrange percentage\n df[\"logical/raw quotas\"] = (df[\"maxlogicalbytes\"] / df[\"maxbytes\"]) * 100\n df[\"logical/raw quotas\"] = df[\"logical/raw quotas\"].apply(\n lambda x: \"-\" if np.isnan(x) or np.isinf(x) else \"{:,.1f}%\".format(x))\n # Arrange percentage of used/quota\n df[\"percentageusedbytes\"] = df[\"percentageusedbytes\"].apply(lambda x: \"{:,.2f}%\".format(x))\n\n # Convert to TB\n df[\"usedlogicalbytes\"] = df[\"usedlogicalbytes\"] / TB_DENOMINATOR\n df[\"maxlogicalbytes\"] = df[\"maxlogicalbytes\"] / TB_DENOMINATOR\n df[\"usedbytes\"] = df[\"usedbytes\"] / TB_DENOMINATOR\n df[\"maxbytes\"] = df[\"maxbytes\"] / TB_DENOMINATOR\n\n # Rename columns\n df = df.rename(columns={\n \"path\": \"Path\",\n \"usedlogicalbytes\": \"logical used(TB)\",\n \"maxlogicalbytes\": \"logical quota(TB)\",\n \"usedbytes\": \"raw used(TB)\",\n \"maxbytes\": \"raw quota(TB)\",\n \"percentageusedbytes\": \"used/quota\",\n \"logical/raw quotas\": \"logical/raw (quotas)\",\n })\n # Reorder\n df = df[[\"Path\", \"logical used(TB)\", \"logical quota(TB)\", \"raw used(TB)\", \"raw quota(TB)\", \"used/quota\",\n \"logical/raw (quotas)\"]]\n update_time = get_update_time(input_eos_file)\n html = create_html(df=df,\n update_time=update_time,\n total_row=total_row,\n base_html_directory=static_html_dir)\n\n with open(output_file, \"w\") as f:\n f.write(html)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.isnan", "pandas.set_option", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HyeonwooNoh/tensorflow_hw
[ "5828e285209ff8c3d1bef2e4bd7c55ca611080d5", "b794611c9b90763ffbe2cb01f91b3a9e33c9b892" ]
[ "tensorflow/contrib/tpu/python/tpu/tpu_feed.py", "tensorflow/contrib/cudnn_rnn/python/kernel_tests/cudnn_rnn_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===================================================================\n\n\"\"\"Helper library for handling infeed between hosts and TPUs.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.contrib.tpu.python.ops import tpu_ops\nfrom tensorflow.contrib.tpu.python.tpu import tpu_sharding\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\n\n\nclass InfeedQueue(object):\n \"\"\"A helper object to build a device infeed queue.\n\n The InfeedQueue builds the host-side and device-side Ops to enqueue and\n dequeue elements, respectively, and ensures that their types and\n shapes match.\n \"\"\"\n\n def __init__(self,\n number_of_tuple_elements=None,\n tuple_types=None,\n tuple_shapes=None,\n shard_dimensions=None,\n name=None):\n \"\"\"Creates a new InfeedQueue with the given configuration.\n\n The configuration need not be fully specified at creation since it\n can be modified subsequently by methods that set the values\n explicitly or infer them from the shapes of inputs.\n\n Args:\n number_of_tuple_elements: the number of Tensors fed atomically through the\n queue, must be present unless it can be inferred from other arguments.\n tuple_types: if not None, a list of types of the elements of the queue.\n tuple_shapes: if not None, a list of shapes of the elements of the queue.\n shard_dimensions: if not None, a list of dimensions on which the\n elements of the queue should be sharded during automatic\n parallelization.\n name: the name of the queue.\n\n Raises:\n ValueError: if number_of_tuple_elements <= 0; or\n number_of_tuple_arguments, tuple_types, tuple_shapes, and\n shard_dimensions are all None; or the length of tuple_types,\n tuple_shapes, or shard_dimensions is not equal to\n number_of_tuple_elements; or any element of shard_dimensions\n can't be converted to a Dimension.\n TypeError: if any element of tuple_types or tuple_shapes can't\n be converted to a dtype or TensorShape, respectively.\n \"\"\"\n self._frozen = False\n self._generated_enqueue_ops = False\n self._generated_dequeue_op = False\n self._name = \"InfeedQueue\" if name is None else name\n if number_of_tuple_elements is None:\n if tuple_types is not None:\n number_of_tuple_elements = len(tuple_types)\n elif tuple_shapes is not None:\n number_of_tuple_elements = len(tuple_shapes)\n elif shard_dimensions is not None:\n number_of_tuple_elements = len(shard_dimensions)\n else:\n raise ValueError(\n \"number of tuple elements cannot be inferred from InfeedQueue \"\n \"constructor\"\n )\n if number_of_tuple_elements <= 0:\n raise ValueError(\"number_of_tuple_elements %d must be > 0\" %\n number_of_tuple_elements)\n # Make 
an empty sharding policy for each tuple element.\n self._sharding_policies = [\n tpu_sharding.ShardingPolicy()\n for _ in xrange(number_of_tuple_elements)\n ]\n if tuple_types is not None:\n self.set_tuple_types(tuple_types)\n else:\n self._tuple_types = None\n if tuple_shapes is not None:\n self.set_tuple_shapes(tuple_shapes)\n else:\n self._tuple_shapes = None\n if shard_dimensions is not None:\n self.set_shard_dimensions(shard_dimensions)\n self._validate()\n\n def _validate(self):\n \"\"\"Checks that the configuration is self-consistent.\n\n Raises:\n ValueError: if the shapes and sharding policies don't match.\n \"\"\"\n if self.tuple_shapes is not None:\n for (policy, shape) in zip(self._sharding_policies, self._tuple_shapes):\n # Raise an error if the policy is incompatible with the shape.\n _ = policy.get_sharded_shape(shape)\n\n @property\n def number_of_tuple_elements(self):\n \"\"\"Returns the number of InfeedQueue tuple elements.\"\"\"\n return len(self._sharding_policies)\n\n @property\n def tuple_types(self):\n \"\"\"Returns the types of the InfeedQueue tuple elements.\"\"\"\n return self._tuple_types\n\n def set_tuple_types(self, tuple_types):\n \"\"\"Sets the type of each element of the queue.\n\n tuple_types must be a list of length\n self.number_of_tuple_elements, and each element must be\n convertible to a dtype.\n\n Args:\n tuple_types: the types of each queue element.\n\n Raises:\n ValueError: if tuple_types is not of length\n self.number_of_tuple_elements.\n TypeError: if an element of tuple_types cannot be converted to a\n dtype.\n \"\"\"\n if len(tuple_types) != self.number_of_tuple_elements:\n raise ValueError(\"tuple_types is %s, but must be a list of length %d\" %\n (str(tuple_types), self.number_of_tuple_elements))\n if self._frozen:\n for (frozen, updated) in zip(self._tuple_types, tuple_types):\n if frozen != updated:\n raise ValueError(\n \"Trying to update InfeedQueue with frozen configuration with an \"\n \"incompatible type. 
Frozen types are %s, updated types are %s\" % (\n str(self._tuple_types), str(tuple_types)))\n else:\n try:\n self._tuple_types = [dtypes.as_dtype(t) for t in tuple_types]\n except (TypeError) as e:\n raise TypeError(\n \"tuple_types is %s, but must be a list of elements each \"\n \"convertible to dtype: got error %s\" % (str(tuple_types), str(e)))\n\n @property\n def tuple_shapes(self):\n \"\"\"Returns the shapes of the InfeedQueue tuple elements.\"\"\"\n return self._tuple_shapes\n\n def set_tuple_shapes(self, tuple_shapes):\n \"\"\"Sets the shape of each element of the queue.\n\n tuple_shapes must be a list of length\n self.number_of_tuple_elements, and each element must be\n convertible to a TensorShape.\n\n Args:\n tuple_shapes: the shapes of each queue element.\n\n Raises:\n ValueError: if tuple_shapes is not of length\n self.number_of_tuple_elements.\n TypeError: if an element of tuple_shapes cannot be converted to\n a TensorShape.\n \"\"\"\n if len(tuple_shapes) != self.number_of_tuple_elements:\n raise ValueError(\"tuple_shapes is %s, but must be a list of length %d\" %\n (str(tuple_shapes), self.number_of_tuple_elements))\n try:\n tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]\n except (ValueError, TypeError) as e:\n raise TypeError(\n \"tuple_shapes is %s, but must be a list of elements each \"\n \"convertible to TensorShape: got error %s\" % (str(tuple_shapes),\n str(e)))\n if self._frozen:\n for (frozen, updated) in zip(self._tuple_shapes, tuple_shapes):\n if frozen != updated:\n raise ValueError(\n \"Trying to update InfeedQueue with frozen configuration with an \"\n \"incompatible shape. Frozen shapes are %s, updated shapes are %s\"\n % (str(self._tuple_shapes), str(tuple_shapes)))\n else:\n self._tuple_shapes = tuple_shapes\n self._validate()\n\n @property\n def sharding_policies(self):\n \"\"\"Returns the sharding policies of the InfeedQueue tuple elements.\"\"\"\n return self._sharding_policies\n\n @property\n def shard_dimensions(self):\n \"\"\"Gets the shard dimension of each tuple element.\n\n Returns:\n A list of length number_of_tuple_elements, where each list entry\n is the shard dimension of that tuple element or None if the\n shard dimension has not been set.\n \"\"\"\n # The number of shards is always the same for all the policies.\n return [policy.shard_dimension for policy in self._sharding_policies]\n\n def set_shard_dimensions(self, shard_dimensions):\n \"\"\"Sets the shard_dimension of each element of the queue.\n\n shard_dimensions must be a list of length\n self.number_of_tuple_elements, and each element must be\n convertible to a Dimension compatible with self.tuple_shapes.\n\n Args:\n shard_dimensions: the dimensions of each queue element.\n\n Raises:\n ValueError: if shard_dimensions is not of length\n self.number_of_tuple_elements; or an element of\n shard_dimensions cannot be converted to a Dimension; or an\n element of shard_dimensions is a Dimension that is out of\n range for the corresponding tuple element shape.\n \"\"\"\n if len(shard_dimensions) != self.number_of_tuple_elements:\n raise ValueError(\"shard_dimensions is %s, but must be a list of length %d\"\n % (str(shard_dimensions),\n self.number_of_tuple_elements))\n for (policy, dimension) in zip(self._sharding_policies, shard_dimensions):\n policy.set_shard_dimension(dimension)\n self._validate()\n\n @property\n def number_of_shards(self):\n \"\"\"Gets the number of shards to use for the InfeedQueue.\n\n Returns:\n Number of shards or None if the number of shards has 
not been set.\n \"\"\"\n # The number of shards is always the same for all the policies.\n return self._sharding_policies[0].number_of_shards\n\n def set_number_of_shards(self, number_of_shards):\n \"\"\"Sets the number of shards to use for the InfeedQueue.\n\n Args:\n number_of_shards: number of ways to shard the InfeedQueue.\n\n Raises:\n ValueError: if number_of_shards is not > 0; or the policies have\n been frozen and number_of_shards was already set to something\n else.\n \"\"\"\n for policy in self._sharding_policies:\n policy.set_number_of_shards(number_of_shards)\n self._validate()\n\n def set_configuration_from_input_tensors(self, input_tensors):\n \"\"\"Sets the shapes and types of the queue tuple elements.\n\n input_tensors is a list of Tensors whose types and shapes are used\n to set the queue configuration.\n\n Args:\n input_tensors: list of Tensors of the same types and shapes as\n the desired queue Tuple.\n\n Raises:\n ValueError: if input_tensors is not a list of length\n self.number_of_tuple_elements\n \"\"\"\n if len(input_tensors) != self.number_of_tuple_elements:\n raise ValueError(\n \"input_tensors is %s, but should be a list of %d Tensors\", (\n str(input_tensors), self.number_of_tuple_elements))\n self.set_tuple_shapes([t.shape for t in input_tensors])\n self.set_tuple_types([t.dtype for t in input_tensors])\n\n def set_configuration_from_sharded_input_tensors(self, input_tensors):\n \"\"\"Sets the shapes and types of the queue tuple elements.\n\n input_tensors is a list of lists of Tensors whose types and shapes are used\n to set the queue configuration. The length of the outer list is the number\n of shards required, and each inner list is the tuple of Tensors to use to\n determine the types and shapes of the corresponding shard. This method\n depends on the shard dimension, and calling it freezes the shard policy.\n\n Args:\n input_tensors: list of lists of Tensors. 
The outer list length corresponds\n to the desired number of shards, and each inner list is the size\n and shape of the desired configuration of the corresponding shard.\n\n Raises:\n ValueError: if any inner list is not a list of length\n self.number_of_tuple_elements; or the inner lists do not combine to\n form a consistent unsharded shape.\n TypeError: if the types of the Tensors in the inner lists do not match.\n \"\"\"\n if not self._frozen:\n # Unset the tuple shapes in case the configuration becomes\n # transiently inconsistent.\n self._tuple_shapes = None\n number_of_shards = len(input_tensors)\n self.set_number_of_shards(number_of_shards)\n for t in input_tensors:\n if len(t) != self.number_of_tuple_elements:\n raise ValueError(\n \"input_tensors is %s but must be a list of lists, where each inner\"\n \" list has length number_of_tuple_elements=%d\" % (\n str(input_tensors), self.number_of_tuple_elements))\n # Transpose the inputs to make a list of shard shapes for each tuple\n # element.\n sharded_shapes = [[t[i].shape for t in input_tensors]\n for i in xrange(self.number_of_tuple_elements)]\n # For each tuple, get the unsharded shape using that tuple's policy.\n unsharded_shapes = [\n policy.get_unsharded_shape(s)\n for (policy, s) in zip(self._sharding_policies, sharded_shapes)\n ]\n self.set_tuple_shapes(unsharded_shapes)\n for i in xrange(1, self.number_of_shards):\n for (t1, t2) in zip(input_tensors[0], input_tensors[i]):\n if t1.dtype != t2.dtype:\n raise TypeError(\n \"types of the tuple elements of input_tensors %s are not \"\n \"consistent\" % str(input_tensors))\n self.set_tuple_types([t.dtype for t in input_tensors[0]])\n\n def freeze(self):\n \"\"\"Freezes the InfeedQueue so it can no longer be modified.\n\n The configuration is implicitly frozen before any host-side or\n device-side Ops are generated. 
The configuration cannot be frozen\n until the types and shapes of the tuple elements have been set.\n\n Raises:\n ValueError: if the types or shapes of the tuple elements have not been\n set.\n \"\"\"\n self._frozen = True\n if self._tuple_types is None:\n raise ValueError(\n \"Can't freeze an InfeedQueue without setting all tuple types.\")\n if self._tuple_shapes is None:\n raise ValueError(\n \"Can't freeze an InfeedQueue without setting all tuple shapes.\")\n for shape in self._tuple_shapes:\n if shape.dims is None:\n raise ValueError(\n \"Can't freeze an InfeedQueue without setting all tuple shapes.\")\n for policy in self._sharding_policies:\n policy.freeze()\n self._validate()\n\n def generate_dequeue_op(self):\n \"\"\"Generates the device-side Op to dequeue a tuple from the queue.\n\n Implicitly freezes the queue configuration if it is not already\n frozen, which will raise errors if the shapes and types have not\n been fully specified.\n\n Returns:\n A list of Outputs corresponding to a shard of infeed dequeued\n into XLA, suitable for use within a replicated block.\n\n Raises:\n ValueError: if the types or shapes of the tuple elements have not been\n set; or if a dequeue op has already been generated.\n \"\"\"\n self.freeze()\n if self._generated_dequeue_op:\n raise ValueError(\"Can't generate two dequeue Ops from the same queue\")\n self._generated_dequeue_op = True\n full_name = \"%s/dequeue\" % self._name\n sharded_shapes = [\n policy.get_sharded_shape(shape)\n for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)\n ]\n return tpu_ops.infeed_dequeue_tuple(\n dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n\n def _generate_enqueue_op(self,\n inputs,\n name_prefix,\n index,\n device=None,\n tpu_ordinal=-1):\n \"\"\"Generate a host-side Op to enqueue a tuple to the queue.\n\n If device is None the inputs are all required to have the same\n device specification, and the enqueue Op is colocated with\n inputs[0]. Otherwise the enqueue Op is placed on 'device'.\n\n Args:\n inputs: a list of Tensors with the types and shapes of the tuple elements.\n name_prefix: the base name for the Op.\n index: the shard index, used to uniquify the Op name.\n device: device to place the Op on, or None if it should be\n colocated with the inputs.\n tpu_ordinal: ordinal of the TPU device on the host to use for\n infeed if device is a CPU device. 
Should be set to -1 if device\n is a TPU device.\n\n Returns:\n An Op corresponding to a shard of infeed enqueued at the host,\n suitable for use within a replicated block.\n\n Raises:\n ValueError: if device is None and inputs do not all have the\n same device specification.\n \"\"\"\n full_name = \"%s/%d\" % (name_prefix, index)\n shapes = [t.shape for t in inputs]\n if device is None:\n devices = [t.device for t in inputs]\n for i in xrange(1, self.number_of_tuple_elements):\n if devices[0] != devices[i]:\n raise ValueError(\n \"input devices for shard %d are %s, but should all be the same\",\n index, str(devices))\n with ops.colocate_with(inputs[0]):\n return tpu_ops.infeed_enqueue_tuple(\n inputs=inputs,\n shapes=shapes,\n name=full_name,\n device_ordinal=tpu_ordinal)\n else:\n with ops.device(device):\n return tpu_ops.infeed_enqueue_tuple(\n inputs=inputs,\n shapes=shapes,\n name=full_name,\n device_ordinal=tpu_ordinal)\n\n def generate_enqueue_ops(self, sharded_inputs, tpu_ordinal_function=None):\n \"\"\"Generates the host-side Ops to enqueue the shards of a tuple.\n\n sharded_inputs is a list, one for each shard, of lists of\n Tensors. sharded_inputs[0] is the tuple of Tensors to use to feed\n shard 0 if the queue. Returns the host-side Ops that must be run to\n enqueue the sharded tuple. The Op for shard i is colocated with the inputs\n for shard i.\n\n Implicitly freezes the queue configuration if it is not already\n frozen. If the configuration has already been frozen, and is not\n compatible with the types and shapes of sharded_inputs, an error\n will be raised.\n\n Args:\n sharded_inputs: a list of lists of Tensors. The length of the outer list\n determines the number of shards. Each inner list indicates the types\n and shapes of the tuples in the corresponding shard.\n tpu_ordinal_function: if not None, a function that takes the\n shard index as input and returns the ordinal of the TPU device\n the shard's infeed should be placed on. 
tpu_ordinal_function must be\n set if the inputs are placed on CPU devices.\n\n Returns:\n A list of host-side Ops, one for each shard, that when executed together\n will enqueue a full-size element of infeed.\n\n Raises:\n ValueError: if the queue configuration has previously been frozen and the\n shapes of the elements of sharded_inputs are not compatible with the\n frozen configuration; or if the shapes of the elements of sharded_inputs\n don't form a consistent unsharded tuple; or if the elements of a tuple\n have different device constraints.\n TypeError: if the queue configuration has previously been frozen and the\n types of the elements of sharded_inputs are not compatible with the\n frozen configuration; or if the types of the elements of sharded_inputs\n don't form a consistent unsharded tuple.\n \"\"\"\n self.set_configuration_from_sharded_input_tensors(sharded_inputs)\n self.freeze()\n if self._generated_enqueue_ops:\n raise ValueError(\"Can't generate two enqueue Ops from the same queue\")\n self._generated_enqueue_ops = True\n if tpu_ordinal_function is None:\n tpu_ordinal_function = lambda index: -1\n name_prefix = \"%s/enqueue\" % self._name\n return [\n self._generate_enqueue_op(shard, name_prefix, index,\n tpu_ordinal=tpu_ordinal_function(index))\n for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))\n ]\n\n # TODO(misard) Generalize this to the case of systems that don't\n # have 8 devices per host, and figure out what to do with\n # model-parallelism.\n def _default_placement_function(self, index):\n return \"/task:%d/device:CPU:0\" % (index / 8)\n\n def _default_ordinal_function(self, index):\n return index % 8\n\n # TODO(b/36470756) remove this from tutorials once we have a better story\n # for automatic placement of input pipelines.\n def split_inputs_and_generate_enqueue_ops(self,\n inputs,\n global_tpu_id=None,\n placement_function=None,\n tpu_ordinal_function=None):\n \"\"\"POORLY-PERFORMING ON MULTI-HOST SYSTEMS.\n\n Generates the host-side Ops to enqueue a tuple.\n\n This method performs poorly because it takes an entire input on a single\n host, splits it, and distributes it to all of the cores. It is present only\n to simplify tutorial examples.\n\n inputs is a list of Tensors to use to feed the queue. Each input is split\n into self.number_of_shards shards. Returns an Op for each shard to enqueue\n the shard. The Op for shard i is placed on device placement_function(i).\n\n Implicitly freezes the queue configuration if it is not already\n frozen. If the configuration has already been frozen, and is not\n compatible with the types and shapes of inputs, an error\n will be raised.\n\n Args:\n inputs: a list of Tensors which indicates the types and shapes of the\n queue tuple.\n global_tpu_id: if not None, a Numpy 2D array indicating the global\n id of each TPU device in the system. The outer dimension of the\n array is host task id, and the inner dimension is device ordinal,\n so e.g., global_tpu_id[x][y] indicates the global id of device\n /task:x/device:TPU_NODE:y. If global_tpu_id is not None, but\n placement_function and ordinal_function are None, then global_tpu_id\n will be used to place infeed on the TPUs with the first k global ids,\n where k is the number of shards in the queue.\n placement_function: if not None, a function that takes the shard\n index as input and returns a device string indicating which\n device the shard's infeed should be placed on. 
If placement_function\n and tpu_ordinal_function are None, inputs are sharded round-robin\n across the devices in the system.\n tpu_ordinal_function: if not None, a function that takes the\n shard index as input and returns the ordinal of the TPU device\n the shard's infeed should be placed on. If placement_function\n and tpu_ordinal_function are None, inputs are sharded round-robin\n across the devices in the system.\n\n Returns:\n A list of host-side Ops, one for each shard, that when executed together\n will enqueue a full-size element of infeed.\n\n Raises:\n ValueError: if the queue configuration has previously been frozen and the\n shapes of the elements of inputs are not compatible with the frozen\n configuration.\n TypeError: if the queue configuration has previously been frozen and the\n types of the elements of inputs are not compatible with the frozen\n configuration.\n \"\"\"\n if global_tpu_id is None:\n if placement_function is None:\n placement_function = self._default_placement_function\n if tpu_ordinal_function is None:\n tpu_ordinal_function = self._default_ordinal_function\n else:\n global_id_map = {}\n for host, devices in enumerate(global_tpu_id):\n for ordinal, global_id in enumerate(devices):\n global_id_map[global_id] = (host, ordinal)\n\n def _placement_function_from_map(index):\n return \"/task:%d/device:CPU:0\" % global_id_map[index][0]\n\n def _ordinal_function_from_map(index):\n return global_id_map[index][1]\n\n if placement_function is None:\n placement_function = _placement_function_from_map\n if tpu_ordinal_function is None:\n tpu_ordinal_function = _ordinal_function_from_map\n self.set_configuration_from_input_tensors(inputs)\n self.freeze()\n if self._generated_enqueue_ops:\n raise ValueError(\"Can't generate two enqueue Ops from the same queue\")\n self._generated_enqueue_ops = True\n split_name_prefix = \"%s/split\" % self._name\n if self.number_of_shards == 1:\n transposed_sharded_inputs = [[inp] for inp in inputs]\n else:\n\n def split_fn(inp, num_shards, axis, name):\n with ops.colocate_with(inp):\n return array_ops.split(inp, num_shards, axis=axis, name=name)\n\n transposed_sharded_inputs = [\n split_fn(\n inp,\n self.number_of_shards,\n axis=policy.shard_dimension,\n name=\"%s/%d\" % (split_name_prefix, index))\n for (inp, policy, index) in zip(inputs, self._sharding_policies,\n xrange(self.number_of_tuple_elements))\n ]\n sharded_inputs = [[shard[i] for shard in transposed_sharded_inputs]\n for i in xrange(self.number_of_shards)]\n name_prefix = \"%s/enqueue\" % self._name\n return [\n self._generate_enqueue_op(\n shard,\n name_prefix,\n index,\n device=placement_function(index),\n tpu_ordinal=tpu_ordinal_function(index))\n for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))\n ]\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Cudnn RNN models.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn\nfrom tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops\nfrom tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn_lib\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework.test_util import TensorFlowTestCase\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gradients_impl as gradients\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import rnn as rnn_lib\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import saver as saver_lib\n\nCUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM\nCUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU\nCUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU\nCUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH\nCUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION\nCUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION\n\nCUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER\nCUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER\nCUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER\nCUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER\n\n\nclass CudnnTestModel(object):\n \"\"\"Model with convenient APIs for easier building and running test graph.\n\n The graph built is used by all tests below to avoid repeatedly building\n similar test graphs.\n \"\"\"\n\n def __init__(self,\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0.,\n dtype=dtypes.float32,\n training=False,\n kernel_initializer=None,\n bias_initializer=None):\n if dtype not in (dtypes.float32, dtypes.float64):\n raise ValueError(\"Invalid dtype: %s\" % dtype)\n self._dtype = dtype\n\n self._inputs = array_ops.placeholder(\n dtype=dtype, shape=[None, None, input_size], name=\"inputs\")\n h = array_ops.placeholder(\n dtype=dtype, shape=[None, None, 
num_units], name=\"h\")\n c = array_ops.placeholder(\n dtype=dtype, shape=[None, None, num_units], name=\"c\")\n if rnn_mode == CUDNN_LSTM:\n model_fn = cudnn_rnn.CudnnLSTM\n self._initial_state = (h, c)\n elif rnn_mode == CUDNN_GRU:\n model_fn = cudnn_rnn.CudnnGRU\n self._initial_state = (h,)\n elif rnn_mode == CUDNN_RNN_TANH:\n model_fn = cudnn_rnn.CudnnRNNTanh\n self._initial_state = (h,)\n elif rnn_mode == CUDNN_RNN_RELU:\n model_fn = cudnn_rnn.CudnnRNNRelu\n self._initial_state = (h,)\n else:\n raise ValueError(\"Invalid rnn_mode: %s\" % rnn_mode)\n self._rnn = model_fn(\n num_layers,\n num_units,\n direction=direction,\n dropout=dropout,\n dtype=dtype,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer)\n self._rnn.build([None, None, input_size])\n\n self._outputs, self._output_state = self._rnn(\n self._inputs, initial_state=self._initial_state, training=training)\n\n def _AddUp(self, outputs, output_state):\n total = math_ops.reduce_sum(outputs)\n for s in output_state:\n total += math_ops.reduce_sum(s)\n return total\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def outputs(self):\n return self._outputs\n\n @property\n def output_state(self):\n return self._output_state\n\n @property\n def rnn(self):\n return self._rnn\n\n @property\n def total_sum(self):\n return self._AddUp(self.outputs, self.output_state)\n\n def SynthesizeInput(self, seq_length, batch_size, seed=1234):\n \"\"\"Synthesizes input and initial state values for testing.\"\"\"\n np.random.seed(seed)\n num_layers = self._rnn.num_layers\n dir_count = self._rnn.num_dirs\n num_units = self._rnn.num_units\n input_size = self._rnn.input_size\n\n np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64\n inputs = np.random.randn(seq_length, batch_size,\n input_size).astype(np_dtype)\n input_h = np.random.randn(num_layers * dir_count, batch_size,\n num_units).astype(np_dtype)\n if self._rnn.rnn_mode == CUDNN_LSTM:\n input_c = np.random.randn(num_layers * dir_count, batch_size,\n num_units).astype(np_dtype)\n initial_state = (input_h, input_c)\n else:\n initial_state = (input_h,)\n return inputs, initial_state\n\n def ZeroState(self, batch_size):\n num_layers = self._rnn.num_layers\n dir_count = self._rnn.num_dirs\n num_units = self._rnn.num_units\n\n np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64\n input_h = np.zeros((num_layers * dir_count, batch_size,\n num_units)).astype(np_dtype)\n if self._rnn.rnn_mode == CUDNN_LSTM:\n input_c = np.zeros((num_layers * dir_count, batch_size,\n num_units)).astype(np_dtype)\n initial_state = (input_h, input_c)\n else:\n initial_state = (input_h,)\n return initial_state\n\n def FProp(self, inputs_t, initial_state_t, training):\n \"\"\"Builds additional subgraph with given inputs and state.\n\n Args:\n inputs_t: a tensor.\n initial_state_t: a tensor.\n training: boolean, true if training mode.\n Returns:\n A tensor of the forward pass output of the model.\n \"\"\"\n outputs, output_state = self._rnn(\n inputs_t, initial_state=initial_state_t, training=training)\n return self._AddUp(outputs, output_state)\n\n def Feed(self, sess, inputs, initial_state=None, return_sum=True):\n \"\"\"Runs graph with given inputs and initial state.\"\"\"\n batch_size = inputs.shape[1]\n if initial_state is None:\n initial_state = self.ZeroState(batch_size)\n if return_sum:\n return sess.run(\n self.total_sum,\n feed_dict={self.inputs: inputs,\n 
self.initial_state: initial_state})\n else:\n return sess.run(\n [self.outputs, self.output_state],\n feed_dict={self.inputs: inputs,\n self.initial_state: initial_state})\n\n\ndef _CreateCudnnCompatibleCanonicalRNN(rnn, inputs, is_bidi=False, scope=None):\n mode = rnn.rnn_mode\n num_units = rnn.num_units\n num_layers = rnn.num_layers\n\n # To reuse cuDNN-trained models, must use cudnn compatible rnn cells.\n if mode == CUDNN_LSTM:\n single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMCell(num_units)\n elif mode == CUDNN_GRU:\n single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleGRUCell(num_units)\n elif mode == CUDNN_RNN_TANH:\n single_cell = (lambda: rnn_cell_impl.BasicRNNCell(num_units, math_ops.tanh))\n elif mode == CUDNN_RNN_RELU:\n single_cell = (\n lambda: rnn_cell_impl.BasicRNNCell(num_units, gen_nn_ops.relu))\n else:\n raise ValueError(\"%s is not supported!\" % mode)\n\n if not is_bidi:\n cell = rnn_cell_impl.MultiRNNCell(\n [single_cell() for _ in range(num_layers)])\n return rnn_lib.dynamic_rnn(\n cell, inputs, dtype=dtypes.float32, time_major=True, scope=scope)\n else:\n cells_fw = [single_cell() for _ in range(num_layers)]\n cells_bw = [single_cell() for _ in range(num_layers)]\n\n (outputs, output_state_fw,\n output_state_bw) = contrib_rnn_lib.stack_bidirectional_dynamic_rnn(\n cells_fw,\n cells_bw,\n inputs,\n dtype=dtypes.float32,\n time_major=True,\n scope=scope)\n return outputs, (output_state_fw, output_state_bw)\n\n\nclass CudnnRNNTestBasic(TensorFlowTestCase):\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testLayerBasic(self):\n num_layers = 4\n num_units = 2\n batch_size = 8\n direction = CUDNN_RNN_UNIDIRECTION\n dir_count = 1\n\n with vs.variable_scope(\"main\"):\n kernel_initializer = init_ops.constant_initializer(0.)\n bias_initializer = init_ops.constant_initializer(0.)\n inputs = random_ops.random_uniform([\n num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)\n\n lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,\n direction=direction,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n name=\"awesome_lstm\")\n\n # Build the layer\n outputs1, _ = lstm(inputs)\n # Reuse the layer\n outputs2, _ = lstm(inputs)\n\n total_sum1 = math_ops.reduce_sum(outputs1)\n total_sum2 = math_ops.reduce_sum(outputs2)\n\n with vs.variable_scope(\"main\", reuse=True):\n lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,\n direction=direction,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n name=\"awesome_lstm\")\n\n # Reuse the layer\n outputs3, _ = lstm(inputs)\n total_sum3 = math_ops.reduce_sum(outputs3)\n\n self.assertEqual(1, len(variables.trainable_variables()))\n self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)))\n self.assertEqual(\"main/awesome_lstm/opaque_kernel\",\n variables.trainable_variables()[0].op.name)\n\n with self.test_session(use_gpu=True) as sess:\n sess.run(variables.global_variables_initializer())\n (total_sum1_v, total_sum2_v, total_sum3_v) = sess.run(\n [total_sum1, total_sum2, total_sum3])\n self.assertEqual(0, total_sum1_v)\n self.assertEqual(0, total_sum2_v)\n self.assertEqual(0, total_sum3_v)\n\n\n# TODO(jamesqin): Transform to parameterized test after it is included in the\n# TF open source codebase.\nclass CudnnRNNTestSaveRestore(TensorFlowTestCase):\n\n def _CompareWeights(self, lhs, rhs):\n self.assertEqual(len(lhs), len(rhs))\n for lw, rw in zip(lhs, rhs):\n self.assertAllEqual(lw, 
rw)\n\n def _CompareBiases(self, lhs, rhs, rnn_mode, num_layers, direction):\n self.assertEqual(len(lhs), len(rhs))\n if rnn_mode == CUDNN_LSTM:\n num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER\n elif rnn_mode == CUDNN_GRU:\n num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER\n elif rnn_mode == CUDNN_RNN_TANH:\n num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER\n else:\n num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER\n num_dirs = 1 if direction == CUDNN_RNN_UNIDIRECTION else 2\n num_params_per_layer *= num_dirs\n self.assertEqual(num_params_per_layer * num_layers, len(lhs))\n\n for i in range(num_layers):\n layer_lhs = lhs[i * num_params_per_layer: (i+1) * num_params_per_layer]\n layer_rhs = rhs[i * num_params_per_layer: (i+1) * num_params_per_layer]\n if direction == CUDNN_RNN_UNIDIRECTION:\n self._CompareSingleLayerBiases(layer_lhs, layer_rhs)\n else:\n size = len(layer_lhs)\n fw_lhs, bw_lhs = layer_lhs[:size//2], layer_lhs[size//2:]\n fw_rhs, bw_rhs = layer_rhs[:size//2], layer_rhs[size//2:]\n self._CompareSingleLayerBiases(fw_lhs, fw_rhs)\n self._CompareSingleLayerBiases(bw_lhs, bw_rhs)\n\n def _CompareSingleLayerBiases(self, lhs, rhs):\n self.assertEqual(len(lhs), len(rhs))\n\n lf_lhs, rt_lhs = lhs[:len(lhs)//2], lhs[len(lhs)//2:]\n lf_rhs, rt_rhs = rhs[:len(rhs)//2], rhs[len(rhs)//2:]\n self.assertEqual(len(lf_lhs), len(rt_lhs))\n self.assertEqual(len(lf_rhs), len(rt_rhs))\n\n sum_lhs, sum_rhs = [], []\n for lf, rt in zip(lf_lhs, rt_lhs):\n sum_lhs.append(lf + rt)\n for lf, rt in zip(lf_rhs, rt_rhs):\n sum_rhs.append(lf + rt)\n self.assertEqual(len(sum_lhs), len(sum_rhs))\n for lf, rt in zip(sum_lhs, sum_rhs):\n self.assertAllEqual(lf, rt)\n\n def _TestSaveRestoreVariable(self, rnn_mode, direction, dtype):\n input_size = 3\n num_layers = 2\n num_units = 7\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(1234)\n model = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=direction,\n dtype=dtype)\n rnn = model.rnn\n save_path = os.path.join(self.get_temp_dir(),\n \"save-restore-variable-test\")\n saver = saver_lib.Saver()\n weights, biases = model.rnn.saveable._OpaqueParamsToCanonical()\n opaque_params = rnn.trainable_variables[0]\n # CudnnTestModel() creates CudnnOpaqueParamsSaveable that helps saver save\n # Cudnn vars in canonical format.\n reset_op = state_ops.assign(\n opaque_params,\n array_ops.zeros(array_ops.shape(opaque_params), dtype=dtype))\n # Passing graph explicitly, otherwise an old sess would be reused.\n with self.test_session(use_gpu=True, graph=g) as sess:\n sess.run(variables.global_variables_initializer())\n val = saver.save(sess, save_path)\n self.assertEqual(save_path, val)\n weights_v, biases_v = sess.run([weights, biases])\n\n # Reset opaque param\n sess.run(reset_op)\n saver.restore(sess, save_path)\n weights_v_restored, biases_v_restored = sess.run([weights, biases])\n\n self._CompareWeights(weights_v, weights_v_restored)\n self._CompareBiases(biases_v, biases_v_restored, rnn_mode, num_layers,\n direction)\n\n def _TestSaveRestoreTwoVariables(self, rnn_mode, direction, dtype):\n input_size = 3\n num_layers = 2\n num_units = 7\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(1234)\n with vs.variable_scope(\"m1\"):\n model1 = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=direction,\n dtype=dtype)\n with vs.variable_scope(\"m2\"):\n model2 = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=direction,\n 
dtype=dtype)\n opaque_params = (model1.rnn.trainable_variables[0],\n model2.rnn.trainable_variables[0])\n weights1, biases1 = model1.rnn.saveable._OpaqueParamsToCanonical()\n weights2, biases2 = model2.rnn.saveable._OpaqueParamsToCanonical()\n reset_params = [\n state_ops.assign(params,\n array_ops.zeros_like(params, dtype=dtype))\n for params in opaque_params\n ]\n reset_op = control_flow_ops.group(*reset_params)\n save_path = os.path.join(self.get_temp_dir(),\n \"save-restore-variable-test2\")\n saver = saver_lib.Saver()\n # Passing graph explicitly, otherwise an old sess would be reused.\n with self.test_session(use_gpu=True, graph=g) as sess:\n sess.run(variables.global_variables_initializer())\n val = saver.save(sess, save_path)\n self.assertEqual(save_path, val)\n\n weights1_v, biases1_v = sess.run([weights1, biases1])\n weights2_v, biases2_v = sess.run([weights2, biases2])\n\n sess.run(reset_op)\n saver.restore(sess, save_path)\n weights1_v_restored, biases1_v_restored = sess.run([weights1, biases1])\n weights2_v_restored, biases2_v_restored = sess.run([weights2, biases2])\n\n self._CompareWeights(weights1_v, weights1_v_restored)\n self._CompareWeights(weights2_v, weights2_v_restored)\n self._CompareBiases(biases1_v, biases1_v_restored, rnn_mode, num_layers,\n direction)\n self._CompareBiases(biases2_v, biases2_v_restored, rnn_mode, num_layers,\n direction)\n\n def _TestSaveRestoreOutput(self, rnn_mode, direction, dtype):\n with ops.Graph().as_default() as g:\n num_layers = 2\n num_units = 7\n input_size = 7\n seq_length = 8\n batch_size = 4\n model = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=direction,\n dtype=dtype,\n training=False)\n rnn = model.rnn\n\n save_path = os.path.join(self.get_temp_dir(), \"save-restore-output-test\")\n saver = saver_lib.Saver()\n\n # Only one opaque var in a cudnn layer.\n assert len(rnn.trainable_variables) == 1\n reset_params = state_ops.assign(\n rnn.trainable_variables[0],\n array_ops.zeros(\n array_ops.shape(rnn.trainable_variables[0]), dtype=dtype))\n\n # Passing graph explicitly, otherwise an old sess would be reused.\n with self.test_session(use_gpu=True, graph=g) as sess:\n sess.run(variables.global_variables_initializer())\n inputs, initial_state = model.SynthesizeInput(seq_length, batch_size)\n total_sum_v = model.Feed(sess, inputs, initial_state)\n val = saver.save(sess, save_path)\n self.assertEqual(save_path, val)\n\n sess.run(reset_params)\n saver.restore(sess, save_path)\n total_sum_v_restored = model.Feed(sess, inputs, initial_state)\n self.assertAllClose(total_sum_v, total_sum_v_restored, atol=1e-5)\n\n def _TestSaveRestoreHelper(self, rnn_mode):\n directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]\n dtype_list = [dtypes.float32, dtypes.float64]\n for direction, dtype in itertools.product(directions, dtype_list):\n self._TestSaveRestoreVariable(rnn_mode, direction, dtype)\n self._TestSaveRestoreTwoVariables(rnn_mode, direction, dtype)\n self._TestSaveRestoreOutput(rnn_mode, direction, dtype)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSaveRestoreRepeatedlyCreateCustomSaveable(self):\n input_size = 3\n num_layers = 2\n num_units = 7\n with ops.Graph().as_default():\n random_seed.set_random_seed(1234)\n model = CudnnTestModel(\n CUDNN_LSTM,\n num_layers,\n num_units,\n input_size,\n direction=CUDNN_RNN_UNIDIRECTION,\n dtype=dtypes.float32)\n with self.assertRaisesRegexp(RuntimeError,\n \"Cudnn saveable already 
created\"):\n model.rnn._create_saveable()\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSaveRestoreLSTM(self):\n self._TestSaveRestoreHelper(CUDNN_LSTM)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSaveRestoreGRU(self):\n self._TestSaveRestoreHelper(CUDNN_GRU)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSaveRestoreRNNTanh(self):\n self._TestSaveRestoreHelper(CUDNN_RNN_TANH)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSaveRestoreRNNRelu(self):\n self._TestSaveRestoreHelper(CUDNN_RNN_RELU)\n\n\n# TODO(jamesqin): Transform to parameterized test after it is included in the\n# TF open source codebase.\nclass CudnnRNNTestCompatibleRNNCells(TensorFlowTestCase):\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testCudnnCompatibleLSTM(self):\n self._TestCudnnCompatibleRnnCellsHelper(CUDNN_LSTM)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testCudnnCompatibleGRU(self):\n self._TestCudnnCompatibleRnnCellsHelper(CUDNN_GRU)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testCudnnCompatibleRNNTanh(self):\n self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_TANH)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testCudnnCompatibleRNNRelu(self):\n self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_RELU)\n\n def _TestCudnnCompatibleRnnCellsHelper(self, rnn_mode):\n configs = [\n {\n \"num_layers\": 1,\n \"seq_length\": 3,\n \"num_units\": 4,\n \"input_size\": 5,\n \"batch_size\": 6,\n },\n {\n \"num_layers\": 2,\n \"seq_length\": 8,\n \"num_units\": 4,\n \"input_size\": 8,\n \"batch_size\": 16,\n },\n {\n \"num_layers\": 2,\n \"seq_length\": 3,\n \"num_units\": 4,\n \"input_size\": 5,\n \"batch_size\": 6,\n },\n {\n \"num_layers\": 1,\n \"seq_length\": 2,\n \"num_units\": 2,\n \"input_size\": 4,\n \"batch_size\": 1,\n },\n ]\n directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]\n for cfg, direction in zip(configs, directions):\n self._TestCudnnCompatibleRnnCells(cfg[\"num_layers\"], cfg[\"seq_length\"],\n cfg[\"num_units\"], cfg[\"input_size\"],\n cfg[\"batch_size\"], rnn_mode, direction)\n\n def _TestCudnnCompatibleRnnCells(self, num_layers, seq_length, num_units,\n input_size, batch_size, rnn_mode, direction):\n dtype = dtypes.float32\n # Train graph\n with ops.Graph().as_default() as g:\n model = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=direction,\n dtype=dtype,\n training=True)\n target_output = array_ops.placeholder(dtype=dtype)\n loss_op = losses.log_loss(\n labels=target_output, predictions=model.total_sum)\n optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)\n train_op = optimizer.minimize(loss_op)\n\n saver = saver_lib.Saver()\n\n # Train Cudnn model\n seed = 0\n with self.test_session(use_gpu=True, graph=g) as sess:\n sess.run(variables.global_variables_initializer())\n # Train 128 steps\n num_steps = 128\n for _ in range(num_steps):\n inputs, _ = model.SynthesizeInput(seq_length, batch_size, seed)\n targets = np.random.rand()\n sess.run(\n train_op,\n feed_dict={\n model.inputs: inputs,\n model.initial_state: 
model.ZeroState(batch_size),\n target_output: targets\n })\n seed += 1\n\n save_path = os.path.join(self.get_temp_dir(),\n (\"cudnn-rnn-%s-test\" % rnn_mode))\n save_v = saver.save(sess, save_path)\n self.assertEqual(save_path, save_v)\n\n # Cudnn inference graph\n with ops.Graph().as_default() as g:\n model = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=direction,\n dtype=dtype,\n training=False)\n rnn = model.rnn\n saver = saver_lib.Saver()\n\n inference_input = np.random.rand(seq_length, batch_size,\n input_size).astype(np.float32)\n with self.test_session(use_gpu=True, graph=g) as sess:\n sess.run(variables.global_variables_initializer())\n saver.restore(sess, save_path)\n\n # Cudnn inference\n cudnn_outputs_v, cudnn_output_states_v = model.Feed(\n sess, inference_input, return_sum=False)\n\n # Canonical RNN inference graph\n with ops.Graph().as_default() as g:\n cell_inputs = array_ops.placeholder(\n dtype, shape=[seq_length, batch_size, input_size])\n if direction == CUDNN_RNN_UNIDIRECTION:\n # outputs is one tensor, states are num_layer tuples, each 2 tensors\n (outputs, states) = _CreateCudnnCompatibleCanonicalRNN(rnn, cell_inputs)\n if rnn_mode == CUDNN_LSTM:\n output_h = array_ops.stack([s.h for s in states])\n output_c = array_ops.stack([s.c for s in states])\n else:\n output_state = array_ops.stack([s for s in states])\n else:\n # outputs is one tensor.\n # states is a tuple of 2 tuples:\n # each sub tuple is num_layer tuples, each with 2 tensors.\n (outputs, states) = _CreateCudnnCompatibleCanonicalRNN(\n rnn, cell_inputs, is_bidi=True)\n output_state_fw, output_state_bw = states\n if rnn_mode == CUDNN_LSTM:\n output_h, output_c = [], []\n for s_fw, s_bw in zip(output_state_fw, output_state_bw):\n output_h.append(array_ops.stack([s_fw.h, s_bw.h]))\n output_c.append(array_ops.stack([s_fw.c, s_bw.c]))\n output_h = array_ops.concat(output_h, axis=0)\n output_c = array_ops.concat(output_c, axis=0)\n else:\n output_state = []\n for s_fw, s_bw in zip(output_state_fw, output_state_bw):\n output_state.append(array_ops.stack([s_fw, s_bw]))\n output_state = array_ops.concat(output_state, axis=0)\n saver = saver_lib.Saver()\n\n with self.test_session(use_gpu=True, graph=g) as sess:\n saver.restore(sess, save_path)\n\n # BlockCell inference\n if rnn_mode == CUDNN_LSTM:\n outputs_v, output_h_v, output_c_v = sess.run(\n [outputs, output_h, output_c],\n feed_dict={cell_inputs: inference_input})\n self.assertAllClose(cudnn_outputs_v, outputs_v)\n cudnn_output_h_v, cudnn_output_c_v = cudnn_output_states_v\n self.assertAllClose(cudnn_output_h_v, output_h_v)\n self.assertAllClose(cudnn_output_c_v, output_c_v)\n else:\n outputs_v, output_state_v = sess.run(\n [outputs, output_state],\n feed_dict={cell_inputs: inference_input})\n self.assertAllClose(cudnn_outputs_v, outputs_v, atol=1e-5, rtol=1e-5)\n (cudnn_output_h_v,) = cudnn_output_states_v\n self.assertAllClose(cudnn_output_h_v, output_state_v, atol=1e-5,\n rtol=1e-5)\n\n\nclass CudnnRNNTestParamsSize(TensorFlowTestCase):\n\n def _TestOpaqueParamsSize(self, rnn_mode, num_layers, num_units, input_size,\n direction):\n logging.info(\"Testing one lstm param size with config: %s\", locals())\n dtype = dtypes.float32\n\n model = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n dtype=dtype,\n direction=direction)\n rnn = model.rnn\n\n # Min param size estimate = sum(weights.size) + sum(biases.size)\n min_params_size = (\n np.sum(map(np.prod, rnn.canonical_weight_shapes)) +\n np.sum([sp[0] for 
sp in rnn.canonical_bias_shapes]))\n\n opaque_params = rnn.trainable_variables[0]\n with self.test_session(use_gpu=True, graph=ops.get_default_graph()):\n variables.global_variables_initializer().run()\n opaque_params_size_v = opaque_params.eval().size\n self.assertLessEqual(min_params_size, opaque_params_size_v)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testOpaqueParamsSize(self):\n test_configs = [\n [4, 200, 200],\n [4, 200, 300],\n [4, 200, 100],\n [1, 100, 200],\n [2, 200, 100],\n [3, 200, 400],\n ]\n directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]\n rnns = [CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_RELU, CUDNN_RNN_TANH]\n for (rnn, config, direction) in itertools.product(rnns, test_configs,\n directions):\n num_layers, num_units, input_size = config\n with ops.Graph().as_default():\n self._TestOpaqueParamsSize(rnn, num_layers, num_units, input_size,\n direction)\n\n\nclass CudnnRNNTestTraining(TensorFlowTestCase):\n\n def _ComputeNumericGrad(self, sess, y, x, delta=1e-4, step=1):\n \"\"\"Compute the numeric gradient of y wrt to x.\n\n Args:\n sess: The TF session constructed with a graph containing x and y.\n y: A scalar TF Tensor in the graph constructed in sess.\n x: A TF Tensor in the graph constructed in sess.\n delta: Gradient checker's small perturbation of x[i].\n step: Only compute numerical gradients for a subset of x values.\n I.e. dy/dx[i] is computed if i % step == 0.\n Returns:\n A Tensor of the same shape and dtype as x. If x[i] is not chosen\n to compute the numerical gradient dy/x[i], the corresponding\n value is set to 0.\n \"\"\"\n\n x_data = sess.run(x)\n x_size = x_data.size\n x_shape = x_data.shape\n\n numeric_grad = np.zeros(x_size, dtype=x_data.dtype)\n\n for i in range(0, x_size, step):\n x_pos = x_data.copy()\n if x_size == 1:\n x_pos += delta\n else:\n x_pos.flat[i] += delta\n y_pos_feed_dict = dict([(x.name, x_pos)])\n y_pos = sess.run(y, feed_dict=y_pos_feed_dict)\n\n x_neg = x_data.copy()\n if x_size == 1:\n x_neg -= delta\n else:\n x_neg.flat[i] -= delta\n y_neg_feed_dict = dict([(x.name, x_neg)])\n y_neg = sess.run(y, feed_dict=y_neg_feed_dict)\n numeric_grad[i] = (y_pos - y_neg) / (2 * delta)\n return numeric_grad.reshape(x_shape)\n\n def _GradientCheck(self, sess, y, xs, tolerance=1e-6, delta=1e-4):\n sym_grads_t = gradients.gradients(y, xs)\n sym_grads = sess.run(sym_grads_t)\n\n num_grads = [self._ComputeNumericGrad(sess, y, x, delta) for x in xs]\n self.assertEqual(len(sym_grads), len(num_grads))\n for sym, num in zip(sym_grads, num_grads):\n self.assertFalse(np.any(np.isnan(sym)))\n self.assertFalse(np.any(np.isnan(num)))\n self.assertAllClose(sym, num, atol=tolerance, rtol=tolerance)\n\n def _TestOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size,\n batch_size, seq_length, dir_count, dropout, dtype,\n delta, tolerance):\n # Gradient checking runs two forward ops with almost the same input. 
Need to\n # make sure the drop patterns across the two runs are the same.\n logging.info(\"Training test with config: %s\", locals())\n old_env_state = os.environ.get(\"TF_CUDNN_RESET_RND_GEN_STATE\", str(False))\n os.environ[\"TF_CUDNN_RESET_RND_GEN_STATE\"] = str(True)\n random_seed.set_random_seed(5678)\n has_input_c = (rnn_mode == CUDNN_LSTM)\n direction = (CUDNN_RNN_UNIDIRECTION\n if dir_count == 1 else CUDNN_RNN_BIDIRECTION)\n model = CudnnTestModel(\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n direction=direction,\n dropout=dropout,\n dtype=dtype,\n training=True,\n bias_initializer=init_ops.random_normal_initializer(\n mean=1., dtype=dtype))\n rnn = model.rnn\n params = rnn.trainable_variables[0]\n\n inputs = variables.Variable(\n random_ops.random_uniform(\n [seq_length, batch_size, input_size], dtype=dtype),\n dtype=dtype)\n input_h = variables.Variable(\n random_ops.random_uniform(\n [num_layers * dir_count, batch_size, num_units], dtype=dtype),\n dtype=dtype)\n if has_input_c:\n input_c = variables.Variable(\n random_ops.random_uniform(\n [num_layers * dir_count, batch_size, num_units], dtype=dtype),\n dtype=dtype)\n initial_state = (input_h, input_c)\n else:\n initial_state = (input_h,)\n total_sum = model.FProp(inputs, initial_state, training=True)\n\n with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess:\n sess.run(variables.global_variables_initializer())\n all_inputs = [inputs, params]\n for s in initial_state:\n all_inputs.append(s)\n self._GradientCheck(\n sess, total_sum, all_inputs, tolerance=tolerance, delta=delta)\n os.environ[\"TF_CUDNN_RESET_RND_GEN_STATE\"] = old_env_state\n\n def _TestSimpleTrainingHelper(self, rnn_mode, test_configs):\n dropouts = [0., 0.5, 1.]\n for config, dropout in itertools.product(test_configs, dropouts):\n dtype = config.get(\"dtype\", dtypes.float32)\n delta = config.get(\"delta\", 1e-4)\n tolerance = config.get(\"tolerance\", 1e-6)\n dir_count = config.get(\"dir_count\", 1)\n shape = config[\"shape\"]\n with ops.Graph().as_default():\n self._TestOneSimpleTraining(rnn_mode, shape[\"num_layers\"],\n shape[\"num_units\"], shape[\"input_size\"],\n shape[\"batch_size\"], shape[\"seq_length\"],\n dir_count, dropout, dtype, delta, tolerance)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSimpleTrainingLSTM64(self):\n test_configs = [\n {\n \"dtype\": dtypes.float64,\n \"tolerance\": 5e-6,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n },\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSimpleTrainingLSTM32(self):\n test_configs = [\n {\n \"dtype\": dtypes.float32,\n \"delta\": 1e-4,\n \"tolerance\": 9e-2,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n },\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSimpleTrainingGRU64(self):\n test_configs = [\n {\n \"dtype\": dtypes.float64,\n \"tolerance\": 5e-6,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n }\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test 
only applicable when running on GPUs\")\n def testSimpleTrainingGRU32(self):\n test_configs = [\n {\n \"dtype\": dtypes.float32,\n \"delta\": 1e-3,\n \"tolerance\": 4e-3,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n },\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSimpleTrainingRNNTanh64(self):\n test_configs = [\n {\n \"dtype\": dtypes.float64,\n \"tolerance\": 5e-6,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n },\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSimpleTrainingRNNTanh32(self):\n test_configs = [\n {\n \"dtype\": dtypes.float32,\n \"delta\": 1e-3,\n \"tolerance\": 5e-3,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n },\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSimpleTrainingRNNRelu64(self):\n test_configs = [\n {\n \"dtype\": dtypes.float64,\n \"tolerance\": 5e-6,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n },\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)\n\n @unittest.skipUnless(test.is_built_with_cuda(),\n \"Test only applicable when running on GPUs\")\n def testSimpleTrainingRNNRelu32(self):\n test_configs = [\n {\n \"dtype\": dtypes.float32,\n \"delta\": 1e-3,\n \"tolerance\": 7e-2,\n \"shape\": {\n \"num_layers\": 2,\n \"num_units\": 3,\n \"input_size\": 4,\n \"batch_size\": 3,\n \"seq_length\": 4,\n },\n },\n ]\n self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n" ]
[ [ "tensorflow.python.ops.array_ops.split", "tensorflow.python.framework.ops.colocate_with", "tensorflow.contrib.tpu.python.ops.tpu_ops.infeed_dequeue_tuple", "tensorflow.contrib.tpu.python.ops.tpu_ops.infeed_enqueue_tuple", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.framework.ops.device", "tensorflow.contrib.tpu.python.tpu.tpu_sharding.ShardingPolicy", "tensorflow.python.framework.tensor_shape.as_shape" ], [ "tensorflow.python.ops.array_ops.shape", "tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops.CudnnCompatibleLSTMCell", "tensorflow.python.ops.array_ops.placeholder", "numpy.random.randn", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.contrib.rnn.python.ops.rnn.stack_bidirectional_dynamic_rnn", "tensorflow.python.platform.test.is_built_with_cuda", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.rnn_cell_impl.BasicRNNCell", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.platform.googletest.main", "numpy.zeros", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.contrib.cudnn_rnn.python.layers.cudnn_rnn.CudnnLSTM", "numpy.isnan", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.rnn.dynamic_rnn", "numpy.random.rand", "tensorflow.python.framework.random_seed.set_random_seed", "tensorflow.python.training.gradient_descent.GradientDescentOptimizer", "numpy.sum", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.losses.losses.log_loss", "numpy.random.seed", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.init_ops.random_normal_initializer", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops.CudnnCompatibleGRUCell", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.training.saver.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] } ]
falimoradi/E2E-MLT
[ "f14111238cc941a0d3411b00c72e58347229ba8d" ]
[ "fps.py" ]
[ "'''\nCreated on Aug 25, 2017\n\n@author: busta\n'''\n\nimport cv2, glob, os, codecs\nimport numpy as np\n\nfrom nms import get_boxes\n\nfrom models import ModelMLTRCTW\nimport net_utils\n\nfrom ocr_utils import ocr_image\nfrom data_gen import draw_box_points\nimport torch\n\nimport argparse\n\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\nimport time\n\n\nf = codecs.open('codec_mine2.txt', 'r', encoding='utf-8')\ncodec = f.readlines()[0]\nf.close()\n\ndef resize_image(im, max_size = 1585152, scale_up=True):\n\n if scale_up:\n image_size = [im.shape[1] * 3 // 32 * 32, im.shape[0] * 3 // 32 * 32]\n else:\n image_size = [im.shape[1] // 32 * 32, im.shape[0] // 32 * 32]\n while image_size[0] * image_size[1] > max_size:\n image_size[0] /= 1.2\n image_size[1] /= 1.2\n image_size[0] = int(image_size[0] // 32) * 32\n image_size[1] = int(image_size[1] // 32) * 32\n\n\n resize_h = int(image_size[1])\n resize_w = int(image_size[0])\n\n\n scaled = cv2.resize(im, dsize=(resize_w, resize_h))\n return scaled, (resize_h, resize_w)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-cuda', type=int, default=1)\n parser.add_argument('-model', default='E2E-MLT_26000.h5')\n parser.add_argument('-segm_thresh', default=0.5)\n parser.add_argument('-images_dir', default='/home/alimoradi/scene_text_dataset/valid_dataset')\n\n font2 = ImageFont.truetype(\"Arial-Unicode-Regular.ttf\", 18)\n\n args = parser.parse_args()\n\n net = ModelMLTRCTW(attention=True)\n net_utils.load_net(args.model, net)\n net = net.eval()\n\n if args.cuda:\n print('Using cuda ...')\n net = net.cuda()\n\n\n imagess = glob.glob(os.path.join(args.images_dir, '*.jpg'))\n png = glob.glob(os.path.join(args.images_dir, '*.png'))\n imagess.extend(png)\n png = glob.glob(os.path.join(args.images_dir, '*.JPG'))\n imagess.extend(png)\n\n t_complt = 0\n t_afterResize = 0\n\n frame_no = 0\n\n print(len(imagess))\n with torch.no_grad():\n for img in imagess:\n start_comp = time.time()\n# ret, im = cap.read()\n im = cv2.imread(img)\n # im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n im_resized, (ratio_h, ratio_w) = resize_image(im, scale_up=False)\n\n start_after = time.time()\n\n images = np.asarray([im_resized], dtype=np.float)\n images /= 128\n images -= 1\n im_data = net_utils.np_to_variable(images, is_cuda=args.cuda).permute(0, 3, 1, 2)\n seg_pred, rboxs, angle_pred, features = net(im_data)\n\n rbox = rboxs[0].data.cpu()[0].numpy()\n rbox = rbox.swapaxes(0, 1)\n rbox = rbox.swapaxes(1, 2)\n\n angle_pred = angle_pred[0].data.cpu()[0].numpy()\n\n segm = seg_pred[0].data.cpu()[0].numpy()\n segm = segm.squeeze(0)\n\n\n boxes = get_boxes(segm, rbox, angle_pred, args.segm_thresh)\n\n # draw2 = np.copy(im_resized)\n # img = Image.fromarray(draw2)\n # draw = ImageDraw.Draw(img)\n\n\n out_boxes = []\n for box in boxes:\n pts = box[0:8]\n pts = pts.reshape(4, -1)\n\n det_text, conf, dec_s = ocr_image(net, codec, im_data, box)\n if len(det_text) == 0:\n continue\n\n # width, height = draw.textsize(det_text, font=font2)\n # center = [box[0], box[1]]\n # draw.text((center[0], center[1]), det_text, fill=(0,255,0),font=font2)\n\n t = time.time()\n t_complt += t - start_comp\n t_afterResize += t - start_after\n\n\n # out_boxes.append(box)\n # print(det_text)\n\n # im = np.array(img)\n # for box in out_boxes:\n # pts = box[0:8]\n # pts = pts.reshape(4, -1)\n # draw_box_points(im, pts, color=(0, 255, 0), thickness=1)\n #\n # # cv2.imshow('img', im)\n # # cv2.waitKey(10)\n # 
cv2.imwrite('res_{}.jpg'.format(img.split('/')[-1][:-4]), im)\n\n print(t_complt, t_afterResize)\n print(t_complt/len(imagess), t_afterResize/len(imagess))\n" ]
[ [ "numpy.asarray", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
haozaijie/news_scraper
[ "494b8c8db1f452d900fbb4e06e757733fddccf23" ]
[ "src/postgresql_script/load_data.py" ]
[ "from sqlalchemy import create_engine\nimport pandas as pd\n\nengine = create_engine('postgresql://haozaijie:password@localhost:5432/sample_db')\n\ndf = pd.read_csv('files/combined_coronavirus.csv')\ndf.to_sql('news_scraper.raw', engine, if_exists='replace', index=False)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mahmoud-al-najar/morphcast
[ "75c6eac775be1e54c7893d6fe64b88347d14aa3b" ]
[ "init_scripts/create_dataset.py" ]
[ "import os\nimport copy\nimport netCDF4\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nfrom itertools import groupby\nfrom utilities.wrappers import Topo\nfrom utilities.common import get_datetime_from_ymd_string, datetime_to_timestamp\n\n\ns_start_ymd = '19961104'\ndt_start_ymd = get_datetime_from_ymd_string(s_start_ymd)\nts_start_ymd = datetime_to_timestamp(dt_start_ymd)\n\ns_end_ymd = '20210330'\ndt_end_ymd = get_datetime_from_ymd_string(s_end_ymd)\nts_end_ymd = datetime_to_timestamp(dt_end_ymd)\n\nprint(s_start_ymd, s_end_ymd)\nprint(dt_start_ymd, dt_end_ymd)\nprint(ts_start_ymd, ts_end_ymd)\n\ntarget_dates = np.arange(dt_start_ymd, dt_end_ymd, np.timedelta64(1, 'M'),\n dtype='datetime64[M]') # Y-M only\ntarget_dates = target_dates.astype('datetime64[D]') # Y-M-D\ntarget_dates = target_dates.astype('datetime64[s]') # add seconds\nprint(target_dates)\n" ]
[ [ "numpy.timedelta64" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jbak920/pixelcanvas
[ "c66c6678f22e3616da28a5aa920bd8436e7ea968" ]
[ "conway/conway.py" ]
[ "import time\r\nimport sys\r\nimport random\r\nsys.path.insert(0,'/home/pi/pixelcanvas')\r\n\r\nimport numpy as np\r\n\r\nfrom utils import multiply\r\n\r\ndef num_neighbors(row, col, array):\r\n neighbors = 0\r\n try:\r\n neighbors += array[row][col+1]\r\n except:\r\n pass\r\n try:\r\n neighbors += array[row][col-1]\r\n except:\r\n pass\r\n try:\r\n neighbors += array[row+1][col+1]\r\n except:\r\n pass\r\n try:\r\n neighbors += array[row+1][col]\r\n except:\r\n pass\r\n try:\r\n neighbors += array[row+1][col-1]\r\n except:\r\n pass\r\n try:\r\n neighbors += array[row-1][col+1]\r\n except:\r\n pass\r\n try:\r\n neighbors += array[row-1][col]\r\n except:\r\n pass\r\n try:\r\n neighbors += array[row-1][col-1]\r\n except:\r\n pass\r\n return neighbors\r\n\r\ndef step(array):\r\n new_array = np.full_like(array, 0)\r\n for i,row in enumerate(array):\r\n for j,pixel in enumerate(row):\r\n neighbors = num_neighbors(i, j, array)\r\n if int(pixel) is 1:\r\n if neighbors < 2 or neighbors > 3:\r\n new_array[i][j] = 0\r\n else:\r\n new_array[i][j] = 1\r\n else:\r\n if neighbors == 3:\r\n new_array[i][j] = 1\r\n \r\n return new_array\r\n\r\ndef life(canvas, init_file):\r\n if 'random' not in init_file:\r\n init_file = '/home/pi/pixelcanvas/conway/' + init_file\r\n with open(init_file, \"r\") as file:\r\n result = [[int(x) for x in line.split()] for line in file]\r\n array = np.array(result)\r\n array = array.T\r\n else:\r\n array = [[random.choice([0, 1]) for pixel in col] for col in canvas._array]\r\n \r\n t_end = time.time() + 60 # Loop for 60 seconds\r\n while time.time() < t_end:\r\n colored_array = multiply(array, 0xFFFFFF)\r\n canvas._array = colored_array\r\n canvas.display()\r\n time.sleep(0.3)\r\n new_array = step(array)\r\n if (new_array == array).all():\r\n break\r\n else:\r\n array = new_array\r\n \r\n" ]
[ [ "numpy.full_like", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pshivam97/Bachelors-Thesis
[ "e55aa943e6f2bf541febb3b63a1e427b56326f33" ]
[ "Research-Work/My-Work/MOEAs/NSGA-II Other Implementation/Implementation-1/NSGA2.py" ]
[ "## TAKEN FROM : https://pythonhealthcare.org/2019/01/17/117-code-only-genetic-algorithms-2-a-multiple-objective-genetic-algorithm-nsga-ii/\n\nimport random as rn\nimport numpy as np\nimport matplotlib.pyplot as plt\n# For use in Jupyter notebooks only:\n\n# Create reference solutions\n# --------------------------\n\ndef create_reference_solutions(chromosome_length, solutions):\n \"\"\"\n Function to create reference chromosomes that will mimic an ideal solution\n \"\"\"\n references = np.zeros((solutions, chromosome_length))\n number_of_ones = int(chromosome_length / 2)\n\n for solution in range(solutions):\n # Build an array with an equal mix of zero and ones\n reference = np.zeros(chromosome_length)\n reference[0: number_of_ones] = 1\n\n # Shuffle the array to mix the zeros and ones\n np.random.shuffle(reference)\n references[solution,:] = reference\n\n return references\n\n\n# Evaluate solutions\n# ------------------\n\ndef calculate_fitness(reference, population):\n \"\"\"\n Calculate how many binary digits in each solution are the same as our\n reference solution.\n \"\"\"\n # Create an array of True/False compared to reference\n identical_to_reference = population == reference\n # Sum number of genes that are identical to the reference\n fitness_scores = identical_to_reference.sum(axis=1)\n\n return fitness_scores\n\n\ndef score_population(population, references):\n \"\"\"\n Loop through all reference solutoins and request score/fitness of\n populaiton against that reference solution.\n \"\"\"\n scores = np.zeros((population.shape[0], references.shape[0]))\n for i, reference in enumerate(references):\n scores[:,i] = calculate_fitness(reference, population)\n\n return scores\n\n# Calculate crowding and select a population based on crowding scores\n# -------------------------------------------------------------------\n\ndef calculate_crowding(scores):\n \"\"\"\n Crowding is based on a vector for each individual\n All scores are normalised between low and high. For any one score, all\n solutions are sorted in order low to high. Crowding for chromsome x\n for that score is the difference between the next highest and next\n lowest score. 
Total crowding value sums all crowding for all scores\n \"\"\"\n\n population_size = len(scores[:, 0])\n number_of_scores = len(scores[0, :])\n\n # create crowding matrix of population (row) and score (column)\n crowding_matrix = np.zeros((population_size, number_of_scores))\n\n # normalise scores (ptp is max-min)\n normed_scores = (scores - scores.min(0)) / scores.ptp(0)\n\n # calculate crowding distance for each score in turn\n for col in range(number_of_scores):\n crowding = np.zeros(population_size)\n\n # end points have maximum crowding\n crowding[0] = 1\n crowding[population_size - 1] = 1\n\n # Sort each score (to calculate crowding between adjacent scores)\n sorted_scores = np.sort(normed_scores[:, col])\n\n sorted_scores_index = np.argsort(\n normed_scores[:, col])\n\n # Calculate crowding distance for each individual\n crowding[1:population_size - 1] = \\\n (sorted_scores[2:population_size] -\n sorted_scores[0:population_size - 2])\n\n # resort to orginal order (two steps)\n re_sort_order = np.argsort(sorted_scores_index)\n sorted_crowding = crowding[re_sort_order]\n\n # Record crowding distances\n crowding_matrix[:, col] = sorted_crowding\n\n # Sum croding distances of each score\n crowding_distances = np.sum(crowding_matrix, axis=1)\n\n return crowding_distances\n\n\ndef reduce_by_crowding(scores, number_to_select):\n \"\"\"\n This function selects a number of solutions based on tournament of\n crowding distances. Two members of the population are picked at\n random. The one with the higher croding dostance is always picked\n \"\"\"\n population_ids = np.arange(scores.shape[0])\n\n crowding_distances = calculate_crowding(scores)\n\n picked_population_ids = np.zeros((number_to_select))\n\n picked_scores = np.zeros((number_to_select, len(scores[0, :])))\n\n for i in range(number_to_select):\n\n population_size = population_ids.shape[0]\n\n fighter1ID = rn.randint(0, population_size - 1)\n\n fighter2ID = rn.randint(0, population_size - 1)\n\n # If fighter # 1 is better\n if crowding_distances[fighter1ID] >= crowding_distances[\n fighter2ID]:\n\n # add solution to picked solutions array\n picked_population_ids[i] = population_ids[\n fighter1ID]\n\n # Add score to picked scores array\n picked_scores[i, :] = scores[fighter1ID, :]\n\n # remove selected solution from available solutions\n population_ids = np.delete(population_ids, (fighter1ID),\n axis=0)\n\n scores = np.delete(scores, (fighter1ID), axis=0)\n\n crowding_distances = np.delete(crowding_distances, (fighter1ID),\n axis=0)\n else:\n picked_population_ids[i] = population_ids[fighter2ID]\n\n picked_scores[i, :] = scores[fighter2ID, :]\n\n population_ids = np.delete(population_ids, (fighter2ID), axis=0)\n\n scores = np.delete(scores, (fighter2ID), axis=0)\n\n crowding_distances = np.delete(\n crowding_distances, (fighter2ID), axis=0)\n\n # Convert to integer\n picked_population_ids = np.asarray(picked_population_ids, dtype=int)\n\n return (picked_population_ids)\n\n# Pareto selecion\n# ---------------\n\ndef identify_pareto(scores, population_ids):\n \"\"\"\n Identifies a single Pareto front, and returns the population IDs of\n the selected solutions.\n \"\"\"\n\n population_size = scores.shape[0]\n # Create a starting list of items on the Pareto front\n # All items start off as being labelled as on the Parteo front\n pareto_front = np.ones(population_size, dtype=bool)\n # Loop through each item. 
This will then be compared with all other items\n for i in range(population_size):\n # Loop through all other items\n for j in range(population_size):\n # Check if our 'i' pint is dominated by out 'j' point\n if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):\n # j dominates i. Label 'i' point as not on Pareto front\n pareto_front[i] = 0\n # Stop further comparisons with 'i' (no more comparisons needed)\n break\n # Return ids of scenarios on pareto front\n return population_ids[pareto_front]\n\n\ndef build_pareto_population(\n population, scores, minimum_population_size, maximum_population_size):\n \"\"\"\n As necessary repeats Pareto front selection to build a population within\n defined size limits. Will reduce a Pareto front by applying crowding\n selection as necessary.\n \"\"\"\n unselected_population_ids = np.arange(population.shape[0])\n all_population_ids = np.arange(population.shape[0])\n pareto_front = []\n while len(pareto_front) < minimum_population_size:\n temp_pareto_front = identify_pareto(\n scores[unselected_population_ids, :], unselected_population_ids)\n\n # Check size of total parteo front.\n # If larger than maximum size reduce new pareto front by crowding\n combined_pareto_size = len(pareto_front) + len(temp_pareto_front)\n if combined_pareto_size > maximum_population_size:\n number_to_select = combined_pareto_size - maximum_population_size\n selected_individuals = (reduce_by_crowding(\n scores[temp_pareto_front], number_to_select))\n temp_pareto_front = temp_pareto_front[selected_individuals]\n\n # Add latest pareto front to full Pareto front\n pareto_front = np.hstack((pareto_front, temp_pareto_front))\n\n\n # Update unslected population ID by using sets to find IDs in all\n # ids that are not in the selected front\n unselected_set = set(all_population_ids) - set(pareto_front)\n unselected_population_ids = np.array(list(unselected_set))\n\n population = population[pareto_front.astype(int)]\n return population\n\n# Population functions\n# --------------------\n\ndef create_population(individuals, chromosome_length):\n \"\"\"\n Create random population with given number of individuals and chroosome\n length.\n \"\"\"\n\n # Set up an initial array of all zeros\n population = np.zeros((individuals, chromosome_length))\n # Loop through each row (individual)\n for i in range(individuals):\n # Choose a random number of ones to create\n ones = rn.randint(0, chromosome_length)\n # Change the required number of zeros to ones\n population[i, 0:ones] = 1\n # Sfuffle row\n np.random.shuffle(population[i])\n\n return population\n\ndef breed_by_crossover(parent_1, parent_2):\n \"\"\"\n Combine two parent chromsomes by crossover to produce two children.\n \"\"\"\n # Get length of chromosome\n chromosome_length = len(parent_1)\n\n # Pick crossover point, avoding ends of chromsome\n crossover_point = rn.randint(1,chromosome_length-1)\n\n # Create children. np.hstack joins two arrays\n child_1 = np.hstack((parent_1[0:crossover_point],\n parent_2[crossover_point:]))\n\n child_2 = np.hstack((parent_2[0:crossover_point],\n parent_1[crossover_point:]))\n\n # Return children\n return child_1, child_2\n\n\ndef randomly_mutate_population(population, mutation_probability):\n \"\"\"\n Randomly mutate population with a given individual gene mutation\n probability. 
Individual gene may switch between 0/1.\n \"\"\"\n\n # Apply random mutation\n random_mutation_array = np.random.random(size=(population.shape))\n\n random_mutation_boolean = \\\n random_mutation_array <= mutation_probability\n\n population[random_mutation_boolean] = \\\n np.logical_not(population[random_mutation_boolean])\n\n # Return mutation population\n return population\n\n\ndef breed_population(population):\n \"\"\"\n Create child population by repetedly calling breeding function (two parents\n producing two children), applying genetic mutation to the child population,\n combining parent and child population, and removing duplice chromosomes.\n \"\"\"\n # Create an empty list for new population\n new_population = []\n population_size = population.shape[0]\n # Create new popualtion generating two children at a time\n for i in range(int(population_size/2)):\n parent_1 = population[rn.randint(0, population_size-1)]\n parent_2 = population[rn.randint(0, population_size-1)]\n child_1, child_2 = breed_by_crossover(parent_1, parent_2)\n new_population.append(child_1)\n new_population.append(child_2)\n\n # Add the child population to the parent population\n # In this method we allow parents and children to compete to be kept\n population = np.vstack((population, np.array(new_population)))\n population = np.unique(population, axis=0)\n\n return population\n\n# *************************************\n# ******** MAIN ALGORITHM CODE ********\n# *************************************\n\n# Set general parameters\nchromosome_length = 50\nstarting_population_size = 50\nmaximum_generation = 500\nminimum_population_size = 5\nmaximum_population_size = 100\n\n# Create two reference solutions\n# (this is used just to illustrate GAs)\nreferences = create_reference_solutions(chromosome_length, 2)\n\n# Create starting population\npopulation = create_population(\n starting_population_size, chromosome_length)\n#population = np.unique(population, axis=0)\n\n# Now we'll go through the generations of genetic algorithm\n\nfor generation in range(maximum_generation):\n if generation %10 ==0:\n print ('Generation (out of %i): %i '%(maximum_generation, generation))\n\n # Brred\n population = breed_population(population)\n\n # Score population\n scores = score_population(population, references)\n\n # Build pareto front\n population = build_pareto_population(\n population, scores, minimum_population_size, maximum_population_size)\n\n\n# Get final pareto front\nscores = score_population(population, references)\npopulation_ids = np.arange(population.shape[0]).astype(int)\npareto_front = identify_pareto(scores, population_ids)\npopulation = population[pareto_front, :]\nscores = scores[pareto_front]\n\n# Plot Pareto front (for two scores only)\nx = scores[:, 0]/chromosome_length*100\ny = scores[:, 1]/chromosome_length*100\nplt.xlabel('Objective A - % maximum obtainable')\nplt.ylabel('Objective B - % maximum obtainable')\n\nplt.scatter(x,y)\nplt.savefig('pareto.png')\nplt.show()\n" ]
[ [ "numpy.asarray", "numpy.hstack", "numpy.unique", "numpy.arange", "numpy.zeros", "numpy.logical_not", "matplotlib.pyplot.savefig", "numpy.delete", "numpy.argsort", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.random.random", "matplotlib.pyplot.scatter", "numpy.random.shuffle", "numpy.ones", "numpy.sort", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leelabcnbc/tang_jcompneuro_revision
[ "58e9dbcbef7ca3f0c3976b24a4e4aa9c5efcdd3a" ]
[ "tang_jcompneuro/stimulus_classification.py" ]
[ "\"\"\"similar to misc.py in the previous tang-paper-2017 repo\"\"\"\nimport os.path\nfrom collections import OrderedDict\n# from itertools import product\n\nimport numpy as np\n\nfrom . import dir_dictionary\n\nrange_constructor = np.arange\n\n\ndef _shape_mapping_add_sublevels(prefix, result_dict, global_start_idx, group_step, group_num, start=1):\n for subgroup_idx, start_idx in enumerate(\n range(global_start_idx, global_start_idx + group_step * group_num, group_step), start=start):\n result_dict[prefix + str(subgroup_idx)] = range_constructor(start_idx, start_idx + group_step)\n return result_dict\n\n\ndef shape_9500_mapping(get_sublevels=False):\n result = OrderedDict()\n\n # edge\n result['E'] = range_constructor(80)\n # full lines\n if get_sublevels:\n result = _shape_mapping_add_sublevels('FL', result, 80, 40, 4)\n else:\n # total\n result['FL'] = range_constructor(80, 240)\n # dense lines\n if get_sublevels:\n result['DL1'] = range_constructor(240, 280)\n result['DL2'] = range_constructor(280, 320)\n else:\n result['DL'] = range_constructor(240, 320)\n\n # center bars\n if get_sublevels:\n result = _shape_mapping_add_sublevels('CB', result, 320, 40, 6)\n else:\n result['CB'] = range_constructor(320, 560)\n\n # side bars\n if get_sublevels:\n result['SB1'] = range_constructor(560, 640)\n result['SB2'] = range_constructor(640, 720)\n # longer, and so many.\n result['SB3'] = range_constructor(720, 1600)\n else:\n result['SB'] = range_constructor(560, 1600)\n\n # solid corners, acute\n if get_sublevels:\n result = _shape_mapping_add_sublevels('SCA', result, 1600, 40, 50)\n else:\n result['SCA'] = range_constructor(1600, 3600)\n\n # solid corners, obtuse\n if get_sublevels:\n result = _shape_mapping_add_sublevels('SCO', result, 3600, 80, 4)\n else:\n result['SCO'] = range_constructor(3600, 3920)\n\n # outline corners, acute\n if get_sublevels:\n result = _shape_mapping_add_sublevels('OCA', result, 3920, 40, 50)\n else:\n result['OCA'] = range_constructor(3920, 5920)\n\n # outline corners, obtuse\n if get_sublevels:\n result = _shape_mapping_add_sublevels('OCO', result, 5920, 80, 4)\n else:\n result['OCO'] = range_constructor(5920, 6240)\n\n # ray\n if get_sublevels:\n result = _shape_mapping_add_sublevels('RAY', result, 6240, 40, 3)\n result = _shape_mapping_add_sublevels('RAY', result, 6360, 10, 1, start=4)\n result = _shape_mapping_add_sublevels('RAY', result, 6370, 40, 1, start=5)\n result = _shape_mapping_add_sublevels('RAY', result, 6410, 10, 1, start=6)\n result = _shape_mapping_add_sublevels('RAY', result, 6420, 40, 1, start=7)\n result = _shape_mapping_add_sublevels('RAY', result, 6460, 10, 1, start=8)\n else:\n result['RAY'] = range_constructor(6240, 6470)\n\n # Y\n result['Y'] = range_constructor(6470, 6550)\n\n # fan\n result['F'] = range_constructor(6550, 6630)\n\n # cross\n if get_sublevels:\n result = _shape_mapping_add_sublevels('CX', result, 6630, 40, 8)\n else:\n result['CX'] = range_constructor(6630, 6950)\n\n # spike short, short\n if get_sublevels:\n result = _shape_mapping_add_sublevels('SSS', result, 6950, 40, 4)\n else:\n result['SSS'] = range_constructor(6950, 7110)\n\n # spike long, short\n if get_sublevels:\n result = _shape_mapping_add_sublevels('SLS', result, 7110, 40, 4)\n else:\n result['SLS'] = range_constructor(7110, 7270)\n\n # spike short\n if get_sublevels:\n result = _shape_mapping_add_sublevels('SS', result, 7270, 40, 4)\n else:\n result['SS'] = range_constructor(7270, 7430)\n\n # spike long\n if get_sublevels:\n result = 
_shape_mapping_add_sublevels('SL', result, 7430, 40, 4)\n else:\n result['SL'] = range_constructor(7430, 7590)\n\n # grid\n if get_sublevels:\n result = _shape_mapping_add_sublevels('G', result, 7590, 20, 2)\n else:\n result['G'] = range_constructor(7590, 7630)\n\n # balls\n if get_sublevels:\n result = _shape_mapping_add_sublevels('B', result, 7630, 80, 3)\n else:\n result['B'] = range_constructor(7630, 7870)\n\n # rings\n if get_sublevels:\n result = _shape_mapping_add_sublevels('R', result, 7870, 80, 3)\n else:\n result['R'] = range_constructor(7870, 8110)\n\n # curves\n if get_sublevels:\n result = _shape_mapping_add_sublevels('CV', result, 8110, 80, 6)\n else:\n result['CV'] = range_constructor(8110, 8590)\n\n # concentric ring\n if get_sublevels:\n result = _shape_mapping_add_sublevels('CR', result, 8590, 5, 3)\n else:\n result['CR'] = range_constructor(8590, 8605)\n\n # edge with circles\n if get_sublevels:\n result = _shape_mapping_add_sublevels('EC', result, 8605, 40, 8)\n else:\n result['EC'] = range_constructor(8605, 8925)\n\n # ray center\n if get_sublevels:\n result = _shape_mapping_add_sublevels('RC', result, 8925, 40, 8)\n else:\n result['RC'] = range_constructor(8925, 9245)\n\n # ray no center\n if get_sublevels:\n result = _shape_mapping_add_sublevels('RNC', result, 9245, 40, 6)\n else:\n result['RNC'] = range_constructor(9245, 9485)\n\n # stars\n if get_sublevels:\n result = _shape_mapping_add_sublevels('S', result, 9485, 5, 3)\n else:\n result['S'] = range_constructor(9485, 9500)\n\n # make sure it's correct.\n result_all = np.sort(np.concatenate(list(result.values())))\n assert np.array_equal(result_all, np.arange(9500))\n\n return result\n\n\n# based on his v3 classification, which should be the same as\n# /private_data/shape_params_data/Stimuli_Name.xlsx\n# I debugged this against v3 dict in\n# thesis_plots/v1_fitting/pattern_stimulus.ipynb\nshape_9500_more_broad_classification_dict_tang = OrderedDict()\nshape_9500_more_broad_classification_dict_tang['bar'] = ('E', 'FL', 'CB', 'SB', 'DL')\nshape_9500_more_broad_classification_dict_tang['curvature'] = ('B', 'R', 'CR', 'CV')\nshape_9500_more_broad_classification_dict_tang['corner'] = ('SCA', 'SCO', 'OCA', 'OCO', 'F')\nshape_9500_more_broad_classification_dict_tang['cross'] = ('RAY', 'CX', 'Y', 'SSS', 'SLS', 'SS', 'SL', 'G')\nshape_9500_more_broad_classification_dict_tang['composition'] = ('EC', 'RC', 'RNC', 'S')\n_all_cate = sum(shape_9500_more_broad_classification_dict_tang.values(), ())\nassert len(set(_all_cate)) == len(_all_cate)\nassert set(shape_9500_mapping(get_sublevels=False).keys()) == set(_all_cate)\n\n\ndef _mapping_to_str_9500(mapping_this):\n label_str_this = np.empty((9500,), dtype=np.object_)\n for key, range_this in mapping_this.items():\n label_str_this[range_this] = key\n return label_str_this\n\n\ndef _attach_sample_weight(int_label):\n assert int_label.shape == (int_label.size,)\n assert (int_label >= 0).all()\n bin_count = np.bincount(int_label)\n assert np.all(bin_count > 0)\n weight_label = np.empty((int_label.size,), dtype=np.float64)\n for label in np.arange(bin_count.size):\n weight_label[int_label == label] = 1 / bin_count[label]\n\n return weight_label\n\n\ndef _attach_weight_label_wrapper(label_dict):\n for level in ('low', 'middle', 'high'):\n label_dict[(level, 'weight')] = _attach_sample_weight(label_dict[(level, 'int')])\n\n return label_dict\n\n\ndef _load_9500_labels():\n # give shape_9500 label in all three levels, providing both int version and string version.\n\n # first handle low 
one.\n label_str_low = _mapping_to_str_9500(shape_9500_mapping(get_sublevels=True))\n label_str_mid = _mapping_to_str_9500(shape_9500_mapping(get_sublevels=False))\n label_str_high = np.empty((9500,), dtype=np.object_)\n\n high_order_classification_dict = shape_9500_more_broad_classification_dict_tang\n\n for key, subkeys in high_order_classification_dict.items():\n label_str_high[np.in1d(label_str_mid, subkeys)] = key\n\n _, label_int_low = np.unique(label_str_low, return_inverse=True)\n _, label_int_mid = np.unique(label_str_mid, return_inverse=True)\n _, label_int_high = np.unique(label_str_high, return_inverse=True)\n\n return _attach_weight_label_wrapper({\n ('low', 'int'): label_int_low,\n ('low', 'str'): label_str_low,\n ('middle', 'int'): label_int_mid,\n ('middle', 'str'): label_str_mid,\n ('high', 'int'): label_int_high,\n ('high', 'str'): label_str_high,\n })\n\n\ndef _load_shape_4605_correspondence():\n a = np.loadtxt(os.path.join(dir_dictionary['shape_params_data'], 'mapping_vec.csv'), dtype=np.int64)\n assert a.shape == (4605,)\n return a\n\n\ndef _load_4605_labels():\n mapping = _load_shape_4605_correspondence()\n labels_old = _load_9500_labels()\n # int label has to be regenerated, as there's some sublevel missing.\n labels_new = {}\n for key in ('low', 'middle', 'high'):\n labels_new[(key, 'str')] = labels_old[(key, 'str')][mapping]\n _, labels_new[(key, 'int')] = np.unique(labels_new[(key, 'str')], return_inverse=True)\n return _attach_weight_label_wrapper(labels_new)\n\n\n# this will be used by other files.\nstimulus_label_dict_tang = {\n 'Shape_9500': _load_9500_labels(),\n 'Shape_4605': _load_4605_labels(),\n}\n\n\n# # generate all subsets needed\n# def get_reference_subset_list(*, percentage_list=(None, 25, 50, 75)):\n# base_subsets = ('OT', 'nonOT', 'all')\n# list_all = []\n# for base_subset, per_this in product(base_subsets, percentage_list):\n# # TODO if per_this is float, maybe need some work to convert it to string.\n# suffix = '' if per_this is None else f'+{per_this}_0' # just do one shuffling.\n# list_all.append(base_subset + suffix)\n# return list_all\n\n\ndef decompose_subset(subset):\n if subset is None:\n return None, None\n else:\n assert isinstance(subset, str)\n subset_params_loc = subset.find('+')\n if subset_params_loc != -1:\n raise RuntimeError('should not be here in new repo!')\n # subset_proper, subset_param = subset[:subset_params_loc], subset[subset_params_loc + 1:]\n else:\n subset_proper, subset_param = subset, None\n return subset_proper, subset_param\n\n\nnum_ot_dict = {\n 'Shape_9500': 1600,\n 'Shape_4605': 800,\n}\n\n\n#\ndef get_subset_slice(dataset, subset):\n \"\"\"when subset is complex (with `+` in it), the return value won't be a slice.\"\"\"\n assert dataset in {'Shape_9500', 'Shape_4605'}\n\n if subset is None or subset == 'all':\n return slice(None)\n\n\n num_ot = num_ot_dict[dataset]\n\n # num_total = {\n # 'Shape_9500': 9500,\n # 'Shape_4605': 4605,\n # }[dataset]\n\n # ok. I allow subset to have '+' in it. 
if not, then it's legacy subset,\n # without partitioning and all that stuff.\n # subset_params_loc = subset.find('+')\n # if subset_params_loc != -1:\n # subset_proper, subset_param = subset[:subset_params_loc], subset[subset_params_loc + 1:]\n # else:\n # subset_proper, subset_param = subset, None\n subset_proper, subset_param = decompose_subset(subset)\n if subset_param is None:\n if subset_proper == 'OT':\n return slice(None, num_ot)\n elif subset_proper == 'nonOT':\n return slice(num_ot, None)\n # didn't include nonOT_nonCN, as that's only for debugging purpose.\n elif subset_proper == 'all':\n return slice(None)\n else:\n raise ValueError('wrong subset')\n else:\n raise RuntimeError('should not be here in new repo!')\n" ]
[ [ "numpy.unique", "numpy.arange", "numpy.in1d", "numpy.all", "numpy.bincount", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PolarizedLightFieldMicroscopy/LFMNet2
[ "c9b064d7625e018ef54b8dd8a0e53801c4565397" ]
[ "mainTrain.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils import data\nfrom torch import optim\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport torchvision as tv\nimport random\nimport math\nimport time\nfrom datetime import datetime\nimport os\nimport argparse\nimport subprocess\nfrom util.LFUtil import *\nimport numpy as np\n\nfrom networks.LFMNet import LFMNet\n\ndef main(args=None):\n # # Arguments\n # parser = argparse.ArgumentParser()\n # # Number of epochs\n # parser.add_argument('--epochs', type=int, default=1000)\n # # Validate every n percentage of the data\n # parser.add_argument('--valEvery', type=float, default=0.25)\n # # Image indices to use for training and validation\n # parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,5,1)))\n # # List of GPUs to use: 0 1 2 for example\n # parser.add_argument('--GPUs', nargs='+', type=int, default=None)\n # # Batch size\n # parser.add_argument('--batchSize', type=int, default=128)\n # # Perentage of the data to use for validation, from 0 to 1\n # parser.add_argument('--validationSplit', type=float, default=0.1)\n # # Bias initialization value\n # parser.add_argument('--biasVal', type=float, default=0.1)\n # # Learning rate\n # parser.add_argument('--learningRate', type=float, default=0.001)\n # # Use bias flag\n # parser.add_argument('--useBias', type=str2bool, default=True)\n # # Use skip connections flag\n # parser.add_argument('--useSkipCon', type=str2bool, default=False)\n # # User selected random seed\n # parser.add_argument('--randomSeed', type=int, default=None) \n # # fov of input or neighboarhood around lenslet to reconstruct\n # parser.add_argument('--fovInput', type=int, default=9)\n # # nT number of lenslets to reconstruct simultaneously use at training time\n # parser.add_argument('--neighShape', type=int, default=3)\n # # Flag to use shallow or large U-net\n # parser.add_argument('--useShallowUnet', type=str2bool, default=True)\n # # Lower threshold of GT stacks, to get rid of autofluorescence\n # parser.add_argument('--ths', type=float, default=0.03)\n # # Path to dataset\n # parser.add_argument('--datasetPath', nargs='?', default=\"BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5\")\n # # Path to directory where models and tensorboard logs are stored\n # parser.add_argument('--outputPath', nargs='?', default=\"runs/\")\n # # Prefix for current output folder\n # parser.add_argument('--outputPrefix', nargs='?', default=\"\")\n # # Path to model in case of continuing a training\n # parser.add_argument('--checkpointPath', nargs='?', default=None)\n\n # args = parser.parse_args()\n nImgs = len(args.imagesToUse)\n\n # Setup multithreading\n num_workers = getThreads()\n if num_workers!=0:\n torch.set_num_threads(num_workers)\n\n if not torch.cuda.is_available():\n print(\"GPU initialization error\")\n exit(-1)\n \n if torch.cuda.is_available():\n print (\"Cuda is available\")\n device_id = torch.cuda.current_device()\n gpu_properties = torch.cuda.get_device_properties(device_id)\n print(\"Found %d GPUs available. 
Using GPU %d (%s) of compute capability %d.%d with \"\n \"%.1fGb total memory.\\n\" % \n (torch.cuda.device_count(),\n device_id,\n gpu_properties.name,\n gpu_properties.major,\n gpu_properties.minor,\n gpu_properties.total_memory / 1e9))\n\n # Select GPUs to use \n args.GPUs = list(range(torch.cuda.device_count())) if args.GPUs is None else args.GPUs\n print('Using GPUs: ' + str(args.GPUs))\n device_ids = args.GPUs\n\n # Set common random seed\n if args.randomSeed is not None:\n np.random.seed(args.randomSeed)\n torch.manual_seed(args.randomSeed)\n\n # Load checkpoint if provided\n if args.checkpointPath is not None:\n checkpointPath = args.checkpointPath\n checkpoint = torch.load(checkpointPath)\n # overwrite args\n args = checkpoint['args']\n args.checkpointPath = checkpointPath\n\n # set Device to use\n device = torch.device(\"cuda:\"+str(device_ids[0]) if torch.cuda.is_available() else \"cpu\")\n\n # Create unique label\n today = datetime.now()\n # Get commit number \n # label = subprocess.check_output([\"git\", \"describe\", \"--always\"]).strip()\n #specific to MBL lab workstation\n label = subprocess.check_output([\"C:/Program Files/git/bin/git\", \"describe\", \"--always\"]).strip()\n\n comment = today.strftime('%Y_%m_%d__%H%M%S') + \"_\"+ str(args.useBias) +\"B_\"+str(args.biasVal)+\"bias_\" + str(nImgs) + \\\n \"I_\"+ str(args.batchSize)+\"BS_\"+str(args.useSkipCon)+\"Sk_\" + str(args.fovInput) + \"FOV_\" + str(args.neighShape) + \"nT_\" \\\n + str(args.ths) + \"ths_\" + str(label.decode(\"utf-8\") ) + \"_commit__\" + args.outputPrefix\n\n # Create output folder\n save_folder = args.outputPath + \"/\" + comment\n # If asked to continue a training, save in the same folder\n if args.checkpointPath is not None:\n save_folder = os.path.split(args.checkpointPath)[0]\n print(save_folder)\n\n # Create summary writer to log stuff\n writer = SummaryWriter(log_dir=save_folder)\n writer.add_text('Description',comment,0)\n writer.flush()\n\n\n\n\n # Load dataset\n all_data = Dataset(args.datasetPath, args.randomSeed, \\\n fov=args.fovInput, neighShape=args.neighShape, img_indices=args.imagesToUse, get_full_imgs=False, center_region=None)\n # Split validation and testing\n train_size = int((1 - args.validationSplit) * len(all_data))\n test_size = len(all_data) - train_size\n train_dataset, test_dataset = torch.utils.data.random_split(all_data, [train_size, test_size])\n # Create data loaders\n train_dataset = data.DataLoader(train_dataset, batch_size=args.batchSize,\n shuffle=True, num_workers=num_workers, pin_memory=True)\n test_dataset = data.DataLoader(test_dataset, batch_size=args.batchSize,\n shuffle=True, num_workers=num_workers, pin_memory=True)\n\n validate_every = np.round(len(train_dataset)*args.valEvery)\n\n # Get Dataset information\n nDepths = all_data.get_n_depths()\n volShape, LFshape = all_data.__shape__()\n LFshape = LFshape[0:4]\n lateralTile = int(math.sqrt(nDepths))\n # Find normalization values\n maxInputTrain, maxVolumeTrain = all_data.get_max()\n maxInputTest, maxVolumeTest = all_data.get_max()\n\n # Create network\n net = LFMNet(nDepths, args.useBias, args.useSkipCon, LFshape, LFfov=args.fovInput, use_small_unet=args.useShallowUnet).to(device)\n optimizer = optim.Adam(net.parameters(), lr=args.learningRate)\n lossFunction = nn.L1Loss()\n # Create SSIM criteria\n ssim = SSIM()\n ssim.eval()\n\n # Init bias and weights if needed\n if args.useBias:\n def bias_init(m):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):\n if m.bias is not None:\n 
nn.init.constant_(m.bias.data, args.biasVal)\n nn.init.kaiming_normal_(m.weight)\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.constant_(m.bias.data, args.biasVal)\n nn.init.kaiming_normal_(m.weight)\n net.apply(bias_init)\n\n # Load network from checkpoint\n if args.checkpointPath is not None:\n net.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epochStart = checkpoint['epoch']\n epochs = args.epochs + epochStart\n train_loss = checkpoint['loss']\n\n\n # Start distributed data parallel, as it's faster than DataParallel\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '1234'+str(device_ids[0])\n torch.distributed.init_process_group(backend=\"nccl\", rank=0, world_size=1)\n\n # Move network to distributed data parallel\n net = nn.parallel.DistributedDataParallel(net, device_ids=args.GPUs, output_device=args.GPUs[0]).to(device)\n\n\n # timers\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n global_it_counter = 0\n # define indices to grab for tensorboard visualization\n indices_to_show = torch.randperm(test_size)[0:8]\n # Init arrays to store losses\n train_losses, test_losses = [], []\n test_loss = 0\n epochStart = 0\n\n # Start training\n for epoch in range(epochStart, args.epochs):\n net.train()\n torch.set_grad_enabled(True)\n torch.cuda.empty_cache()\n train_loss = 0\n print('Training')\n global_it_counter = 0\n\n for nBatch,(inputs,labels) in enumerate(train_dataset):\n # compute current iteration\n curr_it = epoch*len(train_dataset) + nBatch\n # start timer\n start.record()\n print('ep: ' + str(epoch) + ' ' + str(nBatch+1) + '/' + str(len(train_dataset)) + ' currIt: ' + str(curr_it))\n\n optimizer.zero_grad() \n # load data to gpu and normalize from 0 to 1\n inputGPU = inputs.float().to(device) / maxInputTest\n outputsGT = labels.float().to(device) / maxVolumeTrain\n # Threshold GT to get rid of autofluorescence\n if args.ths!=0:\n outputsGT = imadjust(outputsGT, args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())\n # Predict\n outputsVol = net(inputGPU)\n loss = lossFunction(outputsGT,outputsVol)\n loss.backward()\n train_loss += loss.item() / nDepths\n optimizer.step()\n\n global_it_counter += inputs.shape[0]\n # Record training time\n end.record()\n torch.cuda.synchronize()\n end_time = start.elapsed_time(end)\n # Compute time per sample\n elapsed_time = end_time/inputs.shape[0]\n\n # Check if validation is required\n if nBatch%validate_every==0:\n print(comment)\n # Write training images to tensorboard\n lastBatchSize = min(outputsGT.shape[0],4)\n gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)\n gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)\n # Select some images in the batch for showing\n indices_to_display = torch.randperm(inputGPU.shape[0])[0:4]\n outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])\n outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])\n inputGPU = inputGPU[indices_to_display,:,:,:,:,:]\n currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])\n currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])\n inputGrid = LF2Spatial(inputGPU, 
inputGPU.shape[2:])\n gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)\n gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)\n gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)\n gt = outputsGT[0,:,:,:,:].sum(3).repeat(3,1,1)\n gt /= gt.max()\n # Write to tensorboard\n writer.add_image('z_proj_train',gt,curr_it)\n writer.add_image('images_train_YZ_projection', gridOut2, curr_it) \n writer.add_image('outputRGB_train', gridPred, curr_it)\n writer.add_image('outputRGB_train_GT', gridGT, curr_it)\n writer.add_image('input_train', gridInput, curr_it)\n writer.add_scalar('Loss/train', train_loss/global_it_counter, curr_it)\n writer.add_scalar('times/train', elapsed_time, curr_it)\n \n # Restart\n train_loss = 0.0\n global_it_counter = 0\n\n\n print('Validating')\n net.eval()\n with torch.no_grad(): \n avg_psnr = 0\n avg_ssim = 0\n test_loss = 0\n start.record()\n for nBatch,(inputs,labels) in enumerate(test_dataset):\n inputGPU = inputs.float().to(device) / maxInputTest\n outputsGT = labels.float().to(device) / maxVolumeTrain\n # Threshold GT to get rid of autofluorescence\n outputsGT = imadjust(outputsGT,args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())\n outputsVol = net(inputGPU)\n loss = lossFunction(outputsGT,outputsVol)\n test_loss += loss.item() / nDepths\n # Compute PSNR\n lossMSE = nn.functional.mse_loss(outputsVol.to(device).detach(), outputsGT.to(device).detach())\n avg_psnr += 10 * math.log10(1 / lossMSE.item())\n # Compute ssim\n avg_ssim += ssim(outputsVol[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device), outputsGT[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device)).sum()\n end.record()\n torch.cuda.synchronize()\n\n \n lastBatchSize = min(outputsGT.shape[0],4)\n gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)\n gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)\n # process some for showing\n indices_to_display = torch.randperm(inputGPU.shape[0])[0:lastBatchSize]\n outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])\n outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])\n inputGPU = inputGPU[indices_to_display,:,:,:,:,:]\n\n currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])\n currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])\n inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])\n gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)\n gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)\n gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)\n # Write to tensorboard\n writer.add_image('images_val_YZ_projection', gridOut2, curr_it)\n writer.add_image('outputRGB_test', gridPred, curr_it)\n writer.add_image('outputRGB_test_GT', gridGT, curr_it)\n writer.add_image('input_test', gridInput, curr_it)\n writer.add_scalar('Loss/test', test_loss/len(test_dataset), curr_it)\n writer.add_scalar('Loss/psnr_val', avg_psnr/len(test_dataset), curr_it)\n writer.add_scalar('Loss/ssim_val', avg_ssim/len(test_dataset), curr_it) \n writer.add_scalar('LearningRate', args.learningRate, curr_it)\n writer.add_scalar('times/val', start.elapsed_time(end)/test_size, curr_it) \n net.train()\n \n if epoch%2==0:\n torch.save({\n 'epoch': epoch,\n 'args' : args,\n 
'model_state_dict': net.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': train_loss, \n 'dataset_path': args.datasetPath},\n save_folder + '/model_'+str(epoch))\n\n print(f\"Epoch {epoch + 1}/{args.epochs}.. \"\n f\"Train loss: {train_loss / len(train_dataset):.7f}.. \"\n f\"Test loss: {test_loss / len(test_dataset):.7f}.. \")\n\nif __name__ == '__main__':\n main()" ]
[ [ "torch.cuda.get_device_properties", "torch.load", "torch.randperm", "torch.utils.data.DataLoader", "torch.set_grad_enabled", "torch.set_num_threads", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.nn.functional.interpolate", "torch.no_grad", "torch.nn.L1Loss", "torch.cuda.synchronize", "torch.distributed.init_process_group", "torch.cuda.current_device", "torch.nn.init.constant_", "torch.cuda.Event", "torch.cuda.empty_cache", "torch.utils.data.random_split", "torch.cuda.device_count", "torch.nn.parallel.DistributedDataParallel", "numpy.random.seed", "torch.manual_seed", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
reddit-conflicting-viewpoints/Reddit
[ "7d531f8cb826cf2d8196cf126d1e11dacc144155" ]
[ "pages/sas_key.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom pandas.errors import ParserError\n\nAZURE_URL = \"https://redditconflict.blob.core.windows.net/redditconflict/results/\"\nSAS_KEY = \"?sp=r&st=2022-03-06T23:29:32Z&se=2023-01-01T07:29:32Z&sv=2020-08-04&sr=c&sig=%2FGHaPmXaEBpc36JBlYfMJLPE8dYr5cFcAsbPpjV16NA%3D\"\n\ndef get_csv_url(subreddit, sort_order='hot', results=True):\n if results:\n return AZURE_URL + subreddit + \"_\" + sort_order + \"_results.csv\" + SAS_KEY\n else:\n return AZURE_URL + subreddit + \"_\" + sort_order + \"_subreddit.csv\" + SAS_KEY\n\ndef get_df(subreddit, sort_order='hot'):\n dtypess ={\n 'post_index': int,\n 'post_id': str,\n 'post_title': str,\n 'flair': str,\n 'post_score': int,\n 'post_upvote_ratio': np.float64,\n 'subreddit': str,\n 'post_url': str,\n 'num_comments': int,\n 'post_body': str,\n 'post_created': int,\n 'post_body_word_token': str,\n 'post_body_tag': str,\n 'post_body_string': str,\n 'post_topics': str,\n 'post_sentiment': int,\n 'comment_index': pd.Int64Dtype(),\n 'comment_id': str,\n 'parent_id': str,\n 'comment': str,\n 'comment_score': pd.Int64Dtype(),\n 'comment_controversiality': pd.Int64Dtype(),\n 'comment_total_awards_received': pd.Int64Dtype(),\n 'comment_is_locked': pd.BooleanDtype(),\n 'comment_is_collapsed': pd.BooleanDtype(),\n 'comment_is_submitter': pd.BooleanDtype(),\n 'comment_created': pd.Int64Dtype(),\n 'comment_word_token': str,\n 'comment_tag': str,\n 'comment_body_string': str,\n 'comment_topics': str,\n 'comment_sentiment': pd.Int64Dtype(),\n 'comment_relevance': np.float64\n }\n try:\n df = pd.read_csv(get_csv_url(subreddit, sort_order))\n # df = pd.read_csv(get_csv_url(subreddit, sort_order), dtype=dtypess)\n except ParserError:\n df = pd.read_csv(get_csv_url(subreddit, sort_order), lineterminator='\\n')\n # df = pd.read_csv(get_csv_url(subreddit, sort_order), lineterminator='\\n', dtype=dtypess)\n return df\n\ndef get_df_description(subreddit, sort_order='hot'):\n try:\n df = pd.read_csv(get_csv_url(subreddit, sort_order, False))\n except ParserError:\n df = pd.read_csv(get_csv_url(subreddit, sort_order, False), lineterminator='\\n')\n return df\n" ]
[ [ "pandas.Int64Dtype", "pandas.BooleanDtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
mogwai/torch-audiomentations
[ "7d36c4b5970ca3c16482703fa8e8164ff63c20d0" ]
[ "scripts/demo.py" ]
[ "import os\nimport random\nfrom pathlib import Path\n\nimport librosa\nimport numpy as np\nimport time\nimport torch\nfrom scipy.io import wavfile\n\nfrom torch_audiomentations import (\n PolarityInversion,\n Gain,\n PeakNormalization,\n Compose,\n Shift,\n)\n\nSAMPLE_RATE = 44100\n\nBASE_DIR = Path(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))\nSCRIPTS_DIR = BASE_DIR / \"scripts\"\nTEST_FIXTURES_DIR = BASE_DIR / \"test_fixtures\"\n\n\nclass timer(object):\n \"\"\"\n timer: A class used to measure the execution time of a block of code that is\n inside a \"with\" statement.\n\n Example:\n\n ```\n with timer(\"Count to 500000\"):\n x = 0\n for i in range(500000):\n x += 1\n print(x)\n ```\n\n Will output:\n 500000\n Count to 500000: 0.04 s\n\n Warning: The time resolution used here may be limited to 1 ms\n \"\"\"\n\n def __init__(self, description=\"Execution time\", verbose=False):\n self.description = description\n self.verbose = verbose\n self.execution_time = None\n\n def __enter__(self):\n self.t = time.time()\n return self\n\n def __exit__(self, type, value, traceback):\n self.execution_time = time.time() - self.t\n if self.verbose:\n print(\"{}: {:.3f} s\".format(self.description, self.execution_time))\n\n\nif __name__ == \"__main__\":\n \"\"\"\n For each transformation, apply it to an example sound and write the transformed sounds to\n an output folder. Also crudely measure and print execution time.\n \"\"\"\n output_dir = os.path.join(SCRIPTS_DIR, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n\n np.random.seed(42)\n random.seed(42)\n\n filenames = [\"perfect-alley1.ogg\", \"perfect-alley2.ogg\"]\n samples1, _ = librosa.load(\n os.path.join(TEST_FIXTURES_DIR, filenames[0]), sr=SAMPLE_RATE, mono=False\n )\n samples2, _ = librosa.load(\n os.path.join(TEST_FIXTURES_DIR, filenames[1]), sr=SAMPLE_RATE, mono=False\n )\n samples = np.stack((samples1, samples2), axis=0)\n samples = torch.from_numpy(samples)\n\n modes = [\"per_batch\", \"per_example\", \"per_channel\"]\n for mode in modes:\n transforms = [\n {\n \"instance\": Compose(\n transforms=[\n Gain(\n min_gain_in_db=-18.0, max_gain_in_db=-16.0, mode=mode, p=1.0\n ),\n PeakNormalization(mode=mode, p=1.0),\n ],\n shuffle=True,\n ),\n \"name\": \"Shuffled Compose with Gain and PeakNormalization\",\n \"num_runs\": 5,\n },\n {\n \"instance\": Compose(\n transforms=[\n Gain(\n min_gain_in_db=-18.0, max_gain_in_db=-16.0, mode=mode, p=0.5\n ),\n PolarityInversion(mode=mode, p=0.5),\n ],\n shuffle=True,\n ),\n \"name\": \"Compose with Gain and PolarityInversion\",\n \"num_runs\": 5,\n },\n {\"instance\": Gain(mode=mode, p=1.0), \"num_runs\": 5},\n {\"instance\": PolarityInversion(mode=mode, p=1.0), \"num_runs\": 1},\n {\"instance\": PeakNormalization(mode=mode, p=1.0), \"num_runs\": 1},\n {\"instance\": Shift(mode=mode, p=1.0), \"num_runs\": 5},\n ]\n\n execution_times = {}\n\n for transform in transforms:\n augmenter = transform[\"instance\"]\n transform_name = (\n transform.get(\"name\")\n if transform.get(\"name\")\n else transform[\"instance\"].__class__.__name__\n )\n execution_times[transform_name] = []\n for i in range(transform[\"num_runs\"]):\n with timer() as t:\n augmented_samples = augmenter(\n samples=samples, sample_rate=SAMPLE_RATE\n ).numpy()\n execution_times[transform_name].append(t.execution_time)\n for example_idx, original_filename in enumerate(filenames):\n output_file_path = os.path.join(\n output_dir,\n \"{}_{}_{:03d}_{}.wav\".format(\n transform_name, mode, i, Path(original_filename).stem\n 
),\n )\n wavfile.write(\n output_file_path,\n rate=SAMPLE_RATE,\n data=augmented_samples[example_idx].transpose(),\n )\n\n for transform_name in execution_times:\n if len(execution_times[transform_name]) > 1:\n print(\n \"{:<52} {:.3f} s (std: {:.3f} s)\".format(\n transform_name,\n np.mean(execution_times[transform_name]),\n np.std(execution_times[transform_name]),\n )\n )\n else:\n print(\n \"{:<52} {:.3f} s\".format(\n transform_name, np.mean(execution_times[transform_name])\n )\n )\n" ]
[ [ "numpy.random.seed", "torch.from_numpy", "numpy.stack", "numpy.std", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ashishd/OpenSfM
[ "f66e51df7200fac676d8487499ebaf40e3de3e88" ]
[ "opensfm/test/test_datastructures.py" ]
[ "import copy\nimport random\n\nimport numpy as np\nimport pytest\nfrom opensfm import pygeometry\nfrom opensfm import pymap\nfrom opensfm import types\nfrom opensfm.test.utils import (\n assert_metadata_equal,\n assert_cameras_equal,\n assert_shots_equal,\n)\n\n\ndef _create_reconstruction(\n n_cameras: int=0,\n n_shots_cam=None,\n n_pano_shots_cam=None,\n n_points: int=0,\n dist_to_shots: bool=False,\n dist_to_pano_shots: bool=False,\n) -> types.Reconstruction:\n \"\"\"Creates a reconstruction with n_cameras random cameras and\n shots, where n_shots_cam is a dictionary, containing the\n camera_id and the number of shots.\n\n Example:\n shot_cams = {\"0\": 50, \"1\": 30}\n _create_reconstruction(2, shot_cams)\n\n Will create a reconstruction with two cameras and 80 shots,\n 50 are associated with cam \"0\" and 30 with cam \"1\".\n\n n_points_in_shots is the number of points to create.\n If dist_to_shots, then observations are created and randomly\n distributed to all shots. We pick with the repeat option, thus\n if we have three shots the distribution could be\n something like: [1,2,2], [0,1,2]. We avoid things like [3,3,3]\n \"\"\"\n if n_shots_cam is None:\n n_shots_cam = {}\n if n_pano_shots_cam is None:\n n_pano_shots_cam = {}\n\n rec = types.Reconstruction()\n if n_cameras > 0:\n for i in range(n_cameras):\n focal, k1, k2 = np.random.rand(3)\n cam = pygeometry.Camera.create_perspective(focal, k1, k2)\n cam.id = str(i)\n rec.add_camera(cam)\n\n shot_id = 0\n for cam_id, n_shots in n_shots_cam.items():\n for _ in range(n_shots):\n rec.create_shot(str(shot_id), cam_id)\n shot_id += 1\n\n shot_id = 0\n for cam_id, n_shots in n_pano_shots_cam.items():\n for _ in range(n_shots):\n rec.create_pano_shot(str(shot_id), cam_id)\n shot_id += 1\n\n if n_points > 0:\n for i in range(n_points):\n rec.create_point(str(i), np.random.rand(3))\n\n if dist_to_shots:\n n_shots = len(rec.shots)\n for pt in rec.points.values():\n choice = set(np.random.choice(n_shots, n_shots))\n if len(choice) > 1:\n for ch in choice:\n # create a new observation\n obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, int(pt.id))\n shot = rec.shots[str(ch)]\n rec.add_observation(shot, pt, obs)\n # TODO: If required, we have to do the same for pano shots\n return rec\n\n\n\"\"\"\nCamera Tests\n\"\"\"\n\n\ndef test_create_cameras() -> None:\n n_cameras = 100\n rec = types.Reconstruction()\n\n for cam_id in range(0, n_cameras):\n focal, k1, k2 = np.random.rand(3)\n cam = pygeometry.Camera.create_perspective(focal, k1, k2)\n cam.id = str(cam_id)\n # create the camera within the reconstruction\n map_cam = rec.add_camera(cam)\n assert_cameras_equal(cam, map_cam)\n # Check that the cameras are different\n assert cam is not map_cam\n # Check the getters\n assert map_cam is rec.get_camera(str(cam_id))\n assert map_cam is rec.cameras[str(cam_id)]\n\n assert len(rec.cameras) == n_cameras\n\n\ndef test_camera_iterators() -> None:\n n_cameras = 100\n rec = _create_reconstruction(n_cameras)\n\n # Key iterator\n visited_cams = set()\n for cam_id in rec.cameras:\n visited_cams.add(cam_id)\n assert len(visited_cams) == n_cameras\n\n for idx in range(0, n_cameras):\n assert str(idx) in visited_cams\n\n # value iterator\n visited_cams = set()\n for cam in rec.cameras.values():\n visited_cams.add(cam.id)\n focal = np.random.rand(1)\n cam.focal = focal\n assert rec.cameras[cam.id].focal == focal\n assert cam is rec.cameras[cam.id]\n\n assert len(visited_cams) == n_cameras\n\n # item iterator\n for idx in range(0, n_cameras):\n assert str(idx) in 
visited_cams\n\n for cam_id, cam in rec.cameras.items():\n assert cam_id == cam.id\n focal = np.random.rand(1)\n cam.focal = focal\n assert rec.cameras[cam.id].focal == focal\n assert cam is rec.cameras[cam.id]\n\n\ndef _check_common_cam_properties(cam1, cam2) -> None:\n assert cam1.id == cam2.id\n assert cam1.width == cam2.width\n assert cam1.height == cam2.height\n assert cam1.projection_type == cam2.projection_type\n\n\ndef test_brown_camera() -> None:\n rec = types.Reconstruction()\n focal_x = 0.6\n focal_y = 0.7\n c_x = 0.1\n c_y = -0.05\n k1 = -0.1\n k2 = 0.01\n p1 = 0.001\n p2 = 0.002\n k3 = 0.01\n cam_cpp = pygeometry.Camera.create_brown(\n focal_x, focal_y / focal_x, np.array([c_x, c_y]), np.array([k1, k2, k3, p1, p2])\n )\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n # The specific parameters\n assert cam_cpp.k1 == c.k1 and cam_cpp.k2 == c.k2 and cam_cpp.k3 == c.k3\n assert cam_cpp.p2 == c.p2 and cam_cpp.p1 == c.p1\n assert np.allclose(cam_cpp.principal_point, c.principal_point)\n assert len(c.distortion) == 5\n assert np.allclose(cam_cpp.distortion, c.distortion)\n assert cam_cpp.focal == c.focal\n assert cam_cpp.aspect_ratio == c.aspect_ratio\n\n\ndef test_fisheye_camera() -> None:\n rec = types.Reconstruction()\n focal = 0.6\n k1 = -0.1\n k2 = 0.01\n cam_cpp = pygeometry.Camera.create_fisheye(focal, k1, k2)\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n # The specific parameters\n assert cam_cpp.k1 == c.k1 and cam_cpp.k2 == c.k2\n assert len(c.distortion) == 2\n assert np.allclose(cam_cpp.distortion, c.distortion)\n assert cam_cpp.focal == c.focal\n\n\ndef test_fisheye_opencv_camera() -> None:\n rec = types.Reconstruction()\n focal = 0.6\n aspect_ratio = 0.7\n ppoint = np.array([0.51, 0.52])\n dist = np.array([-0.1, 0.09, 0.08, 0.01])\n cam_cpp = pygeometry.Camera.create_fisheye_opencv(focal, aspect_ratio, ppoint, dist)\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n # The specific parameters\n assert cam_cpp.k1 == c.k1 and cam_cpp.k2 == c.k2\n assert cam_cpp.k3 == c.k3 and cam_cpp.k4 == c.k4\n assert len(dist) == len(c.distortion)\n assert np.allclose(cam_cpp.distortion, c.distortion)\n assert cam_cpp.focal == c.focal\n assert cam_cpp.aspect_ratio == c.aspect_ratio\n\n\ndef test_fisheye62_camera() -> None:\n rec = types.Reconstruction()\n focal = 0.6\n aspect_ratio = 0.7\n ppoint = np.array([0.51, 0.52])\n dist = np.array([-0.1, 0.09, 0.08, 0.01, 0.02, 0.05, 0.1, 0.2]) # [k1-k6, p1, p2]\n cam_cpp = pygeometry.Camera.create_fisheye62(focal, aspect_ratio, ppoint, dist)\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n # The specific parameters\n assert cam_cpp.k1 == c.k1 and cam_cpp.k2 == c.k2\n assert cam_cpp.k3 == c.k3 and cam_cpp.k4 == c.k4\n assert cam_cpp.k5 == c.k5 and cam_cpp.k6 == c.k6\n assert cam_cpp.p1 == c.p1 and cam_cpp.p2 == c.p2\n assert len(dist) == len(c.distortion)\n assert np.allclose(cam_cpp.distortion, c.distortion)\n assert cam_cpp.focal == c.focal\n assert cam_cpp.aspect_ratio == c.aspect_ratio\n\n\ndef test_fisheye624_camera() -> None:\n rec = types.Reconstruction()\n focal = 0.6\n aspect_ratio = 0.7\n ppoint = np.array([0.51, 0.52])\n dist = np.array([-0.1, 0.09, 0.08, 
0.01, 0.02, 0.05, 0.1, 0.2, 0.01, -0.003, 0.005, -0.007]) # [k1-k6, p1, p2, s0-s3]\n cam_cpp = pygeometry.Camera.create_fisheye624(focal, aspect_ratio, ppoint, dist)\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n # The specific parameters\n assert cam_cpp.k1 == c.k1 and cam_cpp.k2 == c.k2\n assert cam_cpp.k3 == c.k3 and cam_cpp.k4 == c.k4\n assert cam_cpp.k5 == c.k5 and cam_cpp.k6 == c.k6\n assert cam_cpp.p1 == c.p1 and cam_cpp.p2 == c.p2\n assert cam_cpp.s0 == c.s0 and cam_cpp.s1 == c.s1\n assert cam_cpp.s2 == c.s2 and cam_cpp.s3 == c.s3\n assert len(dist) == len(c.distortion)\n assert np.allclose(cam_cpp.distortion, c.distortion)\n assert cam_cpp.focal == c.focal\n assert cam_cpp.aspect_ratio == c.aspect_ratio\n\n\ndef test_dual_camera() -> None:\n rec = types.Reconstruction()\n focal = 0.6\n k1 = -0.1\n k2 = 0.01\n transition = 0.5\n cam_cpp = pygeometry.Camera.create_dual(transition, focal, k1, k2)\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n # The specific parameters\n assert cam_cpp.k1 == c.k1 and cam_cpp.k2 == c.k2\n assert len(c.distortion) == 2\n assert np.allclose(cam_cpp.distortion, c.distortion)\n assert cam_cpp.focal == c.focal\n assert cam_cpp.transition == c.transition\n\n\ndef test_perspective_camera() -> None:\n rec = types.Reconstruction()\n focal = 0.6\n k1 = -0.1\n k2 = 0.01\n cam_cpp = pygeometry.Camera.create_perspective(focal, k1, k2)\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n # The specific parameters\n assert cam_cpp.k1 == c.k1 and cam_cpp.k2 == c.k2\n assert len(c.distortion) == 2\n assert np.allclose(cam_cpp.distortion, c.distortion)\n assert cam_cpp.focal == c.focal\n\n\ndef test_spherical_camera() -> None:\n rec = types.Reconstruction()\n cam_cpp = pygeometry.Camera.create_spherical()\n cam_cpp.width = 800\n cam_cpp.height = 600\n cam_cpp.id = \"cam\"\n c = rec.add_camera(cam_cpp)\n _check_common_cam_properties(cam_cpp, c)\n\n\n# Test Metadata\ndef _help_measurement_test(measurement, attr, val) -> None:\n # Test metadata's has_value properties\n assert getattr(measurement, attr).has_value is False\n getattr(measurement, attr).value = val\n if np.shape(val) == (): # just a value\n assert getattr(measurement, attr).value == val\n else:\n assert np.allclose(getattr(measurement, attr).value, val)\n # Test metadata's has_value properties!\n assert getattr(measurement, attr).has_value is True\n # Test reset\n getattr(measurement, attr).reset()\n assert getattr(measurement, attr).has_value is False\n\n\ndef test_shot_measurement_setter_and_getter() -> None:\n m1 = pymap.ShotMeasurements()\n # Test basic functionality\n _help_measurement_test(m1, \"capture_time\", np.random.rand(1))\n _help_measurement_test(m1, \"gps_position\", np.random.rand(3))\n _help_measurement_test(m1, \"gps_accuracy\", np.random.rand(1))\n _help_measurement_test(m1, \"compass_accuracy\", np.random.rand(1))\n _help_measurement_test(m1, \"compass_angle\", np.random.rand(1))\n _help_measurement_test(m1, \"opk_accuracy\", np.random.rand(1))\n _help_measurement_test(m1, \"opk_angles\", np.random.rand(3))\n _help_measurement_test(m1, \"accelerometer\", np.random.rand(3))\n _help_measurement_test(m1, \"orientation\", random.randint(0, 100))\n _help_measurement_test(m1, \"sequence_key\", 
\"key_test\")\n\n\ndef _helper_populate_metadata(m) -> None:\n m.capture_time.value = np.random.rand(1)\n m.gps_position.value = np.random.rand(3)\n m.gps_accuracy.value = np.random.rand(1)\n m.compass_accuracy.value = np.random.rand(1)\n m.compass_angle.value = np.random.rand(1)\n m.opk_accuracy.value = np.random.rand(1)\n m.opk_angles.value = np.random.rand(3)\n m.accelerometer.value = np.random.rand(3)\n m.orientation.value = random.randint(0, 100)\n m.sequence_key.value = \"sequence_key\"\n\n\ndef test_shot_measurement_set() -> None:\n m1 = pymap.ShotMeasurements()\n _helper_populate_metadata(m1)\n m2 = pymap.ShotMeasurements()\n # Test setting metadata with other metadata\n m2.set(m1)\n # Check that m2 has the same values as m1\n assert_metadata_equal(m1, m2)\n m3 = pymap.ShotMeasurements()\n m1.set(m3)\n # Now m1 should be completely reset\n assert_metadata_equal(m1, m3)\n\n\ndef test_shot_create() -> None:\n # Given some created shot\n rec = _create_reconstruction(2)\n shot1 = rec.create_shot(\"shot0\", \"0\")\n\n # When getting it, it should have some properties\n assert shot1.id == \"shot0\"\n assert shot1.camera.id == \"0\"\n assert len(rec.shots) == 1\n\n\ndef test_shot_create_existing() -> None:\n # Given some created shot\n rec = _create_reconstruction(2)\n rec.create_shot(\"shot0\", \"0\")\n\n # When re-adding the same shot, it should throw\n with pytest.raises(RuntimeError):\n rec.create_shot(\"shot0\", \"0\")\n rec.create_shot(\"shot0\", \"1\")\n\n\ndef test_shot_create_more() -> None:\n # Given some created shot\n rec = _create_reconstruction(2)\n rec.create_shot(\"shot0\", \"0\")\n\n # When we create more new shots\n n_shots = 10\n for i in range(1, n_shots):\n rec.create_shot(\"shot\" + str(i), \"0\")\n\n # Then we should have more\n assert len(rec.shots) == n_shots\n\n\ndef test_shot_delete_non_existing() -> None:\n # Given some created reconstruction\n rec = _create_reconstruction(2)\n rec.create_shot(\"shot0\", \"0\")\n\n # When deleting non-existing shot\n # It should throw\n with pytest.raises(RuntimeError):\n rec.remove_shot(\"abcde\")\n\n\ndef test_shot_delete_existing() -> None:\n # Given some created reconstruction\n n_shots = 10\n rec = _create_reconstruction(1, {\"0\": n_shots})\n\n # When deleting existing shot\n del_shots = np.random.choice(n_shots, int(n_shots / 2), replace=False)\n for i in del_shots:\n rec.remove_shot(str(i))\n\n # Then we should have the expected count of shots remaining\n assert len(rec.shots) == n_shots - len(del_shots)\n\n\ndef test_shot_get() -> None:\n # Given some created shot\n rec = _create_reconstruction(1)\n shot_id = \"shot0\"\n shot1 = rec.create_shot(shot_id, \"0\")\n\n # We should get it\n assert shot1 is rec.get_shot(shot_id)\n assert shot1 is rec.shots[shot_id]\n\n\ndef test_shot_pose_set() -> None:\n # Given some created shot\n rec = _create_reconstruction(1)\n shot_id = \"shot0\"\n shot = rec.create_shot(shot_id, \"0\")\n\n origin = np.array([1, 2, 3])\n shot.pose.set_origin(origin)\n assert np.allclose(origin, shot.pose.get_origin())\n\n\ndef test_shot_get_non_existing() -> None:\n # Given some created shot\n rec = _create_reconstruction(1)\n shot_id = \"shot0\"\n shot1 = rec.create_shot(shot_id, \"0\")\n\n # When getting a non_existing one, it should throw\n with pytest.raises(RuntimeError):\n assert shot1 is rec.get_shot(\"toto\")\n with pytest.raises(RuntimeError):\n assert shot1 is rec.shots[\"toto\"]\n\n\ndef test_pano_shot_get() -> None:\n # Given some created pano shot\n rec = _create_reconstruction(1)\n shot_id 
= \"shot0\"\n shot1 = rec.create_pano_shot(shot_id, \"0\")\n\n # We should get it\n assert shot1 is rec.pano_shots[shot_id]\n assert shot1 is rec.get_pano_shot(shot_id)\n\n\ndef test_pano_shot_get_non_existing() -> None:\n # Given some created pano shot\n rec = _create_reconstruction(1)\n shot_id = \"shot0\"\n shot1 = rec.create_shot(shot_id, \"0\")\n\n # When getting a non_existing one, it should throw\n with pytest.raises(RuntimeError):\n assert shot1 is rec.get_shot(\"toto\")\n with pytest.raises(RuntimeError):\n assert shot1 is rec.shots[\"toto\"]\n\n\ndef test_pano_shot_create() -> None:\n # Given some created shot\n rec = _create_reconstruction(2)\n shot1 = rec.create_pano_shot(\"shot0\", \"0\")\n\n # When getting it, it should have some properties\n assert shot1.id == \"shot0\"\n assert shot1.camera.id == \"0\"\n assert len(rec.pano_shots) == 1\n\n\ndef test_pano_shot_create_existing() -> None:\n # Given some created pano shot\n rec = _create_reconstruction(2)\n rec.create_pano_shot(\"shot0\", \"0\")\n\n n_shots = 10\n # When re-adding the same pano shot\n for _ in range(n_shots):\n # It should throw\n with pytest.raises(RuntimeError):\n rec.create_pano_shot(\"shot0\", \"0\")\n rec.create_pano_shot(\"shot0\", \"1\")\n\n\ndef test_pano_shot_create_more() -> None:\n # Given some created pano shot\n rec = _create_reconstruction(2)\n rec.create_pano_shot(\"shot0\", \"0\")\n\n # When we create more new pano shots\n n_shots = 10\n for i in range(1, n_shots):\n rec.create_pano_shot(\"shot\" + str(i), \"0\")\n\n # Then we should have more\n assert len(rec.pano_shots) == n_shots\n\n\ndef test_pano_shot_delete_non_existing() -> None:\n # Given some created reconstruction\n rec = _create_reconstruction(2)\n rec.create_pano_shot(\"shot0\", \"0\")\n\n # When deleting non-existing shot\n # It should throw\n with pytest.raises(RuntimeError):\n rec.remove_pano_shot(\"abcde\")\n\n\ndef test_pano_shot_delete_existing() -> None:\n # Given some created reconstruction\n n_shots = 10\n rec = _create_reconstruction(2)\n rec = _create_reconstruction(1, n_pano_shots_cam={\"0\": n_shots})\n\n # When deleting existing pano shot\n n_shots = 10\n del_shots = np.random.choice(n_shots, int(n_shots / 2), replace=False)\n for i in del_shots:\n rec.remove_pano_shot(str(i))\n\n # Then we should have the expected count of shots remaining\n assert len(rec.pano_shots) == n_shots - len(del_shots)\n\n\ndef test_shot_merge_cc() -> None:\n # Given some created reconstruction\n rec = _create_reconstruction(1, {\"0\": 2})\n map_shot1 = rec.shots[\"0\"]\n\n # When setting some merge_cc\n map_shot1.merge_cc = 10\n\n # Then we should have it set\n assert map_shot1.merge_cc == 10\n\n\ndef test_shot_covariance() -> None:\n # Given some created reconstruction\n rec = _create_reconstruction(1, {\"0\": 2})\n map_shot1 = rec.shots[\"0\"]\n\n # When setting some covariance\n map_shot1.covariance = np.diag([1, 2, 3])\n\n # Then we should have it set\n assert np.allclose(map_shot1.covariance, np.diag([1, 2, 3]))\n\n\ndef test_shot_covariance_different() -> None:\n # Given some created reconstruction\n rec = _create_reconstruction(1, {\"0\": 2})\n map_shot1 = rec.shots[\"0\"]\n map_shot2 = rec.shots[\"1\"]\n\n # When setting some covariance\n map_shot1.covariance = np.diag([1, 2, 3])\n map_shot2.covariance = np.diag([2, 2, 2])\n\n # Then they are different objects\n assert map_shot2.covariance is not map_shot1.covariance\n\n\ndef test_shot_create_remove_create() -> None:\n # Given some created reconstruction\n n_shots = 10\n rec = 
_create_reconstruction(1, {\"0\": n_shots})\n\n # When we remove one shot\n rec.remove_shot(\"0\")\n\n # Then we have one shot less\n assert len(rec.shots) == n_shots - 1\n\n # When we re-create it\n rec.create_shot(\"0\", \"0\")\n\n # Then we have the initial count\n assert len(rec.shots) == n_shots\n\n\ndef test_pano_shot_create_remove_create() -> None:\n # Given some created reconstruction\n n_shots = 10\n rec = _create_reconstruction(1, n_pano_shots_cam={\"0\": n_shots})\n\n # When we remove one pano shot\n rec.remove_pano_shot(\"0\")\n\n # Then we have one pano shot less\n assert len(rec.pano_shots) == n_shots - 1\n\n # When we re-create it\n rec.create_pano_shot(\"0\", \"0\")\n\n # Then we have the initial count\n assert len(rec.pano_shots) == n_shots\n\n\ndef _create_rig_camera():\n rig_camera = pymap.RigCamera()\n rig_camera.id = \"rig_camera\"\n rig_camera.pose = pygeometry.Pose(\n np.array([0.1, 0.2, 0.3]), np.array([0.1, 0.2, 0.3])\n )\n return rig_camera\n\n\ndef _create_rig_instance():\n rec = _create_reconstruction(1, {\"0\": 2})\n rig_camera = rec.add_rig_camera(_create_rig_camera())\n rig_instance = pymap.RigInstance(\"1\")\n shot = pymap.Shot(\n \"0\",\n pygeometry.Camera.create_spherical(),\n pygeometry.Pose(),\n )\n rig_instance.add_shot(rig_camera, shot)\n return rec, rig_instance, shot\n\n\ndef test_rig_camera_create() -> None:\n rec = _create_reconstruction(1, {\"0\": 2})\n rec.add_rig_camera(_create_rig_camera())\n\n # we should have default-per-camera rig and the created rig camera\n assert \"0\" in rec.rig_cameras.keys()\n assert \"rig_camera\" in rec.rig_cameras.keys()\n\n\ndef test_rig_instance() -> None:\n _, rig_instance, _ = _create_rig_instance()\n assert list(rig_instance.keys()) == [\"0\"]\n\n\ndef test_rig_instance_create_default() -> None:\n # one default rig instance per shot\n rec, rig_instance, _ = _create_rig_instance()\n\n assert len(rec.rig_instances) == 2\n assert dict(rec.rig_instances[\"0\"].camera_ids.items()) == {\"0\": \"0\"}\n assert list(rec.rig_instances[\"0\"].shots.keys()) == [\"0\"]\n assert dict(rec.rig_instances[\"1\"].camera_ids.items()) == {\"1\": \"0\"}\n assert list(rec.rig_instances[\"1\"].shots.keys()) == [\"1\"]\n\n\ndef test_rig_instance_create_add_existing() -> None:\n rec, rig_instance, _ = _create_rig_instance()\n with pytest.raises(RuntimeError):\n rec.add_rig_instance(rig_instance)\n\n\ndef test_rig_instance_remove_shot() -> None:\n rec, _, shot = _create_rig_instance()\n rec.remove_shot(shot.id)\n assert len(rec.rig_instances[\"0\"].shots) == 0\n\n\ndef test_rig_shot_modify_pose_raise() -> None:\n _, rig_instance, shot = _create_rig_instance()\n with pytest.raises(RuntimeError):\n shot.pose.set_origin(np.array([1, 2, 3]))\n\n\ndef test_rig_shot_modify_pose_succeed() -> None:\n _, rig_instance, shot = _create_rig_instance()\n next(iter(rig_instance.rig_cameras.values())).pose = pygeometry.Pose()\n shot.pose.set_origin(np.array([1, 2, 3]))\n\n\ndef test_rig_shot_set_pose() -> None:\n _, rig_instance, shot = _create_rig_instance()\n with pytest.raises(RuntimeError):\n shot.pose = pygeometry.Pose()\n\n\ndef test_add_shot_from_shot_correct_value() -> None:\n # Given some created reconstruction (rec) ...\n n_shots = 5\n rec = _create_reconstruction(1, n_shots_cam={\"0\": n_shots})\n shot1 = rec.shots[\"0\"]\n _helper_populate_metadata(shot1.metadata)\n\n # .. 
and given another one (new)\n rec_new = _create_reconstruction(1)\n\n # When adding 2 shot of rec to new\n rec_new.add_shot(rec.shots[\"0\"])\n rec_new.add_shot(rec.shots[\"1\"])\n\n # Then new has two shots ...\n assert len(rec_new.shots) == 2\n\n # ... and new's shots values should be the same as rec's ones'\n for k in rec_new.shots.keys():\n assert_shots_equal(rec.shots[k], rec_new.shots[k])\n\n\ndef test_shot_metadata_different() -> None:\n # Given some created reconstruction\n rec = _create_reconstruction(1, n_shots_cam={\"0\": 2})\n shot1 = rec.shots[\"0\"]\n shot2 = rec.shots[\"1\"]\n _helper_populate_metadata(shot1.metadata)\n\n # When getting their metdata object, they should be different\n assert shot1.metadata is not shot2.metadata\n\n\ndef test_shot_metadata_assign_equal() -> None:\n # Given some created reconstruction\n rec = _create_reconstruction(1, n_shots_cam={\"0\": 2})\n shot1 = rec.shots[\"0\"]\n shot2 = rec.shots[\"1\"]\n _helper_populate_metadata(shot1.metadata)\n\n # When assigning their metadata to be equal\n shot2.metadata = shot1.metadata\n\n # Their object are different ...\n assert shot1.metadata is not shot2.metadata\n\n # ... but their values are equal\n assert_metadata_equal(shot1.metadata, shot2.metadata)\n\n\ndef test_add_pano_shot_from_shot_correct_value() -> None:\n # Given some created reconstruction (rec) ...\n n_shots = 5\n rec = _create_reconstruction(1, n_pano_shots_cam={\"0\": n_shots})\n shot1 = rec.pano_shots[\"0\"]\n _helper_populate_metadata(shot1.metadata)\n\n # .. and given another one (new)\n rec_new = _create_reconstruction(1)\n\n # When adding 2 pano shot of rec to new\n rec_new.add_pano_shot(rec.pano_shots[\"0\"])\n rec_new.add_pano_shot(rec.pano_shots[\"1\"])\n\n # Then new's shots values should be the same as rec's ones'\n for k in rec_new.shots.keys():\n assert_shots_equal(rec.pano_shots[k], rec_new.pano_shots[k])\n\n\ndef test_single_point_create() -> None:\n # Given a created point\n rec = types.Reconstruction()\n pt = rec.create_point(\"0\")\n\n # It should be there\n assert pt.id == \"0\"\n assert len(rec.points) == 1\n\n\ndef test_single_point_get_existing() -> None:\n # Given a created point\n rec = types.Reconstruction()\n pt = rec.create_point(\"0\")\n\n # When we get it, we have it (!)\n assert pt == rec.points[\"0\"] and pt == rec.get_point(\"0\")\n\n\ndef test_single_point_get_non_existing() -> None:\n # Given a created point\n rec = types.Reconstruction()\n rec.create_point(\"0\")\n\n # When we get a non existing one\n with pytest.raises(RuntimeError):\n # It should throw\n rec.get_point(\"toto\")\n\n\ndef test_single_point_coordinates() -> None:\n # Given a created point\n rec = types.Reconstruction()\n pt = rec.create_point(\"0\")\n\n # When assiging coordinates\n coord = np.random.rand(3)\n pt.coordinates = coord\n\n # They should be set\n assert np.allclose(pt.coordinates, coord)\n\n\ndef test_single_point_color() -> None:\n # Given a created point\n rec = types.Reconstruction()\n pt = rec.create_point(\"0\")\n\n # When assiging color\n color = np.random.randint(low=0, high=255, size=(3,))\n pt.color = color\n\n # It should be set\n assert np.allclose(pt.color, color)\n\n\ndef test_point_add_from_point() -> None:\n # Given some created reconstruction (rec) ...\n rec = types.Reconstruction()\n\n # ... 
and some other one (rec2) with some point\n rec2 = types.Reconstruction()\n coord2 = np.random.rand(3)\n pt2 = rec2.create_point(\"1\", coord2)\n\n # When adding rec2 point to rec\n pt2_1 = rec.add_point(pt2)\n\n # Then rec should have it ...\n assert len(rec.points) == 1\n\n # ... as a different object\n assert pt2 is not pt2_1\n assert \"1\" == pt2_1.id\n\n # ... and with correct values\n assert pt2_1 == rec.points[\"1\"]\n assert np.allclose(pt2_1.coordinates, coord2)\n\n\ndef test_point_reproj_errors_assign() -> None:\n # Given some created point\n rec = _create_reconstruction(n_points=1)\n pt = rec.points[\"0\"]\n\n # When assigning reprojections errors\n reproj_errors = dict({\"shot1\": np.random.rand(2), \"shot2\": np.random.rand(2)})\n pt.reprojection_errors = reproj_errors\n\n # They should be correct\n for k in reproj_errors.keys():\n assert np.allclose(pt.reprojection_errors[k], reproj_errors[k])\n\n\ndef test_point_delete_non_existing() -> None:\n # Given some created points\n n_points = 100\n rec = _create_reconstruction(n_points=n_points)\n\n # When we delete a non-existing one\n with pytest.raises(RuntimeError):\n # It should throw\n rec.remove_point(\"abcdef\")\n\n\ndef test_point_delete_existing() -> None:\n # Given some created points\n n_points = 100\n rec = _create_reconstruction(n_points=n_points)\n\n # When we delete all of them\n del_list = list(rec.points.keys())\n for k in del_list:\n rec.remove_point(k)\n\n # Then there's none\n assert len(rec.points) == 0\n\n\ndef test_point_delete_existing_assign_empty() -> None:\n # Given some created points\n n_points = 100\n rec = _create_reconstruction(n_points=n_points)\n\n # When we delete all of them by assigning the empty dict\n rec.points = {}\n assert len(rec.points) == 0\n\n\ndef test_single_observation() -> None:\n # Given a 1-camera, 1-point reconstruction\n rec = _create_reconstruction(1, n_shots_cam={\"0\": 1}, n_points=1)\n\n # When we add an observation to it\n obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, 100, 2, 5)\n rec.add_observation(\"0\", \"0\", obs)\n shot = rec.shots[\"0\"]\n pt = rec.points[\"0\"]\n\n # Then it has one observation ...\n observations = pt.get_observations()\n assert len(observations) == 1\n assert pt.number_of_observations() == 1\n\n # ... 
and the corresponding observation object\n obs = shot.get_landmark_observation(pt)\n assert obs is not None\n\n\ndef test_single_observation_delete() -> None:\n # Given a 1-camera, 1-point reconstruction and corresponding observation\n rec = _create_reconstruction(1, n_shots_cam={\"0\": 1}, n_points=1)\n obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, 100)\n rec.add_observation(\"0\", \"0\", obs)\n shot = rec.shots[\"0\"]\n pt = rec.points[\"0\"]\n\n # When we remove it\n rec.remove_observation(shot.id, pt.id)\n\n # Then there's none\n observations = pt.get_observations()\n assert len(observations) == 0\n assert pt.number_of_observations() == 0\n\n\ndef test_many_observations_delete() -> None:\n # Given a map with 10 shots, 1000 landmarks ...\n m = pymap.Map()\n n_cams = 2\n n_shots = 10\n n_landmarks = 1000\n for cam_id in range(n_cams):\n cam = pygeometry.Camera.create_perspective(0.5, 0, 0)\n cam.id = \"cam\" + str(cam_id)\n m.create_camera(cam)\n m.create_rig_camera(pymap.RigCamera(pygeometry.Pose(), cam.id))\n\n for shot_id in range(n_shots):\n cam_id = \"cam\" + str(int(np.random.rand(1) * 10 % n_cams))\n shot_id = str(shot_id)\n m.create_rig_instance(shot_id)\n m.create_shot(shot_id, cam_id, cam_id, shot_id, pygeometry.Pose())\n\n for point_id in range(n_landmarks):\n m.create_landmark(str(point_id), np.random.rand(3))\n\n # ... and random connections (observations) between shots and points\n n_total_obs = 0\n for lm in m.get_landmarks().values():\n n_obs = 0\n for shot in m.get_shots().values():\n # create a new observation\n obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, int(lm.id))\n m.add_observation(shot, lm, obs)\n n_obs += 1\n n_total_obs += 1\n\n # (we expect it to be created correctly)\n for lm in m.get_landmarks().values():\n n_total_obs -= lm.number_of_observations()\n assert n_total_obs == 0\n\n # and when we clear all the observations\n m.clear_observations_and_landmarks()\n\n\ndef test_clean_landmarks_with_min_observations() -> None:\n m = pymap.Map()\n n_cams = 2\n n_shots = 2\n n_landmarks = 10\n for cam_id in range(n_cams):\n cam = pygeometry.Camera.create_perspective(0.5, 0, 0)\n cam.id = \"cam\" + str(cam_id)\n m.create_camera(cam)\n m.create_rig_camera(pymap.RigCamera(pygeometry.Pose(), cam.id))\n\n for shot_id in range(n_shots):\n cam_id = \"cam\" + str(int(np.random.rand(1) * 10 % n_cams))\n m.create_rig_instance(str(shot_id))\n m.create_shot(str(shot_id), cam_id, cam_id, str(shot_id), pygeometry.Pose())\n\n for point_id in range(n_landmarks):\n m.create_landmark(str(point_id), np.random.rand(3))\n\n for point_id in range(int(n_landmarks / 2)):\n for shot in m.get_shots().values():\n # create a new observation\n obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, point_id)\n m.add_observation(shot, m.get_landmark(str(point_id)), obs)\n\n for point_id in range(int(n_landmarks / 2), n_landmarks):\n shot = m.get_shot(\"0\")\n # create a new observation\n obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, point_id)\n m.add_observation(shot, m.get_landmark(str(point_id)), obs)\n\n m.clean_landmarks_below_min_observations(n_shots)\n\n assert len(m.get_landmarks()) == int(n_landmarks / 2)\n m.clean_landmarks_below_min_observations(n_shots + 1)\n assert len(m.get_landmarks()) == 0\n\n\ndef test_camera_deepcopy() -> None:\n # Given a camera\n cam1 = pygeometry.Camera.create_perspective(0.5, 0, 0)\n\n # When we deepcopy it\n cam2 = copy.deepcopy(cam1)\n\n # Then it has the correct focal\n assert cam1.focal == cam2.focal\n\n\ndef test_camera_deepcopy_assign() -> None:\n 
# Given a camera\n cam1 = pygeometry.Camera.create_perspective(0.5, 0, 0)\n\n # When we deepcopy'n assign it\n cam2 = copy.deepcopy(cam1)\n cam2.focal = 0.7\n\n # Then it has a different value from the original\n assert cam1.focal != cam2.focal\n\n\ndef test_observation_shot_removal() -> None:\n # Given a reconstruction with 2 shots\n rec = _create_reconstruction(\n n_cameras=2, n_shots_cam={\"0\": 1, \"1\": 1}, n_points=200, dist_to_shots=True\n )\n\n # When removing one of them\n rec.remove_shot(\"0\")\n\n # All the points have only one at most observation each ...\n for p in rec.points.values():\n assert len(p.get_observations()) <= 1\n\n # ... and when removing the remaining one ...\n rec.remove_shot(\"1\")\n\n # Thers' none\n for p in rec.points.values():\n assert len(p.get_observations()) == 0\n\n\ndef test_rec_deepcopy() -> None:\n # Given a reconstruction with everything (shots, pano shots, metadata)\n rec = _create_reconstruction(\n n_cameras=2,\n n_shots_cam={\"0\": 50, \"1\": 40},\n n_pano_shots_cam={\"0\": 20, \"1\": 30},\n n_points=200,\n dist_to_shots=True,\n )\n for shot in rec.shots.values():\n _helper_populate_metadata(shot.metadata)\n for shot in rec.pano_shots.values():\n _helper_populate_metadata(shot.metadata)\n\n # When we deep-copy it\n # pyre-fixme[6]: For 2nd param expected `Optional[Dict[int, typing.Any]]` but\n # got `Dict[str, bool]`.\n rec2 = copy.deepcopy(rec, {\"copy_observations\": True})\n\n # It has the expected count of data\n assert len(rec2.cameras) == 2\n assert len(rec2.shots) == 90\n assert len(rec2.pano_shots) == 50\n assert len(rec2.points) == 200\n\n # Cameras are different objects of same value\n for k in rec.cameras:\n cam, cam_cpy = rec.cameras[k], rec2.cameras[k]\n assert cam != cam_cpy\n assert_cameras_equal(cam, cam_cpy)\n\n # Shots are different objects of same value\n for shot_id in rec2.shots.keys():\n shot1, shot2 = rec.shots[shot_id], rec2.shots[shot_id]\n assert shot1 is not shot2\n assert_shots_equal(shot1, shot2)\n\n # Pano shots are different objects of same value\n for shot_id in rec2.pano_shots.keys():\n shot1, shot2 = rec.pano_shots[shot_id], rec2.pano_shots[shot_id]\n assert shot1 is not shot2\n assert_shots_equal(shot1, shot2)\n\n # Points are different objects of same value\n for pt_id in rec2.points:\n pt, pt_cpy = rec.points[pt_id], rec2.points[pt_id]\n assert pt != pt_cpy\n assert pt.id == pt_cpy.id\n assert np.allclose(pt.coordinates, pt_cpy.coordinates)\n assert np.allclose(pt.color, pt_cpy.color)\n obs = pt.get_observations()\n obs_cpy = pt_cpy.get_observations()\n assert len(obs) == len(obs_cpy)\n\n # Observations are different objects of same value\n for shot, obs_id in obs.items():\n obs1 = shot.get_observation(obs_id)\n shot_cpy = rec2.shots[shot.id]\n obs_cpy = shot_cpy.get_observation(obs_id)\n assert obs1 is not obs_cpy\n\n\ndef test_gcp() -> None:\n gcp = []\n for i in range(0, 10):\n p = pymap.GroundControlPoint()\n p.id = \"p\" + str(i)\n o1 = pymap.GroundControlPointObservation()\n o1.shot_id = \"p1\"\n o2 = pymap.GroundControlPointObservation()\n o2.shot_id = \"p2\"\n obs = [o1, o2]\n p.observations = obs\n gcp.append(p)\n assert p.observations[0].shot_id == \"p1\"\n assert p.observations[1].shot_id == \"p2\"\n p.add_observation(o2)\n p.add_observation(o2)\n assert len(p.observations) == 4\n for pt in gcp:\n assert pt.observations[0].shot_id == \"p1\"\n assert pt.observations[1].shot_id == \"p2\"\n\n\ndef test_add_correspondences_from_tracks_manager() -> None:\n n_shots = 3\n rec = _create_reconstruction(\n 
n_cameras=1,\n n_shots_cam={\"0\": n_shots},\n n_points=10,\n )\n # create tracks manager\n tm = pymap.TracksManager()\n # add observations for 3 tracks\n # One shot and one landmark are not in the reconstruction\n for track_id in [\"0\", \"1\", \"100\"]:\n for shot_id in range(n_shots + 1):\n obs = pymap.Observation(100, 200, 0.5, 255, 0, 0, 100)\n tm.add_observation(str(shot_id), track_id, obs)\n\n # add a shot that is NOT in the tracks manager\n rec.create_shot(str(n_shots + 5), next(iter(rec.cameras)))\n\n rec.add_correspondences_from_tracks_manager(tm)\n\n # make sure to have the observations for []\n assert \"100\" not in rec.points\n\n for track_id in [\"0\", \"1\"]:\n pt = rec.points[track_id]\n observations = pt.get_observations()\n assert len(observations) == n_shots\n" ]
[ [ "numpy.diag", "numpy.allclose", "numpy.random.choice", "numpy.shape", "numpy.random.rand", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
basiralab/Fed-CBT
[ "c85520ebd536153af1005454b48ca5fdb2cff3af" ]
[ "model.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Sequential, Linear, ReLU\nfrom torch_geometric.nn import NNConv\n\n\nclass DGN(torch.nn.Module):\n def __init__(self, MODEL_PARAMS):\n super(DGN, self).__init__()\n self.model_params = MODEL_PARAMS\n \n nn = Sequential(Linear(self.model_params[\"Linear1\"][\"in\"], self.model_params[\"Linear1\"][\"out\"]), ReLU()) # in=6, out=36\n self.conv1 = NNConv(self.model_params[\"conv1\"][\"in\"], self.model_params[\"conv1\"][\"out\"], nn, aggr='mean') # in=1, out=36\n \n nn = Sequential(Linear(self.model_params[\"Linear2\"][\"in\"], self.model_params[\"Linear2\"][\"out\"]), ReLU())\n self.conv2 = NNConv(self.model_params[\"conv2\"][\"in\"], self.model_params[\"conv2\"][\"out\"], nn, aggr='mean')\n \n nn = Sequential(Linear(self.model_params[\"Linear3\"][\"in\"], self.model_params[\"Linear3\"][\"out\"]), ReLU())\n self.conv3 = NNConv(self.model_params[\"conv3\"][\"in\"], self.model_params[\"conv3\"][\"out\"], nn, aggr='mean')\n \n \n def forward(self, data):\n \"\"\"\n Args:\n data (Object): data object consist of three parts x, edge_attr, and edge_index.\n This object can be produced by using helper.cast_data function\n x: Node features with shape [number_of_nodes, 1] (Simply set to vector of ones since we dont have any)\n edge_attr: Edge features with shape [number_of_edges, number_of_views]\n edge_index: Graph connectivities with shape [2, number_of_edges] (COO format) \n \n\n \"\"\"\n x, edge_attr, edge_index = data.x, data.edge_attr, data.edge_index\n \n x = F.relu(self.conv1(x, edge_index, edge_attr))\n\n x = F.relu(self.conv2(x, edge_index, edge_attr))\n \n x = F.relu(self.conv3(x, edge_index, edge_attr))\n repeated_out = x.repeat(self.model_params[\"N_ROIs\"],1,1)\n repeated_t = torch.transpose(repeated_out, 0, 1)\n diff = torch.abs(repeated_out - repeated_t)\n cbt = torch.sum(diff, 2)\n \n return cbt" ]
[ [ "torch.abs", "torch.transpose", "torch.sum", "torch.nn.Linear", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
correlllab/nn4mc_cpp
[ "74a2a923dfdf07b65ffe30d92ea8a686b6dbb1f1" ]
[ "data/simpleRNNexample.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport pandas as pd\nimport collections\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\nN = 1000\nTp = 800\n\nt = np.arange(0,N)\nx = np.sin(0.02*t)+2*np.random.rand(N)\n\ndf = pd.DataFrame(x)\ndf.head()\n\nvalues=df.values\ntrain,test = values[0:Tp,:], values[Tp:N,:]\n\nstep = 10\n# add step elements into train and test\ntest = np.append(test,np.repeat(test[-1,],step))\ntrain = np.append(train,np.repeat(train[-1,],step))\n\ndef convertToMatrix(data, step):\n X, Y =[], []\n for i in range(len(data)-step):\n d=i+step\n X.append(data[i:d,])\n Y.append(data[d,])\n return np.array(X), np.array(Y)\n\ntrainX,trainY = convertToMatrix(train,step)\ntestX, testY = convertToMatrix(test,step)\n\ntrainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\ntestX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv1D(filters=7, kernel_size = 3, padding = 'same', input_shape = (trainX.shape[1], trainX.shape[2])),\n tf.keras.layers.MaxPool1D(pool_size = 2),\n tf.keras.layers.Dense(8, activation=\"relu\"),\n tf.keras.layers.Dense(1)])\n\nmodel.compile(loss='mean_squared_error', optimizer='rmsprop')\nmodel.fit(trainX,trainY, epochs=100, batch_size=16, verbose=2)\n\nprint(model.summary())\nprint(model.layers[0].input_shape)\n\nmodel.save('Conv1_pooling.hdf5')\n\ntrainPredict = model.predict(trainX)\ntestPredict= model.predict(testX)\npredicted = np.concatenate((trainPredict,testPredict),axis=0)\n\ntrainScore = model.evaluate(trainX, trainY, verbose=0)\nprint(trainScore)\n" ]
[ [ "tensorflow.keras.layers.MaxPool1D", "numpy.reshape", "numpy.arange", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.Dense", "pandas.DataFrame", "numpy.sin", "numpy.concatenate", "numpy.random.rand", "numpy.repeat", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
AdityaKane2001/ACL_WASSA
[ "3912f61807cb08ff55fde36c433230720084b57a" ]
[ "models/essaytoemotionempathydistressbert.py" ]
[ "from transformers import BertTokenizer, BertModel\nimport torch\nfrom torch import nn\n\nimport numpy as np\nfrom tqdm.auto import tqdm\nimport wandb\n\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error as mse\nfrom dataloader import get_dataset\nfrom utils import *\n\n\nclass EssayToEmotionEmpathyDistressBERT(nn.Module):\n \"\"\"\n Comprises of a bert based self which takes tokenized essay and outputs:\n emotion, empathy and distress. \n \"\"\"\n\n def __init__(self, cfg):\n \"\"\"Initializes all layers.\"\"\"\n self.cfg = cfg\n super().__init__()\n self.tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\",\n do_lower_case=True)\n\n self.bert = BertModel.from_pretrained(\"bert-base-uncased\")\n\n if self.cfg.freeze_pretrained:\n for param in self.bert.parameters():\n param.requires_grad = False\n\n self.emotion_lin = nn.Linear(self.bert.config.hidden_size,\n self.cfg.num_classes)\n self.emotion_softmax = torch.nn.Softmax(dim=-1)\n self.class_names = (\"anger\", \"disgust\", \"fear\", \"joy\", \"neutral\",\n \"sadness\", \"surprise\")\n self.empathy = nn.Linear(self.bert.config.hidden_size, 1)\n self.distress = nn.Linear(self.bert.config.hidden_size, 1)\n\n self.device = torch.device(\n \"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n self.push_all_to_device(self.device)\n\n def forward(self, batch):\n \"\"\"Mandatory forward method\"\"\"\n x = self.bert(**batch[\"inputs\"][0])[1] # (batch_size, hidden_size)\n\n emotion = self.emotion_lin(x)\n emotion = self.emotion_softmax(emotion)\n\n empathy = self.empathy(x)\n distress = self.distress(x)\n return (emotion, empathy, distress)\n\n ### Utilities\n def push_all_to_device(self, device):\n \"\"\"Loads all layers to GPU.\"\"\"\n self.bert = self.bert.to(device)\n\n self.emotion_lin = self.emotion_lin.to(device)\n self.emotion_softmax = self.emotion_softmax.to(device)\n\n self.empathy = self.empathy.to(device)\n self.distress = self.distress.to(device)\n\n def push_batch_to_device(self, batch):\n \"\"\"Loads members of a batch to GPU. Note that all members are torch \n Tensors.\n \"\"\"\n dbatch = {\n \"inputs\": [obj.to(self.device) for obj in batch[\"inputs\"]],\n \"outputs\": [obj.to(self.device) for obj in batch[\"outputs\"]]\n }\n return dbatch\n\n def push_to_wandb(self, stat_dict, val_cm):\n \"\"\"Push statistics to wandb after epoch. Plot confusion matrix.\"\"\"\n ax = sns.heatmap(val_cm,\n annot=True,\n xticklabels=self.class_names,\n yticklabels=self.class_names,\n fmt=\"d\")\n ax.get_figure().savefig(\"confusion.jpg\")\n stat_dict[\"confusion_matrix\"] = wandb.Image(\"confusion.jpg\")\n wandb.log(stat_dict)\n plt.clf()\n os.remove(\"confusion.jpg\")\n del ax\n\n def get_criteria(self):\n \"\"\"Get loss funtions for all outputs. \"\"\"\n criteria = []\n if self.cfg.classification_loss == \"categorical_crossentropy\":\n criteria += [nn.CrossEntropyLoss()]\n if self.cfg.regression_loss == \"mean_squared_error\":\n criteria += [nn.MSELoss()] * 2\n return criteria\n\n ### Metrics\n def loss_fn(self, batch, outputs, criteria):\n \"\"\"Loss function. 
Currently only calculated loss for emotions.\"\"\"\n loss = 0.\n for i in range(len(outputs)):\n loss += criteria[i](outputs[i], batch[\"outputs\"][i])\n return loss\n \n def tocpu(self, obj):\n return obj.detach().cpu().numpy()\n\n def calculate_metrics(self, batch, outputs):\n \"\"\"Detaches and loads relavent tensors to CPU and calculated metrics.\"\"\"\n np_batch_outputs =self.tocpu(batch[\"outputs\"][0])\n np_outputs = self.tocpu(outputs[0])\n\n empathy_mse = mse(self.tocpu(outputs[1]),\n self.tocpu(batch[\"outputs\"][1]))\n distress_mse = mse(\n self.tocpu(outputs[2]), self.tocpu(batch[\"outputs\"][2]))\n\n acc = accuracy(np_batch_outputs, np_outputs)\n f1 = f1_loss(np_batch_outputs, np_outputs)\n cm = confusion_matrix(np_batch_outputs, np_outputs)\n return acc, f1, empathy_mse, distress_mse, cm\n\n ### Train and eval loops\n def train_epoch(self, train_ds, optimizer, criteria, progress_bar):\n \"\"\"Training loop for one epoch.\"\"\"\n self.train()\n epoch_loss = []\n epoch_acc = []\n epoch_f1 = []\n epoch_empathy_mse = []\n epoch_distress_mse = []\n\n for batchnum, batch in enumerate(train_ds):\n batch[\"inputs\"][0] = self.tokenizer(text=batch[\"inputs\"][0],\n add_special_tokens=True,\n return_attention_mask=True,\n max_length=self.cfg.maxlen,\n padding='max_length',\n truncation=True,\n return_tensors=\"pt\")\n\n batch = self.push_batch_to_device(batch)\n\n outputs = self(batch)\n\n loss = self.loss_fn(batch, outputs, criteria)\n loss.backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n acc, f1, empathy_mse, distress_mse, _ = self.calculate_metrics(\n batch, outputs)\n loss_ = loss.detach().cpu().numpy()\n\n # record metrics\n epoch_loss.append(loss_)\n epoch_acc.append(acc)\n epoch_f1.append(f1)\n epoch_empathy_mse.append(empathy_mse)\n epoch_distress_mse.append(distress_mse)\n\n # progress bar\n progress_bar.set_postfix(loss=loss_, accuracy=acc, f1=f1)\n progress_bar.update(1)\n progress_bar.set_postfix(loss=np.mean(epoch_loss),\n accuracy=np.mean(epoch_acc),\n f1=np.mean(epoch_f1))\n \n return np.mean(epoch_loss), np.mean(epoch_acc), np.mean(epoch_f1), np.mean(epoch_empathy_mse), np.mean(epoch_distress_mse)\n\n def eval_epoch(self, val_ds, criteria):\n \"\"\"Validation loop. 
val DS has exactly one batch.\"\"\"\n val_epoch_loss = []\n val_epoch_acc = []\n val_epoch_f1 = []\n\n val_epoch_empathy_mse = []\n val_epoch_distress_mse = []\n self.eval()\n with torch.no_grad():\n for val_batch in val_ds:\n val_batch[\"inputs\"][0] = self.tokenizer(\n text=val_batch[\"inputs\"][0],\n add_special_tokens=True,\n return_attention_mask=True,\n max_length=self.cfg.maxlen,\n padding='max_length',\n truncation=True,\n return_tensors=\"pt\")\n\n val_batch = self.push_batch_to_device(val_batch)\n\n val_outputs = self(val_batch)\n val_loss = self.loss_fn(val_batch, val_outputs, criteria)\n\n val_acc, val_f1, val_empathy_mse, val_distress_mse, val_cm = self.calculate_metrics(val_batch, val_outputs)\n val_epoch_loss.append(val_loss.detach().cpu().numpy())\n val_epoch_acc.append(val_acc)\n val_epoch_f1.append(val_f1)\n val_epoch_empathy_mse.append(val_empathy_mse)\n val_epoch_distress_mse.append(val_distress_mse)\n\n return np.mean(val_epoch_loss), np.mean(val_epoch_acc), np.mean(val_epoch_f1), np.mean(val_epoch_empathy_mse), np.mean(val_epoch_distress_mse), val_cm\n\n ### Main driver function\n def fit(self):\n best_metrics = {\"acc\": 0., \"loss\": 0., \"f1\": 0.}\n optimizer = get_optimizer(self.cfg, self.parameters())\n criteria = self.get_criteria()\n\n train_ds, val_ds = get_dataset(self.cfg)\n\n for epoch in range(self.cfg.epochs):\n progress_bar = tqdm(range(len(train_ds)))\n \n epoch_loss, epoch_acc, epoch_f1, epoch_empathy_mse, epoch_distress_mse = self.train_epoch(train_ds,\n optimizer, criteria, progress_bar)\n \n # validation loop\n val_loss, val_acc, val_f1, val_empathy_mse, val_distress_mse, val_cm = self.eval_epoch(\n val_ds, criteria)\n\n val_metrics = {\"acc\": val_acc, \"loss\": val_loss, \"f1\": val_f1}\n\n progress_bar.close()\n\n if best_metrics[self.cfg.monitor_metric] < val_metrics[\n self.cfg.monitor_metric]:\n best_metrics[self.cfg.monitor_metric] = val_metrics[\n self.cfg.monitor_metric]\n torch.save(self.state_dict(), f\"./ckpts/bert_{epoch}.pt\")\n\n stats_dict = {\n \"epoch\": epoch,\n \"train loss\": epoch_loss,\n \"train accuracy\": epoch_acc,\n \"train macro f1\": epoch_f1,\n \"train empathy mse\": epoch_empathy_mse,\n \"train distress mse\": epoch_distress_mse,\n \"val loss\": val_loss,\n \"val accuracy\": val_acc,\n \"val macro f1\": val_f1,\n \"val empathy mse\": val_empathy_mse,\n \"val distress mse\": val_distress_mse,\n }\n\n self.push_to_wandb(stats_dict, val_cm)\n" ]
[ [ "torch.nn.Softmax", "torch.nn.CrossEntropyLoss", "torch.nn.Linear", "matplotlib.pyplot.clf", "numpy.mean", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
davendw49/gakg
[ "9b1cde1c702cbc87edfcb45687815653372665cd" ]
[ "code/baselines/simplE/SimplE.py" ]
[ "import torch\nimport torch.nn as nn\nimport math\n\nclass SimplE(nn.Module):\n def __init__(self, num_ent, num_rel, emb_dim, device):\n super(SimplE, self).__init__()\n self.num_ent = num_ent\n self.num_rel = num_rel\n self.emb_dim = emb_dim\n self.device = device\n\n self.ent_h_embs = nn.Embedding(num_ent, emb_dim).to(device)\n self.ent_t_embs = nn.Embedding(num_ent, emb_dim).to(device)\n self.rel_embs = nn.Embedding(num_rel, emb_dim).to(device)\n self.rel_inv_embs = nn.Embedding(num_rel, emb_dim).to(device)\n\n sqrt_size = 6.0 / math.sqrt(self.emb_dim)\n nn.init.uniform_(self.ent_h_embs.weight.data, -sqrt_size, sqrt_size)\n nn.init.uniform_(self.ent_t_embs.weight.data, -sqrt_size, sqrt_size)\n nn.init.uniform_(self.rel_embs.weight.data, -sqrt_size, sqrt_size)\n nn.init.uniform_(self.rel_inv_embs.weight.data, -sqrt_size, sqrt_size)\n \n def l2_loss(self):\n return ((torch.norm(self.ent_h_embs.weight, p=2) ** 2) + (torch.norm(self.ent_t_embs.weight, p=2) ** 2) + (torch.norm(self.rel_embs.weight, p=2) ** 2) + (torch.norm(self.rel_inv_embs.weight, p=2) ** 2)) / 2\n\n def forward(self, heads, rels, tails):\n hh_embs = self.ent_h_embs(heads)\n ht_embs = self.ent_h_embs(tails)\n th_embs = self.ent_t_embs(heads)\n tt_embs = self.ent_t_embs(tails)\n r_embs = self.rel_embs(rels)\n r_inv_embs = self.rel_inv_embs(rels)\n\n scores1 = torch.sum(hh_embs * r_embs * tt_embs, dim=1)\n scores2 = torch.sum(ht_embs * r_inv_embs * th_embs, dim=1)\n return torch.clamp((scores1 + scores2) / 2, -20, 20)\n " ]
[ [ "torch.nn.init.uniform_", "torch.norm", "torch.sum", "torch.nn.Embedding", "torch.clamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dennissergeev/cloudsat_example
[ "c8abd77466e39a493dd4a7429d50e26fc8cb6713" ]
[ "utils.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Auxiliary functions for sattools module.\"\"\"\nimport numpy as np\n\n\ndef cc_interp2d(data, X, Z, x1, x2, nx, z1, z2, nz, use_numba=True):\n if use_numba:\n try:\n import numba as nb\n except ImportError:\n print(\"Unsuccessful numba import, using pure Python\")\n use_numba = False\n\n if use_numba:\n res = nb.jit()(_interp2d)(data, X, Z, x1, x2, nx, z1, z2, nz)\n else:\n res = _interp2d(data, X, Z, x1, x2, nx, z1, z2, nz)\n return res\n\n\ndef _interp2d(data, X, Z, x1, x2, nx, z1, z2, nz):\n \"\"\"\n Interpolate 2D data with coordinates given by 1D and 2D arrays.\n\n data is a two-dimensional array of data to be interpolated.\n X and Z are one- and two-dimensional arrays, giving coordinates\n of data points along the first and second axis, respectively\n\n data, X and Z are expected to be C-contiguous float32 numpy arrays\n with no mask and no transformation (such as transposition) applied.\n \"\"\"\n\n xs = (x2 - x1) / nx\n zs = (z2 - z1) / nz\n w = data.shape[0]\n h = data.shape[1]\n\n out = np.zeros((nx, nz), dtype=np.float32)\n q = np.zeros((nx, nz), dtype=np.int32)\n\n for i in range(w):\n n1 = ((X[i - 1] + X[i]) / 2 - x1) / xs if i - 1 >= 0 else -1\n n2 = ((X[i + 1] + X[i]) / 2 - x1) / xs if i + 1 < w else nx\n if n2 - n1 < 1:\n n1 = n2 = (X[i] - x1) / xs\n\n for j in range(h):\n m1 = ((Z[i, j - 1] + Z[i, j]) / 2 - z1) / zs if j - 1 >= 0 else -1\n m2 = ((Z[i, j + 1] + Z[i, j]) / 2 - z1) / zs if j + 1 < h else nz\n if m2 - m1 < 1:\n m1 = m2 = (Z[i, j] - z1) / zs\n\n for n in range(int(n1 + 0.5), int(n2 + 0.5 + 1)):\n for m in range(int(m1 + 0.5), int(m2 + 0.5 + 1)):\n if n < 0 or n >= nx or m < 0 or m >= nz:\n continue\n if np.isnan(data[i, j]):\n continue\n out[n, m] += data[i, j]\n q[n, m] += 1\n\n for n in range(nx):\n for m in range(nz):\n if q[n, m] == 0:\n out[n, m] = np.nan\n else:\n out[n, m] /= q[n, m]\n return out\n" ]
[ [ "numpy.isnan", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chdoig/blaze
[ "caa5a497e1ca1ceb1cf585483312ff4cd74d0bda" ]
[ "blaze/api/into.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nfrom dynd import nd\nimport datashape\nimport sys\nfrom datashape import DataShape, dshape, Record, to_numpy_dtype\nimport toolz\nfrom toolz import concat, partition_all, valmap\nfrom cytoolz import pluck\nimport copy\nfrom datetime import datetime\nfrom datashape.user import validate, issubschema\nfrom numbers import Number\nfrom collections import Iterable, Iterator\nimport gzip\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport tables\n\nfrom ..compute.chunks import ChunkIterator\nfrom ..dispatch import dispatch\nfrom ..expr import TableExpr, Expr\nfrom ..compute.core import compute\nfrom .resource import resource\nfrom ..compatibility import _strtypes\nfrom ..utils import keywords\n\n\n__all__ = ['into', 'discover']\n\n\n@dispatch(object, object)\ndef into(a, b, **kwargs):\n \"\"\"\n Push data in ``b`` into a container of type ``a``\n\n Examples\n --------\n\n >>> into([], (1, 2, 3))\n [1, 2, 3]\n\n >>> into(np.ndarray, [['Alice', 100], ['Bob', 200]], names=['name', 'amt'])\n rec.array([('Alice', 100), ('Bob', 200)],\n dtype=[('name', 'S5'), ('amt', '<i8')])\n\n >>> into(pd.DataFrame, _)\n name amt\n 0 Alice 100\n 1 Bob 200\n \"\"\"\n raise NotImplementedError(\n \"Blaze does not know a rule for the following conversion\"\n \"\\n%s <- %s\" % (type(a).__name__, type(b).__name__))\n\n# Optional imports\n\ntry:\n from bokeh.objects import ColumnDataSource\nexcept ImportError:\n ColumnDataSource = type(None)\n\ntry:\n import bcolz\n from bcolz import ctable, carray\nexcept ImportError:\n ctable = type(None)\n carray = type(None)\n\ntry:\n import pymongo\n from pymongo.collection import Collection\nexcept ImportError:\n Collection = type(None)\n\ntry:\n from ..data import DataDescriptor, CSV\nexcept ImportError:\n DataDescriptor = type(None)\n CSV = type(None)\n\n\n@dispatch(type, object)\ndef into(a, b, **kwargs):\n \"\"\"\n Resolve into when given a type as a first argument\n\n Usually we give into an example of the thing that we want\n\n >>> into([], (1, 2, 3)) # give me a list like []\n [1, 2, 3]\n\n However sometimes it's inconvenient to construct a dummy example.\n In those cases we just specify the desired type\n\n >>> into(list, (1, 2, 3))\n [1, 2, 3]\n \"\"\"\n f = into.dispatch(a, type(b))\n try:\n a = a()\n except:\n pass\n return f(a, b, **kwargs)\n\n@dispatch((list, tuple, set), (list, tuple, set, Iterator,\n type(dict().items())))\ndef into(a, b):\n return type(a)(b)\n\n\n@dispatch(set, list)\ndef into(a, b):\n try:\n return set(b)\n except TypeError:\n return set(map(tuple, b))\n\n\n@dispatch(dict, (list, tuple, set))\ndef into(a, b):\n return dict(b)\n\n@dispatch((list, tuple, set), dict)\ndef into(a, b):\n return type(a)(map(type(a), sorted(b.items(), key=lambda x: x[0])))\n\n@dispatch(nd.array, (Iterable, Number, str))\ndef into(a, b, **kwargs):\n return nd.array(b, **kwargs)\n\n@dispatch(nd.array, nd.array)\ndef into(a, b):\n return b\n\n@dispatch(np.ndarray, np.ndarray)\ndef into(a, b, **kwargs):\n return b\n\n@dispatch(list, nd.array)\ndef into(a, b):\n return nd.as_py(b, tuple=True)\n\n@dispatch(tuple, nd.array)\ndef into(a, b):\n return tuple(nd.as_py(b, tuple=True))\n\n@dispatch(np.ndarray, nd.array)\ndef into(a, b, **kwargs):\n return nd.as_numpy(b, allow_copy=True)\n\n@dispatch(np.ndarray, (Iterable, Iterator))\ndef into(a, b, **kwargs):\n b = iter(b)\n first = next(b)\n b = toolz.concat([[first], b])\n if isinstance(first, datetime):\n b = map(np.datetime64, b)\n if isinstance(first, 
(list, tuple)):\n return np.rec.fromrecords(list(b), **kwargs)\n else:\n return np.asarray(list(b), **kwargs)\n\ndef degrade_numpy_dtype_to_python(dt):\n \"\"\"\n\n >>> degrade_numpy_dtype_to_python(np.dtype('M8[ns]'))\n dtype('<M8[us]')\n >>> dt = np.dtype([('a', 'S7'), ('b', 'M8[D]'), ('c', 'M8[ns]')])\n >>> degrade_numpy_dtype_to_python(dt)\n dtype([('a', 'S7'), ('b', '<M8[D]'), ('c', '<M8[us]')])\n \"\"\"\n replacements = {'M8[ns]': np.dtype('M8[us]'),\n 'M8[as]': np.dtype('M8[us]')}\n dt = replacements.get(dt.str.lstrip('<>'), dt)\n\n if str(dt)[0] == '[':\n return np.dtype([(name, degrade_numpy_dtype_to_python(dt[name]))\n for name in dt.names])\n return dt\n\n\n@dispatch(list, np.ndarray)\ndef into(a, b):\n if 'M8' in str(b.dtype) or 'datetime' in str(b.dtype):\n b = b.astype(degrade_numpy_dtype_to_python(b.dtype))\n return numpy_ensure_strings(b).tolist()\n\n\n@dispatch(pd.DataFrame, np.ndarray)\ndef into(df, x):\n if len(df.columns) > 0:\n columns = list(df.columns)\n else:\n columns = list(x.dtype.names)\n return pd.DataFrame(numpy_ensure_strings(x), columns=columns)\n\n@dispatch((pd.DataFrame, list, tuple, Iterator, nd.array), tables.Table)\ndef into(a, t):\n x = into(np.ndarray, t)\n return into(a, x)\n\n\n@dispatch(np.ndarray, tables.Table)\ndef into(_, t):\n return t[:]\n\n\ndef numpy_fixlen_strings(x):\n \"\"\" Returns new array with strings as fixed length\n\n >>> from numpy import rec\n >>> x = rec.array([(1, 'Alice', 100), (2, 'Bob', 200)],\n ... dtype=[('id', 'i8'), ('name', 'O'), ('amount', 'i8')])\n\n >>> numpy_fixlen_strings(x) # doctest: +SKIP\n rec.array([(1, 'Alice', 100), (2, 'Bob', 200)],\n dtype=[('id', '<i8'), ('name', 'S5'), ('amount', '<i8')])\n \"\"\"\n if \"'O'\" in str(x.dtype):\n dt = [(n, \"S%d\" % max(map(len, x[n])) if x.dtype[n] == 'O' else x.dtype[n])\n for n in x.dtype.names]\n x = x.astype(dt)\n return x\n\n@dispatch(tables.Table, np.ndarray)\ndef into(_, x, filename=None, datapath=None, **kwargs):\n if filename is None or datapath is None:\n raise ValueError(\"Must specify filename for new PyTables file. 
\\n\"\n \"Example: into(tb.Tables, df, filename='myfile.h5', datapath='/data')\")\n\n f = tables.open_file(filename, 'w')\n t = f.create_table('/', datapath, obj=numpy_fixlen_strings(x))\n return t\n\n\n@dispatch(tables.Table, pd.DataFrame)\ndef into(a, df, **kwargs):\n return into(a, into(np.ndarray, df), **kwargs)\n # store = pd.HDFStore(filename, mode='w')\n # store.put(datapath, df, format='table', data_columns=True, index=False)\n # return getattr(store.root, datapath).table\n\n\n@dispatch(tables.Table, _strtypes)\ndef into(a, b, **kwargs):\n kw = dict(kwargs)\n if 'output_path' in kw:\n del kw['output_path']\n return into(a, resource(b, **kw), **kwargs)\n\n\n@dispatch(list, pd.DataFrame)\ndef into(_, df):\n return into([], into(np.ndarray(0), df))\n\n@dispatch(pd.DataFrame, nd.array)\ndef into(a, b):\n ds = dshape(nd.dshape_of(b))\n if list(a.columns):\n names = a.columns\n elif isinstance(ds[-1], Record):\n names = ds[-1].names\n else:\n names = None\n if names:\n return pd.DataFrame(nd.as_py(b), columns=names)\n else:\n return pd.DataFrame(nd.as_py(b))\n\n@dispatch(pd.DataFrame, (list, tuple, Iterator, type(dict().items())))\ndef into(df, seq, **kwargs):\n if list(df.columns):\n return pd.DataFrame(list(seq), columns=df.columns, **kwargs)\n else:\n return pd.DataFrame(list(seq), **kwargs)\n\n@dispatch(pd.DataFrame, pd.DataFrame)\ndef into(_, df):\n return df.copy()\n\n@dispatch(pd.Series, pd.Series)\ndef into(_, ser):\n return ser\n\n@dispatch(pd.Series, Iterator)\ndef into(a, b, **kwargs):\n return into(a, list(b), **kwargs)\n\n@dispatch(pd.Series, (list, tuple))\ndef into(a, b, **kwargs):\n return pd.Series(b, **kwargs)\n\n@dispatch(pd.Series, TableExpr)\ndef into(ser, col):\n ser = into(ser, compute(col))\n ser.name = col.name\n return ser\n\n@dispatch(pd.Series, np.ndarray)\ndef into(_, x):\n return pd.Series(numpy_ensure_strings(x))\n df = into(pd.DataFrame(), x)\n return df[df.columns[0]]\n\n@dispatch(pd.DataFrame, pd.Series)\ndef into(_, df):\n return pd.DataFrame(df)\n\n@dispatch(list, pd.Series)\ndef into(_, ser):\n return ser.tolist()\n\n@dispatch(nd.array, pd.DataFrame)\ndef into(a, df):\n schema = discover(df)\n arr = nd.empty(str(schema))\n for i in range(len(df.columns)):\n arr[:, i] = np.asarray(df[df.columns[i]])\n return arr\n\n\n@dispatch(np.ndarray, pd.DataFrame)\ndef into(a, df, **kwargs):\n return df.to_records(index=False)\n\n\n@dispatch(nd.array)\ndef discover(arr):\n return dshape(nd.dshape_of(arr))\n\n\n@dispatch(pd.DataFrame)\ndef discover(df):\n obj = datashape.coretypes.object_\n names = list(df.columns)\n dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))\n dtypes = [datashape.string if dt == obj else dt for dt in dtypes]\n schema = Record(list(zip(names, dtypes)))\n return len(df) * schema\n\n\n@dispatch(np.ndarray, carray)\ndef into(a, b, **kwargs):\n return b[:]\n\n@dispatch(pd.Series, carray)\ndef into(a, b):\n return into(a, into(np.ndarray, b))\n\n@dispatch(ColumnDataSource, (TableExpr, pd.DataFrame, np.ndarray, ctable))\ndef into(cds, t):\n columns = discover(t).subshape[0][0].names\n return ColumnDataSource(data=dict((col, into([], t[col]))\n for col in columns))\n\n@dispatch(ColumnDataSource, nd.array)\ndef into(cds, t):\n columns = discover(t).subshape[0][0].names\n return ColumnDataSource(data=dict((col, into([], getattr(t, col)))\n for col in columns))\n\n@dispatch(ColumnDataSource, Collection)\ndef into(cds, other):\n return into(cds, into(pd.DataFrame(), other))\n\n\n@dispatch(pd.DataFrame, ColumnDataSource)\ndef into(df, 
cds):\n return cds.to_df()\n\n\n@dispatch(ctable, TableExpr)\ndef into(a, b, **kwargs):\n c = compute(b)\n if isinstance(c, (list, tuple, Iterator)):\n kwargs['types'] = [datashape.to_numpy_dtype(t) for t in\n b.schema[0].types]\n kwargs['names'] = b.columns\n return into(a, c, **kwargs)\n\n\n@dispatch(pd.DataFrame, ColumnDataSource)\ndef into(df, cds):\n return cds.to_df()\n\n\ndef fix_len_string_filter(ser):\n \"\"\" Convert object strings to fixed length, pass through others \"\"\"\n if ser.dtype == np.dtype('O'):\n return np.asarray(list(ser))\n else:\n return np.asarray(ser)\n\n\n@dispatch(ctable, nd.array)\ndef into(a, b, **kwargs):\n names = dshape(nd.dshape_of(b))[1].names\n columns = [getattr(b, name) for name in names]\n columns = [np.asarray(nd.as_py(c))\n if to_numpy_dtype(dshape(nd.dshape_of(c))) == np.dtype('O')\n else into(np.ndarray(0), c) for c in columns]\n\n return bcolz.ctable(columns, names=names, **kwargs)\n\n\n@dispatch(ctable, pd.DataFrame)\ndef into(a, df, **kwargs):\n return ctable([fix_len_string_filter(df[c]) for c in df.columns],\n names=list(df.columns), **kwargs)\n\n\n@dispatch(pd.DataFrame, ctable)\ndef into(a, b, **kwargs):\n return b.todataframe()\n\n\n@dispatch(nd.array, ctable)\ndef into(a, b, **kwargs):\n return into(a, b[:], **kwargs)\n\n\n@dispatch(ctable, ctable)\ndef into(a, b, **kwargs):\n if not kwargs and a == ctable:\n return b\n else:\n raise NotImplementedError()\n\n\n@dispatch(Collection, DataDescriptor)\ndef into(coll, dd, chunksize=1024, **kwargs):\n return into(coll, iter(dd), chunksize=chunksize, schema=dd.schema)\n\n\n@dispatch(Collection, (tuple, list, Iterator))\ndef into(coll, seq, columns=None, schema=None, chunksize=1024, **kwargs):\n seq = iter(seq)\n item = next(seq)\n seq = concat([[item], seq])\n\n if isinstance(item, (tuple, list)):\n if not columns and schema:\n columns = dshape(schema)[0].names\n if not columns:\n raise ValueError(\"Inputs must be dictionaries. \"\n \"Or provide columns=[...] or schema=DataShape(...) keyword\")\n seq = (dict(zip(columns, item)) for item in seq)\n\n for block in partition_all(1024, seq):\n coll.insert(copy.deepcopy(block))\n\n return coll\n\n\ndef numpy_ensure_strings(x):\n \"\"\" Return a new array with strings that will be turned into the str type\n\n In Python 3 the 'S' numpy type results in ``bytes`` objects. 
This coerces the\n numpy type to a form that will create ``str`` objects\n\n Examples\n ========\n\n >>> x = np.array(['a', 'b'], dtype='S1')\n >>> # Python 2\n >>> numpy_ensure_strings(x) # doctest: +SKIP\n np.array(['a', 'b'], dtype='S1')\n >>> # Python 3\n >>> numpy_ensure_strings(x) # doctest: +SKIP\n np.array(['a', 'b'], dtype='U1')\n \"\"\"\n if sys.version_info[0] >= 3 and \"S\" in str(x.dtype):\n if x.dtype.names:\n dt = [(n, x.dtype[n].str.replace('S', 'U')) for n in x.dtype.names]\n x = x.astype(dt)\n else:\n dt = x.dtype.str.replace('S', 'U')\n x = x.astype(dt)\n return x\n\n\n@dispatch(Collection, (nd.array, np.ndarray))\ndef into(coll, x, **kwargs):\n return into(coll, into(pd.DataFrame(), x), **kwargs)\n\n\n@dispatch(Collection, ctable)\ndef into(coll, x, **kwargs):\n from blaze.bcolz import chunks\n for chunk in chunks(x):\n into(coll, chunk)\n\n\n@dispatch(Collection, Collection)\ndef into(a, b, **kwargs):\n \"\"\" Copy collection on server-side\n\n https://groups.google.com/forum/#!topic/mongodb-user/wHqJFp44baY\n \"\"\"\n b.database.command('eval', 'db.%s.copyTo(\"%s\")' % (b.name, a.name),\n nolock=True)\n return b\n\n\n@dispatch(Collection, pd.DataFrame)\ndef into(coll, df, **kwargs):\n return into(coll, into([], df), columns=list(df.columns), **kwargs)\n\n\n@dispatch(Collection, TableExpr)\ndef into(coll, t, **kwargs):\n from blaze import compute\n result = compute(t)\n return into(coll, result, schema=t.schema, **kwargs)\n\n\n@dispatch(pd.DataFrame, Collection)\ndef into(df, coll, **kwargs):\n seq = list(coll.find())\n for item in seq:\n del item['_id']\n return pd.DataFrame(seq, **kwargs)\n\n\n@dispatch((nd.array, np.ndarray), Collection)\ndef into(x, coll, **kwargs):\n return into(x, into(pd.DataFrame(), coll), **kwargs)\n\n\ndef _into_iter_mongodb(l, coll, columns=None, schema=None):\n \"\"\" Into helper function\n\n Return both a lazy sequence of tuples and a list of column names\n \"\"\"\n seq = coll.find()\n if not columns and schema:\n columns = schema[0].names\n elif not columns:\n item = next(seq)\n seq = concat([[item], seq])\n columns = sorted(item.keys())\n columns.remove('_id')\n return columns, pluck(columns, seq)\n\n\n@dispatch((carray, ctable), Collection)\ndef into(x, coll, columns=None, schema=None, **kwargs):\n columns, seq = _into_iter_mongodb(x, coll, columns=None, schema=None)\n return into(x, seq, names=columns, **kwargs)\n\n\n@dispatch(Iterator, Collection)\ndef into(l, coll, columns=None, schema=None):\n columns, seq = _into_iter_mongodb(l, coll, columns=columns, schema=schema)\n return seq\n\n\n@dispatch((tuple, list), Collection)\ndef into(l, coll, columns=None, schema=None):\n return type(l)(into(Iterator, coll, columns=columns, schema=schema))\n\n\n@dispatch(nd.array, DataDescriptor)\ndef into(_, dd, **kwargs):\n return dd.dynd[:]\n\n\n@dispatch(Iterator, DataDescriptor)\ndef into(_, dd, **kwargs):\n return iter(dd)\n\n\n@dispatch((list, tuple, set), DataDescriptor)\ndef into(c, dd, **kwargs):\n return type(c)(dd)\n\n\n@dispatch((np.ndarray, pd.DataFrame, ColumnDataSource, ctable), DataDescriptor)\ndef into(a, b, **kwargs):\n return into(a, into(nd.array(), b), **kwargs)\n\n\n@dispatch((np.ndarray, pd.DataFrame, ColumnDataSource, ctable, tables.Table,\n list, tuple, set),\n CSV)\ndef into(a, b, **kwargs):\n return into(a, into(pd.DataFrame(), b, **kwargs), **kwargs)\n\n\n@dispatch(np.ndarray, CSV)\ndef into(a, b, **kwargs):\n return into(a, into(pd.DataFrame(), b, **kwargs))\n\n\n@dispatch(pd.DataFrame, CSV)\ndef into(a, b, **kwargs):\n 
dialect = b.dialect.copy()\n del dialect['lineterminator']\n dates = [i for i, typ in enumerate(b.schema[0].types)\n if 'date' in str(typ)]\n schema = b.schema\n if '?' in str(schema):\n schema = dshape(str(schema).replace('?', ''))\n\n dtypes = valmap(to_numpy_dtype, schema[0].dict)\n\n datenames = [name for name in dtypes\n if np.issubdtype(dtypes[name], np.datetime64)]\n\n dtypes = dict((k, v) for k, v in dtypes.items()\n if not np.issubdtype(v, np.datetime64))\n\n if 'strict' in dialect:\n del dialect['strict']\n\n # Pass only keyword arguments appropriate for read_csv\n kws = keywords(pd.read_csv)\n options = toolz.merge(dialect, kwargs)\n options = toolz.keyfilter(lambda k: k in kws, options)\n\n if b.open == gzip.open:\n options['compression'] = 'gzip'\n\n return pd.read_csv(b.path,\n skiprows=1 if b.header else 0,\n dtype=dtypes,\n parse_dates=datenames,\n names=b.columns,\n **options)\n\n\n@dispatch(pd.DataFrame, DataDescriptor)\ndef into(a, b):\n return pd.DataFrame(list(b), columns=b.columns)\n\n\n@dispatch(object, Expr)\ndef into(a, b):\n return compute(b)\n\n\n@dispatch((tuple, list, Iterator, np.ndarray, pd.DataFrame, Collection, set,\n ctable), _strtypes)\ndef into(a, b, **kwargs):\n return into(a, resource(b, **kwargs), **kwargs)\n\n\n@dispatch(Iterator, (list, tuple, set, Iterator))\ndef into(a, b):\n return b\n\n\n@dispatch(pd.DataFrame, ChunkIterator)\ndef into(df, chunks, **kwargs):\n dfs = [into(df, chunk, **kwargs) for chunk in chunks]\n return pd.concat(dfs, ignore_index=True)\n\n\n@dispatch(np.ndarray, ChunkIterator)\ndef into(x, chunks, **kwargs):\n arrs = [into(x, chunk, **kwargs) for chunk in chunks]\n return np.vstack(arrs)\n\n@dispatch(Collection, ChunkIterator)\ndef into(coll, chunks, **kwargs):\n for chunk in chunks:\n into(coll, chunk, **kwargs)\n return coll\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Series", "numpy.asarray", "numpy.issubdtype", "numpy.ndarray", "pandas.DataFrame", "numpy.dtype", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
LandAndLand/Deep-Model-Watermarking
[ "9add23a588903f7e6879527b1347bbd628bd6279" ]
[ "SR/testSREmb.py" ]
[ "\nimport os\nimport torch\nfrom tqdm.std import tqdm\n\nfrom SRDataset import SRDataset\nfrom SR_parser import parameter_parser\nfrom tqdm import tqdm\nfrom utils import save_result_pic\nfrom models.HidingRes import HidingRes\n\nopt = parameter_parser()\nstage = \"IniStage\"\ndataset_dir = \"/home/ay3/houls/watermark_dataset/derain/\"\nStageDir = os.path.join(dataset_dir, stage)\nmode = \"train\"\n# \"/home/ay3/houls/watermark_dataset/derain/IniStage/train\"\nmodeStageDir = os.path.join(StageDir, mode)\ntestDir = os.path.join(dataset_dir, \"test\")\n\nresult_root = \"/home/ay3/houls/Deep-Model-Watermarking/result\"\nresult_stage = \"derain_flower_Init\"\nresult_time = \"2021-09-30-11_20\"\nresult_dir = os.path.join(result_root, result_stage, result_time)\nrmodelname = \"netR191.pth\"\nmodelpath = os.path.join(result_dir, 'modelrun/outckpts', rmodelname)\n\n# 输入到SR model中的路径\n# input_dir = os.path.join(result_dir, input_name)\n# input_name = \"test\"\n# input_dir = os.path.join(dataset_dir, \"test\")\ninput_dir = os.path.join(result_root, \"derain_flower_SR/2021-10-05-21_22\", \"SRout_two\")\noutput_dir = os.path.join(result_root, \"derain_flower_SR/2021-10-05-21_22\", \"SRout_Rextractone\")\n\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\nnetR = HidingRes(in_c=3, out_c=3)\nnetR.load_state_dict(torch.load(modelpath))\nnetR.cuda()\nnetR.eval()\nRdataset = SRDataset(opt, input_dir)\ndata_loader = torch.utils.data.DataLoader(\n Rdataset, batch_size=1, shuffle=False, num_workers=int(opt.nThreads))\nfor i, data in tqdm(enumerate(data_loader)):\n input_A = data['A'].cuda()\n real_B = data['B'].cuda()\n fake_B = data['B1'].cuda()\n # B2 = data['B2'].cuda()\n #print(f'input a size: {input_A.size()}')\n this_batch_size = int(input_A.size()[0])\n img_path = data['A_paths'][1]\n watermark_B1 = netR(fake_B)\n # watermark_B = netR(B2)\n watermark_inputA = netR(input_A)\n images_tensor = torch.cat([input_A, watermark_inputA, real_B, watermark_B1], axis=-1)\n save_result_pic(images_tensor, img_path[0], \"testSREmb\", output_dir)\n\n\n" ]
[ [ "torch.cat", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tienduccao/clearml
[ "e182c188e0ceca939fa7ba8f5657228136f3ab1f" ]
[ "clearml/storage/helper.py" ]
[ "from __future__ import with_statement\n\nimport errno\nimport getpass\nimport itertools\nimport json\nimport os\nimport shutil\nimport sys\nimport threading\nfrom abc import ABCMeta, abstractmethod\nfrom collections import namedtuple\nfrom concurrent.futures import ThreadPoolExecutor\nfrom copy import copy\nfrom datetime import datetime\nfrom multiprocessing.pool import ThreadPool\nfrom tempfile import mktemp\nfrom time import time\nfrom types import GeneratorType\n\nimport requests\nimport six\nfrom _socket import gethostname\nfrom attr import attrs, attrib, asdict\nfrom furl import furl\nfrom pathlib2 import Path\nfrom requests import codes as requests_codes\nfrom requests.exceptions import ConnectionError\nfrom six import binary_type, StringIO\nfrom six.moves.queue import Queue, Empty\nfrom six.moves.urllib.parse import urlparse\nfrom six.moves.urllib.request import url2pathname\n\nfrom .callbacks import UploadProgressReport, DownloadProgressReport\nfrom .util import quote_url\nfrom ..backend_api.utils import get_http_session_with_retry\nfrom ..backend_config.bucket_config import S3BucketConfigurations, GSBucketConfigurations, AzureContainerConfigurations\nfrom ..config import config, deferred_config\nfrom ..debugging import get_logger\nfrom ..errors import UsageError\nfrom ..utilities.process.mp import ForkSafeRLock\n\n\nclass StorageError(Exception):\n pass\n\n\nclass DownloadError(Exception):\n pass\n\n\[email protected]_metaclass(ABCMeta)\nclass _Driver(object):\n\n @classmethod\n def get_logger(cls):\n return get_logger('storage')\n\n @abstractmethod\n def get_container(self, container_name, config=None, **kwargs):\n pass\n\n @abstractmethod\n def test_upload(self, test_path, config, **kwargs):\n pass\n\n @abstractmethod\n def upload_object_via_stream(self, iterator, container, object_name, extra, **kwargs):\n pass\n\n @abstractmethod\n def list_container_objects(self, container, ex_prefix, **kwargs):\n pass\n\n @abstractmethod\n def get_direct_access(self, remote_path, **kwargs):\n pass\n\n @abstractmethod\n def download_object(self, obj, local_path, overwrite_existing, delete_on_failure, callback, **kwargs):\n pass\n\n @abstractmethod\n def download_object_as_stream(self, obj, chunk_size, **kwargs):\n pass\n\n @abstractmethod\n def delete_object(self, obj, **kwargs):\n pass\n\n @abstractmethod\n def upload_object(self, file_path, container, object_name, extra, **kwargs):\n pass\n\n @abstractmethod\n def get_object(self, container_name, object_name, **kwargs):\n pass\n\n\nclass StorageHelper(object):\n \"\"\" Storage helper.\n Used by the entire system to download/upload files.\n Supports both local and remote files (currently local files, network-mapped files, HTTP/S and Amazon S3)\n \"\"\"\n _temp_download_suffix = '.partially'\n\n @classmethod\n def _get_logger(cls):\n return get_logger('storage')\n\n @attrs\n class _PathSubstitutionRule(object):\n registered_prefix = attrib(type=str)\n local_prefix = attrib(type=str)\n replace_windows_sep = attrib(type=bool)\n replace_linux_sep = attrib(type=bool)\n\n path_substitution_config = 'storage.path_substitution'\n\n @classmethod\n def load_list_from_config(cls):\n rules_list = []\n for index, sub_config in enumerate(config.get(cls.path_substitution_config, list())):\n rule = cls(\n registered_prefix=sub_config.get('registered_prefix', None),\n local_prefix=sub_config.get('local_prefix', None),\n replace_windows_sep=sub_config.get('replace_windows_sep', False),\n replace_linux_sep=sub_config.get('replace_linux_sep', False),\n )\n\n 
if any(prefix is None for prefix in (rule.registered_prefix, rule.local_prefix)):\n StorageHelper._get_logger().warning(\n \"Illegal substitution rule configuration '{}[{}]': {}\".format(\n cls.path_substitution_config,\n index,\n asdict(rule),\n ))\n\n continue\n\n if all((rule.replace_windows_sep, rule.replace_linux_sep)):\n StorageHelper._get_logger().warning(\n \"Only one of replace_windows_sep and replace_linux_sep flags may be set.\"\n \"'{}[{}]': {}\".format(\n cls.path_substitution_config,\n index,\n asdict(rule),\n ))\n continue\n\n rules_list.append(rule)\n\n return rules_list\n\n class _UploadData(object):\n @property\n def src_path(self):\n return self._src_path\n\n @property\n def dest_path(self):\n return self._dest_path\n\n @property\n def extra(self):\n return self._extra\n\n @property\n def callback(self):\n return self._callback\n\n @property\n def retries(self):\n return self._retries\n\n def __init__(self, src_path, dest_path, extra, callback, retries):\n self._src_path = src_path\n self._dest_path = dest_path\n self._extra = extra\n self._callback = callback\n self._retries = retries\n\n def __str__(self):\n return \"src=%s\" % self.src_path\n\n _helpers = {} # cache of helper instances\n\n # global terminate event for async upload threads\n # _terminate = threading.Event()\n _async_upload_threads = set()\n _upload_pool = None\n _upload_pool_pid = None\n\n # collect all bucket credentials that aren't empty (ignore entries with an empty key or secret)\n _s3_configurations = deferred_config('aws.s3', {}, transform=S3BucketConfigurations.from_config)\n _gs_configurations = deferred_config('google.storage', {}, transform=GSBucketConfigurations.from_config)\n _azure_configurations = deferred_config('azure.storage', {}, transform=AzureContainerConfigurations.from_config)\n _path_substitutions = deferred_config(transform=_PathSubstitutionRule.load_list_from_config)\n\n @property\n def log(self):\n return self._log\n\n @property\n def scheme(self):\n return self._scheme\n\n @property\n def secure(self):\n return self._secure\n\n @property\n def base_url(self):\n return self._base_url\n\n @classmethod\n def get(cls, url, logger=None, **kwargs):\n \"\"\"\n Get a storage helper instance for the given URL\n\n :return: A StorageHelper instance.\n \"\"\"\n\n # Handle URL substitution etc before locating the correct storage driver\n url = cls._canonize_url(url)\n\n # Get the credentials we should use for this url\n base_url = cls._resolve_base_url(url)\n\n instance_key = '%s_%s' % (base_url, threading.current_thread().ident or 0)\n\n force_create = kwargs.pop('__force_create', False)\n if (instance_key in cls._helpers) and (not force_create):\n return cls._helpers[instance_key]\n\n # Don't canonize URL since we already did it\n try:\n instance = cls(base_url=base_url, url=url, logger=logger, canonize_url=False, **kwargs)\n except (StorageError, UsageError) as ex:\n cls._get_logger().error(str(ex))\n return None\n except Exception as ex:\n cls._get_logger().error(\"Failed creating storage object {} Reason: {}\".format(\n base_url or url, ex))\n return None\n\n cls._helpers[instance_key] = instance\n return instance\n\n @classmethod\n def get_local_copy(cls, remote_url):\n \"\"\"\n Download a file from remote URL to a local storage, and return path to local copy,\n\n :param remote_url: Remote URL. Example: https://example.com/file.jpg s3://bucket/folder/file.mp4 etc.\n :return: Path to local copy of the downloaded file. 
None if error occurred.\n \"\"\"\n helper = cls.get(remote_url)\n if not helper:\n return None\n # create temp file with the requested file name\n file_name = '.' + remote_url.split('/')[-1].split(os.path.sep)[-1]\n local_path = mktemp(suffix=file_name)\n return helper.download_to_file(remote_url, local_path)\n\n def __init__(self, base_url, url, key=None, secret=None, region=None, verbose=False, logger=None, retries=5,\n **kwargs):\n level = config.get('storage.log.level', None)\n\n if level:\n try:\n self._get_logger().setLevel(level)\n except (TypeError, ValueError):\n self._get_logger().error('invalid storage log level in configuration: %s' % level)\n\n self._log = logger or self._get_logger()\n self._verbose = verbose\n self._retries = retries\n self._extra = {}\n self._base_url = base_url\n self._secure = True\n self._driver = None\n self._container = None\n self._conf = None\n\n if kwargs.get('canonize_url', True):\n url = self._canonize_url(url)\n\n parsed = urlparse(url)\n self._scheme = parsed.scheme\n\n if self._scheme == _AzureBlobServiceStorageDriver.scheme:\n self._conf = copy(self._azure_configurations.get_config_by_uri(url))\n if self._conf is None:\n raise StorageError(\"Missing Azure Blob Storage configuration for {}\".format(url))\n\n if not self._conf.account_name or not self._conf.account_key:\n raise StorageError(\n \"Missing account name or key for Azure Blob Storage access for {}\".format(base_url)\n )\n\n self._driver = _AzureBlobServiceStorageDriver()\n self._container = self._driver.get_container(config=self._conf)\n\n elif self._scheme == _Boto3Driver.scheme:\n self._conf = copy(self._s3_configurations.get_config_by_uri(url))\n self._secure = self._conf.secure\n\n final_region = region if region else self._conf.region\n if not final_region:\n final_region = None\n\n self._conf.update(\n key=key or self._conf.key,\n secret=secret or self._conf.secret,\n multipart=self._conf.multipart,\n region=final_region,\n use_credentials_chain=self._conf.use_credentials_chain\n )\n\n if not self._conf.use_credentials_chain:\n if not self._conf.key or not self._conf.secret:\n raise ValueError(\n \"Missing key and secret for S3 storage access (%s)\" % base_url\n )\n\n self._driver = _Boto3Driver()\n self._container = self._driver.get_container(container_name=self._base_url, retries=retries,\n config=self._conf)\n\n elif self._scheme == _GoogleCloudStorageDriver.scheme:\n self._conf = copy(self._gs_configurations.get_config_by_uri(url))\n self._driver = _GoogleCloudStorageDriver()\n self._container = self._driver.get_container(\n container_name=self._base_url,\n config=self._conf\n )\n\n elif self._scheme in _HttpDriver.schemes:\n self._driver = _HttpDriver(retries=retries)\n self._container = self._driver.get_container(container_name=self._base_url)\n else: # elif self._scheme == 'file':\n # if this is not a known scheme assume local file\n\n # If the scheme is file, use only the path segment, If not, use the entire URL\n if self._scheme == 'file':\n url = parsed.path\n\n url = url.replace(\"\\\\\", \"/\")\n\n # url2pathname is specifically intended to operate on (urlparse result).path\n # and returns a cross-platform compatible result\n driver_uri = url2pathname(url)\n path_driver_uri = Path(driver_uri)\n # if path_driver_uri.is_file():\n # driver_uri = str(path_driver_uri.parent)\n # elif not path_driver_uri.exists():\n # # assume a folder and create\n # # Path(driver_uri).mkdir(parents=True, exist_ok=True)\n # pass\n\n self._driver = 
_FileStorageDriver(str(path_driver_uri.root))\n self._container = None\n\n @classmethod\n def terminate_uploads(cls, force=True, timeout=2.0):\n if force:\n # since async uploaders are daemon threads, we can just return and let them close by themselves\n return\n # signal all threads to terminate and give them a chance for 'timeout' seconds (total, not per-thread)\n # cls._terminate.set()\n remaining_timeout = timeout\n for thread in cls._async_upload_threads:\n t = time()\n try:\n thread.join(timeout=remaining_timeout)\n except Exception:\n pass\n remaining_timeout -= (time() - t)\n\n @classmethod\n def get_configuration(cls, bucket_config):\n return cls._s3_configurations.get_config_by_bucket(bucket_config.bucket, bucket_config.host)\n\n @classmethod\n def add_configuration(cls, bucket_config, log=None, _test_config=True):\n # Try to use existing configuration if we have no key and secret\n use_existing = not bucket_config.is_valid()\n\n # Get existing config anyway (we'll either try to use it or alert we're replacing it\n existing = cls.get_configuration(bucket_config)\n\n configs = cls._s3_configurations\n\n if not use_existing:\n # Test bucket config, fails if unsuccessful\n if _test_config:\n _Boto3Driver._test_bucket_config(bucket_config, log)\n\n if existing:\n if log:\n log.warning('Overriding existing configuration for %s/%s'\n % (existing.host or 'AWS', existing.bucket))\n configs.remove_config(existing)\n else:\n # Try to use existing configuration\n good_config = False\n if existing:\n if log:\n log.info('Using existing credentials for bucket %s/%s'\n % (bucket_config.host or 'AWS', bucket_config.bucket))\n good_config = _Boto3Driver._test_bucket_config(existing, log, raise_on_error=False)\n\n if not good_config:\n # Try to use global key/secret\n configs.update_config_with_defaults(bucket_config)\n\n if log:\n log.info('Using global credentials for bucket %s/%s'\n % (bucket_config.host or 'AWS', bucket_config.bucket))\n if _test_config:\n _Boto3Driver._test_bucket_config(bucket_config, log)\n else:\n # do not add anything, existing config is OK\n return\n\n configs.add_config(bucket_config)\n\n @classmethod\n def add_path_substitution(\n cls,\n registered_prefix,\n local_prefix,\n replace_windows_sep=False,\n replace_linux_sep=False,\n ):\n \"\"\"\n Add a path substitution rule for storage paths.\n\n Useful for case where the data was registered under some path, and that\n path was later renamed. This may happen with local storage paths where\n each machine is has different mounts or network drives configurations\n\n :param registered_prefix: The prefix to search for and replace. This is\n the prefix of the path the data is registered under. This should be the\n exact url prefix, case sensitive, as the data is registered.\n :param local_prefix: The prefix to replace 'registered_prefix' with. This\n is the prefix of the path the data is actually saved under. 
This should be the\n exact url prefix, case sensitive, as the data is saved under.\n :param replace_windows_sep: If set to True, and the prefix matches, the rest\n of the url has all of the windows path separators (backslash '\\') replaced with\n the native os path separator.\n :param replace_linux_sep: If set to True, and the prefix matches, the rest\n of the url has all of the linux/unix path separators (slash '/') replaced with\n the native os path separator.\n \"\"\"\n\n if not registered_prefix or not local_prefix:\n raise UsageError(\"Path substitution prefixes must be non empty strings\")\n\n if replace_windows_sep and replace_linux_sep:\n raise UsageError(\"Only one of replace_windows_sep and replace_linux_sep may be set.\")\n\n rule = cls._PathSubstitutionRule(\n registered_prefix=registered_prefix,\n local_prefix=local_prefix,\n replace_windows_sep=replace_windows_sep,\n replace_linux_sep=replace_linux_sep,\n )\n\n cls._path_substitutions.append(rule)\n\n @classmethod\n def clear_path_substitutions(cls):\n \"\"\"\n Removes all path substitution rules, including ones from the configuration file.\n \"\"\"\n cls._path_substitutions = list()\n\n def verify_upload(self, folder_uri='', raise_on_error=True, log_on_error=True):\n \"\"\"\n Verify that this helper can upload files to a folder.\n\n An upload is possible iff:\n 1. the destination folder is under the base uri of the url used to create the helper\n 2. the helper has credentials to write to the destination folder\n\n :param folder_uri: The destination folder to test. Must be an absolute\n url that begins with the base uri of the url used to create the helper.\n :param raise_on_error: Raise an exception if an upload is not possible\n :param log_on_error: Log an error if an upload is not possible\n :return: True, if, and only if, an upload to folder_uri is possible.\n \"\"\"\n\n folder_uri = self._canonize_url(folder_uri)\n\n folder_uri = self.conform_url(folder_uri, self._base_url)\n\n test_path = self._normalize_object_name(folder_uri)\n\n if self._scheme == _Boto3Driver.scheme:\n _Boto3Driver._test_bucket_config(\n self._conf,\n self._log,\n test_path=test_path,\n raise_on_error=raise_on_error,\n log_on_error=log_on_error,\n )\n elif self._scheme == _GoogleCloudStorageDriver.scheme:\n self._driver.test_upload(test_path, self._conf)\n\n elif self._scheme == 'file':\n # Check path exists\n Path(test_path).mkdir(parents=True, exist_ok=True)\n # check path permissions\n Path(test_path).touch(exist_ok=True)\n\n return folder_uri\n\n def upload_from_stream(self, stream, dest_path, extra=None, retries=1):\n dest_path = self._canonize_url(dest_path)\n object_name = self._normalize_object_name(dest_path)\n extra = extra.copy() if extra else {}\n extra.update(self._extra)\n last_ex = None\n cb = UploadProgressReport.from_stream(stream, object_name, self._verbose, self._log)\n for i in range(max(1, retries)):\n try:\n self._driver.upload_object_via_stream(\n iterator=stream,\n container=self._container,\n object_name=object_name,\n callback=cb,\n extra=extra)\n last_ex = None\n break\n except Exception as ex:\n last_ex = ex\n # seek to beginning if possible\n # noinspection PyBroadException\n try:\n stream.seek(0)\n except Exception:\n pass\n if last_ex:\n raise last_ex\n\n if self.scheme in _HttpDriver.schemes:\n # quote link\n dest_path = quote_url(dest_path)\n\n return dest_path\n\n def upload(self, src_path, dest_path=None, extra=None, async_enable=False, cb=None, retries=1):\n if not dest_path:\n dest_path = 
os.path.basename(src_path)\n\n dest_path = self._canonize_url(dest_path)\n\n if cb and self.scheme in _HttpDriver.schemes:\n # store original callback\n a_cb = cb\n\n # quote link\n def callback(a_path):\n return a_cb(quote_url(a_path) if a_path else a_path)\n # replace callback with wrapper\n cb = callback\n\n if async_enable:\n data = self._UploadData(src_path=src_path, dest_path=dest_path, extra=extra, callback=cb, retries=retries)\n StorageHelper._initialize_upload_pool()\n return StorageHelper._upload_pool.apply_async(self._do_async_upload, args=(data,))\n else:\n res = self._do_upload(src_path, dest_path, extra, cb, verbose=False, retries=retries)\n if res:\n res = quote_url(res)\n return res\n\n def list(self, prefix=None):\n \"\"\"\n List entries in the helper base path.\n\n Return a list of names inside this helper base path. The base path is\n determined at creation time and is specific for each storage medium.\n For Google Storage and S3 it is the bucket of the path.\n For local files it is the root directory.\n\n This operation is not supported for http and https protocols.\n\n :param prefix: If None, return the list as described above. If not, it\n must be a string - the path of a sub directory under the base path.\n the returned list will include only objects under that subdir.\n\n :return: The paths of all the objects in the storage base\n path under prefix. Listed relative to the base path.\n\n \"\"\"\n\n if prefix:\n if prefix.startswith(self._base_url):\n prefix = prefix[len(self.base_url):].lstrip(\"/\")\n\n try:\n res = self._driver.list_container_objects(self._container, ex_prefix=prefix)\n except TypeError:\n res = self._driver.list_container_objects(self._container)\n\n return [\n obj.name\n for obj in res if\n obj.name.startswith(prefix) and obj.name != prefix\n ]\n else:\n return [obj.name for obj in self._driver.list_container_objects(self._container)]\n\n def download_to_file(\n self,\n remote_path,\n local_path,\n overwrite_existing=False,\n delete_on_failure=True,\n verbose=None,\n skip_zero_size_check=False\n ):\n def next_chunk(astream):\n if isinstance(astream, binary_type):\n chunk = astream\n astream = None\n elif astream:\n try:\n chunk = next(astream)\n except StopIteration:\n chunk = None\n else:\n chunk = None\n return chunk, astream\n\n remote_path = self._canonize_url(remote_path)\n verbose = self._verbose if verbose is None else verbose\n\n # Check if driver type supports direct access:\n direct_access_path = self.get_driver_direct_access(remote_path)\n if direct_access_path:\n return direct_access_path\n\n temp_local_path = None\n try:\n if verbose:\n self._log.info('Start downloading from %s' % remote_path)\n if not overwrite_existing and Path(local_path).is_file():\n self._log.warning(\n 'File {} already exists, no need to download, thread id = {}'.format(\n local_path,\n threading.current_thread().ident,\n ),\n )\n\n return local_path\n # we download into temp_local_path so that if we accidentally stop in the middle,\n # we won't think we have the entire file\n temp_local_path = '{}_{}{}'.format(local_path, time(), self._temp_download_suffix)\n obj = self._get_object(remote_path)\n if not obj:\n return None\n\n # object size in bytes\n total_size_mb = -1\n dl_total_mb = 0.\n download_reported = False\n # chunks size is ignored and always 5Mb\n chunk_size_mb = 5\n\n # make sure we have the destination folder\n # noinspection PyBroadException\n Path(temp_local_path).parent.mkdir(parents=True, exist_ok=True)\n\n # try to get file size\n try:\n if 
isinstance(self._driver, _HttpDriver) and obj:\n obj = self._driver._get_download_object(obj)\n total_size_mb = float(obj.headers.get('Content-Length', 0)) / (1024 * 1024)\n elif hasattr(obj, 'size'):\n size = obj.size\n # Google storage has the option to reload the object to get the size\n if size is None and hasattr(obj, 'reload'):\n obj.reload()\n size = obj.size\n\n total_size_mb = 0 if size is None else float(size) / (1024 * 1024)\n elif hasattr(obj, 'content_length'):\n total_size_mb = float(obj.content_length) / (1024 * 1024)\n except (ValueError, AttributeError, KeyError):\n pass\n\n # if driver supports download with callback, use it (it might be faster)\n if hasattr(self._driver, 'download_object'):\n # callback\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self._log)\n self._driver.download_object(obj, temp_local_path, callback=cb)\n download_reported = bool(cb.last_reported)\n dl_total_mb = cb.current_status_mb\n else:\n stream = self._driver.download_object_as_stream(obj, chunk_size_mb * 1024 * 1024)\n if stream is None:\n raise ValueError('Could not download %s' % remote_path)\n with open(temp_local_path, 'wb') as fd:\n data, stream = next_chunk(stream)\n while data:\n fd.write(data)\n data, stream = next_chunk(stream)\n\n if not skip_zero_size_check and Path(temp_local_path).stat().st_size <= 0:\n raise Exception('downloaded a 0-sized file')\n\n # if we are on windows, we need to remove the target file before renaming\n # otherwise posix rename will overwrite the target\n if os.name != 'posix':\n try:\n os.remove(local_path)\n except Exception:\n pass\n\n # rename temp file to local_file\n # noinspection PyBroadException\n try:\n os.rename(temp_local_path, local_path)\n except Exception:\n # noinspection PyBroadException\n try:\n os.unlink(temp_local_path)\n except Exception:\n pass\n # file was downloaded by a parallel process, check we have the final output and delete the partial copy\n path_local_path = Path(local_path)\n if not path_local_path.is_file() or (not skip_zero_size_check and path_local_path.stat().st_size <= 0):\n raise Exception('Failed renaming partial file, downloaded file exists and a 0-sized file')\n\n # report download if we are on the second chunk\n if verbose or download_reported:\n self._log.info(\n 'Downloaded %.2f MB successfully from %s , saved to %s' % (dl_total_mb, remote_path, local_path))\n return local_path\n except DownloadError:\n raise\n except Exception as e:\n self._log.error(\"Could not download {} , err: {} \".format(remote_path, e))\n if delete_on_failure:\n # noinspection PyBroadException\n try:\n if temp_local_path:\n os.remove(temp_local_path)\n except Exception:\n pass\n return None\n\n def download_as_stream(self, remote_path, chunk_size=None):\n remote_path = self._canonize_url(remote_path)\n try:\n obj = self._get_object(remote_path)\n return self._driver.download_object_as_stream(\n obj, chunk_size=chunk_size, verbose=self._verbose, log=self.log\n )\n except DownloadError:\n raise\n except Exception as e:\n self._log.error(\"Could not download file : %s, err:%s \" % (remote_path, str(e)))\n return None\n\n def download_as_nparray(self, remote_path, chunk_size=None):\n try:\n stream = self.download_as_stream(remote_path, chunk_size)\n if stream is None:\n return\n\n # TODO: ugly py3 hack, please remove ASAP\n if six.PY3 and not isinstance(stream, GeneratorType):\n import numpy as np\n return np.frombuffer(stream, dtype=np.uint8)\n else:\n import numpy as np\n return np.asarray(bytearray(b''.join(stream)), 
dtype=np.uint8)\n\n except Exception as e:\n self._log.error(\"Could not download file : %s, err:%s \" % (remote_path, str(e)))\n\n def delete(self, path):\n return self._driver.delete_object(self._get_object(path))\n\n def check_write_permissions(self, dest_path=None):\n # create a temporary file, then delete it\n base_url = dest_path or self._base_url\n dest_path = base_url + '/.clearml.test'\n # do not check http/s connection permissions\n if dest_path.startswith('http'):\n return True\n try:\n self.upload_from_stream(stream=six.BytesIO(b'clearml'), dest_path=dest_path)\n self.delete(path=dest_path)\n except Exception:\n raise ValueError('Insufficient permissions for {}'.format(base_url))\n return True\n\n @classmethod\n def download_from_url(cls, remote_url, local_path, overwrite_existing=False):\n \"\"\"\n Download a file from remote URL to a local storage\n\n :param remote_url: Remote URL. Example: https://example.com/image.jpg or s3://bucket/folder/file.mp4 etc.\n :param local_path: target location for downloaded file. Example: /tmp/image.jpg\n :param overwrite_existing: If True and local_path exists, it will overwrite it, otherwise print warning\n :return: local_path if download was successful.\n \"\"\"\n helper = cls.get(remote_url)\n if not helper:\n return None\n return helper.download_to_file(remote_url, local_path, overwrite_existing=overwrite_existing)\n\n def get_driver_direct_access(self, path):\n \"\"\"\n Check if the helper's driver has a direct access to the file\n\n :param str path: file path to check access to\n :return: Return the string representation of the file as path if have access to it, else None\n \"\"\"\n\n return self._driver.get_direct_access(path)\n\n @classmethod\n def _canonize_url(cls, url):\n return cls._apply_url_substitutions(url)\n\n @classmethod\n def _apply_url_substitutions(cls, url):\n def replace_separator(_url, where, sep):\n return _url[:where] + _url[where:].replace(sep, os.sep)\n\n for index, rule in enumerate(cls._path_substitutions):\n if url.startswith(rule.registered_prefix):\n url = url.replace(\n rule.registered_prefix,\n rule.local_prefix,\n 1, # count. 
str.replace() does not support keyword arguments\n )\n\n if rule.replace_windows_sep:\n url = replace_separator(url, len(rule.local_prefix), '\\\\')\n\n if rule.replace_linux_sep:\n url = replace_separator(url, len(rule.local_prefix), '/')\n\n break\n\n return url\n\n @classmethod\n def _resolve_base_url(cls, base_url):\n parsed = urlparse(base_url)\n if parsed.scheme == _Boto3Driver.scheme:\n conf = cls._s3_configurations.get_config_by_uri(base_url)\n bucket = conf.bucket\n if not bucket:\n parts = Path(parsed.path.strip('/')).parts\n if parts:\n bucket = parts[0]\n return '/'.join(x for x in ('s3:/', conf.host, bucket) if x)\n elif parsed.scheme == _AzureBlobServiceStorageDriver.scheme:\n conf = cls._azure_configurations.get_config_by_uri(base_url)\n if not conf:\n raise StorageError(\"Can't find azure configuration for {}\".format(base_url))\n return str(furl(base_url).set(path=conf.container_name))\n elif parsed.scheme == _GoogleCloudStorageDriver.scheme:\n conf = cls._gs_configurations.get_config_by_uri(base_url)\n return str(furl(scheme=parsed.scheme, netloc=conf.bucket))\n elif parsed.scheme == 'http':\n return 'http://'\n elif parsed.scheme == 'https':\n return 'https://'\n else: # if parsed.scheme == 'file':\n # if we do not know what it is, we assume file\n return 'file://'\n\n @classmethod\n def conform_url(cls, folder_uri, base_url=None):\n if not folder_uri:\n return folder_uri\n _base_url = cls._resolve_base_url(folder_uri) if not base_url else base_url\n\n if not folder_uri.startswith(_base_url):\n prev_folder_uri = folder_uri\n if _base_url == 'file://':\n folder_uri = str(Path(folder_uri).absolute())\n if folder_uri.startswith('/'):\n folder_uri = _base_url + folder_uri\n else:\n folder_uri = '/'.join((_base_url, folder_uri))\n\n cls._get_logger().debug('Upload destination {} amended to {} for registration purposes'.format(\n prev_folder_uri, folder_uri))\n else:\n raise ValueError('folder_uri: {} does not start with base url: {}'.format(folder_uri, _base_url))\n\n return folder_uri\n\n def _absolute_object_name(self, path):\n \"\"\" Returns absolute remote path, including any prefix that is handled by the container \"\"\"\n if not path.startswith(self.base_url):\n return self.base_url.rstrip('/') + '///' + path.lstrip('/')\n return path\n\n def _normalize_object_name(self, path):\n \"\"\" Normalize remote path. 
Remove any prefix that is already handled by the container \"\"\"\n if path.startswith(self.base_url):\n path = path[len(self.base_url):]\n if path.startswith('/') and os.name == 'nt':\n path = path[1:]\n if self.scheme in (_Boto3Driver.scheme, _GoogleCloudStorageDriver.scheme,\n _AzureBlobServiceStorageDriver.scheme):\n path = path.lstrip('/')\n return path\n\n def _do_async_upload(self, data):\n assert isinstance(data, self._UploadData)\n return self._do_upload(data.src_path, data.dest_path, extra=data.extra, cb=data.callback,\n verbose=True, retries=data.retries)\n\n def _upload_from_file(self, local_path, dest_path, extra=None):\n if not hasattr(self._driver, 'upload_object'):\n with open(local_path, 'rb') as stream:\n res = self.upload_from_stream(stream=stream, dest_path=dest_path, extra=extra)\n else:\n object_name = self._normalize_object_name(dest_path)\n extra = extra.copy() if extra else {}\n extra.update(self._extra)\n cb = UploadProgressReport.from_file(local_path, self._verbose, self._log)\n res = self._driver.upload_object(\n file_path=local_path,\n container=self._container,\n object_name=object_name,\n callback=cb,\n extra=extra)\n return res\n\n def _do_upload(self, src_path, dest_path, extra=None, cb=None, verbose=False, retries=1):\n object_name = self._normalize_object_name(dest_path)\n if cb:\n try:\n cb(None)\n except Exception as e:\n self._log.error(\"Calling upload callback when starting upload: %s\" % str(e))\n if verbose:\n msg = 'Starting upload: {} => {}{}'.format(\n src_path,\n (self._container.name if self._container.name.endswith('/') else self._container.name + '/')\n if self._container and self._container.name else '', object_name)\n if object_name.startswith('file://') or object_name.startswith('/'):\n self._log.debug(msg)\n else:\n self._log.info(msg)\n last_ex = None\n for i in range(max(1, retries)):\n try:\n if not self._upload_from_file(local_path=src_path, dest_path=dest_path, extra=extra):\n # retry if failed\n last_ex = ValueError(\"Upload failed\")\n continue\n last_ex = None\n break\n except Exception as e:\n last_ex = e\n\n if last_ex:\n self._log.error(\"Exception encountered while uploading %s\" % str(last_ex))\n if cb:\n try:\n cb(False)\n except Exception as e:\n self._log.warning(\"Exception on upload callback: %s\" % str(e))\n raise last_ex\n\n if verbose:\n self._log.debug(\"Finished upload: %s => %s\" % (src_path, object_name))\n if cb:\n try:\n cb(dest_path)\n except Exception as e:\n self._log.warning(\"Exception on upload callback: %s\" % str(e))\n\n return dest_path\n\n def _get_object(self, path):\n object_name = self._normalize_object_name(path)\n try:\n return self._driver.get_object(\n container_name=self._container.name if self._container else '', object_name=object_name)\n except ConnectionError:\n raise DownloadError\n except Exception as e:\n self.log.warning('Storage helper problem for {}: {}'.format(str(object_name), str(e)))\n return None\n\n @staticmethod\n def _initialize_upload_pool():\n if not StorageHelper._upload_pool or StorageHelper._upload_pool_pid != os.getpid():\n StorageHelper._upload_pool_pid = os.getpid()\n StorageHelper._upload_pool = ThreadPool(processes=1)\n\n @staticmethod\n def close_async_threads():\n if StorageHelper._upload_pool:\n pool = StorageHelper._upload_pool\n StorageHelper._upload_pool = None\n # noinspection PyBroadException\n try:\n pool.terminate()\n pool.join()\n except Exception:\n pass\n\n\nclass _HttpDriver(_Driver):\n \"\"\" LibCloud http/https adapter (simple, enough for now) 
\"\"\"\n\n timeout_connection = deferred_config('http.timeout.connection', 30)\n timeout_total = deferred_config('http.timeout.total', 30)\n max_retries = deferred_config('http.download.max_retries', 15)\n min_kbps_speed = 50\n\n schemes = ('http', 'https')\n\n class _Container(object):\n _default_backend_session = None\n _default_files_server_host = None\n\n def __init__(self, name, retries=5, **kwargs):\n self.name = name\n self.session = get_http_session_with_retry(\n total=retries,\n connect=retries,\n read=retries,\n redirect=retries,\n backoff_factor=0.5,\n backoff_max=120,\n status_forcelist=[\n requests_codes.request_timeout,\n requests_codes.timeout,\n requests_codes.bad_gateway,\n requests_codes.service_unavailable,\n requests_codes.bandwidth_limit_exceeded,\n requests_codes.too_many_requests,\n ]\n )\n\n def get_headers(self, url):\n if not self._default_backend_session:\n from ..backend_interface.base import InterfaceBase\n self._default_backend_session = InterfaceBase._get_default_session()\n if self._default_files_server_host is None:\n self._default_files_server_host = self._default_backend_session.get_files_server_host().rstrip('/')\n\n if url == self._default_files_server_host or url.startswith(self._default_files_server_host + '/'):\n return self._default_backend_session.add_auth_headers({})\n return None\n\n class _HttpSessionHandle(object):\n def __init__(self, url, is_stream, container_name, object_name):\n self.url, self.is_stream, self.container_name, self.object_name = \\\n url, is_stream, container_name, object_name\n\n def __init__(self, retries=None):\n self._retries = retries or int(self.max_retries)\n self._containers = {}\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, retries=self._retries, **kwargs)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, callback=None, **kwargs):\n url = object_name[:object_name.index('/')]\n url_path = object_name[len(url) + 1:]\n full_url = container.name + url\n # when sending data in post, there is no connection timeout, just an entire upload timeout\n timeout = int(self.timeout_total)\n stream_size = 0\n if hasattr(iterator, 'tell') and hasattr(iterator, 'seek'):\n pos = iterator.tell()\n iterator.seek(0, 2)\n stream_size = iterator.tell() - pos\n iterator.seek(pos, 0)\n timeout = max(timeout, (stream_size / 1024) / float(self.min_kbps_speed))\n\n res = container.session.post(full_url, files={url_path: iterator}, timeout=timeout,\n headers=container.get_headers(full_url))\n if res.status_code != requests.codes.ok:\n raise ValueError('Failed uploading object %s (%d): %s' % (object_name, res.status_code, res.text))\n\n # call back is useless because we are not calling it while uploading...\n\n # if callback and stream_size:\n # try:\n # callback(stream_size)\n # except Exception as ex:\n # log.debug('Exception raised when running callback function: %s' % ex)\n return res\n\n def list_container_objects(self, *args, **kwargs):\n raise NotImplementedError('List is not implemented for http protocol')\n\n def delete_object(self, obj, *args, **kwargs):\n assert isinstance(obj, self._HttpSessionHandle)\n container = self._containers[obj.container_name]\n res = container.session.delete(obj.url, headers=container.get_headers(obj.url))\n if res.status_code != requests.codes.ok:\n self._get_logger().warning('Failed deleting 
object %s (%d): %s' % (\n obj.object_name, res.status_code, res.text))\n return False\n return True\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n is_stream = kwargs.get('stream', True)\n url = ''.join((container_name, object_name.lstrip('/')))\n return self._HttpSessionHandle(url, is_stream, container_name, object_name)\n\n def _get_download_object(self, obj):\n # bypass for session result\n if not isinstance(obj, self._HttpSessionHandle):\n return obj\n\n container = self._containers[obj.container_name]\n # set stream flag before we send the request\n container.session.stream = obj.is_stream\n res = container.session.get(\n obj.url, timeout=(int(self.timeout_connection), int(self.timeout_total)),\n headers=container.get_headers(obj.url))\n if res.status_code != requests.codes.ok:\n raise ValueError('Failed getting object %s (%d): %s' % (obj.object_name, res.status_code, res.text))\n return res\n\n def download_object_as_stream(self, obj, chunk_size=64 * 1024, **_):\n # return iterable object\n obj = self._get_download_object(obj)\n return obj.iter_content(chunk_size=chunk_size)\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n obj = self._get_download_object(obj)\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n length = 0\n with p.open(mode='wb') as f:\n for chunk in obj.iter_content(chunk_size=5 * 1024 * 1024):\n # filter out keep-alive new chunks\n if not chunk:\n continue\n chunk_size = len(chunk)\n f.write(chunk)\n length += chunk_size\n if callback:\n callback(chunk_size)\n\n return length\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n def test_upload(self, test_path, config, **kwargs):\n return True\n\n def upload_object(self, file_path, container, object_name, extra, callback=None, **kwargs):\n with open(file_path, 'rb') as stream:\n return self.upload_object_via_stream(iterator=stream, container=container,\n object_name=object_name, extra=extra, callback=callback, **kwargs)\n\n\nclass _Stream(object):\n encoding = None\n mode = 'rw'\n name = ''\n newlines = '\\n'\n softspace = False\n\n def __init__(self, input_iterator=None):\n self.closed = False\n self._buffer = Queue()\n self._input_iterator = input_iterator\n self._leftover = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.next()\n\n def close(self):\n self.closed = True\n\n def flush(self):\n pass\n\n def fileno(self):\n return 87\n\n def isatty(self):\n return False\n\n def next(self):\n while not self.closed or not self._buffer.empty():\n # input stream\n if self._input_iterator:\n try:\n chunck = next(self._input_iterator)\n return chunck\n except StopIteration:\n self.closed = True\n raise StopIteration()\n except Exception as ex:\n _Driver.get_logger().error('Failed downloading: %s' % ex)\n else:\n # in/out stream\n try:\n return self._buffer.get(block=True, timeout=1.)\n except Empty:\n pass\n\n raise StopIteration()\n\n def read(self, size=None):\n try:\n data = self.next() if self._leftover is None else self._leftover\n except StopIteration:\n return six.b('')\n\n self._leftover = None\n try:\n while size is None or not data or len(data) < size:\n chunk = self.next()\n if chunk is not None:\n if data is not None:\n data += chunk\n else:\n data = chunk\n except StopIteration:\n pass\n\n if size is not None and data and len(data) > 
size:\n self._leftover = data[size:]\n return data[:size]\n\n return data\n\n def readline(self, size=None):\n return self.read(size)\n\n def readlines(self, sizehint=None):\n pass\n\n def truncate(self, size=None):\n pass\n\n def write(self, bytes):\n self._buffer.put(bytes, block=True)\n\n def writelines(self, sequence):\n for s in sequence:\n self.write(s)\n\n\nclass _Boto3Driver(_Driver):\n \"\"\" Boto3 storage adapter (simple, enough for now) \"\"\"\n\n _min_pool_connections = 512\n _max_multipart_concurrency = deferred_config('aws.boto3.max_multipart_concurrency', 16)\n _pool_connections = deferred_config('aws.boto3.pool_connections', 512)\n _connect_timeout = deferred_config('aws.boto3.connect_timeout', 60)\n _read_timeout = deferred_config('aws.boto3.read_timeout', 60)\n\n _stream_download_pool_connections = deferred_config('aws.boto3.stream_connections', 128)\n _stream_download_pool = None\n _stream_download_pool_pid = None\n\n _containers = {}\n\n scheme = 's3'\n scheme_prefix = str(furl(scheme=scheme, netloc=''))\n\n _bucket_location_failure_reported = set()\n\n class _Container(object):\n _creation_lock = ForkSafeRLock()\n\n def __init__(self, name, cfg):\n try:\n import boto3\n import botocore.client\n from botocore.exceptions import ClientError # noqa: F401\n except ImportError:\n raise UsageError(\n 'AWS S3 storage driver (boto3) not found. '\n 'Please install driver using: pip install \\\"boto3>=1.9\\\"'\n )\n\n # skip 's3://'\n self.name = name[5:]\n endpoint = (('https://' if cfg.secure else 'http://') + cfg.host) if cfg.host else None\n\n # boto3 client creation isn't thread-safe (client itself is)\n with self._creation_lock:\n boto_kwargs = {\n \"endpoint_url\": endpoint,\n \"use_ssl\": cfg.secure,\n \"verify\": cfg.verify,\n \"config\": botocore.client.Config(\n max_pool_connections=max(\n int(_Boto3Driver._min_pool_connections),\n int(_Boto3Driver._pool_connections)),\n connect_timeout=int(_Boto3Driver._connect_timeout),\n read_timeout=int(_Boto3Driver._read_timeout),\n )\n }\n if not cfg.use_credentials_chain:\n boto_kwargs[\"aws_access_key_id\"] = cfg.key\n boto_kwargs[\"aws_secret_access_key\"] = cfg.secret\n\n self.resource = boto3.resource(\n 's3',\n **boto_kwargs\n )\n\n self.config = cfg\n bucket_name = self.name[len(cfg.host) + 1:] if cfg.host else self.name\n self.bucket = self.resource.Bucket(bucket_name)\n\n @attrs\n class ListResult(object):\n name = attrib(default=None)\n\n def __init__(self):\n pass\n\n def _get_stream_download_pool(self):\n if self._stream_download_pool is None or self._stream_download_pool_pid != os.getpid():\n self._stream_download_pool_pid = os.getpid()\n self._stream_download_pool = ThreadPoolExecutor(max_workers=int(self._stream_download_pool_connections))\n return self._stream_download_pool\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, cfg=config)\n self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):\n import boto3.s3.transfer\n stream = _Stream(iterator)\n try:\n container.bucket.upload_fileobj(stream, object_name, Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n 
num_download_attempts=container.config.retries),\n Callback=callback,\n )\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):\n import boto3.s3.transfer\n try:\n container.bucket.upload_file(file_path, object_name, Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries),\n Callback=callback)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def list_container_objects(self, container, ex_prefix=None, **kwargs):\n if ex_prefix:\n res = container.bucket.objects.filter(Prefix=ex_prefix)\n else:\n res = container.bucket.objects.all()\n for res in res:\n yield self.ListResult(name=res.key)\n\n def delete_object(self, object, **kwargs):\n from botocore.exceptions import ClientError\n object.delete()\n try:\n # Try loading the file to verify deletion\n object.load()\n return False\n except ClientError as e:\n return int(e.response['Error']['Code']) == 404\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n full_container_name = 's3://' + container_name\n container = self._containers[full_container_name]\n obj = container.resource.Object(container.bucket.name, object_name)\n obj.container_name = full_container_name\n return obj\n\n def download_object_as_stream(self, obj, chunk_size=64 * 1024, verbose=None, log=None, **_):\n def async_download(a_obj, a_stream, cb, cfg):\n try:\n a_obj.download_fileobj(a_stream, Callback=cb, Config=cfg)\n except Exception as ex:\n (log or self.get_logger()).error('Failed downloading: %s' % ex)\n a_stream.close()\n\n import boto3.s3.transfer\n # return iterable object\n stream = _Stream()\n container = self._containers[obj.container_name]\n config = boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries)\n total_size_mb = obj.content_length / (1024. 
* 1024.)\n remote_path = os.path.join(obj.container_name, obj.key)\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, log)\n self._get_stream_download_pool().submit(async_download, obj, stream, cb, config)\n\n return stream\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n import boto3.s3.transfer\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n container = self._containers[obj.container_name]\n Config = boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries\n )\n obj.download_file(str(p), Callback=callback, Config=Config)\n\n @classmethod\n def _test_bucket_config(cls, conf, log, test_path='', raise_on_error=True, log_on_error=True):\n try:\n import boto3\n from botocore.exceptions import ClientError\n except ImportError:\n return False\n\n if not conf.bucket:\n return False\n try:\n if not conf.is_valid():\n raise Exception('Missing credentials')\n\n fullname = furl(conf.bucket).add(path=test_path).add(path='%s-upload_test' % cls.__module__)\n bucket_name = str(fullname.path.segments[0])\n filename = str(furl(path=fullname.path.segments[1:]))\n\n data = {\n 'user': getpass.getuser(),\n 'machine': gethostname(),\n 'time': datetime.utcnow().isoformat()\n }\n\n boto_session = boto3.Session(conf.key, conf.secret)\n boto_resource = boto_session.resource('s3', conf.region)\n bucket = boto_resource.Bucket(bucket_name)\n bucket.put_object(Key=filename, Body=six.b(json.dumps(data)))\n\n region = cls._get_bucket_region(conf=conf, log=log, report_info=True)\n\n if region and ((conf.region and region != conf.region) or (not conf.region and region != 'us-east-1')):\n msg = \"incorrect region specified for bucket %s (detected region %s)\" % (conf.bucket, region)\n else:\n return True\n\n except ClientError as ex:\n msg = ex.response['Error']['Message']\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise\n\n except Exception as ex:\n msg = str(ex)\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise\n\n msg = (\"Failed testing access to bucket %s: \" % conf.bucket) + msg\n\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise StorageError(msg)\n\n return False\n\n @classmethod\n def _get_bucket_region(cls, conf, log=None, report_info=False):\n import boto3\n from botocore.exceptions import ClientError\n\n if not conf.bucket:\n return None\n\n def report(msg):\n if log and conf.get_bucket_host() not in cls._bucket_location_failure_reported:\n if report_info:\n log.debug(msg)\n else:\n log.warning(msg)\n cls._bucket_location_failure_reported.add(conf.get_bucket_host())\n\n try:\n boto_session = boto3.Session(conf.key, conf.secret)\n boto_resource = boto_session.resource('s3')\n return boto_resource.meta.client.get_bucket_location(Bucket=conf.bucket)[\"LocationConstraint\"]\n\n except ClientError as ex:\n report(\"Failed getting bucket location (region) for bucket \"\n \"%s: %s (%s, access_key=%s). Default region will be used. 
\"\n \"This is normal if you do not have GET_BUCKET_LOCATION permission\"\n % (conf.bucket, ex.response['Error']['Message'], ex.response['Error']['Code'], conf.key))\n except Exception as ex:\n report(\"Failed getting bucket location (region) for bucket %s: %s. Default region will be used.\"\n % (conf.bucket, str(ex)))\n\n return None\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n def test_upload(self, test_path, config, **_):\n return True\n\n\nclass _GoogleCloudStorageDriver(_Driver):\n \"\"\"Storage driver for google cloud storage\"\"\"\n\n _stream_download_pool_connections = deferred_config('google.storage.stream_connections', 128)\n _stream_download_pool = None\n _stream_download_pool_pid = None\n\n _containers = {}\n\n scheme = 'gs'\n scheme_prefix = str(furl(scheme=scheme, netloc=''))\n\n class _Container(object):\n def __init__(self, name, cfg):\n try:\n from google.cloud import storage\n from google.oauth2 import service_account\n except ImportError:\n raise UsageError(\n 'Google cloud driver not found. '\n 'Please install driver using: pip install \\\"google-cloud-storage>=1.13.2\\\"'\n )\n\n self.name = name[len(_GoogleCloudStorageDriver.scheme_prefix):]\n\n if cfg.credentials_json:\n credentials = service_account.Credentials.from_service_account_file(cfg.credentials_json)\n else:\n credentials = None\n\n self.client = storage.Client(project=cfg.project, credentials=credentials)\n for adapter in self.client._http.adapters.values():\n if cfg.pool_connections:\n adapter._pool_connections = cfg.pool_connections\n if cfg.pool_maxsize:\n adapter._pool_maxsize = cfg.pool_maxsize\n\n self.config = cfg\n self.bucket = self.client.bucket(self.name)\n\n def _get_stream_download_pool(self):\n if self._stream_download_pool is None or self._stream_download_pool_pid != os.getpid():\n self._stream_download_pool_pid = os.getpid()\n self._stream_download_pool = ThreadPoolExecutor(max_workers=int(self._stream_download_pool_connections))\n return self._stream_download_pool\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, cfg=config)\n self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):\n try:\n blob = container.bucket.blob(object_name)\n blob.upload_from_file(iterator)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def upload_object(self, file_path, container, object_name, extra=None, **kwargs):\n try:\n blob = container.bucket.blob(object_name)\n blob.upload_from_filename(file_path)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def list_container_objects(self, container, **kwargs):\n return list(container.bucket.list_blobs())\n\n def delete_object(self, object, **kwargs):\n try:\n object.delete()\n except Exception as ex:\n try:\n from google.cloud.exceptions import NotFound\n if isinstance(ex, NotFound):\n return False\n except ImportError:\n pass\n name = getattr(object, \"name\", \"\")\n self.get_logger().warning(\"Failed deleting object {}: {}\".format(name, ex))\n return False\n\n return not object.exists()\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n full_container_name = str(furl(scheme=self.scheme, 
netloc=container_name))\n container = self._containers[full_container_name]\n obj = container.bucket.blob(object_name)\n obj.container_name = full_container_name\n return obj\n\n def download_object_as_stream(self, obj, chunk_size=256 * 1024, **_):\n raise NotImplementedError('Unsupported for google storage')\n\n def async_download(a_obj, a_stream):\n try:\n a_obj.download_to_file(a_stream)\n except Exception as ex:\n self.get_logger().error('Failed downloading: %s' % ex)\n a_stream.close()\n\n # return iterable object\n stream = _Stream()\n obj.chunk_size = chunk_size\n self._get_stream_download_pool().submit(async_download, obj, stream)\n\n return stream\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n obj.download_to_filename(str(p))\n\n def test_upload(self, test_path, config, **_):\n bucket_url = str(furl(scheme=self.scheme, netloc=config.bucket, path=config.subdir))\n bucket = self.get_container(container_name=bucket_url, config=config).bucket\n\n test_obj = bucket\n\n if test_path:\n if not test_path.endswith('/'):\n test_path += '/'\n\n blob = bucket.blob(test_path)\n\n if blob.exists():\n test_obj = blob\n\n permissions_to_test = ('storage.objects.get', 'storage.objects.update')\n return set(test_obj.test_iam_permissions(permissions_to_test)) == set(permissions_to_test)\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n\nclass _AzureBlobServiceStorageDriver(_Driver):\n scheme = 'azure'\n\n _containers = {}\n\n class _Container(object):\n def __init__(self, name, config):\n try:\n from azure.common import AzureHttpError # noqa: F401\n from azure.storage.blob import BlockBlobService\n except ImportError:\n raise UsageError(\n 'Azure blob storage driver not found. 
'\n 'Please install driver using: pip install \\\"azure.storage.blob<=2.1.0\\\"'\n )\n\n self.name = name\n self.config = config\n self.blob_service = BlockBlobService(\n account_name=config.account_name,\n account_key=config.account_key,\n )\n\n @attrs\n class _Object(object):\n container = attrib()\n blob_name = attrib()\n content_length = attrib()\n\n def get_container(self, container_name=None, config=None, **kwargs):\n container_name = container_name or config.container_name\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, config=config)\n # self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):\n from azure.common import AzureHttpError # noqa\n\n blob_name = self._blob_name_from_object_path(object_name, container.name) # noqa: F841\n try:\n container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024\n container.blob_service.socket_timeout = (300, 2000)\n container.blob_service.create_blob_from_bytes(\n container.name,\n object_name,\n iterator.read() if hasattr(iterator, \"read\") else bytes(iterator),\n # timeout=300,\n max_connections=2,\n progress_callback=callback,\n )\n return True\n except AzureHttpError as ex:\n self.get_logger().error('Failed uploading (Azure error): %s' % ex)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n\n def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):\n from azure.common import AzureHttpError # noqa\n\n blob_name = self._blob_name_from_object_path(object_name, container.name)\n stream = None\n try:\n from azure.storage.blob import ContentSettings # noqa\n from mimetypes import guess_type\n container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024\n container.blob_service.socket_timeout = (300, 2000)\n container.blob_service.create_blob_from_path(\n container.name,\n blob_name,\n file_path,\n # timeout=300,\n max_connections=2,\n content_settings=ContentSettings(content_type=guess_type(file_path)),\n progress_callback=callback,\n )\n return True\n except AzureHttpError as ex:\n self.get_logger().error('Failed uploading (Azure error): %s' % ex)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n finally:\n if stream:\n stream.close()\n\n def list_container_objects(self, container, ex_prefix=None, **kwargs):\n return list(container.blob_service.list_blobs(container_name=container.name, prefix=ex_prefix))\n\n def delete_object(self, object, **kwargs):\n container = object.container\n container.blob_service.delete_blob(\n container.name,\n object.blob_name,\n )\n return not object.container.blob_service.exists(container.name, object.blob_name)\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n container = self._containers.get(container_name)\n if not container:\n raise StorageError(\"Container `{}` not found for object {}\".format(container_name, object_name))\n\n # blob_name = self._blob_name_from_object_path(object_name, container_name)\n blob = container.blob_service.get_blob_properties(container.name, object_name)\n\n return self._Object(container=container, blob_name=blob.name, content_length=blob.properties.content_length)\n\n def download_object_as_stream(self, obj, verbose, *_, **__):\n container = obj.container\n total_size_mb = obj.content_length / (1024. 
* 1024.)\n remote_path = os.path.join(\n \"{}://\".format(self.scheme),\n container.config.account_name,\n container.name,\n obj.blob_name\n )\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self.get_logger())\n blob = container.blob_service.get_blob_to_bytes(\n container.name,\n obj.blob_name,\n progress_callback=cb,\n )\n return blob.content\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n\n download_done = threading.Event()\n download_done.counter = 0\n\n def callback_func(current, total):\n if callback:\n chunk = current - download_done.counter\n download_done.counter += chunk\n callback(chunk)\n if current >= total:\n download_done.set()\n\n container = obj.container\n container.blob_service.MAX_SINGLE_GET_SIZE = 5 * 1024 * 1024\n _ = container.blob_service.get_blob_to_path(\n container.name,\n obj.blob_name,\n local_path,\n max_connections=10,\n progress_callback=callback_func,\n )\n download_done.wait()\n\n def test_upload(self, test_path, config, **_):\n container = self.get_container(config=config)\n try:\n container.blob_service.get_container_properties(container.name)\n except Exception:\n return False\n else:\n # Using the account Key, we can always upload...\n return True\n\n @classmethod\n def _blob_name_from_object_path(cls, name, container_name):\n scheme = urlparse(name).scheme\n if scheme:\n if scheme != cls.scheme:\n raise StorageError(\n \"When using a URL, only the `{}` scheme is supported for Azure storage: {}\",\n cls.scheme,\n name,\n )\n\n f = furl(name)\n\n if not f.path.segments:\n raise StorageError(\n \"Missing container name in URL {}\",\n name,\n )\n\n parsed_container_name = f.path.segments[0]\n\n if parsed_container_name != container_name:\n raise StorageError(\n \"Container name mismatch (expected {}, found {}) in {}\",\n container_name,\n parsed_container_name,\n name,\n )\n\n if len(f.path.segments) == 1:\n raise StorageError(\n \"No path found following container name {} in {}\",\n container_name,\n name,\n )\n\n return f.path.segments[0], os.path.join(*f.path.segments[1:])\n\n return name\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n\nclass _FileStorageDriver(_Driver):\n \"\"\"\n A base StorageDriver to derive from.\n \"\"\"\n\n scheme = \"file\"\n CHUNK_SIZE = 8096\n IGNORE_FOLDERS = ['.lock', '.hash']\n Object = namedtuple(\"Object\", ['name', 'size', 'extra', 'driver', 'container', 'hash', 'meta_data'])\n\n class _Container(object):\n def __init__(self, name, extra, driver):\n self.name = name\n self.extra = extra\n self.driver = driver\n\n def __init__(self, key, secret=None, secure=True, host=None, port=None,\n **kwargs):\n\n # Use the key as the path to the storage\n self.base_path = key\n\n def _make_path(self, path, ignore_existing=True):\n \"\"\"\n Create a path by checking if it already exists\n \"\"\"\n\n try:\n os.makedirs(path)\n except OSError:\n exp = sys.exc_info()[1]\n if exp.errno == errno.EEXIST and not ignore_existing:\n raise exp\n\n def _check_container_name(self, container_name):\n \"\"\"\n Check if the container name is valid\n\n :param container_name: Container name\n :type container_name: ``str``\n \"\"\"\n\n if '/' in container_name or '\\\\' in container_name:\n raise ValueError(\"Container name \\\"{}\\\" cannot contain \\\\ or / 
\".format(container_name))\n\n def _make_container(self, container_name):\n \"\"\"\n Create a container instance\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :return: A Container instance.\n \"\"\"\n container_name = container_name or '.'\n self._check_container_name(container_name)\n\n full_path = os.path.realpath(os.path.join(self.base_path, container_name))\n\n try:\n stat = os.stat(full_path)\n if not os.path.isdir(full_path):\n raise OSError(\"Target path \\\"{}\\\" is not a directory\".format(full_path))\n except OSError:\n raise OSError(\"Target path \\\"{}\\\" is not accessible or does not exist\".format(full_path))\n\n extra = {\n 'creation_time': stat.st_ctime,\n 'access_time': stat.st_atime,\n 'modify_time': stat.st_mtime,\n }\n\n return self._Container(name=container_name, extra=extra, driver=self)\n\n def _make_object(self, container, object_name):\n \"\"\"\n Create an object instance\n\n :param container: Container.\n :type container: :class:`Container`\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :return: A Object instance.\n \"\"\"\n\n full_path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.', object_name))\n\n if os.path.isdir(full_path):\n raise ValueError(\"Target path \\\"{}\\\" already exist\".format(full_path))\n\n try:\n stat = os.stat(full_path)\n except Exception:\n raise ValueError(\"Cannot access target path \\\"{}\\\"\".format(full_path))\n\n extra = {\n 'creation_time': stat.st_ctime,\n 'access_time': stat.st_atime,\n 'modify_time': stat.st_mtime,\n }\n\n return self.Object(name=object_name, size=stat.st_size, extra=extra,\n driver=self, container=container, hash=None, meta_data=None)\n\n def iterate_containers(self):\n \"\"\"\n Return a generator of containers.\n\n :return: A generator of Container instances.\n \"\"\"\n\n for container_name in os.listdir(self.base_path):\n full_path = os.path.join(self.base_path, container_name)\n if not os.path.isdir(full_path):\n continue\n yield self._make_container(container_name)\n\n def _get_objects(self, container):\n \"\"\"\n Recursively iterate through the file-system and return the object names\n \"\"\"\n\n cpath = self.get_container_cdn_url(container, check=True)\n\n for folder, subfolders, files in os.walk(cpath, topdown=True):\n # Remove unwanted subfolders\n for subf in self.IGNORE_FOLDERS:\n if subf in subfolders:\n subfolders.remove(subf)\n\n for name in files:\n full_path = os.path.join(folder, name)\n object_name = os.path.relpath(full_path, start=cpath)\n yield self._make_object(container, object_name)\n\n def iterate_container_objects(self, container):\n \"\"\"\n Returns a generator of objects for the given container.\n\n :param container: Container instance\n :type container: :class:`Container`\n\n :return: A generator of Object instances.\n \"\"\"\n\n return self._get_objects(container)\n\n def get_container(self, container_name, **_):\n \"\"\"\n Return a container instance.\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :return: A Container instance.\n \"\"\"\n return self._make_container(container_name)\n\n def get_container_cdn_url(self, container, check=False):\n \"\"\"\n Return a container CDN URL.\n\n :param container: Container instance\n :type container: :class:`Container`\n\n :param check: Indicates if the path's existence must be checked\n :type check: ``bool``\n\n :return: A CDN URL for this container.\n \"\"\"\n path = os.path.realpath(os.path.join(self.base_path, 
container.name if container else '.'))\n\n if check and not os.path.isdir(path):\n raise ValueError(\"Target path \\\"{}\\\" does not exist\".format(path))\n\n return path\n\n def get_object(self, container_name, object_name, **_):\n \"\"\"\n Return an object instance.\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :return: An Object instance.\n \"\"\"\n container = self._make_container(container_name)\n return self._make_object(container, object_name)\n\n def get_object_cdn_url(self, obj):\n \"\"\"\n Return an object CDN URL.\n\n :param obj: Object instance\n :type obj: :class:`Object`\n\n :return: A CDN URL for this object.\n \"\"\"\n return os.path.realpath(os.path.join(self.base_path, obj.container.name, obj.name))\n\n def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True, **_):\n \"\"\"\n Download an object to the specified destination path.\n\n :param obj: Object instance.\n :type obj: :class:`Object`\n\n :param destination_path: Full path to a file or a directory where the\n incoming file will be saved.\n :type destination_path: ``str``\n\n :param overwrite_existing: True to overwrite an existing file,\n defaults to False.\n :type overwrite_existing: ``bool``\n\n :param delete_on_failure: True to delete a partially downloaded file if\n the download was not successful (hash mismatch / file size).\n :type delete_on_failure: ``bool``\n\n :return: True, if an object has been successfully downloaded, False, otherwise.\n \"\"\"\n\n obj_path = self.get_object_cdn_url(obj)\n base_name = os.path.basename(destination_path)\n\n if not base_name and not os.path.exists(destination_path):\n raise ValueError('Path \\\"{}\\\" does not exist'.format(destination_path))\n\n if not base_name:\n file_path = os.path.join(destination_path, obj.name)\n else:\n file_path = destination_path\n\n if os.path.exists(file_path) and not overwrite_existing:\n raise ValueError('File \\\"{}\\\" already exists, but overwrite_existing=False'.format(file_path))\n\n try:\n shutil.copy(obj_path, file_path)\n except IOError:\n if delete_on_failure:\n # noinspection PyBroadException\n try:\n os.unlink(file_path)\n except Exception:\n pass\n return False\n\n return True\n\n def download_object_as_stream(self, obj, chunk_size=None, **_):\n \"\"\"\n Return a generator which yields object data.\n\n :param obj: Object instance\n :type obj: :class:`Object`\n\n :param chunk_size: Optional chunk size (in bytes).\n :type chunk_size: ``int``\n\n :return: A stream of binary chunks of data.\n \"\"\"\n path = self.get_object_cdn_url(obj)\n with open(path, 'rb') as obj_file:\n for data in self._read_in_chunks(obj_file, chunk_size=chunk_size):\n yield data\n\n def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, **_):\n \"\"\"\n Upload an object currently located on a disk.\n\n :param file_path: Path to the object on disk.\n :type file_path: ``str``\n\n :param container: Destination container.\n :type container: :class:`Container`\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :param verify_hash: Verify hast\n :type verify_hash: ``bool``\n\n :param extra: (optional) Extra attributes (driver specific).\n :type extra: ``dict``\n \"\"\"\n\n path = self.get_container_cdn_url(container, check=True)\n obj_path = os.path.join(path, object_name)\n base_path = os.path.dirname(obj_path)\n\n self._make_path(base_path)\n\n shutil.copy(file_path, 
obj_path)\n\n os.chmod(obj_path, int('664', 8))\n\n return self._make_object(container, object_name)\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):\n \"\"\"\n Upload an object using an iterator.\n\n If a provider supports it, chunked transfer encoding is used and you\n don't need to know in advance the amount of data to be uploaded.\n\n Otherwise if a provider doesn't support it, iterator will be exhausted\n so a total size for data to be uploaded can be determined.\n\n Note: Exhausting the iterator means that the whole data must be\n buffered in memory which might result in memory exhausting when\n uploading a very large object.\n\n If a file is located on a disk you are advised to use upload_object\n function which uses fs.stat function to determine the file size and it\n doesn't need to buffer whole object in the memory.\n\n :type iterator: ``object``\n :param iterator: An object which implements the iterator\n interface and yields binary chunks of data.\n\n :type container: :class:`Container`\n :param container: Destination container.\n\n :type object_name: ``str``\n :param object_name: Object name.\n\n :type extra: ``dict``\n :param extra: (optional) Extra attributes (driver specific). Note:\n This dictionary must contain a 'content_type' key which represents\n a content type of the stored object.\n \"\"\"\n path = self.get_container_cdn_url(container, check=True)\n obj_path = os.path.join(path, object_name)\n base_path = os.path.dirname(obj_path)\n self._make_path(base_path)\n\n obj_path = os.path.realpath(obj_path)\n with open(obj_path, 'wb' if not isinstance(iterator, StringIO) else 'wt') as obj_file:\n obj_file.write(iterator.read() if hasattr(iterator, 'read') else bytes(iterator))\n\n os.chmod(obj_path, int('664', 8))\n return self._make_object(container, object_name)\n\n def delete_object(self, obj, **_):\n \"\"\"\n Delete an object.\n\n :type obj: :class:`Object`\n :param obj: Object instance.\n\n :return: True on success.\n \"\"\"\n if not obj:\n return False\n\n path = self.get_object_cdn_url(obj)\n\n try:\n os.unlink(path)\n except Exception:\n return False\n\n # # Check and delete all the empty parent folders\n # path = os.path.dirname(path)\n # container_url = obj.container.get_cdn_url()\n #\n # # Delete the empty parent folders till the container's level\n # while path != container_url:\n # try:\n # os.rmdir(path)\n # except OSError:\n # exp = sys.exc_info()[1]\n # if exp.errno == errno.ENOTEMPTY:\n # break\n # raise exp\n #\n # path = os.path.dirname(path)\n\n return True\n\n def create_container(self, container_name):\n \"\"\"\n Create a new container.\n\n :type container_name: ``str``\n :param container_name: Container name.\n\n :return: A Container instance on success.\n \"\"\"\n container_name = container_name or '.'\n self._check_container_name(container_name)\n\n path = os.path.join(self.base_path, container_name)\n\n try:\n self._make_path(path, ignore_existing=False)\n except OSError:\n exp = sys.exc_info()[1]\n if exp.errno == errno.EEXIST:\n raise ValueError('Container \\\"{}\\\" with this name already exists. 
The name '\n 'must be unique among all the containers in the '\n 'system'.format(container_name))\n else:\n raise ValueError('Error creating container \\\"{}\\\"'.format(container_name))\n except Exception:\n raise ValueError('Error creating container \\\"{}\\\"'.format(container_name))\n\n return self._make_container(container_name)\n\n def delete_container(self, container):\n \"\"\"\n Delete a container.\n\n :type container: :class:`Container`\n :param container: Container instance\n\n :return: True on success, False otherwise.\n \"\"\"\n\n # Check if there are any objects inside this\n for obj in self._get_objects(container):\n raise ValueError('Container \\\"{}\\\" is not empty'.format(container.name))\n\n path = self.get_container_cdn_url(container, check=True)\n\n # noinspection PyBroadException\n try:\n shutil.rmtree(path)\n except Exception:\n return False\n\n return True\n\n def list_container_objects(self, container, **kwargs):\n return list(self.iterate_container_objects(container))\n\n @staticmethod\n def _read_in_chunks(iterator, chunk_size=None, fill_size=False, yield_empty=False):\n \"\"\"\n Return a generator which yields data in chunks.\n\n :param iterator: An object which implements an iterator interface\n or a File like object with read method.\n :type iterator: :class:`object` which implements iterator interface.\n\n :param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)\n :type chunk_size: ``int``\n\n :param fill_size: If True, make sure chunks are exactly chunk_size in\n length (except for last chunk).\n :type fill_size: ``bool``\n\n :param yield_empty: If true and iterator returned no data, only yield empty\n bytes object\n :type yield_empty: ``bool``\n\n TODO: At some point in the future we could use byte arrays here if version\n >= Python 3. This should speed things up a bit and reduce memory usage.\n \"\"\"\n chunk_size = chunk_size or _FileStorageDriver.CHUNK_SIZE\n if six.PY3:\n from io import FileIO as file\n\n if isinstance(iterator, (file)):\n get_data = iterator.read\n args = (chunk_size,)\n else:\n get_data = next\n args = (iterator,)\n\n data = bytes('')\n empty = False\n\n while not empty or len(data) > 0:\n if not empty:\n try:\n chunk = bytes(get_data(*args))\n if len(chunk) > 0:\n data += chunk\n else:\n empty = True\n except StopIteration:\n empty = True\n\n if len(data) == 0:\n if empty and yield_empty:\n yield bytes('')\n\n return\n\n if fill_size:\n if empty or len(data) >= chunk_size:\n yield data[:chunk_size]\n data = data[chunk_size:]\n else:\n yield data\n data = bytes('')\n\n def get_direct_access(self, remote_path, **_):\n # this will always make sure we have full path and file:// prefix\n full_url = StorageHelper.conform_url(remote_path)\n # now get rid of the file:// prefix\n path = Path(full_url[7:])\n if not path.exists():\n raise ValueError(\"Requested path does not exist: {}\".format(path))\n return path.as_posix()\n\n def test_upload(self, test_path, config, **kwargs):\n return True\n\n\ndriver_schemes = set(\n filter(\n None,\n itertools.chain(\n (getattr(cls, \"scheme\", None) for cls in _Driver.__subclasses__()),\n *(getattr(cls, \"schemes\", []) for cls in _Driver.__subclasses__())\n )\n )\n)\n\nremote_driver_schemes = driver_schemes - {_FileStorageDriver.scheme}\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jsosulski/toeplitz
[ "9d6e6e08af566227e6777067993c17dc39a75971" ]
[ "setup.py" ]
[ "#!/usr/bin/env python\nfrom numpy.distutils.core import Extension, setup\n\nVERSION = '0.3.2-dev'\n\nwith open('README.rst') as f:\n README = f.read()\nDESCRIPTION = README.split('\\n')[2]\nLONG_DESCRIPTION = '\\n'.join(README.split('\\n')[17:])\n\nEXT = Extension(name='toeplitz',\n sources=['src/toeplitz.pyf', 'src/toeplitz.f90'])\n\nCLASSIFIERS = [\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Mathematics'\n ]\n\nsetup(name='toeplitz',\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author='Tom Eulenfeld',\n author_email='[email protected]',\n license='MIT',\n url='https://github.com/trichter/toeplitz',\n classifiers=CLASSIFIERS,\n ext_modules=[EXT],\n scripts=['scripts/toeplitz-runtests'],\n requires=['numpy'],\n include_package_data=True\n )\n" ]
[ [ "numpy.distutils.core.Extension", "numpy.distutils.core.setup" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
jjtan/unintended-ml-bias-analysis
[ "8172643f6224df09323c2be227bc2ca0f218f03a" ]
[ "unintended_ml_bias/model_tool.py" ]
[ "\"\"\"Train a Toxicity model using Keras.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport cPickle\nimport json\nimport os\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Conv1D\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Embedding\nfrom keras.layers import Embedding\nfrom keras.layers import Flatten\nfrom keras.layers import GlobalMaxPooling1D\nfrom keras.layers import Input\nfrom keras.layers import MaxPooling1D\nfrom keras.models import load_model\nfrom keras.models import Model\nfrom keras.optimizers import RMSprop\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils import to_categorical\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\n\nprint('HELLO from model_tool')\n\nDEFAULT_EMBEDDINGS_PATH = '../data/glove.6B/glove.6B.100d.txt'\nDEFAULT_MODEL_DIR = '../models'\n\nDEFAULT_HPARAMS = {\n 'max_sequence_length': 250,\n 'max_num_words': 10000,\n 'embedding_dim': 100,\n 'embedding_trainable': False,\n 'learning_rate': 0.00005,\n 'stop_early': True,\n 'es_patience': 1, # Only relevant if STOP_EARLY = True\n 'es_min_delta': 0, # Only relevant if STOP_EARLY = True\n 'batch_size': 128,\n 'epochs': 20,\n 'dropout_rate': 0.3,\n 'cnn_filter_sizes': [128, 128, 128],\n 'cnn_kernel_sizes': [5, 5, 5],\n 'cnn_pooling_sizes': [5, 5, 40],\n 'verbose': True\n}\n\n\ndef compute_auc(y_true, y_pred):\n try:\n return metrics.roc_auc_score(y_true, y_pred)\n except ValueError:\n return np.nan\n\n\n### Model scoring\n\n# Scoring these dataset for dozens of models actually takes non-trivial amounts\n# of time, so we save the results as a CSV. 
The resulting CSV includes all the\n# columns of the original dataset, and in addition has columns for each model,\n# containing the model's scores.\ndef score_dataset(df, models, text_col):\n \"\"\"Scores the dataset with each model and adds the scores as new columns.\"\"\"\n for model in models:\n name = model.get_model_name()\n print('{} Scoring with {}...'.format(datetime.datetime.now(), name))\n df[name] = model.predict(df[text_col])\n\ndef load_maybe_score(models, orig_path, scored_path, postprocess_fn):\n if os.path.exists(scored_path):\n print('Using previously scored data:', scored_path)\n return pd.read_csv(scored_path)\n\n dataset = pd.read_csv(orig_path)\n postprocess_fn(dataset)\n score_dataset(dataset, models, 'text')\n print('Saving scores to:', scored_path)\n dataset.to_csv(scored_path)\n return dataset\n\ndef postprocess_madlibs(madlibs):\n \"\"\"Modifies madlibs data to have standard 'text' and 'label' columns.\"\"\"\n # Native madlibs data uses 'Label' column with values 'BAD' and 'NOT_BAD'.\n # Replace with a bool.\n madlibs['label'] = madlibs['Label'] == 'BAD'\n madlibs.drop('Label', axis=1, inplace=True)\n madlibs.rename(columns={'Text': 'text'}, inplace=True)\n\ndef postprocess_wiki_dataset(wiki_data):\n \"\"\"Modifies Wikipedia dataset to have 'text' and 'label' columns.\"\"\"\n wiki_data.rename(columns={'is_toxic': 'label',\n 'comment': 'text'},\n inplace=True)\n\n\nclass ToxModel():\n \"\"\"Toxicity model.\"\"\"\n\n def __init__(self,\n model_name=None,\n model_dir=DEFAULT_MODEL_DIR,\n embeddings_path=DEFAULT_EMBEDDINGS_PATH,\n hparams=None):\n self.model_dir = model_dir\n self.embeddings_path = embeddings_path\n self.model_name = model_name\n self.model = None\n self.tokenizer = None\n self.hparams = DEFAULT_HPARAMS.copy()\n if hparams:\n self.update_hparams(hparams)\n if model_name:\n self.load_model_from_name(model_name)\n self.print_hparams()\n\n def print_hparams(self):\n print('Hyperparameters')\n print('---------------')\n for k, v in self.hparams.iteritems():\n print('{}: {}'.format(k, v))\n print('')\n\n def update_hparams(self, new_hparams):\n self.hparams.update(new_hparams)\n\n def get_model_name(self):\n return self.model_name\n\n def save_hparams(self, model_name):\n self.hparams['model_name'] = model_name\n with open(\n os.path.join(self.model_dir, '%s_hparams.json' % self.model_name),\n 'w') as f:\n json.dump(self.hparams, f, sort_keys=True)\n\n def load_model_from_name(self, model_name):\n self.model = load_model(\n os.path.join(self.model_dir, '%s_model.h5' % model_name))\n self.tokenizer = cPickle.load(\n open(\n os.path.join(self.model_dir, '%s_tokenizer.pkl' % model_name),\n 'rb'))\n with open(\n os.path.join(self.model_dir, '%s_hparams.json' % self.model_name),\n 'r') as f:\n self.hparams = json.load(f)\n\n def fit_and_save_tokenizer(self, texts):\n \"\"\"Fits tokenizer on texts and pickles the tokenizer state.\"\"\"\n self.tokenizer = Tokenizer(num_words=self.hparams['max_num_words'])\n self.tokenizer.fit_on_texts(texts)\n cPickle.dump(self.tokenizer,\n open(\n os.path.join(self.model_dir,\n '%s_tokenizer.pkl' % self.model_name), 'wb'))\n\n def prep_text(self, texts):\n \"\"\"Turns text into into padded sequences.\n\n The tokenizer must be initialized before calling this method.\n\n Args:\n texts: Sequence of text strings.\n\n Returns:\n A tokenized and padded text sequence as a model input.\n \"\"\"\n text_sequences = self.tokenizer.texts_to_sequences(texts)\n return pad_sequences(\n text_sequences, 
maxlen=self.hparams['max_sequence_length'])\n\n def load_embeddings(self):\n \"\"\"Loads word embeddings.\"\"\"\n embeddings_index = {}\n with open(self.embeddings_path) as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n self.embedding_matrix = np.zeros((len(self.tokenizer.word_index) + 1,\n self.hparams['embedding_dim']))\n num_words_in_embedding = 0\n for word, i in self.tokenizer.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n num_words_in_embedding += 1\n # words not found in embedding index will be all-zeros.\n self.embedding_matrix[i] = embedding_vector\n\n def train(self, training_data_path, validation_data_path, text_column,\n label_column, model_name):\n \"\"\"Trains the model.\"\"\"\n self.model_name = model_name\n self.save_hparams(model_name)\n\n train_data = pd.read_csv(training_data_path)\n valid_data = pd.read_csv(validation_data_path)\n\n print('Fitting tokenizer...')\n self.fit_and_save_tokenizer(train_data[text_column])\n print('Tokenizer fitted!')\n\n print('Preparing data...')\n train_text, train_labels = (self.prep_text(train_data[text_column]),\n to_categorical(train_data[label_column]))\n valid_text, valid_labels = (self.prep_text(valid_data[text_column]),\n to_categorical(valid_data[label_column]))\n print('Data prepared!')\n\n print('Loading embeddings...')\n self.load_embeddings()\n print('Embeddings loaded!')\n\n print('Building model graph...')\n self.build_model()\n print('Training model...')\n\n save_path = os.path.join(self.model_dir, '%s_model.h5' % self.model_name)\n callbacks = [\n ModelCheckpoint(\n save_path, save_best_only=True, verbose=self.hparams['verbose'])\n ]\n\n if self.hparams['stop_early']:\n callbacks.append(\n EarlyStopping(\n min_delta=self.hparams['es_min_delta'],\n monitor='val_loss',\n patience=self.hparams['es_patience'],\n verbose=self.hparams['verbose'],\n mode='auto'))\n\n self.model.fit(\n train_text,\n train_labels,\n batch_size=self.hparams['batch_size'],\n epochs=self.hparams['epochs'],\n validation_data=(valid_text, valid_labels),\n callbacks=callbacks,\n verbose=2)\n print('Model trained!')\n print('Best model saved to {}'.format(save_path))\n print('Loading best model from checkpoint...')\n self.model = load_model(save_path)\n print('Model loaded!')\n\n def build_model(self):\n \"\"\"Builds model graph.\"\"\"\n sequence_input = Input(\n shape=(self.hparams['max_sequence_length'],), dtype='int32')\n embedding_layer = Embedding(\n len(self.tokenizer.word_index) + 1,\n self.hparams['embedding_dim'],\n weights=[self.embedding_matrix],\n input_length=self.hparams['max_sequence_length'],\n trainable=self.hparams['embedding_trainable'])\n\n embedded_sequences = embedding_layer(sequence_input)\n x = embedded_sequences\n for filter_size, kernel_size, pool_size in zip(\n self.hparams['cnn_filter_sizes'], self.hparams['cnn_kernel_sizes'],\n self.hparams['cnn_pooling_sizes']):\n x = self.build_conv_layer(x, filter_size, kernel_size, pool_size)\n\n x = Flatten()(x)\n x = Dropout(self.hparams['dropout_rate'])(x)\n # TODO(nthain): Parametrize the number and size of fully connected layers\n x = Dense(128, activation='relu')(x)\n preds = Dense(2, activation='softmax')(x)\n\n rmsprop = RMSprop(lr=self.hparams['learning_rate'])\n self.model = Model(sequence_input, preds)\n self.model.compile(\n loss='categorical_crossentropy', optimizer=rmsprop, metrics=['acc'])\n\n def 
build_conv_layer(self, input_tensor, filter_size, kernel_size, pool_size):\n output = Conv1D(\n filter_size, kernel_size, activation='relu', padding='same')(\n input_tensor)\n if pool_size:\n output = MaxPooling1D(pool_size, padding='same')(output)\n else:\n # TODO(nthain): This seems broken. Fix.\n output = GlobalMaxPooling1D()(output)\n return output\n\n def predict(self, texts):\n \"\"\"Returns model predictions on texts.\"\"\"\n data = self.prep_text(texts)\n return self.model.predict(data)[:, 1]\n\n def score_auc(self, texts, labels):\n preds = self.predict(texts)\n return compute_auc(labels, preds)\n\n def summary(self):\n return self.model.summary()\n" ]
[ [ "numpy.asarray", "sklearn.metrics.roc_auc_score", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
yangcht/radex_emcee
[ "03d948b378773bf1d20a006cdab6143c57072cd0" ]
[ "emcee/emcee_radex_2comp.py" ]
[ "#!/usr/bin/env python\n# To run it on the cluster, login and use\n#\n# > srun -N 1 -c 16 --exclusive emcee_radex_2comp.py\n#\n# to launch it on a node with 16 core, or use\n#\n# > sbatch emcee_radex_2comp.py\n#\n#\n#SBATCH --nodes=1\n#SBATCH --ntasks=32\n#SBATCH --partition=cpu_only\n#SBATCH --account=cyang\n#SBATCH --exclusive\n#SBATCH --mail-type=END\n\n# autopep8 --ignore E26 emcee_radex_2comp.py\nimport os\n# For runing the code on the clusters\nimport sys\nimport logging\nimport _pickle as pickle \nimport warnings\nimport numpy as np\nfrom astropy.io import ascii\nfrom astropy import units as u\nfrom astropy.table import Table\nfrom scipy.optimize import curve_fit, minimize\nfrom scipy.interpolate import interp1d\n\n# to match the cosmo in the CO paper\nfrom astropy.cosmology import FlatLambdaCDM\ncosmo = FlatLambdaCDM(H0=67.8 * u.km / u.s / u.Mpc, Om0=0.308)\n\nimport matplotlib\n# Define the fonts to make plots look consistent across different machines\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams['mathtext.rm'] = 'Linux Biolinum'\nmatplotlib.rcParams['mathtext.it'] = 'Linux Biolinum:italic'\nmatplotlib.rcParams['mathtext.bf'] = 'Linux Biolinum:bold'\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\nimport multiprocessing\nimport emcee\nimport corner\nimport pyradex\n# import pyradex.fjdu\n\n\nkms = u.km / u.s\nJykms = u.Jy * kms\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nncpu = multiprocessing.cpu_count()\n\n#Assuming ortho to para ratio equals 3\nopr = 3\nfortho = opr/(1+opr)\n\n\ndef model_lvg(Jup, p, R=None):\n # component 1 + component 2\n log_density_1, log_temperature_1, log_column_1, log_size_1, \\\n log_density_2, log_temperature_2, log_column_2, log_size_2 = p\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ## 1st component \n R.set_params(density={'oH2':fortho*10.**log_density_1,'pH2':(1-fortho)*10.**log_density_1},\n column=10.**log_column_1,\n temperature=10.**log_temperature_1)\n R.run_radex(validate_colliders=False,\n reuse_last=True, reload_molfile=False)\n result_1 = R.source_line_surfbrightness # Do not use get_table()\n ## 2nd component \n R.set_params(density={'oH2':fortho*10.**log_density_2,'pH2':(1-fortho)*10.**log_density_2},\n column=10.**log_column_2,\n temperature=10.**log_temperature_2)\n R.run_radex(validate_colliders=False,\n reuse_last=True, reload_molfile=False)\n result_2 = R.source_line_surfbrightness # Do not use get_table()\n\n intensity = (result_1[np.asarray(np.int_(Jup)) - 1] * (10.**log_size_1 * u.sr) * (1. * kms)).to(Jykms) + \\\n (result_2[np.asarray(np.int_(Jup)) - 1] * (10.**log_size_2 * u.sr) * (1. * kms)).to(Jykms)\n return intensity.value\n\n\ndef model_single_lvg(Jup, p, R=None):\n log_density, log_temperature, log_column, log_size = p\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n R.set_params(density={'oH2':fortho*10.**log_density,'pH2':(1-fortho)*10.**log_density},\n column=10.**log_column,\n temperature=10.**log_temperature)\n R.run_radex(validate_colliders=False,\n reuse_last=True, reload_molfile=False)\n result = R.source_line_surfbrightness # Do not use get_table()\n intensity = (result[np.asarray(np.int_(Jup)) - 1] *\n (10.**log_size * u.sr) * (1. 
* kms)).to(Jykms)\n return intensity.value\n\n\ndef model_lvg_tau(p, R=None):\n # component 1 + component 2\n log_density_1, log_temperature_1, log_column_1, log_size_1, \\\n log_density_2, log_temperature_2, log_column_2, log_size_2 = p\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ## 1st component \n R.set_params(density={'oH2':fortho*10.**log_density_1,'pH2':(1-fortho)*10.**log_density_1},\n column=10.**log_column_1,\n temperature=10.**log_temperature_1)\n R.run_radex(validate_colliders=False,\n reuse_last=True, reload_molfile=False)\n result_1 = np.amax(R.tau)\n ## 2nd component \n R.set_params(density={'oH2':fortho*10.**log_density_2,'pH2':(1-fortho)*10.**log_density_2},\n column=10.**log_column_2,\n temperature=10.**log_temperature_2)\n R.run_radex(validate_colliders=False,\n reuse_last=True, reload_molfile=False)\n result_2 = np.amax(R.tau)\n return np.amax([result_1,result_2])\n\n\ndef residual(p, R=None, Jup=None, flux=None, eflux=None):\n model_flux = model_lvg(Jup, p, R)\n return (flux - model_flux) / eflux\n\n\ndef lnlike(p, Jup, flux, eflux, R=None):\n \"\"\" likelihood function\"\"\"\n try:\n model_flux = model_lvg(Jup, p, R)\n except ValueError:\n return -np.inf\n # Some nan sometimes....\n if np.any(np.isnan(model_flux)):\n return -np.inf\n return -0.5 * (np.sum(((flux - model_flux)**2.0 / eflux**2.0)+np.log(eflux**2.0)))\n\n\ndef lnprior(p, bounds, T_d=None, R=None):\n \"\"\"Gaussian prior(T_d)\"\"\"\n # n_H2 T_kin N_CO size\n # p[0] p[1] p[2] p[3] ## 1st component\n # p[4] p[5] p[6] p[7] ## 2nd component\n log_density_1, log_temperature_1, log_column_1, log_size_1,\\\n log_density_2, log_temperature_2, log_column_2, log_size_2 = p\n \n # First Check boundaries\n if (np.any(p > bounds[:, 1]) or np.any(p < bounds[:, 0])):\n return -np.inf\n\n # Forcing the 2nd component to be warmer\n if (np.any(p[5] <= p[1])):\n return -np.inf\n\n # 9 < log10(N_CO/dv) - log10(n_H2) < 18\n if (np.any((p[2] - p[0]) >= 18.0) or np.any((p[2] - p[0]) <= 9.0) or\n np.any((p[6] - p[4]) >= 18.0) or np.any((p[6] - p[4]) <= 9.0)) :\n return -np.inf\n\n ## The size of the cold component should be larger!\n if (np.any((p[3] < p[7]))):\n return -np.inf\n\n # Add bounds fro tau, should be < 100\n # try:\n # tau_max = model_lvg_tau(p, R)\n # except ValueError:\n # return -np.inf\n # \n # if (tau_max > 100):\n # return -np.inf\n\n logp=0\n for index, (value, bound) in enumerate(zip(p, bounds)):\n if index == 1:\n # First T_kin -> Gaussian around (T_d, sigma_T_d=1*T_d)\n logp += (-((10.0**value-T_d)/(1.0*T_d))**2.0/2.0 - np.log(T_d*np.sqrt(2.0*np.pi)))\n else:\n # Classical Uniform prior\n logp += -(bound[1]-bound[0])\n return logp\n\n\ndef lnprob(p, Jup, flux, eflux, bounds=None, T_d=None):\n lp = lnprior(p, bounds, T_d=T_d, R=R)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(p, Jup, flux, eflux, R=R)\n\n\ndef read_data(filename):\n \"\"\"Read data into a comprehensible panda frame\"\"\"\n \n ######################################################\n # Disadvantage here: only includes J_up = 11 here, #\n # please manually add more if you have #\n # J_up >= 12 CO lines #\n ######################################################\n \n ascii_data = ascii.read(\n filename, names=[\n \"SOURCE\", \"z\", \"D_L\", \"T_d\", \"line_width\",\n \"CO_J_1\", \"eCO_J_1\", \"CO_J_2\", \"eCO_J_2\", \"CO_J_3\", \"eCO_J_3\",\n \"CO_J_4\", \"eCO_J_4\", \"CO_J_5\", \"eCO_J_5\", \"CO_J_6\", \"eCO_J_6\",\n \"CO_J_7\", \"eCO_J_7\", \"CO_J_8\", \"eCO_J_8\", \"CO_J_9\", \"eCO_J_9\",\n 
\"CO_J_10\", \"eCO_J_10\", \"CO_J_11\", \"eCO_J_11\", \"CI_1\", \"eCI_1\",\n \"CI_2\", \"eCI_2\"])\n\n pd = ascii_data.to_pandas()\n pd = pd.set_index('SOURCE')\n return pd.T\n\n\ndef get_source(source, data):\n \"\"\"Retrieve redshift and CO data for a particular source\"\"\"\n\n # source = 'NCv1.143'\n z = data[source]['z']\n line_width = data[source]['line_width']\n T_d = data[source]['T_d']\n delta_v = 1 * kms # do not care actually, fully degenerate with\n # the column density\n\n # selecting only CO lines\n keys = [key for key in data[source].keys()\n if 'CO' in key and 'eCO' not in key]\n CO_data = Table(np.asarray([(Jlow + 1, data[source][key], data[source]['e' + key])\n for Jlow, key in enumerate(keys)\n if np.isfinite(data[source][key])]),\n names=['Jup', 'flux', 'eflux'],\n dtype=[int, float, float])\n\n Jup = CO_data['Jup'].data\n flux = CO_data['flux'].data * Jykms\n eflux = CO_data['eflux'].data * Jykms\n\n return z, T_d, line_width, Jup, flux, eflux\n\n\n# the pyradex.Radex object MUST be declared at the module level for\n# lnprob to work properly\n# .... Setup the Radex parameters\n\n# Note that N_CO is totaly degenerated with deltav, so in\n# principle we should fit for N_CO/deltav\nR = pyradex.Radex(species='co', datapath=\"radex_moldata\",\n density={'oH2':fortho*10.**10.0,'pH2':(1-fortho)*10.**10.0},\n column=10.**6.0,\n temperature=20.0,\n tbackground=2.7315,\n deltav=1.0,\n escapeProbGeom='lvg')\n\n# R = pyradex.fjdu.Fjdu(species='co', datapath=\"radex_moldata\",\n# density={'oH2':fortho*10.**10,'pH2':(1-fortho)*10.**10},\n# column=10**6,\n# temperature=20,\n# tbg=2.7315,\n# deltav=1,\n# escapeProbGeom='lvg')\n\n\ndef replot(source):\n\n plt.ion()\n # Retrieve the data\n with open(\"./double/{}_bounds_2comp.pickle\".format(source), 'rb') as pkl_file:\n (source, z, bounds, T_d, \n (Jup, flux, eflux), (popt, pcov), pmin, pemcee, (chain, lnprobability)) = pickle.load(pkl_file)\n\n R.set_params(tbg=2.7315 * (1 + z))\n\n # Get the max posterior within +/-1 sigma range\n flatchain = chain.reshape((chain.shape[0]*chain.shape[1]),8) \n lnp = lnprobability.reshape((lnprobability.shape[0]*lnprobability.shape[1]),1)\n lower, upper = np.percentile(flatchain, [16, 84],axis=0)\n narrow_flatchain = flatchain[(flatchain[:,0] > lower[0]*1) & (flatchain[:,0] < upper[0]*1) & \\\n (flatchain[:,1] > lower[1]*1) & (flatchain[:,1] < upper[1]*1) & \\\n (flatchain[:,2] > lower[2]*1) & (flatchain[:,2] < upper[2]*1) & \\\n (flatchain[:,3] > lower[3]*1) & (flatchain[:,3] < upper[3]*1) & \\\n (flatchain[:,4] > lower[4]*1) & (flatchain[:,4] < upper[4]*1) & \\\n (flatchain[:,5] > lower[5]*1) & (flatchain[:,5] < upper[5]*1) & \\\n (flatchain[:,6] > lower[6]*1) & (flatchain[:,6] < upper[6]*1) & \\\n (flatchain[:,7] > lower[7]*1) & (flatchain[:,7] < upper[7]*1) ]\n narrow_lnp = lnp[(flatchain[:,0] > lower[0]*1) & (flatchain[:,0] < upper[0]*1) & \\\n (flatchain[:,1] > lower[1]*1) & (flatchain[:,1] < upper[1]*1) & \\\n (flatchain[:,2] > lower[2]*1) & (flatchain[:,2] < upper[2]*1) & \\\n (flatchain[:,3] > lower[3]*1) & (flatchain[:,3] < upper[3]*1) & \\\n (flatchain[:,4] > lower[4]*1) & (flatchain[:,4] < upper[4]*1) & \\\n (flatchain[:,5] > lower[5]*1) & (flatchain[:,5] < upper[5]*1) & \\\n (flatchain[:,6] > lower[6]*1) & (flatchain[:,6] < upper[6]*1) & \\\n (flatchain[:,7] > lower[7]*1) & (flatchain[:,7] < upper[7]*1) ] \n pemcee_max = narrow_flatchain[narrow_lnp.argmax()]\n\n model_Jup = range(1, 12)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n minorLocator_x = MultipleLocator(1)\n 
#minorLocator_y = MultipleLocator(0.5)\n ax.errorbar(Jup, flux.value, eflux.value, fmt='o', label='$\\mathrm{data}$', color='#000000', capsize=0)\n plot_Jup = np.arange(min(model_Jup), max(model_Jup), 0.05) # smoothing the model line\n\n pemcee_max_c = pemcee_max[:4]\n pemcee_max_w = pemcee_max[4:]\n pemcee_c = pemcee[:4]\n pemcee_w = pemcee[4:]\n\n f_inter_pemcee_max = interp1d(model_Jup, model_lvg(model_Jup, pemcee_max, R), kind='cubic')\n f_inter_pemcee_max_c = interp1d(model_Jup, model_single_lvg(model_Jup, pemcee_max_c, R), kind='cubic')\n f_inter_pemcee_max_w = interp1d(model_Jup, model_single_lvg(model_Jup, pemcee_max_w, R), kind='cubic')\n f_inter_pemcee = interp1d(model_Jup, model_lvg(model_Jup, pemcee, R), kind='cubic')\n f_inter_pemcee_c = interp1d(model_Jup, model_single_lvg(model_Jup, pemcee_c, R), kind='cubic')\n f_inter_pemcee_w = interp1d(model_Jup, model_single_lvg(model_Jup, pemcee_w, R), kind='cubic')\n\n ax.plot(plot_Jup, f_inter_pemcee_max(plot_Jup), label=r'$\\mathrm{{MCMC}}$', color='#FFA833')\n ax.plot(plot_Jup, f_inter_pemcee_max_w(plot_Jup), linestyle='--', color='#fcc82d')\n ax.plot(plot_Jup, f_inter_pemcee_max_c(plot_Jup), linestyle='-.', color='#ff7b33')\n #ax.plot(plot_Jup, f_inter_pemcee(plot_Jup), label=r'$\\mathrm{median_{MCMC}}$', color='#58b82a')\n #ax.plot(plot_Jup, f_inter_pemcee_c(plot_Jup), linestyle='--', color='#198189')\n #ax.plot(plot_Jup, f_inter_pemcee_w(plot_Jup), linestyle=':', color='#b1d623')\n ax.set_xlabel(r'$J_\\mathrm{up}$',fontsize=14)\n ax.set_ylabel(r'$I_\\mathrm{CO}\\;[\\mathrm{Jy\\;km\\;s^{-1}}]$',fontsize=14)\n ax.legend(loc=0, prop={'size':11}, numpoints=1)\n ax.xaxis.set_minor_locator(minorLocator_x)\n #ax.yaxis.set_minor_locator(minorLocator_y)\n fig.suptitle('$\\mathrm{'+source+'}$',fontsize = 15)\n fig.savefig(\"./double/{}_SLED_2comp.pdf\".format(source))\n\n # plots for the full corner\n chain_plot = np.hstack((flatchain[:,[0,1,2,3]], flatchain[:,[4,5,6,7]]))\n plot_range=[(1.9,7.1),(1,3.02),(14.5, 19.5),(-12.5,-8.5),(1.9,7.1),(1,3.0),(14.5, 19.5),(-12.5,-8.5)]\n fig = corner.corner(chain_plot,\n labels=[r'$\\mathrm{log}_{10}(n_\\mathrm{H_2,\\,c}\\;[\\mathrm{cm}^{-3}])$',\n r'$\\mathrm{log}_{10}(T_\\mathrm{kin,\\,c}\\;[\\mathrm{K}])$',\n r'$\\mathrm{log}_{10}({N_\\mathrm{CO,\\,c}}/{\\mathrm{d}v}\\;[\\frac{\\mathrm{cm}^{-2}}{\\mathrm{km\\,s}^{-1}}])$',\n r'$\\mathrm{log}_{10}(\\mathrm{[size,\\,c\\,sr^{-1}]})$',\n r'$\\mathrm{log}_{10}(n_\\mathrm{H_2,\\,w}\\;[\\mathrm{cm}^{-3}])$',\n r'$\\mathrm{log}_{10}(T_\\mathrm{kin,\\,w}\\;[\\mathrm{K}])$',\n r'$\\mathrm{log}_{10}({N_\\mathrm{CO,\\,w}}/{\\mathrm{d}v}\\;[\\frac{\\mathrm{cm}^{-2}}{\\mathrm{km\\,s}^{-1}}])$',\n r'$\\mathrm{log}_{10}(\\mathrm{[size,\\,w\\,sr^{-1}]})$',\n ],\n show_titles=True, title_kwargs={\"fontsize\": 11}, label_kwargs={\"fontsize\": 15},\n plot_datapoints=False, range=plot_range, max_n_ticks=6, smooth=0.8,\n quantiles=(0.16, 0.84), truths=np.hstack((pemcee_max_c,pemcee_max_w)), \n truth_color=\"#FFA833\", color=\"#2B61DD\", bins=24)\n fig.suptitle('$\\mathrm{'+source+'}$',fontsize = 16)\n fig.savefig(\"./double/{}_corner_2comp_all.pdf\".format(source))\n \n # plots for publication, remove size from the plot\n chain_cold = flatchain[:,[0,1,2]]\n chain_warm = flatchain[:,[4,5,6]]\n new_pemcee_max_c = np.hstack((pemcee_max_c[:3])) # only show n_H2, T_kin, N_CO and Pressure\n new_pemcee_max_w = np.hstack((pemcee_max_w[:3])) # only show n_H2, T_kin, N_CO and Pressure\n plot_range=[(1.9,7.1),(1,3.02),(14.5, 19.5)]\n fig = corner.corner(chain_cold,\n 
labels=[r'$\\mathrm{log}_{10}(n_\\mathrm{H_2}\\;[\\mathrm{cm}^{-3}])$',\n r'$\\mathrm{log}_{10}(T_\\mathrm{kin}\\;[\\mathrm{K}])$',\n r'$\\mathrm{log}_{10}({N_\\mathrm{CO}}/{\\mathrm{d}v}\\;[\\frac{\\mathrm{cm}^{-2}}{\\mathrm{km\\,s}^{-1}}])$'],\n show_titles=True, title_kwargs={\"fontsize\": 11}, label_kwargs={\"fontsize\": 15}, \n plot_datapoints=False, range=plot_range, max_n_ticks=6, smooth=0.8,\n quantiles=(0.16, 0.84), truths=new_pemcee_max_c,\n truth_color=\"#fcc82d\", color=\"#198189\", bins=24)\n fig.savefig(\"./double/{}_corner_2comp_1.pdf\".format(source))\n \n fig = corner.corner(chain_warm,\n labels=[r'$\\mathrm{log}_{10}(n_\\mathrm{H_2}\\;[\\mathrm{cm}^{-3}])$',\n r'$\\mathrm{log}_{10}(T_\\mathrm{kin}\\;[\\mathrm{K}])$',\n r'$\\mathrm{log}_{10}({N_\\mathrm{CO}}/{\\mathrm{d}v}\\;[\\frac{\\mathrm{cm}^{-2}}{\\mathrm{km\\,s}^{-1}}])$'],\n show_titles=True, title_kwargs={\"fontsize\": 11}, label_kwargs={\"fontsize\": 15}, \n plot_datapoints=False, range=plot_range, max_n_ticks=6, smooth=0.8,\n quantiles=(0.16, 0.84), truths=new_pemcee_max_w,\n truth_color=\"#ff7b33\", color=\"#b1d623\", bins=24)\n fig.savefig(\"./double/{}_corner_2comp_2.pdf\".format(source))\n \n # Print the MCMC results\n chain_plot_cold = np.hstack((chain_cold[:,[0,1,2]], chain_cold[:,[0]]+chain_cold[:,[1]]))\n chain_plot_warm = np.hstack((chain_warm[:,[0,1,2]], chain_warm[:,[0]]+chain_warm[:,[1]]))\n n_c, T_c, N_c, P_c = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]), \n list(zip(*np.percentile(chain_plot_cold, [16, 50, 84], axis=0))))\n n_w, T_w, N_w, P_w = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]), \n list(zip(*np.percentile(chain_plot_warm, [16, 50, 84], axis=0))))\n \n print(\"#### cold component - median ####\")\n print(' ', n_c[0],' ', T_c[0],' ', N_c[0],' ', P_c[0])\n print('+', n_c[1],'+', T_c[1],'+', N_c[1],'+', P_c[1])\n print('-', n_c[2],'-', T_c[2],'-', N_c[2],'-', P_c[2])\n print(\"4max\", '\\n', pemcee_max_c)\n print(\"=================================\")\n print(\"#### warm component - median ####\")\n print(' ', n_w[0],' ', T_w[0],' ', N_w[0],' ', P_w[0])\n print('+', n_w[1],'+', T_w[1],'+', N_w[1],'+', P_w[1])\n print('-', n_w[2],'-', T_w[2],'-', N_w[2],'-', P_w[2])\n print(\"4max\", '\\n', pemcee_max_w)\n print(\"=================================\")\n\n\ndef main():\n\n data = read_data(\"../data/flux_for2p.dat\")\n\n if not os.path.exists(\"./double\"):\n os.makedirs(\"./double\")\n\n for source in data.keys():\n # if True:\n # source = \"SDP81\"\n # source = 'NCv1.143'\n\n logger.info('Processing {}'.format(source))\n\n delta_v = 1 * kms # do not care actually, fully degenerate\n # with the column density\n\n # TODO: would it help to use the CI data ?\n # If 2 lines probably yes, otherwise, must be degenerate with\n # the abondance\n\n # Retrieve data\n z, T_d, line_width, Jup, flux, eflux = get_source(source, data)\n R.set_params(tbg=2.7315 * (1 + z))\n\n # Line : Jy km/s\n # 1 Jansky=1 Jy = 10e-26 W m-2 Hz-1 = 1e-23 erg s-1 cm-2 Hz-1\n # brightness erg s-1 cm-2 Hz-1 sr-1 ~ W m-2 Hz-1 sr-1 ~ Jy sr-1\n\n # Radex output : erg s-1 cm-2 = 1e3 W m-2\n # pyradex output : erg s-1 cm-2 Hz-1 sr-1\n\n # K*km/s is an integrated surface brightness, while the value\n # in erg/s/cm2 is an isotopic flux emitted in all directions.\n\n # Specicic Intensity I(nu) W m-2 Hz-1 sr-1\n # Flux density W m-2 Hz-1 ~ 10e26 Jy : S = I * Surf./d**2\n\n # -> Specific (velocity integrated ) Intensity\n # I = S_nu * d**2 / surf\n\n # Size:\n # R_source = D_A * sqrt(size/pi)\n # This is uncorrected for lensing 
magification\n\n # H2 density : Number density of collision partners : Unit: cm-3. ; Allowed range: 10(-3) - 10(13)\n # Column density : Unit: cm-2 : Allowed range: 10(5) - 10(25)\n # Kinetic temperature : Unit: K. Allowed range: 0.1 - 10,000\n # linewidth : Thermal + turbulent width (FWHM) of the lines.; Unit: km s-1. ; Allowed range: 10(-3) - 10(3)\n # Size : Unit sr\n\n bounds = np.array([[-3, 13], # log H2 density\n [2, 3000], # Kinetic temperature\n # (Collisions temperature of CO\n # in radex files)\n [5, 25], # log Column density\n [-20, -5], # log size\n ##############\n [-3, 13], # log H2 density\n [2, 3000], # Kinetic temperature\n # (Collisions temperature of CO\n # in radex files)\n [5, 25], # log Column density\n [-20, -5]]) # log size\n\n R_angle = ((7/(cosmo.angular_diameter_distance(z).value*1000.0))**2 * np.pi)*10\n # Assuming, 7 kpc size and mu=10 lensing magnification\n\n # basic starting point\n p0 = [5.6, # np.log10(total_density)\n 24, # temperature\n 18, # 10**16.75*300, # np.log10(column)\n np.log10(R_angle), # np.log10(size)\n ##########\n 5.6, # np.log10(total_density)\n 24, # temperature\n 18, # 10**16.75*300, # np.log10(column)\n np.log10(R_angle)] # np.log10(size)\n\n # As suggested by https://arxiv.org/pdf/1212.5955.pdf\n # Further narrowing down according to Zhang et al, A&A 568, A122 (2014)\n # remove unrealistic ranges\n # Ranges:\n # n_H2 = 10^1.5 -- 10^7.0 cm^-3\n # T_kin = T_CMB -- 10^3 K\n # N_CO/dv = 10^15.5 -- 10^19.5 cm^-2 (km/s)^-1\n # dv/dr = 0.1 -- 1000 (Tunnard+2016, Xco=5e-5), saying r ~ 1-5 kpc, delta_V = 250-700 km/s\n # --> 6.18e13 < N_CO/n_H2 < 6.18e17 \n # --> due to lensing uncertainties, add +/- 50 lensing uncertainty factor, multiply delta_V = delta_V = 250-700 km/s\n # --> 10.0 < log10(N_CO/dv) - log10(n_H2) < 17.5, \n # Additional constrains:\n # N_CO/(n_H2*Xco) < 2 R = 10 kpc (assuming disk is =<5 kpc, Xco=5e-5)\n # --> N_CO/n_H2 < 5e19, within the dv/dr range.\n ##### 1st component #####\n bounds = np.array([[1.5, 7.0], \n [np.log10(2.7315 * (1 + z)), 3.0], \n [14.5, 19.5], \n [np.log10(R_angle)-5, np.log10(R_angle)+5],\n ##### 2nd component #####\n [1.5, 7.0], \n [np.log10(2.7315 * (1 + z)), 3.0], \n [14.5, 19.5], \n [np.log10(R_angle)-5, np.log10(R_angle)+5]])\n\n p0 = [4.6, # np.log10(n_H2_cold) = 3.0\n 1.5, # np.log10(T_kin_cold) = 1.5\n 17.7, # 10**16.8*300, # np.log10(column)\n -10.5, # np.log10(size)\n ###########\n 3.8, # np.log10(n_H2_cold) = 2.9\n 2.3, # nnp.log10(T_kin_warm) = 2.5\n 17.6, # 10**16.7*300, # np.log10(column)\n -11.1] # np.log10(size)\n\n # Simple curve_fit to find a starting point\n # Using lambda to get the R object through\n opt_fun = lambda p, log_density_1, log_temperature_1, log_column_1, log_size_1, \\\n log_density_2, log_temperature_2, log_column_2, log_size_2: \\\n model_lvg(p, [log_density_1, log_temperature_1, log_column_1, log_size_1,\n log_density_2, log_temperature_2, log_column_2, log_size_2], R=R)\n\n try:\n popt, pcov = curve_fit(opt_fun, Jup, flux.value,\n sigma=eflux.value, p0=p0,\n bounds=list(zip(*bounds)))\n logger.info(\" curve_fit : {}\".format(popt))\n except RuntimeError:\n logger.warn(\" curve_fit : failed\")\n popt = p0\n pcov = None\n\n # minimize do not work well far from the solution, so use the\n # curve_fit solution to test the lnprob function...\n p = popt\n nll = lambda p, Jup, flux, eflux, : - \\\n lnprob(p, Jup, flux, eflux, bounds, T_d)\n result = minimize(nll, p,\n args=(Jup, flux.value, eflux.value),\n bounds=bounds)\n pmin = result.x\n logger.info(\" minimize : 
{}\".format(pmin))\n\n # Do the heavy computation\n ndim = len(popt)\n \n #################### Define the number of walkers here\n nwalkers = 400 # 400 walkers\n n_iter_burn = 100 # burning phase, number of iterations = 100\n n_iter_walk = 1000 # walking phase, number of iterations = 1000\n \n # Random starting positions\n pos = [popt + 1e-3 * np.random.randn(ndim) for i in range(nwalkers)]\n \n # Multithread\n with multiprocessing.Pool() as pool:\n sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,\n args=(Jup, flux.value, eflux.value),\n kwargs={'bounds': bounds, 'T_d': T_d},\n threads=ncpu)\n # Burning time\n logger.info(\" burning samples\")\n pos, prob, state = sampler.run_mcmc(\n pos, n_iter_burn) # now 100, will be 1000\n sampler.reset()\n # Sampling time\n logger.info(\" walking\")\n result = sampler.run_mcmc(pos, n_iter_walk) # now 1000, will be 5000\n pemcee = np.percentile(sampler.flatchain, [50], axis=0)[0]\n\n chain, lnprobability = sampler.chain, sampler.lnprobability\n\n with open(\"./double/{}_bounds_2comp.pickle\".format(source), 'wb') as pkl_file:\n pickle.dump((source, z, bounds, T_d,\n (Jup, flux, eflux), (popt, pcov), pmin, pemcee, (chain, lnprobability)),\n pkl_file)\n\n # Quick plot the model\n # replot(source)\n\n # Print only the best fitted n_H2, T_kin, N_CO and Pressure\n pmin_c = pmin[:4]\n pmin_w = pmin[4:]\n\n chain_cold = np.hstack((sampler.flatchain[:,[0,1,2]],\n sampler.flatchain[:,[0]]+sampler.flatchain[:,[1]]))\n chain_warm = np.hstack((sampler.flatchain[:,[4,5,6]],\n sampler.flatchain[:,[4]]+sampler.flatchain[:,[5]]))\n new_pmin_c = np.hstack((pmin_c[:3],pmin_c[0]+pmin_c[1]))\n new_pmin_w = np.hstack((pmin_w[:3],pmin_w[0]+pmin_w[1]))\n\n n_c, T_c, N_c, P_c = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),\n list(zip(*np.percentile(chain_cold, [16, 50, 84], axis=0))))\n n_w, T_w, N_w, P_w = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),\n list(zip(*np.percentile(chain_warm, [16, 50, 84], axis=0))))\n\n # Output the best fit\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n print(\"xxx:\", source, '\\n', \"xxx: minimised results\")\n print(\"xxx:\", new_pmin_c, '\\n', new_pmin_w)\n # Print the MCMC results for +/- 1 sigma range of the paremeters\n print(\"xxx: emcee results\")\n print(\"xxx:\", \"n_H2\")\n print(\"xxx:\", n_c, '\\n', \"xxx:\",n_w)\n print(\"xxx:\", \"T_kin\")\n print(\"xxx:\", T_c, '\\n', \"xxx:\",T_w)\n print(\"xxx:\", \"N_CO/dv\")\n print(\"xxx:\", N_c, '\\n', \"xxx:\",N_w)\n print(\"xxx:\", \"P\")\n print(\"xxx:\", P_c, '\\n', \"xxx:\",P_w)\n\nif __name__ == '__main__':\n\n plt.ion()\n # plt.close('all')\n main()\n\n\n# See :\n# - https://arxiv.org/pdf/1602.01095.pdf\n# - https://arxiv.org/pdf/1401.2998.pdf\n# - https://arxiv.org/pdf/1212.5955.pdf\n# - https://arxiv.org/abs/0809.2337\n# plt.figure()\n# plt.plot(Jup, model[:8]/model[0])\n" ]
[ [ "numpy.hstack", "numpy.amax", "matplotlib.ticker.MultipleLocator", "numpy.log", "numpy.sqrt", "numpy.isfinite", "numpy.isnan", "matplotlib.use", "numpy.percentile", "numpy.int_", "numpy.log10", "scipy.optimize.minimize", "numpy.any", "numpy.random.randn", "numpy.array", "matplotlib.pyplot.ion", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
Akssi/spin-pommerman
[ "8d74e00fdc95610a8dcc9acd59b3bfebfe2078b3" ]
[ "train_singleAgent.py" ]
[ "import pommerman\nfrom pommerman import agents\nimport SPINAgents\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport gym\nfrom torch.autograd import Variable\nimport random\nfrom collections import namedtuple\n\nimport sys, getopt\n\nfrom tensorboardX import SummaryWriter\n\nTransition = namedtuple('Transition',\n ('state', 'action', 'next_state', 'reward', 'done'))\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\ndef save_checkpoint(state, agent):\n filename = \"Checkpoints/\" + agent + '_game #' + str(state['epoch']) + \".pth\"\n torch.save(state, filename) \n\ndef load_checkpoint(agent, path):\n checkpoint = torch.load(path)\n agent.Q.load_state_dict(checkpoint['state_dict_Q'])\n agent.target_Q.load_state_dict(checkpoint['state_dict_target_Q'])\n agent.optimizer.load_state_dict(checkpoint['optimizer'])\n \n return checkpoint['epoch']\n\ndef main(argv):\n checkpointFilePath = ''\n alwaysRender = False\n forceRestartOnDeath = False\n try:\n opts, args = getopt.getopt(argv,\"hrRc:a:\",[\"checkpoint=\",\"agent=\",\"restart_on_death\"])\n except getopt.GetoptError:\n print('Error in command arguments. Run this for help:\\n\\ttrain_singleAgent.py -h')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print(\"train_singleAgent.py\" +\n \"\\n-c <checkpointfile> => Resume training from a saved checkpoint\" +\n \"\\n-a(--agent) <agent version> => Version of agent to train (default=0)\" +\n \"\\n-r => Always render\" +\n \"\\n-R(--restart_on_death) => Always render\")\n sys.exit()\n elif opt in (\"-c\", \"--checkpoint\"):\n checkpointFilePath = arg\n elif opt in (\"-a\", \"--agent\"):\n agentName = arg\n elif opt == '-r':\n alwaysRender = True\n elif opt in (\"-R\", \"--restart_on_death\"):\n forceRestartOnDeath = True\n\n # Create a set of agents (exactly four)\n agent_list = [\n agents.SimpleAgent(),\n agents.SimpleAgent(),\n agents.SimpleAgent()\n ]\n if agentName == \"1\":\n agent_list.append(SPINAgents.SPIN_1())\n elif agentName == \"2\":\n agent_list.append(SPINAgents.SPIN_2())\n else:\n agent_list.append(SPINAgents.SPIN_0())\n\n\n # Make the \"Team\" environment using the agent list\n env = pommerman.make('PommeFFAFast-v0', agent_list)\n memory = ReplayMemory(100000)\n batch_size = 128\n epsilon = 1\n start_epoch = 0\n end_epoch = 5750\n\n # Writer to log data to tensorboard\n writer = SummaryWriter('runs')\n\n\n if checkpointFilePath != '':\n start_epoch = load_checkpoint(agent_list[3], checkpointFilePath)\n\n # Run the episodes just like OpenAI Gym\n for i in range(start_epoch, end_epoch):\n state = env.reset()\n done = False\n total_reward = [0] * len(agent_list)\n action_histo = np.zeros(6)\n epsilon *= 0.995\n alive_steps = 0\n while not done and (not forceRestartOnDeath or agent_list[3]._character.is_alive):\n if i > (end_epoch -50) or alwaysRender:\n env.render()\n # Set epsilon for our learning agent\n agent_list[3].epsilon = max(epsilon, 0.1)\n \n actions = env.act(state)\n next_obs, reward, done, _ = env.step(actions)\n state = 
next_obs\n\n # Fill replay memory for our learning agent\n memory.push(agent_list[3].Input, torch.LongTensor([actions[3]]),\n torch.from_numpy(agent_list[3].prepInput(state[3])).type(torch.FloatTensor), torch.Tensor([reward[3]]),\n torch.Tensor([done]))\n\n # Save infos about our leaning agent\n action_histo[actions[3]] += 1\n alive_steps += 1\n total_reward = [x + y for x, y in zip(total_reward, reward)]\n\n # Log infos about our leaning agent to tensorboad\n writer.add_scalars('data/actions', {'stop':action_histo[0], 'up':action_histo[1], 'down':action_histo[2], 'left':action_histo[3], 'right':action_histo[4], 'bomb':action_histo[5]}, i)\n writer.add_scalar('data/alive_steps', alive_steps, i)\n writer.add_scalar('data/epsilon', agent_list[3].epsilon, i)\n writer.add_scalar('data/memory', memory.__len__(), i)\n \n # Creates a dictionary with agent name and rewards to be logged on tensorboard\n total_reward_list = []\n for j in range(len(total_reward)):\n total_reward_list.append((type(agent_list[j]).__name__+'('+str(j)+')', total_reward[j]))\n writer.add_scalars('data/rewards', dict(total_reward_list), i)\n \n #### Log input of learning agent as image ####\n #### Use if agent has a 1 x 3 x N x N input matrix #### \n spinInput = agent_list[3].Input\n writer.add_image('end_img', spinInput.reshape(spinInput.shape[1], spinInput.shape[2], spinInput.shape[3]), i)\n\n print(\"Episode : \", i)\n if memory.__len__() > 10000:\n batch = memory.sample(batch_size)\n agent_list[3].backward(batch)\n if i > 0 and i % 750 == 0:\n save_checkpoint({\n 'epoch': i + 1,\n 'arch': 0,\n 'state_dict_Q': agent_list[3].Q.state_dict(),\n 'state_dict_target_Q': agent_list[3].target_Q.state_dict(),\n 'best_prec1': 0,\n 'optimizer' : agent_list[3].optimizer.state_dict(),\n }, agent_list[3].__class__.__name__)\n env.close()\n\n save_checkpoint({\n 'epoch': end_epoch + 1,\n 'arch': 0,\n 'state_dict_Q': agent_list[3].Q.state_dict(),\n 'state_dict_target_Q': agent_list[3].target_Q.state_dict(),\n 'best_prec1': 0,\n 'optimizer' : agent_list[3].optimizer.state_dict(),\n }, agent_list[3].__class__.__name__)\n\n\n\n writer.close()\n\nif __name__ == '__main__':\n main(sys.argv[1:])" ]
[ [ "torch.LongTensor", "torch.Tensor", "torch.load", "numpy.zeros", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sepehrgdr/Mode_Imputation
[ "bd7c17d05beacdbdf2f4c9fdefa3062a253607c8" ]
[ "Codes/Attribute_Extraction/calc_dist.py" ]
[ "import fiona\r\nimport shapely.geometry as sg\r\nfrom shapely.geometry import asMultiLineString\r\nimport time\r\nimport pandas as pd\r\nimport geopandas\r\nimport csv\r\nimport os\r\nimport numpy as np\r\n\r\n\r\ndef calc_dist(in_pts_file, in_net_file, out_path, out_name):\r\n \r\n # Create shapefile from csv file\r\n start_time = time.time()\r\n shapeout = in_pts_file.replace(\".csv\",\"_fiona.shp\")\r\n yourschema = {'geometry': 'Point',\r\n 'properties': {'point_id': 'int','long': 'float','lat': 'float'}}\r\n try:\r\n with fiona.open(shapeout, 'w',crs=fiona.crs.from_epsg(4326),driver='ESRI Shapefile', schema=yourschema) as output:\r\n reader = pd.read_csv(in_pts_file)\r\n count = 0\r\n for index, row in reader.iterrows():\r\n # geometry \r\n tmp_point = sg.Point(float(row['long']), float(row['lat']))\r\n # attributes\r\n prop = {'point_id': int(count),'long': float(row['long']),'lat': float(row['lat'])}\r\n # write the row (geometry + attributes in GeoJSON format)\r\n output.write({'geometry': sg.mapping(tmp_point), 'properties':prop})\r\n count += 1\r\n del row, reader\r\n output.close()\r\n print(output.closed)\r\n except:\r\n print(output.closed)\r\n print(\"---Creating a shapefile takes %s seconds for %s points---\" % ((time.time() - start_time),count))\r\n \r\n # Convert crs\r\n start_time = time.time()\r\n points = geopandas.read_file(shapeout)\r\n # change CRS to epsg 6350: NAD83\r\n points = points.to_crs({'init': 'epsg:6350'})\r\n points.to_file(shapeout.replace(\".shp\", \"_NAD83.shp\"))\r\n print(\"---Projecting a shapefile takes %s seconds for %sMB file---\" % ((time.time() - start_time),\r\n int(os.path.getsize(shapeout)*1e-6)))\r\n \r\n # Read network file\r\n start_time = time.time()\r\n network = geopandas.read_file(in_net_file)\r\n # change CRS to epsg 6350: NAD83\r\n network = network.to_crs({'init': 'epsg:6350'})\r\n network.to_file(in_net_file.replace(\".shp\", \"_NAD83.shp\"))\r\n print(\"---Projecting a shapefile takes %s seconds for %sMB file---\" % ((time.time() - start_time),\r\n int(os.path.getsize(in_net_file)*1e-6)))\r\n net_lines = network['geometry']\r\n line_output = asMultiLineString(net_lines)\r\n net_shply = sg.MultiLineString(line_output)\r\n print(\"Network converted to Shapely geometry object.\")\r\n \r\n # Calculate distance\r\n start_time = time.time()\r\n distance = []\r\n count = 0\r\n with fiona.open(shapeout.replace(\".shp\", \"_NAD83.shp\")) as coords:\r\n for feature in coords:\r\n geom = sg.shape(feature[\"geometry\"])\r\n distance.append( geom.distance(net_shply) )\r\n count += 1\r\n print(\"---Distance calculation takes %s seconds for %s points and %sMB network file---\" \r\n % ((time.time() - start_time),count,int(os.path.getsize(in_net_file)*1e-6)))\r\n \r\n # Store distance to csv\r\n with open(out_path + '\\\\' + out_name, 'w', newline='') as myfile: \r\n w = csv.writer(myfile, delimiter=',')\r\n w.writerows(zip(distance)) \r\n myfile.close()\r\n print(\"Distance calculated and stored!\")\r\n\r\ncalc_dist(in_pts_file = '/Users/sepehr_ghader/Desktop/Files/Projects/OD Project/AirSage/Inputs/Trip_Data/AirSage_Data/points_long_distance.csv',\r\n in_net_file ='/Users/sepehr_ghader/Desktop/Files/Projects/OD Project/AirSage/Inputs/Network_Data/Track_Network/National_Passenger_Rail_Plus_Metro/Psg_rail_NTM_metro.shp' ,\r\n out_path = '/Users/sepehr_ghader/Desktop/Files/Projects/OD Project/AirSage/Inputs/Trip_Data/',\r\n out_name = 'psg_rail_dist.csv')\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
alichaudry/pandas
[ "ce3e57b44932e7131968b9bcca97c1391cb6b532" ]
[ "pandas/core/resample.py" ]
[ "from __future__ import annotations\n\nimport copy\nfrom datetime import timedelta\nfrom textwrap import dedent\nfrom typing import Callable, Dict, Optional, Tuple, Union, no_type_check\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import (\n IncompatibleFrequency,\n NaT,\n Period,\n Timedelta,\n Timestamp,\n to_offset,\n)\nfrom pandas._typing import T, TimedeltaConvertibleTypes, TimestampConvertibleTypes\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution, doc\n\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\n\nfrom pandas.core.aggregation import aggregate\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import DataError\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.groupby.base import GotItemMixin, ShallowMixin\nfrom pandas.core.groupby.generic import SeriesGroupBy\nfrom pandas.core.groupby.groupby import (\n BaseGroupBy,\n GroupBy,\n _pipe_template,\n get_groupby,\n)\nfrom pandas.core.groupby.grouper import Grouper\nfrom pandas.core.groupby.ops import BinGrouper\nfrom pandas.core.indexes.api import Index\nfrom pandas.core.indexes.datetimes import DatetimeIndex, date_range\nfrom pandas.core.indexes.period import PeriodIndex, period_range\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range\n\nfrom pandas.tseries.frequencies import is_subperiod, is_superperiod\nfrom pandas.tseries.offsets import DateOffset, Day, Nano, Tick\n\n_shared_docs_kwargs: Dict[str, str] = {}\n\n\nclass Resampler(BaseGroupBy, ShallowMixin):\n \"\"\"\n Class for resampling datetimelike data, a groupby-like operation.\n See aggregate, transform, and apply functions on this object.\n\n It's easiest to use obj.resample(...) 
to use Resampler.\n\n Parameters\n ----------\n obj : pandas object\n groupby : a TimeGrouper object\n axis : int, default 0\n kind : str or None\n 'period', 'timestamp' to override default index treatment\n\n Returns\n -------\n a Resampler of the appropriate type\n\n Notes\n -----\n After resampling, see aggregate, apply, and transform functions.\n \"\"\"\n\n # to the groupby descriptor\n _attributes = [\n \"freq\",\n \"axis\",\n \"closed\",\n \"label\",\n \"convention\",\n \"loffset\",\n \"kind\",\n \"origin\",\n \"offset\",\n ]\n\n def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):\n self.groupby = groupby\n self.keys = None\n self.sort = True\n self.axis = axis\n self.kind = kind\n self.squeeze = False\n self.group_keys = True\n self.as_index = True\n self.exclusions = set()\n self.binner = None\n # pandas\\core\\resample.py:96: error: Incompatible types in assignment\n # (expression has type \"None\", variable has type \"BaseGrouper\")\n # [assignment]\n self.grouper = None # type: ignore[assignment]\n\n if self.groupby is not None:\n self.groupby._set_grouper(self._convert_obj(obj), sort=True)\n\n def __str__(self) -> str:\n \"\"\"\n Provide a nice str repr of our rolling object.\n \"\"\"\n attrs = (\n f\"{k}={getattr(self.groupby, k)}\"\n for k in self._attributes\n if getattr(self.groupby, k, None) is not None\n )\n return f\"{type(self).__name__} [{', '.join(attrs)}]\"\n\n def __getattr__(self, attr: str):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self._attributes:\n return getattr(self.groupby, attr)\n if attr in self.obj:\n return self[attr]\n\n return object.__getattribute__(self, attr)\n\n def __iter__(self):\n \"\"\"\n Resampler iterator.\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group.\n\n See Also\n --------\n GroupBy.__iter__ : Generator yielding sequence for each group.\n \"\"\"\n self._set_binner()\n return super().__iter__()\n\n @property\n def obj(self):\n return self.groupby.obj\n\n @property\n def ax(self):\n return self.groupby.ax\n\n @property\n def _typ(self) -> str:\n \"\"\"\n Masquerade for compat as a Series or a DataFrame.\n \"\"\"\n if isinstance(self._selected_obj, ABCSeries):\n return \"series\"\n return \"dataframe\"\n\n @property\n def _from_selection(self) -> bool:\n \"\"\"\n Is the resampling from a DataFrame column or MultiIndex level.\n \"\"\"\n # upsampling and PeriodIndex resampling do not work\n # with selection, this state used to catch and raise an error\n return self.groupby is not None and (\n self.groupby.key is not None or self.groupby.level is not None\n )\n\n def _convert_obj(self, obj):\n \"\"\"\n Provide any conversions for the object in order to correctly handle.\n\n Parameters\n ----------\n obj : the object to be resampled\n\n Returns\n -------\n obj : converted object\n \"\"\"\n return obj._consolidate()\n\n def _get_binner_for_time(self):\n raise AbstractMethodError(self)\n\n def _set_binner(self):\n \"\"\"\n Setup our binners.\n\n Cache these as we are an immutable object\n \"\"\"\n if self.binner is None:\n self.binner, self.grouper = self._get_binner()\n\n def _get_binner(self):\n \"\"\"\n Create the BinGrouper, assume that self.set_grouper(obj)\n has already been called.\n \"\"\"\n binner, bins, binlabels = self._get_binner_for_time()\n assert len(bins) == len(binlabels)\n bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)\n return binner, bin_grouper\n\n def _assure_grouper(self):\n 
\"\"\"\n Make sure that we are creating our binner & grouper.\n \"\"\"\n self._set_binner()\n\n @Substitution(\n klass=\"Resampler\",\n examples=\"\"\"\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4]},\n ... index=pd.date_range('2012-08-02', periods=4))\n >>> df\n A\n 2012-08-02 1\n 2012-08-03 2\n 2012-08-04 3\n 2012-08-05 4\n\n To get the difference between each 2-day period's maximum and minimum\n value in one pass, you can do\n\n >>> df.resample('2D').pipe(lambda x: x.max() - x.min())\n A\n 2012-08-02 1\n 2012-08-04 1\"\"\",\n )\n @Appender(_pipe_template)\n def pipe(\n self,\n func: Union[Callable[..., T], Tuple[Callable[..., T], str]],\n *args,\n **kwargs,\n ) -> T:\n return super().pipe(func, *args, **kwargs)\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n DataFrame.groupby.aggregate : Aggregate using callable, string, dict,\n or list of string/callables.\n DataFrame.resample.transform : Transforms the Series on each group\n based on the given function.\n DataFrame.aggregate: Aggregate using one or more\n operations over the specified axis.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1,2,3,4,5],\n index=pd.date_range('20130101', periods=5,freq='s'))\n 2013-01-01 00:00:00 1\n 2013-01-01 00:00:01 2\n 2013-01-01 00:00:02 3\n 2013-01-01 00:00:03 4\n 2013-01-01 00:00:04 5\n Freq: S, dtype: int64\n\n >>> r = s.resample('2s')\n DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,\n label=left, convention=start]\n\n >>> r.agg(np.sum)\n 2013-01-01 00:00:00 3\n 2013-01-01 00:00:02 7\n 2013-01-01 00:00:04 5\n Freq: 2S, dtype: int64\n\n >>> r.agg(['sum','mean','max'])\n sum mean max\n 2013-01-01 00:00:00 3 1.5 2\n 2013-01-01 00:00:02 7 3.5 4\n 2013-01-01 00:00:04 5 5.0 5\n\n >>> r.agg({'result' : lambda x: x.mean() / x.std(),\n 'total' : np.sum})\n total result\n 2013-01-01 00:00:00 3 2.121320\n 2013-01-01 00:00:02 7 4.949747\n 2013-01-01 00:00:04 5 NaN\n \"\"\"\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n klass=\"DataFrame\",\n axis=\"\",\n )\n def aggregate(self, func, *args, **kwargs):\n\n self._set_binner()\n result, how = aggregate(self, func, *args, **kwargs)\n if result is None:\n how = func\n grouper = None\n result = self._groupby_and_aggregate(how, grouper, *args, **kwargs)\n\n result = self._apply_loffset(result)\n return result\n\n agg = aggregate\n apply = aggregate\n\n def transform(self, arg, *args, **kwargs):\n \"\"\"\n Call function producing a like-indexed Series on each group and return\n a Series with the transformed values.\n\n Parameters\n ----------\n arg : function\n To apply to each group. Should return a Series with the same index.\n\n Returns\n -------\n transformed : Series\n\n Examples\n --------\n >>> resampled.transform(lambda x: (x - x.mean()) / x.std())\n \"\"\"\n return self._selected_obj.groupby(self.groupby).transform(arg, *args, **kwargs)\n\n def _downsample(self, f):\n raise AbstractMethodError(self)\n\n def _upsample(self, f, limit=None, fill_value=None):\n raise AbstractMethodError(self)\n\n def _gotitem(self, key, ndim: int, subset=None):\n \"\"\"\n Sub-classes to define. 
Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : {1, 2}\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n self._set_binner()\n grouper = self.grouper\n if subset is None:\n subset = self.obj\n grouped = get_groupby(subset, by=None, grouper=grouper, axis=self.axis)\n\n # try the key selection\n try:\n return grouped[key]\n except KeyError:\n return grouped\n\n def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):\n \"\"\"\n Re-evaluate the obj with a groupby aggregation.\n \"\"\"\n if grouper is None:\n self._set_binner()\n grouper = self.grouper\n\n obj = self._selected_obj\n\n grouped = get_groupby(obj, by=None, grouper=grouper, axis=self.axis)\n\n try:\n if isinstance(obj, ABCDataFrame) and callable(how):\n # Check if the function is reducing or not.\n result = grouped._aggregate_item_by_item(how, *args, **kwargs)\n else:\n result = grouped.aggregate(how, *args, **kwargs)\n except (DataError, AttributeError, KeyError):\n # we have a non-reducing function; try to evaluate\n # alternatively we want to evaluate only a column of the input\n result = grouped.apply(how, *args, **kwargs)\n except ValueError as err:\n if \"Must produce aggregated value\" in str(err):\n # raised in _aggregate_named\n pass\n elif \"len(index) != len(labels)\" in str(err):\n # raised in libgroupby validation\n pass\n elif \"No objects to concatenate\" in str(err):\n # raised in concat call\n # In tests this is reached via either\n # _apply_to_column_groupbys (ohlc) or DataFrameGroupBy.nunique\n pass\n else:\n raise\n\n # we have a non-reducing function\n # try to evaluate\n result = grouped.apply(how, *args, **kwargs)\n\n result = self._apply_loffset(result)\n return self._wrap_result(result)\n\n def _apply_loffset(self, result):\n \"\"\"\n If loffset is set, offset the result index.\n\n This is NOT an idempotent routine, it will be applied\n exactly once to the result.\n\n Parameters\n ----------\n result : Series or DataFrame\n the result of resample\n \"\"\"\n # pandas\\core\\resample.py:409: error: Cannot determine type of\n # 'loffset' [has-type]\n needs_offset = (\n isinstance(\n self.loffset, # type: ignore[has-type]\n (DateOffset, timedelta, np.timedelta64),\n )\n and isinstance(result.index, DatetimeIndex)\n and len(result.index) > 0\n )\n\n if needs_offset:\n # pandas\\core\\resample.py:415: error: Cannot determine type of\n # 'loffset' [has-type]\n result.index = result.index + self.loffset # type: ignore[has-type]\n\n self.loffset = None\n return result\n\n def _get_resampler_for_grouping(self, groupby, **kwargs):\n \"\"\"\n Return the correct class for resampling with groupby.\n \"\"\"\n return self._resampler_for_grouping(self, groupby=groupby, **kwargs)\n\n def _wrap_result(self, result):\n \"\"\"\n Potentially wrap any results.\n \"\"\"\n if isinstance(result, ABCSeries) and self._selection is not None:\n result.name = self._selection\n\n if isinstance(result, ABCSeries) and result.empty:\n obj = self.obj\n # When index is all NaT, result is empty but index is not\n result.index = _asfreq_compat(obj.index[:0], freq=self.freq)\n result.name = getattr(obj, \"name\", None)\n\n return result\n\n def pad(self, limit=None):\n \"\"\"\n Forward fill the values.\n\n Parameters\n ----------\n limit : int, optional\n Limit of how many values to fill.\n\n Returns\n -------\n An upsampled Series.\n\n See Also\n --------\n Series.fillna: Fill NA/NaN values using the specified method.\n DataFrame.fillna: Fill 
NA/NaN values using the specified method.\n \"\"\"\n return self._upsample(\"pad\", limit=limit)\n\n ffill = pad\n\n def nearest(self, limit=None):\n \"\"\"\n Resample by using the nearest value.\n\n When resampling data, missing values may appear (e.g., when the\n resampling frequency is higher than the original frequency).\n The `nearest` method will replace ``NaN`` values that appeared in\n the resampled data with the value from the nearest member of the\n sequence, based on the index value.\n Missing values that existed in the original data will not be modified.\n If `limit` is given, fill only this many values in each direction for\n each of the original values.\n\n Parameters\n ----------\n limit : int, optional\n Limit of how many values to fill.\n\n Returns\n -------\n Series or DataFrame\n An upsampled Series or DataFrame with ``NaN`` values filled with\n their nearest value.\n\n See Also\n --------\n backfill : Backward fill the new missing values in the resampled data.\n pad : Forward fill ``NaN`` values.\n\n Examples\n --------\n >>> s = pd.Series([1, 2],\n ... index=pd.date_range('20180101',\n ... periods=2,\n ... freq='1h'))\n >>> s\n 2018-01-01 00:00:00 1\n 2018-01-01 01:00:00 2\n Freq: H, dtype: int64\n\n >>> s.resample('15min').nearest()\n 2018-01-01 00:00:00 1\n 2018-01-01 00:15:00 1\n 2018-01-01 00:30:00 2\n 2018-01-01 00:45:00 2\n 2018-01-01 01:00:00 2\n Freq: 15T, dtype: int64\n\n Limit the number of upsampled values imputed by the nearest:\n\n >>> s.resample('15min').nearest(limit=1)\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:15:00 1.0\n 2018-01-01 00:30:00 NaN\n 2018-01-01 00:45:00 2.0\n 2018-01-01 01:00:00 2.0\n Freq: 15T, dtype: float64\n \"\"\"\n return self._upsample(\"nearest\", limit=limit)\n\n def backfill(self, limit=None):\n \"\"\"\n Backward fill the new missing values in the resampled data.\n\n In statistics, imputation is the process of replacing missing data with\n substituted values [1]_. When resampling data, missing values may\n appear (e.g., when the resampling frequency is higher than the original\n frequency). The backward fill will replace NaN values that appeared in\n the resampled data with the next value in the original sequence.\n Missing values that existed in the original data will not be modified.\n\n Parameters\n ----------\n limit : int, optional\n Limit of how many values to fill.\n\n Returns\n -------\n Series, DataFrame\n An upsampled Series or DataFrame with backward filled NaN values.\n\n See Also\n --------\n bfill : Alias of backfill.\n fillna : Fill NaN values using the specified method, which can be\n 'backfill'.\n nearest : Fill NaN values with nearest neighbor starting from center.\n pad : Forward fill NaN values.\n Series.fillna : Fill NaN values in the Series using the\n specified method, which can be 'backfill'.\n DataFrame.fillna : Fill NaN values in the DataFrame using the\n specified method, which can be 'backfill'.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)\n\n Examples\n --------\n Resampling a Series:\n\n >>> s = pd.Series([1, 2, 3],\n ... 
index=pd.date_range('20180101', periods=3, freq='h'))\n >>> s\n 2018-01-01 00:00:00 1\n 2018-01-01 01:00:00 2\n 2018-01-01 02:00:00 3\n Freq: H, dtype: int64\n\n >>> s.resample('30min').backfill()\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 2\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 3\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n >>> s.resample('15min').backfill(limit=2)\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:15:00 NaN\n 2018-01-01 00:30:00 2.0\n 2018-01-01 00:45:00 2.0\n 2018-01-01 01:00:00 2.0\n 2018-01-01 01:15:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 01:45:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 15T, dtype: float64\n\n Resampling a DataFrame that has missing values:\n\n >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},\n ... index=pd.date_range('20180101', periods=3,\n ... freq='h'))\n >>> df\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 02:00:00 6.0 5\n\n >>> df.resample('30min').backfill()\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 00:30:00 NaN 3\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 01:30:00 6.0 5\n 2018-01-01 02:00:00 6.0 5\n\n >>> df.resample('15min').backfill(limit=2)\n a b\n 2018-01-01 00:00:00 2.0 1.0\n 2018-01-01 00:15:00 NaN NaN\n 2018-01-01 00:30:00 NaN 3.0\n 2018-01-01 00:45:00 NaN 3.0\n 2018-01-01 01:00:00 NaN 3.0\n 2018-01-01 01:15:00 NaN NaN\n 2018-01-01 01:30:00 6.0 5.0\n 2018-01-01 01:45:00 6.0 5.0\n 2018-01-01 02:00:00 6.0 5.0\n \"\"\"\n return self._upsample(\"backfill\", limit=limit)\n\n bfill = backfill\n\n def fillna(self, method, limit=None):\n \"\"\"\n Fill missing values introduced by upsampling.\n\n In statistics, imputation is the process of replacing missing data with\n substituted values [1]_. When resampling data, missing values may\n appear (e.g., when the resampling frequency is higher than the original\n frequency).\n\n Missing values that existed in the original data will\n not be modified.\n\n Parameters\n ----------\n method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}\n Method to use for filling holes in resampled data\n\n * 'pad' or 'ffill': use previous valid observation to fill gap\n (forward fill).\n * 'backfill' or 'bfill': use next valid observation to fill gap.\n * 'nearest': use nearest valid observation to fill gap.\n\n limit : int, optional\n Limit of how many consecutive missing values to fill.\n\n Returns\n -------\n Series or DataFrame\n An upsampled Series or DataFrame with missing values filled.\n\n See Also\n --------\n backfill : Backward fill NaN values in the resampled data.\n pad : Forward fill NaN values in the resampled data.\n nearest : Fill NaN values in the resampled data\n with nearest neighbor starting from center.\n interpolate : Fill NaN values using interpolation.\n Series.fillna : Fill NaN values in the Series using the\n specified method, which can be 'bfill' and 'ffill'.\n DataFrame.fillna : Fill NaN values in the DataFrame using the\n specified method, which can be 'bfill' and 'ffill'.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)\n\n Examples\n --------\n Resampling a Series:\n\n >>> s = pd.Series([1, 2, 3],\n ... 
index=pd.date_range('20180101', periods=3, freq='h'))\n >>> s\n 2018-01-01 00:00:00 1\n 2018-01-01 01:00:00 2\n 2018-01-01 02:00:00 3\n Freq: H, dtype: int64\n\n Without filling the missing values you get:\n\n >>> s.resample(\"30min\").asfreq()\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 NaN\n 2018-01-01 01:00:00 2.0\n 2018-01-01 01:30:00 NaN\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n >>> s.resample('30min').fillna(\"backfill\")\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 2\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 3\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n >>> s.resample('15min').fillna(\"backfill\", limit=2)\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:15:00 NaN\n 2018-01-01 00:30:00 2.0\n 2018-01-01 00:45:00 2.0\n 2018-01-01 01:00:00 2.0\n 2018-01-01 01:15:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 01:45:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 15T, dtype: float64\n\n >>> s.resample('30min').fillna(\"pad\")\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 1\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 2\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n >>> s.resample('30min').fillna(\"nearest\")\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 2\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 3\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n Missing values present before the upsampling are not affected.\n\n >>> sm = pd.Series([1, None, 3],\n ... index=pd.date_range('20180101', periods=3, freq='h'))\n >>> sm\n 2018-01-01 00:00:00 1.0\n 2018-01-01 01:00:00 NaN\n 2018-01-01 02:00:00 3.0\n Freq: H, dtype: float64\n\n >>> sm.resample('30min').fillna('backfill')\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 NaN\n 2018-01-01 01:00:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n >>> sm.resample('30min').fillna('pad')\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 1.0\n 2018-01-01 01:00:00 NaN\n 2018-01-01 01:30:00 NaN\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n >>> sm.resample('30min').fillna('nearest')\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 NaN\n 2018-01-01 01:00:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n DataFrame resampling is done column-wise. All the same options are\n available.\n\n >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},\n ... index=pd.date_range('20180101', periods=3,\n ... 
freq='h'))\n >>> df\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 02:00:00 6.0 5\n\n >>> df.resample('30min').fillna(\"bfill\")\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 00:30:00 NaN 3\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 01:30:00 6.0 5\n 2018-01-01 02:00:00 6.0 5\n \"\"\"\n return self._upsample(method, limit=limit)\n\n @doc(NDFrame.interpolate, **_shared_docs_kwargs)\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction=\"forward\",\n limit_area=None,\n downcast=None,\n **kwargs,\n ):\n \"\"\"\n Interpolate values according to different methods.\n \"\"\"\n result = self._upsample(\"asfreq\")\n return result.interpolate(\n method=method,\n axis=axis,\n limit=limit,\n inplace=inplace,\n limit_direction=limit_direction,\n limit_area=limit_area,\n downcast=downcast,\n **kwargs,\n )\n\n def asfreq(self, fill_value=None):\n \"\"\"\n Return the values at the new freq, essentially a reindex.\n\n Parameters\n ----------\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n Returns\n -------\n DataFrame or Series\n Values at the specified freq.\n\n See Also\n --------\n Series.asfreq: Convert TimeSeries to specified frequency.\n DataFrame.asfreq: Convert TimeSeries to specified frequency.\n \"\"\"\n return self._upsample(\"asfreq\", fill_value=fill_value)\n\n def std(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute standard deviation of groups, excluding missing values.\n\n Parameters\n ----------\n ddof : int, default 1\n Degrees of freedom.\n\n Returns\n -------\n DataFrame or Series\n Standard deviation of values within each group.\n \"\"\"\n nv.validate_resampler_func(\"std\", args, kwargs)\n # pandas\\core\\resample.py:850: error: Unexpected keyword argument\n # \"ddof\" for \"_downsample\" [call-arg]\n return self._downsample(\"std\", ddof=ddof) # type: ignore[call-arg]\n\n def var(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute variance of groups, excluding missing values.\n\n Parameters\n ----------\n ddof : int, default 1\n Degrees of freedom.\n\n Returns\n -------\n DataFrame or Series\n Variance of values within each group.\n \"\"\"\n nv.validate_resampler_func(\"var\", args, kwargs)\n # pandas\\core\\resample.py:867: error: Unexpected keyword argument\n # \"ddof\" for \"_downsample\" [call-arg]\n return self._downsample(\"var\", ddof=ddof) # type: ignore[call-arg]\n\n @doc(GroupBy.size)\n def size(self):\n result = self._downsample(\"size\")\n if not len(self.ax):\n from pandas import Series\n\n if self._selected_obj.ndim == 1:\n name = self._selected_obj.name\n else:\n name = None\n result = Series([], index=result.index, dtype=\"int64\", name=name)\n return result\n\n @doc(GroupBy.count)\n def count(self):\n result = self._downsample(\"count\")\n if not len(self.ax):\n if self._selected_obj.ndim == 1:\n result = type(self._selected_obj)(\n [], index=result.index, dtype=\"int64\", name=self._selected_obj.name\n )\n else:\n from pandas import DataFrame\n\n result = DataFrame(\n [], index=result.index, columns=result.columns, dtype=\"int64\"\n )\n\n return result\n\n def quantile(self, q=0.5, **kwargs):\n \"\"\"\n Return value at the given quantile.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n\n Returns\n -------\n DataFrame or Series\n Quantile of values within each group.\n\n See Also\n --------\n Series.quantile\n Return a series, where the index is q and the values are the quantiles.\n DataFrame.quantile\n Return a DataFrame, where the columns are the columns of self,\n and the values are the quantiles.\n DataFrameGroupBy.quantile\n Return a DataFrame, where the coulmns are groupby columns,\n and the values are its quantiles.\n \"\"\"\n # pandas\\core\\resample.py:920: error: Unexpected keyword argument \"q\"\n # for \"_downsample\" [call-arg]\n\n # pandas\\core\\resample.py:920: error: Too many arguments for\n # \"_downsample\" [call-arg]\n return self._downsample(\"quantile\", q=q, **kwargs) # type: ignore[call-arg]\n\n\n# downsample methods\nfor method in [\"sum\", \"prod\", \"min\", \"max\", \"first\", \"last\"]:\n\n def f(self, _method=method, min_count=0, *args, **kwargs):\n nv.validate_resampler_func(_method, args, kwargs)\n return self._downsample(_method, min_count=min_count)\n\n f.__doc__ = getattr(GroupBy, method).__doc__\n setattr(Resampler, method, f)\n\n\n# downsample methods\nfor method in [\"mean\", \"sem\", \"median\", \"ohlc\"]:\n\n def g(self, _method=method, *args, **kwargs):\n nv.validate_resampler_func(_method, args, kwargs)\n return self._downsample(_method)\n\n g.__doc__ = getattr(GroupBy, method).__doc__\n setattr(Resampler, method, g)\n\n\n# series only methods\nfor method in [\"nunique\"]:\n\n def h(self, _method=method):\n return self._downsample(_method)\n\n h.__doc__ = getattr(SeriesGroupBy, method).__doc__\n setattr(Resampler, method, h)\n\n\nclass _GroupByMixin(GotItemMixin):\n \"\"\"\n Provide the groupby facilities.\n \"\"\"\n\n def __init__(self, obj, *args, **kwargs):\n\n parent = kwargs.pop(\"parent\", None)\n groupby = kwargs.pop(\"groupby\", None)\n if parent is None:\n parent = obj\n\n # initialize our GroupByMixin object with\n # the resampler attributes\n for attr in self._attributes:\n setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))\n\n # pandas\\core\\resample.py:972: error: Too many arguments for \"__init__\"\n # of \"object\" [call-arg]\n super().__init__(None) # type: ignore[call-arg]\n self._groupby = groupby\n self._groupby.mutated = True\n self._groupby.grouper.mutated = True\n self.groupby = copy.copy(parent.groupby)\n\n @no_type_check\n def _apply(self, f, grouper=None, *args, **kwargs):\n \"\"\"\n Dispatch to _upsample; we are stripping all of the _upsample kwargs and\n performing the original function call on the grouped object.\n \"\"\"\n\n def func(x):\n x = self._shallow_copy(x, groupby=self.groupby)\n\n if isinstance(f, str):\n return getattr(x, f)(**kwargs)\n\n return x.apply(f, *args, **kwargs)\n\n result = self._groupby.apply(func)\n return self._wrap_result(result)\n\n _upsample = _apply\n _downsample = _apply\n _groupby_and_aggregate = _apply\n\n\nclass DatetimeIndexResampler(Resampler):\n @property\n def _resampler_for_grouping(self):\n return DatetimeIndexResamplerGroupby\n\n def _get_binner_for_time(self):\n\n # this is how we are actually creating the bins\n if self.kind == \"period\":\n return self.groupby._get_time_period_bins(self.ax)\n return self.groupby._get_time_bins(self.ax)\n\n def _downsample(self, how, **kwargs):\n \"\"\"\n Downsample the cython defined function.\n\n Parameters\n ----------\n how : string / cython mapped function\n **kwargs : kw args passed to how function\n 
\"\"\"\n self._set_binner()\n how = self._get_cython_func(how) or how\n ax = self.ax\n obj = self._selected_obj\n\n if not len(ax):\n # reset to the new freq\n obj = obj.copy()\n obj.index = obj.index._with_freq(self.freq)\n assert obj.index.freq == self.freq, (obj.index.freq, self.freq)\n return obj\n\n # do we have a regular frequency\n # pandas\\core\\resample.py:1037: error: \"BaseGrouper\" has no\n # attribute \"binlabels\" [attr-defined]\n if (\n (ax.freq is not None or ax.inferred_freq is not None)\n and len(self.grouper.binlabels) > len(ax) # type: ignore[attr-defined]\n and how is None\n ):\n\n # let's do an asfreq\n return self.asfreq()\n\n # we are downsampling\n # we want to call the actual grouper method here\n result = obj.groupby(self.grouper, axis=self.axis).aggregate(how, **kwargs)\n\n result = self._apply_loffset(result)\n return self._wrap_result(result)\n\n def _adjust_binner_for_upsample(self, binner):\n \"\"\"\n Adjust our binner when upsampling.\n\n The range of a new index should not be outside specified range\n \"\"\"\n if self.closed == \"right\":\n binner = binner[1:]\n else:\n binner = binner[:-1]\n return binner\n\n def _upsample(self, method, limit=None, fill_value=None):\n \"\"\"\n Parameters\n ----------\n method : string {'backfill', 'bfill', 'pad',\n 'ffill', 'asfreq'} method for upsampling\n limit : int, default None\n Maximum size gap to fill when reindexing\n fill_value : scalar, default None\n Value to use for missing values\n\n See Also\n --------\n .fillna: Fill NA/NaN values using the specified method.\n\n \"\"\"\n self._set_binner()\n if self.axis:\n raise AssertionError(\"axis must be 0\")\n if self._from_selection:\n raise ValueError(\n \"Upsampling from level= or on= selection \"\n \"is not supported, use .set_index(...) \"\n \"to explicitly set index to datetime-like\"\n )\n\n ax = self.ax\n obj = self._selected_obj\n binner = self.binner\n res_index = self._adjust_binner_for_upsample(binner)\n\n # if we have the same frequency as our axis, then we are equal sampling\n if (\n limit is None\n and to_offset(ax.inferred_freq) == self.freq\n and len(obj) == len(res_index)\n ):\n result = obj.copy()\n result.index = res_index\n else:\n result = obj.reindex(\n res_index, method=method, limit=limit, fill_value=fill_value\n )\n\n result = self._apply_loffset(result)\n return self._wrap_result(result)\n\n def _wrap_result(self, result):\n result = super()._wrap_result(result)\n\n # we may have a different kind that we were asked originally\n # convert if needed\n if self.kind == \"period\" and not isinstance(result.index, PeriodIndex):\n result.index = result.index.to_period(self.freq)\n return result\n\n\nclass DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):\n \"\"\"\n Provides a resample of a groupby implementation\n \"\"\"\n\n @property\n def _constructor(self):\n return DatetimeIndexResampler\n\n\nclass PeriodIndexResampler(DatetimeIndexResampler):\n @property\n def _resampler_for_grouping(self):\n return PeriodIndexResamplerGroupby\n\n def _get_binner_for_time(self):\n if self.kind == \"timestamp\":\n return super()._get_binner_for_time()\n return self.groupby._get_period_bins(self.ax)\n\n def _convert_obj(self, obj):\n obj = super()._convert_obj(obj)\n\n if self._from_selection:\n # see GH 14008, GH 12871\n msg = (\n \"Resampling from level= or on= selection \"\n \"with a PeriodIndex is not currently supported, \"\n \"use .set_index(...) 
to explicitly set index\"\n )\n raise NotImplementedError(msg)\n\n if self.loffset is not None:\n # Cannot apply loffset/timedelta to PeriodIndex -> convert to\n # timestamps\n self.kind = \"timestamp\"\n\n # convert to timestamp\n if self.kind == \"timestamp\":\n obj = obj.to_timestamp(how=self.convention)\n\n return obj\n\n def _downsample(self, how, **kwargs):\n \"\"\"\n Downsample the cython defined function.\n\n Parameters\n ----------\n how : string / cython mapped function\n **kwargs : kw args passed to how function\n \"\"\"\n # we may need to actually resample as if we are timestamps\n if self.kind == \"timestamp\":\n return super()._downsample(how, **kwargs)\n\n how = self._get_cython_func(how) or how\n ax = self.ax\n\n if is_subperiod(ax.freq, self.freq):\n # Downsampling\n return self._groupby_and_aggregate(how, grouper=self.grouper, **kwargs)\n elif is_superperiod(ax.freq, self.freq):\n if how == \"ohlc\":\n # GH #13083\n # upsampling to subperiods is handled as an asfreq, which works\n # for pure aggregating/reducing methods\n # OHLC reduces along the time dimension, but creates multiple\n # values for each period -> handle by _groupby_and_aggregate()\n return self._groupby_and_aggregate(how, grouper=self.grouper)\n return self.asfreq()\n elif ax.freq == self.freq:\n return self.asfreq()\n\n raise IncompatibleFrequency(\n f\"Frequency {ax.freq} cannot be resampled to {self.freq}, \"\n \"as they are not sub or super periods\"\n )\n\n def _upsample(self, method, limit=None, fill_value=None):\n \"\"\"\n Parameters\n ----------\n method : string {'backfill', 'bfill', 'pad', 'ffill'}\n Method for upsampling.\n limit : int, default None\n Maximum size gap to fill when reindexing.\n fill_value : scalar, default None\n Value to use for missing values.\n\n See Also\n --------\n .fillna: Fill NA/NaN values using the specified method.\n\n \"\"\"\n # we may need to actually resample as if we are timestamps\n if self.kind == \"timestamp\":\n return super()._upsample(method, limit=limit, fill_value=fill_value)\n\n self._set_binner()\n ax = self.ax\n obj = self.obj\n new_index = self.binner\n\n # Start vs. 
end of period\n memb = ax.asfreq(self.freq, how=self.convention)\n\n # Get the fill indexer\n indexer = memb.get_indexer(new_index, method=method, limit=limit)\n return self._wrap_result(\n _take_new_index(obj, indexer, new_index, axis=self.axis)\n )\n\n\nclass PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):\n \"\"\"\n Provides a resample of a groupby implementation.\n \"\"\"\n\n @property\n def _constructor(self):\n return PeriodIndexResampler\n\n\nclass TimedeltaIndexResampler(DatetimeIndexResampler):\n @property\n def _resampler_for_grouping(self):\n return TimedeltaIndexResamplerGroupby\n\n def _get_binner_for_time(self):\n return self.groupby._get_time_delta_bins(self.ax)\n\n def _adjust_binner_for_upsample(self, binner):\n \"\"\"\n Adjust our binner when upsampling.\n\n The range of a new index is allowed to be greater than original range\n so we don't need to change the length of a binner, GH 13022\n \"\"\"\n return binner\n\n\nclass TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):\n \"\"\"\n Provides a resample of a groupby implementation.\n \"\"\"\n\n @property\n def _constructor(self):\n return TimedeltaIndexResampler\n\n\ndef get_resampler(obj, kind=None, **kwds):\n \"\"\"\n Create a TimeGrouper and return our resampler.\n \"\"\"\n tg = TimeGrouper(**kwds)\n return tg._get_resampler(obj, kind=kind)\n\n\nget_resampler.__doc__ = Resampler.__doc__\n\n\ndef get_resampler_for_grouping(\n groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs\n):\n \"\"\"\n Return our appropriate resampler when grouping as well.\n \"\"\"\n # .resample uses 'on' similar to how .groupby uses 'key'\n kwargs[\"key\"] = kwargs.pop(\"on\", None)\n\n tg = TimeGrouper(freq=rule, **kwargs)\n resampler = tg._get_resampler(groupby.obj, kind=kind)\n return resampler._get_resampler_for_grouping(groupby=groupby)\n\n\nclass TimeGrouper(Grouper):\n \"\"\"\n Custom groupby class for time-interval grouping.\n\n Parameters\n ----------\n freq : pandas date offset or offset alias for identifying bin edges\n closed : closed end of interval; 'left' or 'right'\n label : interval boundary to use for labeling; 'left' or 'right'\n convention : {'start', 'end', 'e', 's'}\n If axis is PeriodIndex\n \"\"\"\n\n _attributes = Grouper._attributes + (\n \"closed\",\n \"label\",\n \"how\",\n \"loffset\",\n \"kind\",\n \"convention\",\n \"origin\",\n \"offset\",\n )\n\n def __init__(\n self,\n freq=\"Min\",\n closed: Optional[str] = None,\n label: Optional[str] = None,\n how=\"mean\",\n axis=0,\n fill_method=None,\n limit=None,\n loffset=None,\n kind: Optional[str] = None,\n convention: Optional[str] = None,\n base: Optional[int] = None,\n origin: Union[str, TimestampConvertibleTypes] = \"start_day\",\n offset: Optional[TimedeltaConvertibleTypes] = None,\n **kwargs,\n ):\n # Check for correctness of the keyword arguments which would\n # otherwise silently use the default if misspelled\n if label not in {None, \"left\", \"right\"}:\n raise ValueError(f\"Unsupported value {label} for `label`\")\n if closed not in {None, \"left\", \"right\"}:\n raise ValueError(f\"Unsupported value {closed} for `closed`\")\n if convention not in {None, \"start\", \"end\", \"e\", \"s\"}:\n raise ValueError(f\"Unsupported value {convention} for `convention`\")\n\n freq = to_offset(freq)\n\n end_types = {\"M\", \"A\", \"Q\", \"BM\", \"BA\", \"BQ\", \"W\"}\n rule = freq.rule_code\n if rule in end_types or (\"-\" in rule and rule[: rule.find(\"-\")] in end_types):\n if closed is None:\n closed = 
\"right\"\n if label is None:\n label = \"right\"\n else:\n # The backward resample sets ``closed`` to ``'right'`` by default\n # since the last value should be considered as the edge point for\n # the last bin. When origin in \"end\" or \"end_day\", the value for a\n # specific ``Timestamp`` index stands for the resample result from\n # the current ``Timestamp`` minus ``freq`` to the current\n # ``Timestamp`` with a right close.\n if origin in [\"end\", \"end_day\"]:\n if closed is None:\n closed = \"right\"\n if label is None:\n label = \"right\"\n else:\n if closed is None:\n closed = \"left\"\n if label is None:\n label = \"left\"\n\n self.closed = closed\n self.label = label\n self.kind = kind\n\n self.convention = convention or \"E\"\n self.convention = self.convention.lower()\n\n self.how = how\n self.fill_method = fill_method\n self.limit = limit\n\n if origin in (\"epoch\", \"start\", \"start_day\", \"end\", \"end_day\"):\n self.origin = origin\n else:\n try:\n self.origin = Timestamp(origin)\n except Exception as e:\n raise ValueError(\n \"'origin' should be equal to 'epoch', 'start', 'start_day', \"\n \"'end', 'end_day' or \"\n f\"should be a Timestamp convertible type. Got '{origin}' instead.\"\n ) from e\n\n try:\n self.offset = Timedelta(offset) if offset is not None else None\n except Exception as e:\n raise ValueError(\n \"'offset' should be a Timedelta convertible type. \"\n f\"Got '{offset}' instead.\"\n ) from e\n\n # always sort time groupers\n kwargs[\"sort\"] = True\n\n # Handle deprecated arguments since v1.1.0 of `base` and `loffset` (GH #31809)\n if base is not None and offset is not None:\n raise ValueError(\"'offset' and 'base' cannot be present at the same time\")\n\n if base and isinstance(freq, Tick):\n # this conversion handle the default behavior of base and the\n # special case of GH #10530. 
Indeed in case when dealing with\n # a TimedeltaIndex base was treated as a 'pure' offset even though\n # the default behavior of base was equivalent of a modulo on\n # freq_nanos.\n self.offset = Timedelta(base * freq.nanos // freq.n)\n\n if isinstance(loffset, str):\n loffset = to_offset(loffset)\n self.loffset = loffset\n\n super().__init__(freq=freq, axis=axis, **kwargs)\n\n def _get_resampler(self, obj, kind=None):\n \"\"\"\n Return my resampler or raise if we have an invalid axis.\n\n Parameters\n ----------\n obj : input object\n kind : string, optional\n 'period','timestamp','timedelta' are valid\n\n Returns\n -------\n a Resampler\n\n Raises\n ------\n TypeError if incompatible axis\n\n \"\"\"\n self._set_grouper(obj)\n\n ax = self.ax\n if isinstance(ax, DatetimeIndex):\n return DatetimeIndexResampler(obj, groupby=self, kind=kind, axis=self.axis)\n elif isinstance(ax, PeriodIndex) or kind == \"period\":\n return PeriodIndexResampler(obj, groupby=self, kind=kind, axis=self.axis)\n elif isinstance(ax, TimedeltaIndex):\n return TimedeltaIndexResampler(obj, groupby=self, axis=self.axis)\n\n raise TypeError(\n \"Only valid with DatetimeIndex, \"\n \"TimedeltaIndex or PeriodIndex, \"\n f\"but got an instance of '{type(ax).__name__}'\"\n )\n\n def _get_grouper(self, obj, validate: bool = True):\n # create the resampler and return our binner\n r = self._get_resampler(obj)\n r._set_binner()\n return r.binner, r.grouper, r.obj\n\n def _get_time_bins(self, ax):\n if not isinstance(ax, DatetimeIndex):\n raise TypeError(\n \"axis must be a DatetimeIndex, but got \"\n f\"an instance of {type(ax).__name__}\"\n )\n\n if len(ax) == 0:\n binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)\n return binner, [], labels\n\n first, last = _get_timestamp_range_edges(\n ax.min(),\n ax.max(),\n self.freq,\n closed=self.closed,\n origin=self.origin,\n offset=self.offset,\n )\n # GH #12037\n # use first/last directly instead of call replace() on them\n # because replace() will swallow the nanosecond part\n # thus last bin maybe slightly before the end if the end contains\n # nanosecond part and lead to `Values falls after last bin` error\n # GH 25758: If DST lands at midnight (e.g. 
'America/Havana'), user feedback\n # has noted that ambiguous=True provides the most sensible result\n binner = labels = date_range(\n freq=self.freq,\n start=first,\n end=last,\n tz=ax.tz,\n name=ax.name,\n ambiguous=True,\n nonexistent=\"shift_forward\",\n )\n\n ax_values = ax.asi8\n binner, bin_edges = self._adjust_bin_edges(binner, ax_values)\n\n # general version, knowing nothing about relative frequencies\n bins = lib.generate_bins_dt64(\n ax_values, bin_edges, self.closed, hasnans=ax.hasnans\n )\n\n if self.closed == \"right\":\n labels = binner\n if self.label == \"right\":\n labels = labels[1:]\n elif self.label == \"right\":\n labels = labels[1:]\n\n if ax.hasnans:\n binner = binner.insert(0, NaT)\n labels = labels.insert(0, NaT)\n\n # if we end up with more labels than bins\n # adjust the labels\n # GH4076\n if len(bins) < len(labels):\n labels = labels[: len(bins)]\n\n return binner, bins, labels\n\n def _adjust_bin_edges(self, binner, ax_values):\n # Some hacks for > daily data, see #1471, #1458, #1483\n\n if self.freq != \"D\" and is_superperiod(self.freq, \"D\"):\n if self.closed == \"right\":\n # GH 21459, GH 9119: Adjust the bins relative to the wall time\n bin_edges = binner.tz_localize(None)\n bin_edges = bin_edges + timedelta(1) - Nano(1)\n bin_edges = bin_edges.tz_localize(binner.tz).asi8\n else:\n bin_edges = binner.asi8\n\n # intraday values on last day\n if bin_edges[-2] > ax_values.max():\n bin_edges = bin_edges[:-1]\n binner = binner[:-1]\n else:\n bin_edges = binner.asi8\n return binner, bin_edges\n\n def _get_time_delta_bins(self, ax):\n if not isinstance(ax, TimedeltaIndex):\n raise TypeError(\n \"axis must be a TimedeltaIndex, but got \"\n f\"an instance of {type(ax).__name__}\"\n )\n\n if not len(ax):\n binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)\n return binner, [], labels\n\n start, end = ax.min(), ax.max()\n labels = binner = timedelta_range(\n start=start, end=end, freq=self.freq, name=ax.name\n )\n\n end_stamps = labels + self.freq\n bins = ax.searchsorted(end_stamps, side=\"left\")\n\n if self.offset:\n # GH 10530 & 31809\n labels += self.offset\n if self.loffset:\n # GH 33498\n labels += self.loffset\n\n return binner, bins, labels\n\n def _get_time_period_bins(self, ax: DatetimeIndex):\n if not isinstance(ax, DatetimeIndex):\n raise TypeError(\n \"axis must be a DatetimeIndex, but got \"\n f\"an instance of {type(ax).__name__}\"\n )\n\n freq = self.freq\n\n if not len(ax):\n binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name)\n return binner, [], labels\n\n labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)\n\n end_stamps = (labels + freq).asfreq(freq, \"s\").to_timestamp()\n if ax.tz:\n end_stamps = end_stamps.tz_localize(ax.tz)\n bins = ax.searchsorted(end_stamps, side=\"left\")\n\n return binner, bins, labels\n\n def _get_period_bins(self, ax: PeriodIndex):\n if not isinstance(ax, PeriodIndex):\n raise TypeError(\n \"axis must be a PeriodIndex, but got \"\n f\"an instance of {type(ax).__name__}\"\n )\n\n memb = ax.asfreq(self.freq, how=self.convention)\n\n # NaT handling as in pandas._lib.lib.generate_bins_dt64()\n nat_count = 0\n if memb.hasnans:\n nat_count = np.sum(memb._isnan)\n memb = memb[~memb._isnan]\n\n if not len(memb):\n # index contains no valid (non-NaT) values\n bins = np.array([], dtype=np.int64)\n binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)\n if len(ax) > 0:\n # index is all NaT\n binner, bins, labels = _insert_nat_bin(binner, bins, 
labels, len(ax))\n return binner, bins, labels\n\n freq_mult = self.freq.n\n\n start = ax.min().asfreq(self.freq, how=self.convention)\n end = ax.max().asfreq(self.freq, how=\"end\")\n bin_shift = 0\n\n if isinstance(self.freq, Tick):\n # GH 23882 & 31809: get adjusted bin edge labels with 'origin'\n # and 'origin' support. This call only makes sense if the freq is a\n # Tick since offset and origin are only used in those cases.\n # Not doing this check could create an extra empty bin.\n p_start, end = _get_period_range_edges(\n start,\n end,\n self.freq,\n closed=self.closed,\n origin=self.origin,\n offset=self.offset,\n )\n\n # Get offset for bin edge (not label edge) adjustment\n start_offset = Period(start, self.freq) - Period(p_start, self.freq)\n bin_shift = start_offset.n % freq_mult\n start = p_start\n\n labels = binner = period_range(\n start=start, end=end, freq=self.freq, name=ax.name\n )\n\n i8 = memb.asi8\n\n # when upsampling to subperiods, we need to generate enough bins\n expected_bins_count = len(binner) * freq_mult\n i8_extend = expected_bins_count - (i8[-1] - i8[0])\n rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)\n rng += freq_mult\n # adjust bin edge indexes to account for base\n rng -= bin_shift\n\n # Wrap in PeriodArray for PeriodArray.searchsorted\n prng = type(memb._data)(rng, dtype=memb.dtype)\n bins = memb.searchsorted(prng, side=\"left\")\n\n if nat_count > 0:\n binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count)\n\n return binner, bins, labels\n\n\ndef _take_new_index(obj, indexer, new_index, axis=0):\n\n if isinstance(obj, ABCSeries):\n new_values = algos.take_1d(obj._values, indexer)\n return obj._constructor(new_values, index=new_index, name=obj.name)\n elif isinstance(obj, ABCDataFrame):\n if axis == 1:\n raise NotImplementedError(\"axis 1 is not supported\")\n return obj._constructor(\n obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)\n )\n else:\n raise ValueError(\"'obj' should be either a Series or a DataFrame\")\n\n\ndef _get_timestamp_range_edges(\n first, last, freq, closed=\"left\", origin=\"start_day\", offset=None\n):\n \"\"\"\n Adjust the `first` Timestamp to the preceding Timestamp that resides on\n the provided offset. Adjust the `last` Timestamp to the following\n Timestamp that resides on the provided offset. Input Timestamps that\n already reside on the offset will be adjusted depending on the type of\n offset and the `closed` parameter.\n\n Parameters\n ----------\n first : pd.Timestamp\n The beginning Timestamp of the range to be adjusted.\n last : pd.Timestamp\n The ending Timestamp of the range to be adjusted.\n freq : pd.DateOffset\n The dateoffset to which the Timestamps will be adjusted.\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed.\n origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'\n The timestamp on which to adjust the grouping. 
The timezone of origin must\n match the timezone of the index.\n If a timestamp is not used, these values are also supported:\n\n - 'epoch': `origin` is 1970-01-01\n - 'start': `origin` is the first value of the timeseries\n - 'start_day': `origin` is the first day at midnight of the timeseries\n offset : pd.Timedelta, default is None\n An offset timedelta added to the origin.\n\n Returns\n -------\n A tuple of length 2, containing the adjusted pd.Timestamp objects.\n \"\"\"\n if isinstance(freq, Tick):\n index_tz = first.tz\n if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):\n raise ValueError(\"The origin must have the same timezone as the index.\")\n elif origin == \"epoch\":\n # set the epoch based on the timezone to have similar bins results when\n # resampling on the same kind of indexes on different timezones\n origin = Timestamp(\"1970-01-01\", tz=index_tz)\n\n if isinstance(freq, Day):\n # _adjust_dates_anchored assumes 'D' means 24H, but first/last\n # might contain a DST transition (23H, 24H, or 25H).\n # So \"pretend\" the dates are naive when adjusting the endpoints\n first = first.tz_localize(None)\n last = last.tz_localize(None)\n if isinstance(origin, Timestamp):\n origin = origin.tz_localize(None)\n\n first, last = _adjust_dates_anchored(\n first, last, freq, closed=closed, origin=origin, offset=offset\n )\n if isinstance(freq, Day):\n first = first.tz_localize(index_tz)\n last = last.tz_localize(index_tz)\n else:\n first = first.normalize()\n last = last.normalize()\n\n if closed == \"left\":\n first = Timestamp(freq.rollback(first))\n else:\n first = Timestamp(first - freq)\n\n last = Timestamp(last + freq)\n\n return first, last\n\n\ndef _get_period_range_edges(\n first, last, freq, closed=\"left\", origin=\"start_day\", offset=None\n):\n \"\"\"\n Adjust the provided `first` and `last` Periods to the respective Period of\n the given offset that encompasses them.\n\n Parameters\n ----------\n first : pd.Period\n The beginning Period of the range to be adjusted.\n last : pd.Period\n The ending Period of the range to be adjusted.\n freq : pd.DateOffset\n The freq to which the Periods will be adjusted.\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed.\n origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day'\n The timestamp on which to adjust the grouping. 
The timezone of origin must\n match the timezone of the index.\n\n If a timestamp is not used, these values are also supported:\n\n - 'epoch': `origin` is 1970-01-01\n - 'start': `origin` is the first value of the timeseries\n - 'start_day': `origin` is the first day at midnight of the timeseries\n offset : pd.Timedelta, default is None\n An offset timedelta added to the origin.\n\n Returns\n -------\n A tuple of length 2, containing the adjusted pd.Period objects.\n \"\"\"\n if not all(isinstance(obj, Period) for obj in [first, last]):\n raise TypeError(\"'first' and 'last' must be instances of type Period\")\n\n # GH 23882\n first = first.to_timestamp()\n last = last.to_timestamp()\n adjust_first = not freq.is_on_offset(first)\n adjust_last = freq.is_on_offset(last)\n\n first, last = _get_timestamp_range_edges(\n first, last, freq, closed=closed, origin=origin, offset=offset\n )\n\n first = (first + int(adjust_first) * freq).to_period(freq)\n last = (last - int(adjust_last) * freq).to_period(freq)\n return first, last\n\n\ndef _insert_nat_bin(\n binner: PeriodIndex, bins: np.ndarray, labels: PeriodIndex, nat_count: int\n) -> Tuple[PeriodIndex, np.ndarray, PeriodIndex]:\n # NaT handling as in pandas._lib.lib.generate_bins_dt64()\n # shift bins by the number of NaT\n assert nat_count > 0\n bins += nat_count\n bins = np.insert(bins, 0, nat_count)\n binner = binner.insert(0, NaT)\n labels = labels.insert(0, NaT)\n return binner, bins, labels\n\n\ndef _adjust_dates_anchored(\n first, last, freq, closed=\"right\", origin=\"start_day\", offset=None\n):\n # First and last offsets should be calculated from the start day to fix an\n # error cause by resampling across multiple days when a one day period is\n # not a multiple of the frequency. See GH 8683\n # To handle frequencies that are not multiple or divisible by a day we let\n # the possibility to define a fixed origin timestamp. See GH 31809\n origin_nanos = 0 # origin == \"epoch\"\n if origin == \"start_day\":\n origin_nanos = first.normalize().value\n elif origin == \"start\":\n origin_nanos = first.value\n elif isinstance(origin, Timestamp):\n origin_nanos = origin.value\n elif origin in [\"end\", \"end_day\"]:\n origin = last if origin == \"end\" else last.ceil(\"D\")\n sub_freq_times = (origin.value - first.value) // freq.nanos\n if closed == \"left\":\n sub_freq_times += 1\n first = origin - sub_freq_times * freq\n origin_nanos = first.value\n origin_nanos += offset.value if offset else 0\n\n # GH 10117 & GH 19375. 
If first and last contain timezone information,\n # Perform the calculation in UTC in order to avoid localizing on an\n # Ambiguous or Nonexistent time.\n first_tzinfo = first.tzinfo\n last_tzinfo = last.tzinfo\n if first_tzinfo is not None:\n first = first.tz_convert(\"UTC\")\n if last_tzinfo is not None:\n last = last.tz_convert(\"UTC\")\n\n foffset = (first.value - origin_nanos) % freq.nanos\n loffset = (last.value - origin_nanos) % freq.nanos\n\n if closed == \"right\":\n if foffset > 0:\n # roll back\n fresult = first.value - foffset\n else:\n fresult = first.value - freq.nanos\n\n if loffset > 0:\n # roll forward\n lresult = last.value + (freq.nanos - loffset)\n else:\n # already the end of the road\n lresult = last.value\n else: # closed == 'left'\n if foffset > 0:\n fresult = first.value - foffset\n else:\n # start of the road\n fresult = first.value\n\n if loffset > 0:\n # roll forward\n lresult = last.value + (freq.nanos - loffset)\n else:\n lresult = last.value + freq.nanos\n fresult = Timestamp(fresult)\n lresult = Timestamp(lresult)\n if first_tzinfo is not None:\n fresult = fresult.tz_localize(\"UTC\").tz_convert(first_tzinfo)\n if last_tzinfo is not None:\n lresult = lresult.tz_localize(\"UTC\").tz_convert(last_tzinfo)\n return fresult, lresult\n\n\ndef asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):\n \"\"\"\n Utility frequency conversion method for Series/DataFrame.\n\n See :meth:`pandas.NDFrame.asfreq` for full documentation.\n \"\"\"\n if isinstance(obj.index, PeriodIndex):\n if method is not None:\n raise NotImplementedError(\"'method' argument is not supported\")\n\n if how is None:\n how = \"E\"\n\n new_obj = obj.copy()\n new_obj.index = obj.index.asfreq(freq, how=how)\n\n elif len(obj.index) == 0:\n new_obj = obj.copy()\n\n new_obj.index = _asfreq_compat(obj.index, freq)\n else:\n dti = date_range(obj.index[0], obj.index[-1], freq=freq)\n dti.name = obj.index.name\n new_obj = obj.reindex(dti, method=method, fill_value=fill_value)\n if normalize:\n new_obj.index = new_obj.index.normalize()\n\n return new_obj\n\n\ndef _asfreq_compat(index, freq):\n \"\"\"\n Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex.\n\n Parameters\n ----------\n index : PeriodIndex, DatetimeIndex, or TimedeltaIndex\n freq : DateOffset\n\n Returns\n -------\n same type as index\n \"\"\"\n if len(index) != 0:\n # This should never be reached, always checked by the caller\n raise ValueError(\n \"Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex\"\n )\n new_index: Index\n if isinstance(index, PeriodIndex):\n new_index = index.asfreq(freq=freq)\n elif isinstance(index, DatetimeIndex):\n new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name)\n elif isinstance(index, TimedeltaIndex):\n new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name)\n else: # pragma: no cover\n raise TypeError(type(index))\n return new_index\n" ]
[ [ "pandas.core.indexes.datetimes.DatetimeIndex", "pandas._libs.tslibs.Timestamp", "pandas.Series", "pandas._libs.lib.generate_bins_dt64", "pandas.DataFrame", "pandas.core.indexes.datetimes.date_range", "pandas.core.indexes.timedeltas.timedelta_range", "pandas.core.indexes.period.PeriodIndex", "pandas.util._decorators.Substitution", "pandas._libs.tslibs.to_offset", "pandas.errors.AbstractMethodError", "numpy.arange", "pandas.core.indexes.period.period_range", "pandas.core.groupby.groupby.get_groupby", "pandas.tseries.frequencies.is_superperiod", "pandas.core.indexes.timedeltas.TimedeltaIndex", "numpy.insert", "pandas.util._decorators.Appender", "numpy.array", "pandas._libs.tslibs.Period", "numpy.sum", "pandas.tseries.frequencies.is_subperiod", "pandas._libs.tslibs.Timedelta", "pandas.core.algorithms.take_1d", "pandas.tseries.offsets.Nano", "pandas.core.aggregation.aggregate", "pandas.compat.numpy.function.validate_resampler_func", "pandas._libs.tslibs.IncompatibleFrequency", "pandas.core.groupby.ops.BinGrouper", "pandas.util._decorators.doc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rishabhjha708/Pyostie
[ "47091748bc746920f386952c1a6c1002340e3224" ]
[ "pyostie/parsers.py" ]
[ "import os\nimport docx2txt\nimport xlrd\nimport csv\nimport cv2\nimport pytesseract\nfrom PIL import Image\nfrom pkgutil import find_loader\nimport PyPDF2\nimport pdfplumber\nfrom pptx import Presentation\nfrom pdf2image import convert_from_path\nimport speech_recognition as sr\n\nfrom pyostie.convert import *\nfrom pyostie.insights_ext import *\n\npandas_installed = find_loader(\"pandas\") is not None\nif pandas_installed:\n import pandas as pd\n\na = pd.DataFrame()\nocr_dict_output = []\n\n\nclass DOCXParser:\n\n def __init__(self, filename, img_dir):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n \"\"\"\n self.file = filename\n self.img_dir = img_dir\n\n def extract_docx(self):\n \"\"\"\n\n Returns\n -------\n DOCXParser for Docx files.\n extract text and write images in img_dir\n\n \"\"\"\n output = docx2txt.process(self.file, self.img_dir)\n return output\n\n\nclass XLSXParser:\n\n def __init__(self, filename):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n \"\"\"\n self.file = filename\n\n def extract_xlsx(self):\n \"\"\"\n\n Returns\n -------\n XLSXParser for XLSX and XLS files.\n \"\"\"\n out_list = []\n book = xlrd.open_workbook(self.file)\n for val in range(len(book.sheet_names())):\n sheet = book.sheet_by_index(val)\n for res in range(sheet.nrows):\n output = \" \" + \" \".join(str(val_) for val_ in (sheet.row_values(res)))\n out_list.append(output)\n return out_list\n\n\nclass CSVParser:\n\n def __init__(self, filename, delimiter):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n delimiter : By default ','. Can be changed if any other delimiter is needed.\n\n \"\"\"\n self.file = filename\n self.delimiter = delimiter\n\n def extract_csv(self):\n \"\"\"\n\n Returns\n -------\n CSVParser for csv files.\n\n \"\"\"\n with open(self.file) as file:\n output = csv.reader(file, delimiter=self.delimiter)\n return ' '.join([' '.join(row) for row in output])\n\n\nclass ImageParser:\n\n def __init__(self, filename, tess_path=None):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n tess_path : The path to the tesseract cmd (Only for windows.)\n \"\"\"\n self.file = filename\n self.path = tess_path\n\n def extract_image(self):\n \"\"\"\n\n Returns\n -------\n ImageParser for Image formats.\n\n \"\"\"\n out_list = []\n if self.path is not None:\n pytesseract.pytesseract.tesseract_cmd = self.path\n img = Image.open(self.file)\n text = pytesseract.image_to_string(img)\n out_list.append(text)\n else:\n img = Image.open(self.file)\n text = pytesseract.image_to_string(img)\n out_list.append(text)\n return out_list\n\n\nclass PDFParser:\n\n def __init__(self, filename, insights=False):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n insights : True by default. 
False if the dataframe is not needed.\n \"\"\"\n self.file = filename\n self.insights = insights\n\n def extract_pypdf2(self):\n \"\"\"\n\n Returns\n -------\n PDFParser for pdf files.\n\n \"\"\"\n contents = []\n text = ' '\n pdfFileObj = open(self.file, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pdfPages = pdfReader.getNumPages()\n if pdfPages == 1:\n for val in range(pdfReader.numPages):\n pageObject = pdfReader.getPage(val)\n text = text + pageObject.extractText()\n contents.append(text)\n if self.insights:\n conv = conversion(self.file)\n __conv = conv.convert()\n insights = generate_insights(__conv, df)\n __insights = insights.generate_df()\n remove_files(__conv)\n return __insights, contents\n else:\n return contents\n\n if pdfPages >= 2:\n pdf_multipage_df = pd.DataFrame()\n for val in range(pdfReader.numPages):\n pageObject = pdfReader.getPage(val)\n text = text + pageObject.extractText()\n contents.append(text)\n if self.insights:\n df_list = []\n pdffile = self.file\n os.mkdir(\"tempdir\")\n tempdir = \"tempdir/\"\n if os.path.isdir(tempdir):\n shutil.rmtree(tempdir)\n os.mkdir(\"tempdir/converted_files\")\n images = convert_from_path(pdffile)\n converted_files = tempdir + \"converted_files/\"\n for val in range(len(images)):\n images[val - 1].save(converted_files + str(val) + \".jpg\", \"JPEG\")\n jpgfiles = os.listdir(converted_files)\n output_files = [converted_files + os.sep + _val for _val in jpgfiles if _val[-3:].upper() == \"JPG\"]\n for val in range(len(output_files)):\n insights = generate_insights(output_files[val], df)\n __insights = insights.generate_df()\n page = [val] * len(__insights)\n __insights[\"page_num\"] = page\n df_list.append(__insights)\n pdf_multipage_df = pd.concat([pdf_multipage_df, __insights])\n shutil.rmtree(tempdir)\n df1 = pdf_multipage_df.reset_index()\n df1 = df1.drop(\"index\", 1)\n return df1, contents\n else:\n return contents\n\n def extract_pdfplumber(self):\n \"\"\"\n\n Returns\n -------\n Works as an alternative for PyPDF2.\n \"\"\"\n out_list = []\n with pdfplumber.open(self.file) as pdf:\n for val in range(len(pdf.pages)):\n page = pdf.pages[val]\n output = page.extract_text()\n out_list.append(output)\n return out_list\n\n\nclass TXTParser:\n\n def __init__(self, filename):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n \"\"\"\n self.file = filename\n\n def extract_txt(self):\n \"\"\"\n\n Returns\n -------\n TXTParser for txt, log or no extension files.\n \"\"\"\n with open(self.file) as file:\n return file.read()\n\n\nclass PPTXParser:\n\n def __init__(self, filename):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n \"\"\"\n self.file = filename\n\n def extract_pptx(self):\n \"\"\"\n\n Returns\n -------\n PPTXParser for pptx files.\n \"\"\"\n text = []\n paper = Presentation(self.file)\n for slide in paper.slides:\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n for paragraph in shape.text_frame.paragraphs:\n stripped = paragraph.text.strip()\n if stripped:\n text.append(paragraph.text)\n return text\n\n\nclass speech_to_text:\n\n def __init__(self, filename):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n \"\"\"\n self.file = filename\n\n def extract_audio(self):\n \"\"\"\n\n Returns\n -------\n speech_to_text for mp3, wav files.\n \"\"\"\n output_audio = []\n os.mkdir(\"tempdir\")\n dst_file = mp3_to_wav(self.file, \"tempdir/sample.wav\", format=\"wav\")\n output = 
sr.AudioFile(dst_file)\n recog = sr.Recognizer()\n with output as source:\n audio = recog.record(source)\n output_audio.append(recog.recognize_google(audio))\n shutil.rmtree(\"tempdir\")\n return output_audio\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
htahir1/ml-metadata
[ "9d76a4ba515a21f68273a3734b1d06572aea9802" ]
[ "ml_metadata/metadata_store/metadata_store.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A python API to the metadata store.\n\nProvides access to a SQLite3 or a MySQL backend. Artifact types and execution\ntypes can be created on the fly.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nfrom absl import logging\nimport grpc\nfrom typing import List, Optional, Sequence, Text, Tuple, Union\n\nfrom ml_metadata.metadata_store import pywrap_tf_metadata_store_serialized as metadata_store_serialized\nfrom ml_metadata.proto import metadata_store_pb2\nfrom ml_metadata.proto import metadata_store_service_pb2\nfrom ml_metadata.proto import metadata_store_service_pb2_grpc\nfrom tensorflow.python.framework import errors\n\n\n# See _make_specific_exception in tensorflow.python.framework.errors\ndef _make_exception(message, error_code):\n try:\n exc_type = errors.exception_type_from_error_code(error_code)\n return exc_type(None, None, message)\n except KeyError:\n return errors.UnknownError(None, None, message, error_code)\n\n\nclass MetadataStore(object):\n \"\"\"A store for the artifact metadata.\"\"\"\n\n def __init__(self,\n config: Union[metadata_store_pb2.ConnectionConfig,\n metadata_store_pb2.MetadataStoreClientConfig]):\n \"\"\"Initialize the MetadataStore.\n\n MetadataStore can directly connect to either the metadata database or\n the metadata store server.\n\n Args:\n config: metadata_store_pb2.ConnectionConfig or\n metadata_store_pb2.MetadataStoreClientConfig. 
Configuration to\n connect to the database or the metadata store server.\n \"\"\"\n if isinstance(config, metadata_store_pb2.ConnectionConfig):\n self._using_db_connection = True\n self._metadata_store = metadata_store_serialized.CreateMetadataStore(\n config.SerializeToString())\n # If you remove this line, errors are not thrown correctly.\n logging.log(logging.INFO, 'MetadataStore with DB connection initialized')\n return\n if not isinstance(config, metadata_store_pb2.MetadataStoreClientConfig):\n raise ValueError('MetadataStore is expecting either'\n 'metadata_store_pb2.ConnectionConfig or'\n 'metadata_store_pb2.MetadataStoreClientConfig')\n self._using_db_connection = False\n target = ':'.join([config.host, str(config.port)])\n channel = self._get_channel(config, target)\n self._metadata_store_stub = (metadata_store_service_pb2_grpc.\n MetadataStoreServiceStub(channel))\n logging.log(logging.INFO, 'MetadataStore with gRPC connection initialized')\n\n def _get_channel(self, config: metadata_store_pb2.MetadataStoreClientConfig,\n target: Text):\n \"\"\"Configures the channel, which could be secure or insecure.\n\n It returns a channel that can be specified to be secure or insecure,\n depending on whether ssl_config is specified in the config.\n\n Args:\n config: metadata_store_pb2.MetadataStoreClientConfig.\n target: target host with port.\n\n Returns:\n an initialized gRPC channel.\n \"\"\"\n if not config.HasField('ssl_config'):\n return grpc.insecure_channel(target)\n\n root_certificates = None\n private_key = None\n certificate_chain = None\n if config.ssl_config.HasField('custom_ca'):\n root_certificates = config.ssl_config.custom_ca\n if config.ssl_config.HasField('client_key'):\n private_key = config.ssl_config.client_key\n if config.ssl_config.HasField('server_cert'):\n certificate_chain = config.ssl_config.server_cert\n credentials = grpc.ssl_channel_credentials(root_certificates, private_key,\n certificate_chain)\n return grpc.secure_channel(target, credentials)\n\n def __del__(self):\n if self._using_db_connection:\n metadata_store_serialized.DestroyMetadataStore(self._metadata_store)\n\n def _call(self, method_name, request, response) -> None:\n \"\"\"Calls method using SWIG or gRPC.\n\n Args:\n method_name: the method to call in SWIG or gRPC.\n request: a protobuf message, serialized and sent to the method.\n response: a protobuf message, filled from the return value of the method.\n \"\"\"\n if self._using_db_connection:\n swig_method = getattr(metadata_store_serialized, method_name)\n self._swig_call(swig_method, request, response)\n else:\n grpc_method = getattr(self._metadata_store_stub, method_name)\n response.CopyFrom(grpc_method(request))\n\n def _swig_call(self, method, request, response) -> None:\n \"\"\"Calls method, serializing and deserializing inputs and outputs.\n\n Note that this does not check the types of request and response.\n\n This can throw a variety of Python errors, based upon the underlying\n tensorflow error returned in MetadataStore.\n See _CODE_TO_EXCEPTION_CLASS in tensorflow/python/framework/errors_impl.py\n for the mapping.\n\n Args:\n method: the method to call in SWIG.\n request: a protobuf message, serialized and sent to the method.\n response: a protobuf message, filled from the return value of the method.\n\n Raises:\n Error: whatever tensorflow error is returned by the method.\n \"\"\"\n [response_str, error_message, status_code] = method(\n self._metadata_store, request.SerializeToString())\n if status_code != 0:\n raise 
_make_exception(error_message, status_code)\n response.ParseFromString(response_str)\n\n def put_artifacts(\n self, artifacts: Sequence[metadata_store_pb2.Artifact]) -> List[int]:\n \"\"\"Inserts or updates artifacts in the database.\n\n If an artifact_id is specified for an artifact, it is an update.\n If an artifact_id is unspecified, it will insert a new artifact.\n For new artifacts, type must be specified.\n For old artifacts, type must be unchanged or unspecified.\n\n Args:\n artifacts: A list of artifacts to insert or update.\n\n Returns:\n A list of artifact ids index-aligned with the input.\n \"\"\"\n request = metadata_store_service_pb2.PutArtifactsRequest()\n for x in artifacts:\n request.artifacts.add().CopyFrom(x)\n response = metadata_store_service_pb2.PutArtifactsResponse()\n\n self._call('PutArtifacts', request, response)\n result = []\n for x in response.artifact_ids:\n result.append(x)\n return result\n\n def put_artifact_type(self,\n artifact_type: metadata_store_pb2.ArtifactType,\n can_add_fields: bool = False,\n can_delete_fields: bool = False,\n all_fields_match: bool = True) -> int:\n \"\"\"Inserts or updates an artifact type.\n\n Similar to put execution/context type, if no artifact type exists in the\n database with the given name, it creates a new artifact type (and a\n database).\n\n If an artifact type with the same name already exists (let's call it\n old_artifact_type), then the impact depends upon the other options.\n\n If artifact_type == old_artifact_type, then nothing happens.\n\n Otherwise, if there is a field where artifact_type and old_artifact_type\n have different types, then it fails.\n\n Otherwise, if can_add_fields is False and artifact_type has a field\n old_artifact_type is missing, then it fails.\n\n Otherwise, if all_fields_match is True and old_artifact_type has a field\n artifact_type is missing, then it fails.\n\n Otherwise, if can_delete_fields is True and old_artifact_type has a field\n artifact_type is missing, then it deletes that field.\n\n Otherwise, it does nothing.\n\n Args:\n artifact_type: the type to add or update.\n can_add_fields: if true, you can add fields with this operation. If false,\n then if there are more fields in artifact_type than in the database, the\n call fails.\n can_delete_fields: if true, you can remove fields with this operation. If\n false, then if there are more fields in the current type, they are not\n removed.\n all_fields_match: if true, all fields must match, and the method fails if\n they are not the same.\n\n Returns:\n the type_id of the response.\n\n Raises:\n InvalidArgumentError: If a constraint is violated.\n \"\"\"\n request = metadata_store_service_pb2.PutArtifactTypeRequest()\n request.can_add_fields = can_add_fields\n request.can_delete_fields = can_delete_fields\n request.all_fields_match = all_fields_match\n request.artifact_type.CopyFrom(artifact_type)\n response = metadata_store_service_pb2.PutArtifactTypeResponse()\n\n self._call('PutArtifactType', request, response)\n return response.type_id\n\n def create_artifact_with_type(\n self, artifact: metadata_store_pb2.Artifact,\n artifact_type: metadata_store_pb2.ArtifactType) -> int:\n \"\"\"Creates an artifact with a type.\n\n This first gets the type (or creates it if it does not exist), and then\n puts the artifact into the database with that type.\n\n The type_id should not be specified in the artifact (it is ignored).\n\n Note that this is not a transaction!\n 1. First, the type is created as a transaction.\n 2. 
Then the artifact is created as a transaction.\n\n Args:\n artifact: the artifact to create (no id or type_id)\n artifact_type: the type of the new artifact (no id)\n\n Returns:\n the artifact ID of the resulting type.\n\n Raises:\n InvalidArgument: if the type is not the same as one with the same name\n already in the database.\n \"\"\"\n type_id = self.put_artifact_type(artifact_type)\n artifact_copy = metadata_store_pb2.Artifact()\n artifact_copy.CopyFrom(artifact)\n artifact_copy.type_id = type_id\n return self.put_artifacts([artifact_copy])[0]\n\n def put_executions(\n self, executions: Sequence[metadata_store_pb2.Execution]) -> List[int]:\n \"\"\"Inserts or updates executions in the database.\n\n If an execution_id is specified for an execution, it is an update.\n If an execution_id is unspecified, it will insert a new execution.\n For new executions, type must be specified.\n For old executions, type must be unchanged or unspecified.\n\n Args:\n executions: A list of executions to insert or update.\n\n Returns:\n A list of execution ids index-aligned with the input.\n \"\"\"\n request = metadata_store_service_pb2.PutExecutionsRequest()\n for x in executions:\n request.executions.add().CopyFrom(x)\n response = metadata_store_service_pb2.PutExecutionsResponse()\n\n self._call('PutExecutions', request, response)\n result = []\n for x in response.execution_ids:\n result.append(x)\n return result\n\n def put_execution_type(self,\n execution_type: metadata_store_pb2.ExecutionType,\n can_add_fields: bool = False,\n can_delete_fields: bool = False,\n all_fields_match: bool = True) -> int:\n \"\"\"Inserts or updates an execution type.\n\n Similar to put artifact/context type, if no execution type exists in the\n database with the given name, it creates a new execution type (and a\n database).\n\n If an execution type with the same name already exists (let's call it\n old_execution_type), then the impact depends upon the other options.\n\n If execution_type == old_execution_type, then nothing happens.\n\n Otherwise, if there is a field where execution_type and old_execution_type\n have different types, then it fails.\n\n Otherwise, if can_add_fields is False and execution_type has a field\n old_execution_type is missing, then it fails.\n\n Otherwise, if all_fields_match is True and old_execution_type has a field\n execution_type is missing, then it fails.\n\n Otherwise, if can_delete_fields is True and old_execution_type has a field\n execution_type is missing, then it deletes that field.\n\n Otherwise, it does nothing.\n Args:\n execution_type: the type to add or update.\n can_add_fields: if true, you can add fields with this operation. If false,\n then if there are more fields in execution_type than in the database,\n the call fails.\n can_delete_fields: if true, you can remove fields with this operation. 
If\n false, then if there are more fields.\n all_fields_match: if true, all fields must match, and the method fails if\n they are not the same.\n\n Returns:\n the type id of the type.\n Raises:\n ValueError: If a constraint is violated.\n \"\"\"\n request = metadata_store_service_pb2.PutExecutionTypeRequest()\n request.can_add_fields = can_add_fields\n request.can_delete_fields = can_delete_fields\n request.all_fields_match = all_fields_match\n request.execution_type.CopyFrom(execution_type)\n response = metadata_store_service_pb2.PutExecutionTypeResponse()\n\n self._call('PutExecutionType', request, response)\n return response.type_id\n\n def put_contexts(self,\n contexts: Sequence[metadata_store_pb2.Context]) -> List[int]:\n \"\"\"Inserts or updates contexts in the database.\n\n If an context_id is specified for an context, it is an update.\n If an context_id is unspecified, it will insert a new context.\n For new contexts, type must be specified.\n For old contexts, type must be unchanged or unspecified.\n The name of a context cannot be empty, and it should be unique among\n contexts of the same ContextType.\n\n Args:\n contexts: A list of contexts to insert or update.\n\n Returns:\n A list of context ids index-aligned with the input.\n \"\"\"\n request = metadata_store_service_pb2.PutContextsRequest()\n for x in contexts:\n request.contexts.add().CopyFrom(x)\n response = metadata_store_service_pb2.PutContextsResponse()\n\n self._call('PutContexts', request, response)\n result = []\n for x in response.context_ids:\n result.append(x)\n return result\n\n def put_context_type(self,\n context_type: metadata_store_pb2.ContextType,\n can_add_fields: bool = False,\n can_delete_fields: bool = False,\n all_fields_match: bool = True) -> int:\n \"\"\"Inserts or updates a context type.\n\n Similar to put artifact/execution type, if no context type exists in the\n database with the given name, it creates a new context type (and a\n database).\n\n If a context type with the same name already exists (let's call it\n old_context_type), then the impact depends upon the other options.\n\n If context_type == old_context_type, then nothing happens.\n\n Otherwise, if there is a field where context_type and old_context_type\n have different types, then it fails.\n\n Otherwise, if can_add_fields is False and context_type has a field\n old_context_type is missing, then it fails.\n\n Otherwise, if all_fields_match is True and old_context_type has a field\n context_type is missing, then it fails.\n\n Otherwise, if can_delete_fields is True and old_context_type has a field\n context_type is missing, then it deletes that field.\n\n Otherwise, it does nothing.\n\n Args:\n context_type: the type to add or update.\n can_add_fields: if true, you can add fields with this operation. If false,\n then if there are more fields in context_type than in the database, the\n call fails.\n can_delete_fields: if true, you can remove fields with this operation. 
If\n false, then if there are more fields in the current type, they are not\n removed.\n all_fields_match: if true, all fields must match, and the method fails if\n they are not the same.\n\n Returns:\n the type_id of the response.\n\n Raises:\n InvalidArgumentError: If a constraint is violated.\n \"\"\"\n request = metadata_store_service_pb2.PutContextTypeRequest()\n request.can_add_fields = can_add_fields\n request.can_delete_fields = can_delete_fields\n request.all_fields_match = all_fields_match\n request.context_type.CopyFrom(context_type)\n response = metadata_store_service_pb2.PutContextTypeResponse()\n self._call('PutContextType', request, response)\n return response.type_id\n\n def put_events(self, events: Sequence[metadata_store_pb2.Event]) -> None:\n \"\"\"Inserts events in the database.\n\n The execution_id and artifact_id must already exist.\n Once created, events cannot be modified.\n\n Args:\n events: A list of events to insert.\n \"\"\"\n request = metadata_store_service_pb2.PutEventsRequest()\n for x in events:\n request.events.add().CopyFrom(x)\n response = metadata_store_service_pb2.PutEventsResponse()\n\n self._call('PutEvents', request, response)\n\n def put_execution(\n self, execution: metadata_store_pb2.Execution,\n artifact_and_events: Sequence[Tuple[metadata_store_pb2.Artifact,\n Optional[metadata_store_pb2.Event]]]\n ) -> Tuple[int, List[int]]:\n \"\"\"Inserts or updates an Execution with related artifacts and events.\n\n If an execution_id or artifact_id is specified, it is an update, otherwise\n it does an insertion.\n\n Args:\n execution: The execution to be created or updated.\n artifact_and_events: a pair of Artifact and Event that the execution uses\n or generates. The event's execution id or artifact id can be empty, as\n the artifact or execution may not be stored beforehand. 
If given, the\n ids must match with the paired Artifact and the input execution.\n\n Returns:\n the execution id, and the list of artifact's id.\n \"\"\"\n request = metadata_store_service_pb2.PutExecutionRequest()\n request.execution.CopyFrom(execution)\n for pair in artifact_and_events:\n artifact_and_event = request.artifact_event_pairs.add()\n artifact_and_event.artifact.CopyFrom(pair[0])\n if len(pair) == 2 and pair[1] is not None:\n artifact_and_event.event.CopyFrom(pair[1])\n response = metadata_store_service_pb2.PutExecutionResponse()\n\n self._call('PutExecution', request, response)\n artifact_ids = []\n for x in response.artifact_ids:\n artifact_ids.append(x)\n return response.execution_id, artifact_ids\n\n def get_artifacts_by_type(\n self, type_name: Text) -> List[metadata_store_pb2.Artifact]:\n \"\"\"Gets all the artifacts of a given type.\"\"\"\n request = metadata_store_service_pb2.GetArtifactsByTypeRequest()\n request.type_name = type_name\n response = metadata_store_service_pb2.GetArtifactsByTypeResponse()\n\n self._call('GetArtifactsByType', request, response)\n result = []\n for x in response.artifacts:\n result.append(x)\n return result\n\n def get_artifacts_by_uri(self,\n uri: Text) -> List[metadata_store_pb2.Artifact]:\n \"\"\"Gets all the artifacts of a given uri.\"\"\"\n request = metadata_store_service_pb2.GetArtifactsByURIRequest()\n request.uri = uri\n response = metadata_store_service_pb2.GetArtifactsByURIResponse()\n\n self._call('GetArtifactsByURI', request, response)\n result = []\n for x in response.artifacts:\n result.append(x)\n return result\n\n def get_artifacts_by_id(\n self, artifact_ids: Sequence[int]) -> List[metadata_store_pb2.Artifact]:\n \"\"\"Gets all artifacts with matching ids.\n\n The result is not index-aligned: if an id is not found, it is not returned.\n\n Args:\n artifact_ids: A list of artifact ids to retrieve.\n\n Returns:\n Artifacts with matching ids.\n \"\"\"\n request = metadata_store_service_pb2.GetArtifactsByIDRequest()\n for x in artifact_ids:\n request.artifact_ids.append(x)\n response = metadata_store_service_pb2.GetArtifactsByIDResponse()\n\n self._call('GetArtifactsByID', request, response)\n result = []\n for x in response.artifacts:\n result.append(x)\n return result\n\n def get_artifact_type(\n self, type_name: Text) -> Optional[metadata_store_pb2.ArtifactType]:\n \"\"\"Gets an artifact type by name.\n\n Args:\n type_name: the type with that name.\n\n Returns:\n The type with name type_name.\n\n Raises:\n tensorflow.errors.NotFoundError: if no type exists\n tensorflow.errors.InternalError: if query execution fails\n \"\"\"\n request = metadata_store_service_pb2.GetArtifactTypeRequest()\n request.type_name = type_name\n response = metadata_store_service_pb2.GetArtifactTypeResponse()\n\n self._call('GetArtifactType', request, response)\n return response.artifact_type\n\n def get_artifact_types(self) -> List[metadata_store_pb2.ArtifactType]:\n \"\"\"Gets all artifact types.\n\n Returns:\n A list of all known ArtifactTypes.\n\n Raises:\n tensorflow.errors.InternalError: if query execution fails\n \"\"\"\n request = metadata_store_service_pb2.GetArtifactTypesRequest()\n response = metadata_store_service_pb2.GetArtifactTypesResponse()\n\n self._call('GetArtifactTypes', request, response)\n result = []\n for x in response.artifact_types:\n result.append(x)\n return result\n\n def get_execution_type(\n self, type_name: Text) -> Optional[metadata_store_pb2.ExecutionType]:\n \"\"\"Gets an execution type by name.\n\n Args:\n 
type_name: the type with that name.\n\n Returns:\n The type with name type_name.\n\n Raises:\n tensorflow.errors.NotFoundError: if no type exists\n tensorflow.errors.InternalError: if query execution fails\n \"\"\"\n request = metadata_store_service_pb2.GetExecutionTypeRequest()\n request.type_name = type_name\n response = metadata_store_service_pb2.GetExecutionTypeResponse()\n\n self._call('GetExecutionType', request, response)\n return response.execution_type\n\n def get_execution_types(self) -> List[metadata_store_pb2.ExecutionType]:\n \"\"\"Gets all execution types.\n\n Returns:\n A list of all known ExecutionTypes.\n\n Raises:\n tensorflow.errors.InternalError: if query execution fails\n \"\"\"\n request = metadata_store_service_pb2.GetExecutionTypesRequest()\n response = metadata_store_service_pb2.GetExecutionTypesResponse()\n\n self._call('GetExecutionTypes', request, response)\n result = []\n for x in response.execution_types:\n result.append(x)\n return result\n\n def get_context_type(\n self, type_name: Text) -> Optional[metadata_store_pb2.ContextType]:\n \"\"\"Gets a context type by name.\n\n Args:\n type_name: the type with that name.\n\n Returns:\n The type with name type_name.\n\n Raises:\n tensorflow.errors.NotFoundError: if no type exists\n tensorflow.errors.InternalError: if query execution fails\n \"\"\"\n request = metadata_store_service_pb2.GetContextTypeRequest()\n request.type_name = type_name\n response = metadata_store_service_pb2.GetContextTypeResponse()\n\n self._call('GetContextType', request, response)\n return response.context_type\n\n def get_executions_by_type(\n self, type_name: Text) -> List[metadata_store_pb2.Execution]:\n \"\"\"Gets all the executions of a given type.\"\"\"\n request = metadata_store_service_pb2.GetExecutionsByTypeRequest()\n request.type_name = type_name\n response = metadata_store_service_pb2.GetExecutionsByTypeResponse()\n\n self._call('GetExecutionsByType', request, response)\n result = []\n for x in response.executions:\n result.append(x)\n return result\n\n def get_executions_by_id(\n self, execution_ids: Sequence[int]) -> List[metadata_store_pb2.Execution]:\n \"\"\"Gets all executions with matching ids.\n\n The result is not index-aligned: if an id is not found, it is not returned.\n\n Args:\n execution_ids: A list of execution ids to retrieve.\n\n Returns:\n Executions with matching ids.\n \"\"\"\n request = metadata_store_service_pb2.GetExecutionsByIDRequest()\n for x in execution_ids:\n request.execution_ids.append(x)\n response = metadata_store_service_pb2.GetExecutionsByIDResponse()\n\n self._call('GetExecutionsByID', request, response)\n result = []\n for x in response.executions:\n result.append(x)\n return result\n\n def get_executions(self) -> List[metadata_store_pb2.Execution]:\n \"\"\"Gets all executions.\n\n Returns:\n A list of all executions.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n request = metadata_store_service_pb2.GetExecutionsRequest()\n response = metadata_store_service_pb2.GetExecutionsResponse()\n\n self._call('GetExecutions', request, response)\n result = []\n for x in response.executions:\n result.append(x)\n return result\n\n def get_artifacts(self) -> List[metadata_store_pb2.Artifact]:\n \"\"\"Gets all artifacts.\n\n Returns:\n A list of all artifacts.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n request = metadata_store_service_pb2.GetArtifactsRequest()\n response = metadata_store_service_pb2.GetArtifactsResponse()\n\n self._call('GetArtifacts', request, 
response)\n result = []\n for x in response.artifacts:\n result.append(x)\n return result\n\n def get_contexts(self) -> List[metadata_store_pb2.Context]:\n \"\"\"Gets all contexts.\n\n Returns:\n A list of all contexts.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n request = metadata_store_service_pb2.GetContextsRequest()\n response = metadata_store_service_pb2.GetContextsResponse()\n\n self._call('GetContexts', request, response)\n result = []\n for x in response.contexts:\n result.append(x)\n return result\n\n def get_contexts_by_id(\n self, context_ids: Sequence[int]) -> List[metadata_store_pb2.Context]:\n \"\"\"Gets all contexts with matching ids.\n\n The result is not index-aligned: if an id is not found, it is not returned.\n\n Args:\n context_ids: A list of context ids to retrieve.\n\n Returns:\n Contexts with matching ids.\n \"\"\"\n request = metadata_store_service_pb2.GetContextsByIDRequest()\n for x in context_ids:\n request.context_ids.append(x)\n response = metadata_store_service_pb2.GetContextsByIDResponse()\n\n self._call('GetContextsByID', request, response)\n result = []\n for x in response.contexts:\n result.append(x)\n return result\n\n def get_contexts_by_type(self,\n type_name: Text) -> List[metadata_store_pb2.Context]:\n \"\"\"Gets all the contexts of a given type.\"\"\"\n request = metadata_store_service_pb2.GetContextsByTypeRequest()\n request.type_name = type_name\n response = metadata_store_service_pb2.GetContextsByTypeResponse()\n\n self._call('GetContextsByType', request, response)\n result = []\n for x in response.contexts:\n result.append(x)\n return result\n\n def get_artifact_types_by_id(\n self, type_ids: Sequence[int]) -> List[metadata_store_pb2.ArtifactType]:\n \"\"\"Gets artifact types by ID.\n\n Args:\n type_ids: a sequence of artifact type IDs.\n\n Returns:\n A list of artifact types.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n request = metadata_store_service_pb2.GetArtifactTypesByIDRequest()\n response = metadata_store_service_pb2.GetArtifactTypesByIDResponse()\n for x in type_ids:\n request.type_ids.append(x)\n\n self._call('GetArtifactTypesByID', request, response)\n result = []\n for x in response.artifact_types:\n result.append(x)\n return result\n\n def get_execution_types_by_id(\n self, type_ids: Sequence[int]) -> List[metadata_store_pb2.ExecutionType]:\n \"\"\"Gets execution types by ID.\n\n Args:\n type_ids: a sequence of execution type IDs.\n\n Returns:\n A list of execution types.\n\n Args:\n type_ids: ids to look for.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n request = metadata_store_service_pb2.GetExecutionTypesByIDRequest()\n response = metadata_store_service_pb2.GetExecutionTypesByIDResponse()\n for x in type_ids:\n request.type_ids.append(x)\n\n self._call('GetExecutionTypesByID', request, response)\n result = []\n for x in response.execution_types:\n result.append(x)\n return result\n\n def get_context_types_by_id(\n self, type_ids: Sequence[int]) -> List[metadata_store_pb2.ContextType]:\n \"\"\"Gets context types by ID.\n\n Args:\n type_ids: a sequence of context type IDs.\n\n Returns:\n A list of context types.\n\n Args:\n type_ids: ids to look for.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n request = metadata_store_service_pb2.GetContextTypesByIDRequest()\n response = metadata_store_service_pb2.GetContextTypesByIDResponse()\n for x in type_ids:\n request.type_ids.append(x)\n\n self._call('GetContextTypesByID', request, response)\n result = []\n 
for x in response.context_types:\n result.append(x)\n return result\n\n def put_attributions_and_associations(\n self, attributions: Sequence[metadata_store_pb2.Attribution],\n associations: Sequence[metadata_store_pb2.Association]) -> None:\n \"\"\"Inserts attribution and association relationships in the database.\n\n The context_id, artifact_id, and execution_id must already exist.\n If the relationship exists, this call does nothing. Once added, the\n relationships cannot be modified.\n\n Args:\n attributions: A list of attributions to insert.\n associations: A list of associations to insert.\n \"\"\"\n request = metadata_store_service_pb2.PutAttributionsAndAssociationsRequest()\n for x in attributions:\n request.attributions.add().CopyFrom(x)\n for x in associations:\n request.associations.add().CopyFrom(x)\n response = metadata_store_service_pb2.PutAttributionsAndAssociationsResponse(\n )\n self._call('PutAttributionsAndAssociations', request, response)\n\n def get_contexts_by_artifact(\n self, artifact_id: int) -> List[metadata_store_pb2.Context]:\n \"\"\"Gets all context that an artifact is attributed to.\n\n Args:\n artifact_id: The id of the querying artifact\n\n Returns:\n Contexts that the artifact is attributed to.\n \"\"\"\n request = metadata_store_service_pb2.GetContextsByArtifactRequest()\n request.artifact_id = artifact_id\n response = metadata_store_service_pb2.GetContextsByArtifactResponse()\n\n self._call('GetContextsByArtifact', request, response)\n result = []\n for x in response.contexts:\n result.append(x)\n return result\n\n def get_contexts_by_execution(\n self, execution_id: int) -> List[metadata_store_pb2.Context]:\n \"\"\"Gets all context that an execution is associated with.\n\n Args:\n execution_id: The id of the querying execution\n\n Returns:\n Contexts that the execution is associated with.\n \"\"\"\n request = metadata_store_service_pb2.GetContextsByExecutionRequest()\n request.execution_id = execution_id\n response = metadata_store_service_pb2.GetContextsByExecutionResponse()\n\n self._call('GetContextsByExecution', request, response)\n result = []\n for x in response.contexts:\n result.append(x)\n return result\n\n def get_artifacts_by_context(\n self, context_id: int) -> List[metadata_store_pb2.Artifact]:\n \"\"\"Gets all direct artifacts that a context attributes to.\n\n Args:\n context_id: The id of the querying context\n\n Returns:\n Artifacts attributing to the context.\n \"\"\"\n request = metadata_store_service_pb2.GetArtifactsByContextRequest()\n request.context_id = context_id\n response = metadata_store_service_pb2.GetArtifactsByContextResponse()\n\n self._call('GetArtifactsByContext', request, response)\n result = []\n for x in response.artifacts:\n result.append(x)\n return result\n\n def get_executions_by_context(\n self, context_id: int) -> List[metadata_store_pb2.Execution]:\n \"\"\"Gets all direct executions that a context associates with.\n\n Args:\n context_id: The id of the querying context\n\n Returns:\n Executions associating with the context.\n \"\"\"\n request = metadata_store_service_pb2.GetExecutionsByContextRequest()\n request.context_id = context_id\n response = metadata_store_service_pb2.GetExecutionsByContextResponse()\n\n self._call('GetExecutionsByContext', request, response)\n result = []\n for x in response.executions:\n result.append(x)\n return result\n\n def get_events_by_execution_ids(\n self, execution_ids: Sequence[int]) -> List[metadata_store_pb2.Event]:\n \"\"\"Gets all events with matching execution ids.\n\n 
Args:\n execution_ids: a list of execution ids.\n\n Returns:\n Events with the execution IDs given.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n request = metadata_store_service_pb2.GetEventsByExecutionIDsRequest()\n for x in execution_ids:\n request.execution_ids.append(x)\n response = metadata_store_service_pb2.GetEventsByExecutionIDsResponse()\n\n self._call('GetEventsByExecutionIDs', request, response)\n result = []\n for x in response.events:\n result.append(x)\n return result\n\n def get_events_by_artifact_ids(\n self, artifact_ids: Sequence[int]) -> List[metadata_store_pb2.Event]:\n \"\"\"Gets all events with matching artifact ids.\n\n Args:\n artifact_ids: a list of artifact ids.\n\n Returns:\n Events with the execution IDs given.\n\n Raises:\n InternalError: if query execution fails.\n \"\"\"\n\n request = metadata_store_service_pb2.GetEventsByArtifactIDsRequest()\n for x in artifact_ids:\n request.artifact_ids.append(x)\n response = metadata_store_service_pb2.GetEventsByArtifactIDsResponse()\n\n self._call('GetEventsByArtifactIDs', request, response)\n result = []\n for x in response.events:\n result.append(x)\n return result\n\n def make_artifact_live(self, artifact_id: int) -> None:\n \"\"\"Changes the state of each artifact to LIVE.\n\n The artifact state must be NEW or CREATABLE.\n\n Args:\n artifact_id: the ID of the artifact.\n \"\"\"\n raise NotImplementedError()\n\n # TODO(b/121041332) consider at the same time as artifact/execution creation.\n def complete_execution(self, execution_id: int,\n artifact_ids: Sequence[int]) -> None:\n \"\"\"Changes the state of an execution to COMPLETE and the artifacts to LIVE.\n\n The execution state must be NEW or RUNNING.\n The artifacts must be NEW or CREATABLE.\n\n Args:\n execution_id: the execution to change to COMPLETE.\n artifact_ids: the artifacts to change to LIVE.\n \"\"\"\n raise NotImplementedError()\n" ]
[ [ "tensorflow.python.framework.errors.exception_type_from_error_code", "tensorflow.python.framework.errors.UnknownError" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
Rohit-Kundu/Hybrid_MRFO-OBHSA
[ "3511ead3a2024099fb77e8e19056257c03c8393a" ]
[ "OBHSA.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nimport sklearn\r\nfrom sklearn import datasets,svm,metrics\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import *\r\n\r\ndef reduce_features(solution, features):\r\n selected_elements_indices = np.where(solution ==1)[0]\r\n reduced_features = features[:, selected_elements_indices]\r\n return reduced_features\r\n\r\ndef classification_accuracy(labels, predictions):\r\n correct = np.where(labels == predictions)[0]\r\n accuracy = correct.shape[0]/labels.shape[0]\r\n return accuracy\r\n\r\ndef cal_pop_fitness(pop, features, labels, train_indices,val_indices,classifier):\r\n test_accuracies = np.zeros(pop.shape[0])\r\n val_accuracies = np.zeros(pop.shape[0])\r\n idx = 0\r\n\r\n val_pop_pred = np.zeros(shape=(pop.shape[0],val_indices.shape[0]))\r\n for i,curr_solution in enumerate(pop):\r\n\r\n reduced_features = reduce_features(curr_solution, features)\r\n train_data = reduced_features[train_indices, :]\r\n val_data=reduced_features[val_indices,:]\r\n\r\n train_labels = labels[train_indices]\r\n val_labels=labels[val_indices]\r\n if classifier=='SVM':\r\n SV_classifier = sklearn.svm.SVC(kernel='rbf',gamma='scale',C=5000)\r\n SV_classifier.fit(X=train_data, y=train_labels)\r\n val_predictions = SV_classifier.predict(val_data)\r\n val_accuracies[idx] = classification_accuracy(val_labels, val_predictions)\r\n idx = idx + 1\r\n elif classifier == 'KNN':\r\n knn=KNeighborsClassifier(n_neighbors=8)\r\n knn.fit(train_data,train_labels)\r\n val_predictions=knn.predict(val_data)\r\n val_accuracies[idx]=classification_accuracy(val_labels,predictions)\r\n idx = idx + 1\r\n else :\r\n mlp = MLPClassifier()\r\n mlp.fit(train_data,train_labels)\r\n val_predictions=mlp.predict(val_data)\r\n val_accuracies[idx]=classification_accuracy(val_labels,predictions)\r\n idx = idx + 1\r\n val_pop_pred[i] = val_predictions\r\n \r\n return val_accuracies,val_pop_pred\r\n\r\ndef get_vector(labels,pop_preds):\r\n vector = np.zeros(shape=(pop_preds.shape[0],4))\r\n for i in range(pop_preds.shape[0]):\r\n preds = pop_preds[i]\r\n acc = classification_accuracy(labels,preds)\r\n pre = precision_score(labels,preds,average=\"macro\")\r\n rec = recall_score(labels,preds,average=\"macro\")\r\n f1 = f1_score(labels,preds,average=\"macro\")\r\n vector[i] = np.array([acc,pre,rec,f1])\r\n return vector\r\n\r\ndef OBHSA(data_inputs,data_outputs,input_chromosome,\r\n HMCR=0.9,\r\n PAR=0.35,\r\n classifier=\"SVM\",\r\n num_generations = 10 #Number of generations in each fold\r\n ):\r\n popSize = input_chromosome.shape[0]\r\n print(\"\\nOPPOSITION-BASED HARMONY SEARCH:\\n\")\r\n population_output = np.zeros(shape = (popSize,data_inputs.shape[1],5))\r\n vector_output = np.zeros(shape=(popSize,4,5)) #4 because acc,pre,rec,f1; and 5 because folds=5\r\n \r\n num_samples = data_inputs.shape[0]\r\n num_feature_elements = data_inputs.shape[1]\r\n HM_shape=(popSize,num_feature_elements)\r\n #harmony_memory=np.random.randint(low=0,high=2,size=HM_shape)\r\n NCHV = np.ones((1, num_feature_elements))\r\n best_outputs = []\r\n best_opp_outputs = []\r\n \r\n kf=KFold(5,True,random_state=1)\r\n fold=0\r\n for train_indices,test_val_indices in kf.split(data_inputs):\r\n print(\"Fold : \",fold+1)\r\n val_indices,test_indices=train_test_split(test_val_indices,test_size=0.5,shuffle=True,random_state=8)\r\n 
best_test_outputs=[]\r\n\r\n #harmony_memory=np.random.randint(low=0,high=2,size=HM_shape)\r\n harmony_memory = input_chromosome[:,:,fold]\r\n opposite_memory=1-harmony_memory\r\n total_memory=np.concatenate((harmony_memory,opposite_memory),axis=0)\r\n total_fitness,_ = cal_pop_fitness(total_memory,data_inputs,data_outputs,train_indices,val_indices,classifier)\r\n fit_ind = np.argpartition(total_fitness, -popSize)[-popSize:]\r\n harmony_memory=total_memory[fit_ind,:]\r\n\r\n gen_fit = np.array([-1]) \r\n for currentIteration in range(num_generations):\r\n NCHV = np.ones((1, num_feature_elements))\r\n print(\"Generation : \", currentIteration+1)\r\n \r\n fitness,val_pop_preds=cal_pop_fitness(harmony_memory,data_inputs,data_outputs,train_indices,val_indices,classifier)\r\n best_outputs.append(np.max(fitness))\r\n print(\"Best validation result : \", max(best_outputs))\r\n\r\n if max(fitness)>max(gen_fit):\r\n gen_fit = fitness\r\n gen_labels = data_outputs[val_indices]\r\n gen_preds = val_pop_preds \r\n\r\n for i in range(num_feature_elements):\r\n ran = np.random.rand()\r\n if ran < HMCR:\r\n index = np.random.randint(0, popSize)\r\n NCHV[0, i] = harmony_memory[index, i]\r\n pvbran = np.random.rand()\r\n if pvbran < PAR:\r\n pvbran1 = np.random.rand()\r\n result = NCHV[0, i]\r\n if pvbran1 < 0.5:\r\n result =1-result\r\n\r\n else:\r\n NCHV[0, i] = np.random.randint(low=0,high=2,size=1)\r\n \r\n new_fitness,_ = cal_pop_fitness(NCHV, data_inputs, data_outputs, train_indices, val_indices,classifier)\r\n if new_fitness > min(fitness):\r\n min_fit_idx = np.where(fitness == min(fitness))\r\n harmony_memory[min_fit_idx, :] = NCHV\r\n fitness[min_fit_idx] = new_fitness\r\n\r\n opp_NCHV=1-NCHV\r\n new_opp_fitness,_=cal_pop_fitness(opp_NCHV,data_inputs, data_outputs, train_indices, val_indices,classifier)\r\n if new_opp_fitness > min(fitness):\r\n min_fit_idx = np.where(fitness == min(fitness))\r\n harmony_memory[min_fit_idx, :] = opp_NCHV\r\n fitness[min_fit_idx] = new_opp_fitness\r\n \r\n fitness,_ = cal_pop_fitness(harmony_memory, data_inputs, data_outputs, train_indices,val_indices,classifier)\r\n\r\n best_match_idx = np.where(fitness == np.max(fitness))[0]\r\n best_match_idx = best_match_idx[0]\r\n\r\n best_solution = harmony_memory[best_match_idx, :]\r\n best_solution_indices = np.where(best_solution == 1)[0]\r\n best_solution_num_elements = best_solution_indices.shape[0]\r\n best_solution_fitness = np.max(fitness)\r\n\r\n #print(\"best_match_idx : \", best_match_idx)\r\n #print(\"best_solution : \", best_solution)\r\n #print(\"Selected indices : \", best_solution_indices)\r\n print(\"Number of selected elements : \", best_solution_num_elements)\r\n\r\n vector_output[:,:,fold] = get_vector(gen_labels,gen_preds)\r\n population_output[:,:,fold] = harmony_memory\r\n \r\n fold=fold+1\r\n" ]
[ [ "sklearn.neural_network.MLPClassifier", "sklearn.model_selection.train_test_split", "sklearn.model_selection.KFold", "numpy.ones", "numpy.concatenate", "numpy.max", "sklearn.neighbors.KNeighborsClassifier", "numpy.argpartition", "sklearn.svm.SVC", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.where", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rogerwxd/Machine-Learning
[ "1fae84725e3d789b709afcfe2b57c40ecbc4af75" ]
[ "sklearn/train/train.py" ]
[ "import pandas as pd\nfrom sklearn import tree\nimport os\nfrom sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier\nfrom sklearn.model_selection import train_test_split\nimport joblib\nfrom sklearn.neural_network import MLPClassifier\n\nsaveModel = 'saveModel/' # PASTA PARA SALVAR O MODELO\ncaminho = 'datasetTrain/' # PASTA ONDE ESTÃO OS ARQUIVOS DE TRAINO\ncaminho_list = (os.listdir(caminho))\ncaminho_list.sort()\n\n\nfor data in caminho_list:\n\n dataset_nome = data.split('.')\n dataset_nome = dataset_nome[0]\n data_capture2 = caminho + data\n data_capture = pd.read_csv(data_capture2)\n x = data_capture.drop('class',axis=1)\n y = data_capture['class']\n\n # 70% dos dados para Train, 30% de dados para Test\n xTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size = 0.30, random_state = 42)\n\n ############################################\n ########## DECISION TREE ##########\n ############################################\n # Making a decision tree with two levels.\n clfTre = tree.DecisionTreeClassifier(max_depth=None)\n clfTre.fit(xTrain, yTrain)\n score = clfTre.score(xTrain, yTrain)\n print(\"DECISION TREE: \", str(score))\n print('SAVE MODEL - DECISION TREE')\n scalerfile = saveModel + dataset_nome + '_decisionTree.pkl'\n joblib.dump(clfTre, scalerfile)\n print(\"DECISION TREE OK \")\n\n\n ############################################\n ########## RANDOM FOREST #########\n ############################################\n rf = RandomForestClassifier(n_estimators=100, max_depth=None, random_state=0, n_jobs=1)\n rf.fit(xTrain, yTrain)\n score = rf.score(xTrain, yTrain)\n print(\"RANDOM FLOREST: \", str(score))\n print('SAVE MODEL - RANDOM FOREST')\n scalerfile = saveModel + dataset_nome + '_randomForest.pkl'\n joblib.dump(rf, scalerfile)\n print(\"RANDOM FLOREST OK \")\n\n ############################################\n ########### ADA BOOSTING ##############\n ############################################\n Adaclf = AdaBoostClassifier(n_estimators=100, random_state=0)\n Adaclf.fit(xTrain, yTrain)\n score = Adaclf.score(xTrain, yTrain)\n print(\"ADA BOOSTING \", str(score))\n print('SAVE MODEL - ADA BOOSTING')\n scalerfile = saveModel + dataset_nome + '_adaBoosting.pkl'\n joblib.dump(Adaclf, scalerfile)\n print(\"GRADIENT BOOSTING OK \")\n\n ############################################\n ########## BAGGING ##############\n ############################################\n bagClf = BaggingClassifier(n_estimators=100, random_state = 0, n_jobs=8)\n bagClf.fit(xTrain, yTrain)\n score = bagClf.score(xTrain, yTrain)\n print(\"BAGGING \", str(score))\n print('SAVE MODEL - BAGGING')\n scalerfile = saveModel + dataset_nome + '_bagging.pkl'\n joblib.dump(bagClf, scalerfile)\n print(\"BAGGING OK\")\n\n ############################################\n ############### MLP $#############\n ############################################\n mlp = MLPClassifier(random_state=1, max_iter=300)\n mlp.fit(xTrain, yTrain)\n score = mlp.score(xTrain, yTrain)\n print(\"MLP \", str(score))\n print('SAVE MODEL - MLP')\n scalerfile = saveModel + dataset_nome + '_mlp.pkl'\n joblib.dump(mlp, scalerfile)\n print(\"MLP OK \")" ]
[ [ "sklearn.neural_network.MLPClassifier", "sklearn.ensemble.BaggingClassifier", "pandas.read_csv", "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.AdaBoostClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
yyuting/FastImageProcessing
[ "c3ac272d3218cc4d30939f8219be70bc4125c4cc" ]
[ "CAN24_AN/read_timeline.py" ]
[ "import os\nimport json\nimport numpy\nimport sys\nimport glob\n\ndef read_time_dur(data_list):\n t_min = numpy.inf\n t_max = -numpy.inf\n\n for item in data_list:\n if 'ts' in item.keys():\n if item['ts'] < t_min:\n t_min = item['ts']\n if 'dur' in item.keys():\n if item['dur'] + item['ts'] > t_max:\n t_max = item['dur'] + item['ts']\n\n return t_max - t_min\n\ndef main():\n dataroot = sys.argv[1]\n subfolder = sys.argv[2]\n\n cwd = os.getcwd()\n os.chdir(os.path.join(dataroot, subfolder))\n files = glob.glob('nn_*.json')\n files.sort(key=os.path.getmtime)\n files = [file for file in files if file.startswith('nn_') and file.endswith('.json')]\n os.chdir(cwd)\n\n print(files)\n\n nburns = 10\n file_no = len(files) - nburns\n times = numpy.zeros(file_no)\n\n for n in range(len(files)):\n\n if n < nburns:\n continue\n\n filename = files[n]\n with open(os.path.join(dataroot, subfolder, filename)) as file:\n data = json.load(file)\n data_list = data['traceEvents']\n \n times[n-nburns] = read_time_dur(data_list)\n numpy.save(os.path.join(dataroot, subfolder, 'timeline_value.npy'), times)\n\n print('min time', numpy.min(times))\n print('mean time', numpy.mean(times))\n print('median_time', numpy.median(times))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.median", "numpy.zeros", "numpy.mean", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isl-mt/NMTGMinor
[ "573504d0ed2e40240e4186eb4af28275f0e3f422" ]
[ "onmt/EnsembleTranslator.py" ]
[ "import onmt\nimport onmt.modules\nimport torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom onmt.ModelConstructor import build_model\nimport torch.nn.functional as F\n\n\nmodel_list = ['transformer', 'stochastic_transformer']\n\nclass EnsembleTranslator(object):\n def __init__(self, opt):\n self.opt = opt\n self.tt = torch.cuda if opt.cuda else torch\n self.beam_accum = None\n self.beta = opt.beta\n self.alpha = opt.alpha\n self.start_with_bos = opt.start_with_bos\n \n self.models = list()\n self.model_types = list()\n \n # models are string with | as delimiter\n models = opt.model.split(\"|\")\n \n print(models)\n self.n_models = len(models)\n self._type = 'text'\n \n for i, model in enumerate(models):\n if opt.verbose:\n print('Loading model from %s' % model)\n checkpoint = torch.load(model,\n map_location=lambda storage, loc: storage)\n \n model_opt = checkpoint['opt']\n \n if i == 0:\n self.src_dict = checkpoint['dicts']['src']\n self.tgt_dict = checkpoint['dicts']['tgt']\n \n # Build model from the saved option\n model = build_model(model_opt, checkpoint['dicts'])\n \n model.load_state_dict(checkpoint['model'])\n \n if model_opt.model in model_list:\n if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:\n print(\"Not enough len to decode. Renewing .. \") \n model.decoder.renew_buffer(self.opt.max_sent_length)\n \n if opt.cuda:\n model = model.cuda()\n else:\n model = model.cpu()\n \n model.eval()\n \n self.models.append(model)\n self.model_types.append(model_opt.model)\n \n self.cuda = opt.cuda\n self.ensemble_op = opt.ensemble_op\n \n if opt.verbose:\n print('Done')\n\n def initBeamAccum(self):\n self.beam_accum = {\n \"predicted_ids\": [],\n \"beam_parent_ids\": [],\n \"scores\": [],\n \"log_probs\": []}\n \n # Combine distributions from different models\n def _combineOutputs(self, outputs):\n \n if len(outputs) == 1:\n return outputs[0]\n \n if self.ensemble_op == \"logSum\":\n output = (outputs[0])\n \n # sum the log prob\n for i in range(1, len(outputs)):\n output += (outputs[i])\n \n output.div(len(outputs))\n \n #~ output = torch.log(output)\n output = F.log_softmax(output, dim=-1)\n elif self.ensemble_op == \"mean\":\n output = torch.exp(outputs[0])\n \n # sum the log prob\n for i in range(1, len(outputs)):\n output += torch.exp(outputs[i])\n \n output.div(len(outputs))\n \n #~ output = torch.log(output)\n output = torch.log(output)\n elif self.ensemble_op == 'gmean':\n output = torch.exp(outputs[0])\n \n # geometric mean of the probabilities\n for i in range(1, len(outputs)):\n output *= torch.exp(outputs[i])\n \n # have to normalize\n output.pow_(1.0 / float(len(outputs)))\n norm_ = torch.norm(output, p=1, dim=-1)\n output.div_(norm_.unsqueeze(-1))\n\n \n output = torch.log(output)\n else:\n raise ValueError('Emsemble operator needs to be \"mean\" or \"logSum\", the current value is %s' % self.ensemble_op)\n \n return output\n \n # Take the average of attention scores\n def _combineAttention(self, attns):\n \n attn = attns[0]\n \n for i in range(1, len(attns)):\n attn += attns[i]\n \n attn.div(len(attns))\n \n return attn\n\n def _getBatchSize(self, batch):\n if self._type == \"text\":\n return batch.size(1)\n else:\n return batch.size(0)\n \n def to_variable(self, data):\n \n for i, t in enumerate(data):\n if data[i] is not None:\n if self.cuda:\n data[i] = Variable(data[i].cuda())\n else:\n data[i] = Variable(data[i])\n else:\n data[i] = None\n return data\n\n def buildData(self, srcBatch, goldBatch):\n # This needs 
to be the same as preprocess.py.\n \n if self.start_with_bos:\n srcData = [self.src_dict.convertToIdx(b,\n onmt.Constants.UNK_WORD,\n onmt.Constants.BOS_WORD)\n for b in srcBatch]\n else:\n srcData = [self.src_dict.convertToIdx(b,\n onmt.Constants.UNK_WORD)\n for b in srcBatch]\n\n tgtData = None\n if goldBatch:\n tgtData = [self.tgt_dict.convertToIdx(b,\n onmt.Constants.UNK_WORD,\n onmt.Constants.BOS_WORD,\n onmt.Constants.EOS_WORD) for b in goldBatch]\n\n return onmt.Dataset(srcData, tgtData, 9999,\n [self.opt.gpu], volatile=True,\n data_type=self._type, max_seq_num =self.opt.batch_size)\n\n def buildTargetTokens(self, pred, src, attn):\n tokens = self.tgt_dict.convertToLabels(pred, onmt.Constants.EOS)\n tokens = tokens[:-1] # EOS\n \n return tokens\n\n def translateBatch(self, srcBatch, tgtBatch):\n \n torch.set_grad_enabled(False)\n # Batch size is in different location depending on data.\n\n beamSize = self.opt.beam_size\n batchSize = self._getBatchSize(srcBatch)\n \n vocab_size = self.tgt_dict.size()\n allHyp, allScores, allAttn, allLengths = [], [], [], []\n \n # srcBatch should have size len x batch\n # tgtBatch should have size len x batch\n \n contexts = dict()\n \n src = srcBatch.transpose(0, 1)\n \n # (1) run the encoders on the src\n for i in range(self.n_models):\n contexts[i], src_mask = self.models[i].encoder(src)\n \n \n goldScores = contexts[0].data.new(batchSize).zero_()\n goldWords = 0\n \n if tgtBatch is not None:\n # Use the first model to decode\n model_ = self.models[0]\n \n tgtBatchInput = tgtBatch[:-1]\n tgtBatchOutput = tgtBatch[1:]\n tgtBatchInput = tgtBatchInput.transpose(0,1)\n \n output, coverage = model_.decoder(tgtBatchInput, contexts[0], src)\n output = output.transpose(0, 1) # transpose to have time first, like RNN models\n \n \n # (2) if a target is specified, compute the 'goldScore'\n # (i.e. 
log likelihood) of the target under the model\n for dec_t, tgt_t in zip(output, tgtBatchOutput.data):\n gen_t = model_.generator(dec_t)\n tgt_t = tgt_t.unsqueeze(1)\n scores = gen_t.data.gather(1, tgt_t)\n scores.masked_fill_(tgt_t.eq(onmt.Constants.PAD), 0)\n goldScores += scores.squeeze(1)\n goldWords += tgt_t.ne(onmt.Constants.PAD).sum()\n \n \n # (3) Start decoding\n \n # time x batch * beam\n src = Variable(srcBatch.data.repeat(1, beamSize))\n \n # initialize the beam\n beam = [onmt.Beam(beamSize, self.opt.cuda) for k in range(batchSize)]\n \n batchIdx = list(range(batchSize))\n remainingSents = batchSize\n \n decoder_states = dict()\n \n decoder_hiddens = dict()\n \n for i in range(self.n_models):\n decoder_states[i] = self.models[i].create_decoder_state(src.clone(), contexts[i], beamSize)\n \n for i in range(self.opt.max_sent_length):\n # Prepare decoder input.\n \n # input size: 1 x ( batch * beam )\n input = torch.stack([b.getCurrentState() for b in beam\n if not b.done]).t().contiguous().view(1, -1)\n \n \"\"\" \n Inefficient decoding implementation\n We re-compute all states for every time step\n A better buffering algorithm will be implemented\n \"\"\"\n \n decoder_input = Variable(input)\n \n # require batch first for everything\n outs = dict()\n attns = dict()\n \n for i in range(self.n_models):\n #~ decoder_hidden, coverage, buffers[i] = self.models[i].decoder.step(decoder_input.transpose(0,1) , contexts[i].transpose(0, 1), src.transpose(0, 1), buffer=buffers[i])\n decoder_hidden, coverage = self.models[i].decoder.step(decoder_input.clone(), decoder_states[i])\n \n # take the last decoder state\n decoder_hidden = decoder_hidden.squeeze(1)\n attns[i] = coverage[:, -1, :].squeeze(1) # batch * beam x src_len\n \n # batch * beam x vocab_size \n outs[i] = self.models[i].generator(decoder_hidden)\n \n out = self._combineOutputs(outs)\n attn = self._combineAttention(attns)\n \n wordLk = out.view(beamSize, remainingSents, -1) \\\n .transpose(0, 1).contiguous()\n attn = attn.view(beamSize, remainingSents, -1) \\\n .transpose(0, 1).contiguous()\n \n active = []\n \n for b in range(batchSize):\n if beam[b].done:\n continue\n \n idx = batchIdx[b]\n if not beam[b].advance(wordLk.data[idx], attn.data[idx]):\n active += [b]\n \n for i in range(self.n_models):\n decoder_states[i]._update_beam(beam, b, remainingSents, idx)\n \n \n if not active:\n break\n \n # in this section, the sentences that are still active are\n # compacted so that the decoder is not run on completed sentences\n activeIdx = self.tt.LongTensor([batchIdx[k] for k in active])\n batchIdx = {beam: idx for idx, beam in enumerate(active)}\n \n \n for i in range(self.n_models):\n decoder_states[i]._prune_complete_beam(activeIdx, remainingSents)\n \n \n \n remainingSents = len(active)\n \n # (4) package everything up\n allHyp, allScores, allAttn = [], [], []\n n_best = self.opt.n_best\n allLengths = []\n\n for b in range(batchSize):\n scores, ks = beam[b].sortBest()\n\n allScores += [scores[:n_best]]\n hyps, attn, length = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])\n allHyp += [hyps]\n allLengths += [length]\n valid_attn = srcBatch.data[:, b].ne(onmt.Constants.PAD) \\\n .nonzero().squeeze(1)\n attn = [a.index_select(1, valid_attn) for a in attn]\n allAttn += [attn]\n\n if self.beam_accum:\n self.beam_accum[\"beam_parent_ids\"].append(\n [t.tolist()\n for t in beam[b].prevKs])\n self.beam_accum[\"scores\"].append([\n [\"%4f\" % s for s in t.tolist()]\n for t in beam[b].allScores][1:])\n 
self.beam_accum[\"predicted_ids\"].append(\n [[self.tgt_dict.getLabel(id)\n for id in t.tolist()]\n for t in beam[b].nextYs][1:])\n \n \n torch.set_grad_enabled(True)\n\n return allHyp, allScores, allAttn, allLengths, goldScores, goldWords\n\n def translate(self, srcBatch, goldBatch):\n # (1) convert words to indexes\n dataset = self.buildData(srcBatch, goldBatch)\n batch = self.to_variable(dataset.next()[0])\n src, tgt = batch\n batchSize = self._getBatchSize(src)\n\n # (2) translate\n pred, predScore, attn, predLength, goldScore, goldWords = self.translateBatch(src, tgt)\n \n\n # (3) convert indexes to words\n predBatch = []\n for b in range(batchSize):\n predBatch.append(\n [self.buildTargetTokens(pred[b][n], srcBatch[b], attn[b][n])\n for n in range(self.opt.n_best)]\n )\n\n return predBatch, predScore, predLength, goldScore, goldWords\n\n\n" ]
[ [ "torch.norm", "torch.nn.functional.log_softmax", "torch.load", "torch.exp", "torch.set_grad_enabled", "torch.log", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
seann27/DogBreedDetector
[ "88b76cc6eea869a9d28e775843f5fbf9d9fbc9c1" ]
[ "predict.py" ]
[ "import torchvision.models as models\nimport torch.nn as nn\nimport torch\nimport torchvision.transforms as transforms\nimport sys\nimport os\nimport numpy as np\nimport json\nimport random\nimport matplotlib.pyplot as plt\nfrom glob import glob\nfrom PIL import Image\nfrom torchvision import datasets\n\n# # grab file from ajax call\n# import cgi, cgitb\n# cgitb.enable()\n# form = cgi.FieldStorage()\n# if form.has_key(\"file\"):\n# file = form[\"file\"].value\n\ntest_val_transforms = transforms.Compose([transforms.Resize(225),\n\t\t\t\t\t transforms.CenterCrop(224),\n\t\t\t\t\t transforms.ToTensor(),\n\t\t\t\t\t transforms.Normalize([0.485, 0.456, 0.406],\n\t\t\t\t\t\t\t\t\t\t [0.229, 0.224, 0.225])])\n\ndata_dir = os.path.normpath(\"../Files/dogImages\")\ntrain_dir = os.path.normpath(data_dir+'/train')\ntrain_data = datasets.ImageFolder(train_dir)\n\n# list of class names by index, i.e. a name can be accessed like class_names[0]\nclass_names = [item[4:].replace(\"_\", \" \") for item in train_data.classes]\n\n# use GPU if available\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# grab file from commandline arguments\nimport argparse\nparser = argparse.ArgumentParser(description=\"File upload\")\nparser.add_argument('image', help='Filepath of image')\nparser.add_argument('--name', help='Name of image')\nargs = parser.parse_args()\nfile = os.path.normpath(args.image)\nif args.name:\n\tname = args.name\nelse:\n\tname = file\n\n# terminate program if image file cannot be found\nif os.path.exists(file) is False:\n\tprint(\"Could not find file \"+file)\n\texit()\n\ndef face_detector(img_path):\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0\n\nVGG16 = models.vgg16(pretrained=True)\nVGG16.to(device)\ndef VGG16_predict(img_path):\n '''\n Use pre-trained VGG-16 model to obtain index corresponding to\n predicted ImageNet class for image at specified path\n\n Args:\n img_path: path to an image\n\n Returns:\n Index corresponding to VGG-16 model's prediction\n '''\n img_path = os.path.normpath(img_path)\n ## TODO: Complete the function.\n ## Load and pre-process an image from the given img_path\n tr = transforms.Compose([transforms.Resize(225),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n np_tensor = process_image(img_path,tr)\n np_tensor.unsqueeze_(0)\n np_tensor = np_tensor.float()\n np_tensor = np_tensor.to(device)\n log_ps = VGG16.forward(np_tensor)\n ps = torch.exp(log_ps)\n top_p,top_class = ps.topk(1,dim=1)\n\n ## Return the *index* of the predicted class for that image\n\n return int(top_class) # predicted class index\n\ndef dog_detector(img_path):\n ## TODO: Complete the function.\n index = VGG16_predict(img_path)\n\n return index >= 151 and index <= 268\n\ndef generate_image_map(dog_files):\n\timage_map = {}\n\tfor file in dog_files:\n\t\tname = os.path.basename(os.path.dirname(file))\n\t\tname = name.split('.')[1]\n\t\tif name in image_map.keys():\n\t\t\timage_map[name].append(file)\n\t\telse:\n\t\t\timage_map[name] = [file]\n\treturn image_map\n\n# returns image tensor\ndef process_image(image_path,transformation):\n\timage = Image.open(image_path)\n\timg = transformation(image)\n\treturn img\n\ndef get_breed_image(prediction,image_map):\n\tpred = prediction.replace(\" \",\"_\")\n\timages = image_map[pred]\n\timg = random.choice(images)\n\treturn img\n\ndef 
predict_breed_transfer(img_path,model,transforms):\n\t# load the image and return the predicted breed\n\tnp_tensor = process_image(img_path,transforms)\n\tnp_tensor.unsqueeze_(0)\n\tnp_tensor = np_tensor.float()\n\tnp_tensor = np_tensor.to(device)\n\toutput = model(np_tensor)\n\tps = torch.exp(output)\n\ttop_p,top_class = ps.topk(4,dim=1)\n\ttop_p = top_p.cpu().detach().numpy().reshape(-1)\n\ttop_class = top_class.cpu().numpy().reshape(-1)\n\treturn top_p,top_class\n\ndef normalize_predictions(top_p,top_class):\n\tnorm_preds = {}\n\tfor p,c in zip(top_p,top_class):\n\t\ttotal = top_p.sum()\n\t\tnorm_preds[c] = p/total\n\treturn norm_preds\n\ndef filter_matches(matches):\n\tfiltered = {'Other':0}\n\tfor key,val in matches.items():\n\t\tif val*100 >= 5:\n\t\t\tfiltered[class_names[key]] = val\n\t\telse:\n\t\t\tfiltered['Other'] += val\n\treturn filtered\n\ndef plot_figures(file, best_match, comparison, matches, text):\n my_imgs = [Image.open(file),Image.open(comparison)]\n my_labels = ['Your image','{}\\n{:.2f}% match'.format(best_match,matches[best_match]*100)]\n\n for key,val in matches.items():\n if key != 'Other' and key != best_match:\n my_imgs.append(Image.open(get_breed_image(key,img_map)))\n my_labels.append('{}\\n{:.2f}% match'.format(key,val*100))\n\n f = plt.figure(figsize=(8,8))\n f.suptitle(text, fontsize=16)\n ax1 = f.add_subplot(1,2,1)\n ax1.imshow(my_imgs[0])\n ax1.set_title(my_labels[0])\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax2 = f.add_subplot(1,2,2)\n ax2.imshow(my_imgs[1])\n ax2.set_title(my_labels[1])\n ax2.set_xticks([])\n ax2.set_yticks([])\n f.subplots_adjust(top=1)\n\n if len(my_imgs) > 2:\n f2 = plt.figure(figsize=(6,6))\n f2.suptitle('Other resemblances', fontsize=12)\n for idx,i in enumerate(my_imgs[2:]):\n ax = f2.add_subplot(1,(len(my_imgs)-2),idx+1)\n ax.imshow(i)\n ax.set_title(my_labels[idx+2])\n ax.set_xticks([])\n ax.set_yticks([])\n f2.subplots_adjust(top=1)\n\n plt.show()\n\ndef predict(file,model,img_map,transforms):\n\tmatches = {}\n\tfor i in range(0,99):\n\t\ttop_p,top_class = predict_breed_transfer(file,model,transforms)\n\t\tpreds = normalize_predictions(top_p,top_class)\n\t\tfor key,val in preds.items():\n\t\t\tif key in matches.keys():\n\t\t\t\tmatches[key] += val\n\t\t\telse:\n\t\t\t\tmatches[key] = val\n\tbest = 0\n\tbest_match = ''\n\tfor key,val in matches.items():\n\t\tmatches[key] = val/100\n\t\tif val > best:\n\t\t\tbest = val\n\t\t\tbest_match = class_names[key]\n\tcomparison = get_breed_image(best_match,img_map)\n\treturn best_match,comparison,filter_matches(matches)\n\n# build and initialize model\nmodel = models.vgg16(pretrained=True)\n\nfor param in model.parameters():\n\tparam.requires_grad = False\n\nclassifier = nn.Sequential(nn.Linear(25088, 3072),\n\t\t\t\t\t\t\t nn.ReLU(),\n\t\t\t\t\t\t\t nn.Dropout(p=0.2),\n\t\t\t\t\t\t\t nn.Linear(3072, 1024),\n\t\t\t\t\t\t\t nn.ReLU(),\n\t\t\t\t\t\t\t nn.Dropout(p=0.2),\n\t\t\t\t\t\t\t nn.Linear(1024, 306),\n\t\t\t\t\t\t\t nn.ReLU(),\n\t\t\t\t\t\t\t nn.Dropout(p=0.2),\n\t\t\t\t\t\t\t nn.Linear(306, 133),\n\t\t\t\t\t\t\t nn.LogSoftmax(dim=1))\n\nmodel.classifier = classifier\nmodel.to(device)\n\n# load checkpoint file from training\ntry:\n\tcheckpoint = os.path.normpath('../Files/model_trained.pt')\nexcept:\n\tprint(\"Model checkpoint not found. 
Please train model or move checkpoint file to ../Files/\")\n\texit()\n\n# send checkpoint file data into model\nmodel.load_state_dict(torch.load(checkpoint,map_location=device))\n\n# load filenames for human and dog images\nhuman_files = np.array(glob(os.path.normpath(\"../Files/lfw/*/*\")))\ndog_files = np.array(glob(os.path.normpath(\"../Files/dogImages/*/*/*\")))\n\nimg_map = generate_image_map(dog_files)\n\ntext = \"Error, couldn't detect a dog or person\"\nif dog_detector(file):\n text = \"Woof Woof Hello Doggy!\"\nelif face_detector(file) > 0:\n text = \"Hello human! If you were a dog...\"\nelse:\n print(text)\n exit()\n\nbest_match,comparison,matches = predict(file,model,img_map,test_val_transforms)\nplot_figures(file,best_match,comparison,matches,text)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.load", "torch.exp", "torch.nn.Linear", "torch.cuda.is_available", "torch.nn.ReLU", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rosulucian/handrolled-ml
[ "dc4227039da20a8daaeabc7ec438398b2ff12894" ]
[ "utils/activations.py" ]
[ "import numpy as np\n\n\ndef sigmoid(Z):\n return 1 / (1 + np.exp(-Z))\n\n\ndef relu(Z):\n A = np.maximum(0, Z)\n\n assert(A.shape == Z.shape)\n\n return A\n\n\ndef tanh(Z):\n return np.tanh(Z)\n\n\ndef sigmoid_deriv(dA, Z):\n assert (dA.shape == Z.shape)\n\n s = 1/(1+np.exp(-Z))\n deriv = s * (1-s)\n\n dZ = dA * deriv\n\n assert (dZ.shape == Z.shape)\n\n return dZ\n\n\ndef relu_deriv(dA, Z):\n assert (dA.shape == Z.shape)\n\n dZ = np.array(dA, copy=True)\n dZ[Z <= 0] = 0\n\n assert (dZ.shape == Z.shape)\n\n return dZ\n\n\ndef tanh_deriv(dA, Z):\n assert (dA.shape == Z.shape)\n\n A = tanh(Z)\n dZ = dA * (1 - np.power(A, 2))\n\n assert (dZ.shape == Z.shape)\n\n return dZ\n\n\nforward = {\n 'sigmoid': sigmoid,\n 'relu': relu,\n 'tanh': tanh\n}\n\nbackward = {\n 'sigmoid': sigmoid_deriv,\n 'relu': relu_deriv,\n 'tanh': tanh_deriv\n}\n" ]
[ [ "numpy.maximum", "numpy.power", "numpy.tanh", "numpy.array", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
izhx/hsinkit-learn
[ "94252bed07c5b1bd97985ddcd5ec3e36f115f0cb" ]
[ "hklearn/neighbors.py" ]
[ "import numpy as np\nfrom scipy import stats\n\nfrom .base import BaseEstimator, ClassifierMixin\n\n\nclass KNeighborsClassifier(BaseEstimator, ClassifierMixin):\n def __init__(self, n_neighbors=5, weights=None, metric='L2'):\n super(KNeighborsClassifier, self).__init__()\n self.n_neighbors = n_neighbors\n self.metric = metric\n self.weights = weights\n\n def predict(self, X):\n \"\"\"\n Predict the class labels for the provided data\n \"\"\"\n nei_dis, nei_idx = self.kneighbors(X)\n n_samples = X.shape[0]\n nei_y = self._y[nei_idx]\n y_pred = np.array(stats.mode(nei_y, axis=1)[0]).reshape((n_samples))\n return y_pred\n\n def kneighbors(self, X=None, n_neighbors=None):\n \"\"\"\n 得到K近邻索引和距离\n \"\"\"\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n elif n_neighbors <= 0:\n raise ValueError(\n \"Expected n_neighbors > 0. Got %d\" %\n n_neighbors\n )\n else:\n if not np.issubdtype(type(n_neighbors), np.integer):\n raise TypeError(\n \"n_neighbors does not take %s value, \"\n \"enter integer value\" %\n type(n_neighbors))\n\n if X is None:\n X = self._fit_X\n # Include an extra neighbor to account for the sample itself being\n # returned, which is removed later\n n_neighbors += 1\n\n train_size = self._fit_X.shape[0]\n if n_neighbors > train_size:\n raise ValueError(\n \"Expected n_neighbors <= n_samples, \"\n \" but n_samples = %d, n_neighbors = %d\" %\n (train_size, n_neighbors)\n )\n n_samples, _ = X.shape\n\n # numpy暴力计算过程\n X = X[:, np.newaxis, :] # 将测试X扩展为(n_samples, 1, n_feats),便于广播\n diff = X - self._fit_X # 利用广播操作,得到形状为(n_samples, n_train, n_feats)\n if self.metric == 'L1': # 计算距离测度\n dis = np.sum(np.abs(diff), axis=-1)\n else:\n dis = np.sum(diff * diff, axis=-1)\n # 得到前k个邻居的索引\n nei_idx = dis.argpartition(n_neighbors)[:, :n_neighbors]\n # 生成辅助索引,构建符合numpy要求的花式索引,便于得到与前k个邻居的距离\n test_idx = np.arange(n_samples).repeat(n_neighbors).reshape((n_samples, n_neighbors))\n # 通过花式索引获得每个测试样本与前k个邻居的距离矩阵\n nei_dis = dis[test_idx, nei_idx]\n\n return nei_dis, nei_idx\n\n def fit(self, X, y): # todo weight\n self.classes_ = np.array(list(set(list(y))), dtype=y.dtype)\n self._fit_X = X\n self._y = y\n return\n" ]
[ [ "numpy.abs", "numpy.arange", "numpy.sum", "scipy.stats.mode" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
Jackson-Kang/VQVC-Pytorch
[ "d2267b5c52253b6ae11a5767963a65320ae335c2" ]
[ "utils/audio/audio_preprocessing.py" ]
[ "\"\"\"\n\tfrom NVIDIA's preprocessing\n\n\treference)\n\t\thttps://github.com/NVIDIA/tacotron2\n\"\"\"\n\nimport torch\nimport numpy as np\nfrom scipy.signal import get_window\nimport librosa.util as librosa_util\nfrom config import Arguments as args\n\ndef window_sumsquare(window, n_frames, hop_length=args.hop_length, win_length=args.win_length,\n n_fft=args.filter_length, dtype=np.float32, norm=None):\n \"\"\"\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n n_frames : int > 0\n The number of analysis frames\n hop_length : int > 0\n The number of samples to advance between frames\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n n_fft : int > 0\n The length of each analysis frame.\n dtype : np.dtype\n The data type of the output\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n \"\"\"\n if win_length is None:\n win_length = n_fft\n\n n = n_fft + hop_length * (n_frames - 1)\n x = np.zeros(n, dtype=dtype)\n\n # Compute the squared window at the desired length\n win_sq = get_window(window, win_length, fftbins=True)\n win_sq = librosa_util.normalize(win_sq, norm=norm)**2\n win_sq = librosa_util.pad_center(win_sq, n_fft)\n\n # Fill the envelope\n for i in range(n_frames):\n sample = i * hop_length\n x[sample:min(n, sample + n_fft)\n ] += win_sq[:max(0, min(n_fft, n - sample))]\n return x\n\n\ndef griffin_lim(magnitudes, stft_fn, n_iters=30):\n \"\"\"\n PARAMS\n ------\n magnitudes: spectrogram magnitudes\n stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods\n \"\"\"\n\n angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))\n angles = angles.astype(np.float32)\n angles = torch.autograd.Variable(torch.from_numpy(angles))\n signal = stft_fn.inverse(magnitudes, angles).squeeze(1)\n\n for i in range(n_iters):\n _, angles = stft_fn.transform(signal)\n signal = stft_fn.inverse(magnitudes, angles).squeeze(1)\n return signal\n\n\ndef dynamic_range_compression(x, C=1, clip_val=1e-5):\n \"\"\"\n PARAMS\n ------\n C: compression factor\n \"\"\"\n return torch.log(torch.clamp(x, min=clip_val) * C)\n\n\ndef dynamic_range_decompression(x, C=1):\n \"\"\"\n PARAMS\n ------\n C: compression factor used to compress\n \"\"\"\n return torch.exp(x) / C\n" ]
[ [ "scipy.signal.get_window", "torch.from_numpy", "torch.exp", "torch.clamp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
MarcosRBL/ScholarDropout
[ "f35f8c8715bc6eab79e49052098deab549d34144" ]
[ "PredAlunos.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport sklearn.metrics as m\r\nimport streamlit as st\r\n\r\nimport inflection\r\nimport pylab\r\nimport random\r\nimport warnings\r\nimport os\r\nimport io\r\n\r\nfrom IPython.display import Image\r\nfrom sklearn.preprocessing import RobustScaler, MinMaxScaler, LabelEncoder\r\nfrom sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import cross_validate\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.dummy import DummyClassifier\r\nfrom sklearn.metrics import accuracy_score, classification_report\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n#Streamlit page\r\nst.set_page_config(page_title='Propensão de Evasão Escolar')\r\n\r\nmultiple_files = st.file_uploader(\r\n \"Insira o arquivo CSV abaixo\",\r\n accept_multiple_files=True\r\n)\r\nfor file in multiple_files:\r\n file_container = st.beta_expander(\r\n f\"Nome do Arquivo: {file.name} ({file.size})\"\r\n )\r\n data = io.BytesIO(file.getbuffer())\r\n\r\n st.text(\"\")\r\n\r\n if st.button('Estudar dados'):\r\n #Lugar onde todas as funções serão feitas\r\n def ml_error(model_name, y, yhat):\r\n print(model_name)\r\n print(classification_report(y_test, y_pred))\r\n\r\n #importando o dataset\r\n df_raw = pd.read_csv(data)\r\n\r\n #Em todas as novas seções vamos fazer uma cópia para caso alguma coisa não saia como o esperado podemos rodar apenas a seção \r\n #novamente não necessitando rodar todo o projeto.\r\n df1 = df_raw.copy()\r\n\r\n cols_old = ['school', 'sex', 'age', 'address', 'famsize', 'Pstatus', 'Medu', 'Fedu',\r\n 'Mjob', 'Fjob', 'reason', 'guardian', 'traveltime', 'studytime',\r\n 'failures', 'schoolsup', 'famsup', 'paid', 'activities', 'nursery',\r\n 'higher', 'internet', 'romantic', 'famrel', 'freetime', 'goout', 'Dalc',\r\n 'Walc', 'health', 'absences', 'passed']\r\n\r\n snackecase = lambda x: inflection.underscore(x)\r\n cols_new = list(map( snackecase, cols_old ) )\r\n\r\n df1.columns = cols_new\r\n\r\n num_attributes = df1.select_dtypes( include=['int64', 'float64'] )\r\n cat_attributes = df1.select_dtypes( exclude=['int64', 'float64'] )\r\n\r\n mms = MinMaxScaler()\r\n df1['age'] = mms.fit_transform( df1[['age']].values )\r\n df1['medu'] = mms.fit_transform( df1[['medu']].values )\r\n df1['fedu'] = mms.fit_transform( df1[['fedu']].values )\r\n df1['traveltime'] = mms.fit_transform( df1[['traveltime']].values )\r\n df1['studytime'] = mms.fit_transform( df1[['studytime']].values )\r\n df1['failures'] = mms.fit_transform( df1[['failures']].values )\r\n df1['famrel'] = mms.fit_transform( df1[['famrel']].values )\r\n df1['freetime'] = mms.fit_transform( df1[['freetime']].values )\r\n df1['goout'] = mms.fit_transform( df1[['goout']].values )\r\n df1['dalc'] = mms.fit_transform( df1[['dalc']].values )\r\n df1['walc'] = mms.fit_transform( df1[['walc']].values )\r\n df1['health'] = mms.fit_transform( df1[['health']].values )\r\n df1['absences'] = mms.fit_transform( df1[['absences']].values )\r\n\r\n le = LabelEncoder()\r\n df1['school'] = le.fit_transform( df1['school'] )\r\n df1['sex'] = le.fit_transform( df1['sex'] )\r\n df1['address'] = le.fit_transform( df1['address'] )\r\n df1['famsize'] = le.fit_transform( df1['famsize'] )\r\n df1['mjob'] = le.fit_transform( df1['mjob'] )\r\n df1['fjob'] = le.fit_transform( df1['fjob'] )\r\n df1['reason'] = le.fit_transform( df1['reason'] )\r\n 
df1['guardian'] = le.fit_transform( df1['guardian'] )\r\n df1['schoolsup'] = le.fit_transform( df1['schoolsup'] )\r\n df1['famsup'] = le.fit_transform( df1['famsup'] )\r\n df1['paid'] = le.fit_transform( df1['paid'] )\r\n df1['activities'] = le.fit_transform( df1['activities'] )\r\n df1['nursery'] = le.fit_transform( df1['nursery'] )\r\n df1['higher'] = le.fit_transform( df1['higher'] )\r\n df1['internet'] = le.fit_transform( df1['internet'] )\r\n df1['romantic'] = le.fit_transform( df1['romantic'])\r\n df1['passed'] = le.fit_transform( df1['passed'])\r\n df1['pstatus'] = le.fit_transform( df1['pstatus'])\r\n\r\n X = df1.drop('passed', axis=1)\r\n y = df1['passed']\r\n\r\n X_train, X_test, y_train, y_test = train_test_split( X, y, test_size= 0.20 )\r\n\r\n rfc = RandomForestClassifier(random_state = 42)\r\n rfc.fit(X_train, y_train)\r\n\r\n y_pred = rfc.predict(X_test)\r\n\r\n from sklearn.model_selection import cross_val_score\r\n\r\n scores_dt = cross_val_score(rfc, X, y,\r\n scoring='accuracy', cv=5)\r\n\r\n accScore = 'Acurácia',accuracy_score(y_test, y_pred)* 100\r\n classRep = classification_report(y_test, y_pred)\r\n\r\n st.text(accScore)\r\n st.text(classRep)" ]
[ [ "pandas.read_csv", "sklearn.model_selection.cross_val_score", "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.classification_report", "sklearn.preprocessing.MinMaxScaler", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
NagarajSMurthy/Invisibility-cloak
[ "16d02e9ee1d456d5606edf973e85c1dbdb6e5377" ]
[ "Invisibility_cloak.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 17 14:12:32 2020\r\n\r\n@author: nagar\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\nimport math\r\n\r\ncam = cv2.VideoCapture(0)\r\n\r\ntime.sleep(1)\r\nnum_frames = 0\r\nbg = None\r\naWeight = 0.8\r\n\r\ndef init_bgnd():\r\n global bg\r\n for i in range(30): \r\n return_val, bg = cam.read() \r\n if return_val == False : \r\n continue \r\n \r\n bg = np.flip(bg, axis = 1) # .astype('float') # flipping of the frame \r\n return bg\r\n\r\n\r\nbg = init_bgnd()\r\n\r\nwhile True:\r\n ret,frame = cam.read()\r\n if ret is False:\r\n pass\r\n \r\n frame = cv2.flip(frame, 1)\r\n\r\n # clone the frame\r\n clone = frame.copy() # .astype('float')\r\n \r\n # get the height and width of the frame\r\n (height, width) = frame.shape[:2]\r\n\r\n # convert the frame to grayscale and blur it\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\r\n \r\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\r\n \r\n lower_red = np.array([0, 174, 223]) \r\n upper_red = np.array([100, 255, 255]) \r\n mask1 = cv2.inRange(hsv, lower_red, upper_red) \r\n # setting the lower and upper range for mask2 \r\n #cv2.imshow('mask 1',mask1)\r\n \r\n lower_red = np.array([155, 40, 40]) \r\n upper_red = np.array([180, 255, 255]) \r\n mask2 = cv2.inRange(hsv, lower_red, upper_red) \r\n \r\n # to get the background, keep looking till a threshold is reached\r\n # so that our running average model gets calibrated\r\n \r\n mask1 = mask1 + mask2 \r\n \r\n # Refining the mask corresponding to the detected red color \r\n mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), \r\n np.uint8), iterations = 2) \r\n mask1 = cv2.dilate(mask1, np.ones((3, 3), np.uint8), iterations = 1) \r\n #cv2.imshow('Mask 1 after morph',mask1)\r\n mask2 = cv2.bitwise_not(mask1) \r\n #cv2.imshow('mask 2', mask2)\r\n #print(mask2)\r\n # Generating the final output \r\n res1 = cv2.bitwise_and(bg, bg, mask = mask1) \r\n res2 = cv2.bitwise_and(clone, clone, mask = mask2) \r\n final_output = cv2.addWeighted(res1, 1, res2, 1, 0) \r\n cv2.imshow(\"Invisible\", final_output) \r\n \r\n #dynamic_cam = static_background()\r\n \r\n #static_cam(frame,gray,good_new,good_old)\r\n \r\n cv2.imshow('my_frame',frame)\r\n \r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n cam.release()\r\n cv2.destroyAllWindows()\r\n " ]
[ [ "numpy.array", "numpy.flip", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Azeirah/multiplot
[ "5a7b10f0ef5ba480fadfc43fc8e7d9717a6ebac5" ]
[ "multiplot.py" ]
[ "import matplotlib.pyplot as plt\n\n\ndef multiplot(plots, *args, **kwargs):\n \"\"\"A convenient way to lay out matplotlib figures\n The first argument is a multiline string which diagrammatically describes\n the desired layout of your plot\n\n @return (figure, grid)\n\n e.g., a simple single plot looks like\n grid1 = '''\n A\n '''\n\n If you want complicated grid layouts, you can do that as well\n\n grid2 = '''\n AA\n AA\n BB\n CC\n CC\n '''\n\n grid3 = '''\n AAABB\n CCCBB\n '''\n\n Make sure that your subfigures have a rectangular shape\n e.g.\n '''\n AA\n BB\n A'''\n Will not work at all.\n\n multiplot takes care of creating the layout using matplotlib's gridspec\n You can access each individual subplot by dictionary key access.\n The figure is exposed as well\n\n fig, mpt = multiplot(grid3)\n mpt[\"A\"].plot(...)\n mpt[\"B\"].set_title(...)\n\n Any arguments you want to pass to plt.figure, you can pass to multiplot right after the grid\n eg\n fig, mpt = multiplot(grid3, figsize=(10, 10), ...)\n\n Easily layout complicated grids\n Easily change its layout\n Complete access to axes and figure\n \"\"\"\n chars = {}\n lines = plots.split(\"\\n\")\n [line for line in lines if len(line) > 0]\n\n for y, line in enumerate(lines):\n for x, char in enumerate(line):\n if char in chars:\n chars[char][1] = (x, y)\n else:\n chars[char] = [(x, y), (x, y)]\n\n plots = {}\n columns = len(lines[0])\n rows = len(lines)\n gridSize = (rows, columns)\n grid = {}\n\n fig = plt.figure(*args, **kwargs)\n for char in chars.keys():\n points = chars[char]\n leftTop = points[0]\n bottomRight = points[1]\n\n width = 1 + bottomRight[0] - leftTop[0]\n height = 1 + bottomRight[1] - leftTop[1]\n\n grid[char] = plt.subplot2grid(\n gridSize, [leftTop[1], leftTop[0]], colspan=width, rowspan=height)\n\n return fig, grid\n\n\n# example\n\n# mtp = multiplot(\n# \"\"\"\n# AABBB\n# AABBB\n# AACCC\"\"\")\n\n# mtp[\"A\"].plot(numpy.arange(0, 100, 1))\n# mtp[\"A\"].set_title(\"\")\n# mtp[\"B\"].plot(numpy.arange(0, 50, 1))\n# mtp[\"B\"].set_title(\"\")\n# mtp[\"C\"].plot(numpy.arange(0, 25, 0.5))\n# mtp[\"C\"].set_title(\"\")\n# plt.show()\n" ]
[ [ "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jakirkham/dask-scheduler-performance
[ "74d4eb7080b351f912a284a9eadb80726858f0e4" ]
[ "nightly-benchmark/nightly-run.py" ]
[ "import os\nimport dask\nimport distributed\nfrom datetime import datetime\nimport numpy as np\nimport time\nfrom dask.distributed import Client, wait, performance_report\nfrom dask.dataframe.shuffle import shuffle\n\nimport xarray as xr\nimport dask.array as dsa\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set(font_scale=1.5, style=\"whitegrid\")\n\n\ntoday = datetime.now().strftime(\"%Y%m%d\")\n\n\ndef main():\n client = Client(n_workers=10, threads_per_worker=1)\n print(client)\n\n df = dask.datasets.timeseries(\n start=\"2000-01-01\",\n end=\"2000-01-31\",\n # end=\"2000-12-31\",\n partition_freq=\"1h\",\n freq=\"60s\",\n )\n df = df.persist()\n wait(df)\n iterations = 10\n\n with performance_report(filename=f\"{today}-simple-scheduler.html\"):\n simple = []\n # print('start simple: ', flush=True)\n for i in range(iterations):\n start = time.time()\n z = df.x + 1 + 2 - df.y\n z.sum().compute()\n stop = time.time()\n simple.append(stop - start)\n simple = np.array(simple)\n\n df2 = None\n with performance_report(filename=f\"{today}-shuffle-scheduler.html\"):\n shuffle_t = []\n # print('start shuffle: ', flush=True)\n for i in range(iterations):\n client.cancel(df2)\n start = time.time()\n # shuffle(df, \"id\", shuffle=\"tasks\")\n df2 = df.set_index(\"id\").persist()\n wait(df2)\n stop = time.time()\n shuffle_t.append(stop - start)\n shuffle_t = np.array(shuffle_t)\n\n with performance_report(filename=f\"{today}-rand-access-scheduler.html\"):\n rand_access = []\n for i in range(iterations):\n start = time.time()\n df2.head()\n stop = time.time()\n rand_access.append(stop - start)\n rand_access = np.array(rand_access)\n data = dsa.random.random((10000, 1000000), chunks=(1, 1000000))\n da = xr.DataArray(data, dims=['time', 'x'],\n coords={'day': ('time', np.arange(10000) % 100)})\n clim = da.groupby('day').mean(dim='time')\n anom = da.groupby('day') - clim\n anom_mean = anom.mean(dim='time')\n with performance_report(filename=f\"{today}-anom-mean-scheduler.html\"):\n anom_mean_t = []\n for i in range(iterations):\n start = time.time()\n anom_mean.compute()\n stop = time.time()\n anom_mean_t.append(stop-start)\n\n anom_mean_t = np.array(anom_mean_t)\n\n return dict(simple=simple, shuffle=shuffle_t, rand_access=rand_access,\n anom_mean=anom_mean_t)\n\nif __name__ == \"__main__\":\n data = main()\n print(f\"Distributed Version: {distributed.__version__}\")\n\n today = datetime.now().strftime(\"%Y%m%d\")\n\n bench_data_name = \"benchmark-historic-runs.csv\"\n bench_image = f\"{today}-benchmark-history.png\"\n if os.path.exists(\"/etc/dgx-release\"):\n bench_data_name = \"dgx-\" + bench_data_name\n bench_image = \"dgx-\" + bench_image\n\n\n for idx, (k, v) in enumerate(data.items()):\n print(k)\n mean = np.format_float_scientific(v.mean(), precision=3)\n std = np.format_float_scientific(v.std(), precision=3)\n with open(bench_data_name, \"a+\") as f:\n f.write(f\"{today},{k},{v.mean()},{v.std()}\\n\")\n\n print(f\"\\t {mean} +/- {std}\")\n\n print(\"\\n\\n## Raw Values\")\n for k, v in data.items():\n print(k)\n print(f\"\\t {v}\")\n\n fig, ax = plt.subplots(1, 4, figsize=(10, 10))\n df = pd.read_csv(\n bench_data_name,\n parse_dates=[\"date\"],\n names=[\"date\", \"operation\", \"avg\", \"std\"],\n )\n ax[0].set_ylabel(\"Time (s)\")\n for idx, (key, group) in enumerate(df.groupby(\"operation\")):\n ax[idx].set_title(f\"{key}\")\n ax[idx].errorbar(group.date, group.avg, yerr=group[\"std\"].values)\n lim = group.avg.max() + group[\"std\"].max()\n 
ax[idx].set_ylim(0, lim)\n plt.setp(ax[idx].get_xticklabels(), rotation=45)\n\n plt.savefig(bench_image)\n" ]
[ [ "pandas.read_csv", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
quid256/OpenFermion
[ "562a03abf501885ee5a792ec3d7d10d91581b938" ]
[ "src/openfermion/transforms/_jordan_wigner_test.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests _jordan_wigner.py.\"\"\"\nfrom __future__ import absolute_import\nimport os\nimport unittest\n\nimport numpy\n\nfrom openfermion.config import DATA_DIRECTORY\nfrom openfermion.hamiltonians import MolecularData, fermi_hubbard\nfrom openfermion.ops import (FermionOperator,\n InteractionOperator,\n normal_ordered,\n QubitOperator)\nfrom openfermion.transforms import (get_diagonal_coulomb_hamiltonian,\n get_fermion_operator,\n get_interaction_operator,\n reverse_jordan_wigner)\nfrom openfermion.utils import hermitian_conjugated, number_operator\nfrom openfermion.utils._testing_utils import (\n random_interaction_operator,\n random_quadratic_hamiltonian)\n\nfrom openfermion.transforms._jordan_wigner import (\n jordan_wigner, jordan_wigner_one_body, jordan_wigner_two_body,\n jordan_wigner_interaction_op)\n\n\nclass JordanWignerTransformTest(unittest.TestCase):\n def setUp(self):\n self.n_qubits = 5\n\n def test_bad_input(self):\n with self.assertRaises(TypeError):\n jordan_wigner(3)\n\n def test_transm_raise3(self):\n raising = jordan_wigner(FermionOperator(((3, 1),)))\n self.assertEqual(len(raising.terms), 2)\n\n correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, -0.5j)\n\n self.assertEqual(raising.terms[correct_operators_x], 0.5)\n self.assertEqual(raising.terms[correct_operators_y], -0.5j)\n self.assertTrue(raising == qtermx + qtermy)\n\n def test_transm_raise1(self):\n raising = jordan_wigner(FermionOperator(((1, 1),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, -0.5j)\n\n self.assertEqual(raising.terms[correct_operators_x], 0.5)\n self.assertEqual(raising.terms[correct_operators_y], -0.5j)\n self.assertTrue(raising == qtermx + qtermy)\n\n def test_transm_lower3(self):\n lowering = jordan_wigner(FermionOperator(((3, 0),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering == qtermx + qtermy)\n\n def test_transm_lower2(self):\n lowering = jordan_wigner(FermionOperator(((2, 0),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering == qtermx + qtermy)\n\n def 
test_transm_lower1(self):\n lowering = jordan_wigner(FermionOperator(((1, 0),)))\n\n correct_operators_x = ((0, 'Z'), (1, 'X'))\n correct_operators_y = ((0, 'Z'), (1, 'Y'))\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering == qtermx + qtermy)\n\n def test_transm_lower0(self):\n lowering = jordan_wigner(FermionOperator(((0, 0),)))\n\n correct_operators_x = ((0, 'X'),)\n correct_operators_y = ((0, 'Y'),)\n qtermx = QubitOperator(correct_operators_x, 0.5)\n qtermy = QubitOperator(correct_operators_y, 0.5j)\n\n self.assertEqual(lowering.terms[correct_operators_x], 0.5)\n self.assertEqual(lowering.terms[correct_operators_y], 0.5j)\n self.assertTrue(lowering == qtermx + qtermy)\n\n def test_transm_raise3lower0(self):\n # recall that creation gets -1j on Y and annihilation gets +1j on Y.\n term = jordan_wigner(FermionOperator(((3, 1), (0, 0))))\n self.assertEqual(term.terms[((0, 'X'), (1, 'Z'), (2, 'Z'), (3, 'Y'))],\n 0.25 * 1 * -1j)\n self.assertEqual(term.terms[((0, 'Y'), (1, 'Z'), (2, 'Z'), (3, 'Y'))],\n 0.25 * 1j * -1j)\n self.assertEqual(term.terms[((0, 'Y'), (1, 'Z'), (2, 'Z'), (3, 'X'))],\n 0.25 * 1j * 1)\n self.assertEqual(term.terms[((0, 'X'), (1, 'Z'), (2, 'Z'), (3, 'X'))],\n 0.25 * 1 * 1)\n\n def test_transm_number(self):\n n = number_operator(self.n_qubits, 3)\n n_jw = jordan_wigner(n)\n self.assertEqual(n_jw.terms[((3, 'Z'),)], -0.5)\n self.assertEqual(n_jw.terms[()], 0.5)\n self.assertEqual(len(n_jw.terms), 2)\n\n def test_ccr_offsite_even_ca(self):\n c2 = FermionOperator(((2, 1),))\n a4 = FermionOperator(((4, 0),))\n\n self.assertTrue(normal_ordered(c2 * a4) ==\n normal_ordered(-a4 * c2))\n self.assertTrue(jordan_wigner(c2 * a4) ==\n jordan_wigner(-a4 * c2))\n\n def test_ccr_offsite_odd_ca(self):\n c1 = FermionOperator(((1, 1),))\n a4 = FermionOperator(((4, 0),))\n self.assertTrue(normal_ordered(c1 * a4) ==\n normal_ordered(-a4 * c1))\n\n self.assertTrue(jordan_wigner(c1 * a4) ==\n jordan_wigner(-a4 * c1))\n\n def test_ccr_offsite_even_cc(self):\n c2 = FermionOperator(((2, 1),))\n c4 = FermionOperator(((4, 1),))\n self.assertTrue(normal_ordered(c2 * c4) ==\n normal_ordered(-c4 * c2))\n\n self.assertTrue(jordan_wigner(c2 * c4) ==\n jordan_wigner(-c4 * c2))\n\n def test_ccr_offsite_odd_cc(self):\n c1 = FermionOperator(((1, 1),))\n c4 = FermionOperator(((4, 1),))\n self.assertTrue(normal_ordered(c1 * c4) ==\n normal_ordered(-c4 * c1))\n\n self.assertTrue(jordan_wigner(c1 * c4) ==\n jordan_wigner(-c4 * c1))\n\n def test_ccr_offsite_even_aa(self):\n a2 = FermionOperator(((2, 0),))\n a4 = FermionOperator(((4, 0),))\n self.assertTrue(normal_ordered(a2 * a4) ==\n normal_ordered(-a4 * a2))\n\n self.assertTrue(jordan_wigner(a2 * a4) ==\n jordan_wigner(-a4 * a2))\n\n def test_ccr_offsite_odd_aa(self):\n a1 = FermionOperator(((1, 0),))\n a4 = FermionOperator(((4, 0),))\n self.assertTrue(normal_ordered(a1 * a4) ==\n normal_ordered(-a4 * a1))\n\n self.assertTrue(jordan_wigner(a1 * a4) ==\n jordan_wigner(-a4 * a1))\n\n def test_ccr_onsite(self):\n c1 = FermionOperator(((1, 1),))\n a1 = hermitian_conjugated(c1)\n self.assertTrue(normal_ordered(c1 * a1) ==\n FermionOperator(()) - normal_ordered(a1 * c1))\n self.assertTrue(jordan_wigner(c1 * a1) ==\n QubitOperator(()) - jordan_wigner(a1 * c1))\n\n def test_jordan_wigner_transm_op(self):\n n = number_operator(self.n_qubits)\n n_jw = 
jordan_wigner(n)\n self.assertEqual(self.n_qubits + 1, len(n_jw.terms))\n self.assertEqual(self.n_qubits / 2., n_jw.terms[()])\n for qubit in range(self.n_qubits):\n operators = ((qubit, 'Z'),)\n self.assertEqual(n_jw.terms[operators], -0.5)\n\n\nclass InteractionOperatorsJWTest(unittest.TestCase):\n\n def setUp(self):\n self.n_qubits = 5\n self.constant = 0.\n self.one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)\n self.two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n self.interaction_operator = InteractionOperator(self.constant,\n self.one_body,\n self.two_body)\n\n def test_consistency(self):\n \"\"\"Test consistency with JW for FermionOperators.\"\"\"\n # Random interaction operator\n n_qubits = 5\n iop = random_interaction_operator(n_qubits)\n op1 = jordan_wigner(iop)\n op2 = jordan_wigner(get_fermion_operator(iop))\n\n self.assertEqual(op1, op2)\n\n # Interaction operator from molecule\n geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]\n basis = 'sto-3g'\n multiplicity = 1\n\n filename = os.path.join(DATA_DIRECTORY, 'H1-Li1_sto-3g_singlet_1.45')\n molecule = MolecularData(geometry, basis, multiplicity,\n filename=filename)\n molecule.load()\n\n iop = molecule.get_molecular_hamiltonian()\n op1 = jordan_wigner(iop)\n op2 = jordan_wigner(get_fermion_operator(iop))\n\n self.assertEqual(op1, op2)\n\n def test_jordan_wigner_one_body(self):\n # Make sure it agrees with jordan_wigner(FermionTerm).\n for p in range(self.n_qubits):\n for q in range(self.n_qubits):\n # Get test qubit operator.\n test_operator = jordan_wigner_one_body(p, q)\n\n # Get correct qubit operator.\n fermion_term = FermionOperator(((p, 1), (q, 0)))\n correct_op = jordan_wigner(fermion_term)\n hermitian_conjugate = hermitian_conjugated(fermion_term)\n if not fermion_term == hermitian_conjugate:\n correct_op += jordan_wigner(hermitian_conjugate)\n\n self.assertTrue(test_operator == correct_op)\n\n def test_jordan_wigner_two_body(self):\n # Make sure it agrees with jordan_wigner(FermionTerm).\n for p in range(self.n_qubits):\n for q in range(self.n_qubits):\n for r in range(self.n_qubits):\n for s in range(self.n_qubits):\n # Get test qubit operator.\n test_operator = jordan_wigner_two_body(p, q, r, s)\n\n # Get correct qubit operator.\n fermion_term = FermionOperator(((p, 1), (q, 1),\n (r, 0), (s, 0)))\n correct_op = jordan_wigner(fermion_term)\n hermitian_conjugate = hermitian_conjugated(\n fermion_term)\n if not fermion_term == hermitian_conjugate:\n if p == r and q == s:\n pass\n else:\n correct_op += jordan_wigner(\n hermitian_conjugate)\n\n self.assertTrue(test_operator == correct_op,\n str(test_operator - correct_op))\n\n def test_jordan_wigner_twobody_interaction_op_allunique(self):\n test_op = FermionOperator('1^ 2^ 3 4')\n test_op += hermitian_conjugated(test_op)\n\n retransformed_test_op = reverse_jordan_wigner(jordan_wigner(\n get_interaction_operator(test_op)))\n\n self.assertTrue(normal_ordered(retransformed_test_op) ==\n normal_ordered(test_op))\n\n def test_jordan_wigner_twobody_interaction_op_reversal_symmetric(self):\n test_op = FermionOperator('1^ 2^ 2 1')\n test_op += hermitian_conjugated(test_op)\n self.assertTrue(jordan_wigner(test_op) ==\n jordan_wigner(get_interaction_operator(test_op)))\n\n def test_jordan_wigner_interaction_op_too_few_n_qubits(self):\n with self.assertRaises(ValueError):\n jordan_wigner_interaction_op(self.interaction_operator,\n self.n_qubits - 2)\n\n def test_jordan_wigner_interaction_op_with_zero_term(self):\n 
test_op = FermionOperator('1^ 2^ 3 4')\n test_op += hermitian_conjugated(test_op)\n\n interaction_op = get_interaction_operator(test_op)\n interaction_op.constant = 0.0\n\n retransformed_test_op = reverse_jordan_wigner(jordan_wigner(\n interaction_op))\n\n\nclass GetInteractionOperatorTest(unittest.TestCase):\n\n def setUp(self):\n self.n_qubits = 5\n self.constant = 0.\n self.one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)\n self.two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n\n def test_get_interaction_operator_identity(self):\n interaction_operator = InteractionOperator(-2j, self.one_body,\n self.two_body)\n qubit_operator = jordan_wigner(interaction_operator)\n self.assertTrue(qubit_operator == -2j * QubitOperator(()))\n self.assertEqual(interaction_operator,\n get_interaction_operator(reverse_jordan_wigner(\n qubit_operator), self.n_qubits))\n\n def test_get_interaction_operator_one_body(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('2^ 2'), self.n_qubits)\n one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)\n one_body[2, 2] = 1.\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, one_body, self.two_body))\n\n def test_get_interaction_operator_one_body_twoterm(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('2^ 3', -2j) + FermionOperator('3^ 2', 3j),\n self.n_qubits)\n one_body = numpy.zeros((self.n_qubits, self.n_qubits), complex)\n one_body[2, 3] = -2j\n one_body[3, 2] = 3j\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, one_body, self.two_body))\n\n def test_get_interaction_operator_two_body(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('2^ 2 3^ 4'), self.n_qubits)\n two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n two_body[3, 2, 4, 2] = -1.\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, self.one_body, two_body))\n\n def test_get_interaction_operator_two_body_distinct(self):\n interaction_operator = get_interaction_operator(\n FermionOperator('0^ 1^ 2 3'), self.n_qubits)\n two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n two_body[1, 0, 3, 2] = 1.\n self.assertEqual(interaction_operator,\n InteractionOperator(0.0, self.one_body, two_body))\n\n\nclass JordanWignerDiagonalCoulombHamiltonianTest(unittest.TestCase):\n\n def test_hubbard(self):\n x_dim = 4\n y_dim = 5\n tunneling = 2.\n coulomb = 3.\n chemical_potential = 7.\n magnetic_field = 11.\n periodic = False\n\n hubbard_model = fermi_hubbard(x_dim, y_dim, tunneling, coulomb,\n chemical_potential, magnetic_field,\n periodic)\n\n self.assertTrue(\n jordan_wigner(hubbard_model) ==\n jordan_wigner(get_diagonal_coulomb_hamiltonian(hubbard_model)))\n\n def test_random_quadratic(self):\n n_qubits = 5\n quad_ham = random_quadratic_hamiltonian(n_qubits, True)\n ferm_op = get_fermion_operator(quad_ham)\n self.assertTrue(\n jordan_wigner(ferm_op) ==\n jordan_wigner(get_diagonal_coulomb_hamiltonian(ferm_op)))\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IvyGao58/Pronoun-Coref
[ "48067a82553bdd25ccf47328cf1f0a3ed5bdc970" ]
[ "elmoForManyLangs/__main__.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport os\nimport codecs\nimport argparse\nimport logging\nimport json\nimport sys\n\nimport torch\n\nsys.path.append('../')\nfrom elmoformanylangs.modules.embedding_layer import EmbeddingLayer\nfrom elmoformanylangs.utils import dict2namedtuple\nfrom elmoformanylangs.frontend import Model\nfrom elmoformanylangs.frontend import create_batches\nimport numpy as np\nimport h5py\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(levelname)s: %(message)s')\n\n\ndef read_corpus(path, max_chars=None):\n \"\"\"\n read raw text file. The format of the input is like, one sentence per line\n words are separated by '\\t'\n\n :param path:\n :param max_chars: int, the number of maximum characters in a word, this\n parameter is used when the model is configured with CNN word encoder.\n :return:\n \"\"\"\n dataset = []\n textset = []\n with codecs.open(path, 'r', encoding='utf-8') as fin:\n for line in fin.read().strip().split('\\n'):\n data = ['<bos>']\n text = []\n for token in line.split('\\t'):\n text.append(token)\n if max_chars is not None and len(token) + 2 > max_chars:\n token = token[:max_chars - 2]\n data.append(token)\n data.append('<eos>')\n dataset.append(data)\n textset.append(text)\n return dataset, textset\n\n\ndef read_conll_corpus(path, max_chars=None):\n \"\"\"\n read text in CoNLL-U format.\n\n :param path:\n :param max_chars:\n :return:\n \"\"\"\n dataset = []\n textset = []\n titles = []\n indices = []\n with codecs.open(path, 'r', encoding='utf-8') as fin:\n for payload in fin.read().strip().split('\\r\\n\\r\\n\\r\\n'):\n data = ['<bos>']\n text = []\n body = []\n lines = payload.splitlines()\n\n # save content, get title\n for line in lines:\n if line.startswith('#'):\n title = line.replace('#', '').strip()\n titles.append(title)\n else:\n body.append(line)\n\n # save tokens\n idxes = [0]\n _num = -1\n for i, line in enumerate(body):\n if line == '':\n idxes.append(_num + 1)\n continue\n _num += 1\n fields = line.split('\\t')\n num, token = fields[0], fields[1]\n if '-' in num or '.' in num:\n continue\n text.append(token)\n if max_chars is not None and len(token) + 2 > max_chars:\n token = token[:max_chars - 2]\n data.append(token)\n\n idxes.append(_num+1)\n data.append('<eos>')\n dataset.append(data)\n textset.append(text)\n indices.append([x for x in idxes])\n return titles, indices, dataset, textset\n\n\ndef read_conll_char_corpus(path, max_chars=None):\n \"\"\"\n\n :param path:\n :param max_chars:\n :return:\n \"\"\"\n dataset = []\n textset = []\n with codecs.open(path, 'r', encoding='utf-8') as fin:\n for payload in fin.read().strip().split('\\n\\n'):\n data = ['<bos>']\n text = []\n lines = payload.splitlines()\n body = [line for line in lines if not line.startswith('#')]\n for line in body:\n fields = line.split('\\t')\n num, token = fields[0], fields[1]\n if '-' in num or '.' 
in num:\n continue\n for ch in token:\n text.append(ch)\n if max_chars is not None and len(ch) + 2 > max_chars:\n ch = ch[:max_chars - 2]\n data.append(ch)\n data.append('<eos>')\n dataset.append(data)\n textset.append(text)\n return dataset, textset\n\n\ndef read_conll_char_vi_corpus(path, max_chars=None):\n \"\"\"\n\n :param path:\n :param max_chars:\n :return:\n \"\"\"\n dataset = []\n textset = []\n with codecs.open(path, 'r', encoding='utf-8') as fin:\n for payload in fin.read().strip().split('\\n\\n'):\n data = ['<bos>']\n text = []\n lines = payload.splitlines()\n body = [line for line in lines if not line.startswith('#')]\n for line in body:\n fields = line.split('\\t')\n num, token = fields[0], fields[1]\n if '-' in num or '.' in num:\n continue\n for ch in token.split():\n text.append(ch)\n if max_chars is not None and len(ch) + 2 > max_chars:\n ch = ch[:max_chars - 2]\n data.append(ch)\n data.append('<eos>')\n dataset.append(data)\n textset.append(text)\n return dataset, textset\n\n\ndef test_main():\n # Configurations\n cmd = argparse.ArgumentParser('The testing components of')\n cmd.add_argument('--gpu', default=-1, type=int, help='use id of gpu, -1 if cpu.')\n cmd.add_argument('--input_format', default='conll', choices=('plain', 'conll', 'conll_char', 'conll_char_vi'),\n help='the input format.')\n cmd.add_argument(\"--input\", default='../data/law/conll.p2s.txt', help=\"the path to the raw text file.\")\n cmd.add_argument(\"--output_format\", default='hdf5', help='the output format. Supported format includes (hdf5, txt).'\n ' Use comma to separate the format identifiers,'\n ' like \\'--output_format=hdf5,plain\\'')\n cmd.add_argument(\"--output_prefix\", help='the prefix of the output file. The output file is in the format of '\n '<output_prefix>.<output_layer>.<output_format>')\n cmd.add_argument(\"--output_layer\", default='0,1,2,-1,-2', help='the target layer to output. 
0 for the word encoder,'\n ' 1 for the first LSTM hidden layer, 2 for the second LSTM hidden layer, -1 for an average'\n 'of 3 layers.')\n cmd.add_argument(\"--model\", required=True, help=\"the path to the model.\")\n cmd.add_argument(\"--batch_size\", \"--batch\", type=int, default=5, help='the batch size.')\n args = cmd.parse_args(sys.argv[2:])\n\n if args.gpu >= 0:\n torch.cuda.set_device(args.gpu)\n use_cuda = args.gpu >= 0 and torch.cuda.is_available()\n # load the model configurations\n args2 = dict2namedtuple(json.load(codecs.open(os.path.join(args.model, 'config.json'), 'r', encoding='utf-8')))\n\n with open(os.path.join(args.model, args2.config_path), 'r') as fin:\n config = json.load(fin)\n\n # For the model trained with character-based word encoder.\n if config['token_embedder']['char_dim'] > 0:\n char_lexicon = {}\n with codecs.open(os.path.join(args.model, 'char.dic'), 'r', encoding='utf-8') as fpi:\n for line in fpi:\n tokens = line.strip().split('\\t')\n if len(tokens) == 1:\n tokens.insert(0, '\\u3000')\n token, i = tokens\n char_lexicon[token] = int(i)\n char_emb_layer = EmbeddingLayer(config['token_embedder']['char_dim'], char_lexicon, fix_emb=False, embs=None)\n logging.info('char embedding size: ' + str(len(char_emb_layer.word2id)))\n else:\n char_lexicon = None\n char_emb_layer = None\n\n # For the model trained with word form word encoder.\n if config['token_embedder']['word_dim'] > 0:\n word_lexicon = {}\n with codecs.open(os.path.join(args.model, 'word.dic'), 'r', encoding='utf-8') as fpi:\n for line in fpi:\n tokens = line.strip().split('\\t')\n if len(tokens) == 1:\n tokens.insert(0, '\\u3000')\n token, i = tokens\n word_lexicon[token] = int(i)\n word_emb_layer = EmbeddingLayer(config['token_embedder']['word_dim'], word_lexicon, fix_emb=False, embs=None)\n logging.info('word embedding size: ' + str(len(word_emb_layer.word2id)))\n else:\n word_lexicon = None\n word_emb_layer = None\n\n # instantiate the model\n model = Model(config, word_emb_layer, char_emb_layer, use_cuda)\n\n if use_cuda:\n model.cuda()\n\n model.load_model(args.model)\n\n # read test data according to input format\n read_function = read_corpus if args.input_format == 'plain' else (\n read_conll_corpus if args.input_format == 'conll' else (\n read_conll_char_corpus if args.input_format == 'conll_char' else read_conll_char_vi_corpus))\n\n if config['token_embedder']['name'].lower() == 'cnn':\n titles, indices, test, text = read_function(args.input, config['token_embedder']['max_characters_per_token'])\n else:\n test, text = read_function(args.input)\n\n # create test batches from the input data.\n test_w, test_c, test_lens, test_masks, test_text, test_title, test_indices = create_batches(titles, indices,\n test, args.batch_size, word_lexicon, char_lexicon, config, text=text)\n\n # configure the model to evaluation mode.\n model.eval()\n\n sent_set = set()\n cnt = 0\n\n output_formats = args.output_format.split(',')\n output_layers = map(int, args.output_layer.split(','))\n\n handlers = {}\n for output_format in output_formats:\n if output_format not in ('hdf5', 'txt'):\n print('Unknown output_format: {0}'.format(output_format))\n continue\n for output_layer in output_layers:\n filename = '{0}.ly{1}.{2}'.format(args.output_prefix, output_layer, output_format)\n handlers[output_format, output_layer] = \\\n h5py.File(filename, 'w') if output_format == 'hdf5' else open(filename, 'w')\n\n with h5py.File(\"elmo_chinese_cache_p2s.hdf5\", \"a\") as out_file:\n count = 0\n for w, c, lens, masks, texts, 
title, indice in zip(test_w, test_c, test_lens, test_masks, test_text, test_title,\n test_indices):\n output = model.forward(w, c, masks) # [3, 5, 247, 1024]\n for i, text in enumerate(texts):\n sent = '\\t'.join(text)\n sent = sent.replace('.', '$period$')\n sent = sent.replace('/', '$backslash$')\n if sent in sent_set:\n continue\n sent_set.add(sent)\n if config['encoder']['name'].lower() == 'lstm':\n data = output[i, 1:lens[i] - 1, :].data\n if use_cuda:\n data = data.cpu()\n data = data.numpy()\n elif config['encoder']['name'].lower() == 'elmo':\n data = output[:, i, 1:lens[i] - 1, :].data # [3, 5, 1024]\n if use_cuda:\n data = data.cpu()\n data = data.numpy()\n\n word_emb = None\n lstm1_emb = None\n lstm2_emb = None\n\n for (output_format, output_layer) in handlers:\n if output_layer == -1:\n payload = np.average(data, axis=0)\n else:\n if output_layer == 0:\n word_emb = data[output_layer] # [579, 1024]\n if output_layer == 1:\n lstm1_emb = data[output_layer] # [579, 1024]\n if output_layer == 2:\n lstm2_emb = data[output_layer] # [579, 1024]\n payload = data[output_layer]\n\n # lm_emb = torch.stack([torch.cat([word_emb, word_emb], -1), lstm1_emb, lstm2_emb], -1)\n lm_emb = np.stack([word_emb, lstm1_emb, lstm2_emb], -1) # [579, 1024, 3]\n\n cur_tile = title[i].strip()\n if cur_tile in out_file:\n print('{} exist.'.format(cur_tile))\n continue\n\n group = out_file.create_group(cur_tile)\n\n if len(indice[i]) == 1:\n group[str(0)] = lm_emb\n else:\n begin = None\n for j, index in enumerate(indice[i]):\n if begin is None:\n begin = index\n else:\n group[str(j - 1)] = lm_emb[begin:index, :, :]\n begin = index\n\n # out_file.create_dataset(title[i], data=lm_emb)\n\n count += 1\n print(\"Cached {} documents. Title {}\".format(count, cur_tile))\n\n\ndef tokens2sent(tokens):\n sents = []\n cur_sent = []\n for token in tokens:\n if token != '':\n cur_sent.append(token)\n else:\n if len(cur_sent):\n sents.append(cur_sent)\n return sents\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1 and sys.argv[1] == 'test':\n test_main()\n else:\n print('Usage: {0} [test] [options]'.format(sys.argv[0]), file=sys.stderr)\n" ]
[ [ "torch.cuda.is_available", "numpy.stack", "torch.cuda.set_device", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arpastrana/compas
[ "ed677a162c14dbe562c82d72f370279259faf7da" ]
[ "src/compas/numerical/drx/drx_numba.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom numpy import arccos\nfrom numpy import array\nfrom numpy import isnan\nfrom numpy import mean\nfrom numpy import sin\nfrom numpy import sqrt\nfrom numpy import sum\nfrom numpy import zeros\n\nfrom numba import guvectorize\nfrom numba import f8\nfrom numba import i4\nfrom numba import i8\n\nfrom numba import jit\n\ntry:\n from numba import prange\nexcept ImportError:\n prange = range\n\nfrom compas.numerical import uvw_lengths\n\nfrom compas.numerical.drx.drx_numpy import _beam_data\nfrom compas.numerical.drx.drx_numpy import _create_arrays\n\n# from compas_hpc.geometry import cross_vectors_numba as cross\n# from compas_hpc.geometry import dot_vectors_numba as dot\n# from compas_hpc.geometry import length_vector_numba as length\n\nfrom time import time\n\n\n__all__ = [\n 'drx_numba',\n]\n\n\n@jit(f8(f8[:]), nogil=True, nopython=True, parallel=False, cache=True)\ndef length(a):\n \"\"\"Calculate the length of a vector.\n\n Parameters\n ----------\n a : array\n XYZ components of the vector.\n\n Returns\n -------\n float: The length of the vector.\n \"\"\"\n return sqrt(a[0]**2 + a[1]**2 + a[2]**2)\n\n\n@jit(f8(f8[:], f8[:]), nogil=True, nopython=True, parallel=False, cache=True)\ndef dot(u, v):\n \"\"\"Compute the dot product of two vectors.\n\n Parameters\n ----------\n u : array\n XYZ components of the first vector.\n v : array\n XYZ components of the second vector.\n\n Returns\n -------\n float\n u . v.\n \"\"\"\n return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]\n\n\n@jit(f8[:](f8[:], f8[:]), nogil=True, nopython=True, parallel=False, cache=True)\ndef cross(u, v):\n \"\"\"Compute the cross product of two vectors.\n\n Parameters\n ----------\n u : array\n XYZ components of the first vector.\n v : array\n XYZ components of the second vector.\n\n Returns\n -------\n array\n u X v.\n \"\"\"\n w = zeros(3)\n w[0] = u[1] * v[2] - u[2] * v[1]\n w[1] = u[2] * v[0] - u[0] * v[2]\n w[2] = u[0] * v[1] - u[1] * v[0]\n return w\n\n\ndef _args(network, factor, summary, steps, tol):\n X, B, P, S, V, E, A, C, Ct, f0, l0, ind_c, ind_t, u, v, M, k0, m, n, rows, cols, vals, nv = _create_arrays(network)\n inds, indi, indf, EIx, EIy, beams = _beam_data(network)\n if not ind_c:\n ind_c = [-1]\n if not ind_t:\n ind_t = [-1]\n ind_c = array(ind_c)\n ind_t = array(ind_t)\n return tol, steps, summary, m, n, u, v, X, f0, l0, k0, ind_c, ind_t, B, P, S, rows, cols, vals, nv, M, factor, V, inds, indi, indf, EIx, EIy, beams, C\n\n\ndef drx_numba(network, factor=1.0, tol=0.1, steps=10000, summary=0, update=False):\n \"\"\" Run Numba accelerated dynamic relaxation analysis.\n\n Parameters\n ----------\n network : obj\n Network to analyse.\n factor : float\n Convergence factor.\n tol : float\n Tolerance value.\n steps : int\n Maximum number of steps.\n summary : int\n Print summary at end (1:yes or 0:no).\n update : bool\n Update the co-ordinates of the Network.\n\n Returns\n -------\n array\n Vertex co-ordinates.\n array\n Edge forces.\n array\n Edge lengths.\n \"\"\"\n # Setup\n tic1 = time()\n args = _args(network, factor, summary, steps, tol)\n toc1 = time() - tic1\n\n # Solver\n tic2 = time()\n tol, steps, summary, m, n, u, v, X, f0, l0, k0, ind_c, ind_t, B, P, S, rows, cols, vals, nv, M, factor, V, inds, indi, indf, EIx, EIy, beams, C = args\n drx_solver_numba(tol, steps, summary, m, n, u, v, X, f0, l0, k0, ind_c, ind_t, B, P, S, rows, cols, vals, nv,\n M, factor, V, inds, indi, indf, EIx, EIy, 
beams)\n _, l = uvw_lengths(C, X) # noqa: E741\n f = f0 + k0 * (l.ravel() - l0)\n toc2 = time() - tic2\n\n # Summary\n if summary:\n print('\\n\\nNumba DR -------------------')\n print('Setup time: {0:.3f} s'.format(toc1))\n print('Solver time: {0:.3f} s'.format(toc2))\n print('----------------------------------')\n\n # Update\n if update:\n k_i = network.key_index()\n uv_i = network.uv_index()\n for key in network.vertices():\n x, y, z = X[k_i[key], :]\n network.set_vertex_attributes(key, 'xyz', [x, y, z])\n for uv in network.edges():\n i = uv_i[uv]\n network.set_edge_attribute(uv, 'f', float(f[i]))\n\n return X, f, l\n\n\n@guvectorize([(f8, i8, i8, i8, i8, i4[:], i4[:], f8[:, :], f8[:], f8[:], f8[:], i8[:], i8[:], f8[:, :], f8[:, :],\n f8[:, :], i4[:], i4[:], f8[:], i8, f8[:], f8, f8[:, :], i4[:], i4[:], i4[:], f8[:], f8[:], i8, f8)],\n '(),(),(),(),(),(m),(m),(n,p),(m),(m),(m),(a),(b),(n,p),(n,p),(n,p),(c),(c),(c),(),(n),(),(n,p),(k),(k),(k),(k),(k),()->()',\n nopython=True, cache=True, target='parallel')\ndef drx_solver_numba(tol, steps, summary, m, n, u, v, X, f0, l0, k0, ind_c, ind_t, B, P, S, rows, cols, vals, nv,\n M, factor, V, inds, indi, indf, EIx, EIy, beams, out):\n \"\"\"Numba accelerated dynamic relaxation solver.\n\n Parameters\n ----------\n tol : float\n Tolerance value.\n steps : int\n Maximum number of steps.\n summary : int\n Print summary 1 or 0.\n m : int\n Number of edges.\n n : int\n Number of vertices.\n u : array\n Network edges' start points.\n v : array\n Network edges' end points.\n X : array\n Nodal co-ordinates.\n f0 : array\n Initial edge forces.\n l0 : array\n Initial edge lengths.\n k0 : array\n Initial edge axial stiffnesses.\n ind_c : array\n Indices of compression only edges.\n ind_t : array\n Indices of tension only edges.\n B : array\n Constraint conditions Bx, By, Bz.\n P : array\n Nodal loads Px, Py, Pz.\n S : array\n Shear forces Sx, Sy, Sz.\n rows : array\n Edge adjacencies (rows).\n cols : array\n Edge adjacencies (columns).\n vals : array\n Edge adjacencies (values).\n nv : int\n Length of rows, cols and vals.\n M : array\n Mass matrix.\n factor : float\n Convergence factor.\n V : array\n Nodal velocities.\n inds : array\n Indices of beam element start nodes.\n indi : array\n Indices of beam element intermediate nodes.\n indf : array\n Indices of beam element finish nodes beams.\n EIx : array\n Nodal EIx flexural stiffnesses.\n EIy : array\n Nodal EIy flexural stiffnesses.\n beams : int\n Beam analysis on: 1 or off: 0.\n \"\"\"\n f = zeros(m)\n fx = zeros(m)\n fy = zeros(m)\n fz = zeros(m)\n frx = zeros(n)\n fry = zeros(n)\n frz = zeros(n)\n Rn = zeros(n)\n Una = zeros(n)\n\n res = 1000 * tol\n ts, Uo = 0, 0\n\n while (ts <= steps) and (res > tol):\n\n for i in range(m):\n xd = X[v[i], 0] - X[u[i], 0]\n yd = X[v[i], 1] - X[u[i], 1]\n zd = X[v[i], 2] - X[u[i], 2]\n l = sqrt(xd**2 + yd**2 + zd**2) # noqa: E741\n f[i] = f0[i] + k0[i] * (l - l0[i])\n q = f[i] / l\n fx[i] = xd * q\n fy[i] = yd * q\n fz[i] = zd * q\n\n if ind_t[0] != -1:\n for i in ind_t:\n if f[i] < 0:\n fx[i] = 0\n fy[i] = 0\n fz[i] = 0\n\n if ind_c[0] != -1:\n for i in ind_c:\n if f[i] > 0:\n fx[i] = 0\n fy[i] = 0\n fz[i] = 0\n\n if beams:\n S *= 0\n for i in range(len(inds)):\n Xs = X[inds[i], :]\n Xi = X[indi[i], :]\n Xf = X[indf[i], :]\n Qa = Xi - Xs\n Qb = Xf - Xi\n Qc = Xf - Xs\n Qn = cross(Qa, Qb)\n\n mu = 0.5 * (Xf - Xs)\n La = length(Qa)\n Lb = length(Qb)\n Lc = length(Qc)\n LQn = length(Qn)\n Lmu = length(mu)\n\n a = arccos((La**2 + Lb**2 - Lc**2) / (2 * La * Lb))\n k = 2 
* sin(a) / Lc\n ex = Qn / LQn\n ez = mu / Lmu\n ey = cross(ez, ex)\n\n K = k * Qn / LQn\n Kx = dot(K, ex) * ex\n Ky = dot(K, ey) * ey\n Mc = EIx[i] * Kx + EIy[i] * Ky\n cma = cross(Mc, Qa)\n cmb = cross(Mc, Qb)\n ua = cma / length(cma)\n ub = cmb / length(cmb)\n c1 = cross(Qa, ua)\n c2 = cross(Qb, ub)\n Lc1 = length(c1)\n Lc2 = length(c2)\n Ms = Mc[0]**2 + Mc[1]**2 + Mc[2]**2\n\n Sa = ua * Ms * Lc1 / (La * dot(Mc, c1))\n Sb = ub * Ms * Lc2 / (Lb * dot(Mc, c2))\n\n if isnan(Sa).any() or isnan(Sb).any():\n pass\n else:\n S[inds[i], :] += Sa\n S[indi[i], :] -= Sa + Sb\n S[indf[i], :] += Sb\n\n frx *= 0\n fry *= 0\n frz *= 0\n\n for i in range(nv):\n frx[rows[i]] += vals[i] * fx[cols[i]]\n fry[rows[i]] += vals[i] * fy[cols[i]]\n frz[rows[i]] += vals[i] * fz[cols[i]]\n\n for i in range(n):\n Rx = (P[i, 0] - S[i, 0] - frx[i]) * B[i, 0]\n Ry = (P[i, 1] - S[i, 1] - fry[i]) * B[i, 1]\n Rz = (P[i, 2] - S[i, 2] - frz[i]) * B[i, 2]\n Rn[i] = sqrt(Rx**2 + Ry**2 + Rz**2)\n\n Mi = M[i] * factor\n V[i, 0] += Rx / Mi\n V[i, 1] += Ry / Mi\n V[i, 2] += Rz / Mi\n Una[i] = Mi * (V[i, 0]**2 + V[i, 1]**2 + V[i, 2]**2)\n\n Un = sum(Una)\n\n if Un < Uo:\n V *= 0\n Uo = Un\n\n # X += V\n for i in range(n):\n X[i, 0] += V[i, 0]\n X[i, 1] += V[i, 1]\n X[i, 2] += V[i, 2]\n\n res = mean(Rn)\n\n # # refresh\n # if refresh:\n # if (ts % refresh == 0) or (res < tol):\n # print('Step:{0} Residual:{1:.3f}'.format(ts, res))\n # if callback:\n # callback(X, **kwargs)\n\n ts += 1\n\n if summary:\n print('Step:', ts - 1, ' Residual:', res)\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "numpy.sqrt", "numpy.isnan", "numpy.arccos", "numpy.sin", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sdomanskyi/decneo
[ "c3b78d7cb24fbecde317850ea5068394029a7d03" ]
[ "scripts/demo.py" ]
[ "import pandas as pd\nfrom decneo.analysisPipeline import process\n\ndemoData = '/mnt/home/domansk6/Projects/Endothelial/scripts/demo/VoightChoroid4567RemappedData.h5'\n\nif __name__ == '__main__':\n\n wdir = '/mnt/scratch/domansk6/DECNEOdemo/'\n\n process(pd.read_hdf(demoData, key='dfa'), # Endothelial cells\n pd.read_hdf(demoData, key='dfb'), # Non-endothelial cells\n None, None, # Comparison dataset is provided\n wdir, # Working directory\n wdir+'fromPanglaoDBmouseAllbyDCS/', # Comparison dataset \n parallelBootstrap=True, # Set False if RAM is limited\n exprCutoff1=0.01, # Gene expression cutoff\n perEachOtherCase=False) # Comparison mode setting" ]
[ [ "pandas.read_hdf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
lianyfei/bert-utils
[ "5de95a459146482a27deae36464e95a24dfe2bcf" ]
[ "run_classifier_exporter.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport os\nimport modeling\nimport optimization\nimport tokenization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\nflags.DEFINE_string(\n \"export_dir\", None,\n \"The dir where the exported model will be written.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_bool(\n \"do_export\", False,\n \"Whether to export the model.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\n\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = 
\"train-%d\" % (i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = tokenization.convert_to_unicode(\"contradiction\")\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"dev-%d\" % (i)\n language = tokenization.convert_to_unicode(line[0])\n if language != tokenization.convert_to_unicode(self.language):\n continue\n text_a = tokenization.convert_to_unicode(line[6])\n text_b = tokenization.convert_to_unicode(line[7])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == 
\"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass ChineseDataProcessor(DataProcessor):\n def get_train_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')\n\n def get_dev_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')\n\n def get_test_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')\n\n def get_labels(self):\n return ['0', '1']\n\n def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = '%s-%s' % (set_type, i)\n if set_type == 'test':\n text_a = tokenization.convert_to_unicode(line[-1])\n label = '0'\n else:\n text_a = tokenization.convert_to_unicode(line[-1])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . 
[SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature\n\n\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n\n\ndef file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], 
tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)\n\n\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n 
\"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=probabilities, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features\n\n\ndef serving_input_fn():\n label_ids = tf.placeholder(tf.int32, [None], name='label_ids')\n input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')\n input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')\n segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'label_ids': label_ids,\n 'input_ids': input_ids,\n 'input_mask': input_mask,\n 'segment_ids': segment_ids,\n })()\n return input_fn\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"xnli\": XnliProcessor,\n \"chinese\": ChineseDataProcessor,\n }\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n\n label_list = processor.get_labels()\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n 
train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(FLAGS.data_dir)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n train_file = os.path.join(FLAGS.output_dir, \"train.tf_record\")\n file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n train_input_fn = file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\n eval_file = os.path.join(FLAGS.output_dir, \"eval.tf_record\")\n file_based_convert_examples_to_features(\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Num examples = %d\", len(eval_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n # Eval will be slightly WRONG on the TPU because it will truncate\n # the last batch.\n eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder)\n\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(FLAGS.data_dir)\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n file_based_convert_examples_to_features(predict_examples, label_list,\n FLAGS.max_seq_length, tokenizer,\n predict_file)\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\" Num examples = %d\", len(predict_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n if FLAGS.use_tpu:\n # Warning: 
According to tpu_estimator.py Prediction on TPU is an\n # experimental feature and hence not supported here\n raise ValueError(\"Prediction in TPU not supported\")\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder)\n\n result = estimator.predict(input_fn=predict_input_fn)\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as writer:\n tf.logging.info(\"***** Predict results *****\")\n for prediction in result:\n output_line = \"\\t\".join(\n str(class_probability) for class_probability in prediction) + \"\\n\"\n writer.write(output_line)\n if FLAGS.do_export:\n estimator._export_to_tpu = False\n estimator.export_savedmodel(FLAGS.export_dir, serving_input_fn)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.nn.log_softmax", "tensorflow.estimator.export.build_raw_serving_input_receiver_fn", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.gfile.Open", "tensorflow.zeros_initializer", "tensorflow.placeholder", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.train.Features", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.train.Scaffold", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
mondrasovic/reid_baseline_syncbn
[ "3d21a786fb1a0519caaa0572c649f750036689b5" ]
[ "dataset/__init__.py" ]
[ "from .transform import RandomErasing\r\nfrom .collate_batch import train_collate_fn\r\nfrom .collate_batch import val_collate_fn\r\nfrom .triplet_sampler import RandomIdentitySampler\r\nfrom .data import ImageDataset, init_dataset\r\nimport torchvision.transforms as T\r\nfrom torch.utils.data.dataloader import DataLoader\r\n\r\n\r\ndef get_trm(cfg, is_train=True):\r\n normalize_transform = T.Normalize(\r\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\r\n )\r\n if is_train:\r\n transform = T.Compose(\r\n [\r\n T.Resize(cfg.INPUT.SIZE_TRAIN),\r\n T.RandomHorizontalFlip(p=cfg.INPUT.PROB),\r\n T.Pad(cfg.INPUT.PADDING),\r\n T.RandomCrop(cfg.INPUT.SIZE_TRAIN),\r\n T.ToTensor(), normalize_transform,\r\n RandomErasing(\r\n probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN\r\n )\r\n ]\r\n )\r\n else:\r\n transform = T.Compose(\r\n [T.Resize(cfg.INPUT.SIZE_TEST),\r\n T.ToTensor(), normalize_transform]\r\n )\r\n return transform\r\n\r\n\r\ndef make_dataloader(cfg, num_gpus=1):\r\n train_trm = get_trm(cfg, is_train=True)\r\n val_trm = get_trm(cfg, is_train=False)\r\n\r\n num_workers = cfg.DATALOADER.NUM_WORKERS * num_gpus\r\n dataset = init_dataset(cfg)\r\n\r\n num_classes = dataset.num_train_pids\r\n train_set = ImageDataset(dataset.train, cfg, train_trm)\r\n if cfg.DATALOADER.SAMPLER == 'softmax':\r\n train_loader = DataLoader(\r\n train_set,\r\n batch_size=cfg.SOLVER.IMS_PER_BATCH * num_gpus,\r\n shuffle=True,\r\n num_workers=num_workers,\r\n collate_fn=train_collate_fn\r\n )\r\n else:\r\n train_loader = DataLoader(\r\n train_set,\r\n batch_size=cfg.SOLVER.IMS_PER_BATCH * num_gpus,\r\n sampler=RandomIdentitySampler(\r\n dataset.train, cfg.SOLVER.IMS_PER_BATCH * num_gpus,\r\n cfg.DATALOADER.NUM_INSTANCE * num_gpus\r\n ),\r\n num_workers=num_workers,\r\n collate_fn=train_collate_fn\r\n )\r\n\r\n val_set = ImageDataset(dataset.query + dataset.gallery, cfg, val_trm)\r\n val_loader = DataLoader(\r\n val_set,\r\n batch_size=cfg.TEST.IMS_PER_BATCH * num_gpus,\r\n shuffle=False,\r\n num_workers=num_workers,\r\n collate_fn=val_collate_fn\r\n )\r\n return train_loader, val_loader, len(dataset.query), num_classes\r\n" ]
[ [ "torch.utils.data.dataloader.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yun-s-oh/addrmatcher
[ "1d937d1e25d785b170fc967bc6bc21a456bace1b" ]
[ "tests/python/latlon_addrmatching_unfactored_2.py" ]
[ "import os\nimport glob\nimport time\nimport numpy as np\nfrom pyarrow import fs\nimport pyarrow.parquet as pq\nfrom sklearn.neighbors import BallTree\n\nis_index_file_exist = os.path.isfile(\"../../../data/Master/New/index.parquet\")\nfiles = (\n glob.glob(os.path.join(\"../../../\", \"data\", \"Master\", \"New\", \"*[0-9].parquet\"))\n if is_index_file_exist\n else []\n)\n\n## Interested Dimensions in the GNAF Files\ninterested_dims = [\n \"LATITUDE\",\n \"LONGITUDE\",\n \"FULL_ADDRESS\",\n \"STATE\",\n \"SA4_NAME_2016\",\n \"LGA_NAME_2016\",\n \"SSC_NAME_2016\",\n \"SA3_NAME_2016\",\n \"SA2_NAME_2016\",\n \"ADDRESS_DETAIL_PID\",\n]\n\nlocal = fs.LocalFileSystem()\n\n\n# Set Minimum and Maximum lat for all properties within Australia\nlat_min = -43.58301104\nlat_max = -9.23000371\nlon_min = 96.82159219\nlon_max = 167.99384663\n\n# 1 lat equals 110.574km\ndeg = 110.574\n\n# Conversion Rate - radians to kilometer\nrad_to_km = 6371\n\n\ndef load_parquet(lat, lon, distance):\n\n df = pq.read_table(\n files,\n filesystem=local,\n columns=interested_dims,\n filters=[\n (\"LATITUDE\", \">=\", lat - distance),\n (\"LATITUDE\", \"<=\", lat + distance),\n (\"LONGITUDE\", \">=\", lon - distance),\n (\"LONGITUDE\", \"<=\", lon + distance),\n ],\n ).to_pandas()\n\n return df\n\n\ndef ensure_lat_lon_within_range(lat, lon):\n\n # Ensure Latitudge within the AU range\n lat = max(lat, lat_min)\n lat = min(lat, lat_max)\n\n # Ensure longitutde within the AU range\n lon = max(lon, lon_min)\n lon = min(lon, lon_max)\n\n return lat, lon\n\n\ndef filter_for_rows_within_mid_distance(df, lat, lon, mid_distance):\n\n mid_df = df[\n df.LATITUDE.between(lat - mid_distance, lat + mid_distance)\n & df.LONGITUDE.between(lon - mid_distance, lon + mid_distance)\n ]\n\n return mid_df\n\n\ndef get_region_by_coordinates(lat, lon, km=None, n=1):\n\n ## 1. Initial distance setting according to lat/lon arguments\n lat, lon = ensure_lat_lon_within_range(lat, lon)\n min_distance = 0\n distance = (km if km else 1) / deg\n\n ## 2. Make the first load of GNAF dataset\n gnaf_df = load_parquet(lat, lon, distance)\n\n # 2.a If the desired count of addresses not exist, increase the radius\n while gnaf_df.shape[0] < n:\n min_distance = distance\n distance *= 2\n\n gnaf_df = load_parquet(lat, lon, distance)\n print(\"gnaf_df.shape: First Load: \", gnaf_df.shape)\n\n # 2.b Keep reducing the size of rows if more than 10k adddresses are found within the radius\n # Take the median distance to reduce\n # This is to limit the number of datapoint to build the Ball tree in the next step\n while gnaf_df.shape[0] >= n + 10000:\n middle_distance = (distance - min_distance) / 2\n gnaf_df = filter_for_rows_within_mid_distance(\n gnaf_df, lat, lon, middle_distance\n )\n print(\"gnaf_df.shape: Reduced Load: \", gnaf_df.shape)\n distance = middle_distance\n print(\"gnaf_df.shape: Final Load: \", gnaf_df.shape)\n\n ## 3. Build the Ball Tree and Query for the nearest within k distance\n ball_tree = BallTree(\n np.deg2rad(gnaf_df[[\"LATITUDE\", \"LONGITUDE\"]].values), metric=\"haversine\"\n )\n distances, indices = ball_tree.query(\n np.deg2rad(np.c_[lat, lon]), k=min(n, gnaf_df.shape[0])\n )\n # Get indices of the search result, Extract pid and calculate distance(km)\n indices = indices[0].tolist()\n pids = gnaf_df.ADDRESS_DETAIL_PID.iloc[indices].tolist()\n distance_map = dict(zip(pids, [distance * rad_to_km for distance in distances[0]]))\n\n ## 4. 
Filter the GNAF dataset by address_detail_pid and Extract the interested columns\n bool_list = gnaf_df[\"ADDRESS_DETAIL_PID\"].isin(pids)\n final_gnaf_df = gnaf_df[bool_list]\n\n final_gnaf_df = final_gnaf_df[interested_dims]\n final_gnaf_df[\"DISTANCE\"] = final_gnaf_df[\"ADDRESS_DETAIL_PID\"].map(distance_map)\n\n return final_gnaf_df.sort_values(\"DISTANCE\")\n" ]
[ [ "numpy.deg2rad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guillaume-florent/PyGeM
[ "372682bff82d1cd396de5773b821ae17918eb905" ]
[ "pygem/radial.py" ]
[ "\"\"\"\nModule focused on the implementation of the Radial Basis Functions interpolation\ntechnique. This technique is still based on the use of a set of parameters, the\nso-called control points, as for FFD, but RBF is interpolatory. Another\nimportant key point of RBF strategy relies in the way we can locate the control\npoints: in fact, instead of FFD where control points need to be placed inside a\nregular lattice, with RBF we hano no more limitations. So we have the\npossibility to perform localized control points refiniments.\nThe module is analogous to the freeform one.\n\n:Theoretical Insight:\n\n As reference please consult M.D. Buhmann, Radial Basis Functions, volume 12\n of Cambridge monographs on applied and computational mathematics. Cambridge\n University Press, UK, 2003. This implementation follows D. Forti and G.\n Rozza, Efficient geometrical parametrization techniques of interfaces for\n reduced order modelling: application to fluid-structure interaction coupling\n problems, International Journal of Computational Fluid Dynamics.\n\n RBF shape parametrization technique is based on the definition of a map,\n :math:`\\\\mathcal{M}(\\\\boldsymbol{x}) : \\\\mathbb{R}^n \\\\rightarrow\n \\\\mathbb{R}^n`, that allows the possibility of transferring data across\n non-matching grids and facing the dynamic mesh handling. The map introduced\n is defines as follows\n\n .. math::\n \\\\mathcal{M}(\\\\boldsymbol{x}) = p(\\\\boldsymbol{x}) + \n \\\\sum_{i=1}^{\\\\mathcal{N}_C} \\\\gamma_i\n \\\\varphi(\\\\| \\\\boldsymbol{x} - \\\\boldsymbol{x_{C_i}} \\\\|)\n\n where :math:`p(\\\\boldsymbol{x})` is a low_degree polynomial term,\n :math:`\\\\gamma_i` is the weight, corresponding to the a-priori selected\n :math:`\\\\mathcal{N}_C` control points, associated to the :math:`i`-th basis\n function, and :math:`\\\\varphi(\\\\| \\\\boldsymbol{x} - \\\\boldsymbol{x_{C_i}}\n \\\\|)` a radial function based on the Euclidean distance between the control\n points position :math:`\\\\boldsymbol{x_{C_i}}` and :math:`\\\\boldsymbol{x}`.\n A radial basis function, generally, is a real-valued function whose value\n depends only on the distance from the origin, so that\n :math:`\\\\varphi(\\\\boldsymbol{x}) = \\\\tilde{\\\\varphi}(\\\\| \\\\boldsymbol{x}\n \\\\|)`.\n\n The matrix version of the formula above is:\n\n .. math::\n \\\\mathcal{M}(\\\\boldsymbol{x}) = \\\\boldsymbol{c} +\n \\\\boldsymbol{Q}\\\\boldsymbol{x} +\n \\\\boldsymbol{W^T}\\\\boldsymbol{d}(\\\\boldsymbol{x})\n\n The idea is that after the computation of the weights and the polynomial\n terms from the coordinates of the control points before and after the\n deformation, we can deform all the points of the mesh accordingly. Among\n the most common used radial basis functions for modelling 2D and 3D shapes,\n we consider Gaussian splines, Multi-quadratic biharmonic splines, Inverted\n multi-quadratic biharmonic splines, Thin-plate splines, Beckert and\n Wendland :math:`C^2` basis and Polyharmonic splines all defined and\n implemented below.\n\"\"\"\nimport numpy as np\n\nfrom scipy.spatial.distance import cdist\n\n\nclass RBF(object):\n \"\"\"\n Class that handles the Radial Basis Functions interpolation on the mesh\n points.\n\n :param RBFParameters rbf_parameters: parameters of the RBF.\n :param numpy.ndarray original_mesh_points: coordinates of the original\n points of the mesh.\n :cvar RBFParameters parameters: parameters of the RBF.\n :cvar numpy.ndarray original_mesh_points: coordinates of the original points\n of the mesh. 
The shape is `n_points`-by-3.\n :cvar numpy.ndarray modified_mesh_points: coordinates of the points of the\n deformed mesh. The shape is `n_points`-by-3.\n :cvar dict bases: a dictionary that associates the names of the basis\n functions implemented to the actual implementation.\n :cvar numpy.matrix weights: the matrix formed by the weights corresponding\n to the a-priori selected N control points, associated to the basis\n functions and c and Q terms that describe the polynomial of order one\n p(x) = c + Qx. The shape is (n_control_points+1+3)-by-3. It is computed\n internally.\n\n :Example:\n\n >>> import pygem.radial as rbf\n >>> import pygem.params as rbfp\n >>> import numpy as np\n >>> rbf_parameters = rbfp.RBFParameters()\n >>> fname = 'tests/test_datasets/parameters_rbf_cube.prm'\n >>> rbf_parameters.read_parameters(fname)\n >>> nx, ny, nz = (20, 20, 20)\n >>> mesh = np.zeros((nx * ny * nz, 3))\n >>> xv = np.linspace(0, 1, nx)\n >>> yv = np.linspace(0, 1, ny)\n >>> zv = np.linspace(0, 1, nz)\n >>> z, y, x = np.meshgrid(zv, yv, xv)\n >>> mesh = np.array([x.ravel(), y.ravel(), z.ravel()])\n >>> original_mesh_points = mesh.T\n >>> radial_trans = rbf.RBF(rbf_parameters, original_mesh_points)\n >>> radial_trans.perform()\n >>> new_mesh_points = radial_trans.modified_mesh_points\n \"\"\"\n\n def __init__(self, rbf_parameters, original_mesh_points):\n self.parameters = rbf_parameters\n self.original_mesh_points = original_mesh_points\n self.modified_mesh_points = None\n\n self.bases = {\n 'gaussian_spline':\n self.gaussian_spline,\n 'multi_quadratic_biharmonic_spline':\n self.multi_quadratic_biharmonic_spline,\n 'inv_multi_quadratic_biharmonic_spline':\n self.inv_multi_quadratic_biharmonic_spline,\n 'thin_plate_spline':\n self.thin_plate_spline,\n 'beckert_wendland_c2_basis':\n self.beckert_wendland_c2_basis,\n 'polyharmonic_spline':\n self.polyharmonic_spline\n }\n\n # to make the str callable we have to use a dictionary with all the\n # implemented radial basis functions\n if self.parameters.basis in self.bases:\n self.basis = self.bases[self.parameters.basis]\n else:\n raise NameError(\n \"\"\"The name of the basis function in the parameters file is not\n correct or not implemented. Check the documentation for\n all the available functions.\"\"\")\n\n self.weights = self._get_weights(\n self.parameters.original_control_points,\n self.parameters.deformed_control_points)\n\n @staticmethod\n def gaussian_spline(X, r):\n \"\"\"\n It implements the following formula:\n\n .. math::\n \\\\varphi(\\\\boldsymbol{x}) = e^{-\\\\frac{\\\\boldsymbol{x}^2}{r^2}}\n\n :param numpy.ndarray X: the vector x in the formula above.\n :param float r: the parameter r in the formula above.\n\n :return: result: the result of the formula above.\n :rtype: float\n \"\"\"\n result = np.exp(-(X * X) / (r * r))\n return result\n\n @staticmethod\n def multi_quadratic_biharmonic_spline(X, r):\n \"\"\"\n It implements the following formula:\n\n .. math::\n \\\\varphi(\\\\boldsymbol{x}) = \\\\sqrt{\\\\boldsymbol{x}^2 + r^2}\n\n :param numpy.ndarray X: the vector x in the formula above.\n :param float r: the parameter r in the formula above.\n\n :return: result: the result of the formula above.\n :rtype: float\n \"\"\"\n result = np.sqrt((X * X) + (r * r))\n return result\n\n @staticmethod\n def inv_multi_quadratic_biharmonic_spline(X, r):\n \"\"\"\n It implements the following formula:\n\n .. 
math::\n \\\\varphi(\\\\boldsymbol{x}) =\n (\\\\boldsymbol{x}^2 + r^2 )^{-\\\\frac{1}{2}}\n\n :param numpy.ndarray X: the vector x in the formula above.\n :param float r: the parameter r in the formula above.\n\n :return: result: the result of the formula above.\n :rtype: float\n \"\"\"\n result = 1.0 / (np.sqrt((X * X) + (r * r)))\n return result\n\n @staticmethod\n def thin_plate_spline(X, r):\n \"\"\"\n It implements the following formula:\n\n .. math::\n \\\\varphi(\\\\boldsymbol{x}) =\n \\\\left(\\\\frac{\\\\boldsymbol{x}}{r}\\\\right)^2\n \\\\ln\\\\frac{\\\\boldsymbol{x}}{r}\n\n :param numpy.ndarray X: the vector x in the formula above.\n :param float r: the parameter r in the formula above.\n\n :return: result: the result of the formula above.\n :rtype: float\n \"\"\"\n arg = X / r\n result = arg * arg\n result = np.where(arg > 0, result * np.log(arg), result)\n return result\n\n @staticmethod\n def beckert_wendland_c2_basis(X, r):\n \"\"\"\n It implements the following formula:\n\n .. math::\n \\\\varphi(\\\\boldsymbol{x}) = \n \\\\left( 1 - \\\\frac{\\\\boldsymbol{x}}{r}\\\\right)^4 +\n \\\\left( 4 \\\\frac{ \\\\boldsymbol{x} }{r} + 1 \\\\right)\n\n :param numpy.ndarray X: the vector x in the formula above.\n :param float r: the parameter r in the formula above.\n\n :return: result: the result of the formula above.\n :rtype: float\n \"\"\"\n arg = X / r\n first = np.where((1 - arg) > 0, np.power((1 - arg), 4), 0)\n second = (4 * arg) + 1\n result = first * second\n return result\n\n def polyharmonic_spline(self, X, r):\n \"\"\"\n It implements the following formula:\n\n .. math::\n \n \\\\varphi(\\\\boldsymbol{x}) =\n \\\\begin{cases}\n \\\\frac{\\\\boldsymbol{x}}{r}^k\n \\\\quad & \\\\text{if}~k = 1,3,5,...\\\\\\\\\n \\\\frac{\\\\boldsymbol{x}}{r}^{k-1}\n \\\\ln(\\\\frac{\\\\boldsymbol{x}}{r}^\n {\\\\frac{\\\\boldsymbol{x}}{r}})\n \\\\quad & \\\\text{if}~\\\\frac{\\\\boldsymbol{x}}{r} < 1,\n ~k = 2,4,6,...\\\\\\\\\n \\\\frac{\\\\boldsymbol{x}}{r}^k\n \\\\ln(\\\\frac{\\\\boldsymbol{x}}{r})\n \\\\quad & \\\\text{if}~\\\\frac{\\\\boldsymbol{x}}{r} \\\\ge 1,\n ~k = 2,4,6,...\\\\\\\\\n \\\\end{cases}\n\n :param numpy.ndarray X: the vector x in the formula above.\n :param float r: the parameter r in the formula above.\n\n :return: result: the result of the formula above.\n :rtype: float\n \"\"\"\n\n k = self.parameters.power\n r_sc = X / r\n\n # k odd\n if k & 1:\n return np.power(r_sc, k)\n\n print(r_sc)\n # k even\n result = np.where(r_sc < 1,\n np.power(r_sc, k - 1) * np.log(np.power(r_sc, r_sc)),\n np.power(r_sc, k) * np.log(r_sc))\n return result\n\n def _get_weights(self, X, Y):\n \"\"\"\n This private method, given the original control points and the deformed\n ones, returns the matrix with the weights and the polynomial terms, that\n is :math:`W`, :math:`c^T` and :math:`Q^T`. 
The shape is\n (n_control_points+1+3)-by-3.\n\n :param numpy.ndarray X: it is an n_control_points-by-3 array with the\n coordinates of the original interpolation control points before the\n deformation.\n :param numpy.ndarray Y: it is an n_control_points-by-3 array with the\n coordinates of the interpolation control points after the\n deformation.\n\n :return: weights: the matrix with the weights and the polynomial terms.\n :rtype: numpy.matrix\n \"\"\"\n n_points, dim = X.shape\n H = np.zeros((n_points + 3 + 1, n_points + 3 + 1))\n H[:n_points, :n_points] = self.basis(\n cdist(X, X), self.parameters.radius)\n H[n_points, :n_points] = 1.0\n H[:n_points, n_points] = 1.0\n H[:n_points, -3:] = X\n H[-3:, :n_points] = X.T\n\n rhs = np.zeros((n_points + 3 + 1, dim))\n rhs[:n_points, :] = Y\n weights = np.linalg.solve(H, rhs)\n return weights\n\n def perform(self):\n \"\"\"\n This method performs the deformation of the mesh points. After the\n execution it sets `self.modified_mesh_points`.\n \"\"\"\n n_points = self.original_mesh_points.shape[0]\n dist = self.basis(\n cdist(self.original_mesh_points,\n self.parameters.original_control_points),\n self.parameters.radius)\n identity = np.ones((n_points, 1))\n H = np.bmat([[dist, identity, self.original_mesh_points]])\n self.modified_mesh_points = np.asarray(np.dot(H, self.weights))\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.linalg.solve", "numpy.sqrt", "numpy.power", "scipy.spatial.distance.cdist", "numpy.ones", "numpy.bmat", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
SoumyajitPal/YinYang
[ "325c5a7846fabf10e601f059d682b5099a351589" ]
[ "CropImage.py" ]
[ "import numpy as np\r\nimport cv2\r\n# from matplotlib import pyplot as plt\r\nimport ImDiffMod\r\nimport math\r\n\r\n\r\ndef cropImage(img1, img2):\r\n\r\n # Initiate SIFT detector\r\n sift = cv2.xfeatures2d.SIFT_create()\r\n\r\n # find the keypoints and descriptors with SIFT\r\n kp1, des1 = sift.detectAndCompute(img1, None)\r\n kp2, des2 = sift.detectAndCompute(img2, None)\r\n '''img11 = np.zeros_like(img1)\r\n img22 = np.zeros_like(img2)\r\n img11 = cv2.drawKeypoints(img1,kp1, img11, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n img22 = cv2.drawKeypoints(img2,kp2, img22, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n cv2.imwrite('sift_keypoints1.jpg',img11)\r\n cv2.imwrite('sift_keypoints2.jpg',img22)'''\r\n # FLANN parameters\r\n FLANN_INDEX_KDTREE = 1\r\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\r\n search_params = dict(checks=50) # or pass empty dictionary\r\n flann = cv2.FlannBasedMatcher(index_params, search_params)\r\n matches = flann.knnMatch(des1, des2, k=2)\r\n # print(matches)\r\n\r\n good = []\r\n # Need to draw only good matches, so create a mask\r\n matchesMask = [[0, 0] for i in range(len(matches))]\r\n\r\n # ratio test as per Lowe's paper\r\n for i, (m, n) in enumerate(matches):\r\n if m.distance < 0.7*n.distance:\r\n matchesMask[i]=[1, 0]\r\n good.append(m)\r\n\r\n sourcePoints = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\r\n desPoints = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\r\n\r\n # print(sourcePoints)\r\n # print(desPoints)\r\n\r\n M, mask = cv2.findHomography(sourcePoints, desPoints, cv2.RANSAC, 10.0)\r\n ss = M[0, 1]\r\n sc = M[0, 0]\r\n scaleRecovered = math.sqrt(ss * ss + sc * sc)\r\n thetaRecovered = math.atan2(ss, sc) * 180 / math.pi\r\n print(\"Calculated scale difference: %.2f\\nCalculated rotation difference: %.2f\" % (scaleRecovered, thetaRecovered))\r\n\r\n im_out = cv2.warpPerspective(img2, np.linalg.inv(M), (img1.shape[1], img1.shape[0]))\r\n # im_in = cv2.warpPerspective(img1, np.linalg.inv(M), (img1.shape[1], img1.shape[0]))\r\n # plt.imshow(im_out, 'gray')\r\n # plt.show()\r\n\r\n # draw_params = dict(matchColor=(0, 255, 0),\r\n # singlePointColor=(255, 0, 0),\r\n # matchesMask=matchesMask,\r\n # flags=0)\r\n # img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)\r\n # cv2.imshow('sift', img3)\r\n # cv2.waitKey()\r\n return im_out\r\n\r\n\r\nif __name__ == '__main__':\r\n # img1 = cv2.imread('box.png', 0) # queryImage\r\n # img2 = cv2.imread('box_in_scene.png', 0) # trainImage\r\n\r\n im = 'D:\\\\Hocus-Focus\\\\p2.png '\r\n im1, im2 = ImDiffMod.cropPhoto(im)\r\n\r\n # im1 = cv2.imread('D:\\\\HocusFocusCropped\\\\1\\\\p1.jpg', cv2.IMREAD_GRAYSCALE)\r\n # im2 = cv2.imread('D:\\\\HocusFocusCropped\\\\1\\\\p2.jpg', cv2.IMREAD_GRAYSCALE)\r\n\r\n cropImage(im1, im2)\r\n" ]
[ [ "numpy.linalg.inv", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fidler-lab/efficient-annotation-cookbook
[ "8d02da89c8049c549748761e1762a04f40a64da0" ]
[ "online_label/worker.py" ]
[ "import os\nimport json\nimport uuid\nimport numpy as np\n\nfrom data import REPO_DIR, imagenet100\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Worker(object):\n\n def __init__(self, config, known, seed, **kwargs):\n\n self.id = str(uuid.uuid4())\n self.config = config\n self.seed = seed\n self.npr = np.random.RandomState(seed)\n self.n_classes = config.n_classes\n self.known = known\n\n self.m = self.sample_confusion_matrix() # (actual classes, predict classes)\n\n def save_state(self):\n return json.dumps(dict(id=self.id, m=self.m.tolist()))\n\n def load_state(self, state):\n state = json.loads(state)\n self.id = state['id']\n self.m = np.array(state['m'])\n \n def annotate(self, true_y, qmask=None):\n m = self.m\n valid_options = range(self.n_classes)\n\n prob = m[true_y]\n prob = np.clip(prob, 0., 1.) \n\n z = self.npr.choice(range(self.n_classes), p=prob)\n if self.known:\n p_z_given_y = m[:, z]\n else:\n p_z_given_y = None\n\n return z, p_z_given_y\n \n def sample_confusion_matrix(self):\n raise NotImplementedError\n\n\nclass UniformWorker(Worker):\n '''w/o class correlation\n '''\n def sample_confusion_matrix(self):\n config = self.config\n\n m = np.zeros((self.n_classes, self.n_classes))\n m += np.eye(self.n_classes) * self.npr.normal(config.worker.reliability.mean, \n config.worker.reliability.std, \n size=self.n_classes)\n m = np.clip(m, 0, 1)\n for i in range(self.n_classes):\n noise = self.npr.rand(self.n_classes)\n noise /= noise.sum()\n noise += noise[i] / (self.n_classes - 1)\n noise *= (1 - m[i, i])\n m[i] += -1*(np.eye(self.n_classes)-1)[i] * noise\n\n reliability = np.diag(m).mean()\n logger.debug(f'Reliability: {reliability}')\n return m\n\n\nclass PerfectWorker(Worker):\n def sample_confusion_matrix(self):\n m = np.eye(self.n_classes)\n m = np.clip(m, 0, 1)\n return m\n\n\nclass RealWorker(Worker):\n wnids = None\n worker_cm_info = json.load(open(os.path.join(REPO_DIR, 'data/group_workers.json'), 'r'))\n groups_path = os.path.join(REPO_DIR, 'data/groups.txt')\n global_cm = []\n for _k, _v in worker_cm_info['group_workers'].items():\n _v = np.array(_v)\n global_cm.append(_v.sum(0))\n global_cm = sum(global_cm)\n\n def __init__(self, config, known, seed, **kwargs):\n\n keep_indices = np.array([imagenet100.index(i.lower()) for i in self.wnids])\n self.keep_indices = keep_indices\n self.global_cm = self.global_cm[keep_indices, :][:, keep_indices]\n Worker.__init__(self, config, known, seed, **kwargs)\n\n def sample_confusion_matrix(self):\n m = self._sample_confusion_matrix()\n\n with open(self.groups_path) as fp:\n groups = fp.read()\n groups = groups.split('\\n\\n')\n groups.pop(-1)\n groups = [np.array(i.split('\\n')) for i in groups]\n\n\n def __which_group(i):\n for g_idx, g in enumerate(groups):\n if i in g:\n return g_idx\n\n\n # Add uniform noise in off-diagonal terms\n noise_level = 0.03 # According to the amt stats\n for i, i_wnid in enumerate(self.wnids):\n i_group = __which_group(i_wnid)\n same_group_mask = np.zeros(self.config.n_classes).astype(np.bool)\n same_group_mask[i] = True\n for j, j_wnid in enumerate(self.wnids):\n if i != j and i_group == __which_group(j_wnid):\n same_group_mask[j] = True\n \n\n if (~same_group_mask).sum() > 0:\n density_to_spread = m[i, same_group_mask].sum()\n m[i, same_group_mask] = m[i, same_group_mask] * (1 - noise_level)\n m[i, ~same_group_mask] += density_to_spread * (noise_level) / max(sum(~same_group_mask), 1e-8)\n\n return m\n\n\nclass StructuredNoiseWorker(RealWorker):\n def 
_sample_confusion_matrix(self):\n \n cm = []\n for _, v in self.worker_cm_info['group_workers'].items():\n v = np.array(v)\n global_v = v.sum(0)\n idx = self.npr.choice(range(len(v)), 1)\n cm.append(global_v + v[idx][0] * 10)\n\n cm = sum(cm)\n if self.config.n_data_distraction_per_class > 0:\n m = np.zeros((self.config.n_classes, self.config.n_classes))\n m[:self.config.n_classes-1, :self.config.n_classes-1] = cm[self.keep_indices, :][:, self.keep_indices]\n\n drop_indices_mask = np.ones(cm.shape[0]).astype(np.bool)\n drop_indices_mask[self.keep_indices] = False\n\n m[-1, :self.config.n_classes-1] = cm[drop_indices_mask, :][:, self.keep_indices].sum(0) # Last row\n m[:self.config.n_classes-1, -1] = cm[self.keep_indices, :][:, drop_indices_mask].sum(1) # Last column\n m[-1, -1] = cm[drop_indices_mask, :][:, drop_indices_mask].sum()\n else:\n m = cm[self.keep_indices, :][:, self.keep_indices]\n\n assert len(np.where(m.sum(1)==0)[0]) == 0\n\n m = m / (m.sum(1, keepdims=True) + 1e-8)\n\n \n reliability = np.diag(m).mean()\n logger.debug(f'Reliability: {reliability:.2f}')\n return m\n\n\nclass UniformNoiseWorker(RealWorker):\n def _sample_confusion_matrix(self):\n \n cm = []\n for _, v in self.worker_cm_info['group_workers'].items():\n v = np.array(v)\n global_v = v.sum(0)\n idx = self.npr.choice(range(len(v)), 10)\n cm.append(global_v + v[idx][0] * 10)\n cm = sum(cm)\n\n imagenet100_name = self.worker_cm_info['imagenet100_name']\n\n m = cm[self.keep_indices, :][:, self.keep_indices]\n\n if self.config.n_data_distraction_per_class > 0:\n m = np.zeros((self.config.n_classes, self.config.n_classes))\n m[:self.config.n_classes-1, :self.config.n_classes-1] = cm[self.keep_indices, :][:, self.keep_indices]\n idx_to_drop_mask = np.ones(cm.shape[0]).astype(np.bool)\n idx_to_drop_mask[self.keep_indices] = False\n\n\n m[-1, :len(self.keep_indices)] = cm[idx_to_drop_mask, :][:, self.keep_indices].sum(0)\n m[:len(self.keep_indices), -1] = cm[self.keep_indices, :][:, idx_to_drop_mask].sum(1)\n m[-1, -1] = cm[idx_to_drop_mask, :][:, idx_to_drop_mask].sum()\n else:\n m = cm[self.keep_indices, :][:, self.keep_indices]\n assert len(np.where(m.sum(1)==0)[0]) == 0\n\n m = m / (m.sum(1, keepdims=True) + 1e-8)\n\n n_classes = len(self.wnids)\n _m = np.zeros((n_classes, n_classes))\n _m += ((1 - m.diagonal()) / (n_classes - 1)).reshape(-1, 1)\n np.fill_diagonal(_m, m.diagonal())\n m = _m\n \n reliability = np.diag(m).mean()\n logger.debug(f'Reliability: {reliability:.2f}')\n return m\n\n\ndef get_worker_class(config, wnids):\n\n if config.worker.type == 'perfect':\n worker_class = PerfectWorker\n elif config.worker.type == 'uniform':\n worker_class = UniformWorker\n elif config.worker.type == 'uniform_noise':\n worker_class = UniformNoiseWorker\n worker_class.wnids = wnids\n elif config.worker.type == 'structured_noise':\n worker_class = StructuredNoiseWorker\n worker_class.wnids = wnids\n else:\n raise ValueError\n\n return worker_class\n" ]
[ [ "numpy.diag", "numpy.clip", "numpy.eye", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tsumikihuang/ml-agents-
[ "cb0bfa0382650dee2071eb415147d795721297b1" ]
[ "ml-agents/mlagents/trainers/ppo/trainer.py" ]
[ "# # Unity ML-Agents Toolkit\n# ## ML-Agent Learning (PPO)\n# Contains an implementation of PPO as described (https://arxiv.org/abs/1707.06347).\n\nimport logging\nimport os\nfrom collections import deque\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom mlagents.envs import AllBrainInfo, BrainInfo\nfrom mlagents.trainers.buffer import Buffer\nfrom mlagents.trainers.ppo.policy import PPOPolicy\nfrom mlagents.trainers.trainer import Trainer\n\nlogger = logging.getLogger(\"mlagents.trainers\")\n\n\nclass PPOTrainer(Trainer):\n \"\"\"The PPOTrainer is an implementation of the PPO algorithm.\"\"\"\n\n def __init__(self, brain, reward_buff_cap, trainer_parameters, training, load, seed, run_id):\n \"\"\"\n Responsible for collecting experiences and training PPO model.\n :param trainer_parameters: The parameters for the trainer (dictionary).\n :param training: Whether the trainer is set for training.\n :param load: Whether the model should be loaded.\n :param seed: The seed the model will be initialized with\n :param run_id: The The identifier of the current run\n \"\"\"\n super(PPOTrainer, self).__init__(brain, trainer_parameters, training, run_id)\n self.param_keys = ['batch_size', 'beta', 'buffer_size', 'epsilon', 'gamma', 'hidden_units', 'lambd',\n 'learning_rate', 'max_steps', 'normalize', 'num_epoch', 'num_layers',\n 'time_horizon', 'sequence_length', 'summary_freq', 'use_recurrent',\n 'summary_path', 'memory_size', 'use_curiosity', 'curiosity_strength',\n 'curiosity_enc_size', 'model_path']\n\n self.check_param_keys()\n self.use_curiosity = bool(trainer_parameters['use_curiosity'])\n self.step = 0\n self.policy = PPOPolicy(seed, brain, trainer_parameters,\n self.is_training, load)\n\n stats = {'Environment/Cumulative Reward': [], 'Environment/Episode Length': [],\n 'Policy/Value Estimate': [], 'Policy/Entropy': [], 'Losses/Value Loss': [],\n 'Losses/Policy Loss': [], 'Policy/Learning Rate': []}\n if self.use_curiosity:\n stats['Losses/Forward Loss'] = []\n stats['Losses/Inverse Loss'] = []\n stats['Policy/Curiosity Reward'] = []\n self.intrinsic_rewards = {}\n self.stats = stats\n\n self.training_buffer = Buffer()\n self.cumulative_rewards = {}\n self._reward_buffer = deque(maxlen=reward_buff_cap)\n self.episode_steps = {}\n self.summary_path = trainer_parameters['summary_path']\n if not os.path.exists(self.summary_path):\n os.makedirs(self.summary_path)\n\n self.summary_writer = tf.summary.FileWriter(self.summary_path)\n\n def __str__(self):\n return '''Hyperparameters for the PPO Trainer of brain {0}: \\n{1}'''.format(\n self.brain_name, '\\n'.join(['\\t{0}:\\t{1}'.format(x, self.trainer_parameters[x]) for x in self.param_keys]))\n\n @property\n def parameters(self):\n \"\"\"\n Returns the trainer parameters of the trainer.\n \"\"\"\n return self.trainer_parameters\n\n @property\n def get_max_steps(self):\n \"\"\"\n Returns the maximum number of steps. Is used to know when the trainer should be stopped.\n :return: The maximum number of steps of the trainer\n \"\"\"\n return float(self.trainer_parameters['max_steps'])\n\n @property\n def get_step(self):\n \"\"\"\n Returns the number of steps the trainer has performed\n :return: the step count of the trainer\n \"\"\"\n return self.step\n\n @property\n def reward_buffer(self):\n \"\"\"\n Returns the reward buffer. 
The reward buffer contains the cumulative\n rewards of the most recent episodes completed by agents using this\n trainer.\n :return: the reward buffer.\n \"\"\"\n return self._reward_buffer\n\n def increment_step_and_update_last_reward(self):\n \"\"\"\n Increment the step count of the trainer and Updates the last reward\n \"\"\"\n if len(self.stats['Environment/Cumulative Reward']) > 0:\n mean_reward = np.mean(self.stats['Environment/Cumulative Reward'])\n self.policy.update_reward(mean_reward)\n self.policy.increment_step()\n self.step = self.policy.get_current_step()\n\n def take_action(self, all_brain_info: AllBrainInfo):\n \"\"\"\n Decides actions given observations information, and takes them in environment.\n :param all_brain_info: A dictionary of brain names and BrainInfo from environment.\n :return: a tuple containing action, memories, values and an object\n to be passed to add experiences\n \"\"\"\n curr_brain_info = all_brain_info[self.brain_name]\n if len(curr_brain_info.agents) == 0:\n return [], [], [], None, None\n\n run_out = self.policy.evaluate(curr_brain_info)\n self.stats['Policy/Value Estimate'].append(run_out['value'].mean())\n self.stats['Policy/Entropy'].append(run_out['entropy'].mean())\n self.stats['Policy/Learning Rate'].append(run_out['learning_rate'])\n if self.policy.use_recurrent:\n return run_out['action'], run_out['memory_out'], None, \\\n run_out['value'], run_out\n else:\n return run_out['action'], None, None, run_out['value'], run_out\n\n def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:\n \"\"\"\n Constructs a BrainInfo which contains the most recent previous experiences for all agents info\n which correspond to the agents in a provided next_info.\n :BrainInfo next_info: A t+1 BrainInfo.\n :return: curr_info: Reconstructed BrainInfo to match agents of next_info.\n \"\"\"\n visual_observations = [[]]\n vector_observations = []\n text_observations = []\n memories = []\n rewards = []\n local_dones = []\n max_reacheds = []\n agents = []\n prev_vector_actions = []\n prev_text_actions = []\n for agent_id in next_info.agents:\n agent_brain_info = self.training_buffer[agent_id].last_brain_info\n if agent_brain_info is None:\n agent_brain_info = next_info\n agent_index = agent_brain_info.agents.index(agent_id)\n for i in range(len(next_info.visual_observations)):\n visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])\n vector_observations.append(agent_brain_info.vector_observations[agent_index])\n text_observations.append(agent_brain_info.text_observations[agent_index])\n if self.policy.use_recurrent:\n if len(agent_brain_info.memories > 0):\n memories.append(agent_brain_info.memories[agent_index])\n else:\n memories.append(self.policy.make_empty_memory(1))\n rewards.append(agent_brain_info.rewards[agent_index])\n local_dones.append(agent_brain_info.local_done[agent_index])\n max_reacheds.append(agent_brain_info.max_reached[agent_index])\n agents.append(agent_brain_info.agents[agent_index])\n prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])\n prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])\n if self.policy.use_recurrent:\n memories = np.vstack(memories)\n curr_info = BrainInfo(visual_observations, vector_observations, text_observations,\n memories, rewards, agents, local_dones, prev_vector_actions,\n prev_text_actions, max_reacheds)\n return curr_info\n\n def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, 
take_action_outputs):\n \"\"\"\n Adds experiences to each agent's experience history.\n :param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.\n :param next_all_info: Dictionary of all current brains and corresponding BrainInfo.\n :param take_action_outputs: The outputs of the take action method.\n \"\"\"\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs\n\n if curr_info.agents != next_info.agents:\n curr_to_use = self.construct_curr_info(next_info)\n else:\n curr_to_use = curr_info\n\n intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id]['visual_obs%d' % i].append(\n stored_info.visual_observations[i][idx])\n self.training_buffer[agent_id]['next_visual_obs%d' % i].append(\n next_info.visual_observations[i][next_idx])\n if self.policy.use_vec_obs:\n self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])\n self.training_buffer[agent_id]['next_vector_in'].append(\n next_info.vector_observations[next_idx])\n if self.policy.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))\n self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])\n actions = stored_take_action_outputs['action']\n if self.policy.use_continuous_act:\n actions_pre = stored_take_action_outputs['pre_action']\n self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])\n epsilons = stored_take_action_outputs['random_normal_epsilon']\n self.training_buffer[agent_id]['random_normal_epsilon'].append(\n epsilons[idx])\n else:\n self.training_buffer[agent_id]['action_mask'].append(\n stored_info.action_masks[idx])\n a_dist = stored_take_action_outputs['log_probs']\n value = stored_take_action_outputs['value']\n self.training_buffer[agent_id]['actions'].append(actions[idx])\n self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])\n self.training_buffer[agent_id]['masks'].append(1.0)\n if self.use_curiosity:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +\n intrinsic_rewards[next_idx])\n else:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])\n self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])\n self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])\n if agent_id not in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]\n if self.use_curiosity:\n if agent_id not in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1\n\n def process_experiences(self, 
current_info: AllBrainInfo, new_info: AllBrainInfo):\n \"\"\"\n Checks agent histories for processing condition, and processes them as necessary.\n Processing involves calculating value and advantage targets for model updating step.\n :param current_info: Dictionary of all current brains and corresponding BrainInfo.\n :param new_info: Dictionary of all next brains and corresponding BrainInfo.\n \"\"\"\n\n info = new_info[self.brain_name]\n for l in range(len(info.agents)):\n agent_actions = self.training_buffer[info.agents[l]]['actions']\n if ((info.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon'])\n and len(agent_actions) > 0):\n agent_id = info.agents[l]\n if info.local_done[l] and not info.max_reached[l]:\n value_next = 0.0\n else:\n if info.max_reached[l]:\n bootstrapping_info = self.training_buffer[agent_id].last_brain_info\n idx = bootstrapping_info.agents.index(agent_id)\n else:\n bootstrapping_info = info\n idx = l\n value_next = self.policy.get_value_estimate(bootstrapping_info, idx)\n\n self.training_buffer[agent_id]['advantages'].set(\n get_gae(\n rewards=self.training_buffer[agent_id]['rewards'].get_batch(),\n value_estimates=self.training_buffer[agent_id]['value_estimates'].get_batch(),\n value_next=value_next,\n gamma=self.trainer_parameters['gamma'],\n lambd=self.trainer_parameters['lambd']))\n self.training_buffer[agent_id]['discounted_returns'].set(\n self.training_buffer[agent_id]['advantages'].get_batch()\n + self.training_buffer[agent_id]['value_estimates'].get_batch())\n\n self.training_buffer.append_update_buffer(agent_id, batch_size=None,\n training_length=self.policy.sequence_length)\n\n self.training_buffer[agent_id].reset_agent()\n if info.local_done[l]:\n self.stats['Environment/Cumulative Reward'].append(\n self.cumulative_rewards.get(agent_id, 0))\n self.reward_buffer.appendleft(self.cumulative_rewards.get(agent_id, 0))\n self.stats['Environment/Episode Length'].append(\n self.episode_steps.get(agent_id, 0))\n self.cumulative_rewards[agent_id] = 0\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n self.stats['Policy/Curiosity Reward'].append(\n self.intrinsic_rewards.get(agent_id, 0))\n self.intrinsic_rewards[agent_id] = 0\n\n def end_episode(self):\n \"\"\"\n A signal that the Episode has ended. The buffer must be reset. 
\n Get only called when the academy resets.\n \"\"\"\n self.training_buffer.reset_local_buffers()\n for agent_id in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n for agent_id in self.episode_steps:\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n for agent_id in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n\n def is_ready_update(self):\n \"\"\"\n Returns whether or not the trainer has enough elements to run update model\n :return: A boolean corresponding to whether or not update_model() can be run\n \"\"\"\n size_of_buffer = len(self.training_buffer.update_buffer['actions'])\n return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)\n\n def update_policy(self):\n \"\"\"\n Uses demonstration_buffer to update the policy.\n \"\"\"\n n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)\n value_total, policy_total, forward_total, inverse_total = [], [], [], []\n advantages = self.training_buffer.update_buffer['advantages'].get_batch()\n self.training_buffer.update_buffer['advantages'].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10))\n num_epoch = self.trainer_parameters['num_epoch']\n for k in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)\n value_total.append(run_out['value_loss'])\n policy_total.append(np.abs(run_out['policy_loss']))\n if self.use_curiosity:\n inverse_total.append(run_out['inverse_loss'])\n forward_total.append(run_out['forward_loss'])\n self.stats['Losses/Value Loss'].append(np.mean(value_total))\n self.stats['Losses/Policy Loss'].append(np.mean(policy_total))\n if self.use_curiosity:\n self.stats['Losses/Forward Loss'].append(np.mean(forward_total))\n self.stats['Losses/Inverse Loss'].append(np.mean(inverse_total))\n self.training_buffer.reset_update_buffer()\n\n\ndef discount_rewards(r, gamma=0.99, value_next=0.0):\n \"\"\"\n Computes discounted sum of future rewards for use in updating value estimate.\n :param r: List of rewards.\n :param gamma: Discount factor.\n :param value_next: T+1 value estimate for returns calculation.\n :return: discounted sum of future rewards as list.\n \"\"\"\n discounted_r = np.zeros_like(r)\n running_add = value_next\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n\ndef get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n \"\"\"\n Computes generalized advantage estimate for use in updating policy.\n :param rewards: list of rewards for time-steps t to T.\n :param value_next: Value estimate for time-step T+1.\n :param value_estimates: list of value estimates for time-steps t to T.\n :param gamma: Discount factor.\n :param lambd: GAE weighing factor.\n :return: list of advantage estimates for time-steps t to T.\n \"\"\"\n value_estimates = np.asarray(value_estimates.tolist() + [value_next])\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage\n" ]
[ [ "tensorflow.summary.FileWriter", "numpy.abs", "numpy.zeros_like", "numpy.mean", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
TSFDlib/TSFEL
[ "a4c30acc93dd3717bf93b19e59c3dc927903caf2" ]
[ "tsfel/feature_extraction/feat_selection.py" ]
[ "import numpy as np\nimport pandas_profiling\nfrom sklearn.metrics import accuracy_score\n\ndef FSE(X_train, X_test, y_train, y_test, features_descrition, classifier):\n \"\"\" Performs a forward feature selection.\n Parameters\n ----------\n X_train: array-like\n train set features\n X_test: array-like\n test set features\n y_train: array-like\n train set labels\n y_test: array-like\n test set labels\n y_test: array-like\n test set labels\n features_descrition: list of strings\n list with extracted features names\n classifier: object\n classifier object\n Returns\n -------\n FS_X_train: train set best set of features\n FS_X_test: test set best set of features\n FS_lab: name of the best set of features\n \"\"\"\n total_acc, FS_lab, acc_list = [], [], []\n X_train = np.array(X_train)\n X_test = np.array(X_test)\n\n print(\"*** Feature selection started ***\")\n for feat_idx, feat_name in enumerate(features_descrition):\n classifier.fit(X_train[:,feat_idx].reshape(-1,1), y_train)\n y_test_predict = classifier.predict(X_test[:,feat_idx].reshape(-1,1))\n acc_list.append(accuracy_score(y_test, y_test_predict))\n\n curr_acc_idx = np.argmax(acc_list)\n FS_lab.append(features_descrition[curr_acc_idx])\n last_acc = acc_list[curr_acc_idx]\n FS_X_train = X_train[:,curr_acc_idx]\n FS_X_test = X_test[:,curr_acc_idx]\n total_acc.append(last_acc)\n\n while 1:\n acc_list = []\n for feat_idx, feat_name in enumerate(features_descrition):\n if feat_name not in FS_lab:\n curr_train = np.column_stack((FS_X_train, X_train[:, feat_idx]))\n curr_test = np.column_stack((FS_X_test, X_test[:, feat_idx]))\n classifier.fit(curr_train, y_train)\n y_test_predict = classifier.predict(curr_test)\n acc_list.append(accuracy_score(y_test, y_test_predict))\n else:\n acc_list.append(0)\n curr_acc_idx = np.argmax(acc_list)\n if last_acc < acc_list[curr_acc_idx]:\n FS_lab.append(features_descrition[curr_acc_idx])\n last_acc = acc_list[curr_acc_idx]\n total_acc.append(last_acc)\n\n FS_X_train = np.column_stack((FS_X_train, X_train[:, curr_acc_idx]))\n FS_X_test = np.column_stack((FS_X_test, X_test[:, curr_acc_idx]))\n else:\n print(\"FINAL Features: \" + str(FS_lab))\n print(\"Number of features\", len(FS_lab))\n print(\"Acc: \", str(total_acc))\n print(\"From \", str(len(X_train[0])), \"to \", str(len(FS_lab)))\n\n break\n print(\"*** Feature selection finished ***\")\n\n return np.array(FS_X_train), np.array(FS_X_test), np.array(FS_lab)\n\n\ndef correlation_report(df):\n \"\"\" Performs a correlation report and removes highly correlated features.\n Parameters\n ----------\n df: dataframe\n features\n Returns\n -------\n df: feature dataframe without high correlated features\n \"\"\"\n profile = pandas_profiling.ProfileReport(df)\n profile.to_file(outputfile=\"CorrelationReport.html\")\n inp = str(input('Do you wish to remove correlated features? Enter y/n: '))\n if inp == 'y':\n reject = profile.get_rejected_variables(threshold=0.9)\n if not list(reject):\n print('No features to remove')\n for rej in reject:\n print('Removing ' + str(rej))\n df = df.drop(rej, axis=1)\n return df" ]
[ [ "numpy.column_stack", "numpy.array", "numpy.argmax", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dariodsa/pycuda
[ "0fb9477b8b73deb8773ee9007b2ed97720d06552" ]
[ "examples/hello_gpu.py" ]
[ "from __future__ import print_function\nfrom __future__ import absolute_import\nimport pycuda.driver as drv\nimport pycuda.tools\nimport pycuda.autoinit\nimport numpy\nimport numpy.linalg as la\nfrom pycuda.compiler import SourceModule\n\nmod = SourceModule(\"\"\"\n__global__ void multiply_them(float *dest, float *a, float *b)\n{\n const int i = threadIdx.x;\n dest[i] = a[i] * b[i];\n}\n\"\"\")\n\nmultiply_them = mod.get_function(\"multiply_them\")\n\na = numpy.random.randn(400).astype(numpy.float32)\nb = numpy.random.randn(400).astype(numpy.float32)\n\ndest = numpy.zeros_like(a)\nmultiply_them(\n drv.Out(dest), drv.In(a), drv.In(b),\n block=(400,1,1))\n\nprint(dest-a*b)\n" ]
[ [ "numpy.random.randn", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
saiyalamarty/advent-of-code
[ "e3ef525d06859515451d5ac7125004536d8ef985" ]
[ "src/year_2021/day_7/puzzle.py" ]
[ "import os\n\nimport numpy as np\n\n\ndef main():\n\n # Read contents of input (as a file) with a context manager\n file_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'input.data')\n )\n with open(file_path, \"r\") as input_file:\n for line in input_file:\n positions = np.array(list(map(int, line.strip().split(\",\"))))\n\n min_fuel_1 = min_fuel_2 = np.inf\n for i in range(min(positions), max(positions)):\n min_fuel_1 = min(sum(np.absolute(positions - i)), min_fuel_1)\n min_fuel_2 = min(\n sum(\n int(each * (each + 1) / 2)\n for each in np.absolute(positions - i)\n ), min_fuel_2\n )\n\n print(f\"Puzzle 1 -> {min_fuel_1}\")\n print(f\"Puzzle 2 -> {min_fuel_2}\")\n\n return min_fuel_1, min_fuel_2\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.absolute" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thestephencasper/football
[ "5f10d87961c493712ba22146696088aea891df66" ]
[ "gfootball/make_victim_action_dataset.py" ]
[ "# coding=utf-8\n# Copyright 2019 Google LLC\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script allowing to play the game by multiple players.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nfrom baselines.bench import monitor\nfrom baselines import logger\nimport numpy as np\nimport pickle\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom gfootball.env import config\nfrom gfootball.env import football_env\nfrom gfootball.env import wrappers\nfrom gfootball.env import _apply_output_wrappers\nfrom gfootball.env import _process_representation_wrappers\nfrom gfootball.env import observation_preprocessing\nfrom gfootball.examples.models import gfootball_impala_cnn\nfrom gfootball.env.players.ppo2_cnn import Player, ObservationStacker\nfrom gfootball.env.football_action_set import full_action_set\n\n\ndef main(_):\n\n left_player = 'ppo2_cnn:left_players=1,policy=gfootball_impala_cnn,checkpoint=/Users/stephen/Documents/football/checkpoints/11_vs_11_easy_stochastic_v2'\n right_player = 'ppo2_cnn:right_players=1,policy=gfootball_impala_cnn,checkpoint=/Users/stephen/Documents/football/checkpoints/11_vs_11_easy_stochastic_v2'\n players = [left_player, right_player]\n\n env_config_values = {'dump_full_episodes': False,\n 'dump_scores': False,\n 'players': players,\n 'level': '11_vs_11_easy_stochastic',\n 'tracesdir': '/Users/stephen/Documents/football/logs', # logdir\n 'write_video': False}\n\n env_config = config.Config(env_config_values)\n env = football_env.FootballEnv(env_config)\n env.reset()\n\n player_config = {'index': 2}\n name, definition = config.parse_player_definition(left_player)\n config_name = 'player_{}'.format(name)\n if config_name in player_config:\n player_config[config_name] += 1\n else:\n player_config[config_name] = 0\n player_config.update(definition)\n player_config['stacked'] = True\n player = Player(player_config, env_config)\n stacker = ObservationStacker(4)\n\n n_timesteps = 30000 # 10 games\n game_i = 0\n observations = []\n actions = []\n\n for i in range(n_timesteps):\n obs, _, done, _ = env.step([])\n obs_processed = observation_preprocessing.generate_smm([obs])\n obs_processed = stacker.get(obs_processed)\n observations.append(obs_processed)\n act = player.take_action([obs])[0]\n actions.append(full_action_set.index(act))\n if done:\n env.reset()\n stacker.reset()\n observations = np.squeeze(np.vstack(observations)) # should not be shape (3000, 72, 96, 16)\n actions = np.array(actions) # should be shape (n_samples,)\n with open(f'/Users/stephen/Documents/football/data/observations{game_i}.pkl', 'wb') as f:\n pickle.dump(observations, f)\n with open(f'/Users/stephen/Documents/football/data/actions{game_i}.pkl', 'wb') as f:\n pickle.dump(actions, f)\n game_i += 1\n observations = []\n actions = []\n\n print('Done :)')\n\n\nif __name__ == '__main__':\n app.run(main)" ]
[ [ "numpy.array", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
valsworthen/toxic-comment-classification
[ "12ceb4d78410a14fba05e43f6f424cec52e6665d" ]
[ "tools/utils.py" ]
[ "\"\"\"Utilities\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom attrdict import AttrDict\nimport yaml\n\ndef average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):\n \"\"\"Average k-fold predictions stored in a dict\"\"\"\n preds = np.zeros((num_samples, num_labels))\n for preds_i in cv_predictions:\n preds += preds_i\n preds /= n_splits\n return preds\n\ndef geom_average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):\n \"\"\"Average k-fold predictions stored in a dict\"\"\"\n preds = np.ones((num_samples, num_labels))\n for preds_i in cv_predictions:\n preds *= preds_i\n preds = preds **(1/n_splits)\n return preds\n\ndef create_submission(preds, filename):\n labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n subm = pd.read_csv('input/sample_submission.csv')\n submid = pd.DataFrame({'id': subm[\"id\"]})\n submission = pd.concat([submid, pd.DataFrame(preds, columns = labels)], axis=1)\n submission.to_csv(filename, index=False)\n\ndef format_time(sec):\n m, s = divmod(sec, 60)\n h, m = divmod(m, 60)\n return \"{:.0f}h {:.0f}min {:.0f}s\".format(h, m, s)\n\ndef read_yaml(filepath):\n with open(filepath) as f:\n config = yaml.load(f)\n return AttrDict(config)\n" ]
[ [ "pandas.read_csv", "numpy.zeros", "pandas.DataFrame", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
zsy0828/OpenHGNN
[ "7fe0917008c9f50269bbd308e411a1d8199d667d" ]
[ "openhgnn/trainerflow/entity_classification.py" ]
[ "import dgl\nimport torch\nfrom tqdm import tqdm\nfrom ..models import build_model\nfrom ..layers.EmbedLayer import HeteroEmbedLayer\nfrom . import BaseFlow, register_flow\nfrom ..tasks import build_task\nfrom ..utils import extract_embed, EarlyStopping, get_nodes_dict\n\n\n@register_flow(\"entity_classification\")\nclass EntityClassification(BaseFlow):\n \"\"\"Node classification flows.\n Supported Model: RGCN/CompGCN/RSHN\n Supported Dataset:AIFB/MUTAG/BGS/AM\n Dataset description can be found in https://github.com/dmlc/dgl/tree/master/examples/pytorch/rgcn-hetero\n The task is to classify the entity.\n \"\"\"\n\n def __init__(self, args):\n super(EntityClassification, self).__init__(args)\n\n self.args = args\n self.model_name = args.model\n self.device = args.device\n self.task = build_task(args)\n\n self.hg = self.task.get_graph().to(self.device)\n self.num_classes = self.task.dataset.num_classes\n\n if hasattr(self.task.dataset, 'in_dim'):\n self.args.in_dim = self.task.dataset.in_dim\n elif not hasattr(self.args, 'in_dim'):\n raise ValueError('Set input dimension parameter!')\n # Build the model. If the output dim is not equal the number of classes, modify the dim.\n if not hasattr(self.task.dataset, 'out_dim') or args.out_dim != self.num_classes:\n print('Modify the out_dim with num_classes')\n self.args.out_dim = self.num_classes\n\n self.model = build_model(self.model_name).build_model_from_args(self.args, self.hg).to(self.device)\n\n self.loss_fn = self.task.get_loss_fn()\n if self.task.dataset.has_feature:\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n self.has_feature = True\n else:\n self.has_feature = False\n self.input_feature = HeteroEmbedLayer(get_nodes_dict(self.hg), args.in_dim).to(self.device)\n self.optimizer = torch.optim.Adam([{'params': self.model.parameters()},\n {'params': self.input_feature.parameters()}],\n lr=args.lr, weight_decay=args.weight_decay)\n self.patience = args.patience\n self.max_epoch = args.max_epoch\n\n self.category = self.task.dataset.category\n self.train_idx, self.val_idx, self.test_idx = self.task.get_idx()\n self.labels = self.task.get_labels().to(self.device)\n if self.args.mini_batch_flag:\n self.sampler = dgl.dataloading.MultiLayerNeighborSampler([self.args.fanout] * self.args.n_layers)\n #self.sampler = dgl.dataloading.MultiLayerFullNeighborSampler(self.args.n_layers)\n self.train_loader = dgl.dataloading.NodeDataLoader(\n self.hg.to('cpu'), {self.category: self.train_idx.to('cpu')}, self.sampler,\n batch_size=self.args.batch_size, shuffle=True, num_workers=4\n )\n self.test_loader = dgl.dataloading.NodeDataLoader(\n self.hg.to('cpu'), {self.category: self.test_idx.to('cpu')}, self.sampler,\n batch_size=self.args.batch_size, shuffle=False, num_workers=0, drop_last=False,\n )\n\n def preprocess(self):\n return\n\n def train(self):\n self.preprocess()\n stopper = EarlyStopping(self.args.patience, self._checkpoint)\n epoch_iter = tqdm(range(self.max_epoch))\n for epoch in epoch_iter:\n if self.args.mini_batch_flag:\n loss = self._mini_train_step()\n else:\n loss = self._full_train_step()\n if (epoch + 1) % self.evaluate_interval == 0:\n acc, losses = self._test_step()\n train_acc = acc[\"train\"]\n val_acc = acc[\"val\"]\n val_loss = losses[\"val\"]\n epoch_iter.set_description(\n f\"Epoch: {epoch:03d}, Loss:{loss: .4f}, Train_acc: {train_acc:.4f}, Val_acc: {val_acc:.4f}, Val_loss: {val_loss:.4f}\"\n )\n #print(f'Test_acc:{acc[\"test\"]:.4f}')\n # print(\n # f\"Epoch: 
{epoch:03d}, Loss:{loss: .4f}, Train_acc: {train_acc:.4f}, Val_acc: {val_acc:.4f}, Val_loss: {val_loss:.4f}\"\n # )\n early_stop = stopper.loss_step(val_loss, self.model)\n if early_stop:\n print('Early Stop!\\tEpoch:' + str(epoch))\n break\n\n print(f\"Valid loss = {stopper.best_loss: .4f}\")\n stopper.load_model(self.model)\n test_acc, _ = self._test_step(split=\"test\")\n val_acc, _ = self._test_step(split=\"val\")\n print(f\"Test accuracy = {test_acc:.4f}\")\n return dict(Acc=test_acc, ValAcc=val_acc)\n\n def _full_train_step(self):\n self.model.train()\n if self.has_feature == True:\n h = self.hg.ndata['h']\n else:\n h = self.input_feature()\n logits = self.model(self.hg, h)[self.category]\n loss = self.loss_fn(logits[self.train_idx], self.labels[self.train_idx])\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n def _mini_train_step(self):\n self.model.train()\n loss_all = 0\n for i, (input_nodes, seeds, blocks) in enumerate(self.train_loader):\n n = i + 1\n blocks = [blk.to(self.device) for blk in blocks]\n seeds = seeds[self.category] # out_nodes, we only predict the nodes with type \"category\"\n # batch_tic = time.time()\n lbl = self.labels[seeds].to(self.device).squeeze()\n if self.has_feature:\n h = blocks[0].srcdata['h']\n else:\n h = self.input_feature.forward_nodes(input_nodes)\n logits = self.model(blocks, h)[self.category]\n loss = self.loss_fn(logits, lbl)\n loss_all += loss.item()\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n return loss_all / n\n\n def _mini_test(self, split=None, logits=None):\n self.model.eval()\n if split == \"train\":\n mask = self.train_idx\n elif split == \"val\":\n mask = self.val_idx\n elif split == \"test\":\n mask = self.test_idx\n else:\n mask = None\n if mask is not None:\n dataloader = dgl.dataloading.NodeDataLoader(\n self.hg.to('cpu'), {self.category: mask.to('cpu')}, self.sampler,\n batch_size=self.args.batch_size, shuffle=False, num_workers=0, drop_last=False,\n )\n preds = self._forward_model(dataloader)\n acc = self.task.evaluate(preds, 'acc-ogbn-mag')\n else:\n masks = {'train': self.train_idx, 'val': self.val_idx, 'test': self.test_idx}\n acc = {}\n for key, mask in masks:\n dataloader = dgl.dataloading.NodeDataLoader(\n self.hg.to('cpu'), {self.category: mask.to('cpu')}, self.sampler,\n batch_size=self.args.batch_size, shuffle=False, num_workers=0, drop_last=False,\n )\n preds = self._forward_model(dataloader)\n acc[key] = self.task.evaluate(preds, 'acc-ogbn-mag')\n return acc, 0\n\n def _forward_model(self, dataloader):\n self.model.eval()\n with torch.no_grad():\n preds = []\n for i, (input_nodes, seeds, blocks) in enumerate(dataloader):\n blocks = [blk.to(self.device) for blk in blocks]\n # batch_tic = time.time()\n if self.has_feature:\n h = blocks[0].srcdata['h']\n else:\n h = self.input_feature.forward_nodes(input_nodes)\n preds.append(self.model(blocks, h)[self.category].argmax(dim=1).to('cpu'))\n preds = torch.cat(preds, dim=0)\n return preds\n\n def _test_step(self, split=None, logits=None):\n self.model.eval()\n with torch.no_grad():\n logits = logits if logits else self.model(self.hg, self.input_feature())[self.category]\n if split == \"train\":\n mask = self.train_idx\n elif split == \"val\":\n mask = self.val_idx\n elif split == \"test\":\n mask = self.test_idx\n else:\n mask = None\n\n if mask is not None:\n loss = self.loss_fn(logits[mask], self.labels[mask]).item()\n metric = self.task.evaluate(logits[mask].argmax(dim=1).to('cpu'), 'acc', 
mask)\n return metric, loss\n else:\n masks = {'train': self.train_idx, 'val': self.val_idx, 'test': self.test_idx}\n metrics = {key: self.task.evaluate(logits[mask].argmax(dim=1).to('cpu'), 'acc', mask) for key, mask in\n masks.items()}\n losses = {key: self.loss_fn(logits[mask], self.labels[mask]).item() for key, mask in masks.items()}\n return metrics, losses\n" ]
[ [ "torch.no_grad", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RobinAbrahamse/hls4ml
[ "8d68c5262c6c8728502fa88b1a2d2429929e222c" ]
[ "hls4ml/model/profiling.py" ]
[ "from hls4ml.model.hls_model import HLSModel\nfrom hls4ml.model.hls_layers import IntegerPrecisionType, FixedPrecisionType\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas\nimport seaborn as sb\nimport uuid\nimport os\nimport shutil\nimport json\nfrom collections import defaultdict\n\nfrom hls4ml.model.hls_model import HLSModel\n\ntry:\n from tensorflow import keras\n import qkeras\n __tf_profiling_enabled__ = True\nexcept ImportError:\n __tf_profiling_enabled__ = False\n\ntry:\n import torch\n __torch_profiling_enabled__ = True\nexcept ImportError:\n __torch_profiling_enabled__ = False\n\n\ndef get_unoptimized_hlsmodel(model):\n from hls4ml.converters import convert_from_config\n\n new_config = model.config.config.copy()\n new_config['HLSConfig'] = json.loads(json.dumps(new_config['HLSConfig']))\n\n new_output_dir = uuid.uuid4().hex\n\n while os.path.exists(new_output_dir):\n new_output_dir = uuid.uuid4().hex\n\n if 'SkipOptimizers' in new_config['HLSConfig']:\n del new_config['HLSConfig']['SkipOptimizers']\n\n new_config['HLSConfig']['Optimizers'] = []\n new_config['OutputDir'] = new_output_dir\n\n return convert_from_config(new_config), new_output_dir\n\n\ndef array_to_summary(x, fmt='boxplot'):\n if fmt == 'boxplot':\n y = {'med' : np.median(x),\n 'q1' : np.percentile(x, 25),\n 'q3' : np.percentile(x, 75),\n 'whislo' : min(x),\n 'whishi' : max(x)\n }\n elif fmt == 'histogram':\n # Power of 2 bins covering data range\n high = np.ceil(np.log2(max(x))) + 1\n low = np.floor(np.log2(min(x))) - 1\n bits = np.arange(low, high, 1)\n bins = 2 ** bits\n h, b = np.histogram(x, bins=bins)\n h = h * 1. / float(sum(h)) # normalize\n y = {'h' : h,\n 'b' : np.log2(b)}\n return y\n\ndef boxplot(data, fmt='longform'):\n if fmt == 'longform':\n f = plt.figure() #figsize=(3, 3))\n hue = 'layer' if 'layer' in data.keys() else None\n vp = sb.boxplot(x='x', y='weight', hue=hue, data=data[data['x'] > 0], showfliers=False)\n vp.set_yticklabels(vp.get_yticklabels(), rotation=45, ha='right')\n if hue is not None:\n vp.get_legend().remove()\n vp.set_xscale('log', base=2)\n return f\n elif fmt == 'summary':\n from matplotlib.patches import Rectangle\n medianprops = dict(linestyle='-', color='k')\n f, ax = plt.subplots(1, 1)\n data.reverse()\n colors = sb.color_palette(\"Blues\", len(data))\n bp = ax.bxp(data, showfliers=False, vert=False, medianprops=medianprops)\n # add colored boxes\n for line, color in zip(bp['boxes'], colors):\n x = line.get_xdata()\n xl, xh = min(x), max(x)\n y = line.get_ydata()\n yl, yh = min(y), max(y)\n rect = Rectangle((xl, yl), (xh-xl), (yh-yl), fill=True, color=color)\n ax.add_patch(rect)\n ax.set_yticklabels([d['weight'] for d in data])\n ax.set_xscale('log', base=2)\n plt.xlabel('x')\n return f\n else:\n return None\n\ndef histogram(data, fmt='longform'):\n f = plt.figure()\n from matplotlib.ticker import MaxNLocator\n n = len(data) if fmt == 'summary' else len(data['weight'].unique())\n colors = sb.color_palette(\"husl\", n)\n if fmt == 'longform':\n for i, weight in enumerate(data['weight'].unique()):\n y = array_to_summary(data[data['weight'] == weight]['x'], fmt='histogram')\n plt.bar(y['b'][:-1], y['h'], width=1, fill=False, label=weight, edgecolor=colors[i])\n elif fmt == 'summary':\n for i, weight in enumerate(data):\n plt.bar(weight['b'][:-1], weight['h'], width=1, fill=False, label=weight['weight'], edgecolor=colors[i])\n\n plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.xlabel('log2(x)')\n plt.ylabel('frequency')\n plt.legend()\n 
return f\n\nplots = {'boxplot' : boxplot,\n 'histogram' : histogram}\n\ndef types_boxplot(data, fmt='longform'):\n from matplotlib.patches import PathPatch\n from matplotlib.patches import Rectangle\n ax = plt.gca()\n f = plt.gcf()\n # Scale the data\n data['low'] = 2.**data['low']\n data['high'] = 2.**data['high']\n\n # Plot the custom precisions\n ticks = np.array([tick.get_text() for tick in plt.yticks()[1]])\n # Get the coordinates of the boxes to place the markers\n if fmt == 'longform':\n # seaborn adjusts the box positions slightly in groups\n boxes = [c.get_extents().inverse_transformed(ax.transData) for c in ax.get_children() if isinstance(c, PathPatch)]\n ys = [(box.y0 + box.y1) / 2 for box in boxes]\n ys = [(y, y) for y in ys]\n elif fmt == 'summary':\n ys = [(y, y) for y in plt.yticks()[0]]\n for irow, row in data[data['layer'] != 'model'].iterrows():\n if row['layer'] in ticks:\n iy = np.argwhere(ticks == row['layer'])[0][0] # Determine which layer in the plot\n rectangle = Rectangle((row['low'], ys[iy][0]-0.4), row['high']-row['low'], 0.8, fill=True, color='grey', alpha=0.2)\n ax.add_patch(rectangle)\n\ndef types_histogram(data, fmt='longform'):\n ax = plt.gca()\n layers = np.array(ax.get_legend_handles_labels()[1])\n colors = sb.color_palette(\"husl\", len(layers))\n ylim = ax.get_ylim()\n for irow, row in data[data['layer'] != 'model'].iterrows():\n if row['layer'] in layers:\n col = colors[np.argwhere(layers == row['layer'])[0][0]]\n plt.plot((row['low'], row['low']), ylim, '--', color=col)\n plt.plot((row['high'], row['high']), ylim, '--', color=col)\n\ntypes_plots = {'boxplot' : types_boxplot,\n 'histogram' : types_histogram}\n\ndef ap_fixed_WIF(dtype):\n from hls4ml.templates.vivado_template import VivadoBackend\n dtype = VivadoBackend.convert_precision_string(None, dtype) \n W, I, F = dtype.width, dtype.integer, dtype.fractional\n return W, I, F\n\ndef types_hlsmodel(model):\n suffix = ['w', 'b']\n data = {'layer' : [], 'low' : [], 'high' : []}\n # Plot the default precision\n default_precision = model.config.model_precision['default']\n # assumes ap_fixed\n W, I, F = ap_fixed_WIF(default_precision)\n data['layer'].append('model')\n data['low'].append(-F)\n data['high'].append(I-1)\n\n for layer in model.get_layers():\n for iw, weight in enumerate(layer.get_weights()):\n wname = '{}/{}'.format(layer.name, suffix[iw])\n T = weight.type\n if T.name != 'model':\n W, I, F = ap_fixed_WIF(T.precision)\n data['layer'].append(wname)\n data['low'].append(-F)\n data['high'].append(I-1)\n data = pandas.DataFrame(data)\n return data\n\ndef activation_types_hlsmodel(model):\n data = {'layer' : [], 'low' : [], 'high' : []}\n # Get the default precision\n default_precision = model.config.model_precision['default']\n W, I, F = ap_fixed_WIF(default_precision)\n data['layer'].append('model')\n data['low'].append(-F)\n data['high'].append(I-1)\n for layer in model.get_layers():\n T = layer.get_output_variable().type.precision\n W, I, F = ap_fixed_WIF(T)\n data['layer'].append(layer.name)\n data['low'].append(-F)\n data['high'].append(I-1)\n data = pandas.DataFrame(data)\n return data\n\ndef weights_hlsmodel(model, fmt='longform', plot='boxplot'):\n suffix = ['w', 'b']\n if fmt == 'longform':\n data = {'x' : [], 'layer' : [], 'weight' : []}\n elif fmt == 'summary':\n data = []\n\n for layer in model.get_layers():\n name = layer.name\n for iw, weight in enumerate(layer.get_weights()):\n l = '{}/{}'.format(name, suffix[iw])\n w = weight.data.flatten()\n w = abs(w[w != 0])\n n = len(w)\n if 
n == 0:\n print(f'Weights for {name} are only zeros, ignoring.')\n break\n if fmt == 'longform':\n data['x'].extend(w.tolist())\n data['layer'].extend([name for i in range(len(w))])\n data['weight'].extend([l for i in range(len(w))])\n elif fmt == 'summary':\n data.append(array_to_summary(w, fmt=plot))\n data[-1]['layer'] = name\n data[-1]['weight'] = l\n\n if fmt == 'longform':\n data = pandas.DataFrame(data)\n return data\n\n\ndef _keras_batchnorm(layer):\n weights = layer.get_weights()\n epsilon = layer.epsilon\n\n gamma = weights[0]\n beta = weights[1]\n mean = weights[2]\n var = weights[3]\n\n scale = gamma / np.sqrt(var + epsilon)\n bias = beta - gamma * mean / np.sqrt(var + epsilon)\n\n return [scale, bias], ['s', 'b']\n\n\ndef _keras_layer(layer):\n return layer.get_weights(), ['w', 'b']\n\n\nkeras_process_layer_map = defaultdict(lambda: _keras_layer,\n {\n 'BatchNormalization': _keras_batchnorm,\n 'QBatchNormalization': _keras_batchnorm\n })\n\n\ndef activations_hlsmodel(model, X, fmt='summary', plot='boxplot'):\n if fmt == 'longform':\n raise NotImplemented\n elif fmt == 'summary':\n data = []\n\n _, trace = model.trace(np.ascontiguousarray(X))\n\n if len(trace) == 0:\n raise RuntimeError(\"HLSModel must have tracing on for at least 1 layer (this can be set in its config)\")\n\n for layer in trace.keys():\n print(\" {}\".format(layer))\n\n if fmt == 'summary':\n y = trace[layer].flatten()\n y = abs(y[y != 0])\n\n if len(y) == 0:\n print(f'Activations for {layer} are only zeros, ignoring.')\n continue\n\n data.append(array_to_summary(y, fmt=plot))\n data[-1]['weight'] = layer\n\n return data\n\n\ndef weights_keras(model, fmt='longform', plot='boxplot'):\n if fmt == 'longform':\n data = {'x' : [], 'layer' : [], 'weight' : []}\n elif fmt == 'summary':\n data = []\n for layer in model.layers:\n name = layer.name\n weights, suffix = keras_process_layer_map[type(layer).__name__](layer)\n\n for i, w in enumerate(weights):\n l = '{}/{}'.format(name, suffix[i])\n w = w.flatten()\n w = abs(w[w != 0])\n n = len(w)\n if n == 0:\n print(f'Weights for {name} are only zeros, ignoring.')\n break\n if fmt == 'longform':\n data['x'].extend(w.tolist())\n data['layer'].extend([name for j in range(n)])\n data['weight'].extend([l for j in range(n)])\n elif fmt == 'summary':\n data.append(array_to_summary(w, fmt=plot))\n data[-1]['layer'] = name\n data[-1]['weight'] = l\n\n if fmt == 'longform':\n data = pandas.DataFrame(data)\n return data\n\ndef activations_keras(model, X, fmt='longform', plot='boxplot'):\n # test layer by layer on data\n if fmt == 'longform':\n # return long form pandas dataframe for\n # seaborn boxplot\n data = {'x' : [], 'weight' : []}\n elif fmt == 'summary':\n # return summary statistics for matplotlib.axes.Axes.bxp\n # or histogram bin edges and heights\n data = []\n\n for layer in model.layers:\n print(\" {}\".format(layer.name))\n if not isinstance(layer, keras.layers.InputLayer):\n y = _get_output(layer, X, model.input).flatten()\n y = abs(y[y != 0])\n if len(y) == 0:\n print(f'Activations for {layer.name} are only zeros, ignoring.')\n continue\n if fmt == 'longform':\n data['x'].extend(y.tolist())\n data['weight'].extend([layer.name for i in range(len(y))])\n elif fmt == 'summary':\n data.append(array_to_summary(y, fmt=plot))\n data[-1]['weight'] = layer.name\n\n if fmt == 'longform':\n data = pandas.DataFrame(data)\n return data\n\n\ndef weights_torch(model, fmt='longform', plot='boxplot'):\n suffix = ['w', 'b']\n if fmt == 'longform':\n data = {'x': [], 'layer': [], 
'weight': []}\n elif fmt == 'summary':\n data = []\n for layer in model.children():\n if isinstance(layer, torch.nn.Linear):\n name = layer.__class__.__name__\n weights = list(layer.parameters())\n for i, w in enumerate(weights):\n l = '{}/{}'.format(name, suffix[i])\n w = weights[i].detach().numpy()\n w = w.flatten()\n w = abs(w[w != 0])\n n = len(w)\n if n == 0:\n print(f'Weights for {name} are only zeros, ignoring.')\n break\n if fmt == 'longform':\n data['x'].extend(w.tolist())\n data['layer'].extend([name for _ in range(n)])\n data['weight'].extend([l for _ in range(n)])\n elif fmt == 'summary':\n data.append(array_to_summary(w, fmt=plot))\n data[-1]['layer'] = name\n data[-1]['weight'] = l\n\n if fmt == 'longform':\n data = pandas.DataFrame(data)\n return data\n\n\ndef activations_torch(model, X, fmt='longform', plot='boxplot'):\n X = torch.Tensor(X)\n if fmt == 'longform':\n data = {'x': [], 'weight': []}\n elif fmt == 'summary':\n data = []\n\n partial_model = torch.nn.Sequential\n layers = []\n for layer in model.children():\n lname = layer.__class__.__name__\n layers.append(layer)\n pm = partial_model(*layers)\n print(\" {}\".format(lname))\n y = pm(X).flatten().detach().numpy()\n y = abs(y[y != 0])\n if len(y) == 0:\n print(f'Activations for {lname} are only zeros, ignoring.')\n continue\n if fmt == 'longform':\n data['x'].extend(y.tolist())\n data['weight'].extend([lname for _ in range(len(y))])\n elif fmt == 'summary':\n data.append(array_to_summary(y, fmt=plot))\n data[-1]['weight'] = lname\n\n if fmt == 'longform':\n data = pandas.DataFrame(data)\n return data\n\n\ndef numerical(model=None, hls_model=None, X=None, plot='boxplot'):\n \"\"\"\n Perform numerical profiling of a model\n\n Parameters\n ----------\n model : keras or pytorch model\n The model to profile\n hls_model : HLSModel\n The HLSModel to profile\n X : array-like, optional\n Test data on which to evaluate the model to profile activations\n Must be formatted suitably for the ``model.predict(X)`` method\n plot : str, optional\n The type of plot to produce.\n Options are: 'boxplot' (default), 'violinplot', 'histogram',\n 'FacetGrid'\n\n Returns\n -------\n tuple\n The quadruple of produced figures. First weights and biases\n for the pre- and post-optimization models respectively,\n then activations for the pre- and post-optimization models\n respectively. 
(Optimizations are applied to an HLSModel by hls4ml,\n a post-optimization HLSModel is a final model)\n \"\"\"\n wp, wph, ap, aph = None, None, None, None\n\n hls_model_present = hls_model is not None and isinstance(hls_model, HLSModel)\n model_present = model is not None\n\n if hls_model_present:\n before = \" (before optimization)\"\n after = \" (final / after optimization)\"\n hls_model_unoptimized, tmp_output_dir = get_unoptimized_hlsmodel(hls_model)\n else:\n before = \"\"\n after = \"\"\n hls_model_unoptimized, tmp_output_dir = None, None\n\n print(\"Profiling weights\" + before)\n data = None\n\n if hls_model_present:\n data = weights_hlsmodel(hls_model_unoptimized, fmt='summary', plot=plot)\n elif model_present:\n if __tf_profiling_enabled__ and isinstance(model, keras.Model):\n data = weights_keras(model, fmt='summary', plot=plot)\n elif __torch_profiling_enabled__ and \\\n isinstance(model, torch.nn.Sequential):\n data = weights_torch(model, fmt='summary', plot=plot)\n\n if data is None:\n print(\"Only keras, PyTorch (Sequential) and HLSModel models \" +\n \"can currently be profiled\")\n\n if hls_model_present and os.path.exists(tmp_output_dir):\n shutil.rmtree(tmp_output_dir)\n\n return wp, wph, ap, aph\n\n wp = plots[plot](data, fmt='summary') # weight plot\n\n if hls_model_present and plot in types_plots:\n t_data = types_hlsmodel(hls_model_unoptimized)\n types_plots[plot](t_data, fmt='summary')\n\n plt.title(\"Distribution of (non-zero) weights\" + before)\n plt.tight_layout()\n\n if hls_model_present:\n print(\"Profiling weights\" + after)\n\n data = weights_hlsmodel(hls_model, fmt='summary', plot=plot)\n wph = plots[plot](data, fmt='summary') # weight plot\n\n if plot in types_plots:\n t_data = types_hlsmodel(hls_model)\n types_plots[plot](t_data, fmt='summary')\n\n plt.title(\"Distribution of (non-zero) weights\" + after)\n plt.tight_layout()\n\n if X is not None:\n print(\"Profiling activations\" + before)\n data = None\n if __tf_profiling_enabled__ and isinstance(model, keras.Model):\n data = activations_keras(model, X, fmt='summary', plot=plot)\n elif __torch_profiling_enabled__ and \\\n isinstance(model, torch.nn.Sequential):\n data = activations_torch(model, X, fmt='summary', plot=plot)\n\n if data is not None:\n ap = plots[plot](data, fmt='summary') # activation plot\n if hls_model_present and plot in types_plots:\n t_data = activation_types_hlsmodel(hls_model_unoptimized)\n types_plots[plot](t_data, fmt='summary')\n plt.title(\"Distribution of (non-zero) activations\" + before)\n plt.tight_layout()\n\n if hls_model_present:\n print(\"Profiling activations\" + after)\n data = activations_hlsmodel(hls_model, X, fmt='summary', plot=plot)\n aph = plots[plot](data, fmt='summary')\n\n t_data = activation_types_hlsmodel(hls_model)\n types_plots[plot](t_data, fmt='summary')\n\n plt.title(\"Distribution of (non-zero) activations (final / after optimization)\")\n plt.tight_layout()\n\n if hls_model_present and os.path.exists(tmp_output_dir):\n shutil.rmtree(tmp_output_dir)\n\n return wp, wph, ap, aph\n\n\n########COMPARE OUTPUT IMPLEMENTATION########\ndef _is_ignored_layer(layer):\n \"\"\"Some layers need to be ingored during inference\"\"\"\n if isinstance(layer, (keras.layers.InputLayer,\n keras.layers.Dropout)):\n return True\n return False\n\ndef _get_output(layer, X, model_input):\n \"\"\"Get output of partial model\"\"\"\n partial_model = keras.models.Model(inputs=model_input,\n outputs=layer.output)\n y = partial_model.predict(X)\n return y\n\ndef 
get_ymodel_keras(keras_model, X):\n \"\"\"\n Calculate each layer's ouput and put them into a dictionary\n\n Parameters\n ----------\n keras_model :\n a keras model\n X : array-like\n Test data on which to evaluate the model to profile activations.\n Must be formatted suitably for the ``model.predict(X)`` method.\n\n Returns\n -------\n dictionary\n A dictionary in the form {\"layer_name\": ouput array of layer}\n \"\"\"\n \n ymodel = {}\n \n for layer in keras_model.layers:\n print(\"Processing {} in Keras model...\".format(layer.name))\n if not _is_ignored_layer(layer):\n #If the layer has activation integrated then separate them\n #Note that if the layer is a standalone activation layer then skip this\n if hasattr(layer, 'activation') and not (isinstance(layer,keras.layers.Activation) or isinstance(layer, qkeras.qlayers.QActivation)):\n if layer.activation:\n \n if layer.activation.__class__.__name__ == \"linear\":\n ymodel[layer.name] = _get_output(layer, X, keras_model.input)\n \n else:\n temp_activation = layer.activation\n layer.activation = None\n #Get output for layer without activation\n ymodel[layer.name] = _get_output(layer, X, keras_model.input)\n\n #Add the activation back \n layer.activation = temp_activation\n #Get ouput for activation\n ymodel[layer.name + \"_{}\".format(temp_activation.__class__.__name__)] = _get_output(layer, X, keras_model.input)\n else:\n ymodel[layer.name] = _get_output(layer, X, keras_model.input)\n else: \n ymodel[layer.name] = _get_output(layer, X, keras_model.input)\n print(\"Done taking outputs for Keras model.\")\n return ymodel\n\ndef _norm_diff(ymodel, ysim):\n \"\"\"Calculate the square root of the sum of the squares of the differences\"\"\"\n diff = {}\n \n for key in list(ysim.keys()):\n diff[key] = np.linalg.norm(ysim[key]-ymodel[key])\n \n #---Bar Plot---\n f, ax = plt.subplots()\n plt.bar(list(diff.keys()),list(diff.values()))\n plt.title(\"layer-by-layer output differences\")\n ax.set_ylabel('Norm of difference vector')\n plt.xticks(rotation=90)\n plt.tight_layout()\n return f\n\ndef _dist_diff(ymodel, ysim):\n \"\"\"\n Calculate the normalized distribution of the differences of the elements\n of the output vectors. 
\n If difference >= original value then the normalized difference will be set to 1,\n meaning \"very difference\".\n If difference < original value then the normalized difference would be difference/original.\n \"\"\"\n\n diff = {}\n\n for key in list(ysim.keys()):\n flattened_ysim = ysim[key].flatten()\n flattened_ymodel = np.array(ymodel[key]).flatten()\n\n diff[key] = np.absolute(flattened_ymodel - flattened_ysim) / np.linalg.norm(flattened_ymodel - flattened_ysim)\n diff_vector = np.absolute(flattened_ymodel - flattened_ysim)\n abs_ymodel = np.absolute(flattened_ymodel)\n\n normalized_diff = np.zeros(diff_vector.shape)\n normalized_diff[(diff_vector >= abs_ymodel) & (abs_ymodel>0) & (diff_vector>0)] = 1\n\n #Fill out the rest\n index = diff_vector < abs_ymodel\n normalized_diff[index] = diff_vector[index] / abs_ymodel[index]\n\n diff[key] = normalized_diff\n\n #---Box Plot---\n f, ax = plt.subplots()\n pos = np.array(range(len(list(diff.values())))) + 1 \n ax.boxplot(list(diff.values()), sym='k+', positions=pos)\n\n #--formatting\n plt.title(\"Layer-by-layer distribution of output differences\")\n ax.set_xticklabels(list(diff.keys()))\n ax.set_ylabel('Normalized difference')\n ax.set_ylabel('Percent difference.')\n plt.xticks(rotation=90)\n plt.tight_layout()\n\n return f\n\ndef compare(keras_model, hls_model, X, plot_type = \"dist_diff\"):\n \"\"\"\n Compare each layer's output in keras and hls model. Note that the hls_model should not be compiled before using this.\n\n Parameters\n ----------\n keras_model : \n original keras model\n hls_model :\n converted HLSModel, with \"Trace:True\" in the configuration file.\n X : array-like \n Input for the model. \n plot_type : string\n different methods to visualize the y_model and y_sim differences.\n Possible options include:\n \n - 'norm_diff' : square root of the sum of the squares of the differences \n between each output vectors \n - 'dist_diff' : The normalized distribution of the differences of the elements\n between two output vectors\n \n Returns\n -------\n matplotlib figure\n plot object of the histogram depicting the difference in each layer's output\n \"\"\"\n \n #Take in output from both models\n #Note that each y is a dictionary with structure {\"layer_name\": flattened ouput array}\n ymodel = get_ymodel_keras(keras_model, X)\n _, ysim = hls_model.trace(X)\n \n print(\"Plotting difference...\")\n f = plt.figure()\n if plot_type == \"norm_diff\":\n f = _norm_diff(ymodel, ysim)\n elif plot_type == \"dist_diff\":\n f = _dist_diff(ymodel, ysim)\n\n return f\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.histogram", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.arange", "matplotlib.pyplot.gcf", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "tensorflow.keras.models.Model", "numpy.ascontiguousarray", "numpy.median", "matplotlib.patches.Rectangle", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.yticks", "numpy.absolute", "numpy.log2", "torch.Tensor", "matplotlib.pyplot.subplots", "numpy.linalg.norm", "numpy.percentile", "numpy.argwhere", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
Mostafa-ashraf19/TourchPIP
[ "a5090a0ec9cc81a91fe1fd6af41d77841361cec1" ]
[ "DLFrameWork/dataset/FashionMNIST.py" ]
[ "import os\nimport zipfile, urllib.request, shutil\nimport requests\nimport matplotlib.pyplot as plt\n\n\nMNIST_URL = 'https://drive.google.com/uc?id=1NjvEw9Ob7sJkEQLWPe_M-XhhZLYCx7lE&export=download'\n\n\nclass FashionMNIST:\n def __init__(self,path,download=True,train=True):\n self.path = path\n self.download = download\n self.train = train\n if self.download:\n self._Download()\n \n self.TrainFile = os.getcwd() + '/' + self.path + '/mnist_train.csv'\n self.TestFile = os.getcwd() + '/' + self.path + '/mnist_test.csv'\n\n def _Download(self):\n if not os.path.exists(os.getcwd() + '/' + self.path):\n os.mkdir(self.path)\n file_name = 'MNIST.zip'\n with urllib.request.urlopen(MNIST_URL) as response, open(os.getcwd() + '/' + self.path + '/' + file_name,\n 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n with zipfile.ZipFile(os.getcwd() + '/' + self.path + '/' + file_name) as zf:\n zf.extractall(os.getcwd() + '/' + self.path + '/')\n\n def __repr__(self):\n return self.TrainFile if self.train == True else self.TestFile\n def plot(self,image):\n plt.imshow(image.reshape(28,28))\n plt.show() " ]
[ [ "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sandrarum/inqbus.graphdemo
[ "63cbea8118673f755c78f62cab06d9744e7fc61c" ]
[ "src/inqbus/graphdemo/bokeh_extension/helpers_contour.py" ]
[ "import os\nfrom glob import glob\n\nimport dask.array as da\nimport numpy as np\nimport scipy.ndimage as sc\nimport tables as tb\nfrom bokeh.models import ColumnDataSource, Float\nfrom inqbus.graphdemo.bokeh_extension.helpers import \\\n binary_from_data_map\nfrom inqbus.graphdemo.constants import (\n MAX_NUMBERS_DEFAULT,\n X_MAX_CONTOUR,\n Y_MAX_CONTOUR,\n Y_MIN_CONTOUR,\n X_MIN_CONTOUR,\n MAX_POINTS_CORRECTION, CONTOUR_DATA_SET)\n\n\ndef maxpoints_filter_matrix(matrix, numpoints_x, numpoints_y):\n \"\"\"\n Minimize number of points to given numpoints\n :param numpoints_y: number of points wanted in y-direction\n :param numpoints_x: number of points wanted in x-direction\n :param matrix: matrix where points should be reduced\n \"\"\"\n rows, columns = matrix.shape\n\n if columns == 0:\n columns = 1\n\n if rows == 0:\n rows = 1\n\n # clacluate factors to minimize points\n row_factor = float(numpoints_y * MAX_POINTS_CORRECTION) / float(rows)\n\n col_factor = float(numpoints_x * MAX_POINTS_CORRECTION) / float(columns)\n\n if row_factor > 1.0:\n row_factor = 1.0\n\n if col_factor > 1.0:\n col_factor = 1.0\n\n return sc.zoom(matrix, (row_factor, col_factor), order=3)\n\n\ndef range_filter(data, xmin, xmax, ymin, ymax):\n \"\"\"\n Remove points which are not displayed in the given range\n \"\"\"\n\n rows, columns = data.shape\n\n row_factor = float(rows) / Y_MAX_CONTOUR\n col_factor = float(columns) / X_MAX_CONTOUR\n\n row_min = max(int(ymin * row_factor), 0)\n row_max = min(int(ymax * row_factor), rows)\n col_min = max(int(xmin * col_factor), 0)\n col_max = min(int(xmax * col_factor), columns)\n\n data = data[row_min:row_max, col_min:col_max]\n\n return data\n\ndef clip(data, x_bin_min, x_bin_max, y_bin_min, y_bin_max):\n \"\"\"\n Remove points which are not displayed in the given range\n \"\"\"\n\n data = data[x_bin_min:x_bin_max, y_bin_min:y_bin_max]\n\n return data\n\n\ndef get_file_data(path,\n plot_width=None,\n plot_height=None,\n x_min=None,\n x_max=None,\n y_min=None,\n y_max=None):\n filenames = sorted(glob(CONTOUR_DATA_SET))\n\n# os.path.join(path,\n# '08',\n# '2015*_leipzig_CHM080079_000.h5')))\n\n if not filenames:\n n = 500\n x = np.linspace(0, 10, n)\n y = np.linspace(0, 10, n)\n xx, yy = np.meshgrid(x, y)\n z = np.sin(xx) * np.cos(yy)\n\n else:\n beta_raws = []\n times = []\n\n for fn in filenames:\n h5_file = tb.open_file(fn, 'r')\n\n signal_group = h5_file.get_node(\"/raw_signal\")\n beta_raws.append(signal_group.beta_raw)\n times.append(signal_group.time)\n # ds = Dataset(fn)\n # beta_raws.append( ds.variables['beta_raw'] )\n # times.append( ds.variables['time'] )\n\n height = signal_group.height\n beta_raw_da_arrays = [da.from_array(beta_raw, chunks=(100, 100)) for\n beta_raw in beta_raws]\n beta_raw_concat = da.concatenate(beta_raw_da_arrays, axis=0)\n\n time_da_arrays = [da.from_array(time, chunks=100) for time in times]\n time_concat = da.concatenate(time_da_arrays, axis=0)\n x = time_concat\n y = np.array(height)\n z = beta_raw_concat\n\n# x_min, x_max, y_min, y_max = clear_ranges(x_min, x_max, y_min, y_max)\n x0= x[0].compute()\n xN= x[-1].compute()\n\n if not x_min:\n x_min = x0\n x_bin_min = 0\n else:\n x_bin_min = int(x.shape[0]*(x0-x_min)/(x0-xN))\n if not x_max:\n x_max = xN\n x_bin_max = x.shape[0]-1\n else:\n x_bin_max = int(x.shape[0]*(x0-x_max)/(x0-xN))\n if not y_min:\n y_min = y[0]\n y_bin_min = 0\n else:\n y_bin_min = int(y.shape[0]*(y[0]-y_min)/(y[0]-y[-1]))\n if not y_max:\n y_max = y[-1]\n y_bin_max = y.shape[0]-1\n else:\n y_bin_max = 
int(y.shape[0]*(y[0]-y_max)/(y[0]-y[-1]))\n\n\n# z = range_filter(z, x_min, x_max, y_min, y_max)\n clipped = clip(z, x_bin_min, x_bin_max, y_bin_min, y_bin_max)\n\n\n if plot_height:\n plot_height = int(plot_height)\n else:\n plot_height = MAX_NUMBERS_DEFAULT\n\n if plot_width:\n plot_width = int(plot_width)\n else:\n plot_width = MAX_NUMBERS_DEFAULT\n\n gridded = maxpoints_filter_matrix(clipped, plot_width, plot_height)\n\n return x.compute(), y, gridded.astype('float64'), x_min, x_max, y_min, y_max\n\nclass ImageColumnDataSource(ColumnDataSource):\n \"\"\" \"\"\"\n\n X0 = Float()\n Y0 = Float()\n DX = Float()\n DY = Float()\n\ndef get_data(path):\n \"\"\"Just return hard coded data in directory 08 or render default\n example of bokeh-doku\"\"\"\n\n x, y, z, x_min, x_max, y_min, y_max = get_file_data(path)\n\n data = ColumnDataSource(data=dict(\n image=[z],\n\n ))\n\n return data, x_min, x_max, y_min, y_max\n\n\ndef clear_ranges(x_min, x_max, y_min, y_max):\n \"\"\"\n check if a range is given and if it is valid. If not use the defaults.\n \"\"\"\n\n if x_min and float(x_min) >= X_MIN_CONTOUR:\n x_min = float(x_min)\n else:\n x_min = X_MIN_CONTOUR\n\n if x_max and float(x_max) <= X_MAX_CONTOUR:\n x_max = float(x_max)\n else:\n x_max = X_MAX_CONTOUR\n\n if y_max and float(y_max) <= Y_MAX_CONTOUR:\n y_max = float(y_max)\n else:\n y_max = Y_MAX_CONTOUR\n\n if y_min and float(y_min) >= Y_MIN_CONTOUR:\n y_min = float(y_min)\n else:\n y_min = Y_MIN_CONTOUR\n\n return x_min, x_max, y_min, y_max\n\ndef get_contour_data_binary(path, plot_width=None,\n plot_height=None,\n x_min=None,\n x_max=None,\n y_min=None,\n y_max=None):\n# x_min, x_max, y_min, y_max = clear_ranges(x_min, x_max, y_min, y_max)\n\n x, y, z, x_min, x_max, y_min, y_max = get_file_data(path,\n plot_width=plot_width,\n plot_height=plot_height,\n x_min=x_min,\n x_max=x_max,\n y_min=y_min,\n y_max=y_max)\n\n shape = z.shape\n\n data_map = {\n 'data.data.image': [z],\n 'attributes.x_min': x_min,\n 'attributes.x_max': x_max,\n 'attributes.y_min': y_min,\n 'attributes.y_max': y_max,\n# 'data.data.x': [x_min],\n# 'data.data.y': [y_min],\n# 'data.data.dw': [x_max - x_min],\n# 'data.data.dh': [y_max - y_min],\n 'data._shapes.image': [shape],\n }\n\n bin_data = binary_from_data_map(data_map)\n\n return bin_data\n" ]
[ [ "numpy.meshgrid", "numpy.linspace", "scipy.ndimage.zoom", "numpy.cos", "numpy.sin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
reeshogue/OldProjects
[ "e4b105148b8aa44827a7fd2d4b417b66a76218ba" ]
[ "IDontKnowWhatThisIsButTheFolderWasCalledGodel/imaginative_ddpg.py" ]
[ "import tensorflow as tf\nimport tensorflow.keras.layers as L\nfrom collections import deque\nimport random\nimport numpy as np\nimport tensorflow_probability as tfp\n\ndef set_random_seed():\n # tf.random.set_seed(1233)\n np.random.seed(1233)\n random.seed(1233)\n\nset_random_seed()\n\ndef mod_sigmoid(x):\n return 1 / (1 + (1.0001 ** (x-10000)))\n\ndef mish(x):\n return tf.tanh(x) * x\n\ndef conv(x, units, kernel, stride, noise=False, padding='valid', mu_noise=0.4, alpha=1.3):\n y = L.Conv2D(units, kernel, stride, padding=padding)(x)\n z = tf.random.normal(tf.shape(y), mean=0.0, stddev=alpha)\n# y = y + mu_noise * z\n y = tf.nn.sigmoid(y)\n # y = mL.Pact()(y)\n\n return y\n\ndef res_block(x, filt=[4, 8, 4], size=(5,5), stride=1):\n filt_1, filt_2, filt_3 = filt\n convo = L.Conv2D(filt_1, (1,1), stride)(x)\n convo = L.Conv2D(filt_2, size, 1, padding='same')(convo)\n convo = L.Conv2D(filt_3, (1,1), 1)(convo)\n\n cut = L.Conv2D(filt_3, (1,1), stride)(x)\n x = L.Add()([convo, cut])\n x = tf.nn.sigmoid(x)\n# x = mL.Pact()(x)\n return x\n\ndef forgetful_res_block_1d(x, filt=[64, 64, 64], size=(5,), stride=1):\n filt_1, filt_2, filt_3 = filt\n convo = tfp.layers.Convolution1DFlipout(filt_1, 1, stride)(x)\n convo = tfp.layers.Convolution1DFlipout(filt_2, size, 1, padding='same')(convo)\n convo = tfp.layers.Convolution1DFlipout(filt_3, 1, 1)(convo)\n\n# forget = L.Conv1D(filt_3, 1, stride, activation='sigmoid')(x)\n shortcut = tfp.layers.Convolution1DFlipout(filt_3, 1, stride)(x)\n# shortcut = shortcut * forget\n\n combined = L.Add()([shortcut, convo])\n return combined\n\n\ndef conv1d(x, filt, size, stride):\n return L.Conv1D(filt, size, stride)(x)\n\nclass GAN_DDPG:\n def __init__(self, state, action_size):\n self.state = state\n self.action_size = action_size\n self.gamma = .70\n self.tau = .005\n self.alpha = 1.0\n self.beta = 1.0\n self.theta = 0.9\n self.flipout = False\n\n self.buffer = deque(maxlen=1000)\n self.reward_buffer = deque(maxlen=1000)\n\n\n self.cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n self.tape = tf.GradientTape(persistent=True)\n self.target_actor = self.actor_build()\n self.actor = self.actor_build()\n\n self.critic = self.critic_build()\n self.target_critic = self.critic_build()\n\n self.optimizer = tf.keras.optimizers.Adam(lr=0.000001)\n self.internal_clock = 0\n self.loss_critic = 0.0\n\n self.mcts = None #Pseudo_MCTS_Alpha(20, self.action_size, 2)\n\n def save_models(self):\n self.actor.save(\"actor.h5\")\n self.critic.save(\"critic.h5\")\n self.imagination_model.save(\"imagine.h5\")\n\n def load_models(self):\n self.actor = tf.keras.models.load_model(\"actor.h5\")\n self.critic = tf.keras.models.load_model(\"critic.h5\")\n self.imagination_model = tf.keras.models.load_model(\"imagine.h5\")\n\n def actor_build(self):\n state_input = L.Input(shape=self.state)\n x = state_input\n\n if len(self.state) == 3:\n for i in range(3):\n x = res_block(x)\n for i in range(3):\n x = conv(x, 16, (5,5), 3)\n for i in range(1):\n x = conv(x, 32, (5,5), 1)\n elif len(self.state) == 1:\n x = L.Reshape((self.state[0], 1))(x)\n x = forgetful_res_block_1d(x)\n x = forgetful_res_block_1d(x)\n x = conv1d(x, 4, 3, 1)\n\n # for i in range(30):\n # if self.flipout:\n # x = tfp.layers.DenseFlipout(8, activation='tanh')(x)\n # else:\n # x = L.Dense(32, activation='tanh')(x)\n else:\n raise ValueError\n\n x = L.Flatten()(x)\n\n # for i in range(10):\n # x = L.Dense(32, activation='tanh')(x)\n # z = tf.random.normal(tf.shape(x), mean=0.0, stddev=1.2)\n # x = x + 0.1 * 
z\n control = L.Dense(self.action_size, activation='tanh')(x)\n model = tf.keras.Model(state_input, [control])\n model.summary()\n return model\n\n def actor_loss_obj(self, state, next_state, reward, action):\n with self.tape as tape:\n y_true = self.critic([state, self.actor(state)])\n loss = -y_true\n return loss, tape.gradient(loss, self.actor.trainable_variables)\n\n def curiosity(self, state_real, state_false):\n return tf.keras.losses.mse(state_real, state_false)\n\n def get_action(self, state):\n list_of_action_sources = []\n action_raw = self.actor.predict(state)\n list_of_action_sources.append(action_raw[0][0])\n if self.flipout:\n list_of_action_sources.append(self.actor.predict(state)[0])\n\n if self.mcts is not None and self.internal_clock >= 200 and self.internal_clock % 2 == 0:\n list_of_action_sources.append(self.mcts.action_selection(state, self.imagination_model))\n\n action = np.mean(list_of_action_sources, axis=0)\n\n self.internal_clock += 1\n return action, action_raw\n\n def get_target_action(self, state):\n return self.target_actor.predict(state)[0] + (self.theta * np.random.random((1, self.action_size)))\n\n def train_actor(self, states, next_states, reward, action):\n loss, grads = self.actor_loss_obj(states, next_states, reward, action)\n self.optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))\n\n def critic_build(self):\n action_input = L.Input(shape=(self.action_size,))\n state_input = L.Input(shape=self.state)\n y = action_input\n x = state_input\n if len(self.state) == 1:\n y = L.Dense(8)(y)\n x = L.Dense(32)(x)\n x = L.Dense(32)(x)\n x = L.Concatenate(-1)([y, x])\n x1 = L.Dense(1)(x)\n elif len(self.state) == 3:\n y = L.RepeatVector(self.state[1])(y)\n y = L.Flatten()(y)\n y = L.RepeatVector(self.state[0])(y)\n y = L.Reshape((self.state[0], self.state[1], 2))(y)\n x = L.Concatenate(-1)([y, x])\n\n for i in range(3):\n x = res_block(x)\n\n for i in range(3):\n x = conv(x, 16, (5,5), 3)\n for i in range(1):\n x = conv(x, 32, (5,5), 1)\n x = L.Flatten()(x)\n x1 = L.Dense(1)(x)\n else:\n raise ValueError\n\n model = tf.keras.Model([state_input,action_input], x1)\n model.summary()\n return model\n\n def critic_loss_obj(self, states, actions, target_input, y_true):\n with self.tape as tape:\n y_pred = self.critic([states, actions])\n target_Q = y_true + self.gamma * target_input\n mse = tf.keras.losses.mse(target_Q, y_pred)\n return mse, tape.gradient(mse, self.critic.trainable_variables)\n\n def train_critic(self, states, actions, reward, next_state):\n next_actions = self.get_target_action(next_state)\n targets = self.get_Q_target(next_state, next_actions)\n loss_vals, grads = self.critic_loss_obj(states, actions, targets, reward)\n self.optimizer.apply_gradients(zip(grads, self.critic.trainable_variables))\n return loss_vals\n\n def get_Q(self, states, actions):\n return self.critic.predict([states, actions])\n\n def get_Q_target(self, states, actions):\n return self.target_critic([states, actions])\n\n def soft_update(self):\n weights_crit_local = np.array(self.critic.get_weights())\n weights_crit_targ = np.array(self.target_critic.get_weights())\n self.critic.set_weights(self.tau * weights_crit_local + (1.-self.tau) * weights_crit_targ)\n\n weights_act_local = np.array(self.actor.get_weights())\n weights_act_targ = np.array(self.target_actor.get_weights())\n self.actor.set_weights(self.tau * weights_act_local + (1.-self.tau) * weights_act_targ)\n\n def remember(self, state, action, reward, next_state):\n self.buffer.append((state, action, 
reward, next_state))\n self.reward_buffer.append(reward)\n\n def train(self):\n batch = random.sample(self.buffer, 1)\n batch_2 = random.sample(self.buffer, 1)\n batch_3 = [self.buffer[np.argmax(self.reward_buffer)]]\n for state, action, reward, next_state in batch:\n train_bool = bool(random.getrandbits(1))\n\n self.train_actor(state, next_state, reward, action)\n self.train_critic(state, action, reward, next_state)\n self.soft_update()\n\n for state, action, reward, next_state in batch_2:\n self.loss_critic = self.train_critic(state, action, reward, next_state)\n self.soft_update()\n\nclass Pseudo_MCTS_Alpha(object):\n def __init__(self, num_sims, action_size, max_branch_depth):\n self.num_sims = num_sims\n self.max_branch_depth = max_branch_depth\n self.action_size = action_size\n def action_selection(self, state, world_model):\n\n state_zero = state\n simulations = []\n simulations_rewards = []\n\n for i in range(self.num_sims):\n branch = []\n branch_rewards = []\n for j in range(self.max_branch_depth):\n action = np.random.normal(scale=2.0, size=(1, self.action_size)) - 1.0\n rewards, state = world_model.predict([state, action])\n branch.append(action)\n branch_rewards.append(rewards)\n simulations.append(branch)\n simulations_rewards.append(branch_rewards)\n\n best_rewards = -100000.0\n\n for branch, i in zip(simulations_rewards, range(len(simulations_rewards))):\n sum_reward_of_branch = np.sum(branch)\n if sum_reward_of_branch > best_rewards:\n index = i\n best_rewards = branch[0]\n\n best_branch = simulations[index]\n return best_branch[0]\n\nif __name__ == '__main__':\n ddpg = DDPG((300,300,3))\n for i in range(50):\n\n state = np.random.random((1,300,300,3))\n state = np.float32(state)\n action = ddpg.get_action(state)\n actions = np.squeeze(action)\n print(actions)\n\n reward = np.array([[100.]])\n\n next_state = np.random.random((1,300,300,3))\n next_state = np.float32(next_state)\n\n\n ddpg.remember(state, action, reward, next_state)\n ddpg.train()\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.squeeze", "tensorflow.tanh", "numpy.mean", "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.losses.BinaryCrossentropy", "numpy.argmax", "numpy.float32", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Flatten", "tensorflow.keras.losses.mse", "tensorflow.nn.sigmoid", "tensorflow.shape", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.layers.Reshape", "numpy.array", "numpy.sum", "tensorflow.GradientTape", "numpy.random.random", "numpy.random.seed", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.RepeatVector", "numpy.random.normal", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
shahaniRG/sinogram_changepoint_detection
[ "17c0193d5628c691172cccde2d3d5d82fc076107" ]
[ "sinogram_functions.py" ]
[ "# -----------------------------------------------------------\r\n# Code free for public use, just acknowledge use\r\n# Paul Chao, [email protected]\r\n# December 18, 2020\r\n# Data obtained at APS 2ID-BM\r\n# Original data type: hdf (h5) file\r\n#\r\n# -----------------------------------------------------------\r\n\r\n#Import packages\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nfrom scipy import ndimage\r\nimport argparse\r\n\r\nimport matplotlib as mpl\r\nimport scipy.stats\r\nfrom pathlib import Path\r\nimport h5py\r\nimport pandas as pd\r\nimport tomopy\r\nfrom scipy import ndimage\r\nimport numpy as np\r\nfrom scipy import signal\r\nfrom scipy.signal import savgol_filter\r\nfrom scipy.ndimage import median_filter\r\nfrom scipy import interpolate\r\nimport time\r\n\r\nfrom math import log, e\r\nfrom sklearn.neighbors import KernelDensity\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nimport skfuzzy as fuzz\r\n\r\n# Functions\r\n\r\n# Import sinogram data\r\ndef load_fromh5(filepath, dir_structure, slice_num, strt_frm=0):\r\n \"\"\"\r\n load_fromh5 will extract the sinogram from the h5 file \r\n\r\n Output: the sinogram\r\n\r\n filepath: where the file is located in the system\r\n dir_structure: the h5 file directory structure\r\n slice_num: the slice where the singoram will be extracted\r\n strt_frm (optional): where the sinogram should begin\r\n \"\"\"\r\n f = h5py.File(filepath, 'r')\r\n #[\"entry/data/data\"]\r\n print(f[dir_structure].shape)\r\n end_frm = f[dir_structure].shape[0]\r\n sino = f[dir_structure][int(strt_frm):int(end_frm),int(slice_num),:] #For APS 2BM h5 file format\r\n return sino\r\n\r\ndef prepare_sinogram(sinogram, period=3000, downsamplescale=10, numLiq=3, keepLiq=True):\r\n \"\"\"\r\n prepare_sinogram will normalize the sinogram using the initial liquid scans\r\n\r\n Output: Sinogram that has been normalized.\r\n\r\n sinogram: The sinogram data as a 2D array\r\n period: The number of oprojections for a 360 degree sample rotation in a tomographic scan\r\n downsamplescale: the slice where the singoram will be extracted\r\n numLiq: The number of liquid periods to be used to normalize \r\n keepLiq: If the output should contain the liquid frames used for normalization\r\n \"\"\"\r\n sinogram = tomopy.minus_log(sinogram)\r\n sino_dwn = downsample(sinogram, scalingfactor=downsamplescale)\r\n period = int(period) // int(downsamplescale)\r\n sino_dwn_subliq = sub_sino_liq(sino_dwn,size=1,numLiq=numLiq, period=period, keepLiq=keepLiq)\r\n return sino_dwn_subliq \r\n\r\ndef downsample(data, scalingfactor=10):\r\n \"\"\"\r\n downsample will reduce the data size by a integer factor by summing a NxN sized pixel area\r\n\r\n Output: the downsampled data\r\n\r\n data: The sinogram data as a 2D array\r\n scalingfactor: The amount the data will be scaled\r\n \"\"\"\r\n data = data[0:(data.shape[0] // scalingfactor)*scalingfactor , 0:(data.shape[1] // scalingfactor)*scalingfactor];\r\n rows = scalingfactor\r\n cols = scalingfactor\r\n smaller = data.reshape(data.shape[0]//rows, rows, data.shape[1]//cols, cols).sum(axis=1).sum(axis=2)\r\n return smaller\r\n\r\ndef sub_sino_liq(data, numLiq, size=20, period=3000, keepLiq=True):\r\n \"\"\"\r\n sub_sino_liq will normalize the singram using the liquid periods\r\n\r\n Output: the normalized sinogram\r\n\r\n data: The sinogram data as a 2D array\r\n period: The number of oprojections for a 360 degree 
sample rotation in a tomographic scan\r\n size (optional): The size of the median filter used to filter the liquid region\r\n numLiq: The number of liquid periods to be used to normalize \r\n keepLiq: If the output should contain the liquid frames used for normalization\r\n \"\"\"\r\n liquid = np.zeros((period, data.shape[1]), dtype=np.float32)\r\n counter = 0\r\n for i in range(numLiq):\r\n liquid = liquid + data[counter*period: counter*period+period, :]\r\n counter += 1\r\n liquid = liquid/numLiq\r\n liquid = ndimage.median_filter(liquid, size)\r\n\r\n subtract_fluid_data = []\r\n for iblock in np.arange(0, data.shape[0], period):\r\n try:\r\n subtract_fluid_data.append(data[iblock:iblock + period] - liquid)\r\n except ValueError:\r\n remaining = data.shape[0] - iblock\r\n subtract_fluid_data.append(data[iblock:iblock + remaining] - data[:remaining])\r\n subtract_fluid_data = np.concatenate(subtract_fluid_data)\r\n\r\n if keepLiq == True:\r\n return subtract_fluid_data\r\n else:\r\n return subtract_fluid_data[numLiq*period:]\r\n\r\ndef digitizetolevels(data, nLevels=256):\r\n \"\"\"\r\n digitizetolevels will scale the data to discrete levels\r\n\r\n Output: the digitized data\r\n\r\n data: The sinogram data as a 2D array\r\n nLevels: The number of levels to descretize to\r\n \"\"\"\r\n _min, _max = (data.min(), data.max())\r\n bins = np.linspace(_min, _max, nLevels)\r\n data_digitized = np.digitize(data,bins,right=True)\r\n return data_digitized\r\n\r\ndef save_sino(sinogram, fname, cmap='gray'):\r\n \"\"\"\r\n save_sino will save the sinogram as an image\r\n\r\n Output: the saved image\r\n\r\n fname: The name of the saved image\r\n cmap: The image colormap\r\n \"\"\"\r\n fig, ax = plt.subplots(figsize=(18, 2))\r\n ax.imshow(sinogram.T, cmap, interpolation='nearest', aspect='auto')\r\n fig.tight_layout()\r\n fig.savefig(fname)\r\n\r\ndef save_scree(pca, fname):\r\n \"\"\"\r\n save_scree will save the scree plot as an image\r\n\r\n Output: the saved image\r\n\r\n pca: the pca object\r\n fname: The name of the saved image\r\n \"\"\"\r\n var = pca.explained_variance_ratio_\r\n percent_variance = np.round(var* 100, decimals =2)\r\n fig, ax = plt.subplots(figsize=(10, 10))\r\n ax.bar(x= range(1,7), height=percent_variance)#, tick_label=list(principalDf.columns))\r\n plt.ylabel('Percentate of Variance Explained')\r\n plt.xlabel('Principal Component')\r\n plt.title('PCA Scree Plot')\r\n plt.rcParams.update({'font.size': 22})\r\n fig.tight_layout()\r\n fig.savefig(fname)\r\n\r\ndef save_pcaplot(principalDf, fname):\r\n \"\"\"\r\n save_pcaplot will save the PCA plot as an image\r\n\r\n Output: the saved image\r\n\r\n principalDf: the PCA dataframe\r\n fname: The name of the saved image\r\n \"\"\"\r\n fig = plt.figure(figsize = (10,8))\r\n ax = fig.add_subplot(1,1,1)\r\n ax.set_xlabel('Principal Component 1', fontsize = 15)\r\n ax.set_ylabel('Principal Component 2', fontsize = 15)\r\n ax.set_title('2 component PCA', fontsize = 20)\r\n pcaplot = ax.scatter(principalDf.loc[:,'principal component 1'],\r\n principalDf.loc[:,'principal component 2'],\r\n c = range(principalDf.shape[0]), s = 50)\r\n plt.colorbar(pcaplot, ax=ax)\r\n ax.grid()\r\n fig.tight_layout()\r\n fig.savefig(fname)\r\n\r\ndef save_fuzzyProbability(u, filtered_0, filtered_1, fname):\r\n \"\"\"\r\n save_fuzzyProbability will save the fuzzy Probability plot as an image\r\n\r\n Output: the saved image\r\n\r\n u the fuzzy probability Nx2\r\n filtered_0 filtered probability\r\n filtered_1 filtered probability\r\n fname: The name of the 
saved image\r\n \"\"\"\r\n fig, ax = plt.subplots(figsize=(10, 10))\r\n plt.plot(u[0,:])\r\n plt.plot(u[1,:])\r\n #plt.plot(filtered_0)\r\n #plt.plot(filtered_1)\r\n fig.tight_layout()\r\n fig.savefig(fname)\r\n\r\n# Various features to use, all optional\r\ndef entropy2(labels, base=None):\r\n \"\"\" Computes entropy of label distribution. \"\"\"\r\n n_labels = len(labels)\r\n if n_labels <= 1:\r\n return 0\r\n value,counts = np.unique(labels, return_counts=True)\r\n probs = counts / n_labels\r\n n_classes = np.count_nonzero(probs)\r\n if n_classes <= 1:\r\n return 0\r\n ent = 0.\r\n # Compute entropy\r\n base = e if base is None else base\r\n for i in probs:\r\n ent -= i * log(i, base)\r\n return ent\r\n\r\ndef outlier_zscore_overall(array, threshold):\r\n mean_all = np.mean(array)*np.ones((1, array.shape[1]))\r\n stdev_all = np.std(array)*np.ones((1, array.shape[1]))\r\n\r\n outlier_count = np.zeros((array.shape[0],1))\r\n for col in np.arange(array.shape[0]):\r\n z_score = (array[col,:] - mean_all) / (stdev_all)\r\n outlier_count[col] = np.sum(z_score > threshold)\r\n return np.squeeze(outlier_count > threshold)\r\n\r\ndef numUnique(array):\r\n [unique, counts] = np.unique(array, return_counts=True)\r\n return counts.shape[0]\r\n\r\ndef numThreshold_ge(array, thresh):\r\n vals = (array>= np.ones(array.shape)*thresh).sum(axis=1);\r\n return vals\r\n\r\ndef numThreshold_le(array, thresh):\r\n vals = (array<= np.ones(array.shape)*thresh).sum(axis=1);\r\n return vals\r\n\r\ndef makeTarget(manualID, length):\r\n name = []\r\n for index in range(manualID):\r\n name.append('before?')\r\n for index in range(manualID,length):\r\n name.append('after?')\r\n return name\r\n\r\ndef maxDerivative(array):\r\n # go columnwise\r\n col_array = np.zeros((array.shape[0],1))\r\n for col in np.arange(0,array.shape[0]-1):\r\n current_col = array[col,:]\r\n next_col = array[col+1,:]\r\n diff = np.abs(next_col-current_col)\r\n col_array[col+1,:] = np.max(diff)\r\n return np.squeeze(col_array)\r\n\r\ndef maxDerivative_cols(array, distance=1):\r\n # go columnwise\r\n col_array = np.zeros((array.shape[0],1))\r\n for col in np.arange(0,array.shape[0]-distance):\r\n if col-distance < 0: #begining\r\n diff = np.abs(array[col,:].min()-array[col+distance,:].min())\r\n col_array[col,:] = diff #np.max(diff)\r\n elif col+distance > array.shape[0]: # end\r\n diff = np.abs(array[col,:].min()-array[col-distance,:].min())\r\n col_array[col,:] = diff #np.max(diff)\r\n else:\r\n diff = np.abs(array[col-distance,:].min()-array[col+distance,:].min())\r\n col_array[col,:] = diff #np.max(diff)\r\n return np.squeeze(col_array)\r\n\r\ndef argclosest(arr, K): \r\n arr = np.asarray(arr) \r\n idx = (np.abs(arr - K)).argmin() \r\n return idx\r\n\r\n# Function that encapsulates the algorithm described\r\ndef analyze_sinogram(sinogram, period, save=True):\r\n \"\"\"\r\n analyze_sinogram will implement the algorithm\r\n\r\n Output: the saved image\r\n\r\n sinogram: the sinogram\r\n period: the number of projections for a 360 degree sample rotation\r\n save: Save the results\r\n \"\"\"\r\n data = sinogram\r\n df = pd.DataFrame({\r\n \"index\" : np.arange(data.shape[0]),\r\n #\"Sum\" : data.sum(axis=1),\r\n #\"Entropy\" : np.apply_along_axis(entropy2, 1, data),\r\n \"Max\" : np.apply_along_axis(np.max, 1, data),\r\n \"Min\" : np.apply_along_axis(np.min, 1, data),\r\n \"Mean\" : np.apply_along_axis(np.mean, 1, data),\r\n \"Median\" : np.apply_along_axis(np.median, 1, data),\r\n #\"Q1\" : np.quantile(data, .25, axis=1),\r\n #\"Q3\" : 
np.quantile(data, .75, axis=1),\r\n \"Stdev\" : np.apply_along_axis(np.std, 1, data),\r\n \"Range\" : np.apply_along_axis(np.ptp, 1, data),\r\n \"Unique Values\" : np.apply_along_axis(numUnique, 1, data),\r\n #\"SNR\" : np.apply_along_axis(np.mean, 1, data)**2/np.apply_along_axis(np.std, 1, data)**2,\r\n #\"Vals<200\" : numThreshold_le(data, 200),\r\n #\"Vals<150\" : numThreshold_le(data, 150),\r\n #\"Vals>150\" : numThreshold_ge(data, 150),\r\n \"Vals<mean\" : numThreshold_le(data, np.mean(data)),\r\n \"Vals>mean\" : numThreshold_ge(data, np.mean(data)),\r\n #\"outlier KDE\" : outlier_score(data, bandwidth=7, threshold = 1e-10),\r\n #\"outliers zscore 1.5\" : outlier_zscore_column(data, 1.5),\r\n #\"z-score outlier\" : outlier_zscore_column(data, 2),\r\n #\"outliers zscore 2.5\" : outlier_zscore_column(data, 2.5),\r\n #\"outliers zscore 3\" : outlier_zscore_column(data, 3),\r\n #\"outliers zscore 3.5\" : outlier_zscore_column(data, 3.5),\r\n #\"max derivative\" : maxDerivative(data),\r\n #\"max derivative 5\" : maxDerivative_cols(data, distance=5),\r\n #\"max derivative 50\" : maxDerivative_cols(data, distance=50),\r\n #\"max derivative 150\" : maxDerivative_cols(data, distance=150),\r\n #\"max derivative 10%\" : maxDerivative_cols(data, distance=10),\r\n #\"max derivative 50%\" : maxDerivative_cols(data, distance=100),\r\n #\"max derivative period\" : maxDerivative_cols(data, distance=300),\r\n #\"max derivative 1000\" : maxDerivative_cols(data, distance=1000),\r\n #\"max change period\" : maxNegChange_period(data, distance=period),\r\n #\"target\" : makeTarget(visual_change, data.shape[0])\r\n })\r\n\r\n features = list(df.columns[1:])\r\n # Separating out the features\r\n x = df.loc[:, features].values\r\n # Standardizing the features\r\n x = StandardScaler().fit_transform(x)\r\n #set up as new dataframe\r\n df_standard = pd.DataFrame(data=x,columns=features)\r\n\r\n pca = PCA(n_components=6)\r\n principalComponents = pca.fit_transform(x)\r\n\r\n if save: save_scree(pca, 'scree.png')\r\n\r\n principalDf = pd.DataFrame(data = principalComponents,\r\n columns = ['principal component 1', 'principal component 2', 'principal component 3',\r\n 'principal component 4', 'principal component 5', 'principal component 6'])\r\n\r\n if save: save_pcaplot(principalDf, 'pcaplot.png')\r\n\r\n points = np.array(list(zip(principalDf.loc[:,'principal component 1'].values, principalDf.loc[:,'principal component 2'].values, df.loc[:,'index'].values)))\r\n points = StandardScaler().fit_transform(points)\r\n\r\n alldata = points.T\r\n ncenters = 2\r\n\r\n cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(\r\n alldata, ncenters, 1.8, error=0.00005, maxiter=1000, init=None)\r\n if int(period) % 2 == 0: windowsize = int(period+1)\r\n else: windowsize = int(period) #Make it odd for the savgol filter\r\n filtered_0 = savgol_filter(u[0,:], windowsize,1)\r\n filtered_1 = savgol_filter(u[1,:], windowsize,1)\r\n \r\n if save: save_fuzzyProbability(u, filtered_0, filtered_1, 'fuzzyprob.png')\r\n \r\n P = filtered_0 * filtered_1\r\n \r\n crit = np.argmax(P)\r\n \r\n print(' *** Clustering Results')\r\n print('Critical point: {}'.format(crit))\r\n print('Range of critical point (60% threshold): ({0}, {1})'.format(\r\n argclosest(P[:crit], 0.6*0.4), argclosest(P[crit:], 0.6*0.4) + crit ) )\r\n print('Range of critical point (70% threshold): ({0}, {1})'.format(\r\n argclosest(P[:crit], 0.7*0.3), argclosest(P[crit:], 0.7*0.3) + crit ) )\r\n print('Range of critical point (80% threshold): ({0}, {1})'.format(\r\n 
argclosest(P[:crit], 0.8*0.2), argclosest(P[crit:], 0.8*0.2) + crit ) )\r\n results = [crit, argclosest(P[:crit], 0.6*0.4), argclosest(P[crit:], 0.6*0.4) + crit ,\r\n argclosest(P[:crit], 0.7*0.3), argclosest(P[crit:], 0.7*0.3) + crit,\r\n argclosest(P[:crit], 0.8*0.2), argclosest(P[crit:], 0.8*0.2) + crit]\r\n \r\n return results\r\n\r\n" ]
[ [ "numpy.linspace", "numpy.asarray", "numpy.squeeze", "pandas.DataFrame", "numpy.concatenate", "numpy.round", "matplotlib.pyplot.plot", "numpy.max", "numpy.mean", "matplotlib.pyplot.rcParams.update", "numpy.digitize", "scipy.signal.savgol_filter", "numpy.unique", "numpy.arange", "scipy.ndimage.median_filter", "numpy.std", "numpy.argmax", "numpy.apply_along_axis", "numpy.count_nonzero", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "sklearn.decomposition.PCA", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.pyplot.subplots", "numpy.ones", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
skyssj/graph-learn
[ "7bbffceed2c69a7acf903d80ee5bbc7e3fec6ca1" ]
[ "examples/tf/gat/gat.py" ]
[ "# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"class for GAT implementation\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport graphlearn as gl\nimport tensorflow as tf\n\n\nclass GAT(gl.LearningBasedModel):\n \"\"\"\n Args:\n graph: Initialized gl.Graph object.\n output_dim: Output dimension.\n features_num: Input features dimension.\n num_heads: Number of attention heads.\n batch_size: Batch size for training set.\n val_batch_size: Batch size for validation set.\n test_batch_size: Batch size for test set.\n categorical_attrs_desc: A dict indicates discrete features, with the format\n {feature columun index : [name, max number, embedding dimension]}.\n hidden_dim: Hidden dimension.\n in_drop_rate: Dropout ratio for input data.\n attn_drop_rate: Dropout ratio for attention coefficients.\n hops_num: Number of hops to perform neighbor sampling.\n neighs_num: A list indicates number of neighbors to sample in each hop,\n with the format [hop1_num, hop2_num, ...].\n full_graph_mode: Set True if sample full graph in first iteration.\n node_type: User defined node type name.\n edge_type: User defined edge type name.\n \"\"\"\n\n def __init__(self,\n graph,\n output_dim,\n features_num,\n num_heads,\n batch_size,\n val_batch_size=None,\n test_batch_size=None,\n categorical_attrs_desc='',\n hidden_dim=16,\n in_drop_rate=0,\n attn_drop_rate=0,\n hops_num=2,\n neighs_num=None,\n full_graph_mode=False,\n node_type='item',\n edge_type='relation'):\n super(GAT, self).__init__(graph,\n batch_size,\n full_graph_mode=full_graph_mode)\n self.features_num = features_num\n self.categorical_attrs_desc = categorical_attrs_desc\n self.hops_num = hops_num\n self.hidden_dim = hidden_dim\n self.num_heads = num_heads\n self.in_drop_rate = in_drop_rate\n self.attn_drop_rate= attn_drop_rate\n self.output_dim = output_dim\n self.neighs_num = neighs_num\n self.val_batch_size = val_batch_size if val_batch_size else batch_size\n self.test_batch_size = test_batch_size if test_batch_size else batch_size\n self.node_type = node_type\n self.edge_type = edge_type\n\n # construct EgoSpecs.\n categorical_attrs_num = len(categorical_attrs_desc)\n continuous_attrs_num = features_num - categorical_attrs_num\n src_spec = gl.FeatureSpec(continuous_attrs_num,\n categorical_attrs_num,\n labeled = True)\n hop_spec = gl.HopSpec(gl.FeatureSpec(continuous_attrs_num, categorical_attrs_num),\n sparse=(self.neighs_num is None))\n self.src_ego_spec = gl.EgoSpec(src_spec, hops_spec=[hop_spec] * self.hops_num)\n # encoders.\n self.encoders = self._encoders()\n\n def _sample_seed(self):\n return self.graph.V('train').batch(self.batch_size).values()\n\n def _val_sample_seed(self):\n return self.graph.V('val').batch(self.val_batch_size).values()\n\n def _test_sample_seed(self):\n return 
self.graph.V('test').batch(self.test_batch_size).values()\n\n def _positive_sample(self, t):\n return gl.Edges(t.ids, self.node_type,\n t.ids, self.node_type,\n self.edge_type, graph=self.graph)\n\n def _receptive_fn(self, nodes):\n alias = ['v' + str(i + 1) for i in range(self.hops_num)]\n # sample based\n if self.neighs_num:\n assert len(self.neighs_num) == self.hops_num\n sample_func = lambda v, params: v.outV(self.edge_type).sample(params).by('topk')\n return self.graph.V(nodes.type, feed=nodes).alias('v')\\\n .repeat(sample_func, self.hops_num, params_list=self.neighs_num, alias_list=alias)\\\n .emit(lambda x: gl.EgoGraph(x['v'], [gl.Layer(nodes=x[name]) for name in alias]))\n\n # full batch based\n sample_func = lambda v: v.outV(self.edge_type).sample().by('full')\n return self.graph.V(nodes.type, feed=nodes).alias('v')\\\n .repeat(sample_func, self.hops_num, alias_list=alias)\\\n .emit(lambda x: gl.EgoGraph(x['v'], [gl.Layer(nodes=x[name]) for name in alias]))\n\n def _encoders(self):\n \"\"\"\n return a dict of encoder\n \"\"\"\n self.in_drop = tf.placeholder(tf.float32, None, name='in_drop_ph')\n self.attn_drop = tf.placeholder(tf.float32, None, name='attn_drop_ph')\n\n depth = self.hops_num\n feature_encoders = [gl.encoders.IdentityEncoder()] * (depth + 1)\n conv_layers = []\n # for input layer\n conv_layers.append(gl.layers.MultiHeadGATConv(self.hidden_dim,\n self.num_heads[0],\n attn_drop=self.attn_drop))\n # for hidden layer\n for i in range(1, depth - 1):\n conv_layers.append(gl.layers.MultiHeadGATConv(self.hidden_dim,\n self.num_heads[i],\n attn_drop=self.attn_drop))\n # for output layer\n conv_layers.append(gl.layers.MultiHeadGATConv(self.output_dim,\n self.num_heads[-1],\n attn_drop=self.attn_drop,\n concat=False,\n act=None))\n\n if self.neighs_num:\n encoder = gl.encoders.EgoGraphEncoder(feature_encoders,\n conv_layers,\n nbr_num_list=self.neighs_num,\n dropout=self.in_drop)\n else:\n encoder = gl.encoders.SparseEgoGraphEncoder(feature_encoders,\n conv_layers,\n dropout=self.in_drop)\n\n return {\"src\": encoder, \"edge\": None, \"dst\":None}\n\n def _accuracy(self, logits, labels):\n \"\"\"Accuracy for supervised model.\n Args:\n logits: embeddings, 2D tensor with shape [batchsize, dimension]\n labels: 1D tensor with shape [batchsize]\n \"\"\"\n indices = tf.math.argmax(logits, 1, output_type=tf.int32)\n correct = tf.reduce_sum(tf.cast(tf.math.equal(indices, labels), tf.float32))\n return correct / tf.cast(tf.shape(labels)[0], tf.float32)\n\n def build(self):\n ego_flow = gl.EgoFlow(self._sample_seed,\n self._positive_sample,\n self._receptive_fn,\n self.src_ego_spec,\n full_graph_mode=self.full_graph_mode)\n iterator = ego_flow.iterator\n self.pos_src_ego_tensor = ego_flow.pos_src_ego_tensor\n src_emb = self.encoders['src'].encode(self.pos_src_ego_tensor)\n labels = self.pos_src_ego_tensor.src.labels\n loss = self._supervised_loss(src_emb, labels)\n\n return loss, iterator\n\n def val_acc(self):\n val_ego_flow = gl.EgoFlow(self._val_sample_seed,\n self._positive_sample,\n self._receptive_fn,\n self.src_ego_spec,\n full_graph_mode=self.full_graph_mode)\n val_iterator = val_ego_flow.iterator\n val_pos_src_ego_tensor = val_ego_flow.pos_src_ego_tensor\n val_logits = self.encoders['src'].encode(val_pos_src_ego_tensor)\n val_labels = val_pos_src_ego_tensor.src.labels\n return self._accuracy(val_logits, val_labels), val_iterator\n\n def test_acc(self):\n test_ego_flow = gl.EgoFlow(self._test_sample_seed,\n self._positive_sample,\n self._receptive_fn,\n self.src_ego_spec,\n 
full_graph_mode=self.full_graph_mode)\n test_iterator = test_ego_flow.iterator\n test_pos_src_ego_tensor = test_ego_flow.pos_src_ego_tensor\n test_logits = self.encoders['src'].encode(test_pos_src_ego_tensor)\n test_labels = test_pos_src_ego_tensor.src.labels\n return self._accuracy(test_logits, test_labels), test_iterator\n\n def _supervised_loss(self, emb, label):\n return gl.softmax_cross_entropy_loss(emb, label)\n\n def feed_training_args(self):\n return {self.in_drop: self.in_drop_rate,\n self.attn_drop: self.attn_drop_rate}\n\n def feed_evaluation_args(self):\n return {self.in_drop: 0.0, self.attn_drop: 0.0}\n" ]
[ [ "tensorflow.math.argmax", "tensorflow.math.equal", "tensorflow.placeholder", "tensorflow.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mchancan/Hierarchical-Localization
[ "e310e311a722405f19a54f9e833834feb5e70a47" ]
[ "hloc/utils/parsers.py" ]
[ "from pathlib import Path\nimport logging\nimport numpy as np\nfrom collections import defaultdict\n\n\ndef parse_image_lists_with_intrinsics(paths):\n results = []\n files = list(Path(paths.parent).glob(paths.name))\n assert len(files) > 0\n\n for lfile in files:\n with open(lfile, 'r') as f:\n raw_data = f.readlines()\n\n logging.info(f'Importing {len(raw_data)} queries in {lfile.name}')\n for data in raw_data:\n data = data.strip('\\n').split(' ')\n name, camera_model, width, height = data[:4]\n params = np.array(data[4:], float)\n info = (camera_model, int(width), int(height), params)\n results.append((name, info))\n\n assert len(results) > 0\n return results\n\n\ndef parse_retrieval(path):\n retrieval = defaultdict(list)\n with open(path, 'r') as f:\n for p in f.read().rstrip('\\n').split('\\n'):\n q, r = p.split(' ')\n retrieval[q].append(r)\n return dict(retrieval)\n\n\ndef names_to_pair(name0, name1):\n return '_'.join((name0.replace('/', '-'), name1.replace('/', '-')))\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Derek-TH-Wang/OpenRoboRL
[ "b81333f034acff7252322322b8d499cd2c3c49e9" ]
[ "OpenRoboRL/envs/quadruped_robot/robots/laikago.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pybullet simulation of a Laikago robot.\"\"\"\nimport math\nimport re\nimport numpy as np\nfrom gym import spaces\n\n\nURDF_FILENAME = \"laikago/laikago_toes_limits.urdf\"\n\nT_STEP = 0.001\nNUM_ACTION_REPEAT = 33\nCTRL_LATENCY = 0.002\n\nNUM_MOTORS = 12\nNUM_LEGS = 4\nMOTOR_NAMES = [\n \"FR_hip_motor_2_chassis_joint\",\n \"FR_upper_leg_2_hip_motor_joint\",\n \"FR_lower_leg_2_upper_leg_joint\",\n \"FL_hip_motor_2_chassis_joint\",\n \"FL_upper_leg_2_hip_motor_joint\",\n \"FL_lower_leg_2_upper_leg_joint\",\n \"RR_hip_motor_2_chassis_joint\",\n \"RR_upper_leg_2_hip_motor_joint\",\n \"RR_lower_leg_2_upper_leg_joint\",\n \"RL_hip_motor_2_chassis_joint\",\n \"RL_upper_leg_2_hip_motor_joint\",\n \"RL_lower_leg_2_upper_leg_joint\",\n]\nPATTERN = [re.compile(r\"\\w+_chassis_\\w+\"), re.compile(r\"\\w+_hip_motor_\\w+\"),\n re.compile(r\"\\w+_lower_leg_\\w+\"), re.compile(r\"jtoe\\d*\")]\n\nINIT_POSITION = [0, 0, 0.48]\nINIT_QUAT = [0.5, 0.5, 0.5, 0.5]\nJOINT_DIRECTIONS = np.array([-1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1])\nDOFS_PER_LEG = 3\nJOINT_OFFSETS = np.array([0.0, -0.6, 0.66] * 4)\n\n_DEFAULT_HIP_POSITIONS = (\n (0.21, -0.1157, 0),\n (0.21, 0.1157, 0),\n (-0.21, -0.1157, 0),\n (-0.21, 0.1157, 0),\n)\n\n# Bases on the readings from Laikago's default pose.\nINIT_MOTOR_ANGLES = np.array([0, 0.67, -1.25] * NUM_LEGS)\n\n\nmotor_kp = [220.0, 220.0, 220.0] * NUM_LEGS\nmotor_kd = [0.3, 2.0, 2.0] * NUM_LEGS\n\n\nOVERHEAT_SHUTDOWN_TORQUE = 2.45\nOVERHEAT_SHUTDOWN_TIME = 1.0\nMAX_MOTOR_ANGLE_CHANGE_PER_STEP = 0.2\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cwpeng-cn/deep-person-reid
[ "354df0860c4730df4466869aaf512db93b05303a" ]
[ "torchreid/data/datasets/video/mars.py" ]
[ "from __future__ import division, print_function, absolute_import\nimport os.path as osp\nimport warnings\nfrom scipy.io import loadmat\n\nfrom ..dataset import VideoDataset\n\n\nclass Mars(VideoDataset):\n \"\"\"MARS.\n\n Reference:\n Zheng et al. MARS: A Video Benchmark for Large-Scale Person Re-identification. ECCV 2016.\n\n URL: `<http://www.liangzheng.com.cn/Project/project_mars.html>`_\n \n Dataset statistics:\n - identities: 1261.\n - tracklets: 8298 (train) + 1980 (query) + 9330 (gallery).\n - cameras: 6.\n \"\"\"\n dataset_dir = 'mars'\n dataset_url = None\n\n def __init__(self, root='', **kwargs):\n self.root = osp.abspath(osp.expanduser(root))\n self.dataset_dir = osp.join(self.root, self.dataset_dir)\n # self.download_dataset(self.dataset_dir, self.dataset_url)\n\n self.train_name_path = osp.join(\n self.dataset_dir, 'info/train_name.txt'\n )\n self.test_name_path = osp.join(self.dataset_dir, 'info/test_name.txt')\n self.track_train_info_path = osp.join(\n self.dataset_dir, 'info/tracks_train_info.mat'\n )\n self.track_test_info_path = osp.join(\n self.dataset_dir, 'info/tracks_test_info.mat'\n )\n self.query_IDX_path = osp.join(self.dataset_dir, 'info/query_IDX.mat')\n\n required_files = [\n self.dataset_dir, self.train_name_path, self.test_name_path,\n self.track_train_info_path, self.track_test_info_path,\n self.query_IDX_path\n ]\n self.check_before_run(required_files)\n\n train_names = self.get_names(self.train_name_path)\n test_names = self.get_names(self.test_name_path)\n track_train = loadmat(self.track_train_info_path\n )['track_train_info'] # numpy.ndarray (8298, 4)\n track_test = loadmat(self.track_test_info_path\n )['track_test_info'] # numpy.ndarray (12180, 4)\n query_IDX = loadmat(self.query_IDX_path\n )['query_IDX'].squeeze() # numpy.ndarray (1980,)\n query_IDX -= 1 # index from 0\n track_query = track_test[query_IDX, :]\n gallery_IDX = [\n i for i in range(track_test.shape[0]) if i not in query_IDX\n ]\n track_gallery = track_test[gallery_IDX, :]\n\n train = self.process_data(\n train_names, track_train, home_dir='bbox_train', relabel=True\n )\n query = self.process_data(\n test_names, track_query, home_dir='bbox_test', relabel=False\n )\n gallery = self.process_data(\n test_names, track_gallery, home_dir='bbox_test', relabel=False\n )\n\n super(Mars, self).__init__(train, query, gallery, **kwargs)\n\n def get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n names.append(new_line)\n return names\n\n def process_data(\n self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0\n ):\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0]\n pid_list = list(set(meta_data[:, 2].tolist()))\n\n if relabel:\n pid2label = {pid: label for label, pid in enumerate(pid_list)}\n tracklets = []\n\n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx, ...]\n start_index, end_index, pid, camid = data\n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel:\n pid = pid2label[pid]\n camid -= 1 # index starts from 0\n img_names = names[start_index - 1:end_index]\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names]\n assert len(\n set(pnames)\n ) == 1, 'Error: a single tracklet contains different person images'\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in img_names]\n assert len(\n set(camnames)\n ) == 1, 
'Error: images are captured under different cameras!'\n\n # append image names with directory information\n img_paths = [\n osp.join(self.dataset_dir, home_dir, img_name[:4], img_name)\n for img_name in img_names\n ]\n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, pid, camid))\n\n return tracklets\n\n def combine_all(self):\n warnings.warn(\n 'Some query IDs do not appear in gallery. Therefore, combineall '\n 'does not make any difference to Mars'\n )\n" ]
[ [ "scipy.io.loadmat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]