Schema (one record per repository):
  repo_name           string (length 6-130)
  hexsha              list
  file_path           list
  code                list
  apis                list
  possible_versions   list
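Each record pairs one repository with parallel lists, one entry per file: hexsha holds git SHA-1 hashes, file_path the file paths, code the raw file contents, apis the fully-qualified API calls extracted from each file, and possible_versions a per-file map from library name to candidate version(s). As a minimal sketch of how such records could be consumed — assuming they are exported as JSON Lines, where the records.jsonl file name is an assumption and not part of this dump:

import json

# Hypothetical JSON Lines export of the records shown below;
# "records.jsonl" is an assumed file name.
with open("records.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]
        # hexsha, file_path, code, apis and possible_versions are
        # parallel lists: index i of each one describes the same file.
        for sha, path, source, apis, versions in zip(
            record["hexsha"],
            record["file_path"],
            record["code"],
            record["apis"],
            record["possible_versions"],
        ):
            print(f"{repo}/{path} ({sha[:8]}): {len(apis)} API calls")
            if versions.get("tensorflow"):
                print("  possible tensorflow versions:", versions["tensorflow"])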
ruobop/InsightFace_TF
[ "1dd04e367371d3c7ee1216af099446df772842b4" ]
[ "train_nets.py" ]
[ "import tensorflow as tf\nimport tensorlayer as tl\nimport argparse\nfrom data.mx2tfrecords import parse_function\nimport os\n# from nets.L_Resnet_E_IR import get_resnet\n# from nets.L_Resnet_E_IR_GBN import get_resnet\nfrom nets.L_Resnet_E_IR_fix_issue9 import get_resnet\nfrom losses.face_losses import arcface_loss\nfrom tensorflow.core.protobuf import config_pb2\nimport time\nfrom data.eval_data_reader import load_bin\nfrom verification import ver_test\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='parameters to train net')\n parser.add_argument('--net_depth', default=50, type=int, help='resnet depth, default is 50')\n parser.add_argument('--epoch', default=100000, help='epoch to train the network')\n parser.add_argument('--batch_size', default=48, type=int, help='batch size to train network')\n parser.add_argument('--lr_steps', default=[30000, 50000, 70000], help='learning rate to train network')\n parser.add_argument('--momentum', default=0.9, help='learning alg momentum')\n parser.add_argument('--weight_deacy', default=5e-4, help='learning alg momentum')\n # parser.add_argument('--eval_datasets', default=['lfw', 'cfp_ff', 'cfp_fp', 'agedb_30'], help='evluation datasets')\n parser.add_argument('--eval_datasets', default=['lfw'], help='evluation datasets')\n parser.add_argument('--eval_db_path', default='./datasets/faces_ms1m_112x112', help='evluate datasets base path')\n parser.add_argument('--image_size', default=[112, 112], help='the image size')\n parser.add_argument('--num_output', default=85164, help='the image size')\n parser.add_argument('--tfrecords_file_path', default='./datasets/tfrecords', type=str,\n help='path to the output of tfrecords file path')\n parser.add_argument('--summary_path', default='./output/summary', help='the summary file save path')\n parser.add_argument('--ckpt_path', default='./output/ckpt', help='the ckpt file save path')\n parser.add_argument('--log_file_path', default='./output/logs', help='the ckpt file save path')\n parser.add_argument('--saver_maxkeep', default=100, help='tf.train.Saver max keep ckpt files')\n parser.add_argument('--buffer_size', default=10000, help='tf dataset api buffer size')\n parser.add_argument('--log_device_mapping', default=False, help='show device placement log')\n parser.add_argument('--summary_interval', default=300, help='interval to save summary')\n parser.add_argument('--ckpt_interval', default=10000, help='intervals to save ckpt file')\n parser.add_argument('--validate_interval', default=2000, help='intervals to save ckpt file')\n parser.add_argument('--show_info_interval', default=20, help='intervals to save ckpt file')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n # 1. 
define global parameters\n args = get_parser()\n global_step = tf.Variable(name='global_step', initial_value=0, trainable=False)\n inc_op = tf.assign_add(global_step, 1, name='increment_global_step')\n images = tf.placeholder(name='img_inputs', shape=[None, *args.image_size, 3], dtype=tf.float32)\n labels = tf.placeholder(name='img_labels', shape=[None, ], dtype=tf.int64)\n # trainable = tf.placeholder(name='trainable_bn', dtype=tf.bool)\n dropout_rate = tf.placeholder(name='dropout_rate', dtype=tf.float32)\n # 2 prepare train datasets and test datasets by using tensorflow dataset api\n # 2.1 train datasets\n # the image is substracted 127.5 and multiplied 1/128.\n # random flip left right\n tfrecords_f = os.path.join(args.tfrecords_file_path, 'tran.tfrecords')\n dataset = tf.data.TFRecordDataset(tfrecords_f)\n dataset = dataset.map(parse_function)\n dataset = dataset.shuffle(buffer_size=args.buffer_size)\n dataset = dataset.batch(args.batch_size)\n iterator = dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n # 2.2 prepare validate datasets\n ver_list = []\n ver_name_list = []\n for db in args.eval_datasets:\n print('begin db %s convert.' % db)\n data_set = load_bin(db, args.image_size, args)\n ver_list.append(data_set)\n ver_name_list.append(db)\n # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver\n # 3.1 inference phase\n w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)\n net = get_resnet(images, args.net_depth, type='ir', w_init=w_init_method, trainable=True, keep_rate=dropout_rate)\n # 3.2 get arcface loss\n logit = arcface_loss(embedding=net.outputs, labels=labels, w_init=w_init_method, out_num=args.num_output)\n # test net because of batch normal layer\n tl.layers.set_name_reuse(True)\n test_net = get_resnet(images, args.net_depth, type='ir', w_init=w_init_method, trainable=False, reuse=True, keep_rate=dropout_rate)\n embedding_tensor = test_net.outputs\n # 3.3 define the cross entropy\n inference_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=labels))\n # inference_loss_avg = tf.reduce_mean(inference_loss)\n # 3.4 define weight deacy losses\n # for var in tf.trainable_variables():\n # print(var.name)\n # print('##########'*30)\n wd_loss = 0\n for weights in tl.layers.get_variables_with_name('W_conv2d', True, True):\n wd_loss += tf.contrib.layers.l2_regularizer(args.weight_deacy)(weights)\n for W in tl.layers.get_variables_with_name('resnet_v1_50/E_DenseLayer/W', True, True):\n wd_loss += tf.contrib.layers.l2_regularizer(args.weight_deacy)(W)\n for weights in tl.layers.get_variables_with_name('embedding_weights', True, True):\n wd_loss += tf.contrib.layers.l2_regularizer(args.weight_deacy)(weights)\n for gamma in tl.layers.get_variables_with_name('gamma', True, True):\n wd_loss += tf.contrib.layers.l2_regularizer(args.weight_deacy)(gamma)\n # for beta in tl.layers.get_variables_with_name('beta', True, True):\n # wd_loss += tf.contrib.layers.l2_regularizer(args.weight_deacy)(beta)\n for alphas in tl.layers.get_variables_with_name('alphas', True, True):\n wd_loss += tf.contrib.layers.l2_regularizer(args.weight_deacy)(alphas)\n # for bias in tl.layers.get_variables_with_name('resnet_v1_50/E_DenseLayer/b', True, True):\n # wd_loss += tf.contrib.layers.l2_regularizer(args.weight_deacy)(bias)\n\n # 3.5 total losses\n total_loss = inference_loss + wd_loss\n # 3.6 define the learning rate schedule\n p = int(512.0/args.batch_size)\n lr_steps = [p*val for val in 
args.lr_steps]\n print(lr_steps)\n lr = tf.train.piecewise_constant(global_step, boundaries=lr_steps, values=[0.001, 0.0005, 0.0003, 0.0001], name='lr_schedule')\n # 3.7 define the optimize method\n opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=args.momentum)\n # 3.8 get train op\n grads = opt.compute_gradients(total_loss)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = opt.apply_gradients(grads, global_step=global_step)\n # train_op = opt.minimize(total_loss, global_step=global_step)\n # 3.9 define the inference accuracy used during validate or test\n pred = tf.nn.softmax(logit)\n acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, axis=1), labels), dtype=tf.float32))\n # 3.10 define sess\n config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=args.log_device_mapping)\n config.gpu_options.allow_growth = True\n\n sess = tf.Session(config=config)\n # 3.11 summary writer\n summary = tf.summary.FileWriter(args.summary_path, sess.graph)\n summaries = []\n # # 3.11.1 add grad histogram op\n for grad, var in grads:\n if grad is not None:\n summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))\n # 3.11.2 add trainabel variable gradients\n for var in tf.trainable_variables():\n summaries.append(tf.summary.histogram(var.op.name, var))\n # 3.11.3 add loss summary\n summaries.append(tf.summary.scalar('inference_loss', inference_loss))\n summaries.append(tf.summary.scalar('wd_loss', wd_loss))\n summaries.append(tf.summary.scalar('total_loss', total_loss))\n # 3.11.4 add learning rate\n summaries.append(tf.summary.scalar('leraning_rate', lr))\n summary_op = tf.summary.merge(summaries)\n # 3.12 saver\n saver = tf.train.Saver(max_to_keep=args.saver_maxkeep)\n # 3.13 init all variables\n sess.run(tf.global_variables_initializer())\n\n # restore_saver = tf.train.Saver()\n # restore_saver.restore(sess, '/home/aurora/workspaces2018/InsightFace_TF/output/ckpt/InsightFace_iter_1110000.ckpt')\n # 4 begin iteration\n if not os.path.exists(args.log_file_path):\n os.makedirs(args.log_file_path)\n log_file_path = args.log_file_path + '/train' + time.strftime('_%Y-%m-%d-%H-%M', time.localtime(time.time())) + '.log'\n log_file = open(log_file_path, 'w')\n # 4 begin iteration\n count = 0\n total_accuracy = {}\n\n for i in range(args.epoch):\n sess.run(iterator.initializer)\n while True:\n try:\n images_train, labels_train = sess.run(next_element)\n feed_dict = {images: images_train, labels: labels_train, dropout_rate: 0.4}\n feed_dict.update(net.all_drop)\n start = time.time()\n _, total_loss_val, inference_loss_val, wd_loss_val, _, acc_val = \\\n sess.run([train_op, total_loss, inference_loss, wd_loss, inc_op, acc],\n feed_dict=feed_dict,\n options=config_pb2.RunOptions(report_tensor_allocations_upon_oom=True))\n end = time.time()\n pre_sec = args.batch_size/(end - start)\n # print training information\n if count > 0 and count % args.show_info_interval == 0:\n print('epoch %d, total_step %d, total loss is %.2f , inference loss is %.2f, weight deacy '\n 'loss is %.2f, training accuracy is %.6f, time %.3f samples/sec' %\n (i, count, total_loss_val, inference_loss_val, wd_loss_val, acc_val, pre_sec))\n count += 1\n\n # save summary\n if count > 0 and count % args.summary_interval == 0:\n feed_dict = {images: images_train, labels: labels_train, dropout_rate: 0.4}\n feed_dict.update(net.all_drop)\n summary_op_val = sess.run(summary_op, feed_dict=feed_dict)\n summary.add_summary(summary_op_val, count)\n\n 
# save ckpt files\n if count > 0 and count % args.ckpt_interval == 0:\n filename = 'InsightFace_iter_{:d}'.format(count) + '.ckpt'\n filename = os.path.join(args.ckpt_path, filename)\n saver.save(sess, filename)\n\n # validate\n if count > 0 and count % args.validate_interval == 0:\n feed_dict_test ={dropout_rate: 1.0}\n feed_dict_test.update(tl.utils.dict_to_one(net.all_drop))\n results = ver_test(ver_list=ver_list, ver_name_list=ver_name_list, nbatch=count, sess=sess,\n embedding_tensor=embedding_tensor, batch_size=args.batch_size, feed_dict=feed_dict_test,\n input_placeholder=images)\n print('test accuracy is: ', str(results[0]))\n total_accuracy[str(count)] = results[0]\n log_file.write('########'*10+'\\n')\n log_file.write(','.join(list(total_accuracy.keys())) + '\\n')\n log_file.write(','.join([str(val) for val in list(total_accuracy.values())])+'\\n')\n log_file.flush()\n if max(results) > 0.996:\n print('best accuracy is %.5f' % max(results))\n filename = 'InsightFace_iter_best_{:d}'.format(count) + '.ckpt'\n filename = os.path.join(args.ckpt_path, filename)\n saver.save(sess, filename)\n log_file.write('######Best Accuracy######'+'\\n')\n log_file.write(str(max(results))+'\\n')\n log_file.write(filename+'\\n')\n\n log_file.flush()\n except tf.errors.OutOfRangeError:\n print(\"End of epoch %d\" % i)\n break\n log_file.close()\n log_file.write('\\n')\n" ]
[ [ "tensorflow.control_dependencies", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.summary.scalar", "tensorflow.assign_add", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.data.TFRecordDataset", "tensorflow.train.piecewise_constant", "tensorflow.ConfigProto", "tensorflow.train.MomentumOptimizer", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.summary.merge", "tensorflow.summary.histogram", "tensorflow.nn.softmax", "tensorflow.summary.FileWriter", "tensorflow.contrib.layers.l2_regularizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jaelgu/towhee
[ "34c79cf50831dc271ae0ab02f319f9e355c2d0bf", "34c79cf50831dc271ae0ab02f319f9e355c2d0bf", "34c79cf50831dc271ae0ab02f319f9e355c2d0bf" ]
[ "tests/unittests/models/mdmmt/test_bert_mmt.py", "tests/unittests/models/layers/test_multi_head_attention_with_lrp.py", "towhee/models/frozen_in_time/frozen_video_transformer.py" ]
[ "# Copyright 2021 Zilliz. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport unittest\nimport torch\n\nfrom towhee.models.mdmmt.bert_mmt import BertMMT\n\n\nclass TestBertMMT(unittest.TestCase):\n \"\"\"\n Test CLIP4Clip model\n \"\"\"\n vid_bert_params = {\n \"vocab_size_or_config_json_file\": 10,\n \"hidden_size\": 512,\n \"num_hidden_layers\": 9,\n \"intermediate_size\": 3072,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.2,\n \"attention_probs_dropout_prob\": 0.2,\n \"max_position_embeddings\": 32,\n \"type_vocab_size\": 19,\n \"initializer_range\": 0.02,\n \"layer_norm_eps\": 1e-12,\n \"num_attention_heads\": 8,\n }\n\n class Struct:\n def __init__(self, **entries):\n self.__dict__.update(entries)\n\n config = Struct(**vid_bert_params)\n model = BertMMT(config=config)\n\n def test_forward(self):\n input_ids = torch.randint(low=0, high=200, size=(8, 94))\n attention_mask = torch.randint(low=0, high=2, size=(8, 94))\n token_type_ids = torch.randint(low=0, high=2, size=(8, 94))\n position_ids = torch.randint(low=0, high=2, size=(8, 94))\n features = torch.rand(8, 94, 512)\n output = self.model(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n features=features)\n self.assertTrue(output[0].size() == (8, 94, 512))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2021 Zilliz. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport torch\n\nfrom towhee.models.layers.multi_head_attention import MultiHeadAttention\n\n\nclass MHATest(unittest.TestCase):\n def test_mha_with_lrp(self):\n seq_len = 21\n c_dim = 10\n mod = MultiHeadAttention(c_dim, num_heads=2)\n fake_input = torch.rand(8, seq_len, c_dim)\n out1 = mod(fake_input)\n kwargs = {'alpha': 1}\n # torch.Size([8, 21, 10])\n out2 = mod.relprop(out1, **kwargs)\n self.assertTrue(out2.shape == torch.Size([8, 21, 10]))\n", "# Original pytorch implementation by:\n# 'Frozen in Time: A Joint Image and Video Encoder for End-to-End Retrieval'\n# - https://arxiv.org/abs/2104.00650\n# Original code by / Copyright 2021, Max Bain.\n# Modifications & additions by / Copyright 2021 Zilliz. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom functools import partial\nimport torch\nfrom einops import rearrange, repeat\nfrom timm.models.layers import DropPath, trunc_normal_\nfrom torch import einsum, nn\nfrom towhee.models.layers.mlp import Mlp\nfrom towhee.models.layers.patch_embed2d import PatchEmbed2D\nfrom towhee.models.utils.init_vit_weights import init_vit_weights\nimport logging\n\n\ndef attn(q, k, v):\n sim = einsum('b i d, b j d -> b i j', q, k)\n attn_value = sim.softmax(dim=-1)\n out = einsum('b i j, b j d -> b i d', attn_value, v)\n return out\n\n\nclass VarAttention(nn.Module):\n \"\"\"\n dim:\n num_heads:\n qkv_bias:\n qk_scale:\n attn_drop:\n proj_drop:\n \"\"\"\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\n initialize='random'):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights\n self.scale = qk_scale or head_dim ** -0.5\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.proj = nn.Linear(dim, dim)\n if initialize == 'zeros':\n self.qkv.weight.data.fill_(0)\n self.qkv.bias.data.fill_(0)\n # fill proj weight with 1 here to improve training dynamics. 
Otherwise temporal attention inputs\n # are multiplied by 0*0, which is hard for the model to move out of.\n self.proj.weight.data.fill_(1)\n self.proj.bias.data.fill_(0)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, einops_from, einops_to, **einops_dims):\n # x [b,14*14*curr_frames+1,embed_dim]\n h = self.num_heads\n # project x to q, k, v vaalues\n # q,k,v [b,14*14*curr_frames+1,embed_dim]\n q, k, v = self.qkv(x).chunk(3, dim=-1)\n # q,k,v [b*num_heads,14*14*curr_frames+1,embed_dim/num_heads]\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n q = q * self.scale\n\n # splice out CLS token at index 1\n # cls_q [b*num_heads,1,embed_dim/num_heads] q_ [b*num_heads,14*14*curr_frames,embed_dim/num_heads]\n (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))\n\n # let CLS token attend to key / values of all patches across time and space\n # cls_out [b*num_heads,1,embed_dim/num_heads]\n cls_out = attn(cls_q, k, v)\n\n # rearrange across time or space\n # q_ [b*num_heads*14*14,curr_frames,embed_dim/num_heads] or [b*num_heads*curr_frames,14*14,embed_dim/num_heads]\n q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_))\n # expand cls token keys and values across time or space and concat\n r = q_.shape[0] // cls_k.shape[0]\n # cls_k:\n # [b*num_heads*14*14,1,embed_dim/num_heads] or [b*num_heads*curr_frames,1,embed_dim/num_heads]\n cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v))\n # k_, v_\n # [b*num_heads*14*14,curr_frames+1,embed_dim/num_heads] or [b*num_heads*curr_frames,14*14+1,embed_dim/num_heads]\n k_ = torch.cat((cls_k, k_), dim=1)\n v_ = torch.cat((cls_v, v_), dim=1)\n # attention\n # out [b*num_heads*14*14,curr_frames,embed_dim/num_heads] or [b*num_heads*curr_frames,14*14,embed_dim/num_heads]\n out = attn(q_, k_, v_)\n # merge back time or space\n # out [b*num_heads,14*14*curr_frames,embed_dim/num_heads]\n out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)\n\n # concat back the cls token\n # out [b*num_heads,14*14*curr_frames+1,embed_dim/num_heads]\n out = torch.cat((cls_out, out), dim=1)\n # merge back the heads\n # out [b,14*14*curr_frames+1,embed_dim]\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n # to out\n # x [b,14*14*curr_frames+1,embed_dim]\n x = self.proj(out)\n x = self.proj_drop(x)\n return x\n\n\nclass SpaceTimeBlock(nn.Module):\n \"\"\"\n dim:\n num_heads:\n \"\"\"\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, time_init='zeros',\n attention_style='frozen_in_time'):\n super().__init__()\n self.attention_style = attention_style\n self.norm1 = norm_layer(dim)\n self.attn = VarAttention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n if self.attention_style != 'bridge_former':\n self.timeattn = VarAttention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,\n initialize=time_init)\n\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n if self.attention_style != 'bridge_former':\n self.norm3 = norm_layer(dim)\n\n def forward(self, x, einops_from_space, einops_to_space, einops_from_time, einops_to_time,\n time_n, space_f):\n if self.attention_style != 'bridge_former':\n time_output = self.timeattn(self.norm3(x), einops_from_time, einops_to_time, n=time_n)\n time_residual = x + time_output\n else:\n time_residual = x\n space_output = self.attn(self.norm1(time_residual), einops_from_space,\n einops_to_space, f=space_f)\n if self.attention_style in ['frozen_in_time', 'bridge_former']:\n space_residual = x + self.drop_path(space_output)\n else:\n raise NotImplementedError\n\n x = space_residual + self.drop_path(self.mlp(self.norm2(space_residual)))\n\n return x\n\n\nclass SpaceTimeTransformer(nn.Module):\n \"\"\" Vision Transformer\n\n A PyTorch impl of : `Space-Time Transformer` from Frozen-in-time - by Max Bain.\n https://arxiv.org/abs/2104.00650\n\n Based off:\n - ViT implementation from the timm library [https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py]\n lucidrains timesformer implementation [https://github.com/lucidrains/TimeSformer-pytorch].\n\n Notable differences:\n - allows for variable length input frames (<= num_frames)\n - allows for variable length input resolution (<= (img_size, img_size)) [UNTESTED]\n - different attention block mechanism\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None,\n num_frames=8, time_init='rand', attention_style='frozen_in_time'):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module\n norm_layer: (nn.Module): normalization layer\n num_frames: (int) maximum number of frames expected as input\n time_init: (str) how to initialise the time attention layer, 'zeros' allows for the timesformer to start off\n as ViT.\n attention_style: (str) how to attend to space and time.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.num_frames = num_frames\n self.embed_dim = embed_dim\n self.attention_style = attention_style\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n logging.info('######USING ATTENTION STYLE: %s', self.attention_style)\n if hybrid_backbone is not None:\n raise NotImplementedError('hybrid backbone not implemented')\n 
else:\n # self.patch_embed = VideoPatchEmbed(\n # img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=num_frames)\n\n self.patch_embed = PatchEmbed2D(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, flatten=False)\n\n num_patches = self.patch_embed.num_patches*num_frames\n self.patches_per_frame = num_patches // num_frames\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(\n torch.zeros(1, self.patches_per_frame + 1,\n embed_dim)) # remember to take pos_embed[1:] for tiling over time\n if self.attention_style != 'bridge_former':\n self.temporal_embed = nn.Parameter(torch.zeros(1, num_frames, embed_dim))\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n SpaceTimeBlock(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, time_init=time_init,\n attention_style=attention_style)\n for i in range(depth)])\n if self.attention_style == 'bridge_former':\n self.norm1 = norm_layer(embed_dim)\n else:\n self.norm = norm_layer(embed_dim)\n\n # Representation layer\n if representation_size:\n self.num_features = representation_size\n self.pre_logits = nn.Sequential(OrderedDict([\n ('fc', nn.Linear(embed_dim, representation_size)),\n ('act', nn.Tanh())\n ]))\n else:\n self.pre_logits = nn.Identity()\n\n # Classifier head\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n\n # if num_frames > 1, then we perform ViT inflation and initialise time attention to zero so not necessary.\n if num_frames == 1:\n self.apply(init_vit_weights)\n\n # einops transformations\n self.einops_from_space = 'b (f n) d'\n self.einops_to_space = '(b f) n d'\n self.einops_from_time = 'b (f n) d'\n self.einops_to_time = '(b n) f d'\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n b, curr_frames, _, _, _ = x.shape\n x = self.patch_embed(x.transpose(2, 1)) # [b*curr_frames, embed_dim, (original 244)14, (original 244)14]\n x = x.flatten(2).transpose(2, 1) # [b*curr_frames,14*14, embed_dim]\n x = x.reshape(b, -1, self.embed_dim) # [b,14*14*curr_frames,embed_dim]\n bf = x.shape[0]\n cls_tokens = self.cls_token.expand(bf, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_tokens, x), dim=1) # [b,14*14*curr_frames+1,embed_dim]\n # positional embed needs to be tiled for each frame (this does [1,2,3] --> [1,2,3,1,2,3]...)\n cls_embed = self.pos_embed[:, 0, :].unsqueeze(1)\n tile_pos_embed = self.pos_embed[:, 1:, :].repeat(1, self.num_frames, 1)\n if self.attention_style != 'bridge_former':\n # temporal embed needs to be repeated within each frame (this does [1,2,3] --> [1,1,1,2,2,2,3,3,3]...)\n tile_temporal_embed = self.temporal_embed.repeat_interleave(self.patches_per_frame, 1)\n total_pos_embed = tile_pos_embed + tile_temporal_embed\n else:\n total_pos_embed = tile_pos_embed\n total_pos_embed = torch.cat([cls_embed, 
total_pos_embed], dim=1)\n\n curr_patches = x.shape[1]\n x = x + total_pos_embed[:, :curr_patches] # [b,14*14*curr_frames+1,embed_dim]\n x = self.pos_drop(x)\n n = self.patches_per_frame\n f = curr_frames\n\n for blk in self.blocks:\n x = blk(x, self.einops_from_space, self.einops_to_space, self.einops_from_time,\n self.einops_to_time,\n time_n=n, space_f=f)\n if self.attention_style == 'bridge_former':\n x = self.norm1(x)[:, 0]\n else:\n x = self.norm(x)[:, 0]\n x = self.pre_logits(x)\n\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n\n# if __name__ == '__main__':\n# att1 = VarAttention(\n# 768, num_heads=8, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.)\n# from towhee.models.layers.multi_head_attention import MultiHeadAttention\n# att2 = MultiHeadAttention(dim=768, num_heads=8, qkv_bias=True, qk_scale=None, attn_drop_ratio=0.,\n# proj_drop_ratio=0.,)\n# data = torch.randn((1, 196*4+1, 768))\n# einops_from_space = 'b (f n) d'\n# einops_to_space = '(b f) n d'\n# out1 = att1(data, einops_from=einops_from_space, einops_to=einops_to_space)\n# out2 = att2(data)\n# print(out1.shape, out2.shape)\n# assert out1 == out2\n# pass\n" ]
[ [ "torch.randint", "torch.rand" ], [ "torch.Size", "torch.rand" ], [ "torch.nn.Dropout", "torch.linspace", "torch.cat", "torch.zeros", "torch.einsum", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.Identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
google/edward2
[ "5574e773ca4ff5f36a5d9bf3b75ac8505973aa4b" ]
[ "experimental/rank1_bnns/resnet_cifar_model_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for Rank-1 ResNet-32x4.\"\"\"\nfrom absl.testing import parameterized\nfrom experimental.rank1_bnns import resnet_cifar_model # local file import\nimport tensorflow as tf\n\n\nclass ResnetCifarModelTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n {'alpha_initializer': 'trainable_normal_fixed_stddev',\n 'gamma_initializer': 'trainable_normal_fixed_stddev',\n 'random_sign_init': 0.5,\n 'ensemble_size': 1},\n {'alpha_initializer': 'trainable_deterministic',\n 'gamma_initializer': 'trainable_deterministic',\n 'random_sign_init': 0.5,\n 'ensemble_size': 2},\n {'alpha_initializer': 'trainable_deterministic',\n 'gamma_initializer': 'trainable_deterministic',\n 'random_sign_init': -0.5,\n 'ensemble_size': 2},\n )\n def testRank1ResNetV1(self,\n alpha_initializer,\n gamma_initializer,\n random_sign_init,\n ensemble_size):\n tf.random.set_seed(83922)\n dataset_size = 10\n batch_size = 6\n input_shape = (32, 32, 2) # TODO(dusenberrymw): (32, 32, 1) doesn't work...\n num_classes = 2\n\n features = tf.random.normal((dataset_size,) + input_shape)\n coeffs = tf.random.normal([tf.reduce_prod(input_shape), num_classes])\n net = tf.reshape(features, [dataset_size, -1])\n logits = tf.matmul(net, coeffs)\n labels = tf.random.categorical(logits, 1)\n dataset = tf.data.Dataset.from_tensor_slices((features, labels))\n dataset = dataset.repeat().shuffle(dataset_size).batch(batch_size)\n\n model = resnet_cifar_model.rank1_resnet_v1(\n input_shape=input_shape,\n depth=8,\n num_classes=num_classes,\n width_multiplier=1,\n alpha_initializer=alpha_initializer,\n gamma_initializer=gamma_initializer,\n alpha_regularizer=None,\n gamma_regularizer=None,\n use_additive_perturbation=False,\n ensemble_size=ensemble_size,\n random_sign_init=-0.5,\n dropout_rate=0.)\n model.compile(\n 'adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))\n history = model.fit(dataset,\n steps_per_epoch=dataset_size // batch_size,\n epochs=2)\n\n loss_history = history.history['loss']\n self.assertAllGreaterEqual(loss_history, 0.)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.matmul", "tensorflow.random.categorical", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.reshape", "tensorflow.test.main", "tensorflow.reduce_prod", "tensorflow.random.normal", "tensorflow.random.set_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dmlap/transformers
[ "79588e6fdb5af8add092fc27dd695ea1ebc68b18", "79588e6fdb5af8add092fc27dd695ea1ebc68b18", "79588e6fdb5af8add092fc27dd695ea1ebc68b18", "79588e6fdb5af8add092fc27dd695ea1ebc68b18", "79588e6fdb5af8add092fc27dd695ea1ebc68b18" ]
[ "examples/distillation/run_squad_w_distillation.py", "src/transformers/modeling_tf_albert.py", "tests/test_modeling_tf_xlm_roberta.py", "src/transformers/data/datasets/squad.py", "src/transformers/modeling_tf_xlm.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" This is the exact same script as `examples/question-answering/run_squad.py` (as of 2020, January 8th) with an additional and optional step of distillation.\"\"\"\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\nimport timeit\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n BertConfig,\n BertForQuestionAnswering,\n BertTokenizer,\n DistilBertConfig,\n DistilBertForQuestionAnswering,\n DistilBertTokenizer,\n RobertaConfig,\n RobertaForQuestionAnswering,\n RobertaTokenizer,\n XLMConfig,\n XLMForQuestionAnswering,\n XLMTokenizer,\n XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n squad_convert_examples_to_features,\n)\nfrom transformers.data.metrics.squad_metrics import (\n compute_predictions_log_probs,\n compute_predictions_logits,\n squad_evaluate,\n)\nfrom transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForQuestionAnswering, BertTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),\n \"roberta\": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef to_list(tensor):\n return tensor.detach().cpu().tolist()\n\n\ndef train(args, train_dataset, model, tokenizer, teacher=None):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", 
\"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 1\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n # Added here for reproductibility\n set_seed(args)\n\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n if teacher is not None:\n teacher.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"start_positions\": batch[3],\n \"end_positions\": batch[4],\n }\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = None if args.model_type == \"xlm\" else batch[2]\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\n if args.version_2_with_negative:\n inputs.update({\"is_impossible\": batch[7]})\n outputs = model(**inputs)\n loss, start_logits_stu, end_logits_stu = outputs\n\n # Distillation loss\n if teacher is not None:\n if \"token_type_ids\" not in inputs:\n inputs[\"token_type_ids\"] = None if args.teacher_type == \"xlm\" else batch[2]\n with torch.no_grad():\n start_logits_tea, end_logits_tea = teacher(\n input_ids=inputs[\"input_ids\"],\n token_type_ids=inputs[\"token_type_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n )\n assert start_logits_tea.size() == start_logits_stu.size()\n assert end_logits_tea.size() == end_logits_stu.size()\n\n loss_fct = nn.KLDivLoss(reduction=\"batchmean\")\n loss_start = loss_fct(\n F.log_softmax(start_logits_stu / args.temperature, dim=-1),\n F.softmax(start_logits_tea / args.temperature, dim=-1),\n ) * (args.temperature ** 2)\n loss_end = loss_fct(\n F.log_softmax(end_logits_stu / args.temperature, dim=-1),\n F.softmax(end_logits_tea / args.temperature, dim=-1),\n ) * (args.temperature ** 2)\n loss_ce = (loss_start + loss_end) / 2.0\n\n loss = args.alpha_ce * loss_ce + args.alpha_squad * loss\n\n if args.n_gpu > 1:\n loss = loss.mean() 
# mean() to average on multi-gpu parallel (not distributed) training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n # Log metrics\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training:\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)\n\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n all_results = []\n start_time = timeit.default_timer()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1]}\n if args.model_type != 
\"distilbert\":\n inputs[\"token_type_ids\"] = None if args.model_type == \"xlm\" else batch[2] # XLM don't use segment_ids\n example_indices = batch[3]\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n\n outputs = model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n\n output = [to_list(output[i]) for output in outputs]\n\n # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other \"simpler\"\n # models only use two.\n if len(output) >= 5:\n start_logits = output[0]\n start_top_index = output[1]\n end_logits = output[2]\n end_top_index = output[3]\n cls_logits = output[4]\n\n result = SquadResult(\n unique_id,\n start_logits,\n end_logits,\n start_top_index=start_top_index,\n end_top_index=end_top_index,\n cls_logits=cls_logits,\n )\n\n else:\n start_logits, end_logits = output\n result = SquadResult(unique_id, start_logits, end_logits)\n\n all_results.append(result)\n\n evalTime = timeit.default_timer() - start_time\n logger.info(\" Evaluation done in total %f secs (%f sec per example)\", evalTime, evalTime / len(dataset))\n\n # Compute predictions\n output_prediction_file = os.path.join(args.output_dir, \"predictions_{}.json\".format(prefix))\n output_nbest_file = os.path.join(args.output_dir, \"nbest_predictions_{}.json\".format(prefix))\n\n if args.version_2_with_negative:\n output_null_log_odds_file = os.path.join(args.output_dir, \"null_odds_{}.json\".format(prefix))\n else:\n output_null_log_odds_file = None\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n predictions = compute_predictions_log_probs(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n model.config.start_n_top,\n model.config.end_n_top,\n args.version_2_with_negative,\n tokenizer,\n args.verbose_logging,\n )\n else:\n predictions = compute_predictions_logits(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n args.do_lower_case,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n args.verbose_logging,\n args.version_2_with_negative,\n args.null_score_diff_threshold,\n tokenizer,\n )\n\n # Compute the F1 and exact scores.\n results = squad_evaluate(examples, predictions)\n return results\n\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n # Load data features from cache or dataset file\n input_file = args.predict_file if evaluate else args.train_file\n cached_features_file = os.path.join(\n os.path.dirname(input_file),\n \"cached_distillation_{}_{}_{}\".format(\n \"dev\" if evaluate else \"train\",\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n ),\n )\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features_and_dataset = torch.load(cached_features_file)\n\n try:\n features, dataset, examples = (\n features_and_dataset[\"features\"],\n features_and_dataset[\"dataset\"],\n features_and_dataset[\"examples\"],\n 
)\n except KeyError:\n raise DeprecationWarning(\n \"You seem to be loading features from an older version of this script please delete the \"\n \"file %s in order for it to be created again\" % cached_features_file\n )\n else:\n logger.info(\"Creating features from dataset file at %s\", input_file)\n processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()\n if evaluate:\n examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file)\n else:\n examples = processor.get_train_examples(args.data_dir, filename=args.train_file)\n\n features, dataset = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n return_dataset=\"pt\",\n threads=args.threads,\n )\n\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save({\"features\": features, \"dataset\": dataset, \"examples\": examples}, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pretrained model or model identifier from huggingface.co/models\",\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints and predictions will be written.\",\n )\n\n # Distillation parameters (optional)\n parser.add_argument(\n \"--teacher_type\",\n default=None,\n type=str,\n help=\"Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.\",\n )\n parser.add_argument(\n \"--teacher_name_or_path\",\n default=None,\n type=str,\n help=\"Path to the already SQuAD fine-tuned teacher model. Only for distillation.\",\n )\n parser.add_argument(\n \"--alpha_ce\", default=0.5, type=float, help=\"Distillation loss linear weight. Only for distillation.\"\n )\n parser.add_argument(\n \"--alpha_squad\", default=0.5, type=float, help=\"True SQuAD loss linear weight. Only for distillation.\"\n )\n parser.add_argument(\n \"--temperature\", default=2.0, type=float, help=\"Distillation temperature. Only for distillation.\"\n )\n\n # Other parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n help=\"The input data dir. Should contain the .json files for the task.\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--train_file\",\n default=None,\n type=str,\n help=\"The input training file. If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--predict_file\",\n default=None,\n type=str,\n help=\"The input evaluation file. 
If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n\n parser.add_argument(\n \"--version_2_with_negative\",\n action=\"store_true\",\n help=\"If true, the SQuAD examples contain some that do not have an answer.\",\n )\n parser.add_argument(\n \"--null_score_diff_threshold\",\n type=float,\n default=0.0,\n help=\"If null_score - best_non_null is greater than the threshold predict null.\",\n )\n\n parser.add_argument(\n \"--max_seq_length\",\n default=384,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. Sequences \"\n \"longer than this will be truncated, and sequences shorter than this will be padded.\",\n )\n parser.add_argument(\n \"--doc_stride\",\n default=128,\n type=int,\n help=\"When splitting up a long document into chunks, how much stride to take between chunks.\",\n )\n parser.add_argument(\n \"--max_query_length\",\n default=64,\n type=int,\n help=\"The maximum number of tokens for the question. Questions longer than this will \"\n \"be truncated to this length.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Rul evaluation during training at each logging step.\"\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\n \"--n_best_size\",\n default=20,\n type=int,\n help=\"The total number of n-best predictions to generate in the nbest_predictions.json output file.\",\n )\n parser.add_argument(\n \"--max_answer_length\",\n default=30,\n type=int,\n help=\"The maximum length of an answer that can be generated. This is needed because the start \"\n \"and end predictions are not conditioned on one another.\",\n )\n parser.add_argument(\n \"--verbose_logging\",\n action=\"store_true\",\n help=\"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\",\n )\n\n parser.add_argument(\"--logging_steps\", type=int, default=50, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=50, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Whether not to use CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\")\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n\n parser.add_argument(\"--threads\", type=int, default=1, help=\"multiple threads for converting example to features\")\n args = parser.parse_args()\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n                args.output_dir\n            )\n        )\n\n    # Setup distant debugging if needed\n    if args.server_ip and args.server_port:\n        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n        import ptvsd\n\n        print(\"Waiting for debugger attach\")\n        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n        ptvsd.wait_for_attach()\n\n    # Setup CUDA, GPU & distributed training\n    if args.local_rank == -1 or args.no_cuda:\n        device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs\n        torch.cuda.set_device(args.local_rank)\n        device = torch.device(\"cuda\", args.local_rank)\n        torch.distributed.init_process_group(backend=\"nccl\")\n        args.n_gpu = 1\n    args.device = device\n\n    # Setup logging\n    logging.basicConfig(\n        format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n        datefmt=\"%m/%d/%Y %H:%M:%S\",\n        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n    )\n    logger.warning(\n        \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n        args.local_rank,\n        device,\n        args.n_gpu,\n        bool(args.local_rank != -1),\n        args.fp16,\n    )\n\n    # Set seed\n    set_seed(args)\n\n    # Load pretrained model and tokenizer\n    if args.local_rank not in [-1, 0]:\n        # Make sure only the first process in distributed training will download model & vocab\n        torch.distributed.barrier()\n\n    args.model_type = args.model_type.lower()\n    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n    config = config_class.from_pretrained(\n        args.config_name if args.config_name else args.model_name_or_path,\n        cache_dir=args.cache_dir if args.cache_dir else None,\n    )\n    tokenizer = tokenizer_class.from_pretrained(\n        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n        do_lower_case=args.do_lower_case,\n        cache_dir=args.cache_dir if args.cache_dir else None,\n    )\n    model = model_class.from_pretrained(\n        args.model_name_or_path,\n        from_tf=bool(\".ckpt\" in args.model_name_or_path),\n        config=config,\n        cache_dir=args.cache_dir if args.cache_dir else None,\n    )\n\n    if args.teacher_type is not None:\n        assert args.teacher_name_or_path is not None\n        assert args.alpha_ce > 0.0\n        assert args.alpha_ce + args.alpha_squad > 0.0\n        assert args.teacher_type != \"distilbert\", \"We constrain teachers not to be of type DistilBERT.\"\n        teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type]\n        teacher_config = teacher_config_class.from_pretrained(\n            args.teacher_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None\n        )\n        teacher = teacher_model_class.from_pretrained(\n            args.teacher_name_or_path, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None\n        )\n        teacher.to(args.device)\n    else:\n        teacher = None\n\n    if args.local_rank == 0:\n        # Make sure only the first process in distributed training will download model & vocab\n        torch.distributed.barrier()\n\n    model.to(args.device)\n\n    logger.info(\"Training/evaluation parameters %s\", args)\n\n    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.\n    # Otherwise it'll default to \"promote\" mode, and we'll get fp32 operations. 
Note that running `--fp16_opt_level=\"O2\"` will\n # remove the need for this code, but it is still valid.\n if args.fp16:\n try:\n import apex\n\n apex.amp.register_half_function(torch, \"einsum\")\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Save the trained model and the tokenizer\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n model.to(args.device)\n\n # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n if args.do_train:\n logger.info(\"Loading checkpoints saved during training for evaluation\")\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce model loading logs\n\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n\n for checkpoint in checkpoints:\n # Reload the model\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n\n # Evaluate\n result = evaluate(args, model, tokenizer, prefix=global_step)\n\n result = dict((k + (\"_{}\".format(global_step) if global_step else \"\"), v) for k, v in result.items())\n results.update(result)\n\n logger.info(\"Results: {}\".format(results))\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n", "# coding=utf-8\n# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 ALBERT model. 
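\n\nA minimal usage sketch (illustrative only; it assumes the public \`albert-base-v2\` checkpoint is reachable and that the surrounding library is installed)::\n\n    from transformers import AlbertTokenizer, TFAlbertModel\n\n    tokenizer = AlbertTokenizer.from_pretrained(\"albert-base-v2\")\n    model = TFAlbertModel.from_pretrained(\"albert-base-v2\")\n    inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n    sequence_output, pooled_output = model(inputs)[:2]\n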
\"\"\"\n\n\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport tensorflow as tf\n\nfrom .configuration_albert import AlbertConfig\nfrom .file_utils import (\n MULTIPLE_CHOICE_DUMMY_INPUTS,\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n replace_return_docstrings,\n)\nfrom .modeling_tf_bert import ACT2FN, TFBertSelfAttention\nfrom .modeling_tf_outputs import (\n TFBaseModelOutput,\n TFBaseModelOutputWithPooling,\n TFMaskedLMOutput,\n TFMultipleChoiceModelOutput,\n TFQuestionAnsweringModelOutput,\n TFSequenceClassifierOutput,\n TFTokenClassifierOutput,\n)\nfrom .modeling_tf_utils import (\n TFMaskedLanguageModelingLoss,\n TFMultipleChoiceLoss,\n TFPreTrainedModel,\n TFQuestionAnsweringLoss,\n TFSequenceClassificationLoss,\n TFTokenClassificationLoss,\n get_initializer,\n keras_serializable,\n shape_list,\n)\nfrom .tokenization_utils import BatchEncoding\n\n\nlogger = logging.getLogger(__name__)\n\n_CONFIG_FOR_DOC = \"AlbertConfig\"\n_TOKENIZER_FOR_DOC = \"AlbertTokenizer\"\n\nTF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"albert-base-v1\",\n \"albert-large-v1\",\n \"albert-xlarge-v1\",\n \"albert-xxlarge-v1\",\n \"albert-base-v2\",\n \"albert-large-v2\",\n \"albert-xlarge-v2\",\n \"albert-xxlarge-v2\",\n # See all ALBERT models at https://huggingface.co/models?filter=albert\n]\n\n\nclass TFAlbertEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.vocab_size = config.vocab_size\n self.position_embeddings = tf.keras.layers.Embedding(\n config.max_position_embeddings,\n config.embedding_size,\n embeddings_initializer=get_initializer(self.config.initializer_range),\n name=\"position_embeddings\",\n )\n self.token_type_embeddings = tf.keras.layers.Embedding(\n config.type_vocab_size,\n config.embedding_size,\n embeddings_initializer=get_initializer(self.config.initializer_range),\n name=\"token_type_embeddings\",\n )\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def build(self, input_shape):\n \"\"\"Build shared word embedding layer \"\"\"\n with tf.name_scope(\"word_embeddings\"):\n # Create and initialize weights. 
The random normal initializer was chosen\n            # arbitrarily, and works well.\n            self.word_embeddings = self.add_weight(\n                \"weight\",\n                shape=[self.config.vocab_size, self.config.embedding_size],\n                initializer=get_initializer(self.config.initializer_range),\n            )\n        super().build(input_shape)\n\n    def call(\n        self,\n        input_ids=None,\n        position_ids=None,\n        token_type_ids=None,\n        inputs_embeds=None,\n        mode=\"embedding\",\n        training=False,\n    ):\n        \"\"\"Get token embeddings of inputs.\n        Args:\n            inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)\n            mode: string, a valid value is one of \"embedding\" and \"linear\".\n        Returns:\n            outputs: (1) If mode == \"embedding\", output embedding tensor, float32 with\n                shape [batch_size, length, embedding_size]; (2) mode == \"linear\", output\n                linear tensor, float32 with shape [batch_size, length, vocab_size].\n        Raises:\n            ValueError: if mode is not valid.\n\n        Shared weights logic adapted from\n            https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24\n        \"\"\"\n        if mode == \"embedding\":\n            return self._embedding(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)\n        elif mode == \"linear\":\n            return self._linear(input_ids)\n        else:\n            raise ValueError(\"mode {} is not valid.\".format(mode))\n\n    def _embedding(self, input_ids, position_ids, token_type_ids, inputs_embeds, training=False):\n        \"\"\"Applies embedding based on inputs tensor.\"\"\"\n        assert not (input_ids is None and inputs_embeds is None)\n\n        if input_ids is not None:\n            input_shape = shape_list(input_ids)\n        else:\n            input_shape = shape_list(inputs_embeds)[:-1]\n\n        seq_length = input_shape[1]\n        if position_ids is None:\n            position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]\n        if token_type_ids is None:\n            token_type_ids = tf.fill(input_shape, 0)\n\n        if inputs_embeds is None:\n            inputs_embeds = tf.gather(self.word_embeddings, input_ids)\n        position_embeddings = self.position_embeddings(position_ids)\n        token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n        embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n        embeddings = self.LayerNorm(embeddings)\n        embeddings = self.dropout(embeddings, training=training)\n        return embeddings\n\n    def _linear(self, inputs):\n        \"\"\"Computes logits by running inputs through a linear layer.\n        Args:\n            inputs: A float32 tensor with shape [batch_size, length, embedding_size]\n        Returns:\n            float32 tensor with shape [batch_size, length, vocab_size].\n        \"\"\"\n        batch_size = shape_list(inputs)[0]\n        length = shape_list(inputs)[1]\n        x = tf.reshape(inputs, [-1, self.config.embedding_size])\n        logits = tf.matmul(x, self.word_embeddings, transpose_b=True)\n        return tf.reshape(logits, [batch_size, length, self.config.vocab_size])\n\n\nclass TFAlbertSelfAttention(tf.keras.layers.Layer):\n    def __init__(self, config, **kwargs):\n        super().__init__(**kwargs)\n        if config.hidden_size % config.num_attention_heads != 0:\n            raise ValueError(\n                \"The hidden size (%d) is not a multiple of the number of attention \"\n                \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n            )\n\n        self.num_attention_heads = config.num_attention_heads\n        assert (\n            config.hidden_size % config.num_attention_heads == 0\n        ), f\"Hidden size {config.hidden_size} not divisible by number of heads {config.num_attention_heads}\"\n        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n        self.all_head_size 
= self.num_attention_heads * self.attention_head_size\n self.output_attentions = config.output_attentions\n\n self.query = tf.keras.layers.Dense(\n self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"query\"\n )\n self.key = tf.keras.layers.Dense(\n self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"key\"\n )\n self.value = tf.keras.layers.Dense(\n self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"value\"\n )\n\n self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):\n batch_size = shape_list(hidden_states)[0]\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n # (batch size, num_heads, seq_len_q, seq_len_k)\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n # scale attention_scores\n dk = tf.cast(shape_list(key_layer)[-1], tf.float32)\n attention_scores = attention_scores / tf.math.sqrt(dk)\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = tf.nn.softmax(attention_scores, axis=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs, training=training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = tf.matmul(attention_probs, value_layer)\n\n context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])\n context_layer = tf.reshape(\n context_layer, (batch_size, -1, self.all_head_size)\n ) # (batch_size, seq_len_q, all_head_size)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\nclass TFAlbertSelfOutput(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, hidden_states, input_tensor, training=False):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFAlbertAttention(TFBertSelfAttention):\n def __init__(self, config, **kwargs):\n super().__init__(config, **kwargs)\n\n self.hidden_size = config.hidden_size\n self.output_attentions = config.output_attentions\n 
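# Shape sketch for transpose_for_scores and the scaled dot-product in call()\n        # below (illustrative numbers, not taken from any particular config): with\n        # hidden_size=768 and num_attention_heads=12, each head has size 64, so\n        # (batch, seq_len, 768) is reshaped to (batch, 12, seq_len, 64) and the\n        # raw attention scores are divided by sqrt(64) before the softmax.\n        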
self.dense = tf.keras.layers.Dense(\n config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n raise NotImplementedError\n\n def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):\n batch_size = shape_list(input_tensor)[0]\n mixed_query_layer = self.query(input_tensor)\n mixed_key_layer = self.key(input_tensor)\n mixed_value_layer = self.value(input_tensor)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n # (batch size, num_heads, seq_len_q, seq_len_k)\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n # scale attention_scores\n dk = tf.cast(shape_list(key_layer)[-1], tf.float32)\n attention_scores = attention_scores / tf.math.sqrt(dk)\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in TFBertModel call() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = tf.nn.softmax(attention_scores, axis=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs, training=training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = tf.matmul(attention_probs, value_layer)\n\n context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])\n context_layer = tf.reshape(\n context_layer, (batch_size, -1, self.all_head_size)\n ) # (batch_size, seq_len_q, all_head_size)\n\n self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n hidden_states = self_outputs[0]\n\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n attention_output = self.LayerNorm(hidden_states + input_tensor)\n\n # add attentions if we output them\n outputs = (attention_output,) + self_outputs[1:]\n return outputs\n\n\nclass TFAlbertLayer(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.attention = TFAlbertAttention(config, name=\"attention\")\n\n self.ffn = tf.keras.layers.Dense(\n config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name=\"ffn\"\n )\n\n if isinstance(config.hidden_act, str):\n self.activation = ACT2FN[config.hidden_act]\n else:\n self.activation = config.hidden_act\n\n self.ffn_output = tf.keras.layers.Dense(\n config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"ffn_output\"\n )\n self.full_layer_layer_norm = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"full_layer_layer_norm\"\n )\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):\n attention_outputs = self.attention(\n hidden_states, attention_mask, head_mask, output_attentions, training=training\n )\n ffn_output = 
self.ffn(attention_outputs[0])\n        ffn_output = self.activation(ffn_output)\n        ffn_output = self.ffn_output(ffn_output)\n\n        # apply dropout to the FFN output before the residual connection and LayerNorm\n        ffn_output = self.dropout(ffn_output, training=training)\n        hidden_states = self.full_layer_layer_norm(ffn_output + attention_outputs[0])\n\n        # add attentions if we output them\n        outputs = (hidden_states,) + attention_outputs[1:]\n        return outputs\n\n\nclass TFAlbertLayerGroup(tf.keras.layers.Layer):\n    def __init__(self, config, **kwargs):\n        super().__init__(**kwargs)\n\n        self.output_attentions = config.output_attentions\n        self.output_hidden_states = config.output_hidden_states\n        self.albert_layers = [\n            TFAlbertLayer(config, name=\"albert_layers_._{}\".format(i)) for i in range(config.inner_group_num)\n        ]\n\n    def call(self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, training=False):\n        layer_hidden_states = ()\n        layer_attentions = ()\n\n        for layer_index, albert_layer in enumerate(self.albert_layers):\n            layer_output = albert_layer(\n                hidden_states, attention_mask, head_mask[layer_index], output_attentions, training=training\n            )\n            hidden_states = layer_output[0]\n\n            if output_attentions:\n                layer_attentions = layer_attentions + (layer_output[1],)\n\n            if output_hidden_states:\n                layer_hidden_states = layer_hidden_states + (hidden_states,)\n\n        outputs = (hidden_states,)\n        if output_hidden_states:\n            outputs = outputs + (layer_hidden_states,)\n        if output_attentions:\n            outputs = outputs + (layer_attentions,)\n        # last-layer hidden state, (layer hidden states), (layer attentions)\n        return outputs\n\n\nclass TFAlbertTransformer(tf.keras.layers.Layer):\n    def __init__(self, config, **kwargs):\n        super().__init__(**kwargs)\n\n        self.config = config\n        self.embedding_hidden_mapping_in = tf.keras.layers.Dense(\n            config.hidden_size,\n            kernel_initializer=get_initializer(config.initializer_range),\n            name=\"embedding_hidden_mapping_in\",\n        )\n        self.albert_layer_groups = [\n            TFAlbertLayerGroup(config, name=\"albert_layer_groups_._{}\".format(i))\n            for i in range(config.num_hidden_groups)\n        ]\n\n    def call(\n        self,\n        hidden_states,\n        attention_mask,\n        head_mask,\n        output_attentions,\n        output_hidden_states,\n        return_dict,\n        training=False,\n    ):\n        hidden_states = self.embedding_hidden_mapping_in(hidden_states)\n        all_attentions = () if output_attentions else None\n        all_hidden_states = (hidden_states,) if output_hidden_states else None\n\n        for i in range(self.config.num_hidden_layers):\n            # Number of layers in a hidden group\n            layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)\n\n            # Index of the hidden group\n            group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))\n\n            layer_group_output = self.albert_layer_groups[group_idx](\n                hidden_states,\n                attention_mask,\n                head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],\n                output_attentions,\n                output_hidden_states,\n                training=training,\n            )\n            hidden_states = layer_group_output[0]\n\n            if output_attentions:\n                all_attentions = all_attentions + layer_group_output[-1]\n\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n        return TFBaseModelOutput(\n            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n        )\n\n\nclass TFAlbertPreTrainedModel(TFPreTrainedModel):\n    \"\"\" An abstract class to handle weights initialization and\n    a simple interface 
for downloading and loading pretrained models.\n \"\"\"\n\n config_class = AlbertConfig\n base_model_prefix = \"albert\"\n\n\nclass TFAlbertMLMHead(tf.keras.layers.Layer):\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n\n self.dense = tf.keras.layers.Dense(\n config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n if isinstance(config.hidden_act, str):\n self.activation = ACT2FN[config.hidden_act]\n else:\n self.activation = config.hidden_act\n\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = input_embeddings\n\n def build(self, input_shape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n self.decoder_bias = self.add_weight(\n shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"decoder/bias\"\n )\n super().build(input_shape)\n\n def call(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.decoder(hidden_states, mode=\"linear\") + self.decoder_bias\n return hidden_states\n\n\n@keras_serializable\nclass TFAlbertMainLayer(tf.keras.layers.Layer):\n config_class = AlbertConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.num_hidden_layers = config.num_hidden_layers\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.return_dict = config.use_return_dict\n\n self.embeddings = TFAlbertEmbeddings(config, name=\"embeddings\")\n self.encoder = TFAlbertTransformer(config, name=\"encoder\")\n self.pooler = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n activation=\"tanh\",\n name=\"pooler\",\n )\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n self.embeddings.vocab_size = value.shape[0]\n\n def _resize_token_embeddings(self, new_num_tokens):\n raise NotImplementedError\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n raise NotImplementedError\n\n def call(\n self,\n inputs,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n ):\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\n output_attentions = inputs[6] if len(inputs) > 6 else output_attentions\n output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states\n return_dict = inputs[8] if len(inputs) > 8 else return_dict\n assert len(inputs) <= 9, \"Too many inputs.\"\n elif isinstance(inputs, (dict, BatchEncoding)):\n 
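# A dict or BatchEncoding mirrors the positional order above; keyword\n            # arguments passed alongside serve only as fallbacks for missing keys.\n            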
input_ids = inputs.get(\"input_ids\")\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\n position_ids = inputs.get(\"position_ids\", position_ids)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\n return_dict = inputs.get(\"return_dict\", return_dict)\n assert len(inputs) <= 9, \"Too many inputs.\"\n else:\n input_ids = inputs\n\n output_attentions = output_attentions if output_attentions is not None else self.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if attention_mask is None:\n attention_mask = tf.fill(input_shape, 1)\n if token_type_ids is None:\n token_type_ids = tf.fill(input_shape, 0)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n\n extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.num_hidden_layers\n # head_mask = tf.constant([0] * self.num_hidden_layers)\n\n embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)\n encoder_outputs = self.encoder(\n embedding_output,\n extended_attention_mask,\n head_mask,\n output_attentions,\n output_hidden_states,\n return_dict,\n training=training,\n )\n\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output[:, 0])\n\n if not return_dict:\n return (sequence_output, pooled_output,) + encoder_outputs[1:]\n\n return TFBaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n 
)\n\n\n@dataclass\nclass TFAlbertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.TFAlbertForPreTrainingModel`.\n\n Args:\n prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n sop_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False\n continuation before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n prediction_logits: tf.Tensor = None\n sop_logits: tf.Tensor = None\n hidden_states: Optional[Tuple[tf.Tensor]] = None\n attentions: Optional[Tuple[tf.Tensor]] = None\n\n\nALBERT_START_DOCSTRING = r\"\"\"\n This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.\n Use it as a regular TF 2.0 Keras Model and\n refer to the TF 2.0 documentation for all matter related to general usage and behavior.\n\n .. _`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`:\n https://arxiv.org/abs/1909.11942\n\n .. _`tf.keras.Model`:\n https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model\n\n .. 
note::\n\n    TF 2.0 models accept two formats as inputs:\n\n        - having all inputs as keyword arguments (like PyTorch models), or\n        - having all inputs as a list, tuple or dict in the first positional arguments.\n\n    This second option is useful when using the :obj:`tf.keras.Model.fit()` method which currently requires having\n    all the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n    If you choose this second option, there are three possibilities you can use to gather all the input Tensors\n    in the first positional argument:\n\n        - a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`\n        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n        :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n        - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n        :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`\n\n    Args:\n        config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the configuration.\n            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nALBERT_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using :class:`transformers.AlbertTokenizer`.\n            See :func:`transformers.PreTrainedTokenizer.encode` and\n            :func:`transformers.PreTrainedTokenizer.__call__` for details.\n\n            `What are input IDs? <../glossary.html#input-ids>`__\n        attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):\n            Mask to avoid performing attention on padding token indices.\n            Mask values selected in ``[0, 1]``:\n            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n            `What are attention masks? <../glossary.html#attention-mask>`__\n        token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):\n            Segment token indices to indicate first and second portions of the inputs.\n            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n            corresponds to a `sentence B` token\n\n            `What are token type IDs? <../glossary.html#token-type-ids>`_\n        position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):\n            Indices of positions of each input sequence tokens in the position embeddings.\n            Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n            `What are position IDs? 
<../glossary.html#position-ids>`_\n        head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n            Mask to nullify selected heads of the self-attention modules.\n            Mask values selected in ``[0, 1]``:\n            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n        inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n            This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n            than the model's internal embedding lookup matrix.\n        training (:obj:`boolean`, `optional`, defaults to :obj:`False`):\n            Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them\n            (if set to :obj:`False`) for evaluation.\n        output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n            If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.\n        output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):\n            If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.\n        return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):\n            If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a\n            plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare Albert Model transformer outputting raw hidden-states without any specific head on top.\",\n    ALBERT_START_DOCSTRING,\n)\nclass TFAlbertModel(TFAlbertPreTrainedModel):\n    def __init__(self, config, *inputs, **kwargs):\n        super().__init__(config, *inputs, **kwargs)\n        self.albert = TFAlbertMainLayer(config, name=\"albert\")\n\n    @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n    @add_code_sample_docstrings(\n        tokenizer_class=_TOKENIZER_FOR_DOC,\n        checkpoint=\"albert-base-v2\",\n        output_type=TFBaseModelOutputWithPooling,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def call(self, inputs, **kwargs):\n        outputs = self.albert(inputs, **kwargs)\n        return outputs\n\n\n@add_start_docstrings(\n    \"\"\"Albert Model with two heads on top for pre-training:\n    a `masked language modeling` head and a `sentence order prediction` (classification) head. 
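The `sentence order prediction` head is ALBERT's replacement for BERT's next sentence prediction objective: it classifies whether two consecutive segments appear in their original order or have been swapped. 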
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass TFAlbertForPreTraining(TFAlbertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.albert = TFAlbertMainLayer(config, name=\"albert\")\n self.predictions = TFAlbertMLMHead(config, self.albert.embeddings, name=\"predictions\")\n self.sop_classifier = TFAlbertSOPHead(config, name=\"sop_classifier\")\n\n def get_output_embeddings(self):\n return self.albert.embeddings\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n\n Examples::\n import tensorflow as tf\n from transformers import AlbertTokenizer, TFAlbertForPreTraining\n tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n model = TFAlbertForPreTraining.from_pretrained('albert-base-v2')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n prediction_scores, sop_scores = outputs[:2]\n \"\"\"\n return_dict = kwargs.get(\"return_dict\")\n return_dict = return_dict if return_dict is not None else self.albert.return_dict\n outputs = self.albert(inputs, **kwargs)\n sequence_output, pooled_output = outputs[:2]\n prediction_scores = self.predictions(sequence_output)\n sop_scores = self.sop_classifier(pooled_output, training=kwargs.get(\"training\", False))\n\n if not return_dict:\n return (prediction_scores, sop_scores) + outputs[2:]\n\n return TFAlbertForPreTrainingOutput(\n prediction_logits=prediction_scores,\n sop_logits=sop_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass TFAlbertSOPHead(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\",\n )\n\n def call(self, pooled_output, training: bool):\n dropout_pooled_output = self.dropout(pooled_output, training=training)\n logits = self.classifier(dropout_pooled_output)\n return logits\n\n\n@add_start_docstrings(\"\"\"Albert Model with a `language modeling` head on top. 
\"\"\", ALBERT_START_DOCSTRING)\nclass TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.albert = TFAlbertMainLayer(config, name=\"albert\")\n self.predictions = TFAlbertMLMHead(config, self.albert.embeddings, name=\"predictions\")\n\n def get_output_embeddings(self):\n return self.albert.embeddings\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TFMaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n ):\n r\"\"\"\n labels (:obj::obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.albert.return_dict\n if isinstance(inputs, (tuple, list)):\n labels = inputs[9] if len(inputs) > 9 else labels\n if len(inputs) > 9:\n inputs = inputs[:9]\n elif isinstance(inputs, (dict, BatchEncoding)):\n labels = inputs.pop(\"labels\", labels)\n\n outputs = self.albert(\n inputs,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.predictions(sequence_output, training=training)\n\n loss = None if labels is None else self.compute_loss(labels, prediction_scores)\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFMaskedLMOutput(\n loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.albert = TFAlbertMainLayer(config, name=\"albert\")\n self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TFSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n ):\n r\"\"\"\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.albert.return_dict\n if isinstance(inputs, (tuple, list)):\n labels = inputs[9] if len(inputs) > 9 else labels\n if len(inputs) > 9:\n inputs = inputs[:9]\n elif isinstance(inputs, (dict, BatchEncoding)):\n labels = inputs.pop(\"labels\", labels)\n\n outputs = self.albert(\n inputs,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output, training=training)\n logits = self.classifier(pooled_output)\n\n loss = None if labels is None else self.compute_loss(labels, logits)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.albert = TFAlbertMainLayer(config, name=\"albert\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TFTokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n ):\n r\"\"\"\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.albert.return_dict\n if isinstance(inputs, (tuple, list)):\n labels = inputs[9] if len(inputs) > 9 else labels\n if len(inputs) > 9:\n inputs = inputs[:9]\n elif isinstance(inputs, (dict, BatchEncoding)):\n labels = inputs.pop(\"labels\", labels)\n\n outputs = self.albert(\n inputs,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output, training=training)\n logits = self.classifier(sequence_output)\n\n loss = None if labels is None else self.compute_loss(labels, logits)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFTokenClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.albert = TFAlbertMainLayer(config, name=\"albert\")\n self.qa_outputs = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"qa_outputs\"\n )\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TFQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n start_positions=None,\n end_positions=None,\n training=False,\n ):\n r\"\"\"\n start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.albert.return_dict\n if isinstance(inputs, (tuple, list)):\n start_positions = inputs[9] if len(inputs) > 9 else start_positions\n end_positions = inputs[10] if len(inputs) > 10 else end_positions\n if len(inputs) > 9:\n inputs = inputs[:9]\n elif isinstance(inputs, (dict, BatchEncoding)):\n start_positions = inputs.pop(\"start_positions\", start_positions)\n end_positions = inputs.pop(\"end_positions\", start_positions)\n\n outputs = self.albert(\n inputs,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = tf.split(logits, 2, axis=-1)\n start_logits = tf.squeeze(start_logits, axis=-1)\n end_logits = tf.squeeze(end_logits, axis=-1)\n\n loss = None\n if start_positions is not None and end_positions is not None:\n labels = {\"start_position\": start_positions}\n labels[\"end_position\"] = end_positions\n loss = self.compute_loss(labels, (start_logits, end_logits))\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFQuestionAnsweringModelOutput(\n loss=loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks. \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.albert = TFAlbertMainLayer(config, name=\"albert\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n 1, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @property\n def dummy_inputs(self):\n \"\"\" Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n \"\"\"\n return {\"input_ids\": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format(\"(batch_size, num_choices, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TFMultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n ):\n r\"\"\"\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n \"\"\"\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\n output_attentions = inputs[6] if len(inputs) > 6 else output_attentions\n output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states\n return_dict = inputs[8] if len(inputs) > 8 else return_dict\n labels = inputs[9] if len(inputs) > 9 else labels\n assert len(inputs) <= 10, \"Too many inputs.\"\n elif isinstance(inputs, (dict, BatchEncoding)):\n input_ids = inputs.get(\"input_ids\")\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\n position_ids = inputs.get(\"position_ids\", position_ids)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\n return_dict = inputs.get(\"return_dict\", return_dict)\n labels = inputs.get(\"labels\", labels)\n assert len(inputs) <= 10, \"Too many inputs.\"\n else:\n input_ids = inputs\n return_dict = return_dict if return_dict is not None else self.albert.return_dict\n\n if input_ids is not None:\n num_choices = shape_list(input_ids)[1]\n seq_length = shape_list(input_ids)[2]\n else:\n num_choices = shape_list(inputs_embeds)[1]\n seq_length = shape_list(inputs_embeds)[2]\n\n flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None\n flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None\n flat_token_type_ids = 
tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None\n        flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None\n        flat_inputs_embeds = (\n            tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))\n            if inputs_embeds is not None\n            else None\n        )\n\n        outputs = self.albert(\n            flat_input_ids,\n            flat_attention_mask,\n            flat_token_type_ids,\n            flat_position_ids,\n            head_mask,\n            flat_inputs_embeds,\n            output_attentions,\n            output_hidden_states,\n            return_dict=return_dict,\n            training=training,\n        )\n\n        pooled_output = outputs[1]\n\n        pooled_output = self.dropout(pooled_output, training=training)\n        logits = self.classifier(pooled_output)\n        reshaped_logits = tf.reshape(logits, (-1, num_choices))\n\n        loss = None if labels is None else self.compute_loss(labels, reshaped_logits)\n\n        if not return_dict:\n            output = (reshaped_logits,) + outputs[2:]\n            return ((loss,) + output) if loss is not None else output\n\n        return TFMultipleChoiceModelOutput(\n            loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n        )\n", "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom transformers import is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\n\nif is_tf_available():\n    import tensorflow as tf\n    import numpy as np\n    from transformers import TFXLMRobertaModel\n\n\n@require_tf\nclass TFXLMRobertaModelIntegrationTest(unittest.TestCase):\n    @slow\n    def test_output_embeds_base_model(self):\n        model = TFXLMRobertaModel.from_pretrained(\"jplu/tf-xlm-roberta-base\")\n\n        features = {\n            \"input_ids\": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32), # \"My dog is cute\"\n            \"attention_mask\": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),\n        }\n\n        output = model(features)[\"last_hidden_state\"]\n        expected_shape = tf.TensorShape((1, 6, 768))\n        self.assertEqual(output.shape, expected_shape)\n        # compare the actual values for a slice.\n        expected_slice = tf.convert_to_tensor(\n            [\n                [\n                    [0.0681762, 0.10894451, 0.06772504],\n                    [-0.06423668, 0.02366615, 0.04329344],\n                    [-0.06057295, 0.09974135, -0.00070584],\n                ]\n            ],\n            dtype=tf.float32,\n        )\n\n        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))\n", "import logging\nimport os\nimport time\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Dict, List, Optional, Union\n\nimport torch\nfrom filelock import FileLock\nfrom torch.utils.data.dataset import Dataset\n\nfrom ...modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING\nfrom ...tokenization_utils import PreTrainedTokenizer\nfrom ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features\n\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())\n
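# Flat tuple of model-type strings (e.g. \"bert\", \"albert\") surfaced in the --model_type help below.\n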
tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\n@dataclass\nclass SquadDataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n model_type: str = field(\n default=None, metadata={\"help\": \"Model type selected in the list: \" + \", \".join(MODEL_TYPES)}\n )\n data_dir: str = field(\n default=None, metadata={\"help\": \"The input data dir. Should contain the .json files for the SQuAD task.\"}\n )\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n doc_stride: int = field(\n default=128,\n metadata={\"help\": \"When splitting up a long document into chunks, how much stride to take between chunks.\"},\n )\n max_query_length: int = field(\n default=64,\n metadata={\n \"help\": \"The maximum number of tokens for the question. Questions longer than this will \"\n \"be truncated to this length.\"\n },\n )\n max_answer_length: int = field(\n default=30,\n metadata={\n \"help\": \"The maximum length of an answer that can be generated. This is needed because the start \"\n \"and end predictions are not conditioned on one another.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n version_2_with_negative: bool = field(\n default=False, metadata={\"help\": \"If true, the SQuAD examples contain some that do not have an answer.\"}\n )\n null_score_diff_threshold: float = field(\n default=0.0, metadata={\"help\": \"If null_score - best_non_null is greater than the threshold predict null.\"}\n )\n n_best_size: int = field(\n default=20, metadata={\"help\": \"The total number of n-best predictions to generate in the nbest_predictions.json output file.\"}\n )\n lang_id: int = field(\n default=0,\n metadata={\n \"help\": \"language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)\"\n },\n )\n threads: int = field(default=1, metadata={\"help\": \"multiple threads for converting examples to features\"})\n\n\nclass Split(Enum):\n train = \"train\"\n dev = \"dev\"\n\n\nclass SquadDataset(Dataset):\n \"\"\"\n This will be superseded by a framework-agnostic approach\n soon.\n \"\"\"\n\n args: SquadDataTrainingArguments\n features: List[SquadFeatures]\n mode: Split\n is_language_sensitive: bool\n\n def __init__(\n self,\n args: SquadDataTrainingArguments,\n tokenizer: PreTrainedTokenizer,\n limit_length: Optional[int] = None,\n mode: Union[str, Split] = Split.train,\n is_language_sensitive: Optional[bool] = False,\n cache_dir: Optional[str] = None,\n ):\n self.args = args\n self.is_language_sensitive = is_language_sensitive\n self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()\n if isinstance(mode, str):\n try:\n mode = Split[mode]\n except KeyError:\n raise KeyError(\"mode is not a valid split name\")\n self.mode = mode\n # Load data features from cache or dataset file\n version_tag = \"v2\" if args.version_2_with_negative else \"v1\"\n cached_features_file = os.path.join(\n cache_dir if cache_dir is not None else args.data_dir,\n \"cached_{}_{}_{}_{}\".format(\n mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), version_tag,\n ),\n )\n\n # Make sure only the first process in distributed training processes the dataset,\n # and the others will use the cache.\n lock_path = 
cached_features_file + \".lock\"\n with FileLock(lock_path):\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n start = time.time()\n self.features = torch.load(cached_features_file)\n logger.info(\n \"Loading features from cached file %s [took %.3f s]\", cached_features_file, time.time() - start\n )\n else:\n if mode == Split.dev:\n examples = self.processor.get_dev_examples(args.data_dir)\n else:\n examples = self.processor.get_train_examples(args.data_dir)\n\n self.features = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=mode == Split.train,\n threads=args.threads,\n )\n\n start = time.time()\n torch.save(self.features, cached_features_file)\n # ^ This seems to take a lot of time so I want to investigate why and how we can improve.\n logger.info(\n \"Saving features into cached file %s [took %.3f s]\", cached_features_file, time.time() - start\n )\n\n def __len__(self):\n return len(self.features)\n\n def __getitem__(self, i) -> Dict[str, torch.Tensor]:\n # Convert to Tensors and build dataset\n feature = self.features[i]\n\n input_ids = torch.tensor(feature.input_ids, dtype=torch.long)\n attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)\n token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)\n cls_index = torch.tensor(feature.cls_index, dtype=torch.long)\n p_mask = torch.tensor(feature.p_mask, dtype=torch.float)\n is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)\n\n inputs = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n if self.args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\"]:\n del inputs[\"token_type_ids\"]\n\n if self.args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": cls_index, \"p_mask\": p_mask})\n if self.args.version_2_with_negative:\n inputs.update({\"is_impossible\": is_impossible})\n if self.is_language_sensitive:\n inputs.update({\"langs\": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})\n\n if self.mode == Split.train:\n start_positions = torch.tensor(feature.start_position, dtype=torch.long)\n end_positions = torch.tensor(feature.end_position, dtype=torch.long)\n inputs.update({\"start_positions\": start_positions, \"end_positions\": end_positions})\n\n return inputs\n", "# coding=utf-8\n# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 XLM model.\n\"\"\"\n\n\nimport itertools\nimport logging\nimport math\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .configuration_xlm import XLMConfig\nfrom .file_utils import (\n MULTIPLE_CHOICE_DUMMY_INPUTS,\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n)\nfrom .modeling_tf_outputs import (\n TFBaseModelOutput,\n TFMultipleChoiceModelOutput,\n TFQuestionAnsweringModelOutput,\n TFSequenceClassifierOutput,\n TFTokenClassifierOutput,\n)\nfrom .modeling_tf_utils import (\n TFMultipleChoiceLoss,\n TFPreTrainedModel,\n TFQuestionAnsweringLoss,\n TFSequenceClassificationLoss,\n TFSequenceSummary,\n TFSharedEmbeddings,\n TFTokenClassificationLoss,\n get_initializer,\n keras_serializable,\n shape_list,\n)\nfrom .tokenization_utils import BatchEncoding\n\n\nlogger = logging.getLogger(__name__)\n\n_CONFIG_FOR_DOC = \"XLMConfig\"\n_TOKENIZER_FOR_DOC = \"XLMTokenizer\"\n\nTF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"xlm-mlm-en-2048\",\n \"xlm-mlm-ende-1024\",\n \"xlm-mlm-enfr-1024\",\n \"xlm-mlm-enro-1024\",\n \"xlm-mlm-tlm-xnli15-1024\",\n \"xlm-mlm-xnli15-1024\",\n \"xlm-clm-enfr-1024\",\n \"xlm-clm-ende-1024\",\n \"xlm-mlm-17-1280\",\n \"xlm-mlm-100-1280\",\n # See all XLM models at https://huggingface.co/models?filter=xlm\n]\n\n\ndef create_sinusoidal_embeddings(n_pos, dim, out):\n position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])\n out[:, 0::2] = tf.constant(np.sin(position_enc[:, 0::2]))\n out[:, 1::2] = tf.constant(np.cos(position_enc[:, 1::2]))\n\n\ndef gelu(x):\n \"\"\" Gaussian Error Linear Unit.\n Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))\n return x * cdf\n\n\ndef get_masks(slen, lengths, causal, padding_mask=None, dtype=tf.float32):\n \"\"\"\n Generate hidden states mask, and optionally an attention mask.\n \"\"\"\n bs = shape_list(lengths)[0]\n if padding_mask is not None:\n mask = padding_mask\n else:\n # assert lengths.max().item() <= slen\n alen = tf.range(slen)\n mask = tf.math.less(alen, lengths[:, tf.newaxis])\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = tf.less_equal(\n tf.tile(alen[tf.newaxis, tf.newaxis, :], (bs, slen, 1)), alen[tf.newaxis, :, tf.newaxis]\n )\n else:\n attn_mask = mask\n\n # sanity check\n # assert shape_list(mask) == [bs, slen]\n tf.debugging.assert_equal(shape_list(mask), [bs, slen])\n assert causal is False or shape_list(attn_mask) == [bs, slen, slen]\n\n mask 
= tf.cast(mask, dtype=dtype)\n attn_mask = tf.cast(attn_mask, dtype=dtype)\n\n return mask, attn_mask\n\n\nclass TFMultiHeadAttention(tf.keras.layers.Layer):\n\n NEW_ID = itertools.count()\n\n def __init__(self, n_heads, dim, config, **kwargs):\n super().__init__(**kwargs)\n self.layer_id = next(TFMultiHeadAttention.NEW_ID)\n self.dim = dim\n self.n_heads = n_heads\n self.output_attentions = config.output_attentions\n assert self.dim % self.n_heads == 0\n\n self.q_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"q_lin\")\n self.k_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"k_lin\")\n self.v_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"v_lin\")\n self.out_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"out_lin\")\n self.dropout = tf.keras.layers.Dropout(config.attention_dropout)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n raise NotImplementedError\n\n def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = shape_list(input)\n if kv is None:\n klen = qlen if cache is None else cache[\"slen\"] + qlen\n else:\n klen = shape_list(kv)[1]\n # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)\n n_heads = self.n_heads\n dim_per_head = self.dim // n_heads\n mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))\n\n q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n if kv is None:\n k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)\n v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n cache[self.layer_id] = (k, v)\n\n q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)\n scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)\n mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)\n # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)\n scores = scores - 1e30 * (1.0 - mask)\n\n weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)\n weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n outputs = (self.out_lin(context),)\n if output_attentions:\n outputs = outputs + (weights,)\n return 
outputs\n\n\nclass TFTransformerFFN(tf.keras.layers.Layer):\n def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):\n super().__init__(**kwargs)\n self.lin1 = tf.keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name=\"lin1\")\n self.lin2 = tf.keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name=\"lin2\")\n self.act = tf.keras.layers.Activation(gelu) if config.gelu_activation else tf.keras.activations.relu\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n\n def call(self, input, training=False):\n x = self.lin1(input)\n x = self.act(x)\n x = self.lin2(x)\n x = self.dropout(x, training=training)\n return x\n\n\n@keras_serializable\nclass TFXLMMainLayer(tf.keras.layers.Layer):\n config_class = XLMConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.output_hidden_states = config.output_hidden_states\n self.output_attentions = config.output_attentions\n self.return_dict = config.use_return_dict\n\n # encoder / decoder, output layer\n self.is_encoder = config.is_encoder\n self.is_decoder = not config.is_encoder\n if self.is_decoder:\n raise NotImplementedError(\"Currently XLM can only be used as an encoder\")\n # self.with_output = with_output\n self.causal = config.causal\n\n # dictionary / languages\n self.n_langs = config.n_langs\n self.use_lang_emb = config.use_lang_emb\n self.n_words = config.n_words\n self.eos_index = config.eos_index\n self.pad_index = config.pad_index\n # self.dico = dico\n # self.id2lang = config.id2lang\n # self.lang2id = config.lang2id\n # assert len(self.dico) == self.n_words\n # assert len(self.id2lang) == len(self.lang2id) == self.n_langs\n\n # model parameters\n self.dim = config.emb_dim # 512 by default\n self.hidden_dim = self.dim * 4 # 2048 by default\n self.n_heads = config.n_heads # 8 by default\n self.n_layers = config.n_layers\n assert self.dim % self.n_heads == 0, \"transformer dim must be a multiple of n_heads\"\n\n # embeddings\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.attention_dropout = tf.keras.layers.Dropout(config.attention_dropout)\n\n self.position_embeddings = tf.keras.layers.Embedding(\n config.max_position_embeddings,\n self.dim,\n embeddings_initializer=get_initializer(config.embed_init_std),\n name=\"position_embeddings\",\n )\n if config.sinusoidal_embeddings:\n raise NotImplementedError\n # create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)\n if config.n_langs > 1 and config.use_lang_emb:\n self.lang_embeddings = tf.keras.layers.Embedding(\n self.n_langs,\n self.dim,\n embeddings_initializer=get_initializer(config.embed_init_std),\n name=\"lang_embeddings\",\n )\n self.embeddings = TFSharedEmbeddings(\n self.n_words, self.dim, initializer_range=config.embed_init_std, name=\"embeddings\"\n ) # padding_idx=self.pad_index)\n self.layer_norm_emb = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"layer_norm_emb\")\n\n # transformer layers\n self.attentions = []\n self.layer_norm1 = []\n self.ffns = []\n self.layer_norm2 = []\n # if self.is_decoder:\n # self.layer_norm15 = []\n # self.encoder_attn = []\n\n for i in range(self.n_layers):\n self.attentions.append(\n TFMultiHeadAttention(self.n_heads, self.dim, config=config, name=\"attentions_._{}\".format(i))\n )\n self.layer_norm1.append(\n tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"layer_norm1_._{}\".format(i))\n )\n # if self.is_decoder:\n # 
self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))\n self.ffns.append(\n TFTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=\"ffns_._{}\".format(i))\n )\n self.layer_norm2.append(\n tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"layer_norm2_._{}\".format(i))\n )\n\n if hasattr(config, \"pruned_heads\"):\n pruned_heads = config.pruned_heads.copy().items()\n config.pruned_heads = {}\n for layer, heads in pruned_heads:\n if self.attentions[int(layer)].n_heads == config.n_heads:\n self.prune_heads({int(layer): list(map(int, heads))})\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.weight = value\n self.embeddings.vocab_size = value.shape[0]\n\n def _resize_token_embeddings(self, new_num_tokens):\n raise NotImplementedError\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n raise NotImplementedError\n\n def call(\n self,\n inputs,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n ): # removed: src_enc=None, src_len=None\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\n langs = inputs[2] if len(inputs) > 2 else langs\n token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids\n position_ids = inputs[4] if len(inputs) > 4 else position_ids\n lengths = inputs[5] if len(inputs) > 5 else lengths\n cache = inputs[6] if len(inputs) > 6 else cache\n head_mask = inputs[7] if len(inputs) > 7 else head_mask\n inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds\n output_attentions = inputs[9] if len(inputs) > 9 else output_attentions\n output_hidden_states = inputs[10] if len(inputs) > 10 else output_hidden_states\n return_dict = inputs[11] if len(inputs) > 11 else return_dict\n assert len(inputs) <= 12, \"Too many inputs.\"\n elif isinstance(inputs, (dict, BatchEncoding)):\n input_ids = inputs.get(\"input_ids\")\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\n langs = inputs.get(\"langs\", langs)\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\n position_ids = inputs.get(\"position_ids\", position_ids)\n lengths = inputs.get(\"lengths\", lengths)\n cache = inputs.get(\"cache\", cache)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\n return_dict = inputs.get(\"return_dict\", return_dict)\n assert len(inputs) <= 12, \"Too many inputs.\"\n else:\n input_ids = inputs\n\n output_attentions = output_attentions if output_attentions is not None else self.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both 
input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n bs, slen = shape_list(input_ids)\n elif inputs_embeds is not None:\n bs, slen = shape_list(inputs_embeds)[:2]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if lengths is None:\n if input_ids is not None:\n lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1)\n else:\n lengths = tf.convert_to_tensor([slen] * bs, tf.int32)\n # mask = input_ids != self.pad_index\n\n # check inputs\n # assert shape_list(lengths)[0] == bs\n tf.debugging.assert_equal(shape_list(lengths)[0], bs)\n # assert lengths.max().item() <= slen\n # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0\n # assert (src_enc is None) == (src_len is None)\n # if src_enc is not None:\n # assert self.is_decoder\n # assert src_enc.size(0) == bs\n\n # generate masks\n mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)\n # if self.is_decoder and src_enc is not None:\n # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]\n\n # position_ids\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(slen), axis=0)\n else:\n # assert shape_list(position_ids) == [bs, slen] # (slen, bs)\n tf.debugging.assert_equal(shape_list(position_ids), [bs, slen])\n # position_ids = position_ids.transpose(0, 1)\n\n # langs\n if langs is not None:\n # assert shape_list(langs) == [bs, slen] # (slen, bs)\n tf.debugging.assert_equal(shape_list(langs), [bs, slen])\n # langs = langs.transpose(0, 1)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.n_layers\n\n # do not recompute cached elements\n if cache is not None and input_ids is not None:\n _slen = slen - cache[\"slen\"]\n input_ids = input_ids[:, -_slen:]\n position_ids = position_ids[:, -_slen:]\n if langs is not None:\n langs = langs[:, -_slen:]\n mask = mask[:, -_slen:]\n attn_mask = attn_mask[:, -_slen:]\n\n # embeddings\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n tensor = inputs_embeds + self.position_embeddings(position_ids)\n\n if langs is not None and self.use_lang_emb and self.n_langs > 1:\n tensor = tensor + self.lang_embeddings(langs)\n if token_type_ids is not None:\n tensor = tensor + self.embeddings(token_type_ids)\n tensor = self.layer_norm_emb(tensor)\n tensor = self.dropout(tensor, training=training)\n tensor = tensor * mask[..., tf.newaxis]\n\n # transformer layers\n hidden_states = () if output_hidden_states else None\n attentions = () if output_attentions else None\n for i in range(self.n_layers):\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # self attention\n attn_outputs = self.attentions[i](\n tensor, attn_mask, None, cache, head_mask[i], output_attentions, training=training\n )\n attn = attn_outputs[0]\n if output_attentions:\n attentions = attentions + (attn_outputs[1],)\n attn = self.dropout(attn, training=training)\n tensor = tensor + attn\n tensor = self.layer_norm1[i](tensor)\n\n # encoder attention (for decoder only)\n # if self.is_decoder and src_enc is not None:\n # attn = self.encoder_attn[i](tensor, 
src_mask, kv=src_enc, cache=cache)\n # attn = F.dropout(attn, p=self.dropout, training=self.training)\n # tensor = tensor + attn\n # tensor = self.layer_norm15[i](tensor)\n\n # FFN\n tensor = tensor + self.ffns[i](tensor)\n tensor = self.layer_norm2[i](tensor)\n tensor = tensor * mask[..., tf.newaxis]\n\n # Add last hidden state\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # update cache length\n if cache is not None:\n cache[\"slen\"] += shape_list(tensor)[1]\n\n # move back sequence length to dimension 0\n # tensor = tensor.transpose(0, 1)\n\n if not return_dict:\n return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)\n return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)\n\n\nclass TFXLMPreTrainedModel(TFPreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = XLMConfig\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n # Sometimes XLM has language embeddings so don't forget to build them as well if needed\n inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])\n attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n if self.config.use_lang_emb and self.config.n_langs > 1:\n langs_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n else:\n langs_list = None\n return {\"input_ids\": inputs_list, \"attention_mask\": attns_list, \"langs\": langs_list}\n\n\n# Remove when XLMWithLMHead computes loss like other LM models\n@dataclass\nclass TFXLMWithLMHeadModelOutput(ModelOutput):\n \"\"\"\n Base class for :class:`~transformers.TFXLMWithLMHeadModel` outputs.\n\n Args:\n logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n logits: tf.Tensor = None\n hidden_states: Optional[Tuple[tf.Tensor]] = None\n attentions: Optional[Tuple[tf.Tensor]] = None\n\n\nXLM_START_DOCSTRING = r\"\"\"\n\n .. 
note::\n\n TF 2.0 models accept two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n This second option is useful when using the :obj:`tf.keras.Model.fit()` method which currently requires having\n all the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors\n in the first positional argument:\n\n - a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated with the input names given in the docstring:\n :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`\n\n Parameters:\n config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nXLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.__call__` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n langs (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n A parallel sequence of tokens to be used to indicate the language of each token in the input.\n Indices are language ids which can be obtained from the language names by using two conversion mappings\n provided in the configuration of the model (only provided for multilingual models).\n More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and\n the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).\n\n See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__.\n token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n lengths (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Length of each sentence that can be used to avoid performing attention on padding token indices.\n You can also use `attention_mask` for the same result (see above), kept here for compatibility.\n Indices selected in ``[0, ..., input_ids.size(-1)]``.\n cache (:obj:`Dict[str, tf.Tensor]`, `optional`, defaults to :obj:`None`):\n Dictionary with ``tf.Tensor`` that contains pre-computed\n hidden-states (key and values in the attention blocks) as computed by the model\n (see `cache` output below). Can be used to speed up sequential decoding.\n The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.\n head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the attention tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the hidden states of all layers are returned. 
See ``hidden_states`` under returned tensors for more detail.\n return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a\n plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare XLM Model transformer outputting raw hidden-states without any specific head on top.\",\n XLM_START_DOCSTRING,\n)\nclass TFXLMModel(TFXLMPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFXLMMainLayer(config, name=\"transformer\")\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"xlm-mlm-en-2048\",\n output_type=TFBaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(self, inputs, **kwargs):\n outputs = self.transformer(inputs, **kwargs)\n return outputs\n\n\nclass TFXLMPredLayer(tf.keras.layers.Layer):\n \"\"\"\n Prediction layer (cross_entropy or adaptive_softmax).\n \"\"\"\n\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.asm = config.asm\n self.n_words = config.n_words\n self.pad_index = config.pad_index\n if config.asm is False:\n self.input_embeddings = input_embeddings\n else:\n raise NotImplementedError\n # self.proj = nn.AdaptiveLogSoftmaxWithLoss(\n # in_features=dim,\n # n_classes=config.n_words,\n # cutoffs=config.asm_cutoffs,\n # div_value=config.asm_div_value,\n # head_bias=True, # default is False\n # )\n\n def build(self, input_shape):\n # The output weights are the same as the input embeddings, but there is an output-only bias for each token.\n self.bias = self.add_weight(shape=(self.n_words,), initializer=\"zeros\", trainable=True, name=\"bias\")\n super().build(input_shape)\n\n def call(self, hidden_states):\n hidden_states = self.input_embeddings(hidden_states, mode=\"linear\")\n hidden_states = hidden_states + self.bias\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"The XLM Model transformer with a language modeling head on top\n (linear layer with weights tied to the input embeddings). 
\"\"\",\n XLM_START_DOCSTRING,\n)\nclass TFXLMWithLMHeadModel(TFXLMPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFXLMMainLayer(config, name=\"transformer\")\n self.pred_layer = TFXLMPredLayer(config, self.transformer.embeddings, name=\"pred_layer_._proj\")\n\n def get_output_embeddings(self):\n return self.pred_layer.input_embeddings\n\n def prepare_inputs_for_generation(self, inputs, **kwargs):\n mask_token_id = self.config.mask_token_id\n lang_id = self.config.lang_id\n\n effective_batch_size = inputs.shape[0]\n mask_token = tf.ones((effective_batch_size, 1), dtype=tf.int32) * mask_token_id\n inputs = tf.concat([inputs, mask_token], axis=1)\n\n if lang_id is not None:\n langs = tf.ones_like(inputs) * lang_id\n else:\n langs = None\n return {\"inputs\": inputs, \"langs\": langs}\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"xlm-mlm-en-2048\",\n output_type=TFXLMWithLMHeadModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(self, inputs, **kwargs):\n return_dict = kwargs.get(\"return_dict\")\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\n transformer_outputs = self.transformer(inputs, **kwargs)\n\n output = transformer_outputs[0]\n outputs = self.pred_layer(output)\n\n if not return_dict:\n return (outputs,) + transformer_outputs[1:]\n\n return TFXLMWithLMHeadModelOutput(\n logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n XLM_START_DOCSTRING,\n)\nclass TFXLMForSequenceClassification(TFXLMPreTrainedModel, TFSequenceClassificationLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.transformer = TFXLMMainLayer(config, name=\"transformer\")\n self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name=\"sequence_summary\")\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"xlm-mlm-en-2048\",\n output_type=TFSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n ):\n r\"\"\"\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\n if isinstance(inputs, (tuple, list)):\n labels = inputs[12] if len(inputs) > 12 else labels\n if len(inputs) > 12:\n inputs = inputs[:12]\n elif isinstance(inputs, (dict, BatchEncoding)):\n labels = inputs.pop(\"labels\", labels)\n\n transformer_outputs = self.transformer(\n inputs,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n output = transformer_outputs[0]\n\n logits = self.sequence_summary(output)\n\n loss = None if labels is None else self.compute_loss(labels, logits)\n\n if not return_dict:\n output = (logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n XLM_START_DOCSTRING,\n)\nclass TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.transformer = TFXLMMainLayer(config, name=\"transformer\")\n self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name=\"sequence_summary\")\n self.logits_proj = tf.keras.layers.Dense(\n 1, kernel_initializer=get_initializer(config.init_std), name=\"logits_proj\"\n )\n\n @property\n def dummy_inputs(self):\n \"\"\" Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n \"\"\"\n return {\n \"input_ids\": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS),\n \"langs\": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS),\n }\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"xlm-mlm-en-2048\",\n output_type=TFMultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n ):\n r\"\"\"\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n \"\"\"\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\n langs = inputs[2] if len(inputs) > 2 else langs\n token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids\n position_ids = inputs[4] if len(inputs) > 4 else position_ids\n lengths = inputs[5] if len(inputs) > 5 else lengths\n cache = inputs[6] if len(inputs) > 6 else cache\n head_mask = inputs[7] if len(inputs) > 7 else head_mask\n inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds\n output_attentions = inputs[9] if len(inputs) > 9 else output_attentions\n output_hidden_states = inputs[10] if len(inputs) > 10 else output_hidden_states\n return_dict = inputs[11] if len(inputs) > 11 else return_dict\n labels = inputs[12] if len(inputs) > 12 else labels\n assert len(inputs) <= 13, \"Too many inputs.\"\n elif isinstance(inputs, (dict, BatchEncoding)):\n input_ids = inputs.get(\"input_ids\")\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\n langs = inputs.get(\"langs\", langs)\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\n position_ids = inputs.get(\"position_ids\", position_ids)\n lengths = inputs.get(\"lengths\", lengths)\n cache = inputs.get(\"cache\", cache)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\n return_dict = inputs.get(\"return_dict\", return_dict)\n labels = inputs.get(\"labels\", labels)\n assert len(inputs) <= 13, \"Too many inputs.\"\n else:\n input_ids = inputs\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\n\n if input_ids is not None:\n num_choices = shape_list(input_ids)[1]\n seq_length = 
shape_list(input_ids)[2]\n else:\n num_choices = shape_list(inputs_embeds)[1]\n seq_length = shape_list(inputs_embeds)[2]\n\n flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None\n flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None\n flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None\n flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None\n flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None\n flat_inputs_embeds = (\n tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))\n if inputs_embeds is not None\n else None\n )\n\n if lengths is not None:\n warnings.warn(\n \"The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the \"\n \"attention mask instead.\",\n FutureWarning,\n )\n lengths = None\n\n transformer_outputs = self.transformer(\n flat_input_ids,\n flat_attention_mask,\n flat_langs,\n flat_token_type_ids,\n flat_position_ids,\n lengths,\n cache,\n head_mask,\n flat_inputs_embeds,\n output_attentions,\n output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n output = transformer_outputs[0]\n logits = self.sequence_summary(output)\n logits = self.logits_proj(logits)\n reshaped_logits = tf.reshape(logits, (-1, num_choices))\n\n loss = None if labels is None else self.compute_loss(labels, reshaped_logits)\n\n if not return_dict:\n output = (reshaped_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFMultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n XLM_START_DOCSTRING,\n)\nclass TFXLMForTokenClassification(TFXLMPreTrainedModel, TFTokenClassificationLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.transformer = TFXLMMainLayer(config, name=\"transformer\")\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.classifier = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.init_std), name=\"classifier\"\n )\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"xlm-mlm-en-2048\",\n output_type=TFTokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n ):\n r\"\"\"\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\n if isinstance(inputs, (tuple, list)):\n labels = inputs[12] if len(inputs) > 12 else labels\n if len(inputs) > 12:\n inputs = inputs[:12]\n elif isinstance(inputs, (dict, BatchEncoding)):\n labels = inputs.pop(\"labels\", labels)\n\n transformer_outputs = self.transformer(\n inputs,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n sequence_output = transformer_outputs[0]\n\n sequence_output = self.dropout(sequence_output, training=training)\n logits = self.classifier(sequence_output)\n\n loss = None if labels is None else self.compute_loss(labels, logits)\n\n if not return_dict:\n output = (logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFTokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n XLM_START_DOCSTRING,\n)\nclass TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel, TFQuestionAnsweringLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFXLMMainLayer(config, name=\"transformer\")\n self.qa_outputs = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.init_std), name=\"qa_outputs\"\n )\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"xlm-mlm-en-2048\",\n output_type=TFQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n inputs=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n start_positions=None,\n end_positions=None,\n training=False,\n ):\n r\"\"\"\n start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\n if isinstance(inputs, (tuple, list)):\n start_positions = inputs[12] if len(inputs) > 12 else start_positions\n end_positions = inputs[13] if len(inputs) > 13 else end_positions\n if len(inputs) > 12:\n inputs = inputs[:12]\n elif isinstance(inputs, (dict, BatchEncoding)):\n start_positions = inputs.pop(\"start_positions\", start_positions)\n end_positions = inputs.pop(\"end_positions\", end_positions)\n\n transformer_outputs = self.transformer(\n inputs,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n sequence_output = transformer_outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = tf.split(logits, 2, axis=-1)\n start_logits = tf.squeeze(start_logits, axis=-1)\n end_logits = tf.squeeze(end_logits, axis=-1)\n\n loss = None\n if start_positions is not None and end_positions is not None:\n labels = {\"start_position\": start_positions}\n labels[\"end_position\"] = end_positions\n loss = self.compute_loss(labels, (start_logits, end_logits))\n\n if not return_dict:\n output = (start_logits, end_logits) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFQuestionAnsweringModelOutput(\n loss=loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n"
]
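A minimal sketch, assuming greedy argmax decoding and a tokenizer with the usual `decode` method, of how the start/end logits produced by the question-answering head above are typically turned into an answer span (production pipelines instead score the top-k start/end pairs):

import tensorflow as tf

def decode_answer_span(start_logits, end_logits, input_ids, tokenizer):
    # Greedy decoding: most likely start and end positions for the first example.
    start = int(tf.argmax(start_logits, axis=-1)[0])
    end = int(tf.argmax(end_logits, axis=-1)[0])
    if end < start:  # guard against inverted spans
        start, end = end, start
    ids = input_ids[0, start : end + 1].numpy().tolist()
    return tokenizer.decode(ids)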
[ [ "torch.nn.functional.softmax", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.save", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.barrier", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "torch.nn.KLDivLoss", "numpy.random.seed", "torch.cuda.set_device", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.fill", "tensorflow.math.sqrt", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "tensorflow.cast", "tensorflow.squeeze", "tensorflow.gather", "tensorflow.name_scope", "tensorflow.keras.layers.Dropout", "tensorflow.split" ], [ "tensorflow.convert_to_tensor", "tensorflow.TensorShape" ], [ "torch.save", "torch.load", "torch.ones", "torch.tensor" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.cast", "tensorflow.squeeze", "numpy.sin", "tensorflow.tile", "tensorflow.matmul", "numpy.power", "tensorflow.split", "tensorflow.math.less", "tensorflow.not_equal", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.keras.layers.Activation", "tensorflow.range", "tensorflow.transpose", "tensorflow.math.sqrt", "tensorflow.reshape", "tensorflow.ones_like", "numpy.cos", "tensorflow.ones", "tensorflow.keras.layers.Dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
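A minimal sketch of the three input formats the XLM docstring above describes (a single tensor, an ordered list, or a dict of named tensors); the checkpoint name comes from the archive list in the same file, and downloading it at runtime is assumed:

from transformers import TFXLMModel, XLMTokenizer

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = TFXLMModel.from_pretrained("xlm-mlm-en-2048")
enc = tokenizer("Hello world", return_tensors="tf")

out = model(enc["input_ids"])                           # a single tensor with input_ids only
out = model([enc["input_ids"], enc["attention_mask"]])  # a list, in the order given in the docstring
out = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})  # a dict of named inputs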
ragatti/mne-python
[ "c6825a49c3452db616fc980d62d33f6dddf4cd65", "c6825a49c3452db616fc980d62d33f6dddf4cd65" ]
[ "mne/io/nirx/nirx.py", "mne/io/fieldtrip/tests/test_fieldtrip.py" ]
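A minimal usage sketch for the NIRX reader defined in the first file below; the recording path is a placeholder:

import mne

# read_raw_nirx expects the NIRX recording folder (hdr, wl1, wl2, ... files),
# not a single file; preload=True loads the raw intensity data into memory.
raw = mne.io.read_raw_nirx("path/to/nirx_recording", preload=True)
print(raw.info["sfreq"], len(raw.ch_names))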
[ "# Authors: Robert Luke <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom configparser import ConfigParser, RawConfigParser\nimport glob as glob\nimport re as re\n\nimport numpy as np\n\nfrom ..base import BaseRaw\nfrom ..constants import FIFF\nfrom ..meas_info import create_info, _format_dig_points\nfrom ...annotations import Annotations\nfrom ...transforms import apply_trans, _get_trans\nfrom ...utils import logger, verbose, fill_doc\n\n\n@fill_doc\ndef read_raw_nirx(fname, preload=False, verbose=None):\n \"\"\"Reader for a NIRX fNIRS recording.\n\n Parameters\n ----------\n fname : str\n Path to the NIRX data folder.\n %(preload)s\n %(verbose)s\n\n Returns\n -------\n raw : instance of RawNIRX\n A Raw object containing NIRX data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n return RawNIRX(fname, preload, verbose)\n\n\ndef _open(fname):\n return open(fname, 'r', encoding='latin-1')\n\n\n@fill_doc\nclass RawNIRX(BaseRaw):\n \"\"\"Raw object from a NIRX fNIRS file.\n\n Parameters\n ----------\n fname : str\n Path to the NIRX data folder.\n %(preload)s\n %(verbose)s\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n\n @verbose\n def __init__(self, fname, preload=False, verbose=None):\n from ...externals.pymatreader import read_mat\n from ...coreg import get_mni_fiducials # avoid circular import prob\n logger.info('Loading %s' % fname)\n\n # Check if required files exist and store names for later use\n files = dict()\n keys = ('dat', 'evt', 'hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',\n 'config.txt', 'probeInfo.mat')\n for key in keys:\n files[key] = glob.glob('%s/*%s' % (fname, key))\n if len(files[key]) != 1:\n raise RuntimeError('Expect one %s file, got %d' %\n (key, len(files[key]),))\n files[key] = files[key][0]\n\n # Read number of rows/samples of wavelength data\n last_sample = -1\n with _open(files['wl1']) as fid:\n for line in fid:\n last_sample += 1\n\n # Read participant information file\n inf = ConfigParser(allow_no_value=True)\n inf.read(files['inf'])\n inf = inf._sections['Subject Demographics']\n\n # Store subject information from inf file in mne format\n # Note: NIRX also records \"Study Type\", \"Experiment History\",\n # \"Additional Notes\", \"Contact Information\" and this information\n # is currently discarded\n subject_info = {}\n names = inf['name'].split()\n if len(names) > 0:\n subject_info['first_name'] = \\\n inf['name'].split()[0].replace(\"\\\"\", \"\")\n if len(names) > 1:\n subject_info['last_name'] = \\\n inf['name'].split()[-1].replace(\"\\\"\", \"\")\n if len(names) > 2:\n subject_info['middle_name'] = \\\n inf['name'].split()[-2].replace(\"\\\"\", \"\")\n # subject_info['birthday'] = inf['age'] # TODO: not formatted properly\n subject_info['sex'] = inf['gender'].replace(\"\\\"\", \"\")\n # Recode values\n if subject_info['sex'] in {'M', 'Male', '1'}:\n subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE\n elif subject_info['sex'] in {'F', 'Female', '2'}:\n subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE\n # NIRStar does not record an id, or handedness by default\n\n # Read header file\n # The header file isn't compliant with the configparser. 
So all the\n # text between comments must be removed before passing to parser\n with _open(files['hdr']) as f:\n hdr_str = f.read()\n hdr_str = re.sub('#.*?#', '', hdr_str, flags=re.DOTALL)\n hdr = RawConfigParser()\n hdr.read_string(hdr_str)\n\n # Check that the file format version is supported\n if not any(item == hdr['GeneralInfo']['NIRStar'] for item in\n [\"\\\"15.0\\\"\", \"\\\"15.2\\\"\"]):\n raise RuntimeError('MNE does not support this NIRStar version'\n ' (%s)' % (hdr['GeneralInfo']['NIRStar'],))\n\n # Parse required header fields\n\n # Extract frequencies of light used by machine\n fnirs_wavelengths = [int(s) for s in\n re.findall(r'(\\d+)',\n hdr['ImagingParameters']['Wavelengths'])]\n\n # Extract source-detectors\n sources = np.asarray([int(s) for s in re.findall(r'(\\d+)-\\d+:\\d+',\n hdr['DataStructure']['S-D-Key'])], int)\n detectors = np.asarray([int(s) for s in re.findall(r'\\d+-(\\d+):\\d+',\n hdr['DataStructure']['S-D-Key'])], int)\n\n # Determine if short channels are present and on which detectors\n if 'shortbundles' in hdr['ImagingParameters']:\n short_det = [int(s) for s in\n re.findall(r'(\\d+)',\n hdr['ImagingParameters']['ShortDetIndex'])]\n short_det = np.array(short_det, int)\n else:\n short_det = []\n\n # Extract sampling rate\n samplingrate = float(hdr['ImagingParameters']['SamplingRate'])\n\n # Read information about probe/montage/optodes\n # A word on terminology used here:\n # Sources produce light\n # Detectors measure light\n # Sources and detectors are both called optodes\n # Each source - detector pair produces a channel\n # Channels are defined as the midpoint between source and detector\n mat_data = read_mat(files['probeInfo.mat'], uint16_codec=None)\n requested_channels = mat_data['probeInfo']['probes']['index_c']\n src_locs = mat_data['probeInfo']['probes']['coords_s3'] / 100.\n det_locs = mat_data['probeInfo']['probes']['coords_d3'] / 100.\n ch_locs = mat_data['probeInfo']['probes']['coords_c3'] / 100.\n\n # These are all in MNI coordinates, so let's transform them to\n # the Neuromag head coordinate frame\n mri_head_t, _ = _get_trans('fsaverage', 'mri', 'head')\n src_locs = apply_trans(mri_head_t, src_locs)\n det_locs = apply_trans(mri_head_t, det_locs)\n ch_locs = apply_trans(mri_head_t, ch_locs)\n\n # Set up digitization\n dig = get_mni_fiducials('fsaverage', verbose=False)\n for fid in dig:\n fid['r'] = apply_trans(mri_head_t, fid['r'])\n fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD\n for ii, ch_loc in enumerate(ch_locs, 1):\n dig.append(dict(\n kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay\n r=ch_loc,\n ident=ii,\n coord_frame=FIFF.FIFFV_COORD_HEAD,\n ))\n dig = _format_dig_points(dig)\n del mri_head_t\n\n # Determine requested channel indices\n # The wl1 and wl2 files include all possible source - detector pairs.\n # But most of these are not relevant. 
We want to extract only the\n # subset requested in the probe file\n req_ind = np.array([], int)\n for req_idx in range(requested_channels.shape[0]):\n sd_idx = np.where((sources == requested_channels[req_idx][0]) &\n (detectors == requested_channels[req_idx][1]))\n req_ind = np.concatenate((req_ind, sd_idx[0]))\n req_ind = req_ind.astype(int)\n\n # Generate meaningful channel names\n def prepend(list, str):\n str += '{0}'\n list = [str.format(i) for i in list]\n return(list)\n snames = prepend(sources[req_ind], 'S')\n dnames = prepend(detectors[req_ind], '_D')\n sdnames = [m + str(n) for m, n in zip(snames, dnames)]\n sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames]\n sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames]\n chnames = [val for pair in zip(sd1, sd2) for val in pair]\n\n # Create mne structure\n info = create_info(chnames,\n samplingrate,\n ch_types='fnirs_raw')\n info.update(subject_info=subject_info, dig=dig)\n\n # Store channel, source, and detector locations\n # The channel location is stored in the first 3 entries of loc.\n # The source location is stored in the second 3 entries of loc.\n # The detector location is stored in the third 3 entries of loc.\n # NIRx NIRSite uses MNI coordinates.\n # Also encode the light frequency in the structure.\n for ch_idx2 in range(requested_channels.shape[0]):\n # Find source and store location\n src = int(requested_channels[ch_idx2, 0]) - 1\n info['chs'][ch_idx2 * 2]['loc'][3:6] = src_locs[src, :]\n info['chs'][ch_idx2 * 2 + 1]['loc'][3:6] = src_locs[src, :]\n # Find detector and store location\n det = int(requested_channels[ch_idx2, 1]) - 1\n info['chs'][ch_idx2 * 2]['loc'][6:9] = det_locs[det, :]\n info['chs'][ch_idx2 * 2 + 1]['loc'][6:9] = det_locs[det, :]\n # Store channel location\n # Channel locations for short channels are bodged,\n # for short channels use the source location.\n if det + 1 in short_det:\n info['chs'][ch_idx2 * 2]['loc'][:3] = src_locs[src, :]\n info['chs'][ch_idx2 * 2 + 1]['loc'][:3] = src_locs[src, :]\n else:\n info['chs'][ch_idx2 * 2]['loc'][:3] = ch_locs[ch_idx2, :]\n info['chs'][ch_idx2 * 2 + 1]['loc'][:3] = ch_locs[ch_idx2, :]\n info['chs'][ch_idx2 * 2]['loc'][9] = fnirs_wavelengths[0]\n info['chs'][ch_idx2 * 2 + 1]['loc'][9] = fnirs_wavelengths[1]\n raw_extras = {\"sd_index\": req_ind, 'files': files}\n\n super(RawNIRX, self).__init__(\n info, preload, filenames=[fname], last_samps=[last_sample],\n raw_extras=[raw_extras], verbose=verbose)\n\n # Read triggers from event file\n with _open(files['evt']) as fid:\n t = [re.findall(r'(\\d+)', line) for line in fid]\n onset = np.zeros(len(t), float)\n duration = np.zeros(len(t), float)\n description = [''] * len(t)\n for t_idx in range(len(t)):\n binary_value = ''.join(t[t_idx][1:])[::-1]\n trigger_frame = float(t[t_idx][0])\n onset[t_idx] = (trigger_frame) * (1.0 / samplingrate)\n duration[t_idx] = 1.0 # No duration info stored in files\n description[t_idx] = int(binary_value, 2) * 1.\n annot = Annotations(onset, duration, description)\n self.set_annotations(annot)\n\n def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):\n \"\"\"Read a segment of data from a file.\n\n The NIRX machine records raw data as two different wavelengths.\n The returned data interleaves the wavelengths.\n \"\"\"\n sdindex = self._raw_extras[fi]['sd_index']\n\n wls = [\n _read_csv_rows_cols(\n self._raw_extras[fi]['files'][key],\n start, stop, sdindex, len(self.ch_names) // 2).T\n for key in ('wl1', 'wl2')\n ]\n\n # TODO: Make this more efficient by 
only indexing above what we need.\n # For now let's just construct the full data matrix and index.\n # Interleave wavelength 1 and 2 to match channel names:\n this_data = np.zeros((len(wls[0]) * 2, stop - start))\n this_data[0::2, :] = wls[0]\n this_data[1::2, :] = wls[1]\n data[:] = this_data[idx]\n\n return data\n\n\ndef _read_csv_rows_cols(fname, start, stop, cols, n_cols):\n # The following is equivalent to:\n # x = pandas.read_csv(fname, header=None, usecols=cols, skiprows=start,\n # nrows=stop - start, delimiter=' ')\n # But does not require Pandas, and is hopefully fast enough, as the\n # reading should be done in C (CPython), as should the conversion to float\n # (NumPy).\n x = np.zeros((stop - start, n_cols))\n with _open(fname) as fid:\n for li, line in enumerate(fid):\n if li >= start:\n if li >= stop:\n break\n x[li - start] = np.array(line.split(), float)[cols]\n return x\n", "# -*- coding: UTF-8 -*-\n# Authors: Thomas Hartmann <[email protected]>\n# Dirk Gütlin <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport mne\nimport os.path\nimport pytest\nimport copy\nimport itertools\nimport numpy as np\nfrom mne.datasets import testing\nfrom mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events\nfrom mne.utils import _check_pandas_installed, requires_h5py\nfrom mne.io.fieldtrip.tests.helpers import (check_info_fields, get_data_paths,\n get_raw_data, get_epochs,\n get_evoked, _has_h5py,\n pandas_not_found_warning_msg,\n get_raw_info, check_data,\n assert_warning_in_record)\n\n# missing: KIT: biggest problem here is that the channels do not have the same\n# names.\n# EGI: no calibration done in FT. so data is VERY different\n\nall_systems_raw = ['neuromag306', 'CTF', 'CNT', 'BTI', 'eximia']\nall_systems_epochs = ['neuromag306', 'CTF', 'CNT']\nall_versions = ['v7', 'v73']\nuse_info = [True, False]\nall_test_params_raw = list(itertools.product(all_systems_raw, all_versions,\n use_info))\nall_test_params_epochs = list(itertools.product(all_systems_epochs,\n all_versions,\n use_info))\n\nno_info_warning = {'expected_warning': RuntimeWarning,\n 'match': NOINFO_WARNING}\n\n\[email protected]_testing_data\n# Reading the sample CNT data results in a RuntimeWarning because it cannot\n# parse the measurement date. We need to ignore that warning.\[email protected]('ignore:.*parse meas date.*:RuntimeWarning')\[email protected]('ignore:.*number of bytes.*:RuntimeWarning')\[email protected]('cur_system, version, use_info',\n all_test_params_epochs)\ndef test_read_evoked(cur_system, version, use_info):\n \"\"\"Test comparing reading an Evoked object and the FieldTrip version.\"\"\"\n test_data_folder_ft = get_data_paths(cur_system)\n mne_avg = get_evoked(cur_system)\n if use_info:\n info = get_raw_info(cur_system)\n pytestwarning = {'expected_warning': None}\n else:\n info = None\n pytestwarning = no_info_warning\n\n cur_fname = os.path.join(test_data_folder_ft,\n 'averaged_%s.mat' % (version,))\n if version == 'v73' and not _has_h5py():\n with pytest.raises(ImportError):\n mne.io.read_evoked_fieldtrip(cur_fname, info)\n return\n\n with pytest.warns(**pytestwarning):\n avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info)\n\n mne_data = mne_avg.data[:, :-1]\n ft_data = avg_ft.data\n\n check_data(mne_data, ft_data, cur_system)\n check_info_fields(mne_avg, avg_ft, use_info)\n\n\[email protected]_testing_data\n# Reading the sample CNT data results in a RuntimeWarning because it cannot\n# parse the measurement date. 
We need to ignore that warning.\[email protected]('ignore:.*parse meas date.*:RuntimeWarning')\[email protected]('ignore:.*number of bytes.*:RuntimeWarning')\[email protected]('cur_system, version, use_info',\n all_test_params_epochs)\n# Strange, non-deterministic Pandas errors:\n# \"ValueError: cannot expose native-only dtype 'g' in non-native\n# byte order '<' via buffer interface\"\[email protected](os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true',\n reason='Pandas problem on Azure CI')\ndef test_read_epochs(cur_system, version, use_info):\n \"\"\"Test comparing reading an Epochs object and the FieldTrip version.\"\"\"\n pandas = _check_pandas_installed(strict=False)\n has_pandas = pandas is not False\n test_data_folder_ft = get_data_paths(cur_system)\n mne_epoched = get_epochs(cur_system)\n if use_info:\n info = get_raw_info(cur_system)\n pytestwarning = {'expected_warning': None}\n else:\n info = None\n pytestwarning = no_info_warning\n\n cur_fname = os.path.join(test_data_folder_ft,\n 'epoched_%s.mat' % (version,))\n if has_pandas:\n if version == 'v73' and not _has_h5py():\n with pytest.raises(ImportError):\n mne.io.read_epochs_fieldtrip(cur_fname, info)\n return\n with pytest.warns(**pytestwarning):\n epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info)\n assert isinstance(epoched_ft.metadata, pandas.DataFrame)\n else:\n with pytest.warns(None) as warn_record:\n if version == 'v73' and not _has_h5py():\n with pytest.raises(ImportError):\n mne.io.read_epochs_fieldtrip(cur_fname, info)\n return\n epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info)\n assert epoched_ft.metadata is None\n assert_warning_in_record(pandas_not_found_warning_msg, warn_record)\n if pytestwarning['expected_warning'] is not None:\n assert_warning_in_record(pytestwarning['match'], warn_record)\n\n mne_data = mne_epoched.get_data()[:, :, :-1]\n ft_data = epoched_ft.get_data()\n\n check_data(mne_data, ft_data, cur_system)\n check_info_fields(mne_epoched, epoched_ft, use_info)\n\n\[email protected]_testing_data\n# Reading the sample CNT data results in a RuntimeWarning because it cannot\n# parse the measurement date. 
We need to ignore that warning.\[email protected]('ignore:.*parse meas date.*:RuntimeWarning')\[email protected]('ignore:.*number of bytes.*:RuntimeWarning')\[email protected]('cur_system, version, use_info', all_test_params_raw)\ndef test_raw(cur_system, version, use_info):\n \"\"\"Test comparing reading a raw fiff file and the FieldTrip version.\"\"\"\n # Load the raw fiff file with mne\n test_data_folder_ft = get_data_paths(cur_system)\n raw_fiff_mne = get_raw_data(cur_system, drop_extra_chs=True)\n if use_info:\n info = get_raw_info(cur_system)\n pytestwarning = {'expected_warning': None}\n else:\n info = None\n pytestwarning = no_info_warning\n\n cur_fname = os.path.join(test_data_folder_ft,\n 'raw_%s.mat' % (version,))\n\n if version == 'v73' and not _has_h5py():\n with pytest.raises(ImportError):\n mne.io.read_raw_fieldtrip(cur_fname, info)\n return\n with pytest.warns(**pytestwarning):\n raw_fiff_ft = mne.io.read_raw_fieldtrip(cur_fname, info)\n\n if cur_system == 'BTI' and not use_info:\n raw_fiff_ft.drop_channels(['MzA', 'MxA', 'MyaA',\n 'MyA', 'MxaA', 'MzaA'])\n\n if cur_system == 'eximia' and not use_info:\n raw_fiff_ft.drop_channels(['TRIG2', 'TRIG1', 'GATE'])\n\n # Check that the data was loaded correctly\n check_data(raw_fiff_mne.get_data(),\n raw_fiff_ft.get_data(),\n cur_system)\n\n # Check info field\n check_info_fields(raw_fiff_mne, raw_fiff_ft, use_info)\n\n\[email protected]_testing_data\ndef test_load_epoched_as_raw():\n \"\"\"Test whether exception is thrown when loading epochs as raw.\"\"\"\n test_data_folder_ft = get_data_paths('neuromag306')\n info = get_raw_info('neuromag306')\n cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')\n\n with pytest.raises(RuntimeError):\n mne.io.read_raw_fieldtrip(cur_fname, info)\n\n\[email protected]_testing_data\ndef test_invalid_trialinfocolumn():\n \"\"\"Test for exceptions when using wrong values for trialinfo parameter.\"\"\"\n test_data_folder_ft = get_data_paths('neuromag306')\n info = get_raw_info('neuromag306')\n cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')\n\n with pytest.raises(ValueError):\n mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=-1)\n\n with pytest.raises(ValueError):\n mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=3)\n\n\[email protected]_testing_data\ndef test_create_events():\n \"\"\"Test 2dim trialinfo fields.\"\"\"\n from mne.externals.pymatreader import read_mat\n\n test_data_folder_ft = get_data_paths('neuromag306')\n cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')\n original_data = read_mat(cur_fname, ['data', ])\n\n new_data = copy.deepcopy(original_data)\n new_data['trialinfo'] = np.array([[1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4]])\n\n with pytest.raises(ValueError):\n _create_events(new_data, -1)\n\n for cur_col in np.arange(4):\n evts = _create_events(new_data, cur_col)\n assert np.all(evts[:, 2] == cur_col + 1)\n\n with pytest.raises(ValueError):\n _create_events(new_data, 4)\n\n\[email protected]_testing_data\[email protected]('version', all_versions)\n@requires_h5py\ndef test_one_channel_elec_bug(version):\n \"\"\"Test if loading data having only one elec in the elec field works.\"\"\"\n fname = os.path.join(mne.datasets.testing.data_path(), 'fieldtrip',\n 'one_channel_elec_bug_data_%s.mat' % (version, ))\n\n with pytest.warns(**no_info_warning):\n mne.io.read_raw_fieldtrip(fname, info=None)\n\n\[email protected]_testing_data\n# Reading the sample CNT data results in a RuntimeWarning because it cannot\n# parse 
the measurement date. We need to ignore that warning.\[email protected]('ignore:.*parse meas date.*:RuntimeWarning')\[email protected]('ignore:.*number of bytes.*:RuntimeWarning')\[email protected]('version', all_versions)\[email protected]('type', ['averaged', 'epoched', 'raw'])\n@requires_h5py\ndef test_throw_exception_on_cellarray(version, type):\n \"\"\"Test for a meaningful exception when the data is a cell array.\"\"\"\n fname = os.path.join(get_data_paths('cellarray'),\n '%s_%s.mat' % (type, version))\n\n info = get_raw_info('CNT')\n\n with pytest.raises(RuntimeError, match='Loading of data in cell arrays '\n 'is not supported'):\n if type == 'averaged':\n mne.read_evoked_fieldtrip(fname, info)\n elif type == 'epoched':\n mne.read_epochs_fieldtrip(fname, info)\n elif type == 'raw':\n mne.io.read_raw_fieldtrip(fname, info)\n\n\[email protected]_testing_data\ndef test_evoked_with_missing_channels():\n \"\"\"Test _create_info on evoked data when channels are missing from info.\"\"\"\n cur_system = 'neuromag306'\n test_data_folder_ft = get_data_paths(cur_system)\n info = get_raw_info(cur_system)\n del info['chs'][1:20]\n info._update_redundant()\n\n with pytest.warns(RuntimeWarning):\n mne.read_evoked_fieldtrip(\n os.path.join(test_data_folder_ft, 'averaged_v7.mat'), info)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.all", "numpy.arange", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
quocdat32461997/Vacation2AI
[ "8fc92022144e35ecc79878d71d05c0e8f15b20fb" ]
[ "ai-course/homework/hw4/optimizers.py" ]
[ "# optimizers.py\n\nimport copy\nimport numpy as np\n\nfrom losses import *\n\n\nclass Optimizers(object):\n def __init__(self):\n self.loss = 0\n pass\n\n def compute_gradient(self):\n pass\n\n def backward(self):\n pass\n\n\nclass GradientDescent(Optimizers):\n def __init__(self,\n model,\n loss_fn,\n learning_rate=0.01,\n momentum=0.0,\n nesterov=False):\n # Args:\n # - learning_rate: float\n # - momentum: float\n # Momentum for SGD\n # - nesterov: bool\n # Flag to usee Nesterov momentum. If True, momentum must not be 0.0\n super(GradientDescent, self).__init__()\n\n self.loss_fn = Losses.get_loss_fn(loss_fn) if isinstance(loss_fn, str) else loss_fn()\n self.learning_rate = learning_rate\n self.momentum = momentum\n self.nesterov = nesterov\n\n self.model = copy.copy(model)\n self.model.coef_ = np.zeros_like(self.model.coef_)\n self.model.intercept_ = np.expand_dims(np.zeros_like(self.model.intercept_), axis=0)\n\n # initialize features\n self.features = np.concatenate((self.model.coef_, self.model.intercept_), axis=-1)\n self.gradients = self.features.copy()\n\n def compute_gradient(self, preds, inputs, labels, **kwargs):\n # compute momentum-enhanced gradients in previous step\n self.gradients *= self.momentum\n\n # if nesterov momentum, append prev-step gradients to model weights\n if self.nesterov:\n self.model.coef_ += self.gradients[..., :-1] # ['weight']\n self.model.intercept_ += self.gradients[..., -1] # ['bias']\n\n # compute gradients at current step\n gradients = self.loss_fn.gradient(self.model, inputs, labels)\n self.gradients -= self.learning_rate * gradients\n\n # update weights and bias\n self.model.coef_ = self.features[..., :-1] = self.features[..., :-1] + self.gradients[..., :-1]\n self.model.intercept_ = self.features[..., -1] = self.features[..., -1] + self.gradients[..., -1]\n\n def backward(self, preds, inputs, labels, **kwargs):\n # compute loss\n self.loss = self.loss_fn(self.model, inputs, labels)\n\n # compute gradients\n self.compute_gradient(preds, inputs, labels)\n\nclass SGD(GradientDescent):\n def __init__(self,\n model,\n loss_fn,\n learning_rate=0.01,\n momentum=0.0,\n nesterov=False):\n super(SGD, self).__init__(model=model,\n loss_fn=loss_fn,\n learning_rate=learning_rate,\n momentum=momentum,\n nesterov=nesterov)\n pass\n\n\nclass AdaGrad(Optimizers):\n def __init__(self,\n model,\n loss_fn,\n learning_rate=0.01):\n # Args:\n # - model: scikit-learn model\n # - loss_fn: loss function\n # - learning_rate: float\n super(AdaGrad, self).__init__()\n self.loss_fn = Losses.get_loss_fn(loss_fn) if isinstance(loss_fn, str) else loss_fn()\n self.learning_rate = learning_rate\n\n self.model = copy.copy(model)\n self.model.coef_ = np.zeros_like(self.model.coef_)\n self.model.intercept_ = np.expand_dims(np.zeros_like(self.model.intercept_), axis=0)\n\n # initialize gradients\n self.gradients = np.concatenate((self.model.coef_, self.model.intercept_), axis=-1)\n self.accumulated_gradients = self.gradients.copy()\n self.r = 1e-8\n\n def _get_lr(self):\n # Function to produce learning-rates corresponding to past gradients\n # square and accumulate gradients\n self.accumulated_gradients += np.square(self.gradients)\n\n return self.learning_rate / np.sqrt(self.accumulated_gradients + self.r)\n\n def compute_gradient(self, preds, inputs, labels, **kwargs):\n # compute gradients at current step\n self.gradients = self.loss_fn.gradient(self.model, inputs, labels)\n\n # update lr\n lr = self._get_lr()\n\n # update weights and bias\n self.model.coef_ -= lr[..., :-1] * 
self.gradients[..., :-1]\n self.model.intercept_ -= lr[..., -1] * self.gradients[..., -2:-1]\n\n def backward(self, preds, inputs, labels, **kwargs):\n # compute loss\n self.loss = self.loss_fn(self.model, inputs, labels)\n\n # compute gradients\n self.compute_gradient(preds, inputs, labels)\n\n\nclass Adam(Optimizers):\n def __init__(self,\n model,\n loss_fn,\n learning_rate=0.01,\n beta_1=0.9,\n beta_2=0.999):\n super(Adam, self).__init__()\n\n self.loss_fn = Losses.get_loss_fn(loss_fn) if isinstance(loss_fn, str) else loss_fn()\n self.learning_rate = learning_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n\n self.model = copy.copy(model)\n self.model.coef_ = np.zeros_like(self.model.coef_)\n self.model.intercept_ = np.expand_dims(np.zeros_like(self.model.intercept_), axis=0)\n\n # initialize features\n self.gradients = np.concatenate((self.model.coef_, self.model.intercept_), axis=-1)\n self.beta_1_step, self.beta_2_step = 0, 0\n self.r = 1e-8\n\n def _get_lr(self, beta_2_bias):\n return self.learning_rate / np.sqrt(beta_2_bias + self.r)\n\n def compute_gradient(self, preds, inputs, labels, iter, **kwargs):\n # compute gradients at current step\n self.gradients = self.loss_fn.gradient(self.model, inputs, labels)\n\n # compute beta_1_step and beta_2_step\n self.beta_1_step = self.beta_1 * self.beta_1_step + (1 - self.beta_1) * self.gradients\n self.beta_2_step = self.beta_2 * self.beta_2_step + (1 - self.beta_2) * np.square(self.gradients)\n\n # compute bias ccrrection\n bias_1 = self.beta_1_step / (1 - pow(self.beta_1, iter + 1))\n bias_2 = self.beta_2_step / (1 - pow(self.beta_2, iter + 1))\n\n # get learning rate\n lr = self._get_lr(bias_2)\n\n # update weights and bias\n self.model.coef_ -= lr[..., :-1] * bias_1[..., :-1]\n self.model.intercept_ -= lr[..., -1] * bias_1[..., -1]\n\n def backward(self, preds, inputs, labels, **kwargs):\n # compute loss\n self.loss = self.loss_fn(self.model, inputs, labels)\n\n # compute gradients\n self.compute_gradient(preds, inputs, labels, iter=kwargs['iter'])\n" ]
[ [ "numpy.concatenate", "numpy.square", "numpy.zeros_like", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dpfried/phrasenode
[ "dcca59d617937f9e90de15e630690dcbc29e92f4", "dcca59d617937f9e90de15e630690dcbc29e92f4" ]
[ "third-party/gtd/gtd/codalab.py", "third-party/gtd/gtd/text.py" ]
[ "\"\"\"Tools for working with CodaLab.\"\"\"\nimport pickle as pickle\nimport json\nimport os\nimport platform\nimport shutil\nimport sys\nimport tempfile\nfrom contextlib import contextmanager\n\nimport matplotlib.image as mpimg\nfrom gtd.io import shell\n\n__author__ = 'kelvinguu'\n\n\n# need to be specified by user\nworksheet = None\nsite = None\n\n\ndef get_uuids():\n \"\"\"List all bundle UUIDs in the worksheet.\"\"\"\n result = shell('cl ls -w {} -u'.format(worksheet))\n uuids = result.split('\\n')\n uuids = uuids[1:-1] # trim non uuids\n return uuids\n\n\n@contextmanager\ndef open_file(uuid, path):\n \"\"\"Get the raw file content within a particular bundle at a particular path.\n\n Path have no leading slash.\n \"\"\"\n # create temporary file just so we can get an unused file path\n f = tempfile.NamedTemporaryFile()\n f.close() # close and delete right away\n fname = f.name\n\n # download file to temporary path\n cmd ='cl down -o {} -w {} {}/{}'.format(fname, worksheet, uuid, path)\n try:\n shell(cmd)\n except RuntimeError:\n try:\n os.remove(fname) # if file exists, remove it\n except OSError:\n pass\n raise IOError('Failed to open file {}/{}'.format(uuid, path))\n\n f = open(fname)\n yield f\n f.close()\n os.remove(fname) # delete temp file\n\n\nclass Bundle(object):\n def __init__(self, uuid):\n self.uuid = uuid\n\n def __getattr__(self, item):\n \"\"\"\n Load attributes: history, meta on demand\n \"\"\"\n if item == 'history':\n try:\n with open_file(self.uuid, 'history.cpkl') as f:\n value = pickle.load(f)\n except IOError:\n value = {}\n\n elif item == 'meta':\n try:\n with open_file(self.uuid, 'meta.json') as f:\n value = json.load(f)\n except IOError:\n value = {}\n\n # load codalab info\n fields = ('uuid', 'name', 'bundle_type', 'state', 'time', 'remote')\n cmd = 'cl info -w {} -f {} {}'.format(worksheet, ','.join(fields), self.uuid)\n result = shell(cmd)\n info = dict(list(zip(fields, result.split())))\n value.update(info)\n\n elif item in ('stderr', 'stdout'):\n with open_file(self.uuid, item) as f:\n value = f.read()\n\n else:\n raise AttributeError(item)\n\n self.__setattr__(item, value)\n return value\n\n def __repr__(self):\n return self.uuid\n\n def load_img(self, img_path):\n \"\"\"\n Return an image object that can be immediately plotted with matplotlib\n \"\"\"\n with open_file(self.uuid, img_path) as f:\n return mpimg.imread(f)\n\n\ndef download_logs(bundle, log_dir):\n if bundle.meta['bundle_type'] != 'run' or bundle.meta['state'] == 'queued':\n print('Skipped {}\\n'.format(bundle.uuid))\n return\n\n if isinstance(bundle, str):\n bundle = Bundle(bundle)\n\n uuid = bundle.uuid\n name = bundle.meta['name']\n log_path = os.path.join(log_dir, '{}_{}'.format(name, uuid))\n\n cmd ='cl down -o {} -w {} {}/logs'.format(log_path, worksheet, uuid)\n\n print(uuid)\n try:\n shell(cmd, verbose=True)\n except RuntimeError:\n print('Failed to download', bundle.uuid)\n print()\n\n\ndef report(render, uuids=None, reverse=True, limit=None):\n if uuids is None:\n uuids = get_uuids()\n\n if reverse:\n uuids = uuids[::-1]\n\n if limit is not None:\n uuids = uuids[:limit]\n\n for uuid in uuids:\n bundle = Bundle(uuid)\n try:\n render(bundle)\n except Exception:\n print('Failed to render', bundle.uuid)\n\n\ndef monitor_jobs(logdir, uuids=None, reverse=True, limit=None):\n if os.path.exists(logdir):\n delete = input('Overwrite existing logdir? 
({})'.format(logdir))\n if delete == 'y':\n shutil.rmtree(logdir)\n os.makedirs(logdir)\n else:\n os.makedirs(logdir)\n print('Using logdir:', logdir)\n\n report(lambda bd: download_logs(bd, logdir), uuids, reverse, limit)\n\n\ndef tensorboard(logdir):\n print('Run this in bash:')\n shell('tensorboard --logdir={}'.format(logdir), verbose=True, debug=True)\n print('\\nGo to TensorBoard: http://localhost:6006/')\n\n\ndef add_to_sys_path(path):\n \"\"\"Add a path to the system PATH.\"\"\"\n sys.path.insert(0, path)\n\n\ndef configure_matplotlib():\n \"\"\"Set Matplotlib backend to 'Agg', which is necessary on CodaLab docker image.\"\"\"\n import warnings\n import matplotlib\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n matplotlib.use('Agg') # needed when running from server\n\n\ndef in_codalab():\n \"\"\"Check if we are running inside CodaLab Docker container or not.\"\"\"\n # TODO: below is a total hack. If the OS is not a Mac, we assume we're on CodaLab.\n return platform.system() != 'Darwin'\n\n\ndef upload(full_path, bundle_name=None, excludes='*.ipynb .git .ipynb_checkpoints .ignore'):\n \"\"\"\n Upload a file or directory to the codalab worksheet\n Args:\n full_path: Path + filename of file to upload\n bundle_name: Name to upload file/directory as. I\n \"\"\"\n directory, filename = os.path.split(full_path)\n if bundle_name is None:\n bundle_name = filename\n shell('cl up -n {} -w {} {} -x {}'.format(bundle_name, worksheet, full_path, excludes), verbose=True)\n\n\ndef launch_job(job_name, cmd,\n dependencies=tuple(),\n queue='john', image='kelvinguu/gtd:1.0',\n memory=None, cpus='5',\n network=False,\n debug=False, tail=False):\n \"\"\"Launch a job on CodaLab (optionally upload code that the job depends on).\n\n Args:\n job_name: name of the job\n cmd: command to execute\n dependencies: list of other bundles that we depend on\n debug: if True, prints SSH commands, but does not execute them\n tail: show the streaming output returned by CodaLab once it launches the job\n \"\"\"\n print('Remember to set up SSH tunnel and LOG IN through the command line before calling this.')\n options = '-v -n {} -w {} --request-queue {} --request-docker-image {} --request-cpus {}'.format(\n job_name, worksheet, queue, image, cpus)\n\n if memory:\n options += ' --request-memory {}'.format(memory)\n if network:\n options += ' --request-network'\n\n dep_str = ' '.join(['{0}:{0}'.format(dep) for dep in dependencies])\n full_cmd = \"cl run {} {} '{}'\".format(options, dep_str, cmd)\n if tail:\n full_cmd += ' -t'\n shell(full_cmd, verbose=True, debug=debug)\n\n\nif in_codalab():\n configure_matplotlib()\n", "import re\nimport logging\nimport numpy as np\nfrom gtd.utils import memoize\n\n\n@memoize\ndef get_spacy():\n \"\"\"\n Loads the spaCy english processor.\n\n Tokenizing, Parsing, and NER are enabled. 
All other features are disabled.\n\n Returns:\n A spaCy Language object for English\n \"\"\"\n logging.info('Loading spaCy...')\n import spacy.en\n nlp = spacy.en.English(tagger=False, parser=True, matcher=False)\n return nlp\n\n\nclass NER(object):\n def __init__(self):\n self.processor = get_spacy()\n\n def __call__(self, text):\n \"\"\"Given a unicode string, return a tuple of the named entities found inside.\"\"\"\n if not isinstance(text, str):\n text = str(text)\n doc = self.processor(text)\n return doc.ents\n\n\nclass Trie(object):\n\n def __init__(self, token, parent, sink=False):\n self.token = token\n self.parent = parent\n self.sink = sink\n self.children = {}\n\n def __contains__(self, phrase):\n if phrase[0] == self.token:\n if len(phrase) == 1:\n # On our last word. Must be a sink to match.\n return self.sink\n else:\n # doesn't match\n return False\n\n suffix = phrase[1:]\n for child in list(self.children.values()):\n if suffix in child:\n return True\n\n def ancestors(self):\n if self.parent is None:\n return []\n anc = self.parent.ancestors()\n anc.append(self.token)\n return anc\n\n\nclass PhraseMatcher(object):\n def __init__(self, phrases):\n \"\"\"Construct a phrase matcher.\n\n Args:\n phrases (List[Tuple[str]]): a list of phrases to match, where each phrase is a tuple of strings\n \"\"\"\n # construct Trie\n root = Trie('ROOT', None)\n for phrase in phrases:\n current = root\n for token in phrase:\n if token not in current.children:\n current.children[token] = Trie(token, current)\n current = current.children[token]\n current.sink = True # mark last node as a sink\n\n self.root = root\n self.phrases = phrases\n\n def has_phrase(self, phrase):\n \"\"\"Check if a particular phrase is matched by the matcher.\n\n Args:\n phrase (tuple[str])\n \"\"\"\n return ['ROOT'] + phrase in self.root\n\n def match(self, tokens):\n \"\"\"A list of matches.\n\n Args:\n tokens (list[str]): a list of tokens\n\n Returns:\n list[tuple[str, int, int]]: A list of (match, start, end) triples. 
Each `match` is a tuple of tokens.\n `start` and `end` are word offsets.\n \"\"\"\n root = self.root\n candidates = [root]\n\n matches = []\n for i, token in enumerate(tokens):\n\n # extend candidates or prune failed candidates\n new_candidates = []\n for cand in candidates:\n if token in cand.children:\n new_candidates.append(cand.children[token]) # move to child\n candidates = new_candidates\n candidates.append(root) # always add root\n\n for cand in candidates:\n if cand.sink:\n match = tuple(cand.ancestors())\n end = i + 1\n start = end - len(match)\n matches.append((match, start, end))\n\n return matches\n\n\n# first_cap_re = re.compile('(.)([A-Z][a-z]+)')\nfirst_cap_re = re.compile('([^-_])([A-Z][a-z]+)')\nall_cap_re = re.compile('([a-z0-9])([A-Z])')\n\n\ndef camel_to_snake_case(name):\n \"\"\"Convert camelCase to snake_case (Python).\"\"\"\n s1 = first_cap_re.sub(r'\\1_\\2', name)\n return all_cap_re.sub(r'\\1_\\2', s1).lower()\n\n\ndef longest_common_subsequence(X, Y):\n # https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_subsequence#Computing_the_length_of_the_LCS\n\n def LCS(X, Y):\n m = len(X)\n n = len(Y)\n # An (m+1) times (n+1) matrix\n C = [[0] * (n + 1) for _ in range(m + 1)]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if X[i - 1] == Y[j - 1]:\n C[i][j] = C[i - 1][j - 1] + 1\n else:\n C[i][j] = max(C[i][j - 1], C[i - 1][j])\n return C\n\n def backTrack(C, X, Y, i, j):\n if i == 0 or j == 0:\n return []\n elif X[i - 1] == Y[j - 1]:\n return backTrack(C, X, Y, i - 1, j - 1) + [X[i - 1]]\n else:\n if C[i][j - 1] > C[i - 1][j]:\n return backTrack(C, X, Y, i, j - 1)\n else:\n return backTrack(C, X, Y, i - 1, j)\n\n m = len(X)\n n = len(Y)\n C = LCS(X, Y)\n return backTrack(C, X, Y, m, n)\n\n\ndef get_ngrams(s, n):\n \"\"\"Get n-grams for s.\n\n >>> s = [1, 2, 3, 4]\n >>> get_ngrams(s, 2)\n [(1, 2), (2, 3), (3, 4)]\n >>> get_ngrams(s, 1)\n [(1,), (2,), (3,), (4,)]\n >>> get_ngrams(s, 4)\n [(1, 2, 3, 4)]\n \"\"\"\n assert n <= len(s)\n assert n >= 1\n return [tuple(s[k:k + n]) for k in range(len(s) + 1 - n)]\n\n\ndef ngram_precision_recall(reference, candidate, n=None):\n if n is None:\n # Take the average over 1 through 4 grams.\n prs = []\n for m in [1, 2, 3, 4]:\n prs.append(ngram_precision_recall(reference, candidate, m))\n ps, rs = list(zip(*prs))\n return np.mean(ps), np.mean(rs)\n\n ref_set = set(get_ngrams(reference, n))\n can_set = set(get_ngrams(candidate, n))\n correct = float(len(ref_set & can_set))\n rec = correct / len(ref_set)\n prec = correct / len(can_set)\n return prec, rec" ]
[ [ "matplotlib.use", "matplotlib.image.imread" ], [ "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Tristanovsk/trios
[ "d84a498f0b562d7a792a4588e4d983be885f24b9", "d84a498f0b562d7a792a4588e4d983be885f24b9" ]
[ "exe/process_test_setup.py", "exe/borges/db_borges.py" ]
[ "import glob\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\nfrom trios.utils.sunposition import sunpos\nfrom trios.utils import utils as u\nfrom trios.process import *\n\n\ncoordf = glob.glob(\"/DATA/OBS2CO/data/info/mesures_in_situ.csv\")[0]\ncoords = pd.read_csv(coordf, sep=';')\nawrfiles = glob.glob(\"/DATA/OBS2CO/data/trios/raw/aw*idpr*.csv\")\n\nawrfiles = glob.glob(\"/DATA/OBS2CO/data/trios/test_setup/raw/aw*idpr*.csv\")\nswrfiles = glob.glob(\"/DATA/OBS2CO/data/trios/raw/Lu0*idpr*.csv\")\n\nidpr='167'\n\nc = coords[coords.ID_prel == int(idpr)] # .values[0]\nlat = c['Lat'].values[0]\nlon = c['Lon'].values[0]\nalt = 0 #c['Altitude']\nname = c['ID_lac'].values[0]\n\n# -----------------------------------------------\n# SWR processing\n# -----------------------------------------------\n\nswr = u.swr_data(idpr, swrfiles)\nif swr.file:\n df, wl = swr.reader(lat, lon, alt)\n Rrs_swr = swr_process(df, wl).process()\n\n# -----------------------------------------------\n# AWR processing\n# -----------------------------------------------\nawr = u.awr_data(idpr, awrfiles)\n\nindex_idx=[2,0,1]\n\nd=u.data(index_idx)\nEd, wl_Ed = d.load_csv(awr.Edf)\nLsky, wl_Lsky = d.load_csv(awr.Lskyf)\nLt0, wl_Lt = d.load_csv(awr.Ltf)\n\n# ''' interpolate Ed and Lsky data upon Lt wavelength'''\nwl = wl_Lt\nLt0.columns = pd.MultiIndex.from_tuples(zip(['Lt'] * len(wl), wl), names=['param', 'wl'])\nintEd = interp1d(wl_Ed, Ed.values, fill_value='extrapolate')(wl)\nnewEd = pd.DataFrame(index=Ed.index,\n columns=pd.MultiIndex.from_tuples(zip(['Ed'] * len(wl), wl), names=['param', 'wl']),\n data=intEd)\nintLsky = interp1d(wl_Lsky, Lsky.values, fill_value='extrapolate')(wl)\nnewLsky = pd.DataFrame(index=Lsky.index, columns=pd.MultiIndex.from_tuples(zip(['Lsky'] * len(wl), wl),\n names=['param', 'wl']), data=intLsky)\n\nawr = awr_process()\nws=[2]\nfig, axs = plt.subplots(nrows=3, ncols=4, figsize=(16, 10))\nfig.subplots_adjust(left=0.1, right=0.9, hspace=.5, wspace=0.65)\n\ni=0\nfor azi, Lt1 in Lt0.groupby(level=2):\n for vza,Lt in Lt1.groupby(level=1):\n ax = axs.flat[i]\n i=i+1\n print(azi,vza)\n\n Lsky = newLsky.loc[(newLsky.index.get_level_values(1) == vza) & (newLsky.index.get_level_values(2) == azi)]\n Ed = newEd.loc[(newEd.index.get_level_values(1) == vza) & (newEd.index.get_level_values(2) == azi)]\n\n Lsky_idx = Lsky.index\n Ed_idx= Ed.index\n Lt_idx = Lt.index\n Lsky.reset_index(level=[1,2],inplace=True)\n Ed.reset_index(level=[1,2],inplace=True)\n Lt.reset_index(level=[1,2],inplace=True)\n\n # merge sensor data on time\n df = pd.merge_asof(Lt, Ed, left_index=True, right_index=True, tolerance=pd.Timedelta(\"2 seconds\"),\n direction=\"nearest\")\n df = pd.merge_asof(df, Lsky, left_index=True, right_index=True, tolerance=pd.Timedelta(\"2 seconds\"),\n direction=\"nearest\")\n\n # add solar angle data and idpr\n # compute solar angle (mean between fisrt and last aqcuisition time\n df['sza', ''] = np.nan\n for index, row in df.iterrows():\n # print index\n sza = sunpos(index, lat, lon, alt)[1]\n df.at[index, 'sza'] = sza\n\n rho_h = awr.get_rho_values([df.sza.min()],[vza],[azi],wl=wl)\n rho15 = awr.get_rho_mobley(awr.rhoM2015,[df.sza.min()],[vza],[azi],[ws])\n rho99 = awr.get_rho_mobley(awr.rhoM1999,[df.sza.min()],[vza],[azi],[ws])\n\n Rrs_h =(df.loc[:,'Lt'] -rho_h*df.loc[:,'Lsky'])/ df.loc[:,'Ed']\n Rrs15 = (df.loc[:,'Lt'] -rho15*df.loc[:,'Lsky'])/ df.loc[:,'Ed']\n\n Rrs99 = (df.loc[:,'Lt'] -rho99*df.loc[:,'Lsky'])/ df.loc[:,'Ed']\n #plt.figure()\n\n\n def 
add_curve(ax,x,mean,std,c='red',label=''):\n ax.plot(x,mean, linestyle='solid', c=c, lw=2.5,\n alpha=0.8, label=label)\n ax.fill_between(x,\n mean - std,\n mean + std, alpha=0.35,color=c)\n add_curve(ax,wl,Rrs_swr.transpose().mean(axis=1),Rrs_swr.transpose().std(axis=1),label='swr',c='black')\n add_curve(ax,wl,Rrs15.transpose().mean(axis=1),Rrs15.transpose().std(axis=1),label='M2015')\n add_curve(ax,wl,Rrs99.transpose().mean(axis=1),Rrs99.transpose().std(axis=1),c='orange',label='M1999')\n add_curve(ax,wl,Rrs_h.transpose().mean(axis=1),Rrs_h.transpose().std(axis=1),c='grey',label='h')\n\n ax.set_title('azi='+str(azi)+', vza='+str(vza))\n\n\n ax.legend(loc='best', frameon=False)\n\n ax.set_ylabel(r'$R_{rs}\\ (sr^{-1})$')\n ax.set_xlabel(r'Wavelength (nm)')\n\nLt.index.names\n\n\n\n", "import os, sys\nimport pandas as pd\nimport numpy as np\nimport xarray as xr\nimport glob\nimport io\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport plotly\nimport plotly.graph_objs as go\nimport cmocean\nimport scipy.optimize as so\nfrom scipy.interpolate import interp1d\n\nfrom trios.process import *\nfrom trios.utils.sunposition import sunpos\n\nopj = os.path.join\ndir = '/DATA/projet/borges'\ndirdata = opj(dir,'data')\naerosols = ['fine','coarse']\naerosol=aerosols[1]\nmethod = 'osoaa_'+aerosol\n\nodir = opj(dirdata,'L2',method)\nif not os.path.exists(odir):\n os.makedirs(odir)\n\n# (for raw data: 83b0 = Lu, 853e = Ed, 83ae = Ld40va, 855dLd50va).\n# sensors are pointing to a 240º azimuth (cw north).\n# TODO, redo compuation for wl <400nm\n# project spectral data on common wavelength set (i.e., wl_common defined in trios.config)\n# here we change wl_common to stay within interpolation range (i.e., 410-1025 nm)\nwl_common = wl_common[wl_common>=410]\n\nlat, lon, alt = -16.2, -47.32, 850\nazi_sensor = 240\n\ndef load_csv(file,label=''):\n ''' Load and reproject data on common wavelength set'''\n print(file)\n date_parser = lambda x: pd.datetime.strptime(x, \"%d/%m/%y %H:%M\")\n df = pd.read_csv(file,index_col=0,parse_dates=True,date_parser=date_parser)\n wl = df.columns = df.columns.astype('float')\n df.index.name = 'date'\n\n raw = interp1d(wl, df.values, fill_value='extrapolate')(wl_common)\n df = pd.DataFrame(index=df.index,columns=pd.MultiIndex.from_tuples(zip([label] * len(wl_common), wl_common),\n names=['param', 'wl']),data=raw)\n # sort to get data in increasing time order\n df.sort_index(inplace=True)\n return df\n\nEd = load_csv(opj(dirdata,'raw_data.xlsx_853e.csv'),label='Ed')\nLsky = load_csv(opj(dirdata,'raw_data.xlsx_83ae.csv'),label='Lsky')\n#Lsky50,wl = load_csv(opj(dirdata,'raw_data.xlsx_855d.csv'),label='Lsky50')\nLt = load_csv(opj(dirdata,'raw_data.xlsx_83b0.csv'),label='Lt')\n\n# merge sensor data on time\ndf = pd.merge_asof(Lt, Ed, left_index=True, right_index=True, tolerance=pd.Timedelta(\"2 seconds\"),\n direction=\"nearest\")\ndf = pd.merge_asof(df, Lsky, left_index=True, right_index=True, tolerance=pd.Timedelta(\"2 seconds\"),\n direction=\"nearest\")\n\n\n# Convert to UTC time\ndf.index = df.index + pd.Timedelta(hours=3)\n\ndf['sza', ''] = np.nan\ndf['azi', ''] = np.nan\ngeom = sunpos(df.index.to_pydatetime(), lat, lon, alt)[:,0:2]\ndf.at[:, 'sza'] = geom[:,1]\nrelazi = ( geom[:,0] - azi_sensor)% 360\n# to get values between 0 and 180°\n# since rho values are symmetrical with the principal plane\nrelazi[relazi>180] = 360-relazi[relazi>180]\ndf.at[:, 'azi'] = relazi\n\ndef format_Rrs(Rrs,df,name='Rrs'):\n Rrs = Rrs.to_pandas()\n Rrs.index = df.index\n Rrs.columns = 
pd.MultiIndex.from_product([ [name], df.Lt.columns,])\n return Rrs\n\nawr = awr_process(aerosol = aerosol)\n\nvza = 40\nws = 2\naot550 = 0.1\nrho = awr.rho.rho.to_xarray()\nrho_ = rho.interp(vza=vza, wind=ws,aot=aot550,wl=wl_common)\nrho_M99=awr.rhoM1999.to_xarray().interp(vza=vza, wind=ws)\nrho_M99 = rho_M99.interp(sza=np.linspace(0,80,81),method='cubic').interp(azi=np.linspace(0,180,181),method='cubic')\n\nfor name, raw in df.resample('1H'):\n print(name)\n\n #try:\n suff = name.__str__().replace(' ','_')\n N = len(raw.index)\n if not N:\n continue\n # ------------------\n # filtering\n # ------------------\n # daylight data\n ind = raw.sza < 80\n if not all(ind):\n continue\n\n #ind = awr.filtering(raw.Lt, raw.Lsky, raw.Ed)\n clean = raw[ind]\n Lt, Lsky, Ed, sza, azi = clean.Lt.values, clean.Lsky.values, clean.Ed.values, clean.sza.values, clean.azi.values\n sza_ = xr.DataArray(sza, dims='geom')\n azi_ = xr.DataArray(azi, dims='geom')\n\n # -----------------------------\n # data processing\n # -----------------------------\n rho_v = rho_.interp(sza = sza_, azi = azi_).T\n rho_M99_ = rho_M99.interp(sza = sza_, azi = azi_).to_array().T.values\n\n clean['rho', ''] = rho_v.mean(axis=1)\n clean['rho_min', ''] = rho_v.min(axis=1)\n clean['rho_max', ''] = rho_v.max(axis=1)\n clean['rho_M99', ''] = rho_M99_\n\n Lsurf = (rho_v * Lsky)\n\n Lsurf_M99 = rho_M99_ * Lsky\n\n\n Rrs = format_Rrs((Lt - Lsurf) / clean.Ed,clean,'Rrs_'+method)\n Rrs_M99 = (Lt - Lsurf_M99) / clean.Ed\n Rrs_M99.columns = pd.MultiIndex.from_product([ ['Rrs_M99'], Rrs_M99.columns,])\n\n\n\n clean = pd.concat([Rrs,Rrs_M99,clean],axis=1)\n clean.to_csv(opj(odir,'awr_L2_'+method+'_'+suff+'.csv'))\n\n\n # Rrs, rho = awr.process_wrapper(wl_common, raw, raw.sza, ws=ws, vza = [vza] * N, azi=raw.azi)\n #\n # awr.get_rho_values(raw.sza-90,[vza]*N, raw.azi.values, ws=[2])\n #\n # rho = awr.get_rho_mobley(awr.rhoM1999,raw.sza-90,[vza]*N, azi=raw.azi, ws=[2])[0,:,0]\n" ]
[ [ "scipy.interpolate.interp1d", "matplotlib.pyplot.subplots" ], [ "pandas.concat", "pandas.read_csv", "numpy.linspace", "pandas.Timedelta", "scipy.interpolate.interp1d", "pandas.datetime.strptime", "pandas.MultiIndex.from_product" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
philipco/mcm-bidirectional-compression
[ "64f9d1cb2f302e948d8331477e5ef8f4fc7d872f", "64f9d1cb2f302e948d8331477e5ef8f4fc7d872f" ]
[ "src/utils/data/DataPreparation.py", "src/utils/Constants.py" ]
[ "\"\"\"\nCreated by Philippenko, 10 January 2020.\n\nThis class generate data for Logistic and Least-Square regression\n\"\"\"\nfrom copy import deepcopy\n\nimport numpy as np\nfrom numpy.random.mtrand import multivariate_normal\nfrom scipy.linalg.special_matrices import toeplitz\nimport torch\nfrom math import floor\n\nfrom src.utils.Constants import DIM, NB_OF_POINTS_BY_DEVICE, BIAS\n\n\ndef add_bias_term(X):\n \"\"\"Add a bias term in the dataset.\n\n :param X: dataset\n :return: dataset with an additional columns of 1 at the beginning.\n \"\"\"\n newX = [torch.cat((torch.ones(len(x), 1).to(dtype=torch.float64), x), 1) for x in X]\n return newX\n\n\ndef add_constant_columns(x):\n \"\"\"Form (y,tX) to get regression data in matrix form.\"\"\"\n num_samples = x.shape[0]\n tx = np.c_[np.ones(num_samples), x]\n return tx\n\n\ndef build_data_logistic(true_model_param: torch.FloatTensor, n_samples=NB_OF_POINTS_BY_DEVICE, n_dimensions=DIM,\n n_devices: int = 1, with_seed: bool = False,\n features_corr=0.6, labels_std=0.4):\n \"\"\"Build data for logistic regression.\n\n Args:\n true_model_param: the true parameters of the model.\n n_samples: number of sample by devices.\n n_dimensions: dimension of the problem.\n n_devices: number of devices.\n with_seed: true if we want to initialize the pseudo-random number generator.\n features_corr: correlation coefficient used to generate data points.\n labels_std: standard deviation coefficient of the noises added on labels.\n\n Returns:\n if more than one device, a list of pytorch tensor, otherwise a single tensor.\n \"\"\"\n X, Y = [], []\n model_copy = deepcopy(true_model_param)\n for i in range(n_devices):\n\n # We use two different model to simulate non iid data.\n if i%2==0:\n model_copy[(i+1)%n_dimensions] *= -1\n else:\n model_copy = deepcopy(true_model_param)\n\n # Construction of a covariance matrix\n cov = toeplitz(features_corr ** np.arange(0, n_dimensions))\n\n if not with_seed:\n np.random.seed(0)\n\n sign = np.array([1 for j in range(n_dimensions)])\n if i%2 == 0:\n sign[i%n_dimensions] = -1\n\n x = torch.from_numpy(sign * multivariate_normal(np.zeros(n_dimensions), cov, size=floor(n_samples)).astype(\n dtype=np.float64))\n\n # Simulation of the labels\n # NB : Logistic syntethic dataset is used to show how Artemis is used in non-i.i.d. 
settings.\n # This is why, we don't introduce a bias here.\n y = torch.bernoulli(torch.sigmoid(x.mv(model_copy.T)))\n y[y == 0] = -1\n X.append(x)\n Y.append(y)\n\n if n_devices == 1:\n return X[0], Y[0]\n return X, Y\n\n\ndef build_data_linear(true_model_param: torch.FloatTensor, n_samples=NB_OF_POINTS_BY_DEVICE, n_dimensions=DIM,\n n_devices: int = 1, with_seed: bool = False, without_noise=False,\n features_corr=0.6, labels_std=0.4):\n \"\"\"Build data for least-square regression.\n\n Args:\n true_model_param: the true parameters of the model.\n n_samples: number of sample by devices.\n n_dimensions: dimension of the problem.\n n_devices: number of devices.\n with_seed: true if we want to initialize the pseudo-random number generator.\n features_corr: correlation coefficient used to generate data points.\n labels_std: standard deviation coefficient of the noises added on labels.\n\n\n Returns:\n if more than one device, a list of pytorch tensor, otherwise a single tensor.\n \"\"\"\n\n X, Y = [], []\n for i in range(n_devices):\n\n # Construction of a covariance matrix\n cov = toeplitz(features_corr ** np.arange(0, n_dimensions))\n\n if with_seed:\n np.random.seed(0)\n x = torch.from_numpy(multivariate_normal(np.zeros(n_dimensions), cov, size=floor(n_samples)).astype(dtype=np.float64))\n\n # Simulation of the labels\n y = x.mv(true_model_param) + BIAS\n\n # We add or not a noise\n if not without_noise:\n if with_seed:\n y += torch.normal(0, labels_std, size=(floor(n_samples), 1),\n generator=torch.manual_seed(0), dtype=torch.float64)[0]\n else:\n y += torch.normal(0, labels_std, size=(floor(n_samples), 1), dtype=torch.float64)[0]\n\n X.append(x)\n Y.append(y)\n if n_devices == 1:\n return X[0], Y[0]\n return X, Y\n", "\"\"\"\nCreated by Philippenko, 6 January 2020.\n\nThis python file gather all constants used as default value of this implementation of Artemis.\n\"\"\"\n\nimport multiprocessing as mp\nimport numpy as np\nimport torch\n\nW_BOUND = (-7.5, 7.5) # Bound of the plot when plotting gradient descent.\nNB_DEVICES = 10 # Default number of devices.\nDIM = 10 # Default dimension.\nDIM_OUTPUT = 1 # Default output dimension.\nNB_EPOCH = 100 # Number of epoch for one gradient descent.\nNB_OF_POINTS_BY_DEVICE = 200 # Default number of points by device.\nMAX_LOSS = 1e10 # maximal acceptable loss when considering that gradient descent diverged.\n\nBIAS = 2\n\nCORES = mp.cpu_count()\n\n\ndef generate_param(n_dimensions: int):\n \"\"\"Simulation of model's parameters\"\"\"\n nnz = 20\n idx = np.arange(n_dimensions)\n W = torch.FloatTensor((-1) ** (idx + 1) * np.exp(-idx / 10.)).to(dtype=torch.float64)\n W[nnz:] = 0.\n return W\n\n\nTRUE_MODEL_PARAM = generate_param(DIM)\n\nDEVICE_RANGE = [1, 3, 10, 16, 20, 40] # Range of device used in experiments\nDIMENSION_RANGE = [1, 4, 10, 16, 20, 160, 320] # Range of dimension used in experiments\n\nstep_formula = [(lambda it, L, omega, N: 40 / (2*L)),\n (lambda it, L, omega, N: 20 / (2*L)),\n (lambda it, L, omega, N: 5 / L),\n (lambda it, L, omega, N: 2 / L),\n (lambda it, L, omega, N: 1 / L),\n (lambda it, L, omega, N: 1 / (2*L)),\n (lambda it, L, omega, N: 1 / (4*L)),\n (lambda it, L, omega, N: 1 / (8*L)),\n (lambda it, L, omega, N: 1 / (16*L)),\n (lambda it, L, omega, N: 1 / (32*L))\n ]\n\nlabel_step_formula = [\"N/L\",\n \"N/2L\",\n \"5/L\",\n \"2/L\",\n \"$L^{-1}$\",\n \"$2L^{-1}$\",\n \"$4L^{-1}$\",\n \"$8L^{-1}$\",\n \"$16L^{-1}$\",\n \"$32L^{-1}$\"\n ]\n\n\n" ]
[ [ "numpy.random.seed", "numpy.arange", "torch.manual_seed", "numpy.ones", "numpy.zeros" ], [ "numpy.arange", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sebemery/CS433-Machine-Learning-MiniProjects
[ "2fbcf540d2b12f987994850e6822fc365db3cdea" ]
[ "Project 2/script/fastai_package.py" ]
[ "import numpy as np\nimport pandas as pd\nimport fastai\nfrom fastai.collab import *\nfrom data_management import*\n\ndef load_data_fastai(path_data, path_rewritedata):\n \"\"\"Create a new csv for fastai and load the data\"\"\"\n new_dataset(path_data, path_rewritedata)\n data = pd.read_csv(path_rewritedata)\n data2 = CollabDataBunch.from_df(data, seed=12, valid_pct=0.15, user_name='user', item_name='item', rating_name='rating')\n data2.show_batch()\n return data2\n\ndef run_fastai(path_data,rewrite_dataset,path_sample_submission, algo):\n \"\"\" This function runs fastai algorithms.\n Given algo=1 : runs the fastai embeddingDotBias algorithm, which is a MF based algorithm\n Given algo=2 : runs the embeddingNN algorithm, which is a NN bassed algorithm\n\n The function return an numpy array with ids and predictions for sample_submission ids\n \"\"\"\n data = load_data_fastai(path_data,rewrite_dataset)\n \n #EmbeddingDotBias algorithm \n if algo == 1:\n learn = collab_learner(data, n_factors=200, y_range=[1,5], wd=5e-2)\n learn.lr_find() # find learning rate\n learn.recorder.plot() # plot learning rate graph\n learn.fit_one_cycle(10, 3e-4)\n \n #EmbdedingNN algorithm\n elif algo == 2:\n learn = collab_learner(data, use_nn=True, \n emb_szs={'user': 100, 'item':100}, \n layers=[32, 16], \n y_range=[1,5])\n learn.lr_find() # find learning rate\n learn.recorder.plot() # plot learning rate graph\n learn.fit_one_cycle(5, 5e-2)\n else:\n print('algo only takes value 1 for embeddingsDotBias algorithm and 2 for enbeddingNN algorithm')\n return\n \n #Load ids from sample_submission file\n sample_sub = load_sample_sub(path_sample_submission)\n \n fastai_sub = np.copy(sample_sub)\n \n #Calculate predictions for sample_sub ids\n preds = learn.model(torch.tensor(fastai_sub[:,0]),torch.tensor(fastai_sub[:,1]))\n \n for ind, p in enumerate(list(zip(preds))):\n fastai_sub[ind,2] = round(p[0].item())\n \n #return numpy array with ids and predictions \n return fastai_sub" ]
[ [ "numpy.copy", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
SKA-INAF/efficientdet-pytorch
[ "8967bab88288d11e5547a7efa391adc0c987be47" ]
[ "effdet/evaluator.py" ]
[ "import os\nimport abc\nimport json\nimport logging\nimport time\nfrom tempfile import NamedTemporaryFile\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\n\nfrom pycocotools.cocoeval import COCOeval\nfrom .distributed import synchronize, is_main_process, all_gather_container\n\n# FIXME experimenting with speedups for OpenImages eval, it's slow\n#import pyximport; py_importer, pyx_importer = pyximport.install(pyimport=True)\nimport effdet.evaluation.detection_evaluator as tfm_eval\n#pyximport.uninstall(py_importer, pyx_importer)\n\n_logger = logging.getLogger(__name__)\n\n\n__all__ = ['CocoEvaluator', 'PascalEvaluator', 'OpenImagesEvaluator', 'RadioGalaxyEvaluator' 'create_evaluator']\n\n\nclass Evaluator:\n\n def __init__(self, distributed=False, pred_yxyx=False, score_thresh=0.001):\n self.distributed = distributed\n self.distributed_device = None\n self.pred_yxyx = pred_yxyx\n self.img_indices = []\n self.predictions = []\n self.score_thresh = score_thresh\n\n def add_predictions(self, detections, target):\n if self.distributed:\n if self.distributed_device is None:\n # cache for use later to broadcast end metric\n self.distributed_device = detections.device\n synchronize()\n detections = all_gather_container(detections)\n img_indices = all_gather_container(target['img_idx'])\n if not is_main_process():\n return\n else:\n img_indices = target['img_idx']\n\n detections = detections.cpu().numpy()\n img_indices = img_indices.cpu().numpy()\n for img_idx, img_dets in zip(img_indices, detections):\n self.img_indices.append(img_idx)\n self.predictions.append(img_dets)\n\n def _coco_predictions(self):\n # generate coco-style predictions\n coco_predictions = []\n coco_ids = []\n for img_idx, img_dets in zip(self.img_indices, self.predictions):\n img_id = self._dataset.img_ids[img_idx]\n coco_ids.append(img_id)\n if self.pred_yxyx:\n # to xyxy\n img_dets[:, 0:4] = img_dets[:, [1, 0, 3, 2]]\n # to xywh\n img_dets[:, 2] -= img_dets[:, 0]\n img_dets[:, 3] -= img_dets[:, 1]\n for det in img_dets:\n score = float(det[4])\n if score < self.score_thresh: # stop when below this threshold, scores in descending order\n break\n coco_det = dict(\n image_id=int(img_id),\n bbox=det[0:4].tolist(),\n score=score,\n category_id=int(det[5]))\n coco_predictions.append(coco_det)\n return coco_predictions, coco_ids\n\n @abc.abstractmethod\n def evaluate(self, output_result_file=''):\n pass\n\n def save(self, result_file):\n # save results in coco style, override to save in a alternate form\n if not self.distributed or dist.get_rank() == 0:\n assert len(self.predictions)\n coco_predictions, coco_ids = self._coco_predictions()\n json.dump(coco_predictions, open(result_file, 'w'), indent=4)\n\n\nclass CocoEvaluator(Evaluator):\n\n def __init__(self, dataset, distributed=False, pred_yxyx=False):\n super().__init__(distributed=distributed, pred_yxyx=pred_yxyx)\n self._dataset = dataset.parser\n self.coco_api = dataset.parser.coco\n\n def reset(self):\n self.img_indices = []\n self.predictions = []\n\n def evaluate(self, output_result_file=''):\n if not self.distributed or dist.get_rank() == 0:\n assert len(self.predictions)\n coco_predictions, coco_ids = self._coco_predictions()\n if output_result_file:\n json.dump(coco_predictions, open(output_result_file, 'w'), indent=4)\n results = self.coco_api.loadRes(output_result_file)\n else:\n with NamedTemporaryFile(prefix='coco_', suffix='.json', delete=False, mode='w') as tmpfile:\n json.dump(coco_predictions, tmpfile, indent=4)\n results = 
self.coco_api.loadRes(tmpfile.name)\n try:\n os.unlink(tmpfile.name)\n except OSError:\n pass\n coco_eval = COCOeval(self.coco_api, results, 'bbox')\n coco_eval.params.imgIds = coco_ids # score only ids we've used\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n metric = coco_eval.stats[0] # mAP 0.5-0.95\n if self.distributed:\n dist.broadcast(torch.tensor(metric, device=self.distributed_device), 0)\n else:\n metric = torch.tensor(0, device=self.distributed_device)\n dist.broadcast(metric, 0)\n metric = metric.item()\n self.reset()\n return metric\n\nclass RadioGalaxyEvaluator(Evaluator):\n\n def __init__(self, dataset, distributed=False, pred_yxyx=False, score_thresh=0.1):\n super().__init__(distributed=distributed, pred_yxyx=pred_yxyx, score_thresh=score_thresh)\n self._dataset = dataset.parser\n self.coco_api = dataset.parser.coco\n\n def reset(self):\n self.img_indices = []\n self.predictions = []\n\n def evaluate(self, output_result_file=''):\n if not self.distributed or dist.get_rank() == 0:\n assert len(self.predictions)\n coco_predictions, coco_ids = self._coco_predictions()\n if output_result_file:\n json.dump(coco_predictions, open(output_result_file, 'w'), indent=4)\n results = self.coco_api.loadRes(output_result_file)\n else:\n with NamedTemporaryFile(prefix='coco_', suffix='.json', delete=False, mode='w') as tmpfile:\n json.dump(coco_predictions, tmpfile, indent=4)\n results = self.coco_api.loadRes(tmpfile.name)\n try:\n os.unlink(tmpfile.name)\n except OSError:\n pass\n coco_eval = COCOeval(self.coco_api, results, 'bbox')\n coco_eval.params.imgIds = coco_ids # score only ids we've used\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n metric = coco_eval.stats[0] # mAP 0.5-0.95\n if self.distributed:\n dist.broadcast(torch.tensor(metric, device=self.distributed_device), 0)\n else:\n metric = torch.tensor(0, device=self.distributed_device)\n dist.broadcast(metric, 0)\n metric = metric.item()\n self.reset()\n return metric\n\n\nclass TfmEvaluator(Evaluator):\n \"\"\" Tensorflow Models Evaluator Wrapper \"\"\"\n def __init__(\n self, dataset, distributed=False, pred_yxyx=False, evaluator_cls=tfm_eval.ObjectDetectionEvaluator):\n super().__init__(distributed=distributed, pred_yxyx=pred_yxyx)\n self._evaluator = evaluator_cls(categories=dataset.parser.cat_dicts)\n self._eval_metric_name = self._evaluator._metric_names[0]\n self._dataset = dataset.parser\n\n def reset(self):\n self._evaluator.clear()\n self.img_indices = []\n self.predictions = []\n\n def evaluate(self, output_result_file=''):\n if not self.distributed or dist.get_rank() == 0:\n for img_idx, img_dets in zip(self.img_indices, self.predictions):\n gt = self._dataset.get_ann_info(img_idx)\n self._evaluator.add_single_ground_truth_image_info(img_idx, gt)\n\n bbox = img_dets[:, 0:4] if self.pred_yxyx else img_dets[:, [1, 0, 3, 2]]\n det = dict(bbox=bbox, score=img_dets[:, 4], cls=img_dets[:, 5])\n self._evaluator.add_single_detected_image_info(img_idx, det)\n\n metrics = self._evaluator.evaluate()\n _logger.info('Metrics:')\n for k, v in metrics.items():\n _logger.info(f'{k}: {v}')\n map_metric = metrics[self._eval_metric_name]\n if self.distributed:\n dist.broadcast(torch.tensor(map_metric, device=self.distributed_device), 0)\n else:\n map_metric = torch.tensor(0, device=self.distributed_device)\n wait = dist.broadcast(map_metric, 0, async_op=True)\n while not wait.is_completed():\n # wait without spinning the cpu @ 100%, no need for low latency here\n time.sleep(0.5)\n 
map_metric = map_metric.item()\n if output_result_file:\n self.save(output_result_file)\n self.reset()\n return map_metric\n\n\nclass PascalEvaluator(TfmEvaluator):\n\n def __init__(self, dataset, distributed=False, pred_yxyx=False):\n super().__init__(\n dataset, distributed=distributed, pred_yxyx=pred_yxyx, evaluator_cls=tfm_eval.PascalDetectionEvaluator)\n\n\nclass OpenImagesEvaluator(TfmEvaluator):\n\n def __init__(self, dataset, distributed=False, pred_yxyx=False):\n super().__init__(\n dataset, distributed=distributed, pred_yxyx=pred_yxyx, evaluator_cls=tfm_eval.OpenImagesDetectionEvaluator)\n\n\ndef create_evaluator(name, dataset, distributed=False, pred_yxyx=False, score_thresh=0.001):\n # FIXME support OpenImages Challenge2019 metric w/ image level label consideration\n if 'coco' in name:\n return CocoEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx)\n elif 'openimages' in name:\n return OpenImagesEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx)\n elif 'radiogalaxy' in name:\n return RadioGalaxyEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx, score_thresh=score_thresh)\n else:\n return PascalEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx)\n" ]
[ [ "torch.distributed.get_rank", "torch.distributed.broadcast", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Reaction-Space-Explorer/reac-space-exp
[ "52f4b4eab755bd4a6830d838828c958149567396", "52f4b4eab755bd4a6830d838828c958149567396" ]
[ "neo4j_loader_and_queries/mock_data/test.py", "main/check_testset_matches.py" ]
[ "import pandas as pd\npd.options.mode.chained_assignment = None # default='warn'\nimport numpy as np\nimport os\nfrom py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher\n# from neo4j import GraphDatabase\n# import neo4j\nimport networkx as nx\nimport json\nimport datetime\nimport matplotlib.pyplot as plt\n# from ggplot import *\nfrom shutil import copytree\nimport math\n# from graph_tool.all import *\nimport json\nimport random\n\n\n# configure network database Neo4j\nurl = \"bolt://neo4j:0000@localhost:7687\"\ngraph = Graph(url)\nmatcher = NodeMatcher(graph)\nrel_matcher = RelationshipMatcher(graph)\n\n\n\ndef rxn_query_str(reactant, product, rxn_id):\n \"\"\"\n Generate cypher MERGE query for reactant and product node.\n \"\"\"\n return \"MERGE (r: Molecule {smiles_str:\\\"\"+ reactant +\"\\\"}) MERGE (p: Molecule {smiles_str:\\\"\"+ product +\"\\\"}) MERGE (r)-[:FORMS {rxn_id: \"+ str(rxn_id) +\"}]->(p)\"\n\n\ndef create_molecule_if_not_exists(smiles_str, generation_formed, exact_mass=0):\n \"\"\"\n Create molecule in DB if not exists.\n \"\"\"\n molecule = matcher.match(\"Molecule\", smiles_str = smiles_str).first()\n if molecule is None:\n # molecule does not exist, create node with generation information\n tx = graph.begin()\n new_m = Node(\"Molecule\",\n smiles_str = smiles_str,\n exact_mass = round(float(exact_mass),3),\n generation_formed = generation_formed)\n tx.create(new_m)\n tx.commit()\n return new_m\n return molecule\n\ndef create_reaction_rel_if_not_exists(from_smiles, to_smiles):\n from_molecule = matcher.match(\"Molecule\", smiles_str = from_smiles).first()\n to_molecule = matcher.match(\"Molecule\", smiles_str = to_smiles).first()\n if len(list(graph.match(nodes=(from_molecule, to_molecule), r_type=\"FORMS\"))) <= 0:\n # relationship does not exist\n tx = graph.begin()\n new_r = Relationship(from_molecule, \"FORMS\", to_molecule)\n tx.create(new_r)\n tx.commit()\n\n\ndef import_mock_data():\n \"\"\"\n Import simple fake dataset to test queries out on.\n \"\"\"\n # read in data\n molecules = pd.read_csv(\"mock_data/molecules.csv\")\n reactions = pd.read_csv(\"mock_data/reactions.csv\")\n\n # create nodes\n for _, row in molecules.iterrows():\n create_molecule_if_not_exists(smiles_str = row['smiles_str'],\n generation_formed = 0) # doesn't matter yet\n\n # create relationships\n for _, row in reactions.iterrows():\n create_reaction_rel_if_not_exists(from_smiles = row['from_node'],\n to_smiles = row['to_node'])\n # merge_query = rxn_query_str(reactant=row['from_node'],\n # product=row['to_node'],\n # rxn_id=row['rxn_id'])\n\n\n# def import_molecules():\n# \"\"\"\n# Takes all output .txt files from data folder and parses the text files for\n# any new molecules each generation.\n# \"\"\"\n# txt_file_names = os.listdir(os.path.join(os.getcwd(), \"data\"))\n# for file_name in txt_file_names:\n# generation = int(file_name.split(\".\")[0][-1])\n# molecules = open(f\"data/molecules/{file_name}\").read().split(\"\\n\")\n# for molecule in molecules:\n# create_molecule_if_not_exists(smiles_str = molecule,\n# generation_formed = generation)\n\n\ndef load_simple_graph():\n \"\"\"\n Connect to Neo4j and load graph.\n \n Usually, graph import is split up into nodes.csv and relationships.csv,\n but since this is a single label & single relationship graph for now,\n this makes the import simpler.\n \"\"\"\n \n # first, delete all from db\n db.cypher_query(\"MATCH (n) DETACH DELETE n\")\n \n # prepare import data\n data = 
pd.read_csv(\"mock_data/simple_graph_import.csv\")\n all_molecules = []\n for col in data.columns:\n if col != 'rxn_id':\n all_molecules.extend(list(data[col].unique()))\n all_molecules = list(set(all_molecules)) # get unique set of all molecules and convert back to list\n # all_molecules = [mol for mol in all_molecules if mol != np.nan]\n # load db with import data\n # first, make sure all molecules exist, and create if not with MERGE\n for mol in all_molecules:\n try:\n db.cypher_query(\"MERGE (:Molecule {smiles_str:\\\"\"+mol+\"\\\"})\")\n except:\n pass\n \n # then, merge on relationships\n for _, rxn in data.iterrows():\n results, meta1 = db.cypher_query(rxn_query_str(reactant = rxn['reactant_1'], product = rxn['product'], rxn_id = int(rxn['rxn_id'])))\n results, meta2 = db.cypher_query(rxn_query_str(reactant = rxn['reactant_2'], product = rxn['product'], rxn_id = int(rxn['rxn_id'])))\n # print(meta1, meta2) # print meta data on cypher query for each reaction; suppress if too many import records\n\n\n\n# import_mock_data()", "import pandas as pd\n\n\"\"\"\n\nThere's a method for checking test set matches in main.py which can do this at the end of reaction\ngeneration itself, but that doesn't take generation into account and\nI wanted to see which test set molecule matched in which generation from the output\ndump files.\n\n\"\"\"\nfrom rdkit.Chem import SDMolSupplier, MolFromSmiles, MolToSmiles, Kekulize\n\ntest_set = [mol for mol in SDMolSupplier('../data/NewAlkalineHydrolysisStructures.sdf')]\n# Kekulize each species in the test set\nfor mol in test_set:\n\tKekulize(mol)\n\nmatched_flag_list = [False for mol in test_set]\n\nmatch_gen_map = {}\n\ndef check_match(candidate_smiles, gen):\n\t\"\"\"\n\tIf A is a substructure of B and B is a substructure of A then the two must be isomorphic. Does that make sense?\n\tThat's the logic I used!\n\t\"\"\"\n\tcandidate = MolFromSmiles(candidate_smiles)\n\tfor m in test_set:\n\t\tif m.HasSubstructMatch(candidate) and candidate.HasSubstructMatch(m):\n\t\t\tprint(f'{candidate_smiles} matched in {gen}')\n\t\t\tmatch_gen_map[candidate_smiles] = int(gen[1])\n\t\t\t# update flag for the matched str.\n\t\t\tmatched_flag_list[test_set.index(m)] = True\n\n\n# Open the .txt output\noutput_data = pd.read_csv('glucose/glucose_degradation_output_10mar.txt',\n\t\t\t\t sep='\\t', names=['Generation', 'SMILES'])\n\nfor i in range(len(output_data)):\n\tcheck_match(output_data['SMILES'].iloc[i], output_data['Generation'].iloc[i])\n\n\n# list molecules that couldn't be matched\nprint('The following test set molecules didn\\'t have a match')\nfor i in range(len(test_set)):\n\tif matched_flag_list[i] == False:\n\t\tsmi = MolToSmiles(test_set[i], kekuleSmiles=True, isomericSmiles=False) #isomericSmiles includes stereochem info\n\t\tprint(smi)" ]
[ [ "pandas.read_csv" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
tacaswell/astropy
[ "75046e61916da36dffe87ddf59a7c6bfb00de81c", "75046e61916da36dffe87ddf59a7c6bfb00de81c" ]
[ "astropy/time/formats.py", "astropy/units/tests/test_quantity.py" ]
[ "# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport fnmatch\nimport time\nimport re\nimport datetime\nimport warnings\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\n\nfrom astropy.utils.decorators import lazyproperty\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\nfrom astropy import units as u\nfrom astropy import _erfa as erfa\nfrom .utils import day_frac, quantity_day_frac, two_sum, two_product\n\n\n__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',\n 'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',\n 'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',\n 'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',\n 'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',\n 'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',\n 'TimeEpochDateString', 'TimeBesselianEpochString',\n 'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',\n 'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64']\n\n__doctest_skip__ = ['TimePlotDate']\n\n# These both get filled in at end after TimeFormat subclasses defined.\n# Use an OrderedDict to fix the order in which formats are tried.\n# This ensures, e.g., that 'isot' gets tried before 'fits'.\nTIME_FORMATS = OrderedDict()\nTIME_DELTA_FORMATS = OrderedDict()\n\n# Translations between deprecated FITS timescales defined by\n# Rots et al. 2015, A&A 574:A36, and timescales used here.\nFITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',\n 'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}\n\n\ndef _regexify_subfmts(subfmts):\n \"\"\"\n Iterate through each of the sub-formats and try substituting simple\n regular expressions for the strptime codes for year, month, day-of-month,\n hour, minute, second. If no % characters remain then turn the final string\n into a compiled regex. This assumes time formats do not have a % in them.\n\n This is done both to speed up parsing of strings and to allow mixed formats\n where strptime does not quite work well enough.\n \"\"\"\n new_subfmts = []\n for subfmt_tuple in subfmts:\n subfmt_in = subfmt_tuple[1]\n for strptime_code, regex in (('%Y', r'(?P<year>\\d\\d\\d\\d)'),\n ('%m', r'(?P<mon>\\d{1,2})'),\n ('%d', r'(?P<mday>\\d{1,2})'),\n ('%H', r'(?P<hour>\\d{1,2})'),\n ('%M', r'(?P<min>\\d{1,2})'),\n ('%S', r'(?P<sec>\\d{1,2})')):\n subfmt_in = subfmt_in.replace(strptime_code, regex)\n\n if '%' not in subfmt_in:\n subfmt_tuple = (subfmt_tuple[0],\n re.compile(subfmt_in + '$'),\n subfmt_tuple[2])\n new_subfmts.append(subfmt_tuple)\n\n return tuple(new_subfmts)\n\n\nclass TimeFormatMeta(type):\n \"\"\"\n Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the\n `TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.\n \"\"\"\n\n _registry = TIME_FORMATS\n\n def __new__(mcls, name, bases, members):\n cls = super().__new__(mcls, name, bases, members)\n\n # Register time formats that have a name, but leave out astropy_time since\n # it is not a user-accessible format and is only used for initialization into\n # a different format.\n if 'name' in members and cls.name != 'astropy_time':\n mcls._registry[cls.name] = cls\n\n if 'subfmts' in members:\n cls.subfmts = _regexify_subfmts(members['subfmts'])\n\n return cls\n\n\nclass TimeFormat(metaclass=TimeFormatMeta):\n \"\"\"\n Base class for time representations.\n\n Parameters\n ----------\n val1 : numpy ndarray, list, number, str, or bytes\n Values to initialize the time or times. 
Bytes are decoded as ascii.\n val2 : numpy ndarray, list, or number; optional\n Value(s) to initialize the time or times. Only used for numerical\n input, to help preserve precision.\n scale : str\n Time scale of input value(s)\n precision : int\n Precision for seconds as floating point\n in_subfmt : str\n Select subformat for inputting string times\n out_subfmt : str\n Select subformat for outputting string times\n from_jd : bool\n If true then val1, val2 are jd1, jd2\n \"\"\"\n\n _default_scale = 'utc' # As of astropy 0.4\n\n def __init__(self, val1, val2, scale, precision,\n in_subfmt, out_subfmt, from_jd=False):\n self.scale = scale # validation of scale done later with _check_scale\n self.precision = precision\n self.in_subfmt = in_subfmt\n self.out_subfmt = out_subfmt\n\n if from_jd:\n self.jd1 = val1\n self.jd2 = val2\n else:\n val1, val2 = self._check_val_type(val1, val2)\n self.set_jds(val1, val2)\n\n def __len__(self):\n return len(self.jd1)\n\n @property\n def scale(self):\n \"\"\"Time scale\"\"\"\n self._scale = self._check_scale(self._scale)\n return self._scale\n\n @scale.setter\n def scale(self, val):\n self._scale = val\n\n def mask_if_needed(self, value):\n if self.masked:\n value = np.ma.array(value, mask=self.mask, copy=False)\n return value\n\n @property\n def mask(self):\n if 'mask' not in self.cache:\n self.cache['mask'] = np.isnan(self.jd2)\n if self.cache['mask'].shape:\n self.cache['mask'].flags.writeable = False\n return self.cache['mask']\n\n @property\n def masked(self):\n if 'masked' not in self.cache:\n self.cache['masked'] = bool(np.any(self.mask))\n return self.cache['masked']\n\n @property\n def jd2_filled(self):\n return np.nan_to_num(self.jd2) if self.masked else self.jd2\n\n @lazyproperty\n def cache(self):\n \"\"\"\n Return the cache associated with this instance.\n \"\"\"\n return defaultdict(dict)\n\n def _check_val_type(self, val1, val2):\n \"\"\"Input value validation, typically overridden by derived classes\"\"\"\n # val1 cannot contain nan, but val2 can contain nan\n ok1 = val1.dtype == np.double and np.all(np.isfinite(val1))\n ok2 = val2 is None or (val2.dtype == np.double and not np.any(np.isinf(val2)))\n if not (ok1 and ok2):\n raise TypeError('Input values for {0} class must be finite doubles'\n .format(self.name))\n\n if getattr(val1, 'unit', None) is not None:\n # Convert any quantity-likes to days first, attempting to be\n # careful with the conversion, so that, e.g., large numbers of\n # seconds get converted without loosing precision because\n # 1/86400 is not exactly representable as a float.\n val1 = u.Quantity(val1, copy=False)\n if val2 is not None:\n val2 = u.Quantity(val2, copy=False)\n\n try:\n val1, val2 = quantity_day_frac(val1, val2)\n except u.UnitsError:\n raise u.UnitConversionError(\n \"only quantities with time units can be \"\n \"used to instantiate Time instances.\")\n # We now have days, but the format may expect another unit.\n # On purpose, multiply with 1./day_unit because typically it is\n # 1./erfa.DAYSEC, and inverting it recovers the integer.\n # (This conversion will get undone in format's set_jds, hence\n # there may be room for optimizing this.)\n factor = 1. 
/ getattr(self, 'unit', 1.)\n if factor != 1.:\n val1, carry = two_product(val1, factor)\n carry += val2 * factor\n val1, val2 = two_sum(val1, carry)\n\n elif getattr(val2, 'unit', None) is not None:\n raise TypeError('Cannot mix float and Quantity inputs')\n\n if val2 is None:\n val2 = np.zeros_like(val1)\n\n def asarray_or_scalar(val):\n \"\"\"\n Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray\n or a Python or numpy scalar.\n \"\"\"\n return np.asarray(val) if isinstance(val, np.ndarray) else val\n\n return asarray_or_scalar(val1), asarray_or_scalar(val2)\n\n def _check_scale(self, scale):\n \"\"\"\n Return a validated scale value.\n\n If there is a class attribute 'scale' then that defines the default /\n required time scale for this format. In this case if a scale value was\n provided that needs to match the class default, otherwise return\n the class default.\n\n Otherwise just make sure that scale is in the allowed list of\n scales. Provide a different error message if `None` (no value) was\n supplied.\n \"\"\"\n if scale is None:\n scale = self._default_scale\n\n if scale not in TIME_SCALES:\n raise ScaleValueError(\"Scale value '{0}' not in \"\n \"allowed values {1}\"\n .format(scale, TIME_SCALES))\n\n return scale\n\n def set_jds(self, val1, val2):\n \"\"\"\n Set internal jd1 and jd2 from val1 and val2. Must be provided\n by derived classes.\n \"\"\"\n raise NotImplementedError\n\n def to_value(self, parent=None):\n \"\"\"\n Return time representation from internal jd1 and jd2. This is\n the base method that ignores ``parent`` and requires that\n subclasses implement the ``value`` property. Subclasses that\n require ``parent`` or have other optional args for ``to_value``\n should compute and return the value directly.\n \"\"\"\n return self.mask_if_needed(self.value)\n\n @property\n def value(self):\n raise NotImplementedError\n\n\nclass TimeJD(TimeFormat):\n \"\"\"\n Julian Date time format.\n This represents the number of days since the beginning of\n the Julian Period.\n For example, 2451544.5 in JD is midnight on January 1, 2000.\n \"\"\"\n name = 'jd'\n\n def set_jds(self, val1, val2):\n self._check_scale(self._scale) # Validate scale.\n self.jd1, self.jd2 = day_frac(val1, val2)\n\n @property\n def value(self):\n return self.jd1 + self.jd2\n\n\nclass TimeMJD(TimeFormat):\n \"\"\"\n Modified Julian Date time format.\n This represents the number of days since midnight on November 17, 1858.\n For example, 51544.0 in MJD is midnight on January 1, 2000.\n \"\"\"\n name = 'mjd'\n\n def set_jds(self, val1, val2):\n # TODO - this routine and vals should be Cythonized to follow the ERFA\n # convention of preserving precision by adding to the larger of the two\n # values in a vectorized operation. But in most practical cases the\n # first one is probably biggest.\n self._check_scale(self._scale) # Validate scale.\n jd1, jd2 = day_frac(val1, val2)\n jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h)\n self.jd1, self.jd2 = day_frac(jd1, jd2)\n\n @property\n def value(self):\n return (self.jd1 - erfa.DJM0) + self.jd2\n\n\nclass TimeDecimalYear(TimeFormat):\n \"\"\"\n Time as a decimal year, with integer values corresponding to midnight\n of the first day of each year. 
For example 2000.5 corresponds to the\n ISO time '2000-07-02 00:00:00'.\n \"\"\"\n name = 'decimalyear'\n\n def set_jds(self, val1, val2):\n self._check_scale(self._scale) # Validate scale.\n\n sum12, err12 = two_sum(val1, val2)\n iy_start = np.trunc(sum12).astype(int)\n extra, y_frac = two_sum(sum12, -iy_start)\n y_frac += extra + err12\n\n val = (val1 + val2).astype(np.double)\n iy_start = np.trunc(val).astype(int)\n\n imon = np.ones_like(iy_start)\n iday = np.ones_like(iy_start)\n ihr = np.zeros_like(iy_start)\n imin = np.zeros_like(iy_start)\n isec = np.zeros_like(y_frac)\n\n # Possible enhancement: use np.unique to only compute start, stop\n # for unique values of iy_start.\n scale = self.scale.upper().encode('ascii')\n jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,\n ihr, imin, isec)\n jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,\n ihr, imin, isec)\n\n t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')\n t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')\n t_frac = t_start + (t_end - t_start) * y_frac\n\n self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)\n\n @property\n def value(self):\n scale = self.scale.upper().encode('ascii')\n iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0\n self.jd1, self.jd2_filled)\n imon = np.ones_like(iy_start)\n iday = np.ones_like(iy_start)\n ihr = np.zeros_like(iy_start)\n imin = np.zeros_like(iy_start)\n isec = np.zeros_like(self.jd1)\n\n # Possible enhancement: use np.unique to only compute start, stop\n # for unique values of iy_start.\n scale = self.scale.upper().encode('ascii')\n jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,\n ihr, imin, isec)\n jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,\n ihr, imin, isec)\n\n dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)\n dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)\n decimalyear = iy_start + dt / dt_end\n\n return decimalyear\n\n\nclass TimeFromEpoch(TimeFormat):\n \"\"\"\n Base class for times that represent the interval from a particular\n epoch as a floating point multiple of a unit time interval (e.g. seconds\n or days).\n \"\"\"\n\n def __init__(self, val1, val2, scale, precision,\n in_subfmt, out_subfmt, from_jd=False):\n self.scale = scale\n # Initialize the reference epoch (a single time defined in subclasses)\n epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,\n format=self.epoch_format)\n self.epoch = epoch\n\n # Now create the TimeFormat object as normal\n super().__init__(val1, val2, scale, precision, in_subfmt, out_subfmt,\n from_jd)\n\n def set_jds(self, val1, val2):\n \"\"\"\n Initialize the internal jd1 and jd2 attributes given val1 and val2.\n For an TimeFromEpoch subclass like TimeUnix these will be floats giving\n the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).\n \"\"\"\n # Form new JDs based on epoch time + time from epoch (converted to JD).\n # One subtlety that might not be obvious is that 1.000 Julian days in\n # UTC can be 86400 or 86401 seconds. For the TimeUnix format the\n # assumption is that every day is exactly 86400 seconds, so this is, in\n # principle, doing the math incorrectly, *except* that it matches the\n # definition of Unix time which does not include leap seconds.\n\n # note: use divisor=1./self.unit, since this is either 1 or 1/86400,\n # and 1/86400 is not exactly representable as a float64, so multiplying\n # by that will cause rounding errors. 
(But inverting it as a float64\n # recovers the exact number)\n day, frac = day_frac(val1, val2, divisor=1. / self.unit)\n\n jd1 = self.epoch.jd1 + day\n jd2 = self.epoch.jd2 + frac\n\n # Create a temporary Time object corresponding to the new (jd1, jd2) in\n # the epoch scale (e.g. UTC for TimeUnix) then convert that to the\n # desired time scale for this object.\n #\n # A known limitation is that the transform from self.epoch_scale to\n # self.scale cannot involve any metadata like lat or lon.\n try:\n tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,\n format='jd'), self.scale)\n except Exception as err:\n raise ScaleValueError(\"Cannot convert from '{0}' epoch scale '{1}'\"\n \"to specified scale '{2}', got error:\\n{3}\"\n .format(self.name, self.epoch_scale,\n self.scale, err))\n\n self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)\n\n def to_value(self, parent=None):\n # Make sure that scale is the same as epoch scale so we can just\n # subtract the epoch and convert\n if self.scale != self.epoch_scale:\n if parent is None:\n raise ValueError('cannot compute value without parent Time object')\n try:\n tm = getattr(parent, self.epoch_scale)\n except Exception as err:\n raise ScaleValueError(\"Cannot convert from '{0}' epoch scale '{1}'\"\n \"to specified scale '{2}', got error:\\n{3}\"\n .format(self.name, self.epoch_scale,\n self.scale, err))\n\n jd1, jd2 = tm._time.jd1, tm._time.jd2\n else:\n jd1, jd2 = self.jd1, self.jd2\n\n time_from_epoch = ((jd1 - self.epoch.jd1) +\n (jd2 - self.epoch.jd2)) / self.unit\n\n return self.mask_if_needed(time_from_epoch)\n\n value = property(to_value)\n\n @property\n def _default_scale(self):\n return self.epoch_scale\n\n\nclass TimeUnix(TimeFromEpoch):\n \"\"\"\n Unix time: seconds from 1970-01-01 00:00:00 UTC.\n For example, 946684800.0 in Unix time is midnight on January 1, 2000.\n\n NOTE: this quantity is not exactly unix time and differs from the strict\n POSIX definition by up to 1 second on days with a leap second. POSIX\n unix time actually jumps backward by 1 second at midnight on leap second\n days while this class value is monotonically increasing at 86400 seconds\n per UTC day.\n \"\"\"\n name = 'unix'\n unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)\n epoch_val = '1970-01-01 00:00:00'\n epoch_val2 = None\n epoch_scale = 'utc'\n epoch_format = 'iso'\n\n\nclass TimeCxcSec(TimeFromEpoch):\n \"\"\"\n Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.\n For example, 63072064.184 is midnight on January 1, 2000.\n \"\"\"\n name = 'cxcsec'\n unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)\n epoch_val = '1998-01-01 00:00:00'\n epoch_val2 = None\n epoch_scale = 'tt'\n epoch_format = 'iso'\n\n\nclass TimeGPS(TimeFromEpoch):\n \"\"\"GPS time: seconds from 1980-01-06 00:00:00 UTC\n For example, 630720013.0 is midnight on January 1, 2000.\n\n Notes\n =====\n This implementation is strictly a representation of the number of seconds\n (including leap seconds) since midnight UTC on 1980-01-06. 
GPS can also be\n considered as a time scale which is ahead of TAI by a fixed offset\n (to within about 100 nanoseconds).\n\n For details, see http://tycho.usno.navy.mil/gpstt.html\n \"\"\"\n name = 'gps'\n unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)\n epoch_val = '1980-01-06 00:00:19'\n # above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai\n epoch_val2 = None\n epoch_scale = 'tai'\n epoch_format = 'iso'\n\n\nclass TimePlotDate(TimeFromEpoch):\n \"\"\"\n Matplotlib `~matplotlib.pyplot.plot_date` input:\n 1 + number of days from 0001-01-01 00:00:00 UTC\n\n This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`\n function::\n\n >>> import matplotlib.pyplot as plt\n >>> jyear = np.linspace(2000, 2001, 20)\n >>> t = Time(jyear, format='jyear', scale='utc')\n >>> plt.plot_date(t.plot_date, jyear)\n >>> plt.gcf().autofmt_xdate() # orient date labels at a slant\n >>> plt.draw()\n\n For example, 730120.0003703703 is midnight on January 1, 2000.\n \"\"\"\n # This corresponds to the zero reference time for matplotlib plot_date().\n # Note that TAI and UTC are equivalent at the reference time.\n name = 'plot_date'\n unit = 1.0\n epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1\n epoch_val2 = None\n epoch_scale = 'utc'\n epoch_format = 'jd'\n\n\nclass TimeUnique(TimeFormat):\n \"\"\"\n Base class for time formats that can uniquely create a time object\n without requiring an explicit format specifier. This class does\n nothing but provide inheritance to identify a class as unique.\n \"\"\"\n\n\nclass TimeAstropyTime(TimeUnique):\n \"\"\"\n Instantiate date from an Astropy Time object (or list thereof).\n\n This is purely for instantiating from a Time object. The output\n format is the same as the first time instance.\n \"\"\"\n name = 'astropy_time'\n\n def __new__(cls, val1, val2, scale, precision,\n in_subfmt, out_subfmt, from_jd=False):\n \"\"\"\n Use __new__ instead of __init__ to output a class instance that\n is the same as the class of the first Time object in the list.\n \"\"\"\n val1_0 = val1.flat[0]\n if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)\n for val in val1.flat)):\n raise TypeError('Input values for {0} class must all be same '\n 'astropy Time type.'.format(cls.name))\n\n if scale is None:\n scale = val1_0.scale\n if val1.shape:\n vals = [getattr(val, scale)._time for val in val1]\n jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])\n jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])\n else:\n val = getattr(val1_0, scale)._time\n jd1, jd2 = val.jd1, val.jd2\n\n OutTimeFormat = val1_0._time.__class__\n self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,\n from_jd=True)\n\n return self\n\n\nclass TimeDatetime(TimeUnique):\n \"\"\"\n Represent date as Python standard library `~datetime.datetime` object\n\n Example::\n\n >>> from astropy.time import Time\n >>> from datetime import datetime\n >>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')\n >>> t.iso\n '2000-01-02 12:00:00.000'\n >>> t.tt.datetime\n datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)\n \"\"\"\n name = 'datetime'\n\n def _check_val_type(self, val1, val2):\n # Note: don't care about val2 for this class\n if not all(isinstance(val, datetime.datetime) for val in val1.flat):\n raise TypeError('Input values for {0} class must be '\n 'datetime objects'.format(self.name))\n return val1, None\n\n def set_jds(self, val1, val2):\n \"\"\"Convert datetime object contained in val1 to 
jd1, jd2\"\"\"\n # Iterate through the datetime objects, getting year, month, etc.\n iterator = np.nditer([val1, None, None, None, None, None, None],\n flags=['refs_ok'],\n op_dtypes=[object] + 5*[np.intc] + [np.double])\n for val, iy, im, id, ihr, imin, dsec in iterator:\n dt = val.item()\n\n if dt.tzinfo is not None:\n dt = (dt - dt.utcoffset()).replace(tzinfo=None)\n\n iy[...] = dt.year\n im[...] = dt.month\n id[...] = dt.day\n ihr[...] = dt.hour\n imin[...] = dt.minute\n dsec[...] = dt.second + dt.microsecond / 1e6\n\n jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),\n *iterator.operands[1:])\n self.jd1, self.jd2 = day_frac(jd1, jd2)\n\n def to_value(self, timezone=None, parent=None):\n \"\"\"\n Convert to (potentially timezone-aware) `~datetime.datetime` object.\n\n If ``timezone`` is not ``None``, return a timezone-aware datetime\n object.\n\n Parameters\n ----------\n timezone : {`~datetime.tzinfo`, None} (optional)\n If not `None`, return timezone-aware datetime.\n\n Returns\n -------\n `~datetime.datetime`\n If ``timezone`` is not ``None``, output will be timezone-aware.\n \"\"\"\n if timezone is not None:\n if self._scale != 'utc':\n raise ScaleValueError(\"scale is {}, must be 'utc' when timezone \"\n \"is supplied.\".format(self._scale))\n\n # Rather than define a value property directly, we have a function,\n # since we want to be able to pass in timezone information.\n scale = self.scale.upper().encode('ascii')\n iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec\n self.jd1, self.jd2_filled)\n ihrs = ihmsfs['h']\n imins = ihmsfs['m']\n isecs = ihmsfs['s']\n ifracs = ihmsfs['f']\n iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],\n flags=['refs_ok'],\n op_dtypes=7*[iys.dtype] + [object])\n\n for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:\n if isec >= 60:\n raise ValueError('Time {} is within a leap second but datetime '\n 'does not support leap seconds'\n .format((iy, im, id, ihr, imin, isec, ifracsec)))\n if timezone is not None:\n out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,\n tzinfo=TimezoneInfo()).astimezone(timezone)\n else:\n out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)\n\n return self.mask_if_needed(iterator.operands[-1])\n\n value = property(to_value)\n\n\nclass TimezoneInfo(datetime.tzinfo):\n \"\"\"\n Subclass of the `~datetime.tzinfo` object, used in the\n to_datetime method to specify timezones.\n\n It may be safer in most cases to use a timezone database package like\n pytz rather than defining your own timezones - this class is mainly\n a workaround for users without pytz.\n \"\"\"\n @u.quantity_input(utc_offset=u.day, dst=u.day)\n def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None):\n \"\"\"\n Parameters\n ----------\n utc_offset : `~astropy.units.Quantity` (optional)\n Offset from UTC in days. Defaults to zero.\n dst : `~astropy.units.Quantity` (optional)\n Daylight Savings Time offset in days. 
Defaults to zero\n (no daylight savings).\n tzname : string, `None` (optional)\n Name of timezone\n\n Examples\n --------\n >>> from datetime import datetime\n >>> from astropy.time import TimezoneInfo # Specifies a timezone\n >>> import astropy.units as u\n >>> utc = TimezoneInfo() # Defaults to UTC\n >>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1\n >>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)\n >>> print(dt_aware)\n 2000-01-01 00:00:00+01:00\n >>> print(dt_aware.astimezone(utc))\n 1999-12-31 23:00:00+00:00\n \"\"\"\n if utc_offset == 0 and dst == 0 and tzname is None:\n tzname = 'UTC'\n self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))\n self._tzname = tzname\n self._dst = datetime.timedelta(dst.to_value(u.day))\n\n def utcoffset(self, dt):\n return self._utcoffset\n\n def tzname(self, dt):\n return str(self._tzname)\n\n def dst(self, dt):\n return self._dst\n\n\nclass TimeString(TimeUnique):\n \"\"\"\n Base class for string-like time representations.\n\n This class assumes that anything following the last decimal point to the\n right is a fraction of a second.\n\n This is a reference implementation can be made much faster with effort.\n \"\"\"\n\n def _check_val_type(self, val1, val2):\n # Note: don't care about val2 for these classes\n if val1.dtype.kind not in ('S', 'U'):\n raise TypeError('Input values for {0} class must be strings'\n .format(self.name))\n return val1, None\n\n def parse_string(self, timestr, subfmts):\n \"\"\"Read time from a single string, using a set of possible formats.\"\"\"\n # Datetime components required for conversion to JD by ERFA, along\n # with the default values.\n components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')\n defaults = (None, 1, 1, 0, 0, 0)\n # Assume that anything following \".\" on the right side is a\n # floating fraction of a second.\n try:\n idot = timestr.rindex('.')\n except Exception:\n fracsec = 0.0\n else:\n timestr, fracsec = timestr[:idot], timestr[idot:]\n fracsec = float(fracsec)\n\n for _, strptime_fmt_or_regex, _ in subfmts:\n if isinstance(strptime_fmt_or_regex, str):\n try:\n tm = time.strptime(timestr, strptime_fmt_or_regex)\n except ValueError:\n continue\n else:\n vals = [getattr(tm, 'tm_' + component)\n for component in components]\n\n else:\n tm = re.match(strptime_fmt_or_regex, timestr)\n if tm is None:\n continue\n tm = tm.groupdict()\n vals = [int(tm.get(component, default)) for component, default\n in zip(components, defaults)]\n\n # Add fractional seconds\n vals[-1] = vals[-1] + fracsec\n return vals\n else:\n raise ValueError('Time {0} does not match {1} format'\n .format(timestr, self.name))\n\n def set_jds(self, val1, val2):\n \"\"\"Parse the time strings contained in val1 and set jd1, jd2\"\"\"\n # Select subformats based on current self.in_subfmt\n subfmts = self._select_subfmts(self.in_subfmt)\n # Be liberal in what we accept: convert bytes to ascii.\n # Here .item() is needed for arrays with entries of unequal length,\n # to strip trailing 0 bytes.\n to_string = (str if val1.dtype.kind == 'U' else\n lambda x: str(x.item(), encoding='ascii'))\n iterator = np.nditer([val1, None, None, None, None, None, None],\n op_dtypes=[val1.dtype] + 5*[np.intc] + [np.double])\n for val, iy, im, id, ihr, imin, dsec in iterator:\n val = to_string(val)\n iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] 
= (\n self.parse_string(val, subfmts))\n\n jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),\n *iterator.operands[1:])\n self.jd1, self.jd2 = day_frac(jd1, jd2)\n\n def str_kwargs(self):\n \"\"\"\n Generator that yields a dict of values corresponding to the\n calendar date and time for the internal JD values.\n \"\"\"\n scale = self.scale.upper().encode('ascii'),\n iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,\n self.jd1, self.jd2_filled)\n\n # Get the str_fmt element of the first allowed output subformat\n _, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]\n\n if '{yday:' in str_fmt:\n has_yday = True\n else:\n has_yday = False\n yday = None\n\n ihrs = ihmsfs['h']\n imins = ihmsfs['m']\n isecs = ihmsfs['s']\n ifracs = ihmsfs['f']\n for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(\n [iys, ims, ids, ihrs, imins, isecs, ifracs]):\n if has_yday:\n yday = datetime.datetime(iy, im, id).timetuple().tm_yday\n\n yield {'year': int(iy), 'mon': int(im), 'day': int(id),\n 'hour': int(ihr), 'min': int(imin), 'sec': int(isec),\n 'fracsec': int(ifracsec), 'yday': yday}\n\n def format_string(self, str_fmt, **kwargs):\n \"\"\"Write time to a string using a given format.\n\n By default, just interprets str_fmt as a format string,\n but subclasses can add to this.\n \"\"\"\n return str_fmt.format(**kwargs)\n\n @property\n def value(self):\n # Select the first available subformat based on current\n # self.out_subfmt\n subfmts = self._select_subfmts(self.out_subfmt)\n _, _, str_fmt = subfmts[0]\n\n # TODO: fix this ugly hack\n if self.precision > 0 and str_fmt.endswith('{sec:02d}'):\n str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'\n\n # Try to optimize this later. Can't pre-allocate because length of\n # output could change, e.g. 
year rolls from 999 to 1000.\n outs = []\n for kwargs in self.str_kwargs():\n outs.append(str(self.format_string(str_fmt, **kwargs)))\n\n return np.array(outs).reshape(self.jd1.shape)\n\n def _select_subfmts(self, pattern):\n \"\"\"\n Return a list of subformats where name matches ``pattern`` using\n fnmatch.\n \"\"\"\n\n fnmatchcase = fnmatch.fnmatchcase\n subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]\n if len(subfmts) == 0:\n raise ValueError('No subformats match {0}'.format(pattern))\n return subfmts\n\n\nclass TimeISO(TimeString):\n \"\"\"\n ISO 8601 compliant date-time format \"YYYY-MM-DD HH:MM:SS.sss...\".\n For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.\n\n The allowed subformats are:\n\n - 'date_hms': date + hours, mins, secs (and optional fractional secs)\n - 'date_hm': date + hours, mins\n - 'date': date\n \"\"\"\n\n name = 'iso'\n subfmts = (('date_hms',\n '%Y-%m-%d %H:%M:%S',\n # XXX To Do - use strftime for output ??\n '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),\n ('date_hm',\n '%Y-%m-%d %H:%M',\n '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),\n ('date',\n '%Y-%m-%d',\n '{year:d}-{mon:02d}-{day:02d}'))\n\n def parse_string(self, timestr, subfmts):\n # Handle trailing 'Z' for UTC time\n if timestr.endswith('Z'):\n if self.scale != 'utc':\n raise ValueError(\"Time input terminating in 'Z' must have \"\n \"scale='UTC'\")\n timestr = timestr[:-1]\n return super().parse_string(timestr, subfmts)\n\n\nclass TimeISOT(TimeISO):\n \"\"\"\n ISO 8601 compliant date-time format \"YYYY-MM-DDTHH:MM:SS.sss...\".\n This is the same as TimeISO except for a \"T\" instead of space between\n the date and time.\n For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.\n\n The allowed subformats are:\n\n - 'date_hms': date + hours, mins, secs (and optional fractional secs)\n - 'date_hm': date + hours, mins\n - 'date': date\n \"\"\"\n\n name = 'isot'\n subfmts = (('date_hms',\n '%Y-%m-%dT%H:%M:%S',\n '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),\n ('date_hm',\n '%Y-%m-%dT%H:%M',\n '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),\n ('date',\n '%Y-%m-%d',\n '{year:d}-{mon:02d}-{day:02d}'))\n\n\nclass TimeYearDayTime(TimeISO):\n \"\"\"\n Year, day-of-year and time as \"YYYY:DOY:HH:MM:SS.sss...\".\n The day-of-year (DOY) goes from 001 to 365 (366 in leap years).\n For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.\n\n The allowed subformats are:\n\n - 'date_hms': date + hours, mins, secs (and optional fractional secs)\n - 'date_hm': date + hours, mins\n - 'date': date\n \"\"\"\n\n name = 'yday'\n subfmts = (('date_hms',\n '%Y:%j:%H:%M:%S',\n '{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),\n ('date_hm',\n '%Y:%j:%H:%M',\n '{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),\n ('date',\n '%Y:%j',\n '{year:d}:{yday:03d}'))\n\n\nclass TimeDatetime64(TimeISOT):\n name = 'datetime64'\n\n def _check_val_type(self, val1, val2):\n # Note: don't care about val2 for this class`\n if not val1.dtype.kind == 'M':\n raise TypeError('Input values for {0} class must be '\n 'datetime64 objects'.format(self.name))\n return val1, None\n\n def set_jds(self, val1, val2):\n # If there are any masked values in the ``val1`` datetime64 array\n # ('NaT') then stub them with a valid date so downstream parse_string\n # will work. 
The value under the mask is arbitrary but a \"modern\" date\n # is good.\n mask = np.isnat(val1)\n masked = np.any(mask)\n if masked:\n val1 = val1.copy()\n val1[mask] = '2000'\n\n # Make sure M(onth) and Y(ear) dates will parse and convert to bytestring\n if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']:\n val1 = val1.astype('datetime64[D]')\n val1 = val1.astype('S')\n\n # Standard ISO string parsing now\n super().set_jds(val1, val2)\n\n # Finally apply mask if necessary\n if masked:\n self.jd2[mask] = np.nan\n\n @property\n def value(self):\n precision = self.precision\n self.precision = 9\n ret = super().value\n self.precision = precision\n return ret.astype('datetime64')\n\n\nclass TimeFITS(TimeString):\n \"\"\"\n FITS format: \"[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]\".\n\n ISOT but can give signed five-digit year (mostly for negative years);\n\n The allowed subformats are:\n\n - 'date_hms': date + hours, mins, secs (and optional fractional secs)\n - 'date': date\n - 'longdate_hms': as 'date_hms', but with signed 5-digit year\n - 'longdate': as 'date', but with signed 5-digit year\n\n See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).\n \"\"\"\n name = 'fits'\n subfmts = (\n ('date_hms',\n (r'(?P<year>\\d{4})-(?P<mon>\\d\\d)-(?P<mday>\\d\\d)T'\n r'(?P<hour>\\d\\d):(?P<min>\\d\\d):(?P<sec>\\d\\d(\\.\\d*)?)'),\n '{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),\n ('date',\n r'(?P<year>\\d{4})-(?P<mon>\\d\\d)-(?P<mday>\\d\\d)',\n '{year:04d}-{mon:02d}-{day:02d}'),\n ('longdate_hms',\n (r'(?P<year>[+-]\\d{5})-(?P<mon>\\d\\d)-(?P<mday>\\d\\d)T'\n r'(?P<hour>\\d\\d):(?P<min>\\d\\d):(?P<sec>\\d\\d(\\.\\d*)?)'),\n '{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),\n ('longdate',\n r'(?P<year>[+-]\\d{5})-(?P<mon>\\d\\d)-(?P<mday>\\d\\d)',\n '{year:+06d}-{mon:02d}-{day:02d}'))\n # Add the regex that parses the scale and possible realization.\n # Support for this is deprecated. Read old style but no longer write\n # in this style.\n subfmts = tuple(\n (subfmt[0],\n subfmt[1] + r'(\\((?P<scale>\\w+)(\\((?P<realization>\\w+)\\))?\\))?',\n subfmt[2]) for subfmt in subfmts)\n\n def parse_string(self, timestr, subfmts):\n \"\"\"Read time and deprecated scale if present\"\"\"\n # Try parsing with any of the allowed sub-formats.\n for _, regex, _ in subfmts:\n tm = re.match(regex, timestr)\n if tm:\n break\n else:\n raise ValueError('Time {0} does not match {1} format'\n .format(timestr, self.name))\n tm = tm.groupdict()\n # Scale and realization are deprecated and strings in this form\n # are no longer created. We issue a warning but still use the value.\n if tm['scale'] is not None:\n warnings.warn(\"FITS time strings should no longer have embedded time scale.\",\n AstropyDeprecationWarning)\n # If a scale was given, translate from a possible deprecated\n # timescale identifier to the scale used by Time.\n fits_scale = tm['scale'].upper()\n scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())\n if scale not in TIME_SCALES:\n raise ValueError(\"Scale {0!r} is not in the allowed scales {1}\"\n .format(scale, sorted(TIME_SCALES)))\n # If no scale was given in the initialiser, set the scale to\n # that given in the string. 
Realization is ignored\n # and is only supported to allow old-style strings to be\n # parsed.\n if self._scale is None:\n self._scale = scale\n if scale != self.scale:\n raise ValueError(\"Input strings for {0} class must all \"\n \"have consistent time scales.\"\n .format(self.name))\n return [int(tm['year']), int(tm['mon']), int(tm['mday']),\n int(tm.get('hour', 0)), int(tm.get('min', 0)),\n float(tm.get('sec', 0.))]\n\n @property\n def value(self):\n \"\"\"Convert times to strings, using signed 5 digit if necessary.\"\"\"\n if 'long' not in self.out_subfmt:\n # If we have times before year 0 or after year 9999, we can\n # output only in a \"long\" format, using signed 5-digit years.\n jd = self.jd1 + self.jd2\n if jd.min() < 1721425.5 or jd.max() >= 5373484.5:\n self.out_subfmt = 'long' + self.out_subfmt\n return super().value\n\n\nclass TimeEpochDate(TimeFormat):\n \"\"\"\n Base class for support floating point Besselian and Julian epoch dates\n \"\"\"\n _default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.\n\n def set_jds(self, val1, val2):\n self._check_scale(self._scale) # validate scale.\n epoch_to_jd = getattr(erfa, self.epoch_to_jd)\n jd1, jd2 = epoch_to_jd(val1 + val2)\n self.jd1, self.jd2 = day_frac(jd1, jd2)\n\n @property\n def value(self):\n jd_to_epoch = getattr(erfa, self.jd_to_epoch)\n return jd_to_epoch(self.jd1, self.jd2)\n\n\nclass TimeBesselianEpoch(TimeEpochDate):\n \"\"\"Besselian Epoch year as floating point value(s) like 1950.0\"\"\"\n name = 'byear'\n epoch_to_jd = 'epb2jd'\n jd_to_epoch = 'epb'\n\n def _check_val_type(self, val1, val2):\n \"\"\"Input value validation, typically overridden by derived classes\"\"\"\n if hasattr(val1, 'to') and hasattr(val1, 'unit'):\n raise ValueError(\"Cannot use Quantities for 'byear' format, \"\n \"as the interpretation would be ambiguous. \"\n \"Use float with Besselian year instead. \")\n\n return super()._check_val_type(val1, val2)\n\n\nclass TimeJulianEpoch(TimeEpochDate):\n \"\"\"Julian Epoch year as floating point value(s) like 2000.0\"\"\"\n name = 'jyear'\n unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities\n epoch_to_jd = 'epj2jd'\n jd_to_epoch = 'epj'\n\n\nclass TimeEpochDateString(TimeString):\n \"\"\"\n Base class to support string Besselian and Julian epoch dates\n such as 'B1950.0' or 'J2000.0' respectively.\n \"\"\"\n _default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.\n\n def set_jds(self, val1, val2):\n epoch_prefix = self.epoch_prefix\n # Be liberal in what we accept: convert bytes to ascii.\n to_string = (str if val1.dtype.kind == 'U' else\n lambda x: str(x.item(), encoding='ascii'))\n iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double])\n for val, years in iterator:\n try:\n time_str = to_string(val)\n epoch_type, year_str = time_str[0], time_str[1:]\n year = float(year_str)\n if epoch_type.upper() != epoch_prefix:\n raise ValueError\n except (IndexError, ValueError, UnicodeEncodeError):\n raise ValueError('Time {0} does not match {1} format'\n .format(time_str, self.name))\n else:\n years[...] = year\n\n self._check_scale(self._scale) # validate scale.\n epoch_to_jd = getattr(erfa, self.epoch_to_jd)\n jd1, jd2 = epoch_to_jd(iterator.operands[-1])\n self.jd1, self.jd2 = day_frac(jd1, jd2)\n\n @property\n def value(self):\n jd_to_epoch = getattr(erfa, self.jd_to_epoch)\n years = jd_to_epoch(self.jd1, self.jd2)\n # Use old-style format since it is a factor of 2 faster\n str_fmt = self.epoch_prefix + '%.' 
+ str(self.precision) + 'f'\n outs = [str_fmt % year for year in years.flat]\n return np.array(outs).reshape(self.jd1.shape)\n\n\nclass TimeBesselianEpochString(TimeEpochDateString):\n \"\"\"Besselian Epoch year as string value(s) like 'B1950.0'\"\"\"\n name = 'byear_str'\n epoch_to_jd = 'epb2jd'\n jd_to_epoch = 'epb'\n epoch_prefix = 'B'\n\n\nclass TimeJulianEpochString(TimeEpochDateString):\n \"\"\"Julian Epoch year as string value(s) like 'J2000.0'\"\"\"\n name = 'jyear_str'\n epoch_to_jd = 'epj2jd'\n jd_to_epoch = 'epj'\n epoch_prefix = 'J'\n\n\nclass TimeDeltaFormatMeta(TimeFormatMeta):\n _registry = TIME_DELTA_FORMATS\n\n\nclass TimeDeltaFormat(TimeFormat, metaclass=TimeDeltaFormatMeta):\n \"\"\"Base class for time delta representations\"\"\"\n\n def _check_scale(self, scale):\n \"\"\"\n Check that the scale is in the allowed list of scales, or is `None`\n \"\"\"\n if scale is not None and scale not in TIME_DELTA_SCALES:\n raise ScaleValueError(\"Scale value '{0}' not in \"\n \"allowed values {1}\"\n .format(scale, TIME_DELTA_SCALES))\n\n return scale\n\n def set_jds(self, val1, val2):\n self._check_scale(self._scale) # Validate scale.\n self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)\n\n @property\n def value(self):\n return (self.jd1 + self.jd2) / self.unit\n\n\nclass TimeDeltaSec(TimeDeltaFormat):\n \"\"\"Time delta in SI seconds\"\"\"\n name = 'sec'\n unit = 1. / erfa.DAYSEC # for quantity input\n\n\nclass TimeDeltaJD(TimeDeltaFormat):\n \"\"\"Time delta in Julian days (86400 SI seconds)\"\"\"\n name = 'jd'\n unit = 1.\n\n\nclass TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):\n \"\"\"Time delta in datetime.timedelta\"\"\"\n name = 'datetime'\n\n def _check_val_type(self, val1, val2):\n # Note: don't care about val2 for this class\n if not all(isinstance(val, datetime.timedelta) for val in val1.flat):\n raise TypeError('Input values for {0} class must be '\n 'datetime.timedelta objects'.format(self.name))\n return val1, None\n\n def set_jds(self, val1, val2):\n self._check_scale(self._scale) # Validate scale.\n iterator = np.nditer([val1, None],\n flags=['refs_ok'],\n op_dtypes=[object] + [np.double])\n\n for val, sec in iterator:\n sec[...] = val.item().total_seconds()\n\n self.jd1, self.jd2 = day_frac(iterator.operands[-1], 0.0,\n divisor=erfa.DAYSEC)\n\n @property\n def value(self):\n iterator = np.nditer([self.jd1 + self.jd2, None],\n flags=['refs_ok'],\n op_dtypes=[self.jd1.dtype] + [object])\n\n for jd, out in iterator:\n out[...] 
= datetime.timedelta(days=jd.item())\n\n return self.mask_if_needed(iterator.operands[-1])\n\n\nfrom .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError\n", "# coding: utf-8\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\n Test the Quantity class and related.\n\"\"\"\n\nimport copy\nimport pickle\nimport decimal\nfrom fractions import Fraction\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import (assert_allclose, assert_array_equal,\n assert_array_almost_equal)\n\nfrom astropy.tests.helper import catch_warnings, raises\nfrom astropy.utils import isiterable, minversion\nfrom astropy.utils.compat import NUMPY_LT_1_14\nfrom astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning\nfrom astropy import units as u\nfrom astropy.units.quantity import _UNIT_NOT_INITIALISED\n\ntry:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n from distutils.version import LooseVersion\n MATPLOTLIB_LT_15 = LooseVersion(matplotlib.__version__) < LooseVersion(\"1.5\")\n HAS_MATPLOTLIB = True\nexcept ImportError:\n HAS_MATPLOTLIB = False\n\n\n\"\"\" The Quantity class will represent a number + unit + uncertainty \"\"\"\n\n\nclass TestQuantityCreation:\n\n def test_1(self):\n # create objects through operations with Unit objects:\n\n quantity = 11.42 * u.meter # returns a Quantity object\n assert isinstance(quantity, u.Quantity)\n quantity = u.meter * 11.42 # returns a Quantity object\n assert isinstance(quantity, u.Quantity)\n\n quantity = 11.42 / u.meter\n assert isinstance(quantity, u.Quantity)\n quantity = u.meter / 11.42\n assert isinstance(quantity, u.Quantity)\n\n quantity = 11.42 * u.meter / u.second\n assert isinstance(quantity, u.Quantity)\n\n with pytest.raises(TypeError):\n quantity = 182.234 + u.meter\n\n with pytest.raises(TypeError):\n quantity = 182.234 - u.meter\n\n with pytest.raises(TypeError):\n quantity = 182.234 % u.meter\n\n def test_2(self):\n\n # create objects using the Quantity constructor:\n q1 = u.Quantity(11.412, unit=u.meter)\n q2 = u.Quantity(21.52, \"cm\")\n q3 = u.Quantity(11.412)\n\n # By default quantities that don't specify a unit are unscaled\n # dimensionless\n assert q3.unit == u.Unit(1)\n\n with pytest.raises(TypeError):\n q4 = u.Quantity(object(), unit=u.m)\n\n def test_3(self):\n # with pytest.raises(u.UnitsError):\n with pytest.raises(ValueError): # Until @mdboom fixes the errors in units\n q1 = u.Quantity(11.412, unit=\"testingggg\")\n\n def test_nan_inf(self):\n # Not-a-number\n q = u.Quantity('nan', unit='cm')\n assert np.isnan(q.value)\n\n q = u.Quantity('NaN', unit='cm')\n assert np.isnan(q.value)\n\n q = u.Quantity('-nan', unit='cm') # float() allows this\n assert np.isnan(q.value)\n\n q = u.Quantity('nan cm')\n assert np.isnan(q.value)\n assert q.unit == u.cm\n\n # Infinity\n q = u.Quantity('inf', unit='cm')\n assert np.isinf(q.value)\n\n q = u.Quantity('-inf', unit='cm')\n assert np.isinf(q.value)\n\n q = u.Quantity('inf cm')\n assert np.isinf(q.value)\n assert q.unit == u.cm\n\n q = u.Quantity('Infinity', unit='cm') # float() allows this\n assert np.isinf(q.value)\n\n # make sure these strings don't parse...\n with pytest.raises(TypeError):\n q = u.Quantity('', unit='cm')\n\n with pytest.raises(TypeError):\n q = u.Quantity('spam', unit='cm')\n\n def test_unit_property(self):\n # test getting and setting 'unit' attribute\n q1 = u.Quantity(11.4, unit=u.meter)\n\n with pytest.raises(AttributeError):\n q1.unit = u.cm\n\n def test_preserve_dtype(self):\n \"\"\"Test that 
if an explicit dtype is given, it is used, while if not,\n numbers are converted to float (including decimal.Decimal, which\n numpy converts to an object; closes #1419)\n \"\"\"\n # If dtype is specified, use it, but if not, convert int, bool to float\n q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)\n assert q1.dtype == int\n\n q2 = u.Quantity(q1)\n assert q2.dtype == float\n assert q2.value == float(q1.value)\n assert q2.unit == q1.unit\n\n # but we should preserve float32\n a3 = np.array([1., 2.], dtype=np.float32)\n q3 = u.Quantity(a3, u.yr)\n assert q3.dtype == a3.dtype\n # items stored as objects by numpy should be converted to float\n # by default\n q4 = u.Quantity(decimal.Decimal('10.25'), u.m)\n assert q4.dtype == float\n\n q5 = u.Quantity(decimal.Decimal('10.25'), u.m, dtype=object)\n assert q5.dtype == object\n\n def test_copy(self):\n\n # By default, a new quantity is constructed, but not if copy=False\n\n a = np.arange(10.)\n\n q0 = u.Quantity(a, unit=u.m / u.s)\n assert q0.base is not a\n\n q1 = u.Quantity(a, unit=u.m / u.s, copy=False)\n assert q1.base is a\n\n q2 = u.Quantity(q0)\n assert q2 is not q0\n assert q2.base is not q0.base\n\n q2 = u.Quantity(q0, copy=False)\n assert q2 is q0\n assert q2.base is q0.base\n\n q3 = u.Quantity(q0, q0.unit, copy=False)\n assert q3 is q0\n assert q3.base is q0.base\n\n q4 = u.Quantity(q0, u.cm / u.s, copy=False)\n assert q4 is not q0\n assert q4.base is not q0.base\n\n def test_subok(self):\n \"\"\"Test subok can be used to keep class, or to insist on Quantity\"\"\"\n class MyQuantitySubclass(u.Quantity):\n pass\n\n myq = MyQuantitySubclass(np.arange(10.), u.m)\n # try both with and without changing the unit\n assert type(u.Quantity(myq)) is u.Quantity\n assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass\n assert type(u.Quantity(myq, u.km)) is u.Quantity\n assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass\n\n def test_order(self):\n \"\"\"Test that order is correctly propagated to np.array\"\"\"\n ac = np.array(np.arange(10.), order='C')\n qcc = u.Quantity(ac, u.m, order='C')\n assert qcc.flags['C_CONTIGUOUS']\n qcf = u.Quantity(ac, u.m, order='F')\n assert qcf.flags['F_CONTIGUOUS']\n qca = u.Quantity(ac, u.m, order='A')\n assert qca.flags['C_CONTIGUOUS']\n # check it works also when passing in a quantity\n assert u.Quantity(qcc, order='C').flags['C_CONTIGUOUS']\n assert u.Quantity(qcc, order='A').flags['C_CONTIGUOUS']\n assert u.Quantity(qcc, order='F').flags['F_CONTIGUOUS']\n\n af = np.array(np.arange(10.), order='F')\n qfc = u.Quantity(af, u.m, order='C')\n assert qfc.flags['C_CONTIGUOUS']\n qff = u.Quantity(ac, u.m, order='F')\n assert qff.flags['F_CONTIGUOUS']\n qfa = u.Quantity(af, u.m, order='A')\n assert qfa.flags['F_CONTIGUOUS']\n assert u.Quantity(qff, order='C').flags['C_CONTIGUOUS']\n assert u.Quantity(qff, order='A').flags['F_CONTIGUOUS']\n assert u.Quantity(qff, order='F').flags['F_CONTIGUOUS']\n\n def test_ndmin(self):\n \"\"\"Test that ndmin is correctly propagated to np.array\"\"\"\n a = np.arange(10.)\n q1 = u.Quantity(a, u.m, ndmin=1)\n assert q1.ndim == 1 and q1.shape == (10,)\n q2 = u.Quantity(a, u.m, ndmin=2)\n assert q2.ndim == 2 and q2.shape == (1, 10)\n # check it works also when passing in a quantity\n q3 = u.Quantity(q1, u.m, ndmin=3)\n assert q3.ndim == 3 and q3.shape == (1, 1, 10)\n\n def test_non_quantity_with_unit(self):\n \"\"\"Test that unit attributes in objects get recognized.\"\"\"\n class MyQuantityLookalike(np.ndarray):\n pass\n\n a = np.arange(3.)\n mylookalike = 
a.copy().view(MyQuantityLookalike)\n mylookalike.unit = 'm'\n q1 = u.Quantity(mylookalike)\n assert isinstance(q1, u.Quantity)\n assert q1.unit is u.m\n assert np.all(q1.value == a)\n\n q2 = u.Quantity(mylookalike, u.mm)\n assert q2.unit is u.mm\n assert np.all(q2.value == 1000.*a)\n\n q3 = u.Quantity(mylookalike, copy=False)\n assert np.all(q3.value == mylookalike)\n q3[2] = 0\n assert q3[2] == 0.\n assert mylookalike[2] == 0.\n\n mylookalike = a.copy().view(MyQuantityLookalike)\n mylookalike.unit = u.m\n q4 = u.Quantity(mylookalike, u.mm, copy=False)\n q4[2] = 0\n assert q4[2] == 0.\n assert mylookalike[2] == 2.\n\n mylookalike.unit = 'nonsense'\n with pytest.raises(TypeError):\n u.Quantity(mylookalike)\n\n def test_creation_via_view(self):\n # This works but is no better than 1. * u.m\n q1 = 1. << u.m\n assert isinstance(q1, u.Quantity)\n assert q1.unit == u.m\n assert q1.value == 1.\n # With an array, we get an actual view.\n a2 = np.arange(10.)\n q2 = a2 << u.m / u.s\n assert isinstance(q2, u.Quantity)\n assert q2.unit == u.m / u.s\n assert np.all(q2.value == a2)\n a2[9] = 0.\n assert np.all(q2.value == a2)\n # But with a unit change we get a copy.\n q3 = q2 << u.mm / u.s\n assert isinstance(q3, u.Quantity)\n assert q3.unit == u.mm / u.s\n assert np.all(q3.value == a2 * 1000.)\n a2[8] = 0.\n assert q3[8].value == 8000.\n # Without a unit change, we do get a view.\n q4 = q2 << q2.unit\n a2[7] = 0.\n assert np.all(q4.value == a2)\n with pytest.raises(u.UnitsError):\n q2 << u.s\n # But one can do an in-place unit change.\n a2_copy = a2.copy()\n q2 <<= u.mm / u.s\n assert q2.unit == u.mm / u.s\n # Of course, this changes a2 as well.\n assert np.all(q2.value == a2)\n # Sanity check on the values.\n assert np.all(q2.value == a2_copy * 1000.)\n a2[8] = -1.\n # Using quantities, one can also work with strings.\n q5 = q2 << 'km/hr'\n assert q5.unit == u.km / u.hr\n assert np.all(q5 == q2)\n # Finally, we can use scalar quantities as units.\n not_quite_a_foot = 30. * u.cm\n a6 = np.arange(5.)\n q6 = a6 << not_quite_a_foot\n assert q6.unit == u.Unit(not_quite_a_foot)\n assert np.all(q6.to_value(u.cm) == 30. * a6)\n\n def test_rshift_warns(self):\n with pytest.raises(TypeError), \\\n catch_warnings() as warning_lines:\n 1 >> u.m\n assert len(warning_lines) == 1\n assert warning_lines[0].category == AstropyWarning\n assert 'is not implemented' in str(warning_lines[0].message)\n q = 1. * u.km\n with pytest.raises(TypeError), \\\n catch_warnings() as warning_lines:\n q >> u.m\n assert len(warning_lines) == 1\n assert warning_lines[0].category == AstropyWarning\n assert 'is not implemented' in str(warning_lines[0].message)\n with pytest.raises(TypeError), \\\n catch_warnings() as warning_lines:\n q >>= u.m\n assert len(warning_lines) == 1\n assert warning_lines[0].category == AstropyWarning\n assert 'is not implemented' in str(warning_lines[0].message)\n with pytest.raises(TypeError), \\\n catch_warnings() as warning_lines:\n 1. 
>> q\n assert len(warning_lines) == 1\n assert warning_lines[0].category == AstropyWarning\n assert 'is not implemented' in str(warning_lines[0].message)\n\n\nclass TestQuantityOperations:\n q1 = u.Quantity(11.42, u.meter)\n q2 = u.Quantity(8.0, u.centimeter)\n\n def test_addition(self):\n # Take units from left object, q1\n new_quantity = self.q1 + self.q2\n assert new_quantity.value == 11.5\n assert new_quantity.unit == u.meter\n\n # Take units from left object, q2\n new_quantity = self.q2 + self.q1\n assert new_quantity.value == 1150.0\n assert new_quantity.unit == u.centimeter\n\n new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)\n assert new_q.unit == u.m\n assert new_q.value == 15000.1\n\n def test_subtraction(self):\n # Take units from left object, q1\n new_quantity = self.q1 - self.q2\n assert new_quantity.value == 11.34\n assert new_quantity.unit == u.meter\n\n # Take units from left object, q2\n new_quantity = self.q2 - self.q1\n assert new_quantity.value == -1134.0\n assert new_quantity.unit == u.centimeter\n\n def test_multiplication(self):\n # Take units from left object, q1\n new_quantity = self.q1 * self.q2\n assert new_quantity.value == 91.36\n assert new_quantity.unit == (u.meter * u.centimeter)\n\n # Take units from left object, q2\n new_quantity = self.q2 * self.q1\n assert new_quantity.value == 91.36\n assert new_quantity.unit == (u.centimeter * u.meter)\n\n # Multiply with a number\n new_quantity = 15. * self.q1\n assert new_quantity.value == 171.3\n assert new_quantity.unit == u.meter\n\n # Multiply with a number\n new_quantity = self.q1 * 15.\n assert new_quantity.value == 171.3\n assert new_quantity.unit == u.meter\n\n def test_division(self):\n # Take units from left object, q1\n new_quantity = self.q1 / self.q2\n assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)\n assert new_quantity.unit == (u.meter / u.centimeter)\n\n # Take units from left object, q2\n new_quantity = self.q2 / self.q1\n assert_array_almost_equal(new_quantity.value, 0.70052539404553416,\n decimal=16)\n assert new_quantity.unit == (u.centimeter / u.meter)\n\n q1 = u.Quantity(11.4, unit=u.meter)\n q2 = u.Quantity(10.0, unit=u.second)\n new_quantity = q1 / q2\n assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)\n assert new_quantity.unit == (u.meter / u.second)\n\n # divide with a number\n new_quantity = self.q1 / 10.\n assert new_quantity.value == 1.142\n assert new_quantity.unit == u.meter\n\n # divide with a number\n new_quantity = 11.42 / self.q1\n assert new_quantity.value == 1.\n assert new_quantity.unit == u.Unit(\"1/m\")\n\n def test_commutativity(self):\n \"\"\"Regression test for issue #587.\"\"\"\n\n new_q = u.Quantity(11.42, 'm*s')\n\n assert self.q1 * u.s == u.s * self.q1 == new_q\n assert self.q1 / u.s == u.Quantity(11.42, 'm/s')\n assert u.s / self.q1 == u.Quantity(1 / 11.42, 's/m')\n\n def test_power(self):\n # raise quantity to a power\n new_quantity = self.q1 ** 2\n assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)\n assert new_quantity.unit == u.Unit(\"m^2\")\n\n new_quantity = self.q1 ** 3\n assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)\n assert new_quantity.unit == u.Unit(\"m^3\")\n\n def test_matrix_multiplication(self):\n a = np.eye(3)\n q = a * u.m\n result1 = q @ a\n assert np.all(result1 == q)\n result2 = a @ q\n assert np.all(result2 == q)\n result3 = q @ q\n assert np.all(result3 == a * u.m ** 2)\n # less trivial case.\n q2 = np.array([[[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]],\n [[0., 1., 
0.],\n [0., 0., 1.],\n [1., 0., 0.]],\n [[0., 0., 1.],\n [1., 0., 0.],\n [0., 1., 0.]]]) / u.s\n result4 = q @ q2\n assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)\n\n def test_unary(self):\n\n # Test the minus unary operator\n\n new_quantity = -self.q1\n assert new_quantity.value == -self.q1.value\n assert new_quantity.unit == self.q1.unit\n\n new_quantity = -(-self.q1)\n assert new_quantity.value == self.q1.value\n assert new_quantity.unit == self.q1.unit\n\n # Test the plus unary operator\n\n new_quantity = +self.q1\n assert new_quantity.value == self.q1.value\n assert new_quantity.unit == self.q1.unit\n\n def test_abs(self):\n\n q = 1. * u.m / u.s\n new_quantity = abs(q)\n assert new_quantity.value == q.value\n assert new_quantity.unit == q.unit\n\n q = -1. * u.m / u.s\n new_quantity = abs(q)\n assert new_quantity.value == -q.value\n assert new_quantity.unit == q.unit\n\n def test_incompatible_units(self):\n \"\"\" When trying to add or subtract units that aren't compatible, throw an error \"\"\"\n\n q1 = u.Quantity(11.412, unit=u.meter)\n q2 = u.Quantity(21.52, unit=u.second)\n\n with pytest.raises(u.UnitsError):\n new_q = q1 + q2\n\n def test_non_number_type(self):\n q1 = u.Quantity(11.412, unit=u.meter)\n type_err_msg = (\"Unsupported operand type(s) for ufunc add: \"\n \"'Quantity' and 'dict'\")\n with pytest.raises(TypeError) as exc:\n q1 + {'a': 1}\n assert exc.value.args[0] == type_err_msg\n\n with pytest.raises(TypeError):\n q1 + u.meter\n\n def test_dimensionless_operations(self):\n # test conversion to dimensionless\n dq = 3. * u.m / u.km\n dq1 = dq + 1. * u.mm / u.km\n assert dq1.value == 3.001\n assert dq1.unit == dq.unit\n\n dq2 = dq + 1.\n assert dq2.value == 1.003\n assert dq2.unit == u.dimensionless_unscaled\n\n # this test will check that operations with dimensionless Quantities\n # don't work\n with pytest.raises(u.UnitsError):\n self.q1 + u.Quantity(0.1, unit=u.Unit(\"\"))\n\n with pytest.raises(u.UnitsError):\n self.q1 - u.Quantity(0.1, unit=u.Unit(\"\"))\n\n # and test that scaling of integers works\n q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)\n q2 = q + np.array([4, 5, 6])\n assert q2.unit == u.dimensionless_unscaled\n assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))\n # but not if doing it inplace\n with pytest.raises(TypeError):\n q += np.array([1, 2, 3])\n # except if it is actually possible\n q = np.array([1, 2, 3]) * u.km / u.m\n q += np.array([4, 5, 6])\n assert q.unit == u.dimensionless_unscaled\n assert np.all(q.value == np.array([1004, 2005, 3006]))\n\n def test_complicated_operation(self):\n \"\"\" Perform a more complicated test \"\"\"\n from astropy.units import imperial\n\n # Multiple units\n distance = u.Quantity(15., u.meter)\n time = u.Quantity(11., u.second)\n\n velocity = (distance / time).to(imperial.mile / u.hour)\n assert_array_almost_equal(\n velocity.value, 3.05037, decimal=5)\n\n G = u.Quantity(6.673E-11, u.m ** 3 / u.kg / u.s ** 2)\n new_q = ((1. / (4. 
* np.pi * G)).to(u.pc ** -3 / u.s ** -2 * u.kg))\n\n # Area\n side1 = u.Quantity(11., u.centimeter)\n side2 = u.Quantity(7., u.centimeter)\n area = side1 * side2\n assert_array_almost_equal(area.value, 77., decimal=15)\n assert area.unit == u.cm * u.cm\n\n def test_comparison(self):\n # equality/ non-equality is straightforward for quantity objects\n assert (1 / (u.cm * u.cm)) == 1 * u.cm ** -2\n assert 1 * u.m == 100 * u.cm\n assert 1 * u.m != 1 * u.cm\n\n # when one is a unit, Quantity does not know what to do,\n # but unit is fine with it, so it still works\n unit = u.cm**3\n q = 1. * unit\n assert q.__eq__(unit) is NotImplemented\n assert unit.__eq__(q) is True\n assert q == unit\n q = 1000. * u.mm**3\n assert q == unit\n\n # mismatched types should never work\n assert not 1. * u.cm == 1.\n assert 1. * u.cm != 1.\n\n # comparison with zero should raise a deprecation warning\n for quantity in (1. * u.cm, 1. * u.dimensionless_unscaled):\n with catch_warnings(AstropyDeprecationWarning) as warning_lines:\n bool(quantity)\n assert warning_lines[0].category == AstropyDeprecationWarning\n assert (str(warning_lines[0].message) == 'The truth value of '\n 'a Quantity is ambiguous. In the future this will '\n 'raise a ValueError.')\n\n def test_numeric_converters(self):\n # float, int, long, and __index__ should only work for single\n # quantities, of appropriate type, and only if they are dimensionless.\n # for index, this should be unscaled as well\n # (Check on __index__ is also a regression test for #1557)\n\n # quantities with units should never convert, or be usable as an index\n q1 = u.Quantity(1, u.m)\n\n converter_err_msg = (\"only dimensionless scalar quantities \"\n \"can be converted to Python scalars\")\n index_err_msg = (\"only integer dimensionless scalar quantities \"\n \"can be converted to a Python index\")\n with pytest.raises(TypeError) as exc:\n float(q1)\n assert exc.value.args[0] == converter_err_msg\n\n with pytest.raises(TypeError) as exc:\n int(q1)\n assert exc.value.args[0] == converter_err_msg\n\n # We used to test `q1 * ['a', 'b', 'c'] here, but that that worked\n # at all was a really odd confluence of bugs. 
Since it doesn't work\n # in numpy >=1.10 any more, just go directly for `__index__` (which\n # makes the test more similar to the `int`, `long`, etc., tests).\n with pytest.raises(TypeError) as exc:\n q1.__index__()\n assert exc.value.args[0] == index_err_msg\n\n # dimensionless but scaled is OK, however\n q2 = u.Quantity(1.23, u.m / u.km)\n\n assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))\n assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))\n\n with pytest.raises(TypeError) as exc:\n q2.__index__()\n assert exc.value.args[0] == index_err_msg\n\n # dimensionless unscaled is OK, though for index needs to be int\n q3 = u.Quantity(1.23, u.dimensionless_unscaled)\n\n assert float(q3) == 1.23\n assert int(q3) == 1\n\n with pytest.raises(TypeError) as exc:\n q3.__index__()\n assert exc.value.args[0] == index_err_msg\n\n # integer dimensionless unscaled is good for all\n q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)\n\n assert float(q4) == 2.\n assert int(q4) == 2\n\n assert q4.__index__() == 2\n\n # but arrays are not OK\n q5 = u.Quantity([1, 2], u.m)\n with pytest.raises(TypeError) as exc:\n float(q5)\n assert exc.value.args[0] == converter_err_msg\n\n with pytest.raises(TypeError) as exc:\n int(q5)\n assert exc.value.args[0] == converter_err_msg\n\n with pytest.raises(TypeError) as exc:\n q5.__index__()\n assert exc.value.args[0] == index_err_msg\n\n # See https://github.com/numpy/numpy/issues/5074\n # It seems unlikely this will be resolved, so xfail'ing it.\n @pytest.mark.xfail(reason=\"list multiplication only works for numpy <=1.10\")\n def test_numeric_converter_to_index_in_practice(self):\n \"\"\"Test that use of __index__ actually works.\"\"\"\n q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)\n assert q4 * ['a', 'b', 'c'] == ['a', 'b', 'c', 'a', 'b', 'c']\n\n def test_array_converters(self):\n\n # Scalar quantity\n q = u.Quantity(1.23, u.m)\n assert np.all(np.array(q) == np.array([1.23]))\n\n # Array quantity\n q = u.Quantity([1., 2., 3.], u.m)\n assert np.all(np.array(q) == np.array([1., 2., 3.]))\n\n\ndef test_quantity_conversion():\n q1 = u.Quantity(0.1, unit=u.meter)\n value = q1.value\n assert value == 0.1\n value_in_km = q1.to_value(u.kilometer)\n assert value_in_km == 0.0001\n new_quantity = q1.to(u.kilometer)\n assert new_quantity.value == 0.0001\n\n with pytest.raises(u.UnitsError):\n q1.to(u.zettastokes)\n with pytest.raises(u.UnitsError):\n q1.to_value(u.zettastokes)\n\n\ndef test_quantity_value_views():\n q1 = u.Quantity([1., 2.], unit=u.meter)\n # views if the unit is the same.\n v1 = q1.value\n v1[0] = 0.\n assert np.all(q1 == [0., 2.] * u.meter)\n v2 = q1.to_value()\n v2[1] = 3.\n assert np.all(q1 == [0., 3.] * u.meter)\n v3 = q1.to_value('m')\n v3[0] = 1.\n assert np.all(q1 == [1., 3.] * u.meter)\n v4 = q1.to_value('cm')\n v4[0] = 0.\n # copy if different unit.\n assert np.all(q1 == [1., 3.] 
* u.meter)\n\n\ndef test_quantity_conversion_with_equiv():\n q1 = u.Quantity(0.1, unit=u.meter)\n v2 = q1.to_value(u.Hz, equivalencies=u.spectral())\n assert_allclose(v2, 2997924580.0)\n q2 = q1.to(u.Hz, equivalencies=u.spectral())\n assert_allclose(q2.value, v2)\n\n q1 = u.Quantity(0.4, unit=u.arcsecond)\n v2 = q1.to_value(u.au, equivalencies=u.parallax())\n q2 = q1.to(u.au, equivalencies=u.parallax())\n v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())\n q3 = q2.to(u.arcminute, equivalencies=u.parallax())\n\n assert_allclose(v2, 515662.015)\n assert_allclose(q2.value, v2)\n assert q2.unit == u.au\n assert_allclose(v3, 0.0066666667)\n assert_allclose(q3.value, v3)\n assert q3.unit == u.arcminute\n\n\ndef test_quantity_conversion_equivalency_passed_on():\n class MySpectral(u.Quantity):\n _equivalencies = u.spectral()\n\n def __quantity_view__(self, obj, unit):\n return obj.view(MySpectral)\n\n def __quantity_instance__(self, *args, **kwargs):\n return MySpectral(*args, **kwargs)\n\n q1 = MySpectral([1000, 2000], unit=u.Hz)\n q2 = q1.to(u.nm)\n assert q2.unit == u.nm\n q3 = q2.to(u.Hz)\n assert q3.unit == u.Hz\n assert_allclose(q3.value, q1.value)\n q4 = MySpectral([1000, 2000], unit=u.nm)\n q5 = q4.to(u.Hz).to(u.nm)\n assert q5.unit == u.nm\n assert_allclose(q4.value, q5.value)\n\n# Regression test for issue #2315, divide-by-zero error when examining 0*unit\n\n\ndef test_self_equivalency():\n assert u.deg.is_equivalent(0*u.radian)\n assert u.deg.is_equivalent(1*u.radian)\n\n\ndef test_si():\n q1 = 10. * u.m * u.s ** 2 / (200. * u.ms) ** 2 # 250 meters\n assert q1.si.value == 250\n assert q1.si.unit == u.m\n\n q = 10. * u.m # 10 meters\n assert q.si.value == 10\n assert q.si.unit == u.m\n\n q = 10. / u.m # 10 1 / meters\n assert q.si.value == 10\n assert q.si.unit == (1 / u.m)\n\n\ndef test_cgs():\n q1 = 10. * u.cm * u.s ** 2 / (200. * u.ms) ** 2 # 250 centimeters\n assert q1.cgs.value == 250\n assert q1.cgs.unit == u.cm\n\n q = 10. * u.m # 10 centimeters\n assert q.cgs.value == 1000\n assert q.cgs.unit == u.cm\n\n q = 10. / u.cm # 10 1 / centimeters\n assert q.cgs.value == 10\n assert q.cgs.unit == (1 / u.cm)\n\n q = 10. 
* u.Pa # 10 pascals\n assert q.cgs.value == 100\n assert q.cgs.unit == u.barye\n\n\nclass TestQuantityComparison:\n\n def test_quantity_equality(self):\n assert u.Quantity(1000, unit='m') == u.Quantity(1, unit='km')\n assert not (u.Quantity(1, unit='m') == u.Quantity(1, unit='km'))\n # for ==, !=, return False, True if units do not match\n assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True\n assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False\n\n def test_quantity_comparison(self):\n assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)\n assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)\n\n with pytest.raises(u.UnitsError):\n assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)\n\n with pytest.raises(u.UnitsError):\n assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)\n\n assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)\n assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)\n\n assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)\n assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)\n\n with pytest.raises(u.UnitsError):\n assert u.Quantity(\n 1100, unit=u.meter) >= u.Quantity(1, unit=u.second)\n\n with pytest.raises(u.UnitsError):\n assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)\n\n assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)\n\n\nclass TestQuantityDisplay:\n scalarintq = u.Quantity(1, unit='m', dtype=int)\n scalarfloatq = u.Quantity(1.3, unit='m')\n arrq = u.Quantity([1, 2.3, 8.9], unit='m')\n\n scalar_complex_q = u.Quantity(complex(1.0, 2.0))\n scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)\n scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)\n arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))\n big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))\n\n def test_dimensionless_quantity_repr(self):\n q2 = u.Quantity(1., unit='m-1')\n q3 = u.Quantity(1, unit='m-1', dtype=int)\n if NUMPY_LT_1_14:\n assert repr(self.scalarintq * q2) == \"<Quantity 1.0>\"\n assert repr(self.arrq * q2) == \"<Quantity [ 1. , 2.3, 8.9]>\"\n else:\n assert repr(self.scalarintq * q2) == \"<Quantity 1.>\"\n assert repr(self.arrq * q2) == \"<Quantity [1. , 2.3, 8.9]>\"\n assert repr(self.scalarintq * q3) == \"<Quantity 1>\"\n\n def test_dimensionless_quantity_str(self):\n q2 = u.Quantity(1., unit='m-1')\n q3 = u.Quantity(1, unit='m-1', dtype=int)\n assert str(self.scalarintq * q2) == \"1.0\"\n assert str(self.scalarintq * q3) == \"1\"\n if NUMPY_LT_1_14:\n assert str(self.arrq * q2) == \"[ 1. 2.3 8.9]\"\n else:\n assert str(self.arrq * q2) == \"[1. 2.3 8.9]\"\n\n def test_dimensionless_quantity_format(self):\n q1 = u.Quantity(3.14)\n assert format(q1, '.2f') == '3.14'\n\n def test_scalar_quantity_str(self):\n assert str(self.scalarintq) == \"1 m\"\n assert str(self.scalarfloatq) == \"1.3 m\"\n\n def test_scalar_quantity_repr(self):\n assert repr(self.scalarintq) == \"<Quantity 1 m>\"\n assert repr(self.scalarfloatq) == \"<Quantity 1.3 m>\"\n\n def test_array_quantity_str(self):\n if NUMPY_LT_1_14:\n assert str(self.arrq) == \"[ 1. 2.3 8.9] m\"\n else:\n assert str(self.arrq) == \"[1. 2.3 8.9] m\"\n\n def test_array_quantity_repr(self):\n if NUMPY_LT_1_14:\n assert repr(self.arrq) == \"<Quantity [ 1. , 2.3, 8.9] m>\"\n else:\n assert repr(self.arrq) == \"<Quantity [1. 
, 2.3, 8.9] m>\"\n\n def test_scalar_quantity_format(self):\n assert format(self.scalarintq, '02d') == \"01 m\"\n assert format(self.scalarfloatq, '.1f') == \"1.3 m\"\n assert format(self.scalarfloatq, '.0f') == \"1 m\"\n\n def test_uninitialized_unit_format(self):\n bad_quantity = np.arange(10.).view(u.Quantity)\n assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)\n assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + '>')\n\n def test_to_string(self):\n qscalar = u.Quantity(1.5e14, 'm/s')\n\n # __str__ is the default `format`\n assert str(qscalar) == qscalar.to_string()\n\n res = 'Quantity as KMS: 150000000000.0 km / s'\n assert \"Quantity as KMS: {0}\".format(qscalar.to_string(unit=u.km / u.s)) == res\n\n res = r'$1.5 \\times 10^{14} \\; \\mathrm{\\frac{m}{s}}$'\n assert qscalar.to_string(format=\"latex\") == res\n\n res = r'$1.5 \\times 10^{14} \\; \\mathrm{\\frac{m}{s}}$'\n assert qscalar.to_string(format=\"latex\", subfmt=\"inline\") == res\n\n res = r'$\\displaystyle 1.5 \\times 10^{14} \\; \\mathrm{\\frac{m}{s}}$'\n assert qscalar.to_string(format=\"latex\", subfmt=\"display\") == res\n\n def test_repr_latex(self):\n from astropy.units.quantity import conf\n\n q2scalar = u.Quantity(1.5e14, 'm/s')\n assert self.scalarintq._repr_latex_() == r'$1 \\; \\mathrm{m}$'\n assert self.scalarfloatq._repr_latex_() == r'$1.3 \\; \\mathrm{m}$'\n assert (q2scalar._repr_latex_() ==\n r'$1.5 \\times 10^{14} \\; \\mathrm{\\frac{m}{s}}$')\n assert self.arrq._repr_latex_() == r'$[1,~2.3,~8.9] \\; \\mathrm{m}$'\n\n # Complex quantities\n assert self.scalar_complex_q._repr_latex_() == r'$(1+2i) \\; \\mathrm{}$'\n assert (self.scalar_big_complex_q._repr_latex_() ==\n r'$(1 \\times 10^{25}+2 \\times 10^{52}i) \\; \\mathrm{}$')\n assert (self.scalar_big_neg_complex_q._repr_latex_() ==\n r'$(-1 \\times 10^{36}-2 \\times 10^{63}i) \\; \\mathrm{}$')\n assert (self.arr_complex_q._repr_latex_() ==\n (r'$[(0-0i),~(-1 \\times 10^{36}-2 \\times 10^{63}i),'\n r'~(-2 \\times 10^{36}-4 \\times 10^{63}i)] \\; \\mathrm{}$'))\n assert r'\\dots' in self.big_arr_complex_q._repr_latex_()\n\n qmed = np.arange(100)*u.m\n qbig = np.arange(1000)*u.m\n qvbig = np.arange(10000)*1e9*u.m\n\n pops = np.get_printoptions()\n oldlat = conf.latex_array_threshold\n try:\n # check precision behavior\n q = u.Quantity(987654321.123456789, 'm/s')\n qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm\n np.set_printoptions(precision=8)\n assert q._repr_latex_() == r'$9.8765432 \\times 10^{8} \\; \\mathrm{\\frac{m}{s}}$'\n assert qa._repr_latex_() == r'$[7.89123,~1.2345679 \\times 10^{8},~0] \\; \\mathrm{cm}$'\n np.set_printoptions(precision=2)\n assert q._repr_latex_() == r'$9.9 \\times 10^{8} \\; \\mathrm{\\frac{m}{s}}$'\n assert qa._repr_latex_() == r'$[7.9,~1.2 \\times 10^{8},~0] \\; \\mathrm{cm}$'\n\n # check thresholding behavior\n conf.latex_array_threshold = 100 # should be default\n lsmed = qmed._repr_latex_()\n assert r'\\dots' not in lsmed\n lsbig = qbig._repr_latex_()\n assert r'\\dots' in lsbig\n lsvbig = qvbig._repr_latex_()\n assert r'\\dots' in lsvbig\n\n conf.latex_array_threshold = 1001\n lsmed = qmed._repr_latex_()\n assert r'\\dots' not in lsmed\n lsbig = qbig._repr_latex_()\n assert r'\\dots' not in lsbig\n lsvbig = qvbig._repr_latex_()\n assert r'\\dots' in lsvbig\n\n conf.latex_array_threshold = -1 # means use the numpy threshold\n np.set_printoptions(threshold=99)\n lsmed = qmed._repr_latex_()\n assert r'\\dots' in lsmed\n lsbig = qbig._repr_latex_()\n assert r'\\dots' in lsbig\n lsvbig = 
qvbig._repr_latex_()\n assert r'\\dots' in lsvbig\n finally:\n # prevent side-effects from influencing other tests\n np.set_printoptions(**pops)\n conf.latex_array_threshold = oldlat\n\n qinfnan = [np.inf, -np.inf, np.nan] * u.m\n assert qinfnan._repr_latex_() == r'$[\\infty,~-\\infty,~{\\rm NaN}] \\; \\mathrm{m}$'\n\n\ndef test_decompose():\n q1 = 5 * u.N\n assert q1.decompose() == (5 * u.kg * u.m * u.s ** -2)\n\n\ndef test_decompose_regression():\n \"\"\"\n Regression test for bug #1163\n\n If decompose was called multiple times on a Quantity with an array and a\n scale != 1, the result changed every time. This is because the value was\n being referenced not copied, then modified, which changed the original\n value.\n \"\"\"\n q = np.array([1, 2, 3]) * u.m / (2. * u.km)\n assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))\n assert np.all(q == np.array([1, 2, 3]) * u.m / (2. * u.km))\n assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))\n\n\ndef test_arrays():\n \"\"\"\n Test using quantites with array values\n \"\"\"\n\n qsec = u.Quantity(np.arange(10), u.second)\n assert isinstance(qsec.value, np.ndarray)\n assert not qsec.isscalar\n\n # len and indexing should work for arrays\n assert len(qsec) == len(qsec.value)\n qsecsub25 = qsec[2:5]\n assert qsecsub25.unit == qsec.unit\n assert isinstance(qsecsub25, u.Quantity)\n assert len(qsecsub25) == 3\n\n # make sure isscalar, len, and indexing behave correcly for non-arrays.\n qsecnotarray = u.Quantity(10., u.second)\n assert qsecnotarray.isscalar\n with pytest.raises(TypeError):\n len(qsecnotarray)\n with pytest.raises(TypeError):\n qsecnotarray[0]\n\n qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)\n # 0d numpy array should act basically like a scalar\n assert qseclen0array.isscalar\n with pytest.raises(TypeError):\n len(qseclen0array)\n with pytest.raises(TypeError):\n qseclen0array[0]\n assert isinstance(qseclen0array.value, int)\n\n a = np.array([(1., 2., 3.), (4., 5., 6.), (7., 8., 9.)],\n dtype=[('x', float),\n ('y', float),\n ('z', float)])\n qkpc = u.Quantity(a, u.kpc)\n assert not qkpc.isscalar\n qkpc0 = qkpc[0]\n assert qkpc0.value == a[0]\n assert qkpc0.unit == qkpc.unit\n assert isinstance(qkpc0, u.Quantity)\n assert qkpc0.isscalar\n qkpcx = qkpc['x']\n assert np.all(qkpcx.value == a['x'])\n assert qkpcx.unit == qkpc.unit\n assert isinstance(qkpcx, u.Quantity)\n assert not qkpcx.isscalar\n qkpcx1 = qkpc['x'][1]\n assert qkpcx1.unit == qkpc.unit\n assert isinstance(qkpcx1, u.Quantity)\n assert qkpcx1.isscalar\n qkpc1x = qkpc[1]['x']\n assert qkpc1x.isscalar\n assert qkpc1x == qkpcx1\n\n # can also create from lists, will auto-convert to arrays\n qsec = u.Quantity(list(range(10)), u.second)\n assert isinstance(qsec.value, np.ndarray)\n\n # quantity math should work with arrays\n assert_array_equal((qsec * 2).value, (np.arange(10) * 2))\n assert_array_equal((qsec / 2).value, (np.arange(10) / 2))\n # quantity addition/subtraction should *not* work with arrays b/c unit\n # ambiguous\n with pytest.raises(u.UnitsError):\n assert_array_equal((qsec + 2).value, (np.arange(10) + 2))\n with pytest.raises(u.UnitsError):\n assert_array_equal((qsec - 2).value, (np.arange(10) + 2))\n\n # should create by unit multiplication, too\n qsec2 = np.arange(10) * u.second\n qsec3 = u.second * np.arange(10)\n\n assert np.all(qsec == qsec2)\n assert np.all(qsec2 == qsec3)\n\n # make sure numerical-converters fail when arrays are present\n with pytest.raises(TypeError):\n float(qsec)\n with 
pytest.raises(TypeError):\n int(qsec)\n\n\ndef test_array_indexing_slicing():\n q = np.array([1., 2., 3.]) * u.m\n assert q[0] == 1. * u.m\n assert np.all(q[0:2] == u.Quantity([1., 2.], u.m))\n\n\ndef test_array_setslice():\n q = np.array([1., 2., 3.]) * u.m\n q[1:2] = np.array([400.]) * u.cm\n assert np.all(q == np.array([1., 4., 3.]) * u.m)\n\n\ndef test_inverse_quantity():\n \"\"\"\n Regression test from issue #679\n \"\"\"\n q = u.Quantity(4., u.meter / u.second)\n qot = q / 2\n toq = 2 / q\n npqot = q / np.array(2)\n\n assert npqot.value == 2.0\n assert npqot.unit == (u.meter / u.second)\n\n assert qot.value == 2.0\n assert qot.unit == (u.meter / u.second)\n\n assert toq.value == 0.5\n assert toq.unit == (u.second / u.meter)\n\n\ndef test_quantity_mutability():\n q = u.Quantity(9.8, u.meter / u.second / u.second)\n\n with pytest.raises(AttributeError):\n q.value = 3\n\n with pytest.raises(AttributeError):\n q.unit = u.kg\n\n\ndef test_quantity_initialized_with_quantity():\n q1 = u.Quantity(60, u.second)\n\n q2 = u.Quantity(q1, u.minute)\n assert q2.value == 1\n\n q3 = u.Quantity([q1, q2], u.second)\n assert q3[0].value == 60\n assert q3[1].value == 60\n\n q4 = u.Quantity([q2, q1])\n assert q4.unit == q2.unit\n assert q4[0].value == 1\n assert q4[1].value == 1\n\n\ndef test_quantity_string_unit():\n q1 = 1. * u.m / 's'\n assert q1.value == 1\n assert q1.unit == (u.m / u.s)\n\n q2 = q1 * \"m\"\n assert q2.unit == ((u.m * u.m) / u.s)\n\n\n@raises(ValueError)\ndef test_quantity_invalid_unit_string():\n \"foo\" * u.m\n\n\ndef test_implicit_conversion():\n q = u.Quantity(1.0, u.meter)\n # Manually turn this on to simulate what might happen in a subclass\n q._include_easy_conversion_members = True\n assert_allclose(q.centimeter, 100)\n assert_allclose(q.cm, 100)\n assert_allclose(q.parsec, 3.240779289469756e-17)\n\n\ndef test_implicit_conversion_autocomplete():\n q = u.Quantity(1.0, u.meter)\n # Manually turn this on to simulate what might happen in a subclass\n q._include_easy_conversion_members = True\n q.foo = 42\n\n attrs = dir(q)\n assert 'centimeter' in attrs\n assert 'cm' in attrs\n assert 'parsec' in attrs\n assert 'foo' in attrs\n assert 'to' in attrs\n assert 'value' in attrs\n # Something from the base class, object\n assert '__setattr__' in attrs\n\n with pytest.raises(AttributeError):\n q.l\n\n\ndef test_quantity_iterability():\n \"\"\"Regressiont est for issue #878.\n\n Scalar quantities should not be iterable and should raise a type error on\n iteration.\n \"\"\"\n\n q1 = [15.0, 17.0] * u.m\n assert isiterable(q1)\n\n q2 = next(iter(q1))\n assert q2 == 15.0 * u.m\n assert not isiterable(q2)\n pytest.raises(TypeError, iter, q2)\n\n\ndef test_copy():\n\n q1 = u.Quantity(np.array([[1., 2., 3.], [4., 5., 6.]]), unit=u.m)\n q2 = q1.copy()\n\n assert np.all(q1.value == q2.value)\n assert q1.unit == q2.unit\n assert q1.dtype == q2.dtype\n assert q1.value is not q2.value\n\n q3 = q1.copy(order='F')\n assert q3.flags['F_CONTIGUOUS']\n assert np.all(q1.value == q3.value)\n assert q1.unit == q3.unit\n assert q1.dtype == q3.dtype\n assert q1.value is not q3.value\n\n q4 = q1.copy(order='C')\n assert q4.flags['C_CONTIGUOUS']\n assert np.all(q1.value == q4.value)\n assert q1.unit == q4.unit\n assert q1.dtype == q4.dtype\n assert q1.value is not q4.value\n\n\ndef test_deepcopy():\n q1 = u.Quantity(np.array([1., 2., 3.]), unit=u.m)\n q2 = copy.deepcopy(q1)\n\n assert isinstance(q2, u.Quantity)\n assert np.all(q1.value == q2.value)\n assert q1.unit == q2.unit\n assert q1.dtype == q2.dtype\n\n 
assert q1.value is not q2.value\n\n\ndef test_equality_numpy_scalar():\n \"\"\"\n A regression test to ensure that numpy scalars are correctly compared\n (which originally failed due to the lack of ``__array_priority__``).\n \"\"\"\n assert 10 != 10. * u.m\n assert np.int64(10) != 10 * u.m\n assert 10 * u.m != np.int64(10)\n\n\ndef test_quantity_pickelability():\n \"\"\"\n Testing pickleability of quantity\n \"\"\"\n\n q1 = np.arange(10) * u.m\n\n q2 = pickle.loads(pickle.dumps(q1))\n\n assert np.all(q1.value == q2.value)\n assert q1.unit.is_equivalent(q2.unit)\n assert q1.unit == q2.unit\n\n\ndef test_quantity_initialisation_from_string():\n q = u.Quantity('1')\n assert q.unit == u.dimensionless_unscaled\n assert q.value == 1.\n q = u.Quantity('1.5 m/s')\n assert q.unit == u.m/u.s\n assert q.value == 1.5\n assert u.Unit(q) == u.Unit('1.5 m/s')\n q = u.Quantity('.5 m')\n assert q == u.Quantity(0.5, u.m)\n q = u.Quantity('-1e1km')\n assert q == u.Quantity(-10, u.km)\n q = u.Quantity('-1e+1km')\n assert q == u.Quantity(-10, u.km)\n q = u.Quantity('+.5km')\n assert q == u.Quantity(.5, u.km)\n q = u.Quantity('+5e-1km')\n assert q == u.Quantity(.5, u.km)\n q = u.Quantity('5', u.m)\n assert q == u.Quantity(5., u.m)\n q = u.Quantity('5 km', u.m)\n assert q.value == 5000.\n assert q.unit == u.m\n q = u.Quantity('5Em')\n assert q == u.Quantity(5., u.Em)\n\n with pytest.raises(TypeError):\n u.Quantity('')\n with pytest.raises(TypeError):\n u.Quantity('m')\n with pytest.raises(TypeError):\n u.Quantity('1.2.3 deg')\n with pytest.raises(TypeError):\n u.Quantity('1+deg')\n with pytest.raises(TypeError):\n u.Quantity('1-2deg')\n with pytest.raises(TypeError):\n u.Quantity('1.2e-13.3m')\n with pytest.raises(TypeError):\n u.Quantity(['5'])\n with pytest.raises(TypeError):\n u.Quantity(np.array(['5']))\n with pytest.raises(ValueError):\n u.Quantity('5E')\n with pytest.raises(ValueError):\n u.Quantity('5 foo')\n\n\ndef test_unsupported():\n q1 = np.arange(10) * u.m\n\n with pytest.raises(TypeError):\n q2 = np.bitwise_and(q1, q1)\n\n\ndef test_unit_identity():\n q = 1.0 * u.hour\n assert q.unit is u.hour\n\n\ndef test_quantity_to_view():\n q1 = np.array([1000, 2000]) * u.m\n q2 = q1.to(u.km)\n assert q1.value[0] == 1000\n assert q2.value[0] == 1\n\n\n@raises(ValueError)\ndef test_quantity_tuple_power():\n (5.0 * u.m) ** (1, 2)\n\n\ndef test_quantity_fraction_power():\n q = (25.0 * u.m**2) ** Fraction(1, 2)\n assert q.value == 5.\n assert q.unit == u.m\n # Regression check to ensure we didn't create an object type by raising\n # the value of the quantity to a Fraction. [#3922]\n assert q.dtype.kind == 'f'\n\n\ndef test_inherit_docstrings():\n assert u.Quantity.argmax.__doc__ == np.ndarray.argmax.__doc__\n\n\ndef test_quantity_from_table():\n \"\"\"\n Checks that units from tables are respected when converted to a Quantity.\n This also generically checks the use of *anything* with a `unit` attribute\n passed into Quantity\n \"\"\"\n from... table import Table\n\n t = Table(data=[np.arange(5), np.arange(5)], names=['a', 'b'])\n t['a'].unit = u.kpc\n\n qa = u.Quantity(t['a'])\n assert qa.unit == u.kpc\n assert_array_equal(qa.value, t['a'])\n\n qb = u.Quantity(t['b'])\n assert qb.unit == u.dimensionless_unscaled\n assert_array_equal(qb.value, t['b'])\n\n # This does *not* auto-convert, because it's not necessarily obvious that's\n # desired. 
Instead we revert to standard `Quantity` behavior\n qap = u.Quantity(t['a'], u.pc)\n assert qap.unit == u.pc\n assert_array_equal(qap.value, t['a'] * 1000)\n\n qbp = u.Quantity(t['b'], u.pc)\n assert qbp.unit == u.pc\n assert_array_equal(qbp.value, t['b'])\n\n\ndef test_assign_slice_with_quantity_like():\n # Regression tests for gh-5961\n from astropy.table import Table, Column\n # first check directly that we can use a Column to assign to a slice.\n c = Column(np.arange(10.), unit=u.mm)\n q = u.Quantity(c)\n q[:2] = c[:2]\n # next check that we do not fail the original problem.\n t = Table()\n t['x'] = np.arange(10) * u.mm\n t['y'] = np.ones(10) * u.mm\n assert type(t['x']) is Column\n\n xy = np.vstack([t['x'], t['y']]).T * u.mm\n ii = [0, 2, 4]\n\n assert xy[ii, 0].unit == t['x'][ii].unit\n # should not raise anything\n xy[ii, 0] = t['x'][ii]\n\n\ndef test_insert():\n \"\"\"\n Test Quantity.insert method. This does not test the full capabilities\n of the underlying np.insert, but hits the key functionality for\n Quantity.\n \"\"\"\n q = [1, 2] * u.m\n\n # Insert a compatible float with different units\n q2 = q.insert(0, 1 * u.km)\n assert np.all(q2.value == [1000, 1, 2])\n assert q2.unit is u.m\n assert q2.dtype.kind == 'f'\n\n if minversion(np, '1.8.0'):\n q2 = q.insert(1, [1, 2] * u.km)\n assert np.all(q2.value == [1, 1000, 2000, 2])\n assert q2.unit is u.m\n\n # Cannot convert 1.5 * u.s to m\n with pytest.raises(u.UnitsError):\n q.insert(1, 1.5 * u.s)\n\n # Tests with multi-dim quantity\n q = [[1, 2], [3, 4]] * u.m\n q2 = q.insert(1, [10, 20] * u.m, axis=0)\n assert np.all(q2.value == [[1, 2],\n [10, 20],\n [3, 4]])\n\n q2 = q.insert(1, [10, 20] * u.m, axis=1)\n assert np.all(q2.value == [[1, 10, 2],\n [3, 20, 4]])\n\n q2 = q.insert(1, 10 * u.m, axis=1)\n assert np.all(q2.value == [[1, 10, 2],\n [3, 10, 4]])\n\n\ndef test_repr_array_of_quantity():\n \"\"\"\n Test print/repr of object arrays of Quantity objects with different\n units.\n\n Regression test for the issue first reported in\n https://github.com/astropy/astropy/issues/3777\n \"\"\"\n\n a = np.array([1 * u.m, 2 * u.s], dtype=object)\n if NUMPY_LT_1_14:\n assert repr(a) == 'array([<Quantity 1.0 m>, <Quantity 2.0 s>], dtype=object)'\n assert str(a) == '[<Quantity 1.0 m> <Quantity 2.0 s>]'\n else:\n assert repr(a) == 'array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)'\n assert str(a) == '[<Quantity 1. m> <Quantity 2. s>]'\n\n\nclass TestSpecificTypeQuantity:\n def setup(self):\n class Length(u.SpecificTypeQuantity):\n _equivalent_unit = u.m\n\n class Length2(Length):\n _default_unit = u.m\n\n class Length3(Length):\n _unit = u.m\n\n self.Length = Length\n self.Length2 = Length2\n self.Length3 = Length3\n\n def test_creation(self):\n l = self.Length(np.arange(10.)*u.km)\n assert type(l) is self.Length\n with pytest.raises(u.UnitTypeError):\n self.Length(np.arange(10.) * u.hour)\n\n with pytest.raises(u.UnitTypeError):\n self.Length(np.arange(10.))\n\n l2 = self.Length2(np.arange(5.))\n assert type(l2) is self.Length2\n assert l2._default_unit is self.Length2._default_unit\n\n with pytest.raises(u.UnitTypeError):\n self.Length3(np.arange(10.))\n\n def test_view(self):\n l = (np.arange(5.) * u.km).view(self.Length)\n assert type(l) is self.Length\n with pytest.raises(u.UnitTypeError):\n (np.arange(5.) 
* u.s).view(self.Length)\n\n v = np.arange(5.).view(self.Length)\n assert type(v) is self.Length\n assert v._unit is None\n\n l3 = np.ones((2, 2)).view(self.Length3)\n assert type(l3) is self.Length3\n assert l3.unit is self.Length3._unit\n\n def test_operation_precedence_and_fallback(self):\n l = self.Length(np.arange(5.)*u.cm)\n sum1 = l + 1.*u.m\n assert type(sum1) is self.Length\n sum2 = 1.*u.km + l\n assert type(sum2) is self.Length\n sum3 = l + l\n assert type(sum3) is self.Length\n res1 = l * (1.*u.m)\n assert type(res1) is u.Quantity\n res2 = l * l\n assert type(res2) is u.Quantity\n\n\[email protected]('not HAS_MATPLOTLIB')\[email protected]('MATPLOTLIB_LT_15')\nclass TestQuantityMatplotlib:\n \"\"\"Test if passing matplotlib quantities works.\n\n TODO: create PNG output and check against reference image\n once `astropy.wcsaxes` is merged, which provides\n the machinery for this.\n\n See https://github.com/astropy/astropy/issues/1881\n See https://github.com/astropy/astropy/pull/2139\n \"\"\"\n\n def test_plot(self):\n data = u.Quantity([4, 5, 6], 's')\n plt.plot(data)\n\n def test_scatter(self):\n x = u.Quantity([4, 5, 6], 'second')\n y = [1, 3, 4] * u.m\n plt.scatter(x, y)\n\n\ndef test_unit_class_override():\n class MyQuantity(u.Quantity):\n pass\n\n my_unit = u.Unit(\"my_deg\", u.deg)\n my_unit._quantity_class = MyQuantity\n q1 = u.Quantity(1., my_unit)\n assert type(q1) is u.Quantity\n q2 = u.Quantity(1., my_unit, subok=True)\n assert type(q2) is MyQuantity\n\n\nclass QuantityMimic:\n def __init__(self, value, unit):\n self.value = value\n self.unit = unit\n\n def __array__(self):\n return np.array(self.value)\n\n\nclass QuantityMimic2(QuantityMimic):\n def to(self, unit):\n return u.Quantity(self.value, self.unit).to(unit)\n\n def to_value(self, unit):\n return u.Quantity(self.value, self.unit).to_value(unit)\n\n\nclass TestQuantityMimics:\n \"\"\"Test Quantity Mimics that are not ndarray subclasses.\"\"\"\n @pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))\n def test_mimic_input(self, Mimic):\n value = np.arange(10.)\n mimic = Mimic(value, u.m)\n q = u.Quantity(mimic)\n assert q.unit == u.m\n assert np.all(q.value == value)\n q2 = u.Quantity(mimic, u.cm)\n assert q2.unit == u.cm\n assert np.all(q2.value == 100 * value)\n\n @pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))\n def test_mimic_setting(self, Mimic):\n mimic = Mimic([1., 2.], u.m)\n q = u.Quantity(np.arange(10.), u.cm)\n q[8:] = mimic\n assert np.all(q[:8].value == np.arange(8.))\n assert np.all(q[8:].value == [100., 200.])\n" ]
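The test_creation_via_view cases in the code field above hinge on when attaching a unit shares memory and when it copies. Below is a minimal numpy-only sketch of that view-versus-copy distinction; the Tagged subclass and its unit attribute are illustrative stand-ins, not astropy's actual Quantity machinery.

import numpy as np

class Tagged(np.ndarray):
    """Hypothetical ndarray subclass carrying a unit label."""
    unit = None

a = np.arange(10.)
q = a.view(Tagged)        # a view: no data is copied
q.unit = "m"
a[9] = 0.0
assert q[9] == 0.0        # the view sees mutations of the base array

q_mm = q * 1000.0         # a value-changing "unit conversion" must allocate
q_mm.unit = "mm"
a[8] = 0.0
assert q_mm[8] == 8000.0  # the converted copy is unaffected

This mirrors the assertions in the tests: a2 << u.m gives a live view of a2, while a unit change such as q2 << u.mm / u.s produces an independent copy.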
[ [ "numpy.trunc", "numpy.ones_like", "numpy.nditer", "numpy.isfinite", "numpy.isnat", "numpy.isnan", "numpy.asarray", "numpy.nan_to_num", "numpy.atleast_1d", "numpy.zeros_like", "numpy.any", "numpy.ma.array", "numpy.array", "numpy.isinf" ], [ "numpy.get_printoptions", "numpy.all", "matplotlib.pyplot.plot", "numpy.arange", "numpy.eye", "numpy.matmul", "numpy.testing.assert_array_almost_equal", "numpy.isnan", "numpy.int64", "numpy.testing.assert_allclose", "numpy.array", "matplotlib.pyplot.scatter", "matplotlib.use", "numpy.set_printoptions", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.bitwise_and", "numpy.isinf", "numpy.vstack" ] ]
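Each apis entry above pairs a code string with the fully qualified library calls found in it. Below is a minimal sketch of how such a column could be derived with the standard-library ast module; this is an assumption about the extraction approach, not the dataset's actual tooling, and SOURCE is a made-up snippet.

import ast

SOURCE = "import numpy as np\nx = np.arange(3)\nprint(np.linalg.norm(x))\n"

tree = ast.parse(SOURCE)
aliases = {}                                  # local alias -> module path
for node in ast.walk(tree):
    if isinstance(node, ast.Import):
        for alias in node.names:
            aliases[alias.asname or alias.name] = alias.name

def dotted_name(node):
    # Rebuild a dotted call target such as np.linalg.norm -> numpy.linalg.norm.
    parts = []
    while isinstance(node, ast.Attribute):
        parts.append(node.attr)
        node = node.value
    if isinstance(node, ast.Name) and node.id in aliases:
        parts.append(aliases[node.id])
        return ".".join(reversed(parts))
    return None

calls = {dotted_name(n.func) for n in ast.walk(tree) if isinstance(n, ast.Call)}
print(sorted(c for c in calls if c))          # ['numpy.arange', 'numpy.linalg.norm']

A real extractor would also have to handle from-imports and re-assignments, which this sketch deliberately ignores.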
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hermes777/mmfashion
[ "b400d6b68d551c518112ea83008d22c97c12b7c6" ]
[ "mmfashion/models/virtual_tryon/tryon.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..import builder\nfrom ..registry import TRYON\n\[email protected]_module\nclass Tryon(nn.Module):\n def __init__(self,\n ngf,\n num_downs,\n in_channels,\n out_channels,\n down_channels=(8, 8),\n inter_channels=(8, 8),\n up_channels=[[4, 8], [2, 4], [1, 2]],\n norm_layer=nn.InstanceNorm2d,\n use_dropout=False,\n loss_l1=dict(type='L1Loss'),\n loss_vgg=dict(type='VGGLoss'),\n loss_mask=dict(type='L1Loss'),\n pretrained=None):\n super(Tryon, self).__init__()\n\n unet_block = builder.build_unet_skip_connection_block(\n dict(type='UnetSkipConnectionBlock',\n outer_nc=ngf*down_channels[0],\n inner_nc=ngf*down_channels[1],\n input_nc=None,\n submodule=None,\n norm_layer=norm_layer,\n innermost=True))\n\n for i in range(num_downs - 5):\n unet_block = builder.build_unet_skip_connection_block(\n dict(type='UnetSkipConnectionBlock',\n outer_nc=ngf*inter_channels[0],\n inner_nc=ngf*inter_channels[1],\n input_nc=None,\n submodule=unet_block,\n norm_layer=norm_layer,\n use_dropout=use_dropout))\n\n # upsample\n for ratio in up_channels:\n unet_block = builder.build_unet_skip_connection_block(\n dict(type='UnetSkipConnectionBlock',\n outer_nc=ngf*ratio[0],\n inner_nc=ngf*ratio[1],\n input_nc=None,\n submodule=unet_block,\n norm_layer=norm_layer))\n\n unet_block = builder.build_unet_skip_connection_block(\n dict(type='UnetSkipConnectionBlock',\n outer_nc=out_channels,\n inner_nc=ngf,\n input_nc=in_channels,\n submodule=unet_block,\n outermost=True,\n norm_layer=norm_layer)\n )\n self.generator = unet_block\n\n self.loss_l1 = builder.build_loss(loss_l1)\n self.loss_vgg = builder.build_loss(loss_vgg)\n self.loss_mask = builder.build_loss(loss_mask)\n\n self.init_weights(pretrained=pretrained)\n\n def forward_train(self, img, agnostic, cloth, cloth_mask):\n input = torch.cat([agnostic, cloth], 1)\n output = self.generator(input)\n\n p_rendered, m_composite = torch.split(output, 3, 1)\n p_rendered = torch.tanh(p_rendered)\n m_composite = torch.sigmoid(m_composite)\n p_tryon = cloth * m_composite + p_rendered * (1 - m_composite)\n\n losses = dict()\n losses['loss_l1'] = self.loss_l1(p_tryon, img)\n losses['loss_vgg'] = self.loss_vgg(p_tryon, img)\n losses['loss_mask'] = self.loss_mask(m_composite, cloth_mask)\n\n return losses\n\n def forward_test(self, agnostic, cloth):\n input = torch.cat([agnostic, cloth], 1)\n output = self.generator(input)\n\n p_rendered, m_composite = torch.split(output, 3, 1)\n p_rendered = torch.tanh(p_rendered)\n m_composite = torch.sigmoid(m_composite)\n p_tryon = cloth * m_composite + p_rendered * (1 - m_composite)\n\n return p_tryon\n\n\n def forward(self,\n img,\n cloth,\n cloth_mask,\n agnostic,\n parse_cloth=None,\n im_name=None,\n c_name=None,\n return_loss=True):\n if return_loss:\n return self.forward_train(img, agnostic, cloth, cloth_mask)\n else:\n return self.forward_test(agnostic, cloth)\n\n def init_weights(self, pretrained=None):\n if pretrained is not None:\n self.unet.load_state_dict(torch.load(pretrained))" ]
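Both forward passes in the Tryon module above end with the same masked compositing step. Here is a small self-contained sketch of just that step; the batch size and 256x192 resolution are illustrative assumptions, and random tensors stand in for the generator output and the warped cloth.

import torch

output = torch.randn(2, 4, 256, 192)         # stand-in for generator output
# split size 3 along dim 1 leaves a remainder chunk: (2,3,H,W) and (2,1,H,W)
p_rendered, m_composite = torch.split(output, 3, dim=1)
p_rendered = torch.tanh(p_rendered)          # rendered person, RGB in [-1, 1]
m_composite = torch.sigmoid(m_composite)     # composition mask in (0, 1)

cloth = torch.randn(2, 3, 256, 192)          # stand-in for the warped cloth
# The single-channel mask broadcasts over RGB: mask near 1 keeps the cloth,
# mask near 0 keeps the rendered person.
p_tryon = cloth * m_composite + p_rendered * (1 - m_composite)
assert p_tryon.shape == (2, 3, 256, 192)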
[ [ "torch.sigmoid", "torch.load", "torch.cat", "torch.tanh", "torch.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arturofburgos/Comparing-Languages
[ "a20dc24699c762252c94c26e32c7053c04793d9d" ]
[ "Linear System/Python/linear_system.py" ]
[ "# Undergraduate Student: Arturo Burgos\n# Professor: João Rodrigo Andrade\n# Federal University of Uberlândia - UFU, Fluid Mechanics Laboratory - MFLab, Block 5P, Uberlândia, MG, Brazil\n\n\n# Fourth exercise: Solving a Linear System --> ax = b\n\n# Here I first set conditions\n\nimport numpy as np\nfrom numpy import linalg as La\nimport matplotlib.pyplot as plt\nimport time\nnp.seterr(divide='ignore', invalid='ignore')\n\nprint('\\n')\n\nn = 9\nK = np.sqrt(n)\nk = int(K)\n\n#################\n# aX = b\n#################\n\n\n# Here I set the Matrix\n\na = np.zeros((n,n))\n\n\nfor i in range(n):\n for j in range(n):\n if i == j:\n a[i,j] = -4\n elif i == j-k or i ==j+k:\n a[i,j] = 1\n elif ((i+1) % k != 0 and i == j-1) or ((i+1) % k != 1 and i == j+1): # (i+1) because in Python we start from 0\n a[i,j] = 1\n\n\nprint('The coefficient Matrix is:')\nprint(a)\nprint('\\n')\n \n\nb = np.zeros(n)\n\n\nfor i in range(k):\n if i < k-1: # (k-1) because in Python we start from 0\n b[i] = -50\n else:\n b[i] = -150\n\nfor i in range(k,n-k):\n if (i+1)%k != 0: # (i+1) because in Python we start from 0\n b[i] = 0\n else:\n b[i] = -100\n\nfor i in range(n-k,n):\n if i < n-1: # (k-1) because in Python we start from 0\n b[i] = -50\n else:\n b[i] = -150\n\nprint('The result Matrix is:')\nprint(b)\nprint('\\n')\n\n\n\ndef linearsystem(coeff,resul,size):\n\n # Initial x_k and x_k1 value\n\n x_k = np.zeros(size)\n x_k1 = np.ones(size)\n\n # Here I set the tolerance\n tolerance = 1e-9\n # Here I set the iterations\n ite = 0\n \n\n # Here I set the error based in the Infinite norm \n erro = La.norm((x_k1 - x_k),np.inf)\n #erro = (x_k1 - x_k)/x_k1\n\n\n while (erro > tolerance): #\n for i in range(0,size):\n \n x_k1[i] = resul[i]\n\n for j in range(0,size):\n if j!=i:\n x_k1[i] = x_k1[i] - coeff[i,j]*x_k[j]\n\n\n \n x_k1[i] = x_k1[i]/ coeff[i,i]\n\n\n #erro = (x_k1 - x_k)/x_k1\n erro = La.norm((x_k1 - x_k),np.inf)\n\n x_k = x_k1.copy()\n #x_k[:] = x_k1[:] # -> the same as above\n\n ite = ite + 1\n\n \n\n print('The number of iterations is: ')\n print(ite)\n print('\\n')\n\n print('Note that now the error is not an array anymore, but is normalized :')\n print(erro)\n print('\\n')\n\n return x_k1\n\n\nt_initial = time.time()\nres = linearsystem(a,b,n)\nt_final = time.time()\n\n\nprint('The solution is:')\nprint(res)\nprint('\\n')\n\n\nprint(\"\\n\\n--- %s seconds ---\\n\" % (t_final - t_initial))\n\n# PLOT OF THE MATRIX\n\n\n\ndef samplemat(dims,bb):\n \n\n aa = np.zeros(dims)\n \n \n #print(bb)\n\n aa = np.reshape(bb,(dims))\n\n \n return aa\n\n\n# Display matrix\nplt.matshow(samplemat((k, k),res))\nplt.colorbar()\n\nplt.show()" ]
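The linearsystem function above is plain Jacobi iteration. Below is a quick sanity check of the scheme on a tiny strictly diagonally dominant system (the 3x3 matrix is made up, not the repo's 9x9 Laplace stencil), compared against numpy's direct solver.

import numpy as np

A = np.array([[-4.,  1.,  0.],
              [ 1., -4.,  1.],
              [ 0.,  1., -4.]])
b = np.array([-50., -100., -150.])

x = np.zeros(3)
R = A - np.diag(np.diag(A))               # off-diagonal part of A
for _ in range(200):                      # fixed iteration budget
    x_new = (b - R @ x) / np.diag(A)      # one Jacobi sweep, vectorised
    if np.linalg.norm(x_new - x, np.inf) < 1e-9:
        x = x_new
        break
    x = x_new

assert np.allclose(x, np.linalg.solve(A, b))

Strict diagonal dominance (|-4| > 1 + 1 in every row) is what guarantees convergence here, and the same property holds for the repo's full finite-difference matrix.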
[ [ "numpy.sqrt", "numpy.reshape", "numpy.linalg.norm", "numpy.ones", "numpy.seterr", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jamesduke/professional-services
[ "c9787015e890c2a99e13a970ecdb8597fc1ad274" ]
[ "examples/cloudml-fraud-detection/trainer/input_fn_utils.py" ]
[ "\"\"\"File with functions to process input data and serve TF graph.\n\nCreate input function on processed data for training and evaluation and\nserving function for out of sample unprocessed data.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport posixpath\n\nimport tensorflow as tf\nfrom tensorflow_transform.saved import input_fn_maker\nfrom tensorflow_transform.tf_metadata import metadata_io\n\nfrom constants import constants\nfrom utils.datasettype import DatasetType\n\n\ndef make_input_schema():\n \"\"\"Builds the schema of the data from TFRecord to be inputted in the model.\n\n Returns:\n A dictionary mapping keys of column names to `tf.FixedLenFeature` and\n specifies the shape and dtypes.\n \"\"\"\n\n schema = {}\n for c in constants.FEATURE_COLUMNS:\n schema[c] = tf.FixedLenFeature(shape=[], dtype=tf.float32)\n schema[constants.LABEL_COLUMN] = tf.FixedLenFeature(shape=[], dtype=tf.int64)\n schema[constants.KEY_COLUMN] = tf.FixedLenFeature(shape=[], dtype=tf.int64)\n return schema\n\n\ndef read_dataset(input_dir, mode, batch_size):\n \"\"\"Reads data to be inputted in TF experiment from DataFlow output.\n\n Processed data, stored as TFRecord.\n\n Args:\n input_dir: Directory to read data from (output of DataFlow job).\n mode: Specifies the type of dataset (train, validation, test).\n batch_size: Batch size used to read data.\n\n Raises:\n ValueError: Expected one of the following: `tf.estimator.ModeKeys.TRAIN`,\n `tf.estimator.ModeKeys.EVAL`, `tf.estimator.ModeKeys.INFER`.\n\n Returns:\n Input function.\n \"\"\"\n\n def _parser(data):\n features = {c: data[c] for c in constants.FEATURE_COLUMNS}\n key = constants.KEY_COLUMN\n features.update({key: data[key]})\n labels = data[constants.LABEL_COLUMN]\n return features, labels\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n d = DatasetType.TRAIN\n elif mode == tf.estimator.ModeKeys.EVAL:\n d = DatasetType.VAL\n elif mode == tf.estimator.ModeKeys.INFER:\n d = DatasetType.TEST\n else:\n raise ValueError(\n 'Expected one of the following: tf.estimator.ModeKeys.TRAIN, EVAL, '\n 'INFER ; got {} instead.'.format(mode))\n assert d\n\n prefix_input = [\n posixpath.join(input_dir,\n '{}*'.format(constants.PATH_TRANSFORMED_DATA_SPLIT[d]))\n ]\n\n def _input_fn():\n \"\"\"Input function that serves as input to the Experiment class.\n\n Returns:\n `tf.Data.dataset` object containing features and labels.\n \"\"\"\n\n filenames = tf.train.match_filenames_once(prefix_input)\n dataset = tf.data.TFRecordDataset(filenames)\n schema = make_input_schema()\n dataset = dataset.map(\n lambda x: tf.parse_single_example(serialized=x, features=schema))\n dataset = dataset.map(_parser)\n dataset = dataset.repeat(None)\n dataset = dataset.batch(batch_size)\n return dataset\n\n return _input_fn\n\n\ndef get_serving_input_fn(input_dir):\n \"\"\"Creates and returns function serving unlabelled data for scoring.\n\n Args:\n input_dir: string, path to input data.\n\n Returns:\n Serving function.\n \"\"\"\n\n raw_metadata = metadata_io.read_metadata(\n posixpath.join(input_dir, constants.PATH_INPUT_SCHEMA))\n transform_fn_path = posixpath.join(\n input_dir, constants.PATH_INPUT_TRANSFORMATION, 'transform_fn')\n\n return input_fn_maker.build_default_transforming_serving_input_receiver_fn(\n raw_metadata=raw_metadata,\n transform_savedmodel_dir=transform_fn_path,\n exclude_raw_keys=[constants.LABEL_COLUMN],\n include_raw_keys=constants.FEATURE_COLUMNS + [constants.KEY_COLUMN])\n" ]
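read_dataset above wires a feature schema into a tf.data map. Below is a minimal end-to-end sketch of that parse pattern in the same TF 1.x style: build one serialized Example, then decode it with tf.parse_single_example using an equivalent FixedLenFeature schema. The feature names are illustrative, not the project's constants.

import tensorflow as tf

schema = {
    'amount': tf.FixedLenFeature(shape=[], dtype=tf.float32),
    'label': tf.FixedLenFeature(shape=[], dtype=tf.int64),
}

example = tf.train.Example(features=tf.train.Features(feature={
    'amount': tf.train.Feature(float_list=tf.train.FloatList(value=[12.5])),
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
}))

dataset = tf.data.Dataset.from_tensor_slices([example.SerializeToString()])
dataset = dataset.map(
    lambda x: tf.parse_single_example(serialized=x, features=schema))
dataset = dataset.batch(1)

# TF 1.x consumption: an iterator evaluated inside a session.
it = dataset.make_one_shot_iterator()
with tf.Session() as sess:
    print(sess.run(it.get_next()))  # {'amount': array([12.5], ...), 'label': array([1])}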
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.parse_single_example", "tensorflow.FixedLenFeature", "tensorflow.train.match_filenames_once" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
YasmeenVH/growspace
[ "d153a22a4c7e94f36a265a268a5445c6d4d6d1b1" ]
[ "growspace/envs/growspace_multiplant.py" ]
[ "from enum import Enum\nfrom enum import IntEnum\nfrom random import sample\n\nimport cv2\nimport gym\nimport numpy as np\nfrom numpy.linalg import norm\nimport time\nfrom growspace.plants.tree import PixelBranch\nfrom numpy.linalg import norm\nfrom scipy.spatial import distance\nimport sys\nimport itertools\nfrom sklearn import preprocessing\nfrom numba import jit\nfrom functools import partial\nnp.set_printoptions(threshold=sys.maxsize)\n\nDEFAULT_RES = 84\nBRANCH_THICCNESS = .015\nBRANCH_LENGTH = (1/10)*DEFAULT_RES\nMAX_BRANCHING = 8\nLIGHT_WIDTH = .25\nLIGHT_DIF = 200\nLIGHT_DISPLACEMENT = .1\nLIGHT_W_INCREMENT = .1\nMIN_LIGHT_WIDTH = .1\nMAX_LIGHT_WIDTH = 1\nFIRST_BRANCH_HEIGHT = int(.2*DEFAULT_RES)\n\n\nclass Actions(Enum):\n move_left = 0\n move_right = 1\n increase_beam = 2\n decrease_beam = 3\n noop = 4\n\n\ndef to_int(v):\n return int(round(v))\n\ndef unpack(w):\n return map(list, zip(*enumerate(w)))\n\n\nir = to_int # shortcut for function call\n\nclass Features(IntEnum):\n light = 0\n scatter = 1\n\nclass GrowSpaceEnv_Fairness(gym.Env):\n\n def __init__(self, width=DEFAULT_RES, height=DEFAULT_RES, light_dif=LIGHT_DIF, obs_type = None, level=None, setting = 'hard_above'):\n self.width = width\n self.height = height\n self.seed()\n self.light_dif = light_dif\n self.obs_type = obs_type\n if self.obs_type == None:\n self.observation_space = gym.spaces.Box(\n 0, 255, shape=(84, 84, 3), dtype=np.uint8)\n if self.obs_type == 'Binary':\n self.observation_space = gym.spaces.Box(\n 0, 1, shape=(84, 84, 5), dtype=np.uint8)\n self.level = level\n self.setting = setting\n self.__initialized = False\n self.feature_maps = np.zeros((len(Features), self.height, self.width), dtype=np.uint8)\n\n\n def seed(self, seed=None):\n return [np.random.seed(seed)]\n\n def light_scatter(self):\n # select scattering with respect to position of the light\n filter_ = np.logical_and(self.feature_maps[Features.light], self.feature_maps[Features.scatter])\n return np.argwhere(filter_)\n\n\n def light_move_R(self):\n if np.around(self.x1_light + self.light_width,2) <= self.width - LIGHT_DISPLACEMENT*self.width: # limit of coordinates\n self.x1_light += LIGHT_DISPLACEMENT*self.width # stay put\n else:\n self.x1_light = self.width - self.light_width\n\n def light_move_L(self):\n if np.around(self.x1_light,2) >= LIGHT_DISPLACEMENT*self.width: # limit of coordinates\n self.x1_light -= LIGHT_DISPLACEMENT*self.width\n else:\n self.x1_light = 0 # move by .1 leftdd\n\n def light_decrease(self):\n if np.around(self.light_width,1) <= MIN_LIGHT_WIDTH*self.width:\n self.light_width = self.light_width\n else:\n self.light_width -= LIGHT_W_INCREMENT*self.width\n\n def light_increase(self):\n if self.light_width >= MAX_LIGHT_WIDTH*self.width:\n pass\n elif self.x1_light + self.light_width >= self.width:\n self.light_width = self.width-self.x1_light\n else:\n self.light_width += LIGHT_W_INCREMENT*self.width\n\n\n def tree_grow(self, activated_photons, mindist, maxdist): #\n\n # apply filter to both idx and branches\n\n branches_trimmed = self.branches\n branches_trimmed2 = self.branches2\n for i in range(len(activated_photons) - 1, 0, -1): # number of possible scatters, check if they allow for branching with min_dist\n closest_branch = 0\n dist = 1 * self.width\n\n if len(self.branches) > MAX_BRANCHING:\n branches_trimmed = sample(self.branches, MAX_BRANCHING)\n else:\n branches_trimmed = self.branches\n\n for branch in branches_trimmed:\n photon_ptx = np.flip(activated_photons[i]) # Flip for inverted coordinates\n 
tip_to_scatter = norm(photon_ptx - np.array(branch.tip_point))\n\n if tip_to_scatter < dist:\n dist = tip_to_scatter\n closest_branch = branch\n\n if dist < mindist:\n activated_photons = np.delete(activated_photons, i)\n\n # when distance is greater than max distance, branching occurs to find other points.\n elif dist < maxdist:\n\n closest_branch.grow_count += 1\n branch_length = BRANCH_LENGTH / dist\n photon = np.flip(activated_photons[i])\n g = (photon - closest_branch.tip_point) * branch_length\n closest_branch.grow_direction += np.round(g).astype(np.int)\n\n\n ## FOR SECOND TREE\n for i in range(len(activated_photons) - 1, 0, -1): # number of possible scatters, check if they allow for branching with min_dist\n closest_branch = 0\n dist = 1 * self.width\n\n if len(self.branches2) > MAX_BRANCHING:\n branches_trimmed2 = sample(self.branches2, MAX_BRANCHING)\n else:\n branches_trimmed2 = self.branches2\n\n for branch in branches_trimmed2:\n photon_ptx = np.flip(activated_photons[i]) # Flip for inverted coordinates\n tip_to_scatter = norm(photon_ptx - np.array(branch.tip_point))\n\n if tip_to_scatter < dist:\n dist = tip_to_scatter\n closest_branch2 = branch\n\n if dist < mindist:\n activated_photons = np.delete(activated_photons, i)\n\n # when distance is greater than max distance, branching occurs to find other points.\n elif dist < maxdist:\n\n closest_branch2.grow_count += 1\n branch_length = BRANCH_LENGTH / dist\n photon = np.flip(activated_photons[i])\n g = (photon - closest_branch2.tip_point) * branch_length\n closest_branch2.grow_direction += np.round(g).astype(np.int)\n\n\n for branch in branches_trimmed:\n if branch.grow_count > 0:\n (x2, y2) = branch.tip_point + branch.grow_direction / branch.grow_count\n x2 = np.clip(x2, 0, self.width-1)\n y2 = np.clip(y2 ,0, self.height -1)\n newBranch = PixelBranch(branch.x2, ir(x2), branch.y2, ir(y2), self.width, self.height)\n self.branches.append(newBranch)\n branch.child.append(newBranch)\n branch.grow_count = 0\n branch.grow_direction.fill(0)\n\n\n # increase thickness of first elements added to tree as they grow\n self.branches[0].update_width()\n branch_coords = []\n\n #sending coordinates out\n for branch in self.branches:\n # x2 and y2 since they are the tips\n branch_coords.append(branch.tip_point)\n\n\n self.tips = branch_coords\n\n for branch in branches_trimmed2:\n if branch.grow_count > 0:\n (x2, y2) = branch.tip_point + branch.grow_direction / branch.grow_count\n x2 = np.clip(x2, 0, self.width-1)\n y2 = np.clip(y2 ,0, self.height -1)\n newBranch2 = PixelBranch(branch.x2, ir(x2), branch.y2, ir(y2), self.width, self.height)\n self.branches2.append(newBranch2)\n branch.child.append(newBranch2)\n branch.grow_count = 0\n branch.grow_direction.fill(0)\n\n\n # increase thickness of first elements added to tree as they grow\n self.branches2[0].update_width()\n branch_coords2 = []\n\n #sending coordinates out\n for branch in self.branches2:\n # x2 and y2 since they are the tips\n branch_coords2.append(branch.tip_point)\n\n\n self.tips2 = branch_coords2\n\n\n return branch_coords, branch_coords2\n\n def distance_target(self, coords):\n # Calculate distance from each tip grown\n dist = distance.cdist(coords, [self.target],\n 'euclidean')\n\n # Get smallest distance to target\n min_dist = min(dist)\n #print(min_dist)\n\n return min_dist\n\n def get_observation(self, debug_show_scatter=False):\n # new empty image\n\n img = np.zeros((self.height, self.width, 3), dtype=np.uint8)\n\n if self.obs_type == 'Binary':\n\n # ---light beam --- #\n\n 
yellow = (0, 128, 128) # RGB color (dark yellow)\n x1 = ir(self.x1_light * self.width)\n x2 = ir(self.x2_light * self.width)\n cv2.rectangle(\n img, pt1=(x1, 0), pt2=(x2, self.height), color=yellow, thickness=-1)\n # print(img.shape)\n light_img = np.sum(img, axis=2)\n light = np.where(light_img <=128, light_img, 1)\n\n # ---tree--- #\n img1 = np.zeros((self.height, self.width, 3), dtype=np.uint8)\n for branch in self.branches:\n pt1, pt2 = branch.get_pt1_pt2()\n thiccness = ir(branch.width * BRANCH_THICCNESS * self.width)\n # print(\"branch width\", branch.width, \" BRANCH THICCNESS: \", BRANCH_THICCNESS, \" width: \", self.width)\n cv2.line(\n img1,\n pt1=pt1,\n pt2=pt2,\n color=(0, 255, 0),\n thickness=thiccness)\n tree_img = np.sum(img1, axis=2)\n tree = np.where(tree_img <= 255, tree_img, 1)\n\n # ---target--- #\n img2 = np.zeros((self.height, self.width, 3), dtype=np.uint8)\n x = ir(self.target[0] * self.width)\n y = ir(self.target[1] * self.height)\n cv2.circle(\n img,\n center=(x, y),\n radius=ir(.03 * self.width),\n color=(0, 0, 255),\n thickness=-1)\n\n target_img = np.sum(img2, axis=2)\n target = np.where(target_img <= 255, target_img, 1)\n\n final_img = np.dstack((light, tree, target))\n #print(\"dimensions of binary :\",final_img.shape)\n\n final_img = cv2.flip(final_img, 0)\n\n return final_img\n\n\n if self.obs_type == None:\n # place light as rectangle\n yellow = (0, 128, 128) # RGB color (dark yellow)\n img[self.feature_maps[Features.light].nonzero()] = yellow\n\n cv2.rectangle(\n img, pt1=(int(self.x1_light), 0), pt2=(int(self.x2_light),self.height),color=yellow,thickness=-1)\n\n\n if debug_show_scatter:\n xs, ys = self.light_scatter()\n for k in range(len(xs)):\n x = ir(xs[k] * self.width)\n y = ir(ys[k] * self.height)\n cv2.circle(\n img,\n center=(x, y),\n radius=2,\n color=(255, 0, 0),\n thickness=-1)\n\n # Draw plant as series of lines (1 branch = 1 line)\n for branch in self.branches:\n thiccness = ir(branch.width * BRANCH_THICCNESS * self.width)\n cv2.line(img, pt1=branch.p, pt2=branch.tip_point, color=(0, 255, 0), thickness=thiccness)\n\n ## draw second tree\n for branch in self.branches2:\n thiccness = ir(branch.width * BRANCH_THICCNESS * self.width)\n cv2.line(img, pt1=branch.p, pt2=branch.tip_point, color=(0, 255, 0), thickness=thiccness)\n\n\n # place goal as filled circle with center and radius\n # also important - place goal last because must be always visible\n x = ir(self.target[0]) #* self.width)\n y = ir(self.target[1])#* self.height)\n cv2.circle(\n img,\n center=(x, y),\n radius=ir(.03 * self.width),\n color=(0, 0, 255),\n thickness=-1)\n\n # flip image, because plant grows from the bottom, not the top\n img = cv2.flip(img, 0)\n\n return img\n\n def reset(self):\n self.light_width = ir(LIGHT_WIDTH*self.width)\n\n if self.setting == 'easy':\n random_start = ir(np.random.rand()*self.width)\n random_start2 = ir(random_start + self.light_width)\n self.target = [random_start+(self.light_width/2), 0.8*self.height]\n\n elif self.setting == 'hard_middle':\n random_start = ir(np.random.uniform(low=0.05, high=0.2)*self.width)\n random_start2 = ir(np.random.uniform(low=0.8, high=0.95)*self.width)\n\n self.target = [0.5*self.width, 0.8*self.height]\n\n elif self.setting == 'hard_above':\n coin_flip = np.random.randint(2, size=1)\n random_start = ir(np.random.uniform(low=0.05, high=0.2)*self.width)\n random_start2 = ir(np.random.uniform(low=0.8, high=0.95)*self.width)\n if coin_flip == 0:\n self.target = [random_start, 0.8*self.height]\n if coin_flip == 1:\n 
self.target = [random_start2, 0.8*self.height]\n\n else:\n random_start = ir(np.random.rand()*self.width)\n random_start2 = ir(np.random.rand()*self.width)\n self.target = [ir(np.random.rand()*self.width), 0.8*self.height] # [np.random.uniform(0, 1), .8]\n\n start_light = ir(np.random.rand()*self.width)\n if np.abs(random_start2-random_start) < self.light_width:\n random_start2 = random_start2 + self.light_width\n if random_start2 > 1*self.width:\n random_start2 = ir(0.99*self.width)\n\n self.branches = [\n PixelBranch(\n x=random_start,\n x2=random_start,\n y=0,\n y2=FIRST_BRANCH_HEIGHT,\n img_width=self.width,\n img_height=self.height)]\n\n self.branches2 = [\n PixelBranch(\n x=random_start2,\n x2=random_start2,\n y=0,\n y2=FIRST_BRANCH_HEIGHT,\n img_width=self.width,\n img_height=self.height)]\n\n if start_light > .87*self.width:\n self.x1_light = .75*self.width\n elif start_light < 0.13*self.width:\n self.x1_light = 0\n else:\n self.x1_light = start_light - (self.light_width / 2)\n\n self.x2_light = self.x1_light + self.light_width\n\n #self.x_scatter = np.random.uniform(0, 1, self.light_dif)\n y_scatter = np.random.randint(0, self.width, self.light_dif)\n x_scatter = np.random.randint(FIRST_BRANCH_HEIGHT, self.height, self.light_dif)\n #self.y_scatter = np.random.uniform(FIRST_BRANCH_HEIGHT, 1, self.light_dif)\n self.feature_maps[Features.scatter].fill(False)\n self.feature_maps[Features.scatter][x_scatter, y_scatter] = True\n self.steps = 0\n self.new_branches = 0\n self.tips_per_step = 0\n self.b1 = 0 #branches from plant 1\n self.b2 = 0 #branches from plant 2\n self.light_move = 0\n self.tips = [self.branches[0].tip_point]\n self.tips2 = [self.branches2[0].tip_point]\n self.draw_beam()\n #self.__initialized = True\n return self.get_observation()\n\n def step(self, action):\n\n if action == 0:\n self.light_move_L()\n\n if action == 1:\n self.light_move_R()\n\n if action == 2:\n self.light_increase()\n\n if action == 3:\n self.light_decrease()\n\n if action == 4:\n # then we keep the light in place\n pass\n\n self.x2_light = self.x1_light + self.light_width\n\n # filter scattering\n\n pts = self.light_scatter()\n # Branching step for light in this position\n tips = self.tree_grow(pts, .01*self.width, .15*self.height)\n self.draw_beam()\n\n # Calculate distance to target\n d1 = self.distance_target(tips[0])\n d2 = self.distance_target(tips[1])\n\n if d1 <= 3.2:\n r1 = 1/3.2\n\n else:\n r1 = 1/d1\n\n if d2 <= 3.2:\n r2 = 1/3.2\n else:\n r2 = 1/d2\n\n if r1 < r2:\n reward = r1\n elif r2 == r1:\n reward = r2\n else:\n reward = r2\n\n if reward == 1/3.2:\n success = 1\n else:\n success = 0\n\n\n # Render image of environment at current state\n observation = self.get_observation() #image\n\n plant = (observation[:, :, 1] / 255) # binary map of plant\n pixel_plant = np.sum(plant)\n\n done = False # because we don't have a terminal condition\n misc = {\"tips\": tips, \"target\": self.target, \"light\": self.x1_light, \"light_width\": self.light_width, \"step\": self.steps, \"success\": success}\n\n if self.steps == 0:\n self.new_branches = len(tips[0]) + len(tips[1])\n self.b1 = len(tips[0])\n self.b2 = len(tips[1])\n\n misc['new_branches'] = self.new_branches\n misc['new_b1'] = self.b1\n misc['new_b2'] = self.b2\n self.light_move = self.light_move\n\n else:\n new_branches = len(tips[0])+len(tips[1])-self.new_branches\n self.b1 = len(tips[0]) - self.b1\n self.b2 = len(tips[1]) - self.b2\n misc['new_b1'] = self.b1\n misc['new_b2'] = self.b2\n misc['new_branches'] = new_branches\n 
self.new_branches = len(tips[0]) + len(tips[1]) # reset for future step\n self.light_move = np.abs(self.light_move - self.x1_light)\n misc['light_move'] = self.light_move\n\n misc['img'] = observation\n misc['plant_pixel'] = pixel_plant\n\n self.steps += 1\n\n return observation, float(reward), done, misc\n\n def render(self, mode='human',\n debug_show_scatter=False): # or mode=\"rgb_array\"\n img = self.get_observation(debug_show_scatter)\n\n if mode == \"human\":\n cv2.imshow('plant', img) # create opencv window to show plant\n cv2.waitKey(1) # this is necessary or the window closes immediately\n else:\n return img\n\n def draw_beam(self):\n self.feature_maps[Features.light].fill(False)\n cv2.rectangle(\n self.feature_maps[Features.light], pt1=(int(self.x1_light), 0),\n pt2=(int(self.x2_light), self.height),\n color=True ,\n thickness=-1)\n\nif __name__ == '__main__':\n import time\n\n gse = GrowSpaceEnv_Fairness()\n\n def key2action(key):\n\n if key == ord('a'):\n return 0 # move left\n elif key == ord('d'):\n return 1 # move right\n elif key == ord('s'):\n return 4 # stay in place\n elif key == ord('w'):\n return 2\n elif key == ord('x'):\n return 3\n else:\n return None\n\n rewards_mean = []\n while True:\n gse.reset()\n img = gse.get_observation(debug_show_scatter=False)\n cv2.imshow(\"plant\", img)\n rewards = []\n for _ in range(50):\n action = key2action(cv2.waitKey(-1))\n if action is None:\n quit()\n\n b,t,c,f = gse.step(action)\n\n rewards.append(t)\n cv2.imshow(\"plant\", gse.get_observation(debug_show_scatter=False))\n total = sum(rewards)\n\n rewards_mean.append(total)\n av = np.mean(rewards_mean)\n print(\"amount of rewards:\", total)\n print('mean:', av)\n\n" ]
[ [ "numpy.around", "numpy.round", "numpy.mean", "numpy.where", "numpy.random.randint", "numpy.clip", "numpy.zeros", "scipy.spatial.distance.cdist", "numpy.delete", "numpy.random.rand", "numpy.array", "numpy.flip", "numpy.logical_and", "numpy.sum", "numpy.abs", "numpy.random.seed", "numpy.set_printoptions", "numpy.dstack", "numpy.argwhere", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
lacouth/scikit-learn
[ "d44c7ae8eb164ebf563f16301e67c519c54cb119", "d44c7ae8eb164ebf563f16301e67c519c54cb119" ]
[ "sklearn/impute/_knn.py", "sklearn/cluster/_affinity_propagation.py" ]
[ "# Authors: Ashim Bhattarai <[email protected]>\n# Thomas J Fan <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\n\nfrom ._base import _BaseImputer\nfrom ..utils.validation import FLOAT_DTYPES\nfrom ..metrics import pairwise_distances_chunked\nfrom ..metrics.pairwise import _NAN_METRICS\nfrom ..neighbors._base import _get_weights\nfrom ..neighbors._base import _check_weights\nfrom ..utils import is_scalar_nan\nfrom ..utils._mask import _get_mask\nfrom ..utils.validation import check_is_fitted\n\n\nclass KNNImputer(_BaseImputer):\n \"\"\"Imputation for completing missing values using k-Nearest Neighbors.\n\n Each sample's missing values are imputed using the mean value from\n `n_neighbors` nearest neighbors found in the training set. Two samples are\n close if the features that neither is missing are close.\n\n Read more in the :ref:`User Guide <knnimpute>`.\n\n .. versionadded:: 0.22\n\n Parameters\n ----------\n missing_values : int, float, str, np.nan or None, default=np.nan\n The placeholder for the missing values. All occurrences of\n `missing_values` will be imputed. For pandas' dataframes with\n nullable integer dtypes with missing values, `missing_values`\n should be set to np.nan, since `pd.NA` will be converted to np.nan.\n\n n_neighbors : int, default=5\n Number of neighboring samples to use for imputation.\n\n weights : {'uniform', 'distance'} or callable, default='uniform'\n Weight function used in prediction. Possible values:\n\n - 'uniform' : uniform weights. All points in each neighborhood are\n weighted equally.\n - 'distance' : weight points by the inverse of their distance.\n in this case, closer neighbors of a query point will have a\n greater influence than neighbors which are further away.\n - callable : a user-defined function which accepts an\n array of distances, and returns an array of the same shape\n containing the weights.\n\n metric : {'nan_euclidean'} or callable, default='nan_euclidean'\n Distance metric for searching neighbors. Possible values:\n\n - 'nan_euclidean'\n - callable : a user-defined function which conforms to the definition\n of ``_pairwise_callable(X, Y, metric, **kwds)``. The function\n accepts two arrays, X and Y, and a `missing_values` keyword in\n `kwds` and returns a scalar distance value.\n\n copy : bool, default=True\n If True, a copy of X will be created. If False, imputation will\n be done in-place whenever possible.\n\n add_indicator : bool, default=False\n If True, a :class:`MissingIndicator` transform will stack onto the\n output of the imputer's transform. This allows a predictive estimator\n to account for missingness despite imputation. If a feature has no\n missing values at fit/train time, the feature won't appear on the\n missing indicator even if there are missing values at transform/test\n time.\n\n Attributes\n ----------\n indicator_ : :class:`~sklearn.impute.MissingIndicator`\n Indicator used to add binary indicators for missing values.\n ``None`` if add_indicator is False.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n References\n ----------\n * Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor\n Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing\n value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17\n no. 
6, 2001 Pages 520-525.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.impute import KNNImputer\n >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]\n >>> imputer = KNNImputer(n_neighbors=2)\n >>> imputer.fit_transform(X)\n array([[1. , 2. , 4. ],\n [3. , 4. , 3. ],\n [5.5, 6. , 5. ],\n [8. , 8. , 7. ]])\n \"\"\"\n\n def __init__(\n self,\n *,\n missing_values=np.nan,\n n_neighbors=5,\n weights=\"uniform\",\n metric=\"nan_euclidean\",\n copy=True,\n add_indicator=False\n ):\n super().__init__(missing_values=missing_values, add_indicator=add_indicator)\n self.n_neighbors = n_neighbors\n self.weights = weights\n self.metric = metric\n self.copy = copy\n\n def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):\n \"\"\"Helper function to impute a single column.\n\n Parameters\n ----------\n dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)\n Distance matrix between the receivers and potential donors from\n training set. There must be at least one non-nan distance between\n a receiver and a potential donor.\n\n n_neighbors : int\n Number of neighbors to consider.\n\n fit_X_col : ndarray of shape (n_potential_donors,)\n Column of potential donors from training set.\n\n mask_fit_X_col : ndarray of shape (n_potential_donors,)\n Missing mask for fit_X_col.\n\n Returns\n -------\n imputed_values: ndarray of shape (n_receivers,)\n Imputed values for receiver.\n \"\"\"\n # Get donors\n donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[\n :, :n_neighbors\n ]\n\n # Get weight matrix from from distance matrix\n donors_dist = dist_pot_donors[\n np.arange(donors_idx.shape[0])[:, None], donors_idx\n ]\n\n weight_matrix = _get_weights(donors_dist, self.weights)\n\n # fill nans with zeros\n if weight_matrix is not None:\n weight_matrix[np.isnan(weight_matrix)] = 0.0\n\n # Retrieve donor values and calculate kNN average\n donors = fit_X_col.take(donors_idx)\n donors_mask = mask_fit_X_col.take(donors_idx)\n donors = np.ma.array(donors, mask=donors_mask)\n\n return np.ma.average(donors, axis=1, weights=weight_matrix).data\n\n def fit(self, X, y=None):\n \"\"\"Fit the imputer on X.\n\n Parameters\n ----------\n X : array-like shape of (n_samples, n_features)\n Input data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n self : object\n \"\"\"\n # Check data integrity and calling arguments\n if not is_scalar_nan(self.missing_values):\n force_all_finite = True\n else:\n force_all_finite = \"allow-nan\"\n if self.metric not in _NAN_METRICS and not callable(self.metric):\n raise ValueError(\"The selected metric does not support NaN values\")\n if self.n_neighbors <= 0:\n raise ValueError(\n \"Expected n_neighbors > 0. Got {}\".format(self.n_neighbors)\n )\n\n X = self._validate_data(\n X,\n accept_sparse=False,\n dtype=FLOAT_DTYPES,\n force_all_finite=force_all_finite,\n copy=self.copy,\n )\n\n _check_weights(self.weights)\n self._fit_X = X\n self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)\n\n super()._fit_indicator(self._mask_fit_X)\n\n return self\n\n def transform(self, X):\n \"\"\"Impute all missing values in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n X : array-like of shape (n_samples, n_output_features)\n The imputed dataset. 
`n_output_features` is the number of features\n that is not always missing during `fit`.\n \"\"\"\n\n check_is_fitted(self)\n if not is_scalar_nan(self.missing_values):\n force_all_finite = True\n else:\n force_all_finite = \"allow-nan\"\n X = self._validate_data(\n X,\n accept_sparse=False,\n dtype=FLOAT_DTYPES,\n force_all_finite=force_all_finite,\n copy=self.copy,\n reset=False,\n )\n\n mask = _get_mask(X, self.missing_values)\n mask_fit_X = self._mask_fit_X\n valid_mask = ~np.all(mask_fit_X, axis=0)\n\n X_indicator = super()._transform_indicator(mask)\n\n # Removes columns where the training data is all nan\n if not np.any(mask):\n # No missing values in X\n # Remove columns where the training data is all nan\n return X[:, valid_mask]\n\n row_missing_idx = np.flatnonzero(mask.any(axis=1))\n\n non_missing_fix_X = np.logical_not(mask_fit_X)\n\n # Maps from indices from X to indices in dist matrix\n dist_idx_map = np.zeros(X.shape[0], dtype=int)\n dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])\n\n def process_chunk(dist_chunk, start):\n row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]\n\n # Find and impute missing by column\n for col in range(X.shape[1]):\n if not valid_mask[col]:\n # column was all missing during training\n continue\n\n col_mask = mask[row_missing_chunk, col]\n if not np.any(col_mask):\n # column has no missing values\n continue\n\n (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])\n\n # receivers_idx are indices in X\n receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]\n\n # distances for samples that needed imputation for column\n dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][\n :, potential_donors_idx\n ]\n\n # receivers with all nan distances impute with mean\n all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)\n all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]\n\n if all_nan_receivers_idx.size:\n col_mean = np.ma.array(\n self._fit_X[:, col], mask=mask_fit_X[:, col]\n ).mean()\n X[all_nan_receivers_idx, col] = col_mean\n\n if len(all_nan_receivers_idx) == len(receivers_idx):\n # all receivers imputed with mean\n continue\n\n # receivers with at least one defined distance\n receivers_idx = receivers_idx[~all_nan_dist_mask]\n dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][\n :, potential_donors_idx\n ]\n\n n_neighbors = min(self.n_neighbors, len(potential_donors_idx))\n value = self._calc_impute(\n dist_subset,\n n_neighbors,\n self._fit_X[potential_donors_idx, col],\n mask_fit_X[potential_donors_idx, col],\n )\n X[receivers_idx, col] = value\n\n # process in fixed-memory chunks\n gen = pairwise_distances_chunked(\n X[row_missing_idx, :],\n self._fit_X,\n metric=self.metric,\n missing_values=self.missing_values,\n force_all_finite=force_all_finite,\n reduce_func=process_chunk,\n )\n for chunk in gen:\n # process_chunk modifies X in place. 
No return value.\n pass\n\n return super()._concatenate_indicator(X[:, valid_mask], X_indicator)\n", "\"\"\"Affinity Propagation clustering algorithm.\"\"\"\n\n# Author: Alexandre Gramfort [email protected]\n# Gael Varoquaux [email protected]\n\n# License: BSD 3 clause\n\nimport numpy as np\nimport warnings\n\nfrom ..exceptions import ConvergenceWarning\nfrom ..base import BaseEstimator, ClusterMixin\nfrom ..utils import as_float_array, check_random_state\nfrom ..utils.deprecation import deprecated\nfrom ..utils.validation import check_is_fitted\nfrom ..metrics import euclidean_distances\nfrom ..metrics import pairwise_distances_argmin\nfrom .._config import config_context\n\n\ndef _equal_similarities_and_preferences(S, preference):\n def all_equal_preferences():\n return np.all(preference == preference.flat[0])\n\n def all_equal_similarities():\n # Create mask to ignore diagonal of S\n mask = np.ones(S.shape, dtype=bool)\n np.fill_diagonal(mask, 0)\n\n return np.all(S[mask].flat == S[mask].flat[0])\n\n return all_equal_preferences() and all_equal_similarities()\n\n\ndef affinity_propagation(\n S,\n *,\n preference=None,\n convergence_iter=15,\n max_iter=200,\n damping=0.5,\n copy=True,\n verbose=False,\n return_n_iter=False,\n random_state=None\n):\n \"\"\"Perform Affinity Propagation Clustering of data.\n\n Read more in the :ref:`User Guide <affinity_propagation>`.\n\n Parameters\n ----------\n\n S : array-like of shape (n_samples, n_samples)\n Matrix of similarities between points.\n\n preference : array-like of shape (n_samples,) or float, default=None\n Preferences for each point - points with larger values of\n preferences are more likely to be chosen as exemplars. The number of\n exemplars, i.e. of clusters, is influenced by the input preferences\n value. If the preferences are not passed as arguments, they will be\n set to the median of the input similarities (resulting in a moderate\n number of clusters). For a smaller amount of clusters, this can be set\n to the minimum value of the similarities.\n\n convergence_iter : int, default=15\n Number of iterations with no change in the number\n of estimated clusters that stops the convergence.\n\n max_iter : int, default=200\n Maximum number of iterations\n\n damping : float, default=0.5\n Damping factor between 0.5 and 1.\n\n copy : bool, default=True\n If copy is False, the affinity matrix is modified inplace by the\n algorithm, for memory efficiency.\n\n verbose : bool, default=False\n The verbosity level.\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n random_state : int, RandomState instance or None, default=None\n Pseudo-random number generator to control the starting state.\n Use an int for reproducible results across function calls.\n See the :term:`Glossary <random_state>`.\n\n .. versionadded:: 0.23\n this parameter was previously hardcoded as 0.\n\n Returns\n -------\n\n cluster_centers_indices : ndarray of shape (n_clusters,)\n Index of clusters centers.\n\n labels : ndarray of shape (n_samples,)\n Cluster labels for each point.\n\n n_iter : int\n Number of iterations run. 
Returned only if `return_n_iter` is\n set to True.\n\n Notes\n -----\n For an example, see :ref:`examples/cluster/plot_affinity_propagation.py\n <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.\n\n When the algorithm does not converge, it returns an empty array as\n ``cluster_center_indices`` and ``-1`` as label for each training sample.\n\n When all training samples have equal similarities and equal preferences,\n the assignment of cluster centers and labels depends on the preference.\n If the preference is smaller than the similarities, a single cluster center\n and label ``0`` for every sample will be returned. Otherwise, every\n training sample becomes its own cluster center and is assigned a unique\n label.\n\n References\n ----------\n Brendan J. Frey and Delbert Dueck, \"Clustering by Passing Messages\n Between Data Points\", Science Feb. 2007\n \"\"\"\n S = as_float_array(S, copy=copy)\n n_samples = S.shape[0]\n\n if S.shape[0] != S.shape[1]:\n raise ValueError(\"S must be a square array (shape=%s)\" % repr(S.shape))\n\n if preference is None:\n preference = np.median(S)\n if damping < 0.5 or damping >= 1:\n raise ValueError(\"damping must be >= 0.5 and < 1\")\n\n preference = np.array(preference)\n\n if n_samples == 1 or _equal_similarities_and_preferences(S, preference):\n # It makes no sense to run the algorithm in this case, so return 1 or\n # n_samples clusters, depending on preferences\n warnings.warn(\n \"All samples have mutually equal similarities. \"\n \"Returning arbitrary cluster center(s).\"\n )\n if preference.flat[0] >= S.flat[n_samples - 1]:\n return (\n (np.arange(n_samples), np.arange(n_samples), 0)\n if return_n_iter\n else (np.arange(n_samples), np.arange(n_samples))\n )\n else:\n return (\n (np.array([0]), np.array([0] * n_samples), 0)\n if return_n_iter\n else (np.array([0]), np.array([0] * n_samples))\n )\n\n random_state = check_random_state(random_state)\n\n # Place preference on the diagonal of S\n S.flat[:: (n_samples + 1)] = preference\n\n A = np.zeros((n_samples, n_samples))\n R = np.zeros((n_samples, n_samples)) # Initialize messages\n # Intermediate results\n tmp = np.zeros((n_samples, n_samples))\n\n # Remove degeneracies\n S += (\n np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100\n ) * random_state.randn(n_samples, n_samples)\n\n # Execute parallel affinity propagation updates\n e = np.zeros((n_samples, convergence_iter))\n\n ind = np.arange(n_samples)\n\n for it in range(max_iter):\n # tmp = A + S; compute responsibilities\n np.add(A, S, tmp)\n I = np.argmax(tmp, axis=1)\n Y = tmp[ind, I] # np.max(A + S, axis=1)\n tmp[ind, I] = -np.inf\n Y2 = np.max(tmp, axis=1)\n\n # tmp = Rnew\n np.subtract(S, Y[:, None], tmp)\n tmp[ind, I] = S[ind, I] - Y2\n\n # Damping\n tmp *= 1 - damping\n R *= damping\n R += tmp\n\n # tmp = Rp; compute availabilities\n np.maximum(R, 0, tmp)\n tmp.flat[:: n_samples + 1] = R.flat[:: n_samples + 1]\n\n # tmp = -Anew\n tmp -= np.sum(tmp, axis=0)\n dA = np.diag(tmp).copy()\n tmp.clip(0, np.inf, tmp)\n tmp.flat[:: n_samples + 1] = dA\n\n # Damping\n tmp *= 1 - damping\n A *= damping\n A -= tmp\n\n # Check for convergence\n E = (np.diag(A) + np.diag(R)) > 0\n e[:, it % convergence_iter] = E\n K = np.sum(E, axis=0)\n\n if it >= convergence_iter:\n se = np.sum(e, axis=1)\n unconverged = np.sum((se == convergence_iter) + (se == 0)) != n_samples\n if (not unconverged and (K > 0)) or (it == max_iter):\n never_converged = False\n if verbose:\n print(\"Converged after %d iterations.\" % it)\n break\n else:\n 
never_converged = True\n if verbose:\n print(\"Did not converge\")\n\n I = np.flatnonzero(E)\n K = I.size # Identify exemplars\n\n if K > 0 and not never_converged:\n c = np.argmax(S[:, I], axis=1)\n c[I] = np.arange(K) # Identify clusters\n # Refine the final set of exemplars and clusters and return results\n for k in range(K):\n ii = np.where(c == k)[0]\n j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))\n I[k] = ii[j]\n\n c = np.argmax(S[:, I], axis=1)\n c[I] = np.arange(K)\n labels = I[c]\n # Reduce labels to a sorted, gapless, list\n cluster_centers_indices = np.unique(labels)\n labels = np.searchsorted(cluster_centers_indices, labels)\n else:\n warnings.warn(\n \"Affinity propagation did not converge, this model \"\n \"will not have any cluster centers.\",\n ConvergenceWarning,\n )\n labels = np.array([-1] * n_samples)\n cluster_centers_indices = []\n\n if return_n_iter:\n return cluster_centers_indices, labels, it + 1\n else:\n return cluster_centers_indices, labels\n\n\n###############################################################################\n\n\nclass AffinityPropagation(ClusterMixin, BaseEstimator):\n \"\"\"Perform Affinity Propagation Clustering of data.\n\n Read more in the :ref:`User Guide <affinity_propagation>`.\n\n Parameters\n ----------\n damping : float, default=0.5\n Damping factor (between 0.5 and 1) is the extent to\n which the current value is maintained relative to\n incoming values (weighted 1 - damping). This in order\n to avoid numerical oscillations when updating these\n values (messages).\n\n max_iter : int, default=200\n Maximum number of iterations.\n\n convergence_iter : int, default=15\n Number of iterations with no change in the number\n of estimated clusters that stops the convergence.\n\n copy : bool, default=True\n Make a copy of input data.\n\n preference : array-like of shape (n_samples,) or float, default=None\n Preferences for each point - points with larger values of\n preferences are more likely to be chosen as exemplars. The number\n of exemplars, ie of clusters, is influenced by the input\n preferences value. If the preferences are not passed as arguments,\n they will be set to the median of the input similarities.\n\n affinity : {'euclidean', 'precomputed'}, default='euclidean'\n Which affinity to use. At the moment 'precomputed' and\n ``euclidean`` are supported. 'euclidean' uses the\n negative squared euclidean distance between points.\n\n verbose : bool, default=False\n Whether to be verbose.\n\n random_state : int, RandomState instance or None, default=None\n Pseudo-random number generator to control the starting state.\n Use an int for reproducible results across function calls.\n See the :term:`Glossary <random_state>`.\n\n .. versionadded:: 0.23\n this parameter was previously hardcoded as 0.\n\n Attributes\n ----------\n cluster_centers_indices_ : ndarray of shape (n_clusters,)\n Indices of cluster centers.\n\n cluster_centers_ : ndarray of shape (n_clusters, n_features)\n Cluster centers (if affinity != ``precomputed``).\n\n labels_ : ndarray of shape (n_samples,)\n Labels of each point.\n\n affinity_matrix_ : ndarray of shape (n_samples, n_samples)\n Stores the affinity matrix used in ``fit``.\n\n n_iter_ : int\n Number of iterations taken to converge.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. 
versionadded:: 0.24\n\n Notes\n -----\n For an example, see :ref:`examples/cluster/plot_affinity_propagation.py\n <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.\n\n The algorithmic complexity of affinity propagation is quadratic\n in the number of points.\n\n When ``fit`` does not converge, ``cluster_centers_`` becomes an empty\n array and all training samples will be labelled as ``-1``. In addition,\n ``predict`` will then label every sample as ``-1``.\n\n When all training samples have equal similarities and equal preferences,\n the assignment of cluster centers and labels depends on the preference.\n If the preference is smaller than the similarities, ``fit`` will result in\n a single cluster center and label ``0`` for every sample. Otherwise, every\n training sample becomes its own cluster center and is assigned a unique\n label.\n\n References\n ----------\n\n Brendan J. Frey and Delbert Dueck, \"Clustering by Passing Messages\n Between Data Points\", Science Feb. 2007\n\n Examples\n --------\n >>> from sklearn.cluster import AffinityPropagation\n >>> import numpy as np\n >>> X = np.array([[1, 2], [1, 4], [1, 0],\n ... [4, 2], [4, 4], [4, 0]])\n >>> clustering = AffinityPropagation(random_state=5).fit(X)\n >>> clustering\n AffinityPropagation(random_state=5)\n >>> clustering.labels_\n array([0, 0, 0, 1, 1, 1])\n >>> clustering.predict([[0, 0], [4, 4]])\n array([0, 1])\n >>> clustering.cluster_centers_\n array([[1, 2],\n [4, 2]])\n \"\"\"\n\n def __init__(\n self,\n *,\n damping=0.5,\n max_iter=200,\n convergence_iter=15,\n copy=True,\n preference=None,\n affinity=\"euclidean\",\n verbose=False,\n random_state=None\n ):\n\n self.damping = damping\n self.max_iter = max_iter\n self.convergence_iter = convergence_iter\n self.copy = copy\n self.verbose = verbose\n self.preference = preference\n self.affinity = affinity\n self.random_state = random_state\n\n # TODO: Remove in 1.1\n # mypy error: Decorated property not supported\n @deprecated( # type: ignore\n \"Attribute _pairwise was deprecated in \"\n \"version 0.24 and will be removed in 1.1 (renaming of 0.26).\"\n )\n @property\n def _pairwise(self):\n return self.affinity == \"precomputed\"\n\n def _more_tags(self):\n return {\"pairwise\": self.affinity == \"precomputed\"}\n\n def fit(self, X, y=None):\n \"\"\"Fit the clustering from features, or affinity matrix.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), or \\\n array-like of shape (n_samples, n_samples)\n Training instances to cluster, or similarities / affinities between\n instances if ``affinity='precomputed'``. If a sparse feature matrix\n is provided, it will be converted into a sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self\n\n \"\"\"\n if self.affinity == \"precomputed\":\n accept_sparse = False\n else:\n accept_sparse = \"csr\"\n X = self._validate_data(X, accept_sparse=accept_sparse)\n if self.affinity == \"precomputed\":\n self.affinity_matrix_ = X\n elif self.affinity == \"euclidean\":\n self.affinity_matrix_ = -euclidean_distances(X, squared=True)\n else:\n raise ValueError(\n \"Affinity must be 'precomputed' or \"\n \"'euclidean'. 
Got %s instead\" % str(self.affinity)\n )\n\n (\n self.cluster_centers_indices_,\n self.labels_,\n self.n_iter_,\n ) = affinity_propagation(\n self.affinity_matrix_,\n preference=self.preference,\n max_iter=self.max_iter,\n convergence_iter=self.convergence_iter,\n damping=self.damping,\n copy=self.copy,\n verbose=self.verbose,\n return_n_iter=True,\n random_state=self.random_state,\n )\n\n if self.affinity != \"precomputed\":\n self.cluster_centers_ = X[self.cluster_centers_indices_].copy()\n\n return self\n\n def predict(self, X):\n \"\"\"Predict the closest cluster each sample in X belongs to.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n New data to predict. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_data(X, reset=False, accept_sparse=\"csr\")\n if not hasattr(self, \"cluster_centers_\"):\n raise ValueError(\n \"Predict method is not supported when \" \"affinity='precomputed'.\"\n )\n\n if self.cluster_centers_.shape[0] > 0:\n with config_context(assume_finite=True):\n return pairwise_distances_argmin(X, self.cluster_centers_)\n else:\n warnings.warn(\n \"This model does not have any cluster centers \"\n \"because affinity propagation did not converge. \"\n \"Labeling every sample as '-1'.\",\n ConvergenceWarning,\n )\n return np.array([-1] * X.shape[0])\n\n def fit_predict(self, X, y=None):\n \"\"\"Fit the clustering from features or affinity matrix, and return\n cluster labels.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), or \\\n array-like of shape (n_samples, n_samples)\n Training instances to cluster, or similarities / affinities between\n instances if ``affinity='precomputed'``. If a sparse feature matrix\n is provided, it will be converted into a sparse ``csr_matrix``.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n \"\"\"\n return super().fit_predict(X, y)\n" ]
[ [ "numpy.logical_not", "numpy.nonzero", "numpy.isnan", "numpy.arange", "numpy.flatnonzero", "numpy.all", "numpy.argpartition", "numpy.any", "numpy.ma.array", "numpy.zeros", "numpy.ma.average" ], [ "numpy.diag", "numpy.all", "numpy.max", "numpy.fill_diagonal", "numpy.searchsorted", "numpy.where", "numpy.unique", "numpy.arange", "numpy.subtract", "numpy.flatnonzero", "numpy.finfo", "numpy.argmax", "numpy.zeros", "numpy.median", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.ones", "numpy.add" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SebastianMacaluso/ReclusterTreeAlgorithms
[ "d868706dcfe592bf327d2b250ca638813dca0cdc", "d868706dcfe592bf327d2b250ca638813dca0cdc" ]
[ "src/StandardHC/N2Greedy_invM.py", "src/StandardHC/jetClustering.py" ]
[ "import sys\nimport numpy as np\nimport logging\nimport pickle\nimport itertools\nimport time\nimport copy\n\nfrom . import likelihood_invM as likelihood\nfrom . import auxFunctions_invM as auxFunctions\n\nfrom .utils import get_logger\n\nlogger = get_logger(level=logging.INFO)\n\n\n\"\"\"\nThis is an O(N^2) algorithm for a greedy clustering of nodes into a tree, based on the maximum likelihood. The algorithm also builds a dictionary with features needed to traverse, access nodes info and visualize the clustered trees.\n\"\"\"\n\n\ndef recluster(\n\t\tinput_jet,\n\t\tsave = False,\n\t\tdelta_min = None,\n\t\tlam = None,\n\t\tvisualize = False,\n):\n\t\"\"\"\n\tGet the leaves of an input jet,\n\trecluster them following the beam search algorithm determined by the beam size,\n\tcreate the new tree for the chosen algorithm,\n\tmake a jet dictionary list with all with the jets that give the best log likelihood up to N_best and save it.\n\n\tCreate a dictionary with all the jet tree info\n\t\t- jet[\"root_id\"]: root node id of the tree\n\t\t- jet[\"content\"]: list with the tree nodes (particles) momentum vectors. For the ToyJetsShower we consider a 2D model,\n\t\tso we have (py,pz), with pz the direction of the beam axis\n\t\t- jet[\"tree\"]: list with the tree structure. Each entry contains a list with the [left,right] children of a node.\n\t\tIf [-1,-1] then the node is a leaf.\n\n\tNew features added to the tree:\n\t\t- jet[\"tree_ancestors\"]: List with one entry for each leaf of the tree, where each entry lists all the ancestor node ids\n\t\twhen traversing the tree from the root to the leaf node.\n\t\t- jet[\"linkage_list\"]: linkage list to build heat clustermap visualizations.\n\t\t- jet[\"Nconst\"]: Number of leaves of the tree.\n\t\t- jet[\"algorithm\"]: Algorithm to generate the tree structure, e.g. 
truth, kt, antikt, CA, likelihood.\n\n\tArgs:\n\t\t- input_jet: any jet dictionary with the clustering history.\n\n\t\t- delta_min: pT cut scale for the showering process to stop.\n\n\t\t- lam: decaying rate value for the exponential distribution.\n\n\t\t- save: if true, save the reclustered jet dictionary list\n\n\t\t- visualize: if true, calculate extra features needed for the visualizations and add them to the tree dictionary.\n\n\tReturns:\n\t\t- jet dictionary\n\t\"\"\"\n\n\n\tdef _rec(jet, parent, node_id, outers_list):\n\t\t\"\"\"\n Recursive function to get a list of the tree leaves\n \"\"\"\n\t\tif jet[\"tree\"][node_id, 0] == -1:\n\n\t\t\touters_list.append(jet[\"content\"][node_id])\n\n\t\telse:\n\t\t\t_rec(\n\t\t jet,\n\t\t node_id,\n\t\t jet[\"tree\"][node_id, 0],\n\t\t outers_list,\n\t\t )\n\n\t\t\t_rec(\n\t\t jet,\n\t\t node_id,\n\t\t jet[\"tree\"][node_id, 1],\n\t\t outers_list,\n\t\t )\n\n\t\treturn outers_list\n\n\n\touters = []\n\n\t# Get constituents list (leaves)\n\tjet_const =_rec(\n\tinput_jet,\n\t-1,\n\tinput_jet[\"root_id\"],\n\touters,\n\t)\n\n\n\tstart_time = time.time()\n\n\t# Run clustering algorithm\n\ttree, \\\n\tidx, \\\n\tjetContent, \\\n\troot_node, \\\n\tNconst, \\\n\tN_leaves_list, \\\n\tlinkage_list,\\\n\tlogLH = greedyLH(\n\t\tjet_const,\n\t\tdelta_min = delta_min,\n\t\tlam = lam,\n\t\tlamRoot = float(input_jet[\"LambdaRoot\"]),\n\t)\n\n\tjet = {}\n\n\t\"\"\" Extra features needed for visualizations \"\"\"\n\tif visualize:\n\t\ttree,\\\n\t\tjetContent,\\\n\t\tnode_id,\\\n\t\ttree_ancestors = _traverse(\n\t\t\troot_node,\n\t\t jetContent,\n\t\t jetTree = tree,\n\t\t Nleaves = Nconst,\n\t\t)\n\n\t\tjet[\"node_id\"]=node_id\n\t\tjet[\"tree_ancestors\"]=tree_ancestors\n\t\troot_node = 0\n\n\n\tjet[\"root_id\"] = root_node\n\tjet[\"tree\"] = np.asarray(tree).reshape(-1, 2)\n\tjet[\"content\"] = np.asarray(jetContent).reshape(-1, 4)\n\tjet[\"linkage_list\"]=linkage_list\n\tjet[\"Nconst\"]=Nconst\n\tjet[\"algorithm\"]= \"greedyLH\"\n\tjet[\"pt_cut\"] = delta_min\n\tjet[\"Lambda\"] = lam\n\tjet[\"LambdaRoot\"] = float(input_jet[\"LambdaRoot\"])\n\tjet[\"M_Hard\"] = float(input_jet[\"M_Hard\"])\n\tjet[\"logLH\"] = np.asarray(logLH)\n\n\n\t\"\"\" Fill deltas list (needed to fill the jet log LH)\"\"\"\n\tjet = likelihood.fill_jet_info(jet, parent_id=None)\n\n\t\"\"\"Fill jet dictionaries with log likelihood of truth jet\"\"\"\n\tjet = likelihood.enrich_jet_logLH(jet, dij=True)\n\n\t\"\"\" Angular quantities\"\"\"\n\tConstPhi, PhiDelta, PhiDeltaListRel = auxFunctions.traversePhi(jet, jet[\"root_id\"], [], [], [])\n\tjet[\"ConstPhi\"] = ConstPhi\n\tjet[\"PhiDelta\"] = PhiDelta\n\tjet[\"PhiDeltaRel\"] = PhiDeltaListRel\n\n\tlogger.debug(f\" Recluster and build tree algorithm total time = {time.time() - start_time}\")\n\n\n\t\"\"\" Save reclustered tree \"\"\"\n\tif save:\n\t\tout_dir = \"data/\"\n\t\tlogger.info(f\"input_jet[name]= {input_jet['name']}\")\n\n\t\talgo = str(input_jet[\"name\"]) + '_' + str(alpha)\n\t\tout_filename = out_dir + str(algo) + '.pkl'\n\t\tlogger.info(f\"Output jet filename = {out_filename}\")\n\t\twith open(out_filename, \"wb\") as f:\n\t\t\tpickle.dump(jet, f, protocol=2)\n\n\n\treturn jet\n\n\n\n\n\n\n\ndef getConstituents(jet, node_id, outers_list):\n\t\"\"\"\n\tRecursive function to get a list of the tree leaves\n\t\"\"\"\n\tif jet[\"tree\"][node_id, 0] == -1:\n\n\t\touters_list.append(jet[\"content\"][node_id])\n\n\telse:\n\t\tgetConstituents(\n\t jet,\n\t jet[\"tree\"][node_id, 0],\n\t outers_list,\n\t )\n\n\t\tgetConstituents(\n\t 
jet,\n\t jet[\"tree\"][node_id, 1],\n\t outers_list,\n\t )\n\n\treturn outers_list\n\n\ndef greedyLH(levelContent, delta_min= None, lam=None, lamRoot = None):\n\t\"\"\"\n\tRuns the logLHMaxLevel function level by level starting from the list of constituents (leaves) until we reach the root of the tree.\n\n\tNote: levelContent is a list of the nodes after deleting the constituents that are merged and adding the new node in each level.\n\t So this should only have the root of the tree at the end.\n\n\tArgs:\n\t\t- levelContent: jet constituents (i.e. the leaves of the tree)\n\t\t- delta_min: pT cut scale for the showering process to stop.\n\t\t- lam: decaying rate value for the exponential distribution.\n\n\n\tReturns:\n\n\t - jetTree: dictionary that has the node id of a parent as a key and a list with the id of the 2 children as the values\n\t - idx: array that stores the node id\n\t (the node id determines the location of the momentum vector of a node in the jetContent array)\n\t of the nodes that are in the current levelContent array. It has the same elements as the content_level (they get updated\n\t level by level).\n\t - jetContent: array with the momentum of all the nodes of the jet tree (both leaves and inners).\n\t - root_node: root node id\n\t - Nconst: Number of leaves of the jet\n\t - N_leaves_list: List that given a node idx, stores for that idx, the number of leaves for the branch below that node.\n\t - linkage_list: linkage list to build heat clustermap visualizations.\n\t - logLH: list with the log likelihood of each pairing.\n\t\"\"\"\n\n\tNconst = len(levelContent)\n\n\tjetTree = [[-1,-1]]*Nconst\n\tidx = [i for i in range(Nconst)]\n\tjetContent = copy.deepcopy(levelContent)\n\troot_node = 2 * Nconst - 2\n\tN_leaves_list = [1.] * Nconst\n\tlinkage_list = []\n\tlogLH=[]\n\tlevelDeltas = [0.] * Nconst\n\n\n\t\"\"\" Calculate the nearest neighbor (NN) based on max log likelihood (logLH) for each leaf of the tree.\"\"\"\n\tNNpairs = NNeighbors(\n\t\t\tlevelContent,\n\t\t\tlevelDeltas,\n\t\t\tNconst = Nconst,\n\t\t\tdelta_min = delta_min,\n\t\t\tlam = lam,\n\t)\n\n\n\t\"\"\" Cluster constituents. This is O(N) at each level x N levels => O(N^2) \"\"\"\n\tfor j in range(Nconst - 1):\n\n\t\tlogger.debug(f\"===============================================\")\n\t\tlogger.debug(f\" LEVEL = {j}\")\n\n\t\t\"\"\"Heavy resonance is modeled by a different decaying rate\"\"\"\n\t\tif j==Nconst-2:\n\t\t\tlam =lamRoot\n\n\n\t\tlogLHMaxLevel(\n\t\t\tNNpairs,\n\t\t\tlevelContent,\n\t\t\tlevelDeltas,\n\t\t\tlogLH,\n\t\t\tjetTree,\n\t\t\tjetContent,\n\t\t\tidx,\n\t\t\tNparent = Nconst + j,\n\t\t\tN_leaves_list = N_leaves_list,\n\t\t\tlinkage_list = linkage_list,\n\t\t\tdelta_min = delta_min,\n\t\t\tlam = lam,\n\t\t)\n\n\n\treturn jetTree, idx, jetContent, root_node, Nconst, N_leaves_list, linkage_list, logLH\n\n\n\n\n\n\n\ndef NNeighbors(\n levelContent,\n\tlevelDeltas,\n Nconst=None,\n\tdelta_min = None,\n\tlam = None\n):\n\t\"\"\"\n\t-For each leaf i of the tree, calculate its nearest neighbor (NN) j and the log likelihood for that pairing. This is O(N^2)\n\tFormat: NNpairs = [ (logLH,[leaf i,leaf j]) for i in leaves]\n\n\tCalculate the log likelihood between all possible pairings of the leaves at a certain level and get the maximum. Each entry in NN[airs corresponds to a node and contains that node and its nearest neighbor node. Format: [(logLH,[node,node NN])..]\n\tFor efficiency, we only loop over the nodes to the left of each node. 
So, if the max logLH pairing is with a node to the right, that will be considered when the \"neighbor node\" becomes the \"node\".\n\tWe also have a list that keeps track of the index of each node (in the \"idx\" list), thus we want 1 pairing per node. For the 1st node, we add an entry (-np.inf, [0, -999])] so that this will have the lowest priority. All the pairings of the 1st node are considered by the nodes to the right.\n\n\tArgs:\n\t - levelContent: array with the constituents momentum list for the current level (i.e. after deleting the constituents that are merged and\n\t adding the new node from merging them in all previous levels)\n\t - levelDeltas: array with the delta values (for the splitting of a node in the Toy Jets Shower Model) for the current level.\n\t - Nconst: Number of leaves\n\t - delta_min: pT cut scale for the showering process to stop.\n\t\t- lam: decaying rate value for the exponential distribution.\n\n\tReturns:\n\t\t- NNpairs\n\n\t\"\"\"\n\n\tNNpairs = [(-np.inf, [0, -999])] + \\\n\t [\n\t\t max(\n\t\t\t [\n\t\t\t\t (\n\t\t\t\t\t likelihood.split_logLH(\n\t\t\t\t\t\t levelContent[k],\n\t\t\t\t\t\t levelDeltas[k],\n\t\t\t\t\t\t levelContent[k - j],\n\t\t\t\t\t\t levelDeltas[k - j],\n\t\t\t\t\t\t delta_min,\n\t\t\t\t\t\t lam,\n\t\t\t\t\t ),\n\t\t\t\t\t [k, k - j]\n\t\t\t\t )\n\t\t\t\t for j in range(k, 0, -1)\n\t\t\t ], key=lambda x: x[0]\n\t\t )\n\t\t for k in range(1, Nconst, 1)\n\t ]\n\n\n\treturn NNpairs\n\n\n\n\n\n\n\ndef logLHMaxLevel(\n\tNNpairs,\n levelContent,\n\tlevelDeltas,\n logLH,\n jetTree,\n jetContent,\n idx,\n Nparent=None,\n N_leaves_list=None,\n\tlinkage_list=None,\n\tdelta_min = None,\n\tlam = None,\n):\n\t\"\"\"\n\t- Update the jet dictionary information by deleting the nodes that are merged and adding the new node at each level.\n\t- Update the nearest neighbors (NN) list of the nodes whose NN was deleted.\n\n\tArgs:\n\n\t\t- levelContent: array with the constituents momentum list for the current level (i.e. after deleting the constituents that are merged and\n\t adding the new node from merging them in all previous levels)\n\t - levelDeltas: array with the delta values (for the splitting of a node in the Toy Jets Shower Model) for the current level.\n\t - logLH: list with all the previous max log likelihood pairings.\n\t - jetTree: jet tree structure list\n\t - jetContent: array with the momentum of all the nodes of the jet tree (both leaves and inners) after adding one\n\t more level in the clustering.\n\t (We add a new node each time we cluster 2 pseudojets)\n\t - idx: array that stores the node id (the node id determines the location of the momentum of a node in the jetContent array)\n\t of the nodes that are in the current levelContent array. It has the same number of elements as levelContent (they get updated\n\t level by level).\n\t - Nparent: index of each parent added to the tree.\n\t\t- N_leaves_list: List that given a node idx, stores for that idx, the number of leaves for the branch below that node. It is initialized only with the tree leaves\n\t - linkage_list: linkage list to build heat clustermap visualizations.\n\t [SciPy linkage list website](https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html)\n\t Linkage list format: A (n - 1) by 4 matrix Z is returned. At the i-th iteration, clusters with indices Z[i, 0] and Z[i, 1] are combined to form cluster (n + 1) . A cluster with an index less than n corresponds to one of the n original observations. 
The distance between clusters Z[i, 0] and Z[i, 1] is given by Z[i, 2]. The fourth value Z[i, 3] represents the number of original observations in the newly formed cluster.\n\t - delta_min: pT cut scale for the showering process to stop.\n\t\t- lam: decaying rate value for the exponential distribution.\n\n\t\"\"\"\n\n\n\t\"\"\" \n\tright: Index of the pair that gives the max logLH, also the index of the right node to be removed from the idx list.\n\tWe do not consider the 1st entry to find the max log LH pairing, because all the pairings of that node are considered by the nodes to the right. This way we do not need to update the 1st node NN when the NN is clustered with some other node. \n\tThis avoids getting an error. Reason: when Delta_Parent<Delta_cut that pairing is not viable within our model, thus we give it a logLH = - Infinity. However, we also had set to (- infinity) the node with idx=0 and the 1st node of NNpairs at each level (only when we had to update its NN).\n\tThus, (especially toward the top of the tree), it can happen that all the options have logLH= - Infinity, choosing the 1st entry on the NNpairs list. The problem is that this entry sometimes has as the NN a node that is missing (was already clustered) because, as mentioned, we do not update that node NN.\n\t\"\"\"\n\n\tlogger.debug(f\" maxPairLogLH, maxPairIdx = {max(NNpairs[1::], key=lambda x: x[0])}\")\n\tlogger.debug(f\" NNpairs = {NNpairs}\")\n\tright = NNpairs.index(max(NNpairs[1::], key=lambda x: x[0]))\n\n\n\t\"\"\" Remove nodes pair with max logLH \"\"\"\n\tmaxPairLogLH, maxPairIdx = NNpairs.pop(right)\n\n\tleftIdx = maxPairIdx[1]\n\trightIdx = maxPairIdx[0]\n\n\n\t\"\"\" Index of the left node to be removed \"\"\"\n\tleft = [entry[1][0] for entry in NNpairs].index(leftIdx)\n\tlogger.debug(f\" (lef,right) = {left,right}\")\n\tlogger.debug(f\" left idxs list = {[entry[1][0] for entry in NNpairs]}\")\n\n\tNNpairs.pop(left)\n\n\n\n\t\"\"\" Update levelDeltas, idx, levelContent, jetContent, N_leaves_list, linkage_list, jetTree and logLH lists \"\"\"\n\tidx.pop(right)\n\tidx.pop(left)\n\tidx.append(Nparent)\n\n\trightContent = levelContent.pop(right)\n\tleftContent = levelContent.pop(left)\n\n\tnewNode = np.sum([leftContent,rightContent],axis = 0)\n\tlevelContent.append(newNode)\n\tjetContent.append(newNode)\n\n\t\"\"\"Calculate parent mass squared\"\"\"\n\tnewDelta = likelihood.get_delta_LR(leftContent,rightContent)\n\tlevelDeltas.pop(right)\n\tlevelDeltas.pop(left)\n\tlevelDeltas.append(newDelta)\n\n\tN_leaves_list.append(N_leaves_list[leftIdx] + N_leaves_list[rightIdx])\n\n\tlinkage_list.append([leftIdx, rightIdx, Nparent, N_leaves_list[-1]])\n\n\tjetTree.append([leftIdx, rightIdx])\n\n\tlogLH.append(maxPairLogLH)\n\tlogger.debug(f\" Per level logLH = {logLH}\")\n\n\n\t\"\"\" Find if any other node had one of the merged nodes as its NN \"\"\"\n\tNNidxUpdate = [i for i, entry in enumerate(NNpairs) if (entry[1][1] == leftIdx or entry[1][1] == rightIdx)]\n\tlogger.debug(f\" NNpairs after pop = {NNpairs}\")\n\tif NNidxUpdate!=[]:\n\n\t\tlogger.debug(f\" Indices that need to get the NN updated = {NNidxUpdate}\")\n\t\tlogger.debug(f\" First entry of NNpairs = {NNpairs[0]}\")\n\n\t\tif NNidxUpdate[0]==0:\n\t\t\t\"\"\" Do not update the 1st entry NN, but set the logLH = - Infinity. 
All the pairings of the 1st element of the list are taken into account by the elements to the right\"\"\"\n\t\t\tNNpairs[NNidxUpdate[0]] = (-np.inf, NNpairs[NNidxUpdate[0]][1])\n\t\t\tNNidxUpdate = NNidxUpdate[1::]\n\n\t\tNNpairsUpdate = [\n\t\t\tmax(\n\t\t\t\t[\n\t\t\t\t\t(\n\t\t\t\t\t\tlikelihood.split_logLH(\n\t\t\t\t\t\t\tlevelContent[k],\n\t\t\t\t\t\t\tlevelDeltas[k],\n\t\t\t\t\t\t\tlevelContent[k - j],\n\t\t\t\t\t\t\tlevelDeltas[k - j],\n\t\t\t\t\t\t\tdelta_min,\n\t\t\t\t\t\t\tlam\n\t\t\t\t\t\t),\n\t\t\t\t\t\t[idx[k], idx[k - j]]\n\t\t\t\t\t)\n\t\t\t\t\tfor j in range(k, 0, -1)\n\t\t\t\t],\n\t\t\t\tkey=lambda x: x[0]\n\t\t\t)\n\t\t\tfor k in NNidxUpdate\n\t\t]\n\n\t\tfor i,entry in enumerate(NNidxUpdate):\n\t\t\tNNpairs[entry] = NNpairsUpdate[i]\n\t\t\tlogger.debug(f\" i,entry = {i,entry}\")\n\t\t\tlogger.debug(f\" NNpairs after updating = {NNpairs}\")\n\n\n\tlogger.debug(f\"-----\"*5)\n\tlogger.debug(f\" idx = {idx}\")\n\tlogger.debug(f\" levelContent = {levelContent}\")\n\n\t\"\"\" Find merged node NN and append to list \"\"\"\n\tif len(levelContent)>1:\n\t\tNewNodeNN = max(\n\t\t\t[\n\t\t\t\t(\n\t\t\t\t\tlikelihood.split_logLH(\n\t\t\t\t\t\tnewNode,\n\t\t\t\t\t\tnewDelta,\n\t\t\t\t\t\tlevelContent[j],\n\t\t\t\t\t\tlevelDeltas[j],\n\t\t\t\t\t\tdelta_min,\n\t\t\t\t\t\tlam\n\t\t\t\t\t),\n\t\t\t\t\t[Nparent, idx[j]]\n\t\t\t\t)\n\t\t\t\tfor j in range(len(levelContent)-1)\n\t\t\t],\n\t\t\tkey=lambda x: x[0]\n\t\t)\n\n\t\tNNpairs.append(NewNodeNN)\n\n\t\tlogger.debug(f\" NNpairs after adding merged node = {NNpairs}\")\n\n\n\n\n\n\n\n\n\n\ndef _traverse(\n root,\n jetContent,\n jetTree=None,\n Nleaves=None,\n dendrogram=True,\n):\n\t\"\"\"\n This function call the recursive function _traverse_rec to make the trees starting from the root\n :param root: root node id\n :param jetContent: array with the momentum of all the nodes of the jet tree (both leaves and inners).\n :param jetTree: dictionary that has the node id of a parent as a key and a list with the id of the 2 children as the values\n :param Nleaves: Number of constituents (leaves)\n :param dendrogram: bool. If True, then return tree_ancestors list.\n\n :return:\n - tree: Reclustered tree structure.\n - content: Reclustered tree momentum vectors\n - node_id: list where leaves idxs are added in the order that they appear when we traverse the reclustered tree (each number indicates the node id that we picked when we did the reclustering.). However, the idx value specifies the order in which the leaf nodes appear when traversing the origianl jet (e.g. truth level jet). 
The value here is an integer between 0 and Nleaves.\n So if we go from truth to kt algorithm, then in the truth tree the leaves go as [0,1,2,3,4,,...,Nleaves-1]\n - tree_ancestors: List with one entry for each leaf of the tree, where each entry lists all the ancestor node ids when traversing the tree from the root to the leaf node.\n\n \"\"\"\n\n\ttree = []\n\tcontent = []\n\tnode_id = []\n\ttree_ancestors = []\n\n\t_traverse_rec(\n root,\n -1,\n False,\n tree,\n content,\n jetContent,\n jetTree=jetTree,\n Nleaves=Nleaves,\n node_id=node_id,\n ancestors=[],\n tree_ancestors=tree_ancestors,\n dendrogram=dendrogram,\n )\n\n\treturn tree, content, node_id, tree_ancestors\n\n\n\n\n\n\ndef _traverse_rec(\n root,\n parent_id,\n is_left,\n tree,\n content,\n jetContent,\n jetTree=None,\n Nleaves=None,\n node_id=None,\n ancestors=None,\n tree_ancestors=[],\n dendrogram=False,\n):\n\t\"\"\"\n\tRecursive function to build the jet tree structure.\n\t:param root: parent node momentum\n\t:param parent_id: parent node idx\n\t:param is_left: bool.\n\t:param tree: List with the tree\n\t:param content: List with the momentum vectors\n\t:param jetContent: array with the momentum of all the nodes of the jet tree (both leaves and inners).\n\t:param jetTree: dictionary that has the node id of a parent as a key and a list with the id of the 2 children as the values\n\t:param Nleaves: Number of constituents (leaves)\n\t:param node_id: list where leaves idxs are added in the order they appear when we traverse the reclustered tree (each number indicates the node id\n\tthat we picked when we did the reclustering.). However, the idx value specifies the order in which the leaf nodes appear when traversing the truth level jet . The value here is an integer between 0 and Nleaves.\n\tSo if we went from truth to kt algorithm, then in the truth tree the leaves go as [0,1,2,3,4,,...,Nleaves-1]\n\t:param ancestors: 1 entry of tree_ancestors (there is one for each leaf of the tree). It is appended to tree_ancestors.\n\t:param tree_ancestors: List with one entry for each leaf of the tree, where each entry lists all the ancestor node ids when traversing the tree from the root to the leaf node.\n\t:param dendrogram: bool. If True, append ancestors to tree_ancestors list.\n\t\"\"\"\n\n\t\"\"\"\"\n\t(With each momentum vector we increase the content array by one element and the tree array by 2 elements. \n\tBut then we take id=tree.size()//2, so the id increases by 1.)\n\t\"\"\"\n\tid = len(tree) // 2\n\n\tif parent_id >= 0:\n\t\tif is_left:\n\n\t\t\t\"\"\"Insert in the tree list, the location of the lef child in the content array.\"\"\"\n\t\t\ttree[2 * parent_id] = id\n\t\telse:\n\n\t\t\t\"\"\"Insert in the tree list, the location of the right child in the content array.\"\"\"\n\t\t\ttree[2 * parent_id + 1] = id\n\n\n\t\"\"\"Insert 2 new nodes to the vector that constitutes the tree. If the current node is a parent, then we will replace the -1 with its children idx in the content array\"\"\"\n\ttree.append(-1)\n\ttree.append(-1)\n\n\n\t\"\"\" Append node momentum to content list \"\"\"\n\tcontent.append(jetContent[root])\n\n\n\t\"\"\" Fill node ancestors list \"\"\"\n\tnew_ancestors = None\n\tif dendrogram:\n\t\tnew_ancestors = np.copy(ancestors)\n\t\tlogger.debug(f\" ancestors before = {ancestors}\")\n\n\t\tnew_ancestors = np.append(new_ancestors, root)\n\t\tlogger.debug(f\" ancestors after = {ancestors}\")\n\n\n\t\"\"\" Move from the root down recursively until we get to the leaves. 
\"\"\"\n\tif root >= Nleaves:\n\n\t\tchildren = jetTree[root]\n\n\t\tlogger.debug(f\"Children = {children}\")\n\n\t\tL_idx = children[0]\n\t\tR_idx = children[1]\n\n\n\t\t_traverse_rec(L_idx, id,\n True,\n tree,\n content,\n jetContent,\n jetTree,\n Nleaves=Nleaves,\n node_id=node_id,\n ancestors=new_ancestors,\n dendrogram=dendrogram,\n tree_ancestors=tree_ancestors,\n )\n\n\t\t_traverse_rec(R_idx,\n id,\n False,\n tree,\n content,\n jetContent,\n jetTree,\n Nleaves=Nleaves,\n node_id=node_id,\n ancestors=new_ancestors,\n dendrogram=dendrogram,\n tree_ancestors=tree_ancestors,\n )\n\n\n\n\telse:\n\t\t\"\"\" If the node is a leaf, then append idx to node_id and its ancestors as a new row of tree_ancestors \"\"\"\n\n\t\tnode_id.append(root)\n\n\t\tif dendrogram:\n\n\t\t\ttree_ancestors.append(new_ancestors)\n\t\t\tlogger.debug(f\"tree_ancestors= {tree_ancestors}\")\n\n\n\n", "import numpy as np\nimport logging\nimport pickle\nimport time\nimport importlib\nimport copy\nimport argparse\nimport os\n\nfrom . import reclusterTree\nfrom . import linkageList\nfrom . import heatClustermap\nfrom . import likelihood\nfrom . import N2Greedy\nfrom . import beamSearchOptimal as BSO\nfrom .utils import get_logger\n\nlogger = get_logger(level=logging.INFO)\n\n\n\n# data_dir=\"/scratch/sm4511/TreeAlgorithms/data/\"\nroot_dir=\"/Users/sebastianmacaluso/Documents/PrinceData/\"\n\n\n\n\n\n\"\"\"####################################\"\"\"\n\ndef appendJets(start, end, Njets, truth = False, BS = False, Greedy = False, jetType = None):\n \"\"\" Load truth trees and create logLH lists \"\"\"\n\n startTime = time.time()\n\n dic = {}\n\n Total_jetsList = []\n Total_jetsListLogLH = []\n avg_logLH = []\n # Nconst = []\n\n TruthFilename = \"Truth/tree_\" + str(Njets) + \"_truth_\"\n GreedyFilename = \"GreedyJets/Greedy_\" + str(Njets) + \"_\"\n BSFilename = \"BeamSearchJets/BSO_\" + str(Njets) + \"_\"\n\n if truth:\n filename = TruthFilename\n elif BS:\n filename = BSFilename\n elif Greedy:\n filename = GreedyFilename\n else:\n raise ValuError(f\" Please specify algorithm\")\n\n for i in range(start, end):\n if ( os.path.isfile(root_dir+jetType+\"/\"+TruthFilename+ str(i) + \".pkl\")\n and os.path.isfile(root_dir+jetType+\"/\"+BSFilename+ str(i) + \".pkl\")\n and os.path.isfile(root_dir+jetType+\"/\"+GreedyFilename+ str(i) + \".pkl\")):\n\n with open(root_dir+jetType+\"/\"+filename+ str(i) + \".pkl\", \"rb\") as fd:\n jetsList, jetsListLogLH = pickle.load(fd, encoding='latin-1')\n\n\n Total_jetsList.append(jetsList)\n Total_jetsListLogLH.append(jetsListLogLH)\n\n if (i + 1) % 20 == 0:\n avg_logLH.append(np.average(np.asarray(Total_jetsListLogLH[i - 19:i + 1]).flatten()))\n\n \"\"\" Standard deviation for the average log LH for the N runs\"\"\"\n sigma = np.std(avg_logLH)\n\n \"\"\" Statistical error for the mean log LH for the total number of jets as err = sqrt(s)/ sqrt(N), where sigma s the sample variance\"\"\"\n flatTotal_jetsListLogLH = np.asarray(Total_jetsListLogLH).flatten()\n statSigma = np.std(flatTotal_jetsListLogLH) / np.sqrt(len(flatTotal_jetsListLogLH))\n\n dic[\"jetsList\"] = Total_jetsList\n dic[\"jetsListLogLH\"] = flatTotal_jetsListLogLH\n dic[\"avgLogLH\"] = np.asarray(avg_logLH)\n dic[\"sigma\"] = sigma\n dic[\"statSigma\"] = statSigma\n\n logger.info(f\" TOTAL TIME = {time.time() - startTime}\")\n\n return dic\n\n\n\n\n\ndef appendTruthJets(start, end, Njets, truth = False, BS = False, Greedy = False, jetType = None):\n \"\"\" Load truth trees and create logLH lists \"\"\"\n\n startTime = 
time.time()\n\n    dic = {}\n\n    Total_jetsList = []\n    Total_jetsListLogLH = []\n    avg_logLH = []\n    # Nconst = []\n\n    TruthFilename = \"Truth/tree_\" + str(Njets) + \"_truth_\"\n    GreedyFilename = \"GreedyJets/Greedy_\" + str(Njets) + \"_\"\n    BSFilename = \"BeamSearchJets/BSO_\" + str(Njets) + \"_\"\n\n    if truth:\n        filename = TruthFilename\n    elif BS:\n        filename = BSFilename\n    elif Greedy:\n        filename = GreedyFilename\n    else:\n        raise ValueError(f\" Please specify algorithm\")\n\n    for i in range(start, end):\n        if ( os.path.isfile(root_dir+jetType+\"/\"+TruthFilename+ str(i) + \".pkl\")\n                and os.path.isfile(root_dir+jetType+\"/\"+BSFilename+ str(i) + \".pkl\")\n                and os.path.isfile(root_dir+jetType+\"/\"+GreedyFilename+ str(i) + \".pkl\")):\n\n            with open(root_dir+jetType+\"/\"+filename+ str(i) + \".pkl\", \"rb\") as fd:\n                jetsList = pickle.load(fd, encoding='latin-1')\n\n\n            enrichTruthLogLH = [np.sum(jet[\"logLH\"]) for jet in jetsList]\n\n            Total_jetsList.append(jetsList)\n            Total_jetsListLogLH.append(enrichTruthLogLH)\n\n            if (i+1)%20==0:\n                avg_logLH.append(np.average(np.asarray(Total_jetsListLogLH[i-19:i+1]).flatten()))\n\n    # print(\"length avg_logLH = \", len(avg_logLH))\n\n    \"\"\" Standard deviation for the average log LH for the N runs\"\"\"\n    sigma = np.std(avg_logLH)\n\n    \"\"\" Statistical error for the mean log LH for the total number of jets as err = sqrt(s)/ sqrt(N), where s is the sample variance\"\"\"\n    flatTotal_jetsListLogLH = np.asarray(Total_jetsListLogLH).flatten()\n    statSigma = np.std(flatTotal_jetsListLogLH) / np.sqrt(len(flatTotal_jetsListLogLH))\n\n    dic[\"jetsList\"] = Total_jetsList\n    # dic[\"NconstList\"] = np.asarray(Nconst)\n    dic[\"jetsListLogLH\"] = flatTotal_jetsListLogLH\n    dic[\"avgLogLH\"] = np.asarray(avg_logLH)\n    dic[\"sigma\"] = sigma\n    dic[\"statSigma\"] = statSigma\n\n    logger.info(f\" TOTAL TIME = {time.time() - startTime}\")\n\n    return dic\n\n# -------------\n\n\n\ndef appendTruthJetsOnly(start, end, Njets, truth = False, BS = False, Greedy = False, jetType = None):\n    \"\"\" Load truth trees and create logLH lists \"\"\"\n\n    startTime = time.time()\n\n    dic = {}\n\n    Total_jetsList = []\n    Total_jetsListLogLH = []\n    avg_logLH = []\n    # Nconst = []\n\n    TruthFilename = \"Truth/tree_\" + str(Njets) + \"_truth_\"\n    GreedyFilename = \"GreedyJets/Greedy_\" + str(Njets) + \"_\"\n    BSFilename = \"BeamSearchJets/BSO_\" + str(Njets) + \"_\"\n\n    if truth:\n        filename = TruthFilename\n    elif BS:\n        filename = BSFilename\n    elif Greedy:\n        filename = GreedyFilename\n    else:\n        raise ValueError(f\" Please specify algorithm\")\n\n    for i in range(start, end):\n        if os.path.isfile(root_dir+jetType+\"/\"+TruthFilename+ str(i) + \".pkl\"):\n\n            with open(root_dir+jetType+\"/\"+filename+ str(i) + \".pkl\", \"rb\") as fd:\n                jetsList = pickle.load(fd, encoding='latin-1')\n\n\n            enrichTruthLogLH = [np.sum(jet[\"logLH\"]) for jet in jetsList]\n\n            Total_jetsList.append(jetsList)\n            Total_jetsListLogLH.append(enrichTruthLogLH)\n\n            if (i+1)%20==0:\n                avg_logLH.append(np.average(np.asarray(Total_jetsListLogLH[i-19:i+1]).flatten()))\n\n    # print(\"length avg_logLH = \", len(avg_logLH))\n\n    \"\"\" Standard deviation for the average log LH for the N runs\"\"\"\n    sigma = np.std(avg_logLH)\n\n    \"\"\" Statistical error for the mean log LH for the total number of jets as err = sqrt(s)/ sqrt(N), where s is the sample variance\"\"\"\n    flatTotal_jetsListLogLH = np.asarray(Total_jetsListLogLH).flatten()\n    statSigma = np.std(flatTotal_jetsListLogLH) / np.sqrt(len(flatTotal_jetsListLogLH))\n\n    dic[\"jetsList\"] = 
Total_jetsList\n    # dic[\"NconstList\"] = np.asarray(Nconst)\n    dic[\"jetsListLogLH\"] = flatTotal_jetsListLogLH\n    dic[\"avgLogLH\"] = np.asarray(avg_logLH)\n    dic[\"sigma\"] = sigma\n    dic[\"statSigma\"] = statSigma\n\n    logger.info(f\" TOTAL TIME = {time.time() - startTime}\")\n\n    return dic\n\n\n\"\"\" ################################### \"\"\"\n# def appendTruthJets(start, end, Njets, data_dir):\n#     \"\"\" Load truth trees and create logLH lists \"\"\"\n#\n#     startTime = time.time()\n#\n#     dic = {}\n#\n#     Total_jetsList = []\n#     Total_jetsListLogLH = []\n#     avg_logLH = []\n#     # Nconst = []\n#\n#     for i in range(start, end):\n#         with open(data_dir+\"Truth/tree_\" + str(Njets) + \"_truth_\" + str(i) + \".pkl\", \"rb\") as fd:\n#             jetsList = pickle.load(fd, encoding='latin-1')\n#\n#         # \"\"\"Number of jet constituents\"\"\"\n#         # Nconst.append([len(jet[\"leaves\"]) for jet in jetsList])\n#\n#         # \"\"\"Fill jet dictionaries with log likelihood of truth jet\"\"\"\n#         # [likelihood.enrich_jet_logLH(jet, dij=True) for jet in jetsList]\n#\n#         enrichTruthLogLH = [np.sum(jet[\"logLH\"]) for jet in jetsList]\n#\n#         Total_jetsList.append(jetsList)\n#         Total_jetsListLogLH.append(enrichTruthLogLH)\n#\n#         if (i+1)%20==0:\n#             avg_logLH.append(np.average(np.asarray(Total_jetsListLogLH[i-19:i+1]).flatten()))\n#\n#     # print(\"length avg_logLH = \", len(avg_logLH))\n#\n#     \"\"\" Standard deviation for the average log LH for the N runs\"\"\"\n#     sigma = np.std(avg_logLH)\n#\n#     \"\"\" Statistical error for the mean log LH for the total number of jets as err = sqrt(s)/ sqrt(N), where s is the sample variance\"\"\"\n#     flatTotal_jetsListLogLH = np.asarray(Total_jetsListLogLH).flatten()\n#     statSigma = np.std(flatTotal_jetsListLogLH) / np.sqrt(len(flatTotal_jetsListLogLH))\n#\n#     dic[\"jetsList\"] = Total_jetsList\n#     # dic[\"NconstList\"] = np.asarray(Nconst)\n#     dic[\"jetsListLogLH\"] = flatTotal_jetsListLogLH\n#     dic[\"avgLogLH\"] = np.asarray(avg_logLH)\n#     dic[\"sigma\"] = sigma\n#     dic[\"statSigma\"] = statSigma\n#\n#     logger.info(f\" TOTAL TIME = {time.time() - startTime}\")\n#\n#     return dic\n\n\n\n\n\ndef appendGreedyJets(start, end, Njets):\n    \"\"\" Load greedy trees and logLH lists \"\"\"\n\n    startTime = time.time()\n\n    dic = {}\n\n    Total_jetsList = []\n    Total_jetsListLogLH = []\n    avg_logLH = []\n    for i in range(start, end):\n        with open(data_dir+\"GreedyJets/Greedy_\" + str(Njets) + \"Mw_\" + str(i) + \".pkl\", \"rb\") as fd:\n            jetsList, jetsListLogLH = pickle.load(fd, encoding='latin-1')\n\n        # \"\"\" Fill deltas list (needed to fill the jet log LH)\"\"\"\n        # [traverseTree(jet) for jet in jetsList]\n\n        # [likelihood.fill_jet_info(jet, parent_id=None) for jet in jetsList]\n        #\n        # \"\"\"Fill jet dictionaries with log likelihood of truth jet\"\"\"\n        # [likelihood.enrich_jet_logLH(jet, dij=True) for jet in jetsList]\n\n        # jetsListLogLH = [np.sum(jet[\"logLH\"]) for jet in jetsList]\n\n        Total_jetsList.append(jetsList)\n        Total_jetsListLogLH.append(jetsListLogLH)\n\n        if (i+1)%20==0:\n            avg_logLH.append(np.average(np.asarray(Total_jetsListLogLH[i-19:i+1]).flatten()))\n\n    \"\"\" Standard deviation for the average log LH for the N runs\"\"\"\n    sigma = np.std(avg_logLH)\n\n    \"\"\" Statistical error for the mean log LH for the total number of jets as err = sqrt(s)/ sqrt(N), where s is the sample variance\"\"\"\n    flatTotal_jetsListLogLH = np.asarray(Total_jetsListLogLH).flatten()\n    statSigma = np.std(flatTotal_jetsListLogLH) / np.sqrt(len(flatTotal_jetsListLogLH))\n\n    dic[\"jetsList\"] = Total_jetsList\n    dic[\"jetsListLogLH\"] = 
flatTotal_jetsListLogLH\n    dic[\"avgLogLH\"] = np.asarray(avg_logLH)\n    dic[\"sigma\"] = sigma\n    dic[\"statSigma\"] = statSigma\n\n    logger.info(f\" TOTAL TIME = {time.time() - startTime}\")\n\n    return dic\n\n\n\n\n\ndef appendBSO_Scan(start, end, Njets):\n    \"\"\" Load beam search trees and logLH lists \"\"\"\n\n    startTime = time.time()\n\n    dic = {}\n\n    Total_jetsList = []\n    Total_jetsListLogLH = []\n    avg_logLH = []\n    for i in range(start, end):\n        with open(data_dir+\"BeamSearchJets/BSO_\" + str(Njets) + \"Mw_\" + str(i) + \".pkl\", \"rb\") as fd:\n            jetsList, jetsListLogLH = pickle.load(fd, encoding='latin-1')\n\n        # \"\"\" Fill deltas list (needed to fill the jet log LH)\"\"\"\n        # [traverseTree(jet) for jet in jetsList]\n\n        # [likelihood.fill_jet_info(jet, parent_id=None) for jet in jetsList]\n        #\n        # \"\"\"Fill jet dictionaries with log likelihood of truth jet\"\"\"\n        # [likelihood.enrich_jet_logLH(jet, dij=True) for jet in jetsList]\n\n        # jetsListLogLH = [np.sum(jet[\"logLH\"]) for jet in jetsList]\n\n\n        Total_jetsList.append(jetsList)\n        Total_jetsListLogLH.append(jetsListLogLH)\n\n        if (i+1)%20==0:\n            avg_logLH.append(np.average(np.asarray(Total_jetsListLogLH[i-19:i+1]).flatten()))\n\n    \"\"\" Standard deviation for the average log LH for the N runs\"\"\"\n    sigma = np.std(avg_logLH)\n\n    \"\"\" Statistical error for the mean log LH for the total number of jets as err = sqrt(s)/ sqrt(N), where s is the sample variance\"\"\"\n    flatTotal_jetsListLogLH = np.asarray(Total_jetsListLogLH).flatten()\n    statSigma = np.std(flatTotal_jetsListLogLH) / np.sqrt(len(flatTotal_jetsListLogLH))\n\n    dic[\"jetsList\"] = Total_jetsList\n    dic[\"jetsListLogLH\"] = flatTotal_jetsListLogLH\n    dic[\"avgLogLH\"] = np.asarray(avg_logLH)\n    dic[\"sigma\"] = sigma\n    dic[\"statSigma\"] = statSigma\n\n    logger.info(f\" TOTAL TIME = {time.time() - startTime}\")\n\n    return dic\n\n\n\n\n\n# def traverseTree(jet):\n#\n#     \"\"\" Traverse jet to get ancestors list and content list starting from the root node\"\"\"\n#     tree, \\\n#     content, \\\n#     node_id, \\\n#     tree_ancestors = N2Greedy._traverse(\n#         jet[\"root_id\"],\n#         jet[\"content\"],\n#         jetTree=jet[\"tree\"],\n#         Nleaves=jet[\"Nconst\"],\n#     )\n#\n#     jet[\"root_id\"] = 0\n#     jet[\"node_id\"] = node_id\n#     jet[\"tree\"] = np.asarray(tree).reshape(-1, 2)\n#     jet[\"content\"] = np.asarray(content).reshape(-1, 2)\n#     jet[\"tree_ancestors\"] = tree_ancestors\n\n\n\n\n\"\"\" RUN GREEDY AND BEAM SEARCH ALGORITHMS \"\"\"\n\ndef fill_GreedyList(input_jets, Nbest=1, k1=0, k2=2):\n    \"\"\" Run the greedy algorithm over a list of sets of input jets.\n    Args: input jets\n    returns: clustered jets\n             jets logLH\n    \"\"\"\n\n\n\n    with open(args.data_dir + str(input_jets) + '.pkl', \"rb\") as fd:\n        truth_jets = pickle.load(fd, encoding='latin-1')[k1:k2]\n\n    startTime = time.time()\n\n    # for k,truth_jet in enumerate(truth_jets):\n    #     print(\"k = \",k)\n    #     if k==27:\n    #         print(\"M_Hard = \",truth_jet[\"M_Hard\"])\n    #         # print(\"Nconst = \", truth_jet[\"Nconst\"] )\n    #         N2Greedy.recluster(\n    #             truth_jet,\n    #             delta_min=truth_jet[\"pt_cut\"],\n    #             lam=float(truth_jet[\"Lambda\"]),\n    #             visualize=True,\n    #         )\n\n\n    greedyJets = [N2Greedy.recluster(\n        truth_jet,\n        delta_min=truth_jet[\"pt_cut\"],\n        lam=float(truth_jet[\"Lambda\"]),\n        visualize = True,\n    ) for truth_jet in truth_jets]\n\n    print(\"TOTAL TIME = \", time.time() - startTime)\n\n    greedyJetsLogLH = [sum(jet[\"logLH\"]) for jet in greedyJets]\n\n    return greedyJets, greedyJetsLogLH\n\n\ndef fill_BSList(input_jets, Nbest=1, k1=0, k2=2):\n    \"\"\" Run the Beam search 
algorithm (a variant where, when the logLH of 2 or more trees is the same, we only keep one of them) over a list of sets of input jets.\n    Args: input jets\n    returns: clustered jets\n             jets logLH\n    \"\"\"\n\n\n\n    with open(args.data_dir + str(input_jets) + '.pkl', \"rb\") as fd:\n        truth_jets = pickle.load(fd, encoding='latin-1')[k1:k2]\n\n    startTime = time.time()\n    BSO_jetsList = []\n\n    a = time.time()\n\n    for i, truth_jet in enumerate(truth_jets):\n\n        if i % 50 == 0:\n            print(\" # of reclustered jets = \", i, \"; Partial time = \", time.time() - a)\n            # print(\"PARTIAL TIME = \",time.time() -a)\n            a = time.time()\n\n        N = len(truth_jet[\"leaves\"])\n\n        BSO_jetsList.append(BSO.recluster(\n            truth_jet,\n            beamSize=min(3 * N, np.asarray(N * (N - 1) / 2).astype(int)),\n            delta_min=truth_jet[\"pt_cut\"],\n            lam=float(truth_jet[\"Lambda\"]),\n            N_best=Nbest,\n            visualize = True,\n        )[0]\n        )\n\n    print(\"TOTAL TIME = \", time.time() - startTime)\n\n    BSO_jetsListLogLH = [sum(jet[\"logLH\"]) for jet in BSO_jetsList]\n\n    return BSO_jetsList, BSO_jetsListLogLH\n\n\n\ndef fill_ktAlgos(input_jets, k1=0, k2=2, alpha = None):\n    \"\"\" Run the generalized kt algorithm over a list of sets of input jets.\n    Args: input jets\n    returns: clustered jets\n             jets logLH\n    \"\"\"\n\n\n\n    with open(args.data_dir + str(input_jets) + '.pkl', \"rb\") as fd:\n        truth_jets = pickle.load(fd, encoding='latin-1')[k1:k2]\n\n    startTime = time.time()\n\n    generalizedKtjets = [reclusterTree.recluster(truth_jet, alpha=alpha, save=False)\n                         for truth_jet in truth_jets]\n\n    print(\"TOTAL TIME = \", time.time() - startTime)\n\n    return generalizedKtjets\n\n\nif __name__ == \"__main__\":\n\n    # def runGreedy_Scan(start, end, Njets):\n    #     \"\"\" Run greedy algorithm\"\"\"\n    #\n    #     for i in range(start, end):\n    #         jetsList, jetsListLogLH = fill_GreedyList(\"tree_\" + str(Njets) + \"_truth_\" + str(i), k1=0,\n    #                                                   k2=Njets)\n    #\n    #         with open(data_dir + \"GreedyJets/Greedy_\" + str(Njets) + \"Mw_\" + str(i) + \".pkl\", \"wb\") as f:\n    #             pickle.dump((jetsList, jetsListLogLH), f)\n    #\n    #\n    # def runBSO_Scan(start, end, Njets):\n    #     \"\"\" Run beam search algorithm\"\"\"\n    #\n    #     for i in range(start, end):\n    #         BSO_jetsList, BSO_jetsListLogLH = fill_BSList(\"tree_\" + str(Njets) + \"_truth_\" + str(i), k1=0,\n    #                                                       k2=Njets)\n    #\n    #         with open(data_dir + \"BeamSearchJets/BSO_\" + str(Njets) + \"Mw_\" + str(i) + \".pkl\", \"wb\") as f:\n    #             pickle.dump((BSO_jetsList, BSO_jetsListLogLH), f)\n\n    def runGreedy_Scan(i, Njets):\n        \"\"\" Run greedy algorithm\"\"\"\n\n        jetsList, jetsListLogLH = fill_GreedyList(\"tree_\" + str(Njets) + \"_truth_\" + str(i), k1=0,\n                                                  k2=Njets)\n\n        output_dir = args.output_dir+\"GreedyJets/\"\n        os.system('mkdir -p ' + output_dir)\n\n        with open(output_dir+\"Greedy_\" + str(Njets) + \"_\" + str(i) + \".pkl\", \"wb\") as f:\n            pickle.dump((jetsList, jetsListLogLH), f)\n\n\n    def runBSO_Scan(i, Njets):\n        \"\"\" Run beam search algorithm\"\"\"\n\n        BSO_jetsList, BSO_jetsListLogLH = fill_BSList(\"tree_\" + str(Njets) + \"_truth_\" + str(i), k1=0,\n                                                      k2=Njets)\n\n        output_dir = args.output_dir+\"BeamSearchJets/\"\n        os.system('mkdir -p ' + output_dir)\n\n        with open(output_dir+\"BSO_\" + str(Njets) + \"_\" + str(i) + \".pkl\", \"wb\") as f:\n            pickle.dump((BSO_jetsList, BSO_jetsListLogLH), f)\n\n\n\n    def runKtAntiKtCA_Scan(i, Njets, alpha=None):\n        \"\"\" Run the generalized kt algorithm (kt, anti-kt or CA, depending on alpha)\"\"\"\n        generalizedKtjets = fill_ktAlgos(\"tree_\" + str(Njets) + \"_truth_\" + str(i),\n                                         k1=0,\n                                         k2=Njets,\n                                         alpha=alpha)\n\n        if alpha == 1:\n            name = \"Kt\"\n        elif alpha == -1:\n            name = 
\"Antikt\"\n elif alpha == 0:\n name = \"CA\"\n else:\n raise ValueError(f\"Please pick a valid value for alpha (e.g. -1,0,1)\")\n\n output_dir = args.output_dir+\"/\"+name+\"Jets/\"\n os.system('mkdir -p ' + output_dir)\n\n with open(output_dir + name+\"_\" + str(Njets) + \"_\" + str(i) + \".pkl\", \"wb\") as f:\n pickle.dump(generalizedKtjets, f)\n\n\n\n parser = argparse.ArgumentParser(description=\"Run Greedy and Beam Search algorithms\")\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"Increase output verbosity\"\n )\n\n parser.add_argument(\n \"--jetType\", type=str, required=True, help=\"Input jet type, e.g. 'QCDjets' or 'Wjets' \"\n )\n\n parser.add_argument(\n \"--greedyScan\", type=str, default=\"False\", help=\"Flag to run greedy clustering\"\n )\n parser.add_argument(\n \"--BSScan\", type=str, default=\"False\", help=\"Flag to run beam seach clustering\"\n )\n\n parser.add_argument(\n \"--KtAntiktCAscan\", type=str, default=\"False\", help=\"Flag to run generalized kt clustering\"\n )\n\n parser.add_argument(\n \"--id\", type=str, default=0, help=\"dataset id\"\n )\n\n parser.add_argument(\n \"--N_jets\", type=str, default=2, help=\"# of jets in each dataset\"\n )\n\n args = parser.parse_args()\n\n parser.add_argument(\n \"--data_dir\", type=str, default=\"/scratch/sm4511/TreeAlgorithms/data/\"+args.jetType+\"/Truth/\", help=\"Data dir\"\n )\n\n parser.add_argument(\n \"--output_dir\", type=str, default=\"/scratch/sm4511/TreeAlgorithms/data/\"+args.jetType+\"/\", help=\"Output dir\"\n )\n\n\n\n logger = get_logger(level=logging.INFO)\n\n args = parser.parse_args()\n\n data_dir = args.data_dir\n\n # To test:\n # Nstart = 0\n # Nend = 4\n # N_jets = 2\n\n # Full dataset\n # Nstart = 10\n # Nend = 30\n # N_jets = 500\n\n \"\"\"We ran a scan for 30 sets of 500 jets each.\"\"\"\n if args.greedyScan == \"True\":\n runGreedy_Scan(int(args.id), int(args.N_jets))\n # runGreedy_Scan(Nstart, Nend, N_jets)\n\n\n\n \"\"\"We ran a scan for 10 sets of 500 jets each. (Below as an example there is a scan for 4 sets of 2 jets each)\"\"\"\n if args.BSScan == \"True\":\n runBSO_Scan(int(args.id), int(args.N_jets))\n # runBSO_Scan(Nstart, Nend, N_jets)\n\n\n \"\"\"We ran a scan for 10 sets of 500 jets each. (Below as an example there is a scan for 4 sets of 2 jets each)\"\"\"\n if args.KtAntiktCAscan == \"True\":\n for alphaValue in [-1,0,1]:\n runKtAntiKtCA_Scan(int(args.id), int(args.N_jets), alpha = alphaValue)\n" ]
[ [ "numpy.asarray", "numpy.copy", "numpy.sum", "numpy.append" ], [ "numpy.asarray", "numpy.std", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mariogarcc/comphy
[ "3ab05a07dfa2eb8a1165fca1bdfd9bda6c8e27d3", "3ab05a07dfa2eb8a1165fca1bdfd9bda6c8e27d3", "3ab05a07dfa2eb8a1165fca1bdfd9bda6c8e27d3", "3ab05a07dfa2eb8a1165fca1bdfd9bda6c8e27d3" ]
[ "T01/ex03.py", "T08/ex05.py", "T08/ex06.py", "package/differentiate.py" ]
[ "from package import redact_ex, ask_continue\n\nfrom package import \\\n find_sols, \\\n falsi_solve\n\nimport numpy as np\n\n\nEXERCISE_03 = \"\"\"\\\nCalculate the solutions for f(x) using the Regula-Falsi method.\\\n\"\"\"\n\nredact_ex(EXERCISE_03, 3)\n\n\n####################################\n\ndef f(x):\n return x**2 - 3*x + np.exp(x) - 2\n\ninterval = [-2, 4]\nxmin, xmax = interval\nn = 20\nxarr = np.linspace(xmin, xmax, n+1)\n\nsols = find_sols(f, xarr)\ntry:\n assert len(sols) > 0\nexcept(AssertionError, TypeError):\n print(\"There are no solutions for f(x) in the given interval.\")\nprint(\"There {} in: \"\n .format(\"is a solution\" if len(sols) == 1 else \"are solutions\"),\n *sols, sep = '\\n', end = '\\n\\n')\n\n######## ^ Copied from ex01 ########\n\nprint(\"Regula-Falsi method\", end = '\\n\\n')\nfalsi_iters = 63\nfalsi_sol_points = []\nfor sol in sols:\n print(f\"case: solution in {sol}\")\n falsi_sol_points.append(falsi_solve(f, sol, iters = falsi_iters))\n print(f\"solution: x = {falsi_sol_points[sols.index(sol)]}\")\n print(f\"iterations: {falsi_iters}\", end = '\\n\\n')\n\n# plotting\nprint(\"Plotting follows.\")\nask_continue()\n\nimport matplotlib.pyplot as plt\n\nxlin = np.linspace(xmin, xmax, 1000)\n\nplt.rc('text', usetex = True)\nplt.rc('font', family = 'serif')\n\nplt.figure(figsize = (9, 6))\nplt.suptitle(r\"$y(x) = x^2 - 3x + e^x - 2$\", fontsize = 16)\nplt.title(r\"Regula-Falsi method\", y = 1.03)\n\nplt.plot(xlin, f(xlin), 'blue')\n\nplt.xlim(-1.5, 2.5)\nplt.ylim(-5, 10)\ngsize = max(f(xlin)) - min(f(xlin)) # plot vertical size\n\nplt.hlines(0, min(xlin), max(xlin), 'black')\n\nfor sol in falsi_sol_points:\n plt.vlines(sol, f(sol)-gsize/20, f(sol)+gsize/20, color='red')\n plt.text(sol, f(sol)+gsize/16, f'x = {sol:.5f}',\n horizontalalignment = 'center')\n\nplt.tight_layout(rect=[0, 0.03, 1, 0.95])\nplt.show()", "from package import redact_ex\n\nfrom package import solve_implicit_ode\n\nimport numpy as np\n\n\nEXERCISE_05 = \"\"\"\\\nMake a program that is able to graphically solve the previous equation\nusing the fully implicit FTCS scheme.\\\n\"\"\"\n\nredact_ex(EXERCISE_05, 5)\n\n\nslices = 20\nitern = 1000\nplot_frequency = 0.05\n\ndeltat = 1e-3\ndeltax = 1e-1\n\nalpha = 1\n\ns = alpha*deltat/deltax**2\n\n\namat = np.diag([1]+[1+2*s]*(slices-1)+[1]) \\\n + np.diag([0]+[ -s]*(slices-1)+[ ], k = 1) \\\n + np.diag([ ]+[ -s]*(slices-1)+[0], k = -1)\n\niamat = np.linalg.inv(amat)\n\niden = np.identity(len(amat))\n\n\ndef iftcs_boundary_conditions(lap, ciarr):\n\n slices = len(ciarr)-1\n\n ciarr[0] = 0; ciarr[slices] = 10\n\n return ciarr\n\n\ntprev = np.zeros(slices+1); tpprev = np.zeros(slices+1)\ntprev[0] = 0; tpprev[0] = 0\ntprev[slices] = 10; tpprev[slices] = 10\n\ninitial_conditions = [tprev, tpprev]\n\n\nprint(\"Computing...\", end='\\n\\n')\nsolve_implicit_ode(iamat, iden, initial_conditions, iftcs_boundary_conditions,\n slices, itern, plot_frequency)\n", "from package import redact_ex\n\nfrom package import solve_implicit_ode\n\nimport numpy as np\n\n\nEXERCISE_6 = \"\"\"\\\nMake a program that is able to graphically solve the previous equation\nusing the Crank-Nicolson scheme.\\\n\"\"\"\n\nredact_ex(EXERCISE_6, 6)\n\n\nslices = 20\nitern = 1000\nplot_frequency = 0.05\n\ndeltat = 1e-3\ndeltax = 1e-1\n\nalpha = 1\n\ns = alpha*deltat/deltax**2\n\n\namat = np.diag([1]+[ 1+s]*(slices-1)+[1]) \\\n + np.diag([0]+[-s/2]*(slices-1)+[ ], k = 1) \\\n + np.diag([ ]+[-s/2]*(slices-1)+[0], k = -1)\n\niamat = np.linalg.inv(amat)\n\niden = 
np.identity(len(amat))\n\nbmat = np.diag([1]+[ 1-s]*(slices-1)+[1]) \\\n     + np.diag([0]+[ s/2]*(slices-1)+[ ], k = 1) \\\n     + np.diag([ ]+[ s/2]*(slices-1)+[0], k = -1)\n\n\ndef cn_boundary_conditions(lap, ciarr):\n\n    slices = len(ciarr)-1\n\n    ciarr[0] = 0; ciarr[slices] = 10\n\n    return ciarr\n\ntprev = np.zeros(slices+1); tpprev = np.zeros(slices+1)\ntprev[0] = 0; tpprev[0] = 0\ntprev[slices] = 10; tpprev[slices] = 10\n\ninitial_conditions = [tprev, tpprev]\n\n\nprint(\"Computing...\", end='\\n\\n')\n# Crank-Nicolson uses bmat on the right-hand side; passing iden here would\n# reduce the update to the fully implicit scheme of ex05.\nsolve_implicit_ode(iamat, bmat, initial_conditions, cn_boundary_conditions,\n    slices, itern, plot_frequency)\n", "import numpy as np\nimport itertools as it\nimport matplotlib.pyplot as plt\n\n\ndef euler_differentiate(w, bounds = None, delta = 1e-3, itern = 1e3,\n    plot = True, title = None,\n    shape = 'v', figsize = (10, 6), figshape = None, fontsize = 16,\n    names = 'txyz', graph = 'all',\n    oneout = False, force = False):\n\n    if bounds is None:\n        bounds = [0]*len(w)\n\n    if not force and itern >= 1e9:\n        raise OverflowError((\"number of iterations is too big: {!s}\" + \"\\n\" + \\\n            \"you can ignore this error by setting the `force` kwarg to `True`\")\n            .format(itern))\n\n    itern = int(itern)\n\n    var = bounds\n    vec = [[v] for v in var] if plot else None\n\n    if plot:\n        plt.rc('text', usetex = True)\n        plt.rc('axes', labelsize = fontsize)\n        plt.rc('figure', titlesize = fontsize)\n        plt.rc('font', family = 'serif')\n\n        if graph == 'all': agraph = range(len(list(\n            it.combinations(range(len(vec)), 2))))\n        else: agraph = graph\n\n        figshape = figshape or (\n            (len(agraph), 1) if shape == 'v' else (1, len(agraph)))\n\n        fig, ax = plt.subplots(figshape[0], figshape[1], figsize = figsize)\n        fig.suptitle(title, x = 0.525, y = 0.975)\n\n    for n in range(1, itern+1): # iterate method n times\n\n        pvar = [v for v in var]\n\n        for i,_ in enumerate(var): # compute new variables\n            var[i] += w[i](*[delta]+[pvar[j] for j in range(len(pvar))])\n\n            if plot: vec[i].append(var[i])\n\n        if plot and n % int(np.sqrt(itern)) == 0: # best performance\n            plot_differential(vec, fig, ax,\n                shape = shape, names = names, graph = graph,\n                oneout = oneout)\n\n            for i in range(len(vec)): # resetting vectors for performance\n                vec[i] = [vec[i][-1]]\n\n            oneout = True # first item has already been removed (first plot)\n\n    if plot: plt.tight_layout(rect=[0.05, 0.05, 0.95, 0.95]); plt.show()\n    return None\n\n\n\n\ndef range_kutta_differentiate(w, order = 4,\n    bounds = None, delta = 1e-3, itern = 1e3,\n    plot = True, title = None,\n    shape = 'v', figsize = (10, 6), figshape = None, fontsize = 16,\n    names = 'txyz', graph = 'all',\n    oneout = False, force = False):\n\n    if bounds is None:\n        bounds = [0]*len(w)\n\n    if not force and itern >= 1e9:\n        raise OverflowError((\"number of iterations is too big: {!s}\" + \"\\n\" + \\\n            \"you can ignore this error by setting the `force` kwarg to `True`\")\n            .format(itern))\n\n    itern = int(itern)\n    o = int(order)\n    assert o == 2 or o == 4\n\n    var = bounds\n    vec = [[v] for v in var] if plot else None\n\n    if plot:\n        plt.rc('text', usetex = True)\n        plt.rc('axes', labelsize = fontsize)\n        plt.rc('figure', titlesize = fontsize)\n        plt.rc('font', family = 'serif')\n\n        if graph == 'all': agraph = range(len(list(\n            it.combinations(range(len(vec)), 2))))\n        else: agraph = graph\n\n        figshape = figshape or (\n            (len(agraph), 1) if shape == 'v' else (1, len(agraph)))\n\n        fig, ax = plt.subplots(figshape[0], figshape[1], figsize = figsize)\n        fig.suptitle(title, x = 0.525, y = 0.975)\n\n    for n in range(1, itern+1):\n\n        pvar = [v for v in 
var]\n        k = dict()\n\n        for i,_ in enumerate(var):\n\n            k[i] = dict()\n            k[i][0] = w[i](*[delta]+[pvar[j] for j in range(len(pvar))])\n\n            for o in range(1, order):\n                div = (1*(1+(o%3 != 0))) # 1, 2, 2, 1\n                tel = delta/div\n                kel = k[i][o-1]/div\n                k[i][o] = w[i](*[delta+tel]+[pvar[j]+kel \\\n                    for j in range(len(pvar))])\n\n            var[i] += sum(k[i][o]/(3*(1+(o%3 == 0))) \\\n                for o in range(order)) # k1/6 + k2/3 + k3/3 + k4/6\n\n            if plot: vec[i].append(var[i])\n\n        if plot and (n % int(np.sqrt(itern)) == 0 or n == itern): # performance\n            plot_differential(vec, fig, ax,\n                shape = shape, names = names, graph = graph,\n                oneout = oneout)\n\n            for i in range(len(vec)): # resetting vectors for performance\n                vec[i] = [vec[i][-1]]\n\n            oneout = True # first item has already been removed (first plot)\n\n    if plot: plt.tight_layout(rect=[0.00, 0.05, 1.00, 0.95]); plt.show()\n    # rect is left, bottom, right, top\n    return None\n\n\n\n\ndef plot_differential(vec, fig, ax,\n    shape = 'v', names = 'txyz', graph = 'all',\n    oneout = False):\n\n    s = 0; ys = 0\n    nv = len(vec)\n    pp = list(it.combinations(range(nv), 2)) # plot keys\n\n    vnames = names\n    if graph == 'all': graph = range(len(pp))\n\n    if oneout is False: # fixing plot\n        for el in range(nv):\n            vec[el] = vec[el][1:]\n\n    for left, right in pp:\n        s += 1\n        if s-1 not in graph: continue\n        ys += 1\n        if type(ax) == np.ndarray:\n            if type(ax[0]) == np.ndarray:\n                print(\"figshape not yet supported (use None)\")\n            else:\n                ax[ys-1].set_xlabel(r'$'+vnames[left]+r'$')\n                ax[ys-1].set_ylabel(r'$'+vnames[right]+r'$')\n                ax[ys-1].plot(vec[left], vec[right], color = '#3c78f0')\n        else:\n            ax.set_xlabel(r'$'+vnames[left]+r'$')\n            ax.set_ylabel(r'$'+vnames[right]+r'$')\n            ax.plot(vec[left], vec[right], color = '#3c78f0')\n\n    return None\n\n\n\n\ndef euler_differentiate_mod(w, bounds = None, delta = 1e-3, itern = 1e3,\n    plot = True, title = None,\n    shape = 'v', figsize = (10, 6), figshape = None, fontsize = 16,\n    names = 'txyz', graph = 'all',\n    oneout = False, force = False,\n    tols = [10, 0.1], step_mults = [0.1, 10],\n    max_delta = 1, min_delta = 1e-9,\n    verbose = False):\n\n    if bounds is None:\n        bounds = [0]*len(w)\n\n    if not force and itern >= 1e9:\n        raise OverflowError((\"number of iterations is too big: {!s}\" + \"\\n\" + \\\n            \"you can ignore this error by setting the `force` kwarg to `True`\")\n            .format(itern))\n\n    itern = int(itern)\n\n    var = bounds\n    vec = [[v] for v in var] if plot else None\n\n    if plot:\n        plt.rc('text', usetex = True)\n        plt.rc('axes', labelsize = fontsize)\n        plt.rc('figure', titlesize = fontsize)\n        plt.rc('font', family = 'serif')\n\n        if graph == 'all': agraph = range(len(list(\n            it.combinations(range(len(vec)), 2))))\n        else: agraph = graph\n\n        figshape = figshape or (\n            (len(agraph), 1) if shape == 'v' else (1, len(agraph)))\n\n        fig, ax = plt.subplots(figshape[0], figshape[1], figsize = figsize)\n        fig.suptitle(title, x = 0.525, y = 0.975)\n\n    n = 1\n    while n < itern:\n\n        pvar = [v for v in var]\n\n        for i,_ in enumerate(var): # compute new variables\n            var[i] += w[i](*[delta]+[pvar[j] for j in range(len(pvar))])\n\n            if plot: vec[i].append(var[i])\n\n        if plot and n % int(np.sqrt(itern)) == 0: # best performance\n            plot_differential(vec, fig, ax,\n                shape = shape, names = names, graph = graph,\n                oneout = oneout)\n\n            for i in range(len(vec)): # resetting vectors for performance\n                vec[i] = [vec[i][-1]]\n\n            oneout = True # first item has already been removed (first plot)\n\n        fchanges = [abs(var[i]-pvar[i]) for i in range(1, len(var))]\n        fd = max(fchanges)\n\n        if verbose: 
print(f\"delta = {delta}\")\n try:\n if len(check) > 2:\n n += 1\n except:\n check = []\n if fd > tols[0]:\n check.append(n)\n delta *= step_mults[0] if delta >= min_delta else 1\n elif fd < tols[1]:\n check.append(n)\n delta *= step_mults[1] if delta <= max_delta else 1\n else:\n check = []\n n += 1\n\n if plot:\n plt.tight_layout(rect=[0.05, 0.05, 0.95, 0.95])\n plt.show()\n\n return None\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.rc", "matplotlib.pyplot.xlim", "numpy.exp", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.linalg.inv", "numpy.diag", "numpy.zeros" ], [ "numpy.linalg.inv", "numpy.diag", "numpy.zeros" ], [ "matplotlib.pyplot.tight_layout", "numpy.sqrt", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sun-jiao/torch_DCEC
[ "0c168276f6f93c890f8c938c77e70cb15db6cdb1" ]
[ "torch_DCEC.py" ]
[ "from __future__ import print_function, division\n\nif __name__ == \"__main__\":\n\n import argparse\n import torch\n import torch.nn as nn\n import torch.optim as optim\n from torch.optim import lr_scheduler\n from torchvision import datasets, models, transforms\n import os\n import math\n import fnmatch\n import nets\n import utils\n import training_functions\n import torch.utils.data\n from torch.utils.tensorboard import SummaryWriter\n\n # Translate string entries to bool for parser\n def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n parser = argparse.ArgumentParser(description='Use DCEC for clustering')\n parser.add_argument('--mode', default='train_full', choices=['train_full', 'pretrain'], help='mode')\n parser.add_argument('--tensorboard', default=True, type=bool, help='export training stats to tensorboard')\n parser.add_argument('--pretrain', default=True, type=str2bool, help='perform autoencoder pretraining')\n parser.add_argument('--pretrained_net', default=1, help='index or path of pretrained net')\n parser.add_argument('--net_architecture', default='CAE_3', choices=['CAE_3', 'CAE_bn3', 'CAE_4', 'CAE_bn4', 'CAE_5', 'CAE_bn5'], help='network architecture used')\n parser.add_argument('--dataset', default='MNIST-train',\n choices=['MNIST-train', 'custom', 'MNIST-test', 'MNIST-full'],\n help='custom or prepared dataset')\n parser.add_argument('--dataset_path', default='data', help='path to dataset')\n parser.add_argument('--batch_size', default=256, type=int, help='batch size')\n parser.add_argument('--rate', default=0.001, type=float, help='learning rate for clustering')\n parser.add_argument('--rate_pretrain', default=0.001, type=float, help='learning rate for pretraining')\n parser.add_argument('--weight', default=0.0, type=float, help='weight decay for clustering')\n parser.add_argument('--weight_pretrain', default=0.0, type=float, help='weight decay for clustering')\n parser.add_argument('--sched_step', default=200, type=int, help='scheduler steps for rate update')\n parser.add_argument('--sched_step_pretrain', default=200, type=int,\n help='scheduler steps for rate update - pretrain')\n parser.add_argument('--sched_gamma', default=0.1, type=float, help='scheduler gamma for rate update')\n parser.add_argument('--sched_gamma_pretrain', default=0.1, type=float,\n help='scheduler gamma for rate update - pretrain')\n parser.add_argument('--epochs', default=1, type=int, help='clustering epochs') # value should be 1000 in real train\n parser.add_argument('--epochs_pretrain', default=1, type=int, help='pretraining epochs') # value should be 300 in real train\n parser.add_argument('--printing_frequency', default=10, type=int, help='training stats printing frequency')\n parser.add_argument('--gamma', default=0.1, type=float, help='clustering loss weight')\n parser.add_argument('--update_interval', default=80, type=int, help='update interval for target distribution')\n parser.add_argument('--tol', default=1e-2, type=float, help='stop criterium tolerance')\n parser.add_argument('--num_clusters', default=10, type=int, help='number of clusters')\n parser.add_argument('--custom_img_size', default=[28, 28, 3], nargs=3, type=int, help='size of custom images')\n parser.add_argument('--leaky', default=True, type=str2bool)\n parser.add_argument('--neg_slope', default=0.01, type=float)\n parser.add_argument('--activations', 
default=False, type=str2bool)\n    parser.add_argument('--bias', default=True, type=str2bool)\n    args = parser.parse_args()\n    print(args)\n\n    if args.mode == 'pretrain' and not args.pretrain:\n        print(\"Nothing to do :(\")\n        exit()\n\n    board = args.tensorboard\n\n    # Deal with pretraining option and way of showing network path\n    pretrain = args.pretrain\n    net_is_path = True\n    idx = 1\n    if not pretrain:\n        try:\n            int(args.pretrained_net)\n            idx = args.pretrained_net\n            net_is_path = False\n        except (ValueError, TypeError): # not an index, treat it as a path\n            pass\n    params = {'pretrain': pretrain}\n\n    # Directories\n    # Create directories structure\n    dirs = ['runs', 'reports', 'nets']\n    list(map(lambda x: os.makedirs(x, exist_ok=True), dirs))\n\n    # Net architecture\n    model_name = args.net_architecture\n    # Indexing (for automated reports saving) - allows to run many trainings and get all the reports collected\n    if pretrain or (not pretrain and net_is_path):\n        reports_list = sorted(os.listdir('reports'), reverse=True)\n        if reports_list:\n            for file in reports_list:\n                # print(file)\n                if fnmatch.fnmatch(file, model_name + '*'):\n                    idx = int(str(file)[-7:-4]) + 1\n                    break\n\n    # Base filename\n    name = model_name + '_' + str(idx).zfill(3)\n\n    # Filenames for report and weights\n    name_txt = name + '.txt'\n    name_net = name\n    pretrained = name + '_pretrained.pt'\n\n    # Arrange filenames for report, network weights, pretrained network weights\n    name_txt = os.path.join('reports', name_txt)\n    name_net = os.path.join('nets', name_net)\n    if net_is_path and not pretrain:\n        pretrained = args.pretrained_net\n    else:\n        pretrained = os.path.join('nets', pretrained)\n    if not pretrain and not os.path.isfile(pretrained):\n        print(\"No pretrained weights found. Choose an existing pretrained network, or create new weights with pretrain=True\")\n\n    model_files = [name_net, pretrained]\n    params['model_files'] = model_files\n\n    # Open file\n    if pretrain:\n        f = open(name_txt, 'w')\n    else:\n        f = open(name_txt, 'a')\n    params['txt_file'] = f\n\n    # Delete tensorboard entry if it exists (so the new charts do not overlap and become unreadable)\n    try:\n        os.system(\"rm -rf runs/\" + name)\n    except:\n        pass\n\n    # Initialize tensorboard writer\n    if board:\n        writer = SummaryWriter('runs/' + name)\n        params['writer'] = writer\n    else:\n        params['writer'] = None\n\n    # Hyperparameters\n\n    # Used dataset\n    dataset = args.dataset\n\n    # Batch size\n    batch = args.batch_size\n    params['batch'] = batch\n    # Number of workers (typically 4*num_of_GPUs)\n    workers = 4\n    # Learning rate\n    rate = args.rate\n    rate_pretrain = args.rate_pretrain\n    # Adam params\n    # Weight decay\n    weight = args.weight\n    weight_pretrain = args.weight_pretrain\n    # Scheduler steps for rate update\n    sched_step = args.sched_step\n    sched_step_pretrain = args.sched_step_pretrain\n    # Scheduler gamma - multiplier for learning rate\n    sched_gamma = args.sched_gamma\n    sched_gamma_pretrain = args.sched_gamma_pretrain\n\n    # Number of epochs\n    epochs = args.epochs\n    pretrain_epochs = args.epochs_pretrain\n    params['pretrain_epochs'] = pretrain_epochs\n\n    # Printing frequency\n    print_freq = args.printing_frequency\n    params['print_freq'] = print_freq\n\n    # Clustering loss weight:\n    gamma = args.gamma\n    params['gamma'] = gamma\n\n    # Update interval for target distribution:\n    update_interval = args.update_interval\n    params['update_interval'] = update_interval\n\n    # Tolerance for label changes:\n    tol = args.tol\n    params['tol'] = tol\n\n    # Number of clusters\n    num_clusters = args.num_clusters\n\n    # Report for settings\n    tmp = \"Training the '\" + model_name + \"' architecture\"\n    
utils.print_both(f, tmp)\n tmp = \"\\n\" + \"The following parameters are used:\"\n utils.print_both(f, tmp)\n tmp = \"Batch size:\\t\" + str(batch)\n utils.print_both(f, tmp)\n tmp = \"Number of workers:\\t\" + str(workers)\n utils.print_both(f, tmp)\n tmp = \"Learning rate:\\t\" + str(rate)\n utils.print_both(f, tmp)\n tmp = \"Pretraining learning rate:\\t\" + str(rate_pretrain)\n utils.print_both(f, tmp)\n tmp = \"Weight decay:\\t\" + str(weight)\n utils.print_both(f, tmp)\n tmp = \"Pretraining weight decay:\\t\" + str(weight_pretrain)\n utils.print_both(f, tmp)\n tmp = \"Scheduler steps:\\t\" + str(sched_step)\n utils.print_both(f, tmp)\n tmp = \"Scheduler gamma:\\t\" + str(sched_gamma)\n utils.print_both(f, tmp)\n tmp = \"Pretraining scheduler steps:\\t\" + str(sched_step_pretrain)\n utils.print_both(f, tmp)\n tmp = \"Pretraining scheduler gamma:\\t\" + str(sched_gamma_pretrain)\n utils.print_both(f, tmp)\n tmp = \"Number of epochs of training:\\t\" + str(epochs)\n utils.print_both(f, tmp)\n tmp = \"Number of epochs of pretraining:\\t\" + str(pretrain_epochs)\n utils.print_both(f, tmp)\n tmp = \"Clustering loss weight:\\t\" + str(gamma)\n utils.print_both(f, tmp)\n tmp = \"Update interval for target distribution:\\t\" + str(update_interval)\n utils.print_both(f, tmp)\n tmp = \"Stop criterium tolerance:\\t\" + str(tol)\n utils.print_both(f, tmp)\n tmp = \"Number of clusters:\\t\" + str(num_clusters)\n utils.print_both(f, tmp)\n tmp = \"Leaky relu:\\t\" + str(args.leaky)\n utils.print_both(f, tmp)\n tmp = \"Leaky slope:\\t\" + str(args.neg_slope)\n utils.print_both(f, tmp)\n tmp = \"Activations:\\t\" + str(args.activations)\n utils.print_both(f, tmp)\n tmp = \"Bias:\\t\" + str(args.bias)\n utils.print_both(f, tmp)\n\n # Data preparation\n if 'MNIST' in dataset:\n # Uses slightly modified torchvision MNIST class\n import mnist\n\n mnist_train = True\n mnist_download = False\n mnist_small = False\n mnist_full = False\n\n if dataset == 'MNIST-train':\n tmp = \"\\nData preparation\\nReading data from: MNIST train dataset\"\n mnist_train = True\n mnist_download = True\n elif dataset == 'MNIST-test':\n tmp = \"\\nData preparation\\nReading data from: MNIST test dataset\"\n mnist_train = False\n mnist_download = True\n elif dataset == 'MNIST-full':\n tmp = \"\\nData preparation\\nReading data from: MNIST full dataset\"\n mnist_full = True\n mnist_download = True\n\n utils.print_both(f, tmp)\n img_size = [28, 28, 1]\n tmp = \"Image size used:\\t{0}x{1}\".format(img_size[0], img_size[1])\n utils.print_both(f, tmp)\n\n dataset = mnist.MNIST('../data', train=mnist_train, download=mnist_download, small=mnist_small, full=mnist_full,\n transform=transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n dataloader = torch.utils.data.DataLoader(dataset,\n batch_size=batch, shuffle=False, num_workers=workers)\n\n dataset_size = len(dataset)\n tmp = \"Training set size:\\t\" + str(dataset_size)\n utils.print_both(f, tmp)\n else:\n # Data folder\n data_dir = args.dataset_path\n tmp = \"\\nData preparation\\nReading data from:\\t./\" + data_dir\n utils.print_both(f, tmp)\n\n # Image size\n custom_size = args.custom_img_size\n if isinstance(custom_size, list):\n img_size = custom_size\n\n tmp = \"Image size used:\\t{0}x{1}\".format(img_size[0], img_size[1])\n utils.print_both(f, tmp)\n\n # Transformations\n data_transforms = transforms.Compose([\n transforms.Resize(img_size[0:2]),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n 
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n        ])\n\n        # Read data from selected folder and apply transformations\n        image_dataset = datasets.ImageFolder(data_dir, data_transforms)\n        # Prepare data for network: shuffle and arrange batches\n        dataloader = torch.utils.data.DataLoader(image_dataset, batch_size=batch,\n                                                 shuffle=False, num_workers=workers)\n\n        # Size of data sets\n        dataset_size = len(image_dataset)\n        tmp = \"Training set size:\\t\" + str(dataset_size)\n        utils.print_both(f, tmp)\n\n    params['dataset_size'] = dataset_size\n\n    # GPU check\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    tmp = \"\\nPerforming calculations on:\\t\" + str(device)\n    utils.print_both(f, tmp + '\\n')\n    params['device'] = device\n\n    # Resolve the proper model class by name (avoids eval on a constructed string)\n    model = getattr(nets, model_name)(img_size, num_clusters=num_clusters, leaky=args.leaky, neg_slope=args.neg_slope)\n\n    # Tensorboard model representation\n    # if board:\n    #     writer.add_graph(model, torch.autograd.Variable(torch.Tensor(batch, img_size[2], img_size[0], img_size[1])))\n\n    model = model.to(device)\n    # Reconstruction loss\n    criterion_1 = nn.MSELoss(reduction='mean')\n    # Clustering loss\n    criterion_2 = nn.KLDivLoss(reduction='sum')\n\n    criteria = [criterion_1, criterion_2]\n\n    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=rate, weight_decay=weight)\n\n    optimizer_pretrain = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=rate_pretrain, weight_decay=weight_pretrain)\n\n    optimizers = [optimizer, optimizer_pretrain]\n\n    scheduler = lr_scheduler.StepLR(optimizer, step_size=sched_step, gamma=sched_gamma)\n    scheduler_pretrain = lr_scheduler.StepLR(optimizer_pretrain, step_size=sched_step_pretrain, gamma=sched_gamma_pretrain)\n\n    schedulers = [scheduler, scheduler_pretrain]\n\n    if args.mode == 'train_full':\n        model = training_functions.train_model(model, dataloader, criteria, optimizers, schedulers, epochs, params)\n    elif args.mode == 'pretrain':\n        model = training_functions.pretraining(model, dataloader, criteria[0], optimizers[1], schedulers[1], epochs, params)\n\n    # Save final model\n    torch.save(model.state_dict(), name_net + '.pt')\n\n    # Close files\n    f.close()\n    if board:\n        writer.close()\n" ]
[ [ "torch.nn.KLDivLoss", "torch.utils.data.DataLoader", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.nn.MSELoss", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
johnberg1/Text-Based-Inference
[ "504f9246336277fe14ff11bfeabd61fcdb5d0ff9" ]
[ "models/psp.py" ]
[ "\"\"\"\nThis file defines the core research contribution\n\"\"\"\nimport copy\nfrom argparse import Namespace\n\nimport torch\nfrom torch import nn\nimport math\n\nfrom configs.paths_config import model_paths\nfrom models.encoders import psp_encoders\nfrom models.encoders import psp_encoders_adain\nfrom models.stylegan2.model import Generator\n\n\nclass pSp(nn.Module):\n\n\tdef __init__(self, opts):\n\t\tsuper(pSp, self).__init__()\n\t\tself.set_opts(opts)\n\t\tself.n_styles = int(math.log(self.opts.output_size, 2)) * 2 - 2\n\t\t# Define architecture\n\t\tself.encoder = self.set_encoder()\n\t\tself.decoder = Generator(self.opts.output_size, 512, 8)\n\t\tself.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))\n\t\t# Load weights if needed\n\t\tself.load_weights()\n\n\tdef set_encoder(self):\n\t\treturn psp_encoders_adain.GradualStyleEncoder(50, 'ir_se', self.n_styles, self.opts)\n\n\tdef load_weights(self):\n\t\tif self.opts.checkpoint_path is not None:\n\t\t\tprint(f'Loading SAM from checkpoint: {self.opts.checkpoint_path}')\n\t\t\tckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')\n\t\t\tself.encoder.load_state_dict(self.__get_keys(ckpt, 'encoder'), strict=False)\n\t\t\tself.decoder.load_state_dict(self.__get_keys(ckpt, 'decoder'), strict=True)\n\t\t\tif self.opts.start_from_encoded_w_plus:\n\t\t\t\tself.pretrained_encoder = self.__get_pretrained_psp_encoder()\n\t\t\t\tself.pretrained_encoder.load_state_dict(self.__get_keys(ckpt, 'pretrained_encoder'), strict=True)\n\t\t\tself.__load_latent_avg(ckpt)\n\t\telse:\n\t\t\tprint('Loading encoders weights from irse50!')\n\t\t\tencoder_ckpt = torch.load(model_paths['ir_se50'])\n\t\t\t# Transfer the RGB input of the irse50 network to the first 3 input channels of SAM's encoder\n\t\t\tif self.opts.input_nc != 3:\n\t\t\t\tshape = encoder_ckpt['input_layer.0.weight'].shape\n\t\t\t\taltered_input_layer = torch.randn(shape[0], self.opts.input_nc, shape[2], shape[3], dtype=torch.float32)\n\t\t\t\taltered_input_layer[:, :3, :, :] = encoder_ckpt['input_layer.0.weight']\n\t\t\t\tencoder_ckpt['input_layer.0.weight'] = altered_input_layer\n\t\t\tself.encoder.load_state_dict(encoder_ckpt, strict=False)\n\t\t\tprint(f'Loading decoder weights from pretrained path: {self.opts.stylegan_weights}')\n\t\t\tckpt = torch.load(self.opts.stylegan_weights)\n\t\t\tself.decoder.load_state_dict(ckpt['g_ema'], strict=True)\n\t\t\tself.__load_latent_avg(ckpt, repeat=self.n_styles)\n\t\t\tif self.opts.start_from_encoded_w_plus:\n\t\t\t\tself.pretrained_encoder = self.__load_pretrained_psp_encoder()\n\t\t\t\tself.pretrained_encoder.eval()\n\n\tdef forward(self, x, txt_embed, resize=True, latent_mask=None, input_code=False, randomize_noise=True,\n\t\t\t\tinject_latent=None, return_latents=False, alpha=None, input_is_full=False):\n\t\tif input_code:\n\t\t\tcodes = x\n\t\telse:\n\t\t\tcodes = self.encoder(x, txt_embed)\n\t\t\t# normalize with respect to the center of an average face\n\t\t\tif self.opts.start_from_latent_avg:\n\t\t\t\tcodes = codes + self.latent_avg\n\t\t\t# normalize with respect to the latent of the encoded image of pretrained pSp encoder\n\t\t\telif self.opts.start_from_encoded_w_plus:\n\t\t\t\twith torch.no_grad():\n\t\t\t\t\tencoded_latents = self.pretrained_encoder(x)\n\t\t\t\t\tencoded_latents = encoded_latents + self.latent_avg\n\t\t\t\tcodes = codes + encoded_latents\n\n\t\tif latent_mask is not None:\n\t\t\tfor i in latent_mask:\n\t\t\t\tif inject_latent is not None:\n\t\t\t\t\tif alpha is not None:\n\t\t\t\t\t\tcodes[:, i] = alpha * 
inject_latent[:, i] + (1 - alpha) * codes[:, i]\n\t\t\t\t\telse:\n\t\t\t\t\t\tcodes[:, i] = inject_latent[:, i]\n\t\t\t\telse:\n\t\t\t\t\tcodes[:, i] = 0\n\n\t\tinput_is_latent = (not input_code) or (input_is_full)\n\t\timages, result_latent = self.decoder([codes],\n\t\t\t\t\t\t\t\t\t\t\t input_is_latent=input_is_latent,\n\t\t\t\t\t\t\t\t\t\t\t randomize_noise=randomize_noise,\n\t\t\t\t\t\t\t\t\t\t\t return_latents=return_latents)\n\n\t\tif resize:\n\t\t\timages = self.face_pool(images)\n\n\t\tif return_latents:\n\t\t\treturn images, result_latent\n\t\telse:\n\t\t\treturn images\n\n\tdef set_opts(self, opts):\n\t\tself.opts = opts\n\n\tdef __load_latent_avg(self, ckpt, repeat=None):\n\t\tif 'latent_avg' in ckpt:\n\t\t\tself.latent_avg = ckpt['latent_avg'].to(self.opts.device)\n\t\t\tif repeat is not None:\n\t\t\t\tself.latent_avg = self.latent_avg.repeat(repeat, 1)\n\t\telse:\n\t\t\tself.latent_avg = None\n\n\tdef __get_pretrained_psp_encoder(self):\n\t\topts_encoder = vars(copy.deepcopy(self.opts))\n\t\topts_encoder['input_nc'] = 3\n\t\topts_encoder = Namespace(**opts_encoder)\n\t\tencoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.n_styles, opts_encoder)\n\t\treturn encoder\n\n\tdef __load_pretrained_psp_encoder(self):\n\t\tprint(f'Loading pSp encoder from checkpoint: {self.opts.pretrained_psp_path}')\n\t\tckpt = torch.load(self.opts.pretrained_psp_path, map_location='cpu')\n\t\tencoder_ckpt = self.__get_keys(ckpt, name='encoder')\n\t\tencoder = self.__get_pretrained_psp_encoder()\n\t\tencoder.load_state_dict(encoder_ckpt, strict=False)\n\t\treturn encoder\n\n\t@staticmethod\n\tdef __get_keys(d, name):\n\t\tif 'state_dict' in d:\n\t\t\td = d['state_dict']\n\t\td_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}\n\t\treturn d_filt\n" ]
[ [ "torch.randn", "torch.no_grad", "torch.nn.AdaptiveAvgPool2d", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZongSingHuang/Binary-Whale-Optimization-Algorithm
[ "482a7ffd19f1274a92bfa99b27782a59d134c70e" ]
[ "main_9010.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 23 21:29:10 2020\n\n@author: ZongSing_NB\n\"\"\"\n\nfrom BWOA import BWOA\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nimport functools\nimport time\nimport warnings\n#------------------------------------------------------------------------------\n\n\nwarnings.filterwarnings(\"ignore\")\nnp.random.seed(42)\n#------------------------------------------------------------------------------\n\n\n# 讀資料\nBreastcancer = pd.read_csv('Breastcancer.csv', header=None).values\nBreastEW = pd.read_csv('BreastEW.csv', header=None).values\nCongress = pd.read_csv('Congress.csv', header=None).values\nExactly = pd.read_csv('Exactly.csv', header=None).values\nExactly2 = pd.read_csv('Exactly2.csv', header=None).values\nHeartEW = pd.read_csv('HeartEW.csv', header=None).values\nIonosphere = pd.read_csv('Ionosphere.csv', header=None).values\nKrVsKpEW = pd.read_csv('KrVsKpEW.csv', header=None).values\nLymphography = pd.read_csv('Lymphography.csv', header=None).values\nM_of_n = pd.read_csv('M-of-n.csv', header=None).values\nPenglungEW = pd.read_csv('PenglungEW.csv', header=None).values\nSonar = pd.read_csv('Sonar.csv', header=None).values\nSpectEW = pd.read_csv('SpectEW.csv', header=None).values\nTic_tac_toe = pd.read_csv('Tic-tac-toe.csv', header=None).values\nVote = pd.read_csv('Vote.csv', header=None).values\nWaveformEW = pd.read_csv('WaveformEW.csv', header=None).values\nWine = pd.read_csv('Wine.csv', header=None).values\nZoo = pd.read_csv('Zoo.csv', header=None).values\n\nX1, y1 = Breastcancer[:, :-1], Breastcancer[:, -1]\nX2, y2 = BreastEW[:, :-1], BreastEW[:, -1]\nX3, y3 = Congress[:, :-1], Congress[:, -1]\nX4, y4 = Exactly[:, :-1], Exactly[:, -1]\nX5, y5 = Exactly2[:, :-1], Exactly2[:, -1]\nX6, y6 = HeartEW[:, :-1], HeartEW[:, -1]\nX7, y7 = Ionosphere[:, :-1], Ionosphere[:, -1]\nX8, y8 = KrVsKpEW[:, :-1], KrVsKpEW[:, -1]\nX9, y9 = Lymphography[:, :-1], Lymphography[:, -1]\nX10, y10 = M_of_n[:, :-1], M_of_n[:, -1]\nX11, y11 = PenglungEW[:, :-1], PenglungEW[:, -1]\nX12, y12 = Sonar[:, :-1], Sonar[:, -1]\nX13, y13 = SpectEW[:, :-1], SpectEW[:, -1]\nX14, y14 = Tic_tac_toe[:, :-1], Tic_tac_toe[:, -1]\nX15, y15 = Vote[:, :-1], Vote[:, -1]\nX16, y16 = WaveformEW[:, :-1], WaveformEW[:, -1]\nX17, y17 = Wine[:, :-1], Wine[:, -1]\nX18, y18 = Zoo[:, :-1], Zoo[:, -1]\n#------------------------------------------------------------------------------\n\n\ndef fitness(x, X, y):\n if x.ndim==1:\n x = x.reshape(1, -1)\n loss = np.zeros(x.shape[0])\n \n for i in range(x.shape[0]):\n if np.sum(x[i, :])>0:\n score = cross_val_score(KNeighborsClassifier(n_neighbors=5), X[:, x[i, :].astype(bool)], y, cv=skf)\n loss[i] = 0.99*(1-score.mean()) + 0.01*(np.sum(x[i, :])/X.shape[1])\n else:\n loss[i] = np.inf\n # print(666)\n return loss\n#------------------------------------------------------------------------------\n\n\nd = -1\ng = 70\np = 8\ntimes = 20\ntable = np.zeros((7, 18)) # ['avg acc', '% selected', 'avg time', 'avg loss', 'worst loss', 'best loss', 'std loss']\ntable[4, :] = -np.ones(18)*np.inf # worst\ntable[5, :] = np.ones(18)*np.inf # best\nall_for_std = np.zeros((times, 18))\nskf = StratifiedKFold(n_splits=10, shuffle=True)\n#------------------------------------------------------------------------------\n\n\nfor i in range(times):\n total_time = time.time()\n 
#------------------------------------------------------------------------------\n    # The 18 per-dataset blocks were identical up to the dataset index, so they\n    # are collapsed into one loop over (X, y) pairs; `j` is the table column.\n    datasets = [(X1, y1), (X2, y2), (X3, y3), (X4, y4), (X5, y5), (X6, y6),\n                (X7, y7), (X8, y8), (X9, y9), (X10, y10), (X11, y11), (X12, y12),\n                (X13, y13), (X14, y14), (X15, y15), (X16, y16), (X17, y17), (X18, y18)]\n    for j, (X, y) in enumerate(datasets):\n        start = time.time()\n        loss = functools.partial(fitness, X=X, y=y)\n        optimizer = BWOA(fit_func=loss, \n                         num_dim=X.shape[1], num_particle=p, max_iter=g, x_max=1, x_min=0)\n        optimizer.opt()\n        \n        # track the worst (max) loss, best (min) loss, summed loss, and elapsed time\n        if optimizer.gBest_score>table[4, j]: table[4, j] = optimizer.gBest_score\n        if optimizer.gBest_score<table[5, j]: table[5, j] = optimizer.gBest_score\n        table[3, j] += optimizer.gBest_score\n        table[2, j] += time.time()-start\n        all_for_std[i, j] = optimizer.gBest_score\n        \n        # 5-NN cross-validated accuracy on the selected feature subset, and the\n        # fraction of features kept\n        table[0, j] += cross_val_score(KNeighborsClassifier(n_neighbors=5), X[:, optimizer.gBest_X.astype(bool)], y, cv=skf).mean()\n        table[1, j] += np.sum(optimizer.gBest_X)/len(optimizer.gBest_X)\n    #------------------------------------------------------------------------------\n    \n    \n    print(i+1, ' ', round(time.time()-total_time, 2), 'sec')\n    #------------------------------------------------------------------------------\n\n\ntable[:4, :] = table[:4, :] / times\ntable[6, :] = np.std(all_for_std, axis=0)\ntable = pd.DataFrame(table)\ntable.columns = ['Breastcancer', 'BreastEW', 'Congress', 'Exactly', 'Exactly2', 'HeartEW',\n                 'Ionosphere', 'KrVsKpEW', 'Lymphography', 'M-of-n', 'PenglungEW', 'Sonar', \n                 'SpectEW', 'Tic-tac-toe', 'Vote', 'WaveformEW', 'Wine', 'Zoo']\ntable.index = ['avg acc', '% selected', 'avg time', 'avg loss', 'worst loss', 'best loss', 'std loss']\n\nall_for_std = pd.DataFrame(all_for_std)\nall_for_std.columns = ['Breastcancer', 'BreastEW', 'Congress', 'Exactly', 'Exactly2', 'HeartEW',\n                       'Ionosphere', 'KrVsKpEW', 'Lymphography', 'M-of-n', 'PenglungEW', 'Sonar', \n                       'SpectEW', 'Tic-tac-toe', 'Vote', 'WaveformEW', 'Wine', 'Zoo']\n" ]
[ [ "pandas.read_csv", "numpy.random.seed", "sklearn.model_selection.StratifiedKFold", "pandas.DataFrame", "numpy.ones", "sklearn.neighbors.KNeighborsClassifier", "numpy.std", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
gieses/pytrapment
[ "7c639b697e94da0307123b45303ce1a7743050d6" ]
[ "pytrapment/entrapment.py" ]
[ "\"\"\"Module to perform QC on the xiRT performance.\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom pyteomics import fasta, parser\nfrom scipy.spatial import distance\n\n\ndef compute_composition_df(seq_df):\n \"\"\"\n Compute the composition matrix for all proteins.\n\n Args:\n seq_df: df, dataframe with sequences\n\n Returns:\n df, with the composition of the proteins\n \"\"\"\n # get composition table\n df_seq_comp = pd.DataFrame(\n list(seq_df[\"sequence\"].apply(parser.amino_acid_composition).values)) * 1.0\n\n # add column with 0s for amino acids that didnt occur in the protein fasta file\n for i in parser.std_amino_acids:\n if i not in df_seq_comp.columns:\n df_seq_comp[i] = 0\n\n df_seq_comp = df_seq_comp.fillna(0.0)\n df_seq_comp.index = seq_df.index\n return df_seq_comp\n\n\ndef get_nearest_neighbor_proteins(fasta_host, fasta_trap):\n \"\"\"\n Retrieve the nearest neighbors for all proteins in the host fasta.\n\n Args:\n fasta_host:\n fasta_trap:\n\n Returns:\n df, dataframe with proteins for host and entrapment database.\n \"\"\"\n # make nearest neighbor thing\n # ger protein table\n df_prot_host = fasta2dataframe(fasta_host)\n df_comp_host = compute_composition_df(df_prot_host)\n df_peptides_host = digest_protein_df(df_prot_host)\n\n df_prot_trap = fasta2dataframe(fasta_trap)\n df_comp_trap = compute_composition_df(df_prot_trap)\n df_peptides_trap = digest_protein_df(df_prot_trap)\n\n # perform the filtering\n df_comp_trap, df_prot_trap = filter_trap_fasta(df_prot_trap, df_comp_trap,\n df_peptides_trap, df_peptides_host)\n\n # get best human protein matching by euclidean distance\n neighbor = []\n distances = np.zeros(df_comp_host.shape[0])\n for ii, row in enumerate(df_comp_host.iterrows()):\n # compute the distance from the query (current row) to all other proteins in the\n # trap database\n ary = distance.cdist(df_comp_trap, pd.DataFrame(row[1]).transpose(), metric='euclidean')\n # retrieve minimal disance entry here and use the index as neighbor and include\n # it to the fasta df later\n neighbor.append(df_comp_trap[ary == ary.min()].index.values[0])\n distances[ii] = ary.min()\n # print identifier for id mapping\n # neighbors = [i.split(\"|\")[1] for i in np.ravel(neighbor)]\n fasta_df_entrapment = df_prot_trap.loc[neighbor]\n # store seed-neighbor pairs\n fasta_df_entrapment[\"host_seed\"] = df_comp_host.index\n\n final_fasta_df = pd.concat([df_prot_host, fasta_df_entrapment])\n final_fasta_df[\"db_type\"] = [\"host\"] * len(df_prot_host) + [\"trap\"] * len(fasta_df_entrapment)\n return final_fasta_df\n\n\ndef filter_trap_fasta(df_prot_trap, df_comp_trap, df_peptides_trap, df_peptides_host):\n \"\"\"\n Remove proteins with peptides that also occur in the host database.\n\n Args:\n df_comp_trap: df, protein entries from the entrapment database (composition)\n df_peptides_host: df, peptides from the host fasta\n df_peptides_trap: df, peptides from the entrapment fasta\n df_prot_trap: df, proteins from the entrapment database\n\n Returns:\n (df_comp_trap, df_prot_trap), returns a tuple of valid (unique) trapment ids.\n \"\"\"\n # make sure I/L witht he same mass doesnt mess with overlapping peptides\n df_peptides_host[\"sequence\"] = df_peptides_host[\"sequence\"].str.replace(\"I\", \"L\")\n df_peptides_trap[\"sequence\"] = df_peptides_trap[\"sequence\"].str.replace(\"I\", \"L\")\n\n df_peptides_host = df_peptides_host.set_index(\"sequence\")\n df_peptides_trap = df_peptides_trap.set_index(\"sequence\")\n df_joined = df_peptides_trap.join(df_peptides_host, 
rsuffix=\"_host\", lsuffix=\"_trap\",\n how=\"left\")\n blacklist_proteins_trap = df_joined.dropna(subset=[\"protein_host\"])[\"protein_trap\"].unique()\n # drop proteins from dfs\n df_prot_trap = df_prot_trap.drop(blacklist_proteins_trap)\n df_comp_trap = df_comp_trap.drop(blacklist_proteins_trap)\n return df_comp_trap, df_prot_trap\n\n\ndef fasta2dataframe(FASTA):\n \"\"\"\n Convert the entries in the FASTA file to a dataframe with ID, sequence and Type as column.\n\n Parameters\n FASTA : str\n Location of the FASTA file..\n\n Returns\n dataframe\n \"\"\"\n # store proteins and ids here\n unique_proteins = []\n unique_ids = []\n\n with open(FASTA, mode='rt') as ffile:\n for description, sequence in fasta.FASTA(ffile):\n unique_proteins.append(sequence)\n unique_ids.append(description)\n\n # convert to dataframe\n df = pd.DataFrame(unique_proteins)\n df.columns = [\"sequence\"]\n df[\"Type\"] = \"Protein\"\n df.index = unique_ids\n\n # make sure ordering is lost\n df = df.sample(frac=1, random_state=42)\n return df\n\n\ndef digest_protein_df(df_fasta, rule=\"trypsin\", min_length=6):\n \"\"\"\n Digest a dataframe of proteins into a dataframe with unique peptides.\n\n Args:\n df_fasta: df, dataframe with protein, sequence columns\n rule: str, pyteomics string identifier\n min_length: int, minimal length for a peptide\n\n Returns:\n peptide_df with <protein_name:peptide> entries.\n \"\"\"\n # create all peptide sequences first\n cleaved = df_fasta[\"sequence\"].apply(parser.cleave, args=(rule,)).explode()\n return cleaved[cleaved.apply(len) >= min_length].rename_axis(\"protein\").reset_index()\n" ]
[ [ "pandas.concat", "numpy.zeros", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
oliverdutton/probability
[ "951adff452dbb26cf78a6765c10f70f18a934918" ]
[ "tensorflow_probability/python/experimental/sts_gibbs/gibbs_sampler.py" ]
[ "# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Gibbs sampling inference for (a special case of) STS models.\n\nThese methods implement Gibbs sampling steps for STS models that combine a\nsingle LocalLevel or LocalLinearTrend component with a linear regression\ncomponent, with conjugate\nInverseGamma priors on the scale and a Gaussian prior on the weights. This model\nclass is somewhat general, in that we assume that any seasonal/holiday variation\ncan be encoded in the design matrix of the linear regression. The intent is to\nsupport deployment of STS inference in latency-sensitive applications.\n\nThis Gibbs sampler tends to reach acceptable answers much more quickly than\nfitting the same models by gradient-based methods (VI or HMC). Because it does\nnot marginalize out the linear Gaussian latents analytically, it may be more\nprone to getting stuck at a single (perhaps suboptimal) posterior explanation;\nhowever, in practice it often finds good solutions.\n\nThe speed advantage of Gibbs sampling in this model likely arises from a\ncombination of:\n\n- Analytically sampling the regression weights once per sampling cycle, instead\n of requiring a quadratically-expensive update at each timestep of Kalman\n filtering (as in DynamicLinearRegression), or relying on gradient-based\n approximate inference (as in LinearRegression).\n- Exploiting conjugacy to sample the scale parameters directly.\n- Specializing the Gibbs step for the latent level to the case of a\n scalar process with identity transitions.\n\nIt would be possible to expand this sampler to support additional STS models,\npotentially at a cost with respect to some of these performance advantages (and\nadditional code):\n\n- To support general latent state-space models, one would augment the sampler\n state to track all parameters in the model. Each component would need to\n register Gibbs sampling steps for its parameters (assuming conjugate priors),\n as a function of the sampled latent trajectory. 
The resampling steps for the\n  observation_noise_scale and level_scale parameters would then be replaced with\n  a generic loop over all parameters in the model.\n- For specific models it may be possible to implement an efficient prior\n  sampling algorithm, analogous to `LocalLevelStateSpaceModel._joint_sample_n`.\n  This may be significantly faster than the generic sampler and can speed up\n  the posterior sampling step for the latent trajectory.\n\"\"\"\n\nimport collections\n\nimport numpy as np\nimport six\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python import sts\nfrom tensorflow_probability.python.distributions import normal_conjugate_posteriors\nfrom tensorflow_probability.python.experimental import distributions as tfde\nfrom tensorflow_probability.python.experimental.sts_gibbs import spike_and_slab\nfrom tensorflow_probability.python.internal import distribution_util as dist_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.sts import components as sts_components\nfrom tensorflow_probability.python.sts.internal import util as sts_util\n\n# The sampler state stores current values for each model parameter,\n# and auxiliary quantities such as the latent level. It should have the property\n# that `model.make_state_space_model(num_timesteps, GibbsSamplerState(...))`\n# behaves properly -- i.e., that the state contains all model\n# parameters *in the same order* as they are listed in `model.parameters`. This\n# is currently enforced by construction in `build_model_for_gibbs_fitting`.\nGibbsSamplerState = collections.namedtuple(  # pylint: disable=unexpected-keyword-arg\n    'GibbsSamplerState',\n    ['observation_noise_scale',\n     'level_scale',\n     'weights',\n     'level',\n     'seed',\n     'slope_scale',\n     'slope',])\n# Make the two slope-related quantities optional, for backwards compatibility.\nGibbsSamplerState.__new__.__defaults__ = (0.,  # slope_scale\n                                          0.)  # slope\n\n\n# TODO(b/151571025): revert to `tfd.InverseGamma` once its sampler is XLA-able.\nclass XLACompilableInverseGamma(tfd.InverseGamma):\n\n  def _sample_n(self, n, seed=None):\n    return 1. / tfd.Gamma(\n        concentration=self.concentration,\n        rate=self.scale).sample(n, seed=seed)\n\n\nclass DummySpikeAndSlabPrior(tfd.Distribution):\n  \"\"\"Dummy prior on sparse regression weights.\"\"\"\n\n  def __init__(self):\n    super().__init__(\n        dtype=tf.float32,\n        reparameterization_type=tfd.FULLY_REPARAMETERIZED,\n        validate_args=False,\n        allow_nan_stats=True,\n        name='dummy_spike_and_slab_prior')\n\n  @property\n  def event_shape(self):\n    # Present as a vector-valued distribution.\n    return tf.TensorShape([1])\n\n  def _parameter_control_dependencies(self, is_init):\n    if not is_init:\n      raise ValueError(\n          'Cannot explicitly operate on a spike-and-slab prior; '\n          'only Gibbs sampling is supported.')\n    return []\n\n  def _default_event_space_bijector(self):\n    return tfb.Identity()\n\n\nclass SpikeAndSlabSparseLinearRegression(sts_components.LinearRegression):\n  \"\"\"Dummy component for sparse regression with a spike-and-slab prior.\"\"\"\n\n  def __init__(self,\n               design_matrix,\n               weights_prior=None,\n               sparse_weights_nonzero_prob=0.5,\n               name=None):\n    # Extract precision matrix from a multivariate normal prior.\n    weights_prior_precision = None\n    if hasattr(weights_prior, 'precision'):\n      weights_prior_precision = weights_prior.precision()\n    elif weights_prior is not None:\n      inverse_scale = weights_prior.scale.inverse()\n      weights_prior_precision = inverse_scale.matmul(inverse_scale,\n                                                     adjoint=True).to_dense()\n    self._weights_prior_precision = weights_prior_precision\n    self._sparse_weights_nonzero_prob = sparse_weights_nonzero_prob\n    super().__init__(design_matrix=design_matrix,\n                     weights_prior=DummySpikeAndSlabPrior(),\n                     name=name)\n\n\ndef _tile_normal_to_mvn_diag(normal_dist, dim):\n  return tfd.MultivariateNormalDiag(\n      loc=normal_dist.loc[..., tf.newaxis],\n      scale_diag=(normal_dist.scale[..., tf.newaxis] *\n                  tf.ones([dim], dtype=normal_dist.scale.dtype)))\n\n\ndef _is_multivariate_normal(dist):\n  return (isinstance(dist, tfd.MultivariateNormalLinearOperator) or\n          isinstance(dist,\n                     tfde.MultivariateNormalPrecisionFactorLinearOperator))\n\n\ndef build_model_for_gibbs_fitting(observed_time_series,\n                                  design_matrix,\n                                  weights_prior,\n                                  level_variance_prior,\n                                  observation_noise_variance_prior,\n                                  slope_variance_prior=None,\n                                  sparse_weights_nonzero_prob=None):\n  \"\"\"Builds a StructuralTimeSeries model instance that supports Gibbs sampling.\n\n  To support Gibbs sampling, a model must have conjugate priors on all\n  scale and weight parameters, and must be constructed so that\n  `model.parameters` matches the parameters and ordering specified by the\n  `GibbsSamplerState` namedtuple. Currently, this includes (only) models\n  consisting of the sum of a LocalLevel or LocalLinearTrend component with\n  a LinearRegression or SpikeAndSlabSparseLinearRegression component.\n\n  Args:\n    observed_time_series: optional `float` `Tensor` of shape `[..., T, 1]`\n      (omitting the trailing unit dimension is also supported when `T > 1`),\n      specifying an observed time series. May optionally be an instance of\n      `tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify\n      timesteps with missing observations.\n    design_matrix: float `Tensor` of shape `concat([batch_shape,\n      [num_timesteps, num_features]])`. This may also optionally be\n      an instance of `tf.linalg.LinearOperator`.\n    weights_prior: Optional distribution instance specifying a normal prior on\n      weights. 
This may be a multivariate normal instance with event shape\n `[num_features]`, or a scalar normal distribution with event shape `[]`.\n In either case, the batch shape must broadcast to the batch shape of\n `observed_time_series`. If a `sparse_weights_nonzero_prob` is specified,\n requesting sparse regression, then the `weights_prior` mean is ignored\n (because nonzero means are not currently implemented by the spike-and-slab\n sampler). In this case, `weights_prior=None` is also valid, and will use\n the default prior of the spike-and-slab sampler.\n level_variance_prior: An instance of `tfd.InverseGamma` representing a prior\n on the level variance (`level_scale**2`) of a local level model. May have\n batch shape broadcastable to the batch shape of `observed_time_series`.\n observation_noise_variance_prior: An instance of `tfd.InverseGamma`\n representing a prior on the observation noise variance (\n `observation_noise_scale**2`). May have batch shape broadcastable to the\n batch shape of `observed_time_series`.\n slope_variance_prior: Optional instance of `tfd.InverseGamma` representing\n a prior on slope variance (`slope_scale**2`) of a local linear trend\n model. May have batch shape broadcastable to the batch shape of\n `observed_time_series`. If specified, a local linear trend model is used\n rather than a local level model.\n Default value: `None`.\n sparse_weights_nonzero_prob: Optional scalar float `Tensor` prior\n probability that any given feature has nonzero weight. If specified, this\n triggers a sparse regression with a spike-and-slab prior, where\n `sparse_weights_nonzero_prob` is the prior probability of the 'slab'\n component.\n Default value: `None`.\n Returns:\n model: A `tfp.sts.StructuralTimeSeries` model instance.\n \"\"\"\n if isinstance(weights_prior, tfd.Normal):\n # Canonicalize scalar normal priors as diagonal MVNs.\n if isinstance(design_matrix, tf.linalg.LinearOperator):\n num_features = design_matrix.shape_tensor()[-1]\n else:\n num_features = tf.shape(design_matrix)[-1]\n weights_prior = _tile_normal_to_mvn_diag(weights_prior, num_features)\n elif weights_prior is not None and not _is_multivariate_normal(weights_prior):\n raise ValueError('Weights prior must be a normal distribution or `None`.')\n if not isinstance(level_variance_prior, tfd.InverseGamma):\n raise ValueError(\n 'Level variance prior must be an inverse gamma distribution.')\n if (slope_variance_prior is not None and\n not isinstance(slope_variance_prior, tfd.InverseGamma)):\n raise ValueError(\n 'Slope variance prior must be an inverse gamma distribution; got: {}.'\n .format(slope_variance_prior))\n if not isinstance(observation_noise_variance_prior, tfd.InverseGamma):\n raise ValueError('Observation noise variance prior must be an inverse '\n 'gamma distribution.')\n\n sqrt = tfb.Invert(tfb.Square()) # Converts variance priors to scale priors.\n\n # Level or trend component.\n if slope_variance_prior:\n local_variation = sts.LocalLinearTrend(\n observed_time_series=observed_time_series,\n level_scale_prior=sqrt(level_variance_prior),\n slope_scale_prior=sqrt(slope_variance_prior),\n name='local_linear_trend')\n else:\n local_variation = sts.LocalLevel(\n observed_time_series=observed_time_series,\n level_scale_prior=sqrt(level_variance_prior),\n name='local_level')\n\n # Regression component.\n if sparse_weights_nonzero_prob is not None:\n regression = SpikeAndSlabSparseLinearRegression(\n design_matrix=design_matrix,\n weights_prior=weights_prior,\n 
sparse_weights_nonzero_prob=sparse_weights_nonzero_prob,\n        name='sparse_regression')\n  else:\n    regression = sts.LinearRegression(design_matrix=design_matrix,\n                                      weights_prior=weights_prior,\n                                      name='regression')\n  model = sts.Sum([local_variation, regression],\n                  observed_time_series=observed_time_series,\n                  observation_noise_scale_prior=sqrt(\n                      observation_noise_variance_prior),\n                  # The Gibbs sampling steps in this file do not account for an\n                  # offset to the observed series. Instead, we assume the\n                  # observed series has already been centered and\n                  # scale-normalized.\n                  constant_offset=0.)\n  model.supports_gibbs_sampling = True\n  return model\n\n\ndef _get_design_matrix(model):\n  \"\"\"Returns the design matrix for an STS model with a regression component.\"\"\"\n  design_matrices = [component.design_matrix for component in model.components\n                     if hasattr(component, 'design_matrix')]\n  if not design_matrices:\n    raise ValueError('Model does not contain a regression component.')\n  if len(design_matrices) > 1:\n    raise ValueError('Model contains multiple regression components.')\n  return design_matrices[0]\n\n\ndef fit_with_gibbs_sampling(model,\n                            observed_time_series,\n                            num_chains=(),\n                            num_results=2000,\n                            num_warmup_steps=200,\n                            initial_state=None,\n                            seed=None):\n  \"\"\"Fits parameters for an STS model using Gibbs sampling.\n\n  Args:\n    model: A `tfp.sts.StructuralTimeSeries` model instance returned by\n      `build_model_for_gibbs_fitting`.\n    observed_time_series: `float` `Tensor` of shape `[..., T, 1]`\n      (omitting the trailing unit dimension is also supported when `T > 1`),\n      specifying an observed time series. May optionally be an instance of\n      `tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify\n      timesteps with missing observations.\n    num_chains: Optional int to indicate the number of parallel MCMC chains.\n      Defaults to an empty tuple to sample a single chain.\n    num_results: Optional int to indicate number of MCMC samples.\n    num_warmup_steps: Optional int to indicate the number of warmup MCMC\n      samples to discard.\n    initial_state: A `GibbsSamplerState` structure of the initial states of the\n      MCMC chains.\n    seed: Optional `Python` `int` seed controlling the sampled values.\n  Returns:\n    samples: A `GibbsSamplerState` structure of posterior samples.\n  \"\"\"\n  if not hasattr(model, 'supports_gibbs_sampling'):\n    raise ValueError('This STS model does not support Gibbs sampling. Models '\n                     'for Gibbs sampling must be created using the '\n                     'method `build_model_for_gibbs_fitting`.')\n  if not tf.nest.is_nested(num_chains):\n    num_chains = [num_chains]\n\n  [\n      observed_time_series,\n      is_missing\n  ] = sts_util.canonicalize_observed_time_series_with_mask(\n      observed_time_series)\n  dtype = observed_time_series.dtype\n\n  # The canonicalized time series always has trailing dimension `1`,\n  # because although LinearGaussianSSMs support vector observations, STS models\n  # describe scalar time series only. For our purposes it'll be cleaner to\n  # remove this dimension.\n  observed_time_series = observed_time_series[..., 0]\n  batch_shape = prefer_static.concat(\n      [num_chains,\n       prefer_static.shape(observed_time_series)[:-1]], axis=-1)\n  level_slope_shape = prefer_static.concat(\n      [num_chains, prefer_static.shape(observed_time_series)], axis=-1)\n\n  # Treat a LocalLevel model as the special case of LocalLinearTrend where\n  # the slope_scale is always zero.\n  initial_slope_scale = 0.\n  initial_slope = 0.\n  if isinstance(model.components[0], sts.LocalLinearTrend):\n    initial_slope_scale = 1. * tf.ones(batch_shape, dtype=dtype)\n    initial_slope = tf.zeros(level_slope_shape, dtype=dtype)\n\n  if initial_state is None:\n    initial_state = GibbsSamplerState(\n        observation_noise_scale=tf.ones(batch_shape, dtype=dtype),\n        level_scale=tf.ones(batch_shape, dtype=dtype),\n        slope_scale=initial_slope_scale,\n        weights=tf.zeros(prefer_static.concat([\n            batch_shape,\n            _get_design_matrix(model).shape[-1:]], axis=0), dtype=dtype),\n        level=tf.zeros(level_slope_shape, dtype=dtype),\n        slope=initial_slope,\n        seed=None)  # Set below.\n\n  if isinstance(seed, six.integer_types):\n    tf.random.set_seed(seed)\n\n  # Always use the passed-in `seed` arg, ignoring any seed in the initial state.\n  initial_state = initial_state._replace(\n      seed=samplers.sanitize_seed(seed, salt='initial_GibbsSamplerState'))\n\n  sampler_loop_body = _build_sampler_loop_body(model,\n                                               observed_time_series,\n                                               is_missing)\n\n  samples = tf.scan(sampler_loop_body,\n                    np.arange(num_warmup_steps + num_results),\n                    initial_state)\n  return tf.nest.map_structure(lambda x: x[num_warmup_steps:], samples)\n\n\ndef one_step_predictive(model,\n                        posterior_samples,\n                        num_forecast_steps=0,\n                        original_mean=0.,\n                        original_scale=1.,\n                        thin_every=10):\n  \"\"\"Constructs a one-step-ahead predictive distribution at every timestep.\n\n  Unlike the generic `tfp.sts.one_step_predictive`, this method uses the\n  latent levels from Gibbs sampling to efficiently construct a predictive\n  distribution that mixes over posterior samples. The predictive distribution\n  may also include additional forecast steps.\n\n  This method returns the predictive distributions for each timestep given\n  previous timesteps and sampled model parameters, `p(observed_time_series[t] |\n  observed_time_series[:t], weights, observation_noise_scale)`. Note that the\n  posterior values of the weights and noise scale will in general be informed\n  by observations from all timesteps *including the step being predicted*, so\n  this is not a strictly kosher probabilistic quantity, but in general we assume\n  that it's close, i.e., that the step being predicted had very small individual\n  impact on the overall parameter posterior.\n\n  Args:\n    model: A `tfp.sts.StructuralTimeSeries` model instance. This must be of the\n      form constructed by `build_model_for_gibbs_fitting`.\n    posterior_samples: A `GibbsSamplerState` instance in which each element is a\n      `Tensor` with initial dimension of size `num_samples`.\n    num_forecast_steps: Python `int` number of additional forecast steps to\n      append.\n      Default value: `0`.\n    original_mean: Optional scalar float `Tensor`, added to the predictive\n      distribution to undo the effect of input normalization.\n      Default value: `0.`\n    original_scale: Optional scalar float `Tensor`, used to rescale the\n      predictive distribution to undo the effect of input normalization.\n      Default value: `1.`\n    thin_every: Optional Python `int` factor by which to thin the posterior\n      samples, to reduce complexity of the predictive distribution. 
For example,\n if `thin_every=10`, every `10`th sample will be used.\n Default value: `10`.\n Returns:\n predictive_dist: A `tfd.MixtureSameFamily` instance of event shape\n `[num_timesteps + num_forecast_steps]` representing the predictive\n distribution of each timestep given previous timesteps.\n \"\"\"\n dtype = dtype_util.common_dtype([\n posterior_samples.level_scale,\n posterior_samples.observation_noise_scale,\n posterior_samples.level,\n original_mean,\n original_scale], dtype_hint=tf.float32)\n num_observed_steps = prefer_static.shape(posterior_samples.level)[-1]\n\n original_mean = tf.convert_to_tensor(original_mean, dtype=dtype)\n original_scale = tf.convert_to_tensor(original_scale, dtype=dtype)\n thinned_samples = tf.nest.map_structure(lambda x: x[::thin_every],\n posterior_samples)\n\n if prefer_static.rank_from_shape( # If no slope was inferred, treat as zero.\n prefer_static.shape(thinned_samples.slope)) <= 1:\n thinned_samples = thinned_samples._replace(\n slope=tf.zeros_like(thinned_samples.level),\n slope_scale=tf.zeros_like(thinned_samples.level_scale))\n\n num_steps_from_last_observation = tf.concat([\n tf.ones([num_observed_steps], dtype=dtype),\n tf.range(1, num_forecast_steps + 1, dtype=dtype)], axis=0)\n\n # The local linear trend model expects that the level at step t + 1 is equal\n # to the level at step t, plus the slope at time t - 1,\n # plus transition noise of scale 'level_scale' (which we account for below).\n if num_forecast_steps > 0:\n num_batch_dims = prefer_static.rank_from_shape(\n prefer_static.shape(thinned_samples.level)) - 2\n # All else equal, the current level will remain stationary.\n forecast_level = tf.tile(thinned_samples.level[..., -1:],\n tf.concat([tf.ones([num_batch_dims + 1],\n dtype=tf.int32),\n [num_forecast_steps]], axis=0))\n # If the model includes slope, the level will steadily increase.\n forecast_level += (thinned_samples.slope[..., -1:] *\n tf.range(1., num_forecast_steps + 1.,\n dtype=forecast_level.dtype))\n\n level_pred = tf.concat([thinned_samples.level[..., :1], # t == 0\n (thinned_samples.level[..., :-1] +\n thinned_samples.slope[..., :-1]) # 1 <= t < T\n ] + (\n [forecast_level]\n if num_forecast_steps > 0 else []),\n axis=-1)\n\n design_matrix = _get_design_matrix(\n model).to_dense()[:num_observed_steps + num_forecast_steps]\n regression_effect = tf.linalg.matvec(design_matrix, thinned_samples.weights)\n\n y_mean = ((level_pred + regression_effect) *\n original_scale[..., tf.newaxis] + original_mean[..., tf.newaxis])\n\n # To derive a forecast variance, including slope uncertainty, let\n # `r[:k]` be iid Gaussian RVs with variance `level_scale**2` and `s[:k]` be\n # iid Gaussian RVs with variance `slope_scale**2`. Then the forecast level at\n # step `T + k` can be written as\n # (level[T] + # Last known level.\n # r[0] + ... + r[k] + # Sum of random walk terms on level.\n # slope[T] * k # Contribution from last known slope.\n # (k - 1) * s[0] + # Contributions from random walk terms on slope.\n # (k - 2) * s[1] +\n # ... +\n # 1 * s[k - 1])\n # which has variance of\n # (level_scale**2 * k +\n # slope_scale**2 * ( (k - 1)**2 +\n # (k - 2)**2 +\n # ... 
+ 1 ))\n # Here the `slope_scale` coefficient is the `k - 1`th square pyramidal\n # number [1], which is given by\n # (k - 1) * k * (2 * k - 1) / 6.\n #\n # [1] https://en.wikipedia.org/wiki/Square_pyramidal_number\n variance_from_level = (thinned_samples.level_scale[..., tf.newaxis]**2 *\n num_steps_from_last_observation)\n variance_from_slope = thinned_samples.slope_scale[..., tf.newaxis]**2 * (\n (num_steps_from_last_observation - 1) *\n num_steps_from_last_observation *\n (2 * num_steps_from_last_observation - 1)) / 6.\n y_scale = (original_scale * tf.sqrt(\n thinned_samples.observation_noise_scale[..., tf.newaxis]**2 +\n variance_from_level + variance_from_slope))\n\n num_posterior_draws = prefer_static.shape(y_mean)[0]\n return tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n logits=tf.zeros([num_posterior_draws], dtype=y_mean.dtype)),\n components_distribution=tfd.Normal(\n loc=dist_util.move_dimension(y_mean, 0, -1),\n scale=dist_util.move_dimension(y_scale, 0, -1)))\n\n\ndef _resample_weights(design_matrix, target_residuals, observation_noise_scale,\n weights_prior_scale, seed=None):\n \"\"\"Samples regression weights from their conditional posterior.\n\n This assumes a conjugate normal regression model,\n\n ```\n weights ~ Normal(loc=0., covariance_matrix=weights_prior_scale**2 * I)\n target_residuals ~ Normal(loc=matvec(design_matrix, weights),\n covariance_matrix=observation_noise_scale**2 * I)\n ```\n\n and returns a sample from `p(weights | target_residuals,\n observation_noise_scale, design_matrix)`.\n\n Args:\n design_matrix: Float `Tensor` design matrix of shape\n `[..., num_timesteps, num_features]`.\n target_residuals: Float `Tensor` of shape `[..., num_observations]`\n observation_noise_scale: Scalar float `Tensor` (with optional batch shape)\n standard deviation of the iid observation noise.\n weights_prior_scale: Instance of `tf.linalg.LinearOperator` of shape\n `[num_features, num_features]` (with optional batch shape),\n specifying the scale of a multivariate Normal prior on regression\n weights.\n seed: Optional `Python` `int` seed controlling the sampled values.\n Returns:\n weights: Float `Tensor` of shape `[..., num_features]`, sampled from\n the conditional posterior `p(weights | target_residuals,\n observation_noise_scale, weights_prior_scale)`.\n \"\"\"\n weights_mean, weights_prec = (\n normal_conjugate_posteriors.mvn_conjugate_linear_update(\n linear_transformation=design_matrix,\n observation=target_residuals,\n prior_scale=weights_prior_scale,\n likelihood_scale=tf.linalg.LinearOperatorScaledIdentity(\n num_rows=prefer_static.shape(design_matrix)[-2],\n multiplier=observation_noise_scale)))\n sampled_weights = weights_prec.cholesky().solvevec(\n samplers.normal(\n shape=prefer_static.shape(weights_mean),\n dtype=design_matrix.dtype, seed=seed), adjoint=True)\n return weights_mean + sampled_weights\n\n\ndef _resample_latents(observed_residuals,\n level_scale,\n observation_noise_scale,\n initial_state_prior,\n slope_scale=None,\n is_missing=None,\n sample_shape=(),\n seed=None):\n \"\"\"Uses Durbin-Koopman sampling to resample the latent level and slope.\n\n Durbin-Koopman sampling [1] is an efficient algorithm to sample from the\n posterior latents of a linear Gaussian state space model. This method\n implements the algorithm.\n\n [1] Durbin, J. and Koopman, S.J. 
(2002) A simple and efficient simulation\n smoother for state space time series analysis.\n\n Args:\n observed_residuals: Float `Tensor` of shape `[..., num_observations]`,\n specifying the centered observations `(x - loc)`.\n level_scale: Float scalar `Tensor` (may contain batch dimensions)\n specifying the standard deviation of the level random walk steps.\n observation_noise_scale: Float scalar `Tensor` (may contain batch\n dimensions) specifying the standard deviation of the observation noise.\n initial_state_prior: instance of `tfd.MultivariateNormalLinearOperator`.\n slope_scale: Optional float scalar `Tensor` (may contain batch dimensions)\n specifying the standard deviation of slope random walk steps. If\n provided, a `LocalLinearTrend` model is used, otherwise, a `LocalLevel`\n model is used.\n is_missing: Optional `bool` `Tensor` missingness mask.\n sample_shape: Optional `int` `Tensor` shape of samples to draw.\n seed: `int` `Tensor` of shape `[2]` controlling stateless sampling.\n Returns:\n latents: Float `Tensor` resampled latent level, of shape\n `[..., num_timesteps, latent_size]`, where `...` concatenates the\n sample shape with any batch shape from `observed_time_series`.\n \"\"\"\n\n num_timesteps = prefer_static.shape(observed_residuals)[-1]\n if slope_scale is None:\n ssm = sts.LocalLevelStateSpaceModel(\n num_timesteps=num_timesteps,\n initial_state_prior=initial_state_prior,\n observation_noise_scale=observation_noise_scale,\n level_scale=level_scale)\n else:\n ssm = sts.LocalLinearTrendStateSpaceModel(\n num_timesteps=num_timesteps,\n initial_state_prior=initial_state_prior,\n observation_noise_scale=observation_noise_scale,\n level_scale=level_scale,\n slope_scale=slope_scale)\n\n return ssm.posterior_sample(observed_residuals[..., tf.newaxis],\n sample_shape=sample_shape,\n mask=is_missing,\n seed=seed)\n\n\ndef _resample_scale(prior, observed_residuals,\n is_missing=None,\n seed=None):\n \"\"\"Samples a scale parameter from its conditional posterior.\n\n We assume the conjugate InverseGamma->Normal model:\n\n ```\n scale ~ Sqrt(InverseGamma(prior.concentration, prior.scale))\n for i in [1, ..., num_observations]:\n x[i] ~ Normal(loc, scale)\n ```\n\n in which `loc` is known, and return a sample from `p(scale | x)`.\n\n Args:\n prior: Prior distribution as a `tfd.InverseGamma` instance.\n observed_residuals: Float `Tensor` of shape `[..., num_observations]`,\n specifying the centered observations `(x - loc)`.\n is_missing: Optional `bool` `Tensor` of shape `[..., num_observations]`. 
A\n `True` value indicates that the corresponding observation is missing.\n seed: Optional `Python` `int` seed controlling the sampled value.\n Returns:\n sampled_scale: A `Tensor` sample from the posterior `p(scale | x)`.\n \"\"\"\n if is_missing is not None:\n num_missing = tf.reduce_sum(tf.cast(is_missing, observed_residuals.dtype),\n axis=-1)\n num_observations = prefer_static.shape(observed_residuals)[-1]\n if is_missing is not None:\n observed_residuals = tf.where(is_missing,\n tf.zeros_like(observed_residuals),\n observed_residuals)\n num_observations -= num_missing\n\n variance_posterior = type(prior)(\n concentration=prior.concentration + num_observations / 2.,\n scale=prior.scale + tf.reduce_sum(\n tf.square(observed_residuals), axis=-1) / 2.)\n new_scale = tf.sqrt(variance_posterior.sample(seed=seed))\n\n # Support truncated priors.\n if hasattr(prior, 'upper_bound') and prior.upper_bound is not None:\n new_scale = tf.minimum(new_scale, prior.upper_bound)\n\n return new_scale\n\n\ndef _build_sampler_loop_body(model,\n observed_time_series,\n is_missing=None):\n \"\"\"Builds a Gibbs sampler for the given model and observed data.\n\n Args:\n model: A `tf.sts.StructuralTimeSeries` model instance. This must be of the\n form constructed by `build_model_for_gibbs_sampling`.\n observed_time_series: Float `Tensor` time series of shape\n `[..., num_timesteps]`.\n is_missing: Optional `bool` `Tensor` of shape `[..., num_timesteps]`. A\n `True` value indicates that the observation for that timestep is missing.\n Returns:\n sampler_loop_body: Python callable that performs a single cycle of Gibbs\n sampling. Its first argument is a `GibbsSamplerState`, and it returns a\n new `GibbsSamplerState`. The second argument (passed by `tf.scan`) is\n ignored.\n \"\"\"\n level_component = model.components[0]\n if not (isinstance(level_component, sts.LocalLevel) or\n isinstance(level_component, sts.LocalLinearTrend)):\n raise ValueError('Expected the first model component to be an instance of '\n '`tfp.sts.LocalLevel` or `tfp.sts.LocalLinearTrend`; '\n 'instead saw {}'.format(level_component))\n model_has_slope = isinstance(level_component, sts.LocalLinearTrend)\n\n regression_component = model.components[1]\n if not (isinstance(regression_component, sts.LinearRegression) or\n isinstance(regression_component, SpikeAndSlabSparseLinearRegression)):\n raise ValueError('Expected the second model component to be an instance of '\n '`tfp.sts.LinearRegression` or '\n '`SpikeAndSlabSparseLinearRegression`; '\n 'instead saw {}'.format(regression_component))\n model_has_spike_slab_regression = isinstance(\n regression_component, SpikeAndSlabSparseLinearRegression)\n\n if is_missing is not None: # Ensure series does not contain NaNs.\n observed_time_series = tf.where(is_missing,\n tf.zeros_like(observed_time_series),\n observed_time_series)\n\n num_observed_steps = prefer_static.shape(observed_time_series)[-1]\n design_matrix = _get_design_matrix(model).to_dense()[:num_observed_steps]\n if is_missing is not None:\n # Replace design matrix with zeros at unobserved timesteps. 
This ensures\n # they will not affect the posterior on weights.\n design_matrix = tf.where(is_missing[..., tf.newaxis],\n tf.zeros_like(design_matrix),\n design_matrix)\n\n # Untransform scale priors -> variance priors by reaching thru Sqrt bijector.\n observation_noise_param = model.parameters[0]\n if 'observation_noise' not in observation_noise_param.name:\n raise ValueError('Model parameters {} do not match the expected sampler '\n 'state.'.format(model.parameters))\n observation_noise_variance_prior = observation_noise_param.prior.distribution\n if model_has_slope:\n level_scale_variance_prior, slope_scale_variance_prior = [\n p.prior.distribution for p in level_component.parameters]\n else:\n level_scale_variance_prior = (\n level_component.parameters[0].prior.distribution)\n\n if model_has_spike_slab_regression:\n spike_and_slab_sampler = spike_and_slab.SpikeSlabSampler(\n design_matrix,\n weights_prior_precision=regression_component._weights_prior_precision, # pylint: disable=protected-access\n nonzero_prior_prob=regression_component._sparse_weights_nonzero_prob, # pylint: disable=protected-access\n observation_noise_variance_prior_concentration=(\n observation_noise_variance_prior.concentration),\n observation_noise_variance_prior_scale=(\n observation_noise_variance_prior.scale),\n observation_noise_variance_upper_bound=(\n observation_noise_variance_prior.upper_bound\n if hasattr(observation_noise_variance_prior, 'upper_bound')\n else None))\n else:\n weights_prior_scale = (\n regression_component.parameters[0].prior.scale)\n\n def sampler_loop_body(previous_sample, _):\n \"\"\"Runs one sampler iteration, resampling all model variables.\"\"\"\n\n (weights_seed,\n level_seed,\n observation_noise_scale_seed,\n level_scale_seed,\n loop_seed) = samplers.split_seed(\n previous_sample.seed, n=5, salt='sampler_loop_body')\n # Preserve backward-compatible seed behavior by splitting slope separately.\n slope_scale_seed, = samplers.split_seed(\n previous_sample.seed, n=1, salt='sampler_loop_body_slope')\n\n # We encourage a reasonable initialization by sampling the weights first,\n # so at the first step they are regressed directly against the observed\n # time series. 
If we instead sampled the level first it might 'explain away'\n # some observed variation that we would ultimately prefer to explain through\n # the regression weights, because the level can represent arbitrary\n # variation, while the weights are limited to representing variation in the\n # subspace given by the design matrix.\n if model_has_spike_slab_regression:\n (observation_noise_variance,\n weights) = spike_and_slab_sampler.sample_noise_variance_and_weights(\n initial_nonzeros=tf.not_equal(previous_sample.weights, 0.),\n targets=observed_time_series - previous_sample.level,\n seed=weights_seed)\n observation_noise_scale = tf.sqrt(observation_noise_variance)\n else:\n weights = _resample_weights(\n design_matrix=design_matrix,\n target_residuals=observed_time_series - previous_sample.level,\n observation_noise_scale=previous_sample.observation_noise_scale,\n weights_prior_scale=weights_prior_scale,\n seed=weights_seed)\n # Noise scale will be resampled below.\n observation_noise_scale = previous_sample.observation_noise_scale\n\n regression_residuals = observed_time_series - tf.linalg.matvec(\n design_matrix, weights)\n latents = _resample_latents(\n observed_residuals=regression_residuals,\n level_scale=previous_sample.level_scale,\n slope_scale=previous_sample.slope_scale if model_has_slope else None,\n observation_noise_scale=observation_noise_scale,\n initial_state_prior=level_component.initial_state_prior,\n is_missing=is_missing,\n seed=level_seed)\n level = latents[..., 0]\n level_residuals = level[..., 1:] - level[..., :-1]\n if model_has_slope:\n slope = latents[..., 1]\n level_residuals -= slope[..., :-1]\n slope_residuals = slope[..., 1:] - slope[..., :-1]\n\n # Estimate level scale from the empirical changes in level.\n level_scale = _resample_scale(\n prior=level_scale_variance_prior,\n observed_residuals=level_residuals,\n is_missing=None,\n seed=level_scale_seed)\n if model_has_slope:\n slope_scale = _resample_scale(\n prior=slope_scale_variance_prior,\n observed_residuals=slope_residuals,\n is_missing=None,\n seed=slope_scale_seed)\n if not model_has_spike_slab_regression:\n # Estimate noise scale from the residuals.\n observation_noise_scale = _resample_scale(\n prior=observation_noise_variance_prior,\n observed_residuals=regression_residuals - level,\n is_missing=is_missing,\n seed=observation_noise_scale_seed)\n\n return GibbsSamplerState(\n observation_noise_scale=observation_noise_scale,\n level_scale=level_scale,\n slope_scale=(slope_scale if model_has_slope\n else previous_sample.slope_scale),\n weights=weights,\n level=level,\n slope=(slope if model_has_slope\n else previous_sample.slope),\n seed=loop_seed)\n return sampler_loop_body\n" ]
[ [ "tensorflow.compat.v2.nest.map_structure", "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.not_equal", "tensorflow.compat.v2.nest.is_nested", "numpy.arange", "tensorflow.compat.v2.concat", "tensorflow.compat.v2.sqrt", "tensorflow.compat.v2.minimum", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.square", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.linalg.matvec", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.random.set_seed", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.range", "tensorflow.compat.v2.TensorShape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EricSteinberger/DREAM
[ "bfe21bbb0f60ab27a1af9774308efbbbd41e68c4", "bfe21bbb0f60ab27a1af9774308efbbbd41e68c4", "bfe21bbb0f60ab27a1af9774308efbbbd41e68c4", "bfe21bbb0f60ab27a1af9774308efbbbd41e68c4", "bfe21bbb0f60ab27a1af9774308efbbbd41e68c4" ]
[ "Leduc_SDCFR.py", "PokerRL/eval/_/EvaluatorMasterBase.py", "DREAM_and_DeepCFR/workers/la/sampling_algorithms/MainPokerModuleFLAT_Baseline.py", "DREAM_and_DeepCFR/workers/ps/dist.py", "PokerRL/eval/rl_br/workers/la/Local_RLBR_LearnerActor.py" ]
[ "import numpy as np\n\nfrom DREAM_and_DeepCFR.EvalAgentDeepCFR import EvalAgentDeepCFR\nfrom DREAM_and_DeepCFR.TrainingProfile import TrainingProfile\nfrom DREAM_and_DeepCFR.workers.driver.Driver import Driver\nfrom HYPERS import *\nfrom PokerRL.game.games import StandardLeduc # or any other game\n\nif __name__ == '__main__':\n ctrl = Driver(t_prof=TrainingProfile(\n name=\"Leduc_ESSDCFR_v001_SEED\" + str(np.random.randint(1000000)),\n\n n_batches_adv_training=SDCFR_LEDUC_BATCHES,\n n_traversals_per_iter=SDCFR_LEDUC_TRAVERSALS_ES,\n sampler=\"es\",\n eval_modes_of_algo=(\n EvalAgentDeepCFR.EVAL_MODE_SINGLE,\n ),\n\n n_batches_avrg_training=4000,\n\n game_cls=StandardLeduc,\n\n DISTRIBUTED=False,\n ),\n eval_methods={\n \"br\": 20,\n })\n ctrl.run()\n", "# Copyright (c) 2019 Eric Steinberger\n\n\nimport numpy as np\n\nfrom PokerRL.rl.base_cls.workers.WorkerBase import WorkerBase\n\n\nclass EvaluatorMasterBase(WorkerBase):\n \"\"\"\n Baseclass to all Evaluators. An Evaluator is an algorithm to evaluate an agent's performance in a certain metric.\n \"\"\"\n\n def __init__(self, t_prof, eval_env_bldr, chief_handle, evaluator_name, log_conf_interval=False):\n \"\"\"\n Args:\n t_prof (TrainingProfile)\n chief_handle (class instance or ray ActorHandle)\n evaluator_name (str): Name of the evaluator\n \"\"\"\n super().__init__(t_prof=t_prof)\n self._eval_env_bldr = eval_env_bldr\n self._chief_handle = chief_handle\n\n self._is_multi_stack = len(self._t_prof.eval_stack_sizes) > 1\n\n self._log_conf_interval = log_conf_interval\n\n self._evaluator_name = evaluator_name\n\n self._exp_name_total, self._exp_names_conf = self._create_experiments(self_name=evaluator_name)\n\n if self._is_multi_stack:\n self._exp_name_multi_stack = {\n eval_mode:\n self._ray.get(\n self._ray.remote(self._chief_handle.create_experiment,\n self._t_prof.name\n + \" \" + eval_mode\n + \"Multi_Stack\"\n + \": \" + evaluator_name\n + \" Averaged Total\"))\n for eval_mode in self._t_prof.eval_modes_of_algo\n }\n if self._log_conf_interval:\n self._exp_names_multi_stack_conf = {\n eval_mode:\n self._ray.get(\n [\n self._ray.remote(self._chief_handle.create_experiment,\n self._t_prof.name\n + \" \" + eval_mode\n + \": \" + evaluator_name\n + \" Conf_\" + bound_end)\n for bound_end in [\"lower95\", \"upper95\"]\n ]\n )\n for eval_mode in self._t_prof.eval_modes_of_algo\n }\n\n @property\n def is_multi_stack(self):\n \"\"\"\n Whether the agent is evaluated in games that start with different stack sizes each time.\n \"\"\"\n return self._is_multi_stack\n\n def evaluate(self, iter_nr):\n \"\"\" Evaluate an agent and send the results as logs to the Chief. 
\"\"\"\n raise NotImplementedError\n\n def update_weights(self):\n \"\"\" Update the local weights on the master, for instance by calling .pull_current_strat_from_chief() \"\"\"\n raise NotImplementedError\n\n def pull_current_strat_from_chief(self):\n \"\"\"\n Pulls and Returns weights or any other changing algorithm info of any format from the Chief.\n \"\"\"\n return self._ray.get(self._ray.remote(self._chief_handle.pull_current_eval_strategy,\n self._evaluator_name\n ))\n\n def _create_experiments(self, self_name, ):\n \"\"\"\n Registers a new experiment either for each player and their average or just for their average.\n \"\"\"\n\n if self._log_conf_interval:\n exp_names_conf = {\n eval_mode:\n [\n self._ray.get(\n [\n self._ray.remote(self._chief_handle.create_experiment,\n self._t_prof.name\n + \" \" + eval_mode\n + \"_stack_\" + str(stack_size[0])\n + \": \" + self_name\n + \" Conf_\" + bound_end)\n for bound_end in [\"lower95\", \"upper95\"]\n ]\n )\n for stack_size in self._t_prof.eval_stack_sizes\n ]\n for eval_mode in self._t_prof.eval_modes_of_algo\n }\n else:\n exp_names_conf = None\n\n exp_name_total = {\n eval_mode:\n [\n self._ray.get(\n self._ray.remote(self._chief_handle.create_experiment,\n self._t_prof.name\n + \" \" + eval_mode\n + \"_stack_\" + str(stack_size[0])\n + \": \" + self_name\n + \" Total\"))\n for stack_size in self._t_prof.eval_stack_sizes\n ]\n for eval_mode in self._t_prof.eval_modes_of_algo\n }\n\n return exp_name_total, exp_names_conf\n\n def _get_95confidence(self, scores):\n mean = np.mean(scores).item()\n std = np.std(scores).item()\n\n _d = 1.96 * std / np.sqrt(scores.shape[0])\n return float(mean), float(_d)\n\n def _log_results(self, agent_mode, stack_size_idx, iter_nr, score, upper_conf95=None, lower_conf95=None):\n \"\"\"\n Log evaluation results by sending these results to the Chief, who will later send them to the Crayon log server.\n\n Args:\n agent_mode: Evaluation mode of the agent whose performance is logged\n stack_size_idx: If evaluating multiple starting stack sizes, this is an index describing which one\n this data is from.\n iter_nr: Algorithm Iteration of this data\n score: Score in this evaluation (e.g. 
exploitability)\n \"\"\"\n graph_name = \"Evaluation/\" + self._eval_env_bldr.env_cls.WIN_METRIC\n\n self._ray.remote(self._chief_handle.add_scalar,\n self._exp_name_total[agent_mode][stack_size_idx], graph_name, iter_nr, score)\n\n if self._log_conf_interval:\n assert upper_conf95 is not None\n assert lower_conf95 is not None\n self._ray.remote(self._chief_handle.add_scalar,\n self._exp_names_conf[agent_mode][stack_size_idx][0], graph_name, iter_nr, lower_conf95)\n self._ray.remote(self._chief_handle.add_scalar,\n self._exp_names_conf[agent_mode][stack_size_idx][1], graph_name, iter_nr, upper_conf95)\n\n def _log_multi_stack(self, agent_mode, iter_nr, score_total, upper_conf95=None, lower_conf95=None):\n \"\"\"\n Additional logging for multistack evaluations\n \"\"\"\n graph_name = \"Evaluation/\" + self._eval_env_bldr.env_cls.WIN_METRIC\n self._ray.remote(self._chief_handle.add_scalar,\n self._exp_name_multi_stack[agent_mode], graph_name, iter_nr, score_total)\n\n if self._log_conf_interval:\n assert upper_conf95 is not None\n assert lower_conf95 is not None\n self._ray.remote(self._chief_handle.add_scalar,\n self._exp_names_multi_stack_conf[agent_mode][0], graph_name, iter_nr, lower_conf95)\n self._ray.remote(self._chief_handle.add_scalar,\n self._exp_names_multi_stack_conf[agent_mode][1], graph_name, iter_nr, upper_conf95)\n", "# Copyright (c) 2019 Eric Steinberger\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom PokerRL.rl.neural.CardEmbedding import CardEmbedding\nfrom PokerRL.rl.neural.LayerNorm import LayerNorm\n\n\nclass MainPokerModuleFLAT_Baseline(nn.Module):\n\n def __init__(self,\n env_bldr,\n device,\n mpm_args,\n ):\n super().__init__()\n\n self._args = mpm_args\n self._env_bldr = env_bldr\n\n self._device = device\n\n self._board_start = self._env_bldr.obs_board_idxs[0]\n self._board_stop = self._board_start + len(self._env_bldr.obs_board_idxs)\n\n self.dropout = nn.Dropout(p=mpm_args.dropout)\n\n self.card_emb = CardEmbedding(env_bldr=env_bldr, dim=mpm_args.dim, device=device)\n\n if mpm_args.deep:\n self.cards_fc_1 = nn.Linear(in_features=self.card_emb.out_size * 2,\n out_features=mpm_args.dim * 3)\n self.cards_fc_2 = nn.Linear(in_features=mpm_args.dim * 3, out_features=mpm_args.dim * 3)\n self.cards_fc_3 = nn.Linear(in_features=mpm_args.dim * 3, out_features=mpm_args.dim)\n\n self.history_1 = nn.Linear(in_features=self._env_bldr.pub_obs_size - self._env_bldr.obs_size_board,\n out_features=mpm_args.dim)\n self.history_2 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)\n\n self.comb_1 = nn.Linear(in_features=2 * mpm_args.dim, out_features=mpm_args.dim)\n self.comb_2 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)\n\n else:\n self.layer_1 = nn.Linear(in_features=self.card_emb.out_size * 2\n + self._env_bldr.pub_obs_size - self._env_bldr.obs_size_board,\n out_features=mpm_args.dim)\n self.layer_2 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)\n self.layer_3 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)\n\n if self._args.normalize:\n self.norm = LayerNorm(mpm_args.dim)\n\n self.to(device)\n # print(\"n parameters:\", sum(p.numel() for p in self.parameters() if p.requires_grad))\n\n @property\n def output_units(self):\n return self._args.dim\n\n @property\n def device(self):\n return self._device\n\n def forward(self, pub_obses, range_idxs):\n \"\"\"\n 1. do list -> padded\n 2. feed through pre-processing fc layers\n 3. PackedSequence (sort, pack)\n 4. 
rnn\n 5. unpack (unpack re-sort)\n 6. cut output to only last entry in sequence\n\n Args:\n pub_obses (list): list of np arrays of shape [np.arr([history_len, n_features]), ...)\n range_idxs (LongTensor): range_idxs (one for each pub_obs) tensor([2, 421, 58, 912, ...])\n \"\"\"\n if isinstance(pub_obses, list):\n pub_obses = torch.from_numpy(np.array(pub_obses)).to(self._device, torch.float32)\n\n hist_o = torch.cat([\n pub_obses[:, :self._board_start],\n pub_obses[:, self._board_stop:]\n ], dim=-1)\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n # Card embeddings\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n range_idxs_0 = range_idxs // 10000 # Big hack! See LearnedBaselineSampler for the reverse opp\n range_idxs_1 = range_idxs % 10000\n\n card_o_0 = self.card_emb(pub_obses=pub_obses,\n range_idxs=torch.where(range_idxs_0 == 8888, torch.zeros_like(range_idxs_0),\n range_idxs_0))\n\n card_o_0 = torch.where(range_idxs_0.unsqueeze(1).expand_as(card_o_0) == 8888,\n torch.full_like(card_o_0, fill_value=-1),\n card_o_0,\n )\n\n card_o_1 = self.card_emb(pub_obses=pub_obses,\n range_idxs=torch.where(range_idxs_1 == 8888, torch.zeros_like(range_idxs_1),\n range_idxs_1))\n card_o_1 = torch.where(range_idxs_1.unsqueeze(1).expand_as(card_o_0) == 8888,\n torch.full_like(card_o_1, fill_value=-1),\n card_o_1,\n )\n card_o = torch.cat([card_o_0, card_o_1], dim=-1)\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n # Network\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n if self._args.dropout > 0:\n A = lambda x: self.dropout(F.relu(x))\n else:\n A = lambda x: F.relu(x)\n\n if self._args.deep:\n card_o = A(self.cards_fc_1(card_o))\n card_o = A(self.cards_fc_2(card_o) + card_o)\n card_o = A(self.cards_fc_3(card_o))\n\n hist_o = A(self.history_1(hist_o))\n hist_o = A(self.history_2(hist_o) + hist_o)\n\n y = A(self.comb_1(torch.cat([card_o, hist_o], dim=-1)))\n y = A(self.comb_2(y) + y)\n\n else:\n y = torch.cat([hist_o, card_o], dim=-1)\n y = A(self.layer_1(y))\n y = A(self.layer_2(y) + y)\n y = A(self.layer_3(y) + y)\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n # Normalize last layer\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n if self._args.normalize:\n y = self.norm(y)\n\n return y\n\n\nclass MPMArgsFLAT_Baseline:\n\n def __init__(self,\n deep=True,\n dim=128,\n dropout=0.0,\n normalize=True,\n ):\n self.deep = deep\n self.dim = dim\n self.dropout = dropout\n self.normalize = normalize\n\n def get_mpm_cls(self):\n return MainPokerModuleFLAT_Baseline\n", "# Copyright (c) Eric Steinberger 2020\n\nimport ray\nimport torch\n\nfrom DREAM_and_DeepCFR.workers.ps.local import ParameterServer as _LocalParameterServer\n\n\[email protected](num_cpus=1, num_gpus=1 if torch.cuda.is_available() else 0)\nclass ParameterServer(_LocalParameterServer):\n\n def __init__(self, t_prof, owner, chief_handle):\n super().__init__(t_prof=t_prof, owner=owner, chief_handle=chief_handle)\n", "import numpy as np\n\nfrom PokerRL.eval.rl_br import _util\nfrom PokerRL.rl import rl_util\nfrom PokerRL.rl.agent_modules.DDQN import DDQN\nfrom PokerRL.rl.base_cls.workers.WorkerBase import WorkerBase\n\n\nclass Local_RLBR_LearnerActor(WorkerBase):\n\n def __init__(self, t_prof, chief_handle, eval_agent_cls):\n super().__init__(t_prof=t_prof)\n self._args = t_prof.module_args[\"rlbr\"]\n\n self._env_bldr = rl_util.get_env_builder(t_prof=t_prof)\n\n self._chief_handle = chief_handle\n self._eval_agent_cls = eval_agent_cls\n self._eval_env_bldr = _util.get_env_builder_rlbr(t_prof=t_prof)\n\n self._ddqns 
= [None for _ in range(self._eval_env_bldr.N_SEATS)]\n self._rlbr_seat_id = None\n self._agent_seat_id = None\n self._rlbr_env_wrapper = None\n self._opponent = None\n self._buf = None\n self._br_memory_saver = None\n\n if t_prof.nn_type == \"recurrent\":\n from PokerRL.rl.buffers.CircularBufferRNN import CircularBufferRNN\n from PokerRL.rl.buffers.BRMemorySaverRNN import BRMemorySaverRNN\n\n self.CircularBufferCls = CircularBufferRNN\n self.BRMemorySaverCls = BRMemorySaverRNN\n elif t_prof.nn_type == \"feedforward\":\n from PokerRL.rl.buffers.CircularBufferFLAT import CircularBufferFLAT\n from PokerRL.rl.buffers.BRMemorySaverFLAT import BRMemorySaverFLAT\n\n self.CircularBufferCls = CircularBufferFLAT\n self.BRMemorySaverCls = BRMemorySaverFLAT\n\n else:\n raise ValueError(t_prof.nn_type)\n\n def reset(self, p_training, eval_opponent_state_dict, stack_size):\n self._rlbr_seat_id = p_training\n self._agent_seat_id = 1 - p_training\n self._opponent = self._eval_agent_cls(t_prof=self._t_prof)\n self._opponent.load_state_dict(eval_opponent_state_dict)\n self._rlbr_env_wrapper = self._eval_env_bldr.get_new_wrapper(is_evaluating=True, stack_size=stack_size)\n self._ddqns[p_training] = DDQN(owner=p_training, ddqn_args=self._args.ddqn_args,\n env_bldr=self._eval_env_bldr)\n self._buf = self.CircularBufferCls(env_bldr=self._env_bldr, max_size=self._args.ddqn_args.cir_buf_size)\n self._br_memory_saver = self.BRMemorySaverCls(env_bldr=self._eval_env_bldr, buffer=self._buf)\n\n def get_grads(self, p_id):\n return self._ray.grads_to_numpy(self._ddqns[p_id].get_grads_one_batch_from_buffer(buffer=self._buf))\n\n def play(self, n_episodes):\n self._ddqns[self._rlbr_seat_id].eval()\n accumulated_rew = 0.0\n for n in range(n_episodes):\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n # Reset\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n obs, r_for_all, done, info = self._rlbr_env_wrapper.reset()\n self._opponent.reset(deck_state_dict=self._rlbr_env_wrapper.env.cards_state_dict())\n\n range_idxs = [\n self._rlbr_env_wrapper.env.get_range_idx(p_id=p_id)\n for p_id in range(self._eval_env_bldr.N_SEATS)\n ]\n\n # Store last game to buffer and reset memory saver\n self._br_memory_saver.reset(range_idx=range_idxs[self._rlbr_seat_id])\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n # Play Episode\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n while not done:\n p_id_acting = self._rlbr_env_wrapper.env.current_player.seat_id\n if self._t_prof.DEBUGGING:\n if p_id_acting != self._opponent._internal_env_wrapper.env.current_player.seat_id:\n raise RuntimeError(\"Bad bad bug in RL-BR.\")\n\n # RL-BR acting\n if p_id_acting == self._rlbr_seat_id:\n legal_actions_list_t = self._rlbr_env_wrapper.env.get_legal_actions()\n action_int = self._ddqns[self._rlbr_seat_id].select_br_a(\n pub_obses=[obs],\n range_idxs=np.array([range_idxs[self._rlbr_seat_id]], dtype=np.int32),\n explore=True,\n legal_actions_lists=[legal_actions_list_t]\n )[0].item()\n\n self._br_memory_saver.add_experience(obs_t_before_acted=obs,\n a_selected_t=action_int,\n legal_actions_list_t=legal_actions_list_t)\n\n # Notify agent\n self._opponent.notify_of_action(p_id_acted=self._rlbr_seat_id, action_he_did=action_int)\n\n # EvalAgent (opponent) acting\n else:\n action_int, _ = self._opponent.get_action(step_env=True, need_probs=False)\n\n # Step\n obs, r_for_all, done, info = self._rlbr_env_wrapper.step(action=action_int)\n\n # Add terminal memory\n self._br_memory_saver.add_to_buf(reward_p=r_for_all[self._rlbr_seat_id], terminal_obs=obs)\n\n # For tracking running reward while 
training\n accumulated_rew += r_for_all[self._rlbr_seat_id]\n\n return accumulated_rew \\\n * self._eval_env_bldr.env_cls.EV_NORMALIZER \\\n * self._rlbr_env_wrapper.env.REWARD_SCALAR \\\n / n_episodes\n\n def update_target_net(self, p_id):\n self._ddqns[p_id].update_target_net()\n\n def update(self, eps, nets):\n for p_id in range(self._t_prof.n_seats):\n if nets[p_id] is not None:\n self._ddqns[p_id].load_net_state_dict(\n state_dict=self._ray.state_dict_to_torch(self._ray.get(nets[p_id]),\n device=self._ddqns[p_id].device))\n\n if eps[p_id] is not None:\n self._ddqns[p_id].eps = self._ray.get(eps[p_id])\n\n # TODO this is duplicate code from RLBR LA\n def compute_rlbr(self, n_hands_each_seat, ddqn_state_dicts, stack_size):\n agent_losses = np.empty(shape=n_hands_each_seat * 2, dtype=np.float32)\n rlbr_dqn_each_seat = [\n DDQN.inference_version_from_state_dict(state_dict=ddqn_state_dicts[p],\n env_bldr=self._eval_env_bldr)\n for p in range(self._t_prof.n_seats)\n ]\n rlbr_env_wrapper = self._eval_env_bldr.get_new_wrapper(is_evaluating=True, stack_size=stack_size)\n\n for rlbr_seat_id in range(rlbr_env_wrapper.env.N_SEATS):\n rlbr_agent = rlbr_dqn_each_seat[rlbr_seat_id]\n for iteration_id in range(n_hands_each_seat):\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n # Reset\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n obs, r_for_all, done, info = _util.reset_episode_multi_action_space(rlbr_env_wrapper=rlbr_env_wrapper,\n opponent_agent=self._opponent)\n range_idx_rlbr = rlbr_env_wrapper.env.get_range_idx(p_id=rlbr_seat_id)\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n # Play Episode\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n while not done:\n p_id_acting = rlbr_env_wrapper.env.current_player.seat_id\n\n # RL-BR acting\n if p_id_acting == rlbr_seat_id:\n action_int = rlbr_agent.select_br_a(\n pub_obses=[obs],\n range_idxs=np.array([range_idx_rlbr], dtype=np.int32),\n legal_actions_lists=[rlbr_env_wrapper.env.get_legal_actions()],\n explore=False,\n )[0]\n self._opponent.notify_of_action(p_id_acted=rlbr_seat_id, action_he_did=action_int)\n\n # EvalAgent (opponent) acting\n else:\n action_int, _ = self._opponent.get_action(step_env=True, need_probs=False)\n\n # Step\n obs, r_for_all, done, info = rlbr_env_wrapper.step(action=action_int)\n\n # add rews\n agent_losses[iteration_id + (rlbr_seat_id * n_hands_each_seat)] = r_for_all[rlbr_seat_id] \\\n * rlbr_env_wrapper.env.REWARD_SCALAR \\\n * rlbr_env_wrapper.env.EV_NORMALIZER\n\n return agent_losses.tolist()\n" ]
[ [ "numpy.random.randint" ], [ "numpy.std", "numpy.mean", "numpy.sqrt" ], [ "torch.nn.Dropout", "torch.cat", "torch.zeros_like", "torch.nn.Linear", "torch.nn.functional.relu", "torch.full_like", "numpy.array" ], [ "torch.cuda.is_available" ], [ "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Patyrn/Divide-and-Learn
[ "ff03689c7ab6a7155ebd019babce8f79d0757a53", "ff03689c7ab6a7155ebd019babce8f79d0757a53", "ff03689c7ab6a7155ebd019babce8f79d0757a53" ]
[ "dnl/RelaxSolver.py", "IntOpt/shortespath/shortespath.py", "SPOTree_scheduling/leaf_model.py" ]
[ "from dnl.IconEasySolver import ICON_scheduling, calculate_energy_from_solver, ICON_scheduling_relaxation\nfrom KnapsackSolving import solveKnapsackProblem, solveKnapsackProblemRelaxation\nfrom dnl.Params import KNAPSACK, ICON_SCHEDULING_EASY\nimport numpy as np\n\ndef get_relax_optimization_objective(Y, weights, opt_params,relaxation=False):\n solver = opt_params.get('solver')\n if solver == KNAPSACK:\n return compute_obj_knapsack(Y, weights, opt_params,relaxation)\n elif solver == ICON_SCHEDULING_EASY:\n return compute_icon_scheduling_obj(Y, opt_params,relaxation)\n else:\n print('error')\n\n\ndef compute_obj_knapsack( Y, weights, opt_params,relaxation):\n capacity = opt_params.get('capacity')\n obj_vals = np.zeros(len(Y))\n index = range(len(Y))\n for i, benchmark_Y, benchmark_weights in zip(index, Y, weights):\n benchmark_Y = benchmark_Y.reshape(-1).tolist()\n benchmark_weights = benchmark_weights.reshape(-1).tolist()\n if relaxation:\n solution = solveKnapsackProblemRelaxation(benchmark_Y, [benchmark_weights], capacity, warmstart=None)\n else:\n solution = solveKnapsackProblem(benchmark_Y, [benchmark_weights], capacity, warmstart=None)\n predicted_opt_items = np.asarray(solution['assignments'])\n obj_vals[i] = np.sum(benchmark_Y * predicted_opt_items)\n return obj_vals\n\n\ndef compute_icon_scheduling_obj(Y, opt_params,relaxation):\n obj_vals = np.zeros(len(Y))\n index = range(len(Y))\n for i, benchmark_Y in zip(index,Y):\n benchmark_Y = benchmark_Y.reshape(-1).tolist()\n if relaxation:\n solver = ICON_scheduling_relaxation(price=benchmark_Y, opt_params=opt_params, verbose=False)\n # print('Relaxation')\n else:\n objVal, solver = ICON_scheduling(price=benchmark_Y, opt_params=opt_params)\n optimal_objective_value = calculate_energy_from_solver(solver, benchmark_Y)\n obj_vals[i] = optimal_objective_value\n return obj_vals", "import torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nimport random\nimport numpy as np\nimport scipy as sp\nimport gurobipy as gp\nfrom qpthlocal.qp import QPFunction\nfrom qpthlocal.qp import QPSolvers\nfrom qpthlocal.qp import make_gurobi_model\nimport pickle\nimport sys\nimport datetime\nfrom collections import defaultdict\nimport math\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix\nimport logging \nimport datetime\nimport time\nfrom collections import defaultdict\nfrom sklearn.metrics import mean_squared_error as mse\nfrom scipy.special import expit, logit\nimport copy\nsys.path.insert(0,'../Interior/')\nsys.path.insert(0,'../..')\n\n# from ip_model import *\nfrom ip_model_whole import *\nfrom remove_redundancy import _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense\nfrom sgd_learner import *\nimport pandas as pd\ndef bceloss(inputs,target):\n\treturn -(np.log(1-expit(inputs)) + target*inputs).mean()\ndef _remove_redundant_rows (A_eq):\n # remove redundant (linearly dependent) rows from equality constraints\n n_rows_A = A_eq.shape[0]\n redundancy_warning = (\"A_eq does not appear to be of full row rank. 
To \"\n \"improve performance, check the problem formulation \"\n \"for redundant equality constraints.\")\n # if (sps.issparse(A_eq)):\n # if rr and A_eq.size > 0: # TODO: Fast sparse rank check?\n # A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)\n # if A_eq.shape[0] < n_rows_A:\n # warn(redundancy_warning, OptimizeWarning, stacklevel=1)\n # if status != 0:\n # complete = True\n # return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n # x, x0, undo, complete, status, message)\n\n # This is a wild guess for which redundancy removal algorithm will be\n # faster. More testing would be good.\n small_nullspace = 5\n if A_eq.size > 0:\n try: # TODO: instead use results of first SVD in _remove_redundancy\n rank = np.linalg.matrix_rank(A_eq)\n except Exception: # oh well, we'll have to go with _remove_redundancy_dense\n rank = 0\n if A_eq.size > 0 and rank < A_eq.shape[0]:\n warn(redundancy_warning, OptimizeWarning, stacklevel=3)\n dim_row_nullspace = A_eq.shape[0]-rank\n if dim_row_nullspace <= small_nullspace:\n d_removed, status, message = _remove_redundancy(A_eq)\n if dim_row_nullspace > small_nullspace :\n d_removed, status, message = _remove_redundancy_dense(A_eq)\n if A_eq.shape[0] < rank:\n message = (\"Due to numerical issues, redundant equality \"\n \"constraints could not be removed automatically. \"\n \"Try providing your constraint matrices as sparse \"\n \"matrices to activate sparse presolve, try turning \"\n \"off redundancy removal, or try turning off presolve \"\n \"altogether.\")\n status = 4\n if status != 0:\n complete = True\n return d_removed\n\ndef get_loss(net,A, X, y,instances):\n net.eval()\n rslt = []\n c_pred = net(torch.from_numpy(X).float()).squeeze().detach().numpy()\n c = y\n for k,v in instances.items():\n source, destination = v\n b = np.zeros(len(A))\n b [source] =1\n b[destination ]=-1\n model = gp.Model()\n model.setParam('OutputFlag', 0)\n x = model.addMVar(shape=A.shape[1], vtype=gp.GRB.BINARY, name=\"x\")\n model.setObjective(c_pred @x, gp.GRB.MINIMIZE)\n model.addConstr(A @ x == b, name=\"eq\")\n model.optimize()\n if model.status ==2:\n sol =x.X \n rslt.append( c.dot(sol))\n else:\n print(model.status, k,v)\n net.train()\n return mse(c_pred,c), sum(rslt)\n\n\ndef validation_module(net,A, X,y, training_instances,validation_instances, test_instances,time,\n\tepoch,subepoch,**kwargs):\n\n # return bceloss(c_pred,c), sum(rslt)\n\n dict_validation = {}\n losses_test = get_loss(net, A, X,y,test_instances)\n dict_validation['test_prediction_loss'] = losses_test[0]\n dict_validation['test_task_loss'] = losses_test[1]\n\n losses_train = get_loss(net, A, X,y,training_instances)\n dict_validation['train_prediction_loss'] = losses_train[0]\n dict_validation['train_task_loss'] = losses_train[1]\n\n losses_validation = get_loss(net, A, X,y,validation_instances)\n dict_validation['validation_prediction_loss'] = losses_validation[0]\n dict_validation['validation_task_loss'] = losses_validation[1] \n\n dict_validation['batch'] = subepoch\n dict_validation['epoch'] = epoch\n dict_validation['time'] = time \n return dict_validation\n\ndef make_fc(num_layers, num_features, num_targets=1,\n activation_fn = nn.ReLU,intermediate_size=50, regularizers = True):\n net_layers = [nn.Linear(num_features, intermediate_size),\n activation_fn()]\n for hidden in range(num_layers-2):\n net_layers.append(nn.Linear(intermediate_size, intermediate_size))\n net_layers.append(activation_fn())\n net_layers.append(nn.Linear(intermediate_size, num_targets))\n 
net_layers.append(nn.ReLU())\n return nn.Sequential(*net_layers)\n \nclass two_stage_matching:\t\n\tdef __init__(self,A,num_features, num_layers, intermediate_size,\n\t\tactivation_fn = nn.ReLU, num_instance=1,\n\t\tepochs=10,batchsize= 256, optimizer=optim.Adam,\n\t\tvalidation=False,**hyperparams):\n\n\t\tself.A = A\n\t\tself.num_features = num_features\n\t\tself.num_layers = num_layers\n\t\tself.activation_fn = activation_fn\n\t\tself.intermediate_size = intermediate_size\n\t\t\n\t\tself.epochs = epochs\n\t\tself.batchsize = batchsize\n\t\tself.validation = validation\n\t\tself.net = make_fc(num_layers=num_layers, num_features=num_features, \n\t\t\tactivation_fn= activation_fn,\n\t\t\tintermediate_size= intermediate_size)\n\t\tself.optimizer = optimizer(self.net.parameters(), **hyperparams)\n\n\tdef fit(self,X,y,instances):\n\n\t\ttest_instances = instances['test']\n\t\tvalidation_instances = instances['validation']\n\t\ttrain_instances = instances['train']\t\n\t\ttime_ = 0\n\t\tself.model_time = 0\t\t\n\t\tn_train = X.shape[0]\n\n\t\tif self.validation:\n\t\t\tvalidation_list = []\n\t\tindexes = np.arange(n_train)\n\t\tloss_fn = nn.MSELoss()# nn.KLDivLoss(reduction='batchmean') \n\t\t\n\t\tfor e in range(self.epochs):\n\t\t\tstart_time = time.time()\n\t\t\tnp.random.shuffle(indexes)\n\t\t\tnum_batches = len(indexes) //(self.batchsize)\n\t\t\tbi = 0#batch-index\n\t\t\tfor b in range(num_batches):\n\t\t\t\tself.optimizer.zero_grad()\n\t\t\t\tX_np = X[indexes[bi:(bi+self.batchsize)]]\n\t\t\t\ty_np = y[indexes[bi:(bi+self.batchsize)]]\n\t\t\t\tbi += self.batchsize\n\t\t\t\tX_torch = torch.from_numpy(X_np).float()\n\t\t\t\ty_torch = torch.from_numpy(y_np).float()\n\n\t\t\t\tc_pred = self.net(X_torch).squeeze()\n\t\t\t\tloss = loss_fn(c_pred,y_torch)\n\t\t\t\tloss.backward()\n\n\t\t\t\tself.optimizer.step()\n\t\t\tend_time = time.time()\n\t\t\ttime_ += end_time - start_time\n\t\t\tif self.validation:\t\t\t\n\t\t\t\tvalidation_list.append( validation_module(self.net,self.A, \n\t\t\t\tX,y,train_instances,validation_instances, test_instances,time_,e,b))\n\n\t\t\t\n\t\t\tprint(\"Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}\".format(e+1,loss.sum().item(),\n\t\t\t\tdatetime.datetime.now()))\n\t\tif self.validation :\n\t\n\t\t\tdd = defaultdict(list)\n\t\t\tfor d in validation_list:\n\t\t\t\tfor key, value in d.items():\n\t\t\t\t\tdd[key].append(value)\n\t\t\tdf = pd.DataFrame.from_dict(dd)\n\n\t\t\tlogging.info('Completion Time %s \\n' %str(datetime.datetime.now()) )\n\t\t\treturn df\n\tdef predict(self,X):\n\t\tX_torch = torch.from_numpy(X).float()\n\t\tself.net.eval()\n\t\tpred= self.net(X_torch)\n\t\tself.net.train()\n\t\treturn pred.detach().detach().numpy().squeeze()\t\n\tdef validation_result(self,X,y, instances):\n\t\tvalidation_rslt = get_loss(self.net, self.A, X,y,instances)\n\t\treturn validation_rslt[0], validation_rslt[1]\n\t\nclass qptl:\n\tdef __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,\n\t\tactivation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,\n\t\tgamma=1e-5,validation=False,\n\t\t**hyperparams):\n\t\tself.num_features = num_features\n\t\tself.num_layers = num_layers\n\t\tself.activation_fn = activation_fn\n\t\tself.intermediate_size = intermediate_size\n\t\tself.A = A\n\t\tself.num_instance = num_instance\n\t\t\n\t\t\n\t\tself.epochs = epochs\n\t\tself.optimizer = optimizer\n\t\tself.validation = validation\n\n\t\tself.net = make_fc(num_layers=num_layers, num_features=num_features, \n\t\t\tactivation_fn= activation_fn,\n\t\t\tintermediate_size= 
intermediate_size)\n\t\tself.optimizer = optimizer(self.net.parameters(), **hyperparams)\n\t\tself.gamma= gamma\n\n\tdef fit(self,X,y,instances):\n\n\t\ttest_instances = instances['test']\n\t\tvalidation_instances = instances['validation']\n\t\ttrain_instances = instances['train']\t\n\t\ttime_ = 0\n\t\tself.model_time = 0\t\t\n\t\tn_train = X.shape[0]\n\n\t\tif self.validation:\n\t\t\tvalidation_list = []\n\t\tlogging.info(\"training started\")\n\t\t# rows_to_be_removed = _remove_redundant_rows(self.A)\n\t\t# A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()\n\n\t\tA_torch = torch.from_numpy(self.A).float()\n\t\tQ_torch = self.gamma*torch.eye(A_torch.shape[1])\t\n\t\tX_torch = torch.from_numpy(X).float()\n\t\ty_torch = torch.from_numpy(y).float()\n\t\tG_torch = -1*torch.eye(A_torch.shape[1])\n\t\th_torch = torch.zeros(A_torch.shape[1])\n\t\t\n\t\tfor e in range(self.epochs):\n\t\t\tfor i in range(self.num_instance):\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.optimizer.zero_grad()\n\t\t\t\tsource, dest = train_instances[i]\n\t\t\t\t# b = np.zeros(len(self.A))\n\t\t\t\t# b[source] =1\n\t\t\t\t# b[dest ]=-1\n\t\t\t\t# b= np.delete(b, rows_to_be_removed)\n\t\t\t\t# b_torch = torch.from_numpy(b).float()\t\t\t\t\n\t\t\t\tb_torch = torch.zeros(len(self.A))\n\t\t\t\tb_torch[source] =1\n\t\t\t\tb_torch[dest ]=-1\n\t\t\t\tmodel_params_quad = make_gurobi_model(G_torch.detach().numpy(),\n\t\t\t\t\th_torch.detach().numpy(),A_torch.detach().numpy(),\n\t\t\t\t\tb_torch.detach().numpy(), Q_torch.detach().numpy())\n\n\t\t\t\t# model_params_quad = make_gurobi_model(None,None,\n\t\t\t\t# \tA_torch.detach().numpy(),\n\t\t\t\t# \tb_torch.detach().numpy(), Q_torch.detach().numpy())\n\t\t\t\tc_pred = self.net(X_torch)\n\t\t\t\tif any(torch.isnan(torch.flatten(c_pred)).tolist()):\n\t\t\t\t\t\tlogging.info(\"**Alert** nan in param c_pred \")\n\t\t\t\tif any(torch.isinf(torch.flatten(c_pred)).tolist()):\n\t\t\t\t\t\tlogging.info(\"**Alert** inf in param c_pred \")\n\t\t\t\tlogging.info(\"shapes c {} A {} b {} G {} h {} Q {}\".format(c_pred.shape,\n\t\t\t\t\tA_torch.shape,b_torch.shape,G_torch.shape,h_torch.shape,\n\t\t\t\t\tQ_torch.shape ))\n\t\t\t\tx = QPFunction(verbose=False, solver=QPSolvers.GUROBI,\n\t\t\t\t\t\tmodel_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape), \n\t\t\t\t\t\tc_pred.squeeze(),G_torch.expand(1, *G_torch.shape), \n\t\t\t\t\t\th_torch.expand(1, *h_torch.shape),\n\t\t\t\t\t\t A_torch.expand(1, *A_torch.shape), \n\t\t\t\t\t\tb_torch.expand(1, *b_torch.shape))\n\n\t\t\t\t# x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,\n\t\t\t\t# \t\tmodel_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape), \n\t\t\t\t# \t\tc_pred.squeeze(),torch.Tensor(), \n\t\t\t\t# \t\ttorch.Tensor(),\n\t\t\t\t# \t\t A_torch.expand(1, *A_torch.shape), \n\t\t\t\t# \t\tb_torch.expand(1, *b_torch.shape))\n\n\t\t\t\tc_pred.retain_grad()\n\t\t\t\tloss = (y_torch*x).mean()\n\t\t\t\tloss.backward()\n\t\t\t\tc_grad = copy.deepcopy(c_pred.grad)\n\t\t\t\tif any(torch.isnan(torch.flatten(c_grad)).tolist()):\n\t\t\t\t\tlogging.info(\"**Alert** nan in param c_grad \")\n\t\t\t\t\n\n\t\t\t\tself.optimizer.step()\n\t\t\t\t# logging.info(\"bkwd done\")\n\n\t\t\t\tend_time = time.time()\n\t\t\t\ttime_ += end_time - start_time\n\t\t\t\tif self.validation:\n\t\t\t\t\tif ((i+1)%20==0):\n\t\t\t\t\t\tvalidation_list.append( validation_module(self.net,self.A, \n\t\t\t\t\tX,y,train_instances,validation_instances, \n\t\t\t\t\ttest_instances,time_,e,i))\n\t\t\t\n\t\t\tprint(\"Epoch {} 
Loss:{} Time: {:%Y-%m-%d %H:%M:%S}\".format(e+1,loss.sum().item(),\n\t\t\t\tdatetime.datetime.now()))\n\t\tif self.validation :\n\t\n\t\t\tdd = defaultdict(list)\n\t\t\tfor d in validation_list:\n\t\t\t\tfor key, value in d.items():\n\t\t\t\t\tdd[key].append(value)\n\t\t\tdf = pd.DataFrame.from_dict(dd)\n\n\t\t\tlogging.info('Completion Time %s \\n' %str(datetime.datetime.now()) )\n\t\t\treturn df\n\tdef predict(self,X):\n\t\tX_torch = torch.from_numpy(X).float()\n\t\tself.net.eval()\n\t\tpred= self.net(X_torch)\n\t\tself.net.train()\n\t\treturn pred.detach().detach().numpy().squeeze()\t\n\tdef validation_result(self,X,y, instances):\n\t\tvalidation_rslt = get_loss(self.net, self.A, X,y,instances)\n\t\treturn validation_rslt[0], validation_rslt[1]\n\nclass intopt:\n\tdef __init__(self,A, num_features, num_layers, intermediate_size,\n\t\tnum_instance= 1,activation_fn = nn.ReLU,epochs=10,optimizer=optim.Adam,\n\t\tmethod=1,max_iter=100,smoothing=False,thr = None,mu0=None,full_row_rank=True,\n\t\tvalidation=False,**hyperparams):\n\t\t\n\t\tself.A = A\n\t\tself.num_features = num_features\n\t\tself.num_layers = num_layers\n\t\tself.activation_fn = activation_fn\n\t\tself.intermediate_size = intermediate_size\n\t\tself.num_instance = num_instance\n\t\tself.method = method\n\t\t\n\t\tself.epochs = epochs\n\t\tself.method = method\n\t\tself.optimizer = optimizer\n\t\tself.max_iter = max_iter\n\t\tself.smoothing = smoothing\n\t\tself.thr = thr\n\t\tself.mu0 = mu0\n\t\tself.validation = validation\n\t\tself.full_row_rank = full_row_rank\n\t\n\t\tself.net = make_fc(num_layers=num_layers, num_features=num_features, \n\t\t\tactivation_fn= activation_fn,\n\t\t\tintermediate_size= intermediate_size)\n\t\tself.optimizer = optimizer(self.net.parameters(), **hyperparams)\n\n\tdef fit(self,X,y,instances):\n\t\t#A_torch = torch.from_numpy(self.A).float()\t\n\t\ttest_instances = instances['test']\n\t\tvalidation_instances = instances['validation']\n\t\ttrain_instances = instances['train']\t\n\t\ttime_ = 0\n\t\tself.model_time = 0\t\t\n\t\tn_train = X.shape[0]\n\n\t\tif self.validation:\n\t\t\tvalidation_list = []\n\t\t# model = gp.Model()\n\t\t# model.setParam('OutputFlag', 0)\n\t\t# x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name=\"x\")\n\t\tif self.full_row_rank:\n\t\t\trows_to_be_removed = _remove_redundant_rows(self.A)\n\t\t\tA_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()\n\t\telse:\n\t\t\tA_torch = torch.from_numpy(self.A).float()\n\t\tlogging.info(\"shape of A {} shape of A-torch {}\".format(self.A.shape,A_torch.shape))\n\t\t# A_ = np.delete(A_, rows_to_be_removed, axis=0)\n\t\t# b_ = np.delete(b_, rows_to_be_removed)\n\t\t# A_torch = torch.from_numpy(self.A).float()\n\t\tX_torch = torch.from_numpy(X).float()\n\t\ty_torch = torch.from_numpy(y).float()\n\t\tlogging.info(\"training started\")\n\t\tfor e in range(self.epochs):\n\t\t\tfor i in range(self.num_instance):\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.optimizer.zero_grad()\n\t\t\t\tsource, dest = train_instances[i]\n\t\t\t\tif self.full_row_rank:\n\t\t\t\t\tb = np.zeros(len(self.A))\n\t\t\t\t\tb[source] =1\n\t\t\t\t\tb[dest ]=-1\n\t\t\t\t\tb= np.delete(b, rows_to_be_removed)\n\t\t\t\t\tb_torch = torch.from_numpy(b).float()\n\t\t\t\telse:\n\t\t\t\t\tb_torch = torch.zeros(len(self.A))\n\t\t\t\t\tb_torch[source] = 1\n\t\t\t\t\tb_torch[dest] = -1\n\n\n\t\t\t\tc_pred = self.net(X_torch).squeeze()\n\t\t\t\tx = IPOfunc(A_torch,b_torch,torch.Tensor(),torch.Tensor(),\n\t\t\t\tbounds= 
[(0., None)],\n\t\t\t\t\tmax_iter=self.max_iter,mu0 = self.mu0, \n\t\t\t\t\tthr=self.thr,method = self.method,\n smoothing=self.smoothing)(c_pred)\n\t\t\t\tloss = (y_torch*x).mean()\n\t\t\t\tloss.backward()\n\t\t\t\tself.optimizer.step()\t\t\t\n\t\t\t\tend_time = time.time()\n\t\t\t\ttime_ += end_time - start_time\n\t\t\t\tif self.validation:\n\t\t\t\t\tif ((i+1)%20==0) :\t\n\t\t\t\t\t\tvalidation_list.append( validation_module(self.net,self.A, \n\t\t\t\t\t\tX,y,train_instances,validation_instances, \n\t\t\t\t\t\ttest_instances,time_,e,i))\n\n\t\t\tprint(\"Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}\".format(e+1,loss.item(),\n\t\t\t\tdatetime.datetime.now()))\n\t\tif self.validation :\n\t\n\t\t\tdd = defaultdict(list)\n\t\t\tfor d in validation_list:\n\t\t\t\tfor key, value in d.items():\n\t\t\t\t\tdd[key].append(value)\n\t\t\tdf = pd.DataFrame.from_dict(dd)\n\n\n\t\t\tlogging.info('Completion Time %s \\n' %str(datetime.datetime.now()) )\n\t\t\treturn df\n\tdef predict(self,X):\n\t\tX_torch = torch.from_numpy(X).float()\n\t\tself.net.eval()\n\t\tpred= self.net(X_torch)\n\t\tself.net.train()\n\t\treturn pred.detach().detach().numpy().squeeze()\n\tdef validation_result(self,X,y, instances):\n\t\tvalidation_rslt = get_loss(self.net, self.A, X,y,instances)\n\t\treturn validation_rslt[0], validation_rslt[1]\n\nclass SPO:\n\tdef __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,\n\t\tactivation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,\n\t\tvalidation=False,**hyperparams):\n\t\tself.A = A\n\t\tself.num_features = num_features\n\t\tself.num_layers = num_layers\n\t\tself.activation_fn = activation_fn\n\t\tself.intermediate_size = intermediate_size\n\t\t\n\t\tself.epochs = epochs\n\t\tself.num_instance = num_instance\n\t\tself.validation = validation\n\t\t\n\t\n\t\tself.net = make_fc(num_layers=num_layers, num_features=num_features, \n\t\t\tactivation_fn= activation_fn,\n\t\t\tintermediate_size= intermediate_size)\n\t\tself.optimizer = optimizer(self.net.parameters(), **hyperparams)\n\t\t\n\tdef fit(self,X,y,instances):\n\t\t#A_torch = torch.from_numpy(self.A).float()\t\n\t\ttest_instances = instances['test']\n\t\tvalidation_instances = instances['validation']\n\t\ttrain_instances = instances['train']\t\n\t\ttime_ = 0\n\t\tself.model_time = 0\t\t\n\t\tn_train = X.shape[0]\n\n\t\tif self.validation:\n\t\t\tvalidation_list = []\n\n\t\tX_torch = torch.from_numpy(X).float()\n\t\ty_torch = torch.from_numpy(y).float()\n\n\t\ttrue_solution ={}\n\t\tlogging.info(\"training started\")\n\t\tfor e in range(self.epochs):\n\t\t\tfor i in range(self.num_instance):\n\t\t\t\tstart_time = time.time()\n\t\t\t\tself.optimizer.zero_grad()\n\t\t\t\tsource, dest = train_instances[i]\n\t\t\t\tb = np.zeros(len(self.A))\n\t\t\t\tb[source] =1\n\t\t\t\tb[dest ]=-1\n\t\t\t\tif i not in true_solution:\n\t\t\t\t\tmodel = gp.Model()\n\t\t\t\t\tmodel.setParam('OutputFlag', 0)\n\t\t\t\t\tx = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name=\"x\")\n\t\t\t\t\tmodel.addConstr(self.A @ x == b, name=\"eq\")\n\t\t\t\t\tmodel.setObjective((y_torch.detach().numpy())@x, gp.GRB.MINIMIZE)\n\t\t\t\t\tmodel.optimize()\n\t\t\t\t\tx_true = x.X\n\n\t\t\t\t\ttrue_solution[i] = np.copy(x_true)\n\t\t\t\tx_true = true_solution[i]\n\n\t\t\t\tc_pred = self.net(X_torch).squeeze()\n\t\t\t\tc_spo = (2*c_pred - y_torch)\n\t\t\t\t\n\t\t\t\tmodel = gp.Model()\n\t\t\t\tmodel.setParam('OutputFlag', 0)\n\t\t\t\tx = model.addMVar(shape= self.A.shape[1], lb=0.0, ub=1.0,vtype=gp.GRB.CONTINUOUS, 
name=\"x\")\n\t\t\t\tmodel.addConstr(self.A @ x == b, name=\"eq\")\n\t\t\t\tmodel.setObjective((c_spo.detach().numpy())@x, gp.GRB.MINIMIZE)\n\t\t\t\tmodel.optimize()\n\t\t\t\t#print(model.status)\n\t\t\t\tx_spo = x.X\n\t\t\t\tgrad = torch.from_numpy( x_true - x_spo).float()\n\t\t\t\tloss = self.net(X_torch).squeeze()\n\t\t\t\tloss.backward(gradient=grad)\n\t\t\t\tself.optimizer.step()\n\t\t\t\tlogging.info(\"bkwd done\")\n\n\t\t\t\tend_time = time.time()\n\t\t\t\ttime_ += end_time - start_time\n\t\t\t\tif self.validation:\n\t\t\t\t\tif ((i+1)%20==0):\n\t\t\t\t\t\tvalidation_list.append( validation_module(self.net,self.A, \n\t\t\t\t\tX,y,train_instances,validation_instances, \n\t\t\t\t\ttest_instances,time_,e,i))\n\n\t\t\tprint(\"Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}\".format(e+1,loss.sum().item(),\n\t\t\t\tdatetime.datetime.now()))\n\t\tif self.validation :\n\t\n\t\t\tdd = defaultdict(list)\n\t\t\tfor d in validation_list:\n\t\t\t\tfor key, value in d.items():\n\t\t\t\t\tdd[key].append(value)\n\t\t\tdf = pd.DataFrame.from_dict(dd)\n\t\t\t# print(validation_module(self.net,self.A, \n\t\t\t# \t\t\tX,y,train_instances,validation_instances, \n\t\t\t# \t\t\ttest_instances,time_,e,i))\n\t\t\t# pred = self.predict(X)\n\t\t\t# print(mse(pred,y))\n\t\t\tlogging.info('Completion Time %s \\n' %str(datetime.datetime.now()) )\n\t\t\treturn df\n\tdef validation_result(self,X,y, instances):\n\t\tvalidation_rslt = get_loss(self.net, self.A, X,y,instances)\n\t\treturn validation_rslt[0], validation_rslt[1]\n \n\n\tdef predict(self,X):\n\t\tX_torch = torch.from_numpy(X).float()\n\t\tself.net.eval()\n\t\tpred= self.net(X_torch)\n\t\tself.net.train()\n\t\treturn pred.detach().detach().numpy().squeeze()", "'''\nHelper class for mtp.py\n\nDefines the leaf nodes of the tree, specifically\n- the computation of the predicted cost vectors and decisions within the given leaf of the tree\n- the SPO/MSE loss from using the predicted decision within the leaf\n'''\n\nimport numpy as np\nfrom SPOTree_scheduling.decision_problem_solver import*\n#from scipy.spatial import distance\n\n'''\nmtp.py depends on the classes and functions below. \nThese classes/methods are used to define the model object in each leaf node,\nas well as helper functions for certain operations in the tree fitting procedure.\n\nSummary of methods and functions to specify:\n Methods as a part of class LeafModel: fit(), predict(), to_string(), error(), error_pruning()\n Other helper functions: get_sub(), are_Ys_diverse()\n \n'''\n\n'''\nLeafModel: the model used in each leaf. \nHas five methods: fit, predict, to_string, error, error_pruning\n\nSPO_weight_param: number between 0 and 1:\nError metric: SPO_loss*SPO_weight_param + MSE_loss*(1-SPO_weight_param)\n'''\nclass LeafModel(object):\n \n #Any additional args passed to mtp's init() function are directly passed here\n def __init__(self,*args,**kwargs):\n self.SPO_weight_param = kwargs[\"SPO_weight_param\"]\n self.SPO_full_error = kwargs[\"SPO_full_error\"]\n return\n \n '''\n This function trains the leaf node model on the data (A,Y,weights).\n \n A and Y can take any form (lists, matrices, vectors, etc.). For our applications, I recommend making Y\n the response data (e.g., choices) and A alternative-specific data (e.g., features, choice sets)\n \n weights: a numpy array of case weights. Is 1-dimensional, with weights[i] yielding \n weight of observation/customer i. 
If you know you will not be using case weights\n in your particular application, you can ignore this input entirely.\n \n Returns 0 or 1.\n 0: No errors occurred when fitting leaf node model\n 1: An error occurred when fitting the leaf node model (probably due to insufficient data)\n If fit returns 1, then the tree will not consider the split that led to this leaf node model\n \n fit_init is a LeafModel object which represents a previously-trained leaf node model.\n If specified, fit_init is used for initialization when training this current LeafModel object.\n Useful for faster computation when fit_init's coefficients are close to the optimal solution of the new data.\n \n For those interested in defining their own leaf node functions:\n (1) It is not required to use the fit_init argument in your code\n (2) All edge cases must be handled in code below (ex: arguments\n consist of a single entry, weights are all zero, Y has one unique choice, etc.).\n In these cases, either hard-code a model that works with these edge-cases (e.g., \n if all Ys = 1, predict 1 with probability one), or have the fit function return 1 (error)\n (3) Store the fitted model as an attribute to the self object. You can name the attribute\n anything you want (i.e., it does not have to be self.model_obj and self.model_coef below),\n as long as its consistent with your predict_prob() and to_string() methods\n \n Any additional args passed to mtp's fit() function are directly passed here\n '''\n def fit(self, A, Y, weights, fit_init=None, refit=False, SPO_loss_bound=None, MSE_loss_bound=None, **kwargs): \n #no need to refit this model since it is already fit to optimality\n #note: change this behavior if debias=TRUE\n if refit == True:\n return(0)\n \n self.SPO_loss_bound = SPO_loss_bound\n self.MSE_loss_bound = MSE_loss_bound\n \n def fast_row_avg(X,weights):\n # print(X.shape)\n # print(weights.shape)\n return (np.matmul(weights,X)/sum(weights)).reshape(-1)\n \n #if no observations are mapped to this leaf, then assign any feasible cost vector here \n if sum(weights) == 0:\n self.mean_cost = np.ones(get_num_decisions(**kwargs))\n else:\n self.mean_cost = fast_row_avg(Y,weights)\n # print(self.mean_cost.reshape(1,-1).shape)\n self.decision = find_opt_decision(self.mean_cost.reshape(1,-1),**kwargs)['weights'].reshape(-1)\n \n return(0)\n \n '''\n This function applies model from fit() to predict choice data given new data A.\n Returns a list/numpy array of choices (one list entry per observation, i.e. l[i] yields prediction for ith obs.).\n Note: make sure to call fit() first before this method.\n \n Any additional args passed to mtp's predict() function are directly passed here\n '''\n def predict(self, A, get_cost=False, *args,**kwargs):\n if get_cost==True:\n #Returns predicted cost corresponding to this leaf node\n return np.array([self.mean_cost]*len(A))\n else:\n #Returns predicted decision corresponding to this leaf node\n return np.array([self.decision]*len(A))\n '''\n This function outputs the errors for each observation in pair (A,Y). 
\n Used in training when comparing different tree splits.\n Ex: mean-squared-error between observed data Y and predict(A)\n \n How to pass additional arguments to this function: simply pass these arguments to the init()/fit() functions and store them\n in the self object.\n '''\n def error(self,A,Y):\n def MSEloss(C,Cpred):\n #return distance.cdist(C, Cpred, 'sqeuclidean').reshape(-1)\n MSE = (C**2).sum(axis=1)[:, None] - 2 * C.dot(Cpred.transpose()) + ((Cpred**2).sum(axis=1)[None, :])\n return MSE.reshape(-1)\n \n def SPOloss(C,decision):\n return np.matmul(C,decision).reshape(-1)\n \n if self.SPO_weight_param == 1.0:\n if self.SPO_full_error == True:\n SPO_loss = SPOloss(Y,self.decision) - A\n else:\n SPO_loss = SPOloss(Y,self.decision)\n return SPO_loss\n elif self.SPO_weight_param == 0.0:\n MSE_loss = MSEloss(Y, self.mean_cost.reshape(1,-1))\n return MSE_loss\n else:\n if self.SPO_full_error == True:\n SPO_loss = SPOloss(Y,self.decision) - A\n else:\n SPO_loss = SPOloss(Y,self.decision)\n MSE_loss = MSEloss(Y, self.mean_cost.reshape(1,-1))\n return self.SPO_weight_param*SPO_loss/self.SPO_loss_bound+(1.0-self.SPO_weight_param)*MSE_loss/self.MSE_loss_bound\n \n '''\n This function outputs the errors for each observation in pair (A,Y). \n Used in pruning to determine the best tree subset.\n Ex: mean-squared-error between observed data Y and predict(A)\n \n How to pass additional arguments to this function: simply pass these arguments to the init()/fit() functions and store them\n in the self object.\n '''\n def error_pruning(self,A,Y):\n return self.error(A,Y)\n \n '''\n This function returns the string representation of the fitted model\n Used in traverse() method, which traverses the tree and prints out all terminal node models\n \n Any additional args passed to mtp's traverse() function are directly passed here\n '''\n def to_string(self,*leafargs,**leafkwargs):\n return \"Mean cost vector: \\n\" + str(self.mean_cost) +\"\\n\"+\"decision: \\n\"+str(self.decision)\n \n\n'''\nGiven attribute data A, choice data Y, and observation indices data_inds,\nextract those observations of A and Y corresponding to data_inds\n\nIf only attribute data A is given, returns A.\nIf only choice data Y is given, returns Y.\n\nUsed to partition the data in the tree-fitting procedure\n'''\ndef get_sub(data_inds,A=None,Y=None,is_boolvec=False):\n if A is None:\n return Y[data_inds]\n if Y is None:\n return A[data_inds]\n else:\n return A[data_inds],Y[data_inds]\n\n'''\nThis function takes as input choice data Y and outputs a boolean corresponding\nto whether all of the choices in Y are the same. \n\nIt is used as a test for whether we should make a node a leaf. If are_Ys_diverse(Y)=False,\nthen the node will become a leaf. Otherwise, if the node passes the other tests (doesn't exceed\nmax depth, etc), we will consider splitting on the node.\n'''\ndef are_Ys_diverse(Y):\n #return False iff all cost vectors (rows of Y) are the same\n tmp = [len(np.unique(Y[:,j])) for j in range(Y.shape[1])]\n return (np.max(tmp) > 1)\n\n" ]
[ [ "numpy.asarray", "numpy.sum" ], [ "torch.nn.Sequential", "numpy.linalg.matrix_rank", "torch.Tensor", "torch.zeros", "scipy.special.expit", "numpy.arange", "torch.eye", "torch.from_numpy", "numpy.random.shuffle", "sklearn.metrics.mean_squared_error", "torch.flatten", "torch.nn.Linear", "numpy.delete", "numpy.copy", "pandas.DataFrame.from_dict", "torch.nn.ReLU", "torch.nn.MSELoss" ], [ "numpy.max", "numpy.matmul", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
florischabert/fairseq
[ "21bd5ffc9928f33d01936aeca0298f1c339ce8b1" ]
[ "fairseq/modules/sinusoidal_positional_embedding.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.onnx.operators\n\nfrom fairseq import utils\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size,\n embedding_dim,\n padding_idx,\n )\n self.onnx_trace = False\n self.register_buffer('_float_tensor', torch.FloatTensor(1))\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(num_embeddings, embedding_dim, padding_idx=None):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(self, input, incremental_state=None, timestep=None):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = torch.onnx.operators.shape_as_tensor(input)\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n max_pos,\n self.embedding_dim,\n self.padding_idx,\n )\n self.weights = self.weights.type_as(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = (timestep.int() + 1).long() if timestep is not None else seq_len\n if self.onnx_trace:\n return self.weights[self.padding_idx + pos, :].unsqueeze(1).repeat(bsz, 1, 1)\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace)\n if self.onnx_trace:\n flat_embeddings = self.weights[positions.long()]\n embedding_shape = torch.LongTensor([1, 15, 256])\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(flat_embeddings, embedding_shape)\n return embeddings\n return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()\n\n def max_positions(self):\n \"\"\"Maximum number of supported positions.\"\"\"\n return int(1e5) # an arbitrary large number\n" ]
[ [ "torch.cos", "torch.LongTensor", "torch.zeros", "torch.sin", "torch.onnx.operators.shape_as_tensor", "torch.FloatTensor", "torch.arange", "torch.onnx.operators.reshape_from_tensor_shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OHBA-analysis/Quinn2021_Waveform
[ "76e4bbba719ee261dad4a59ab9c554cf05c152f6" ]
[ "emd_waveform_fig345.py" ]
[ "#!/usr/bin/python\n\n# vim: set expandtab ts=4 sw=4:\n\n# %% -----------------------------------------------------\n#\n# This script runs the simulations and analysis of the noisy 12Hz oscillator\n# seen in figures 3, 4 and 5. The oscillation is generated and some general EMD\n# and wavelet frequency metrics are computed. The three figures are then\n# generated using these variables.\n\n# %% -----------------------------------------------------\n# Imports and definitions\n\nimport os\nimport emd\nimport sails\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal, stats, ndimage\nfrom emd_waveform_utils import config\n\nimport matplotlib\nmatplotlib.rc('font', serif=config['fontname'])\n\n# %% ---------------------------------------------------\n# Define systems from Feynman Vol 1 50-6\n\ndef linear_system(x, K):\n \"\"\" A linear system which scales a signal by a factor\"\"\"\n return K * x\n\n\ndef nonlinear_system(x, K, eta=.43, power=2):\n \"\"\" A non-linear system which scales a signal by a factor introduces a\n waveform distortion\"\"\"\n return K * (x + eta * (x ** power))\n\n\n# %% ---------------------------------------------------\n# Generate simuated data\n\n# Create 60 seconds of data at 12Hz\npeak_freq = 12\nsample_rate = 512\nseconds = 60\nnoise_std = None\nx = emd.utils.ar_simulate(peak_freq, sample_rate, seconds,\n noise_std=noise_std, random_seed=42, r=.99)\nx = x * 1e-5\nt = np.linspace(0, seconds, seconds*sample_rate)\n\n# Apply linear and non-linear equations and add noise\n\nx_linear_raw = linear_system(x, K=1)\nx_nonlinear_raw = nonlinear_system(x, K=1, eta=2)\n\nx_linear = x_linear_raw + np.random.randn(len(t), 1)*2e-2\nx_nonlinear = x_nonlinear_raw + np.random.randn(len(t), 1)*2e-2\n\n# %% ---------------------------------------------------\n# Run frequency analyses\n\n# Welch's Periodogram\nf, pxx_linear = signal.welch(x_linear[:, 0], fs=sample_rate, nperseg=2048)\nf, pxx_nonlinear = signal.welch(x_nonlinear[:, 0], fs=sample_rate, nperseg=2048)\n\n# EMD\nsift_config = {'imf_opts': {'sd_thresh': 5e-2},\n 'mask_freqs': 120/sample_rate,\n 'mask_amp_mode': 'ratio_sig',\n 'mask_step_factor': 2.5}\n\nimf_linear = emd.sift.mask_sift(x_linear, **sift_config)\nimf_nonlinear = emd.sift.mask_sift(x_nonlinear, **sift_config)\n\nIP_linear, IF_linear, IA_linear = emd.spectra.frequency_transform(imf_linear, sample_rate, 'hilbert')\nIP_nonlinear, IF_nonlinear, IA_nonlinear = emd.spectra.frequency_transform(imf_nonlinear, sample_rate, 'hilbert')\n\n\n# %% --------------------------------------------------\n# Cycle analysis\n\ndef my_range(x):\n return x.max() - x.min()\n\ndef asc2desc(x):\n \"\"\"Ascending to Descending ratio ( A / A+D ).\"\"\"\n pt = emd.cycles.cf_peak_sample(x, interp=True)\n tt = emd.cycles.cf_trough_sample(x, interp=True)\n if (pt is None) or (tt is None):\n return np.nan\n asc = pt + (len(x) - tt)\n desc = tt - pt\n return asc / len(x)\n\ndef peak2trough(x):\n \"\"\"Peak to trough ratio ( P / P+T ).\"\"\"\n des = emd.cycles.cf_descending_zero_sample(x, interp=True)\n if des is None:\n return np.nan\n return des / len(x)\n\nCl = emd.cycles.Cycles(IP_linear[:, 2])\nCl.compute_cycle_metric('max_amp', IA_linear[:, 2], np.max)\nCl.compute_cycle_metric('max_if', IF_linear[:, 2], np.max)\nCl.compute_cycle_metric('if_range', IF_linear[:, 2], my_range)\n\nCn = emd.cycles.Cycles(IP_nonlinear[:, 2])\nCn.compute_cycle_metric('max_amp', IA_nonlinear[:, 2], np.max)\nCn.compute_cycle_metric('max_if', IF_nonlinear[:, 2], 
np.max)\nCn.compute_cycle_metric('if_range', IF_nonlinear[:, 2], my_range)\nCn.compute_cycle_metric('asc2desc', imf_nonlinear[:, 2], asc2desc)\nCn.compute_cycle_metric('peak2trough', imf_nonlinear[:, 2], peak2trough)\n\nconditions = ['is_good==1', 'max_amp>0.04', 'if_range<8', 'max_if<18']\npa_linear, phase_x = emd.cycles.phase_align(IP_linear[:, 2], IF_linear[:, 2],\n cycles=Cl.iterate(conditions=conditions))\n\npa_nonlinear, phase_x = emd.cycles.phase_align(IP_nonlinear[:, 2], IF_nonlinear[:, 2],\n cycles=Cn.iterate(conditions=conditions))\ndf_nonlinear = Cn.get_metric_dataframe(conditions=conditions)\n\n# %% --------------------------------------------------\n# Time-frequency transform\n\n# Hilbert-Huang Transform\nedges, centres = emd.spectra.define_hist_bins(0, 40, 64)\nspec_linear = emd.spectra.hilberthuang_1d(IF_linear, IA_linear, edges, mode='energy')/x_linear.shape[0]\nspec_nonlinear = emd.spectra.hilberthuang_1d(IF_nonlinear, IA_nonlinear, edges, mode='energy')/x_nonlinear.shape[0]\n\n# Carrier frequency histogram definition\nedges, bins = emd.spectra.define_hist_bins(2, 35, 64, 'linear')\n\n# Compute the 2d Hilbert-Huang transform (power over time x carrier frequency)\nhht_linear = emd.spectra.hilberthuang(IF_linear[:, 2], IA_linear[:, 2], edges, mode='amplitude')\nhht_nonlinear = emd.spectra.hilberthuang(IF_nonlinear[:, 2], IA_nonlinear[:, 2], edges, mode='amplitude')\n\n# Smooth HHTs to help visualisation\nhht_linear = ndimage.gaussian_filter(hht_linear, .5)\nhht_nonlinear = ndimage.gaussian_filter(hht_nonlinear, 1)\n\n# Compute 2d wavelet transform\ncwt_linear = sails.wavelet.morlet(x_linear[:, 0], bins, sample_rate, normalise='simple', ret_mode='amplitude')\ncwt_nonlinear = sails.wavelet.morlet(x_nonlinear[:, 0], bins, sample_rate, normalise='simple', ret_mode='amplitude')\n\n# %% --------------------------------------------------\n# FIGURE 3 - Example system with time-frequency transforms\n\n\ndef decorate_ax(ax):\n for tag in ['top', 'right']:\n ax.spines[tag].set_visible(False)\n\n\ninds = np.arange(7550, 8550)\n\nwidth = config['3col_width'] / 25.4\nheight = width\n\nmatches = Cn.get_matching_cycles(conditions)\ngoods = emd._cycles_support.project_cycles_to_samples(matches, Cn.cycle_vect)[:, 0]\n\nplt.figure(figsize=(width*2, height*2))\n\n# Plot time-series\nplt.axes([.1, .5, .875, .45], frameon=False)\nplt.xticks([])\nplt.yticks([])\nplt.plot(x_nonlinear[inds]+0.5, 'k')\nplt.plot(imf_nonlinear[inds, 2:].sum(axis=1)-0.25, 'g')\nplt.text(-50, 1, 'Cycle #', verticalalignment='center', horizontalalignment='right')\nplt.text(-50, 0.5, 'Signal', verticalalignment='center', horizontalalignment='right')\nplt.text(-50, -.2, 'IMF-3', verticalalignment='center', horizontalalignment='right')\n\n\n# Instantaneous Phase\nip = IP_nonlinear[inds, 2]\nbad_cycles = np.logical_or(np.diff(ip) < -3, goods[inds[:-1]] == False)\nbad_cycles = np.r_[bad_cycles, True]\nbad_cycles = goods[inds[:-1]] == False\nbad_cycles = np.r_[bad_cycles, True]\n\nip[np.where(np.diff(ip) < -3)[0]] = np.nan\nto_plot = ip/15 - 1.15\nplt.plot(to_plot)\n#to_plot[:np.where(np.isnan(to_plot))[0][17]] = np.nan\nto_plot[bad_cycles == False] = np.nan\nplt.plot(to_plot, 'r')\nmn = np.nanmin(to_plot)\nmx = np.nanmax(to_plot)\nplt.plot([-25, -25], [mn, mx], 'k')\nplt.plot([-35, len(inds)], [mx, mx], color=[.8, .8, .8], linewidth=.5)\nplt.plot([-35, len(inds)], [np.mean((mn, mx)), np.mean((mn, mx))], color=[.8, .8, .8], linewidth=.5)\nplt.plot([-35, len(inds)], [mn, mn], color=[.8, .8, .8], linewidth=.5)\nplt.text(-30, mx, 
'pi', verticalalignment='center', horizontalalignment='right')\nplt.text(-30, np.mean((mn, mx)), '0', verticalalignment='center', horizontalalignment='right')\nplt.text(-30, mn, '-pi', verticalalignment='center', horizontalalignment='right')\nplt.text(-105, np.mean((mn, mx)), 'Instantaneous\\nPhase (rads)', ha='center', va='center', rotation=0)\n\n# Instantanous Frequency\nfrange = emd._cycles_support.project_cycles_to_samples(Cn.metrics['if_range'], Cn.cycle_vect)[:, 0]\niif = IF_nonlinear[inds, 2].copy()\n#iif[goods==0] = np.nan\niif[bad_cycles] = np.nan\nto_plot = iif/20 - 2.15\nplt.plot(to_plot)\nfreq_range = np.array([8, 12, 16])\nfreq_range_conv = freq_range/20 - 2.2\n\nmn = np.nanmin(to_plot)\nmx = np.nanmax(to_plot)\nplt.plot([-25, -25], [mn, mx], 'k')\nplt.plot([-35, len(inds)], [mx, mx], color=[.8, .8, .8], linewidth=.5)\nplt.plot([-35, len(inds)], [np.mean((mn, mx)), np.mean((mn, mx))], color=[.8, .8, .8], linewidth=.5)\nplt.plot([-35, len(inds)], [mn, mn], color=[.8, .8, .8], linewidth=.5)\nfor ii in range(3):\n plt.text(-30, freq_range_conv[ii], '{0}Hz'.format(freq_range[ii]),\n verticalalignment='center', horizontalalignment='right')\nplt.text(-105, freq_range_conv[1], 'Instantaneous\\nFrequency (Hz)', ha='center', va='center', rotation=0)\n\n# Cycle Boundaries\nyl = plt.ylim()\ncycle_bounds = np.where(np.diff(Cn.cycle_vect[inds, 0]) > .5)[0]\nfor ii in range(len(cycle_bounds)):\n plt.plot([cycle_bounds[ii], cycle_bounds[ii]], [-2.2, 1.4], color=[.8, .8, .8], linewidth=.5)\n if ii < len(cycle_bounds)-1:\n plt.text( (cycle_bounds[ii]+cycle_bounds[ii+1])/2, 1, str(ii+1), horizontalalignment='center')\nplt.ylim(yl)\nplt.xlim(-55, 896)\n\n# Hilbert-Huang Transform\ntt = np.linspace(0, len(inds)/sample_rate, len(inds))\nplt.axes([.15, .275, .825, .2])\npcm = plt.pcolormesh(tt, bins, hht_nonlinear[:, inds], cmap='hot_r', vmin=0, vmax=.175)\nyl = plt.ylim()\nfor ii in range(len(cycle_bounds)):\n plt.plot([tt[cycle_bounds[ii]], t[cycle_bounds[ii]]], [0, bins[-1]], color=[.8, .8, .8], linewidth=.5)\nplt.ylim(yl)\nfor tag in ['top', 'right']:\n plt.gca().spines[tag].set_visible(False)\nplt.gca().set_xticklabels([])\nplt.ylabel('Frequency (Hz)')\nplt.xlim(0, 1.75)\nax = plt.axes([.97, .285, .015, .18])\ncb = plt.colorbar(pcm, cax=ax)\nax.yaxis.set_ticks_position('left')\ncb.set_label('Power')\n\n# Wavelet Transform\nplt.axes([.15, .05, .825, .2])\npcm = plt.pcolormesh(tt, bins, cwt_nonlinear[:, inds], cmap='hot_r', vmin=0, vmax=.175)\nyl = plt.ylim()\nfor ii in range(len(cycle_bounds)):\n plt.plot([tt[cycle_bounds[ii]], tt[cycle_bounds[ii]]], [0, bins[-1]], color=[.8, .8, .8], linewidth=.5)\nplt.ylim(yl)\nfor tag in ['top', 'right']:\n plt.gca().spines[tag].set_visible(False)\nplt.ylabel('Frequency (Hz)')\nplt.xlabel('Time (seconds)')\nplt.xlim(0, 1.75)\nax = plt.axes([.97, .06, .015, .18])\ncb = plt.colorbar(pcm, cax=ax)\nax.yaxis.set_ticks_position('left')\ncb.set_label('Power')\n\noutname = os.path.join(config['figdir'], 'emd_fig3_simu_decomp.png')\nplt.savefig(outname, dpi=300, transparent=True)\n\n# %% --------------------------------------------------\n# FIGURE 4 - PHASE ALIGNMENT IN SIMULATION\n\n# Get temporally aligned waveforms and instantanous frequencies\nwaveform_linear = np.zeros((100, Cl.ncycles))*np.nan\ninstfreq_linear = np.zeros((100, Cl.ncycles))*np.nan\n\nfor ii, inds in Cl.iterate(conditions=conditions):\n waveform_linear[:len(inds), ii] = imf_linear[inds, 2]\n instfreq_linear[:len(inds), ii] = IF_linear[inds, 2]\n\nctrl_linear = 
emd.cycles.get_control_points(imf_linear[:, 2], Cl.iterate(conditions=conditions), interp=True)\nctrl_mets_linear = emd.cycles.get_control_point_metrics(ctrl_linear)\n\nwaveform_nonlinear = np.zeros((100, Cn.ncycles))*np.nan\ninstfreq_nonlinear = np.zeros((100, Cn.ncycles))*np.nan\n\nfor ii, inds in Cn.iterate(conditions=conditions):\n waveform_nonlinear[:len(inds), ii] = imf_nonlinear[inds, 2]\n instfreq_nonlinear[:len(inds), ii] = IF_nonlinear[inds, 2]\n\nctrl_nonlinear = emd.cycles.get_control_points(imf_nonlinear[:, 2], Cn.iterate(conditions=conditions), interp=True)\nctrl_mets_nonlinear = emd.cycles.get_control_point_metrics(ctrl_nonlinear)\n\nI = np.argsort(ctrl_nonlinear[:, 4])[::-1]\nsegments = np.zeros((ctrl_nonlinear.shape[0], 60))*np.nan\nfor ii in range(ctrl_nonlinear.shape[0]):\n for jj in range(1, ctrl_nonlinear.shape[1]):\n # Round segments to ints for visualisation\n segments[ii, int(np.floor(ctrl_nonlinear[ii, jj-1])):int(np.ceil(ctrl_nonlinear[ii, jj]))] = jj\n\n# Figure start\nwidth = config['2col_width'] / 25.4\nheight = width\n\nplt.figure(figsize=(width*2, height*2))\n\n# Plot control point segments\nplt.axes([.1, .1, .2, .65])\nplt.pcolormesh(segments[I, :])\nplt.xticks(np.linspace(0, 40, 3))\ndecorate_ax(plt.gca())\nplt.ylabel('Cycles (sorted)')\nplt.xticks(np.linspace(0, 0.08*sample_rate, 5), np.linspace(0, 80, 5).astype(int))\nplt.xlabel('Time (ms)')\nplt.axes([.1, .775, .144, .075], frameon=False)\nplt.xticks([])\nplt.yticks([])\ncols = plt.cm.viridis(np.linspace(0, 1, 4))\nfor ii in range(4):\n xvals = np.linspace(0, .25)+.25*ii\n plt.plot(xvals, np.sin(2*np.pi*xvals), linewidth=3, color=cols[ii, :])\n\n# Plot control point metrics\nplt.axes([.31, .1, .1, .65])\nplt.plot(ctrl_mets_nonlinear[0][I], np.arange(len(ctrl_mets_nonlinear[0])), '.')\nplt.plot(ctrl_mets_nonlinear[1][I], np.arange(len(ctrl_mets_nonlinear[0])), '+')\nplt.plot(np.zeros_like(ctrl_mets_nonlinear[1][I]), np.arange(len(ctrl_mets_nonlinear[0])), 'k', linewidth=.5)\nplt.xlim(-.3, .3)\nplt.ylim(0, ctrl_nonlinear.shape[0])\nplt.yticks([])\ndecorate_ax(plt.gca())\n\nplt.axes([.31, .775, .1, .15])\nplt.hist(ctrl_mets_nonlinear[0], np.linspace(-1, 1), alpha=.5)\nplt.hist(ctrl_mets_nonlinear[1], np.linspace(-1, 1), alpha=.5)\nplt.xlim(-.3, .3)\nplt.xticks(np.linspace(-.25, .25, 3), [])\nplt.legend(['Peak/Trough', 'Ascent/Descent'], frameon=False,\n fontsize=8, loc='center', bbox_to_anchor=(0.175, 0.45, 1, 1))\nplt.ylim(0, 250)\ndecorate_ax(plt.gca())\nplt.title('Control-Point Ratios')\n\n# Plot temporally aligned instantaneous frequency\nplt.axes([.5, .1, .2, .65])\nplt.pcolormesh(instfreq_nonlinear[:, I].T)\ndecorate_ax(plt.gca())\nplt.xticks(np.linspace(0, 0.08*sample_rate, 5), np.linspace(0, 80, 5).astype(int))\nplt.xlabel('Time (ms)')\nplt.xlim(0, 60)\n\nplt.axes([.5, .775, .2, .15])\n#plt.plot(instfreq_nonlinear, color=[.8, .8, .8])\nplt.plot(np.nanmean(instfreq_nonlinear, axis=1))\ndecorate_ax(plt.gca())\nplt.title('Cycle-Onset Aligned IF')\nplt.xlim(0, 60)\nplt.xticks(np.linspace(0, 0.08*sample_rate, 5), [])\n\n# Plot phase aligned instantaneous frequency\nplt.axes([.75, .1, .2, .65])\npcm = plt.pcolormesh(pa_nonlinear[:, I].T)\nplt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])\nplt.xlabel('Theta Phase')\nplt.yticks(np.arange(6)*100, [])\n\nplt.axes([.75, .775, .2, .15])\n#plt.plot(pa_nonlinear[:, :-1], color=[.8, .8, .8])\nplt.plot(np.nanmean(pa_nonlinear[:, :-1], axis=1))\nplt.xlim(0, 48)\ndecorate_ax(plt.gca())\nplt.xticks(np.arange(5)*12, [])\nplt.title('Phase-Aligned 
IF')\n\n# Inst. freq colourbar\nax = plt.axes([.685, .45, .015, .18])\ncb = plt.colorbar(pcm, cax=ax)\nax.yaxis.set_ticks_position('left')\nplt.title('Instantaneous\\nFrequency (Hz)', fontsize=9)\n\noutname = os.path.join(config['figdir'], 'emd_fig4_simu_phasealign.png')\nplt.savefig(outname, dpi=300, transparent=True)\n\n# %% --------------------------------------------------\n# FIGURE 4 - PHASE ALIGNMENT IN SIMULATION : REVISED\nI2 = I[::5]\n\nwidth = config['2col_width'] / 25.4\nheight = config['3col_width'] / 25.4\n\ncol_height = 0.45\ntop_height = 0.3\n\n# Figure start\nplt.figure(figsize=(width*3, height*2))\n\n# Plot control point segments\nplt.axes([.1, .1, .2, col_height])\n#plt.pcolormesh(segments[I2, :])\nplt.plot(ctrl_nonlinear[I2, 1], np.arange(len(I2)), '^')\nplt.plot(ctrl_nonlinear[I2, 2], np.arange(len(I2)), 'x')\nplt.plot(ctrl_nonlinear[I2, 3], np.arange(len(I2)), 'v')\nplt.plot(ctrl_nonlinear[I2, 4], np.arange(len(I2)), '.')\nplt.legend(['Peak', 'Desc', 'Trough', 'Asc'], frameon=False, loc='center', bbox_to_anchor=(0.4, 0.2, 1, 1))\nplt.xticks(np.linspace(0, 64, 5), (np.linspace(0, 125, 5)).astype(int))\nplt.xlabel('Time (ms)')\nplt.xlim(0, 64)\nplt.ylim(0, len(I2))\nplt.ylabel('# Cycle (Sorted by duration)')\ndecorate_ax(plt.gca())\n\nplt.axes([.1, .6, .2, top_height-0.05])\nplt.plot((0.5, 0.5), (0, 800), 'k--')\n#plt.hist(ctrl_mets_nonlinear[0][I], np.linspace(-1, 1), alpha=.5)\n#plt.hist(ctrl_mets_nonlinear[1][I], np.linspace(-1, 1), alpha=.5)\nplt.hist(df_nonlinear['peak2trough'].values, np.linspace(0, 1), alpha=0.5)\nplt.hist(df_nonlinear['asc2desc'].values, np.linspace(0, 1), alpha=0.5)\n#plt.xticks(np.linspace(-.25, .25, 3))\nplt.legend(['Sinusoid', 'Peak/Trough', 'Ascent/Descent'], frameon=False,\n fontsize=10, loc='center', bbox_to_anchor=(0.5, 0.4, 1, 1))\ndecorate_ax(plt.gca())\nplt.xlim(1/3, 2/3)\nplt.ylim(0, 250)\nplt.title('Control-Point Ratios\\n')\nplt.xlabel('Ratio')\nplt.ylabel('Num Cycles')\n\n# Plot temporally aligned instantaneous frequency\nplt.axes([.425, .1, .2, col_height])\nplt.imshow(instfreq_nonlinear[:64, I2].T, interpolation='nearest', vmin=6, vmax=14, aspect='auto', origin='lower')\ndecorate_ax(plt.gca())\nplt.xticks(np.linspace(0, 64, 5), (np.linspace(0, 125, 5)).astype(int))\nplt.xlabel('Time (ms)')\nplt.xlim(0, 64)\n\nplt.axes([.425, .6, .2, top_height/2])\nmn = np.nanmean(instfreq_nonlinear[:, I], axis=1)\nsem = np.nanstd(instfreq_nonlinear[:, I], axis=1)\nsem = sem / np.sqrt(np.sum(np.isnan(instfreq_nonlinear[:, I]) == False, axis=1))\nplt.errorbar(np.arange(100), mn, yerr=sem, errorevery=4)\ndecorate_ax(plt.gca())\nplt.xticks(np.linspace(0, 64, 5), (np.linspace(0, 125, 5)).astype(int))\nplt.xlim(0, 64)\nplt.legend(['Avg IF (std-error of mean)'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)\nplt.ylabel('Instantaneous\\nFrequency (Hz)')\n\nplt.axes([.425, .8, .2, 0.075])\nplt.plot(np.nanmean(waveform_nonlinear[:, I], axis=1), 'k')\nfor tag in ['top', 'right', 'bottom']:\n plt.gca().spines[tag].set_visible(False)\nplt.xticks([])\nplt.ylim(-0.1, 0.1)\nplt.legend(['Avg Waveform'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)\nplt.xlim(0, 64)\nplt.ylabel(r'Amplitude (a.u.)')\nplt.title('Cycle-Onset Alignment\\n\\n')#\\nInstantaneous. 
Frequency\\n(std-error of mean)')\n\n# Plot phase aligned instantaneous frequency\nplt.axes([.75, .1, .2, col_height])\npcm = plt.imshow(pa_nonlinear[:, I2].T, interpolation='nearest', vmin=6, vmax=14, aspect='auto', origin='lower')\ndecorate_ax(plt.gca())\nplt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])\nplt.xlabel('Theta Phase (rads)')\n\nplt.axes([.75, .6, .2, top_height/2])\nmn = np.nanmean(pa_nonlinear[:, I], axis=1)\nsem = np.nanstd(pa_nonlinear[:, I], axis=1) / np.sqrt(I.shape[0])\nplt.errorbar(np.arange(48), mn, yerr=sem, errorevery=2)\nplt.xlim(0, 48)\ndecorate_ax(plt.gca())\nplt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])\nplt.ylabel('Instantaneous\\nFrequency (Hz)')\nplt.legend(['Avg IF (std-error of mean)'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)\n\nplt.axes([.75, .8, .2, 0.075])\nplt.plot(196*np.sin(2*np.pi*np.linspace(0, 1, 48)), 'k')\nfor tag in ['top', 'right', 'bottom']:\n plt.gca().spines[tag].set_visible(False)\nplt.xticks([])\nplt.ylim(-200, 200)\nplt.legend(['Avg Waveform'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)\nplt.ylabel(r'Amplitude (a.u.)')\nplt.title('Phase Alignment\\n\\n')#\\nInstantaneous. Frequency\\n(std-error of mean)')\n\n# Inst. freq colourbar\nax = plt.axes([.635, .25, .015, .18])\ncb = plt.colorbar(pcm, cax=ax)\nax.yaxis.set_ticks_position('left')\nplt.title('Instantaneous\\nFrequency (Hz)\\n', fontsize=12)\n\noutname = os.path.join(config['figdir'], 'emd_fig4_simu_phasealign_revised.png')\nplt.savefig(outname, dpi=300, transparent=True)\n\noutname = os.path.join(config['figdir'], 'emd_fig4_simu_phasealign_revised.pdf')\nplt.savefig(outname, dpi=300, transparent=True)\n\n# %% --------------------------------------------------\n# FIGURE 5 - SHAPE COMPARISON\n\npa_linear_avg = np.nanmean(pa_linear, axis=1)\nfs = np.mean(pa_linear_avg)\nlin_phase = emd.spectra.phase_from_freq(pa_linear_avg, 48*fs)\npa_nonlinear_avg = np.nanmean(pa_nonlinear, axis=1)\nfs = np.mean(pa_nonlinear_avg)\nnonlin_phase = emd.spectra.phase_from_freq(pa_nonlinear_avg, 48*fs)\nlin_phase = np.r_[-np.pi, lin_phase]\nnonlin_phase = np.r_[-np.pi, nonlin_phase]\n\ncols = [np.array([31, 119, 180])/255, np.array([255, 127, 14])/255]\n\ncmap = 'hot_r'\ninds = np.arange(sample_rate*1.6, sample_rate*2.55).astype(int)\n\nwidth = config['2col_width'] / 25.4\nheight = config['3col_width'] / 25.4\n\nplt.figure(figsize=(width*2, height*2))\n\n# Original time-series\nplt.axes([.3, .8, .4, .15], frameon=False)\nplt.plot(x_linear_raw[inds], 'k')\nplt.xticks([])\nplt.yticks([])\n\n# Linear and nonlinear systems\nplt.axes([0.05, .6, .4, .25], frameon=False)\nplt.plot(x_linear[inds]+.4, 'k')\nplt.plot(imf_linear[inds, 2]-.2, color=cols[0])\nplt.xticks([])\nplt.yticks([])\nplt.axes([0.55, .6, .4, .25], frameon=False)\nplt.plot(x_nonlinear[inds]+.4, 'k')\nplt.plot(imf_nonlinear[inds, 2]-.2, color=cols[1])\nplt.xticks([])\nplt.yticks([])\n\n# Linear phase-aligned IF\nplt.axes([0.05+.1, .35, .2, .2])\nplt.plot(pa_linear, color=[.8, .8, .8])\nplt.plot(np.nanmean(pa_linear, axis=1), color=cols[0])\nplt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])\nplt.grid(True)\nplt.xlim(0, 48)\nplt.ylim(5, 20)\ndecorate_ax(plt.gca())\nplt.title('Phase-Aligned IF')\nplt.xlabel('Theta-Phase')\n\n# Non-linear phase-aligned IF\nplt.axes([.55+.1, .35, .2, .2])\nplt.plot(pa_nonlinear, color=[.8, .8, .8])\nplt.plot(np.nanmean(pa_nonlinear, axis=1), color=cols[1])\nplt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 
'pi'])\nplt.grid(True)\nplt.xlim(0, 48)\nplt.ylim(5, 20)\ndecorate_ax(plt.gca())\nplt.title('Phase-Aligned IF')\nplt.xlabel('Theta-Phase')\n\n# Phase-aligned IF comparison\nplt.axes([.075, .05, .2, .175])\nplt.plot(np.nanmean(pa_linear, axis=1), color=cols[0])\nplt.plot(np.nanmean(pa_nonlinear, axis=1), color=cols[1])\nplt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])\nplt.grid(True)\ndecorate_ax(plt.gca())\nplt.ylim(9, 15)\nplt.title('Phase-Aligned\\nAverage IF')\n\n# Phase aligned IF t-test\nplt.axes([.4, .05, .2, .175])\nt, p = stats.ttest_ind(pa_nonlinear, pa_linear, axis=1)\nplt.plot(t)\nplt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])\nplt.grid(True)\ndecorate_ax(plt.gca())\nplt.xlim(0, 48)\nplt.ylabel('t-value')\nplt.title('Nonlinear>Linear\\nt-test')\n\n# Normalised waveforms\nplt.axes([.725, .05, .2, .175])\nplt.plot(-np.sin(lin_phase), color=cols[0])\nplt.plot(-np.sin(nonlin_phase), color=cols[1])\nplt.xticks(np.arange(5)*12, [])\nplt.grid(True)\nplt.legend(['Linear', 'Nonlinear'])\ndecorate_ax(plt.gca())\nplt.ylim(-1, 1)\nplt.xlim(0, 4*12)\nplt.yticks(np.linspace(-1, 1, 3))\nplt.title('Normalised Waveforms')\n\noutname = os.path.join(config['figdir'], 'emd_fig5_shape_compare.png')\nplt.savefig(outname, dpi=300, transparent=True)\n" ]
[ [ "numpy.nanmax", "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "numpy.sqrt", "numpy.linspace", "numpy.nanmin", "matplotlib.pyplot.axes", "matplotlib.pyplot.plot", "numpy.mean", "numpy.nanmean", "numpy.zeros_like", "numpy.nanstd", "scipy.signal.welch", "matplotlib.pyplot.gca", "numpy.arange", "numpy.sin", "numpy.ceil", "numpy.diff", "matplotlib.pyplot.text", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.isnan", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "numpy.floor", "numpy.argsort", "numpy.array", "matplotlib.rc", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xticks", "scipy.ndimage.gaussian_filter", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.pcolormesh", "matplotlib.pyplot.yticks", "scipy.stats.ttest_ind" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
Zuwei-Zhao/ort-customops
[ "4e78e9651bdcbe8518448c3cbeab90eb92a1be05" ]
[ "test/test_mytorch.py" ]
[ "import onnx\nimport unittest\nimport torchvision\nimport numpy as np\nfrom onnxruntime_customops.utils import trace_for_onnx, op_from_model\nfrom onnxruntime_customops import eager_op, hook_model_op, PyOp, mytorch as torch\n\n\nclass TestTorchE2E(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.mobilenet = torchvision.models.mobilenet_v2(pretrained=True)\n cls.argmax_input = None\n\n @staticmethod\n def on_hook(*x):\n TestTorchE2E.argmax_input = x[0]\n return x\n\n def test_imagenet_postprocess(self):\n mb_core_path = \"mobilev2.onnx\"\n mb_full_path = \"mobilev2_full.onnx\"\n dummy_input = torch.randn(10, 3, 224, 224)\n np_input = dummy_input.numpy()\n torch.onnx.export(self.mobilenet, dummy_input, mb_core_path, opset_version=11)\n mbnet2 = op_from_model(mb_core_path)\n\n with trace_for_onnx(dummy_input, names=['b10_input']) as tc_sess:\n scores = mbnet2(*tc_sess.get_inputs())\n probabilities = torch.softmax(scores, dim=1)\n batch_top1 = probabilities.argmax(dim=1)\n\n np_argmax = probabilities.numpy() # for the result comparison\n np_output = batch_top1.numpy()\n\n tc_sess.save_as_onnx(mb_full_path, batch_top1)\n\n hkdmdl = hook_model_op(onnx.load_model(mb_full_path), 'argmax', self.on_hook, [PyOp.dt_float])\n mbnet2_full = eager_op.EagerOp.from_model(hkdmdl)\n batch_top1_2 = mbnet2_full(np_input)\n np.testing.assert_allclose(np_argmax, self.argmax_input, rtol=1e-5)\n np.testing.assert_array_equal(batch_top1_2, np_output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhuoju36/StructEngPy
[ "ec279271e3468a4a8418bf722b5ceee003abb37c" ]
[ "csys.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 22 21:57:50 2016\n\n@author: HZJ\n\"\"\"\nimport numpy as np\nimport uuid\n\nclass Cartisian(object):\n def __init__(self,origin, pt1, pt2, name=None):\n \"\"\"\n origin: 3x1 vector\n pt1: 3x1 vector\n pt2: 3x1 vector\n \"\"\"\n self.__origin=origin \n vec1 = np.array([pt1[0] - origin[0] , pt1[1] - origin[1] , pt1[2] - origin[2]])\n vec2 = np.array([pt2[0] - origin[0] , pt2[1] - origin[1] , pt2[2] - origin[2]])\n cos = np.dot(vec1, vec2)/np.linalg.norm(vec1)/np.linalg.norm(vec2)\n if cos == 1 or cos == -1:\n raise Exception(\"Three points should not in a line!!\") \n self.__x = vec1/np.linalg.norm(vec1)\n z = np.cross(vec1, vec2)\n self.__z = z/np.linalg.norm(z)\n self.__y = np.cross(self.z, self.x)\n self.__name=uuid.uuid1() if name==None else name\n \n @property\n def name(self):\n return self.__name\n \n @property\n def origin(self):\n return self.__origin\n \n @property\n def x(self):\n return self.__x\n \n @property\n def y(self):\n return self.__y\n \n @property\n def z(self):\n return self.__z\n \n @property\n def transform_matrix(self):\n x=self.x\n y=self.y\n z=self.z\n V=np.array([[x[0],y[0],z[0]],\n [x[1],y[1],z[1]],\n [x[2],y[2],z[2]]])\n return V.transpose()\n \n def set_by_3pts(self,origin, pt1, pt2):\n \"\"\"\n origin: tuple 3\n pt1: tuple 3\n pt2: tuple 3\n \"\"\"\n self.origin=origin \n vec1 = np.array([pt1[0] - origin[0] , pt1[1] - origin[1] , pt1[2] - origin[2]])\n vec2 = np.array([pt2[0] - origin[0] , pt2[1] - origin[1] , pt2[2] - origin[2]])\n cos = np.dot(vec1, vec2)/np.linalg.norm(vec1)/np.linalg.norm(vec2)\n if cos == 1 or cos == -1:\n raise Exception(\"Three points should not in a line!!\") \n self.x = vec1/np.linalg.norm(vec1)\n z = np.cross(vec1, vec2)\n self.z = z/np.linalg.norm(z)\n self.y = np.cross(self.z, self.x)\n \n def set_origin(self,x, y, z):\n \"\"\"\n origin: tuple 3\n pt1: tuple 3\n pt2: tuple 3\n \"\"\"\n self.origin = (x,y,z)\n \n def align_with_global(self):\n self.__x=np.array([1,0,0])\n self.__y=np.array([0,1,0])\n self.__z=np.array([0,0,1])\n\nif __name__=='__main__':\n csys=Cartisian((0,0,0),(1,1,0),(0,1,0))\n" ]
[ [ "numpy.dot", "numpy.array", "numpy.linalg.norm", "numpy.cross" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
apls777/kaggle-imaterialist2020-model
[ "7822b52f743afb3367a4448a303ac1ee0f869e1d", "7822b52f743afb3367a4448a303ac1ee0f869e1d", "6a653615fa48cbeaf34adda7c0545a49739b3189", "7822b52f743afb3367a4448a303ac1ee0f869e1d", "7822b52f743afb3367a4448a303ac1ee0f869e1d" ]
[ "tf_tpu_models/official/mnasnet/mnasnet_models.py", "tf_tpu_models/official/mask_rcnn/submission.py", "tf_tpu_models/official/detection/modeling/maskrcnn_model.py", "tf_tpu_models/official/mask_rcnn/object_detection/preprocessor.py", "tf_tpu_models/official/mnasnet/mnasnet_model.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Predefined MnasNet models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow.compat.v1 as tf\n\nfrom . import mnasnet_model\n\n\nclass MnasNetDecoder(object):\n \"\"\"A class of MnasNet decoder to get model configuration.\"\"\"\n\n def _decode_block_string(self, block_string):\n \"\"\"Gets a MNasNet block through a string notation of arguments.\n\n E.g. r2_k3_s2_e1_i32_o16_se0.25_noskip: r - number of repeat blocks,\n k - kernel size, s - strides (1-9), e - expansion ratio, i - input filters,\n o - output filters, se - squeeze/excitation ratio\n\n Args:\n block_string: a string, a string representation of block arguments.\n\n Returns:\n A BlockArgs instance.\n Raises:\n ValueError: if the strides option is not correctly specified.\n \"\"\"\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n if 's' not in options or len(options['s']) != 2:\n raise ValueError('Strides options should be a pair of integers.')\n\n return mnasnet_model.BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n strides=[int(options['s'][0]), int(options['s'][1])])\n\n def _encode_block_string(self, block):\n \"\"\"Encodes a MnasNet block to a string.\"\"\"\n args = [\n 'r%d' % block.num_repeat,\n 'k%d' % block.kernel_size,\n 's%d%d' % (block.strides[0], block.strides[1]),\n 'e%s' % block.expand_ratio,\n 'i%d' % block.input_filters,\n 'o%d' % block.output_filters\n ]\n if (block.se_ratio is not None and block.se_ratio > 0 and\n block.se_ratio <= 1):\n args.append('se%s' % block.se_ratio)\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n\n def decode(self, string_list):\n \"\"\"Decodes a list of string notations to specify blocks inside the network.\n\n Args:\n string_list: a list of strings, each string is a notation of MnasNet\n block.\n\n Returns:\n A list of namedtuples to represent MnasNet blocks arguments.\n \"\"\"\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(self._decode_block_string(block_string))\n return blocks_args\n\n def encode(self, blocks_args):\n \"\"\"Encodes a list of MnasNet Blocks to a list of strings.\n\n Args:\n blocks_args: A list of namedtuples to represent MnasNet blocks arguments.\n Returns:\n a list of strings, each string is a notation of MnasNet block.\n \"\"\"\n block_strings = []\n for block in 
blocks_args:\n block_strings.append(self._encode_block_string(block))\n return block_strings\n\n\ndef mnasnet_b1(depth_multiplier=None):\n \"\"\"Creates a mnasnet-b1 model.\n\n Args:\n depth_multiplier: multiplier to number of filters per layer.\n\n Returns:\n blocks_args: a list of BlocksArgs for internal MnasNet blocks.\n global_params: GlobalParams, global parameters for the model.\n \"\"\"\n blocks_args = [\n 'r1_k3_s11_e1_i32_o16_noskip', 'r3_k3_s22_e3_i16_o24',\n 'r3_k5_s22_e3_i24_o40', 'r3_k5_s22_e6_i40_o80', 'r2_k3_s11_e6_i80_o96',\n 'r4_k5_s22_e6_i96_o192', 'r1_k3_s11_e6_i192_o320_noskip'\n ]\n decoder = MnasNetDecoder()\n global_params = mnasnet_model.GlobalParams(\n batch_norm_momentum=0.99,\n batch_norm_epsilon=1e-3,\n dropout_rate=0.2,\n data_format='channels_last',\n num_classes=1000,\n depth_multiplier=depth_multiplier,\n depth_divisor=8,\n min_depth=None,\n stem_size=32,\n use_keras=True)\n return decoder.decode(blocks_args), global_params\n\n\ndef mnasnet_a1(depth_multiplier=None):\n \"\"\"Creates a mnasnet-a1 model.\n\n Args:\n depth_multiplier: multiplier to number of filters per layer.\n\n Returns:\n blocks_args: a list of BlocksArgs for internal MnasNet blocks.\n global_params: GlobalParams, global parameters for the model.\n \"\"\"\n blocks_args = [\n 'r1_k3_s11_e1_i32_o16_noskip', 'r2_k3_s22_e6_i16_o24',\n 'r3_k5_s22_e3_i24_o40_se0.25', 'r4_k3_s22_e6_i40_o80',\n 'r2_k3_s11_e6_i80_o112_se0.25', 'r3_k5_s22_e6_i112_o160_se0.25',\n 'r1_k3_s11_e6_i160_o320'\n ]\n global_params = mnasnet_model.GlobalParams(\n batch_norm_momentum=0.99,\n batch_norm_epsilon=1e-3,\n dropout_rate=0.2,\n data_format='channels_last',\n num_classes=1000,\n depth_multiplier=depth_multiplier,\n depth_divisor=8,\n min_depth=None,\n stem_size=32,\n use_keras=True)\n decoder = MnasNetDecoder()\n return decoder.decode(blocks_args), global_params\n\n\ndef mnasnet_small(depth_multiplier=None):\n \"\"\"Creates a mnasnet-small model.\n\n Args:\n depth_multiplier: multiplier to number of filters per layer.\n\n Returns:\n blocks_args: a list of BlocksArgs for internal MnasNet blocks.\n global_params: GlobalParams, global parameters for the model.\n \"\"\"\n blocks_args = [\n 'r1_k3_s11_e1_i16_o8', 'r1_k3_s22_e3_i8_o16',\n 'r2_k3_s22_e6_i16_o16', 'r4_k5_s22_e6_i16_o32_se0.25',\n 'r3_k3_s11_e6_i32_o32_se0.25', 'r3_k5_s22_e6_i32_o88_se0.25',\n 'r1_k3_s11_e6_i88_o144'\n ]\n global_params = mnasnet_model.GlobalParams(\n batch_norm_momentum=0.99,\n batch_norm_epsilon=1e-3,\n dropout_rate=0,\n data_format='channels_last',\n num_classes=1000,\n depth_multiplier=depth_multiplier,\n depth_divisor=8,\n min_depth=None,\n stem_size=8,\n use_keras=True)\n decoder = MnasNetDecoder()\n return decoder.decode(blocks_args), global_params\n\n\ndef mnasnet_d1(depth_multiplier=None):\n \"\"\"Creates a jointly searched mnasnet backbone for mnas-fpn.\n\n Args:\n depth_multiplier: multiplier to number of filters per layer.\n\n Returns:\n blocks_args: a list of BlocksArgs for internal MnasNet blocks.\n global_params: GlobalParams, global parameters for the model.\n \"\"\"\n blocks_args = [\n 'r1_k3_s11_e9_i32_o24', 'r3_k3_s22_e9_i24_o36',\n 'r5_k3_s22_e9_i36_o48', 'r4_k5_s22_e9_i48_o96',\n 'r5_k7_s11_e3_i96_o96', 'r3_k3_s22_e9_i96_o80',\n 'r1_k7_s11_e6_i80_o320_noskip'\n ]\n global_params = mnasnet_model.GlobalParams(\n batch_norm_momentum=0.99,\n batch_norm_epsilon=1e-3,\n dropout_rate=0.2,\n data_format='channels_last',\n num_classes=1000,\n depth_multiplier=depth_multiplier,\n depth_divisor=8,\n min_depth=None,\n stem_size=32,\n 
use_keras=False)\n decoder = MnasNetDecoder()\n return decoder.decode(blocks_args), global_params\n\n\ndef mnasnet_d1_320(depth_multiplier=None):\n \"\"\"Creates a jointly searched mnasnet backbone for 320x320 input size.\n\n Args:\n depth_multiplier: multiplier to number of filters per layer.\n\n Returns:\n blocks_args: a list of BlocksArgs for internal MnasNet blocks.\n global_params: GlobalParams, global parameters for the model.\n \"\"\"\n blocks_args = [\n 'r3_k5_s11_e6_i32_o24', 'r4_k7_s22_e9_i24_o36',\n 'r5_k5_s22_e9_i36_o48', 'r5_k7_s22_e6_i48_o96',\n 'r5_k3_s11_e9_i96_o144', 'r5_k5_s22_e6_i144_o160',\n 'r1_k7_s11_e9_i160_o320'\n ]\n\n global_params = mnasnet_model.GlobalParams(\n batch_norm_momentum=0.99,\n batch_norm_epsilon=1e-3,\n dropout_rate=0.2,\n data_format='channels_last',\n num_classes=1000,\n depth_multiplier=depth_multiplier,\n depth_divisor=8,\n min_depth=None,\n stem_size=32,\n use_keras=False)\n decoder = MnasNetDecoder()\n return decoder.decode(blocks_args), global_params\n\n\ndef get_model_params(model_name, override_params):\n \"\"\"Get the block args and global params for a given model.\"\"\"\n if model_name == 'mnasnet-a1':\n blocks_args, global_params = mnasnet_a1()\n elif model_name == 'mnasnet-b1':\n blocks_args, global_params = mnasnet_b1()\n elif model_name == 'mnasnet-small':\n blocks_args, global_params = mnasnet_small()\n elif model_name == 'mnasnet-d1':\n blocks_args, global_params = mnasnet_d1()\n elif model_name == 'mnasnet-d1-320':\n blocks_args, global_params = mnasnet_d1_320()\n else:\n raise NotImplementedError('model name is not pre-defined: %s' % model_name)\n\n if override_params:\n # ValueError will be raised here if override_params has fields not included\n # in global_params.\n global_params = global_params._replace(**override_params)\n return blocks_args, global_params\n\n\ndef build_mnasnet_model(images, model_name, training, override_params=None):\n \"\"\"A helper function to create a MnasNet model and return predicted logits.\n\n Args:\n images: input images tensor.\n model_name: string, the model name of a pre-defined MnasNet.\n training: boolean, whether the model is constructed for training.\n override_params: A dictionary of params for overriding. Fields must exist in\n mnasnet_model.GlobalParams.\n\n Returns:\n logits: the logits tensor of classes.\n endpoints: the endpoints for each layer.\n Raises:\n When model_name specifies an undefined model, raises NotImplementedError.\n When override_params has invalid fields, raises ValueError.\n \"\"\"\n assert isinstance(images, tf.Tensor)\n blocks_args, global_params = get_model_params(model_name, override_params)\n with tf.variable_scope(model_name):\n model = mnasnet_model.MnasNetModel(blocks_args, global_params)\n logits = model(images, training=training)\n\n logits = tf.squeeze(tf.expand_dims(logits, 0), 0)\n logits = tf.identity(logits, 'logits')\n return logits, model.endpoints\n\n\ndef build_mnasnet_base(images, model_name, training, override_params=None):\n \"\"\"A helper function to create a MnasNet base model and return global_pool.\n\n Args:\n images: input images tensor.\n model_name: string, the model name of a pre-defined MnasNet.\n training: boolean, whether the model is constructed for training.\n override_params: A dictionary of params for overriding. 
Fields must exist in\n mnasnet_model.GlobalParams.\n\n Returns:\n features: global pool features.\n endpoints: the endpoints for each layer.\n Raises:\n When model_name specifies an undefined model, raises NotImplementedError.\n When override_params has invalid fields, raises ValueError.\n \"\"\"\n assert isinstance(images, tf.Tensor)\n blocks_args, global_params = get_model_params(model_name, override_params)\n\n with tf.variable_scope(model_name):\n model = mnasnet_model.MnasNetModel(blocks_args, global_params)\n features = model(images, training=training, features_only=True)\n\n features = tf.identity(features, 'global_pool')\n return features, model.endpoints\n", "import json\nimport logging\nfrom tf_tpu_models.official.mask_rcnn.coco_utils import generate_segmentation_from_masks\nfrom tf_tpu_models.official.mask_rcnn.evaluation import process_prediction_for_eval\nimport six\nimport numpy as np\nfrom PIL import Image\nimport os\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.summary import summary_iterator\n\n\nSUBMISSION_IMAGE_SIZE = 1024\n\n\ndef generate_submission(eval_estimator,\n input_fn,\n checkpoint_path,\n num_attributes):\n \"\"\"Runs inference once and generates submission rows.\"\"\"\n predictor = eval_estimator.predict(input_fn=input_fn, yield_single_examples=False, checkpoint_path=checkpoint_path)\n # Every predictor.next() gets a batch of prediction (a dictionary).\n\n # get attribute thresholds\n step = int(checkpoint_path.rsplit('-', 1)[-1])\n attr_thresholds = get_attribute_thresholds(eval_estimator.model_dir, step, num_attributes)\n\n # load image IDs\n with open('/workspace/project/data/test_coco.json') as f:\n test_annotations = json.load(f)\n\n image_filenames = {int(image['id']): image['file_name'] for image in test_annotations['images']}\n\n batch_idx = 0\n rows = []\n while True:\n try:\n batch_predictions = six.next(predictor)\n logging.info('Running inference on batch %d...', (batch_idx + 1))\n except StopIteration:\n logging.info('Finished the eval set at %d batch.', (batch_idx + 1))\n break\n\n batch_predictions = process_prediction_for_eval(batch_predictions)\n rows += _generate_submission_rows(batch_predictions, attr_thresholds, image_filenames)\n batch_idx += 1\n\n return rows\n\n\ndef _generate_submission_rows(predictions, attr_thresholds, image_filenames):\n rows = []\n for i, image_id in enumerate(predictions['source_id']):\n if (i + 1) % 100 == 0:\n logging.info(' loading image %d/%d...' 
% (i + 1, len(predictions['source_id'])))\n\n image_height = int(predictions['image_info'][i][3])\n image_width = int(predictions['image_info'][i][4])\n\n if image_width > image_height:\n new_width = SUBMISSION_IMAGE_SIZE\n new_height = int(image_height / (image_width / new_width))\n else:\n new_height = SUBMISSION_IMAGE_SIZE\n new_width = int(image_width / (image_height / new_height))\n\n for box_index in range(int(predictions['num_detections'][i])):\n mask = generate_segmentation_from_masks(predictions['detection_masks'][i][box_index:(box_index + 1)],\n predictions['detection_boxes'][i][box_index:(box_index + 1)],\n image_height,\n image_width,\n is_image_mask=False)[0]\n\n pil_image = Image.fromarray(mask.astype(np.uint8))\n pil_image = pil_image.resize((new_width, new_height), Image.NEAREST)\n resized_binary_mask = np.asarray(pil_image)\n encoded_mask = rle_encode(resized_binary_mask)\n\n # get attributes\n attr_predictions = predictions['detection_attributes'][i][box_index]\n attr_ids = np.argwhere(attr_predictions >= attr_thresholds).flatten()\n\n bbox_x, bbox_y, bbox_w, bbox_h = predictions['detection_boxes'][i][box_index]\n\n row = {\n 'ImageId': image_filenames[int(image_id)].split('.')[0],\n 'EncodedPixels': ' '.join(str(x) for x in encoded_mask),\n 'ClassId': int(predictions['detection_classes'][i][box_index]) - 1,\n 'AttributesIds': ','.join(str(attr_id) for attr_id in attr_ids),\n 'image_width': new_width,\n 'image_height': new_height,\n 'mask_area': resized_binary_mask.sum(),\n 'bbox_x': bbox_x,\n 'bbox_y': bbox_y,\n 'bbox_width': bbox_w,\n 'bbox_height': bbox_h,\n 'score': predictions['detection_scores'][i][box_index],\n }\n\n rows.append(row)\n\n return rows\n\n\ndef rle_encode(mask):\n pixels = mask.T.flatten()\n\n # We need to allow for cases where there is a '1' at either end of the sequence.\n # We do this by padding with a zero at each end when needed.\n use_padding = False\n if pixels[0] or pixels[-1]:\n use_padding = True\n pixel_padded = np.zeros([len(pixels) + 2], dtype=pixels.dtype)\n pixel_padded[1:-1] = pixels\n pixels = pixel_padded\n\n rle = np.where(pixels[1:] != pixels[:-1])[0] + 2\n if use_padding:\n rle = rle - 1\n\n rle[1::2] = rle[1::2] - rle[:-1:2]\n\n return rle\n\n\ndef get_attribute_thresholds(model_dir: str, step: int, num_attributes: int):\n \"\"\"Reads per-attribute score thresholds for the given step from the eval summaries.\"\"\"\n eval_result = None\n for event_file in gfile.Glob(os.path.join(model_dir, 'eval', '*.tfevents.*')):\n for event in summary_iterator.summary_iterator(event_file):\n if event.step == step:\n assert event.HasField('summary')\n\n eval_result = {}\n for value in event.summary.value:\n if value.HasField('simple_value'):\n eval_result[value.tag] = value.simple_value\n\n break\n\n thresholds = np.zeros(num_attributes, dtype=np.float32)\n for metric_name, value in eval_result.items():\n if metric_name.startswith('attribute_threshold/attr_'):\n attr_id = int(metric_name.rsplit('_', 1)[-1])\n thresholds[attr_id] = value\n\n return thresholds\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model definition for the Mask R-CNN Model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom dataloader import anchor\nfrom dataloader import mode_keys\nfrom modeling import base_model\nfrom modeling import losses\nfrom modeling.architecture import factory\nfrom ops import postprocess_ops\nfrom ops import roi_ops\nfrom ops import sampling_ops\nfrom ops import spatial_transform_ops\nfrom utils import box_utils\n\n\nclass MaskrcnnModel(base_model.BaseModel):\n \"\"\"Mask R-CNN model function.\"\"\"\n\n def __init__(self, params):\n super(MaskrcnnModel, self).__init__(params)\n\n self._params = params\n\n self._include_mask = params.architecture.include_mask\n self._include_attributes = (params.architecture.num_attributes is not None)\n\n # Architecture generators.\n self._backbone_fn = factory.backbone_generator(params)\n self._fpn_fn = factory.multilevel_features_generator(params)\n self._rpn_head_fn = factory.rpn_head_generator(params)\n self._generate_rois_fn = roi_ops.ROIGenerator(params.roi_proposal)\n self._sample_rois_fn = sampling_ops.ROISampler(params.roi_sampling)\n self._sample_masks_fn = sampling_ops.MaskSampler(\n params.architecture.mask_target_size,\n params.mask_sampling.num_mask_samples_per_image)\n\n self._frcnn_head_fn = factory.fast_rcnn_head_generator(params)\n\n if self._include_mask:\n self._mrcnn_head_fn = factory.mask_rcnn_head_generator(params)\n\n if self._include_attributes:\n self._attributes_head_fn = factory.attributes_head_generator(params)\n\n # Loss function.\n self._rpn_score_loss_fn = losses.RpnScoreLoss(params.rpn_score_loss)\n self._rpn_box_loss_fn = losses.RpnBoxLoss(params.rpn_box_loss)\n self._frcnn_class_loss_fn = losses.FastrcnnClassLoss()\n self._frcnn_box_loss_fn = losses.FastrcnnBoxLoss(params.frcnn_box_loss)\n\n if self._include_mask:\n self._mask_loss_fn = losses.MaskrcnnLoss()\n\n if self._include_attributes:\n self._attributes_loss_fn = losses.AttributesLoss(params.attributes_loss)\n\n self._generate_detections_fn = postprocess_ops.GenericDetectionGenerator(\n params.postprocess)\n\n def _build_outputs(self, images, labels, mode):\n is_training = mode == mode_keys.TRAIN\n model_outputs = {}\n\n if 'anchor_boxes' in labels:\n anchor_boxes = labels['anchor_boxes']\n else:\n anchor_boxes = anchor.Anchor(\n self._params.architecture.min_level,\n self._params.architecture.max_level,\n self._params.anchor.num_scales,\n self._params.anchor.aspect_ratios,\n self._params.anchor.anchor_size,\n images.get_shape().as_list()[1:3]).multilevel_boxes\n\n batch_size = tf.shape(images)[0]\n for level in anchor_boxes:\n anchor_boxes[level] = tf.tile(\n tf.expand_dims(anchor_boxes[level], 0), [batch_size, 1, 1])\n\n backbone_features = self._backbone_fn(images, is_training)\n fpn_features = 
self._fpn_fn(backbone_features, is_training)\n\n rpn_score_outputs, rpn_box_outputs = self._rpn_head_fn(\n fpn_features, is_training)\n model_outputs.update({\n 'rpn_score_outputs': rpn_score_outputs,\n 'rpn_box_outputs': rpn_box_outputs,\n })\n rpn_rois, _ = self._generate_rois_fn(\n rpn_box_outputs,\n rpn_score_outputs,\n anchor_boxes,\n labels['image_info'][:, 1, :],\n is_training)\n\n if is_training:\n rpn_rois = tf.stop_gradient(rpn_rois)\n\n # Sample proposals.\n rpn_rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices = (\n self._sample_rois_fn(\n rpn_rois, labels['gt_boxes'], labels['gt_classes']))\n\n # Create bounding box training targets.\n box_targets = box_utils.encode_boxes(\n matched_gt_boxes, rpn_rois, weights=[10.0, 10.0, 5.0, 5.0])\n # If the target is background, the box target is set to all 0s.\n box_targets = tf.where(\n tf.tile(\n tf.expand_dims(tf.equal(matched_gt_classes, 0), axis=-1),\n [1, 1, 4]),\n tf.zeros_like(box_targets),\n box_targets)\n model_outputs.update({\n 'class_targets': matched_gt_classes,\n 'box_targets': box_targets,\n })\n\n roi_features = spatial_transform_ops.multilevel_crop_and_resize(\n fpn_features, rpn_rois, output_size=7)\n\n class_outputs, box_outputs = self._frcnn_head_fn(roi_features, is_training)\n model_outputs.update({\n 'class_outputs': class_outputs,\n 'box_outputs': box_outputs,\n })\n\n if not is_training:\n detection_results = self._generate_detections_fn(\n box_outputs, class_outputs, rpn_rois, labels['image_info'][:, 1:2, :])\n model_outputs.update(detection_results)\n\n if not self._include_mask:\n return model_outputs\n\n if is_training:\n rpn_rois, classes, mask_targets, gather_nd_gt_indices = self._sample_masks_fn(\n rpn_rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices,\n labels['gt_masks'])\n mask_targets = tf.stop_gradient(mask_targets)\n\n classes = tf.cast(classes, dtype=tf.int32)\n\n model_outputs.update({\n 'mask_targets': mask_targets,\n 'sampled_class_targets': classes,\n })\n else:\n rpn_rois = detection_results['detection_boxes']\n classes = tf.cast(detection_results['detection_classes'], dtype=tf.int32)\n\n mask_roi_features = spatial_transform_ops.multilevel_crop_and_resize(\n fpn_features, rpn_rois, output_size=14)\n\n mask_outputs = self._mrcnn_head_fn(mask_roi_features, classes, is_training)\n\n if is_training:\n model_outputs.update({\n 'mask_outputs': mask_outputs,\n })\n else:\n model_outputs.update({\n 'detection_masks': tf.nn.sigmoid(mask_outputs)\n })\n\n if not self._include_attributes:\n return model_outputs\n\n attribute_outputs = self._attributes_head_fn(mask_roi_features, is_training)\n\n if is_training:\n attribute_targets = tf.gather_nd(labels['gt_attributes'], gather_nd_gt_indices) # [batch, K, num_attributes]\n\n model_outputs.update({\n 'attribute_outputs': attribute_outputs,\n 'attribute_targets': attribute_targets,\n })\n else:\n model_outputs['detection_attributes'] = tf.nn.sigmoid(attribute_outputs)\n\n return model_outputs\n\n def build_losses(self, outputs, labels):\n rpn_score_loss = self._rpn_score_loss_fn(\n outputs['rpn_score_outputs'], labels['rpn_score_targets'])\n rpn_box_loss = self._rpn_box_loss_fn(\n outputs['rpn_box_outputs'], labels['rpn_box_targets'])\n\n frcnn_class_loss = self._frcnn_class_loss_fn(\n outputs['class_outputs'], outputs['class_targets'])\n frcnn_box_loss = self._frcnn_box_loss_fn(\n outputs['box_outputs'],\n outputs['class_targets'],\n outputs['box_targets'])\n\n if self._include_mask:\n mask_loss = self._mask_loss_fn(\n 
outputs['mask_outputs'],\n outputs['mask_targets'],\n outputs['sampled_class_targets'])\n else:\n mask_loss = 0.0\n\n if self._include_attributes:\n attributes_loss = self._attributes_loss_fn(\n outputs['attribute_outputs'],\n outputs['attribute_targets'],\n outputs['sampled_class_targets'])\n else:\n attributes_loss = 0.0\n\n model_loss = (rpn_score_loss + rpn_box_loss + frcnn_class_loss\n + frcnn_box_loss + mask_loss + attributes_loss)\n\n self.add_scalar_summary('losses/rpn_score_loss', rpn_score_loss)\n self.add_scalar_summary('losses/rpn_box_loss', rpn_box_loss)\n self.add_scalar_summary('losses/fast_rcnn_class_loss', frcnn_class_loss)\n self.add_scalar_summary('losses/fast_rcnn_box_loss', frcnn_box_loss)\n\n if self._include_mask:\n self.add_scalar_summary('losses/mask_loss', mask_loss)\n\n if self._include_attributes:\n self.add_scalar_summary('losses/attributes_loss', attributes_loss)\n\n self.add_scalar_summary('losses/model_loss', model_loss)\n\n return model_loss\n\n def build_metrics(self, outputs, labels):\n raise NotImplementedError('The `build_metrics` is not implemented.')\n\n def build_predictions(self, outputs, labels):\n predictions = {\n 'pred_image_info': labels['image_info'],\n 'pred_num_detections': outputs['num_detections'],\n 'pred_detection_boxes': outputs['detection_boxes'],\n 'pred_detection_classes': outputs['detection_classes'],\n 'pred_detection_scores': outputs['detection_scores'],\n }\n\n if self._include_mask:\n predictions.update({\n 'pred_detection_masks': outputs['detection_masks'],\n })\n\n if self._include_attributes:\n predictions.update({\n 'pred_detection_attributes': outputs['detection_attributes'],\n })\n\n if 'groundtruths' in labels:\n predictions['pred_source_id'] = labels['groundtruths']['source_id']\n predictions['gt_source_id'] = labels['groundtruths']['source_id']\n predictions['gt_height'] = labels['groundtruths']['height']\n predictions['gt_width'] = labels['groundtruths']['width']\n predictions['gt_image_info'] = labels['image_info']\n predictions['gt_num_detections'] = (\n labels['groundtruths']['num_detections'])\n predictions['gt_boxes'] = labels['groundtruths']['boxes']\n predictions['gt_classes'] = labels['groundtruths']['classes']\n predictions['gt_areas'] = labels['groundtruths']['areas']\n predictions['gt_is_crowds'] = labels['groundtruths']['is_crowds']\n\n return predictions\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Preprocess images and bounding boxes for detection.\n\nWe perform two sets of operations in preprocessing stage:\n(a) operations that are applied to both training and testing data,\n(b) operations that are applied only to training data for the purpose of\n data augmentation.\n\nA preprocessing function receives a set of inputs,\ne.g. 
an image and bounding boxes,\nperforms an operation on them, and returns them.\nSome examples are: randomly cropping the image, randomly mirroring the image,\n randomly changing the brightness, contrast, hue and\n randomly jittering the bounding boxes.\n\nThe image is a rank 4 tensor: [1, height, width, channels] with\ndtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where\nin each row there is a box with [ymin xmin ymax xmax].\nBoxes are in normalized coordinates meaning\ntheir coordinate values range in [0, 1]\n\nImportant Note: In tensor_dict, images is a rank 4 tensor, but preprocessing\nfunctions receive a rank 3 tensor for processing the image. Thus, inside the\npreprocess function we squeeze the image to become a rank 3 tensor and then\nwe pass it to the functions. At the end of the preprocess we expand the image\nback to rank 4.\n\"\"\"\n\nimport tensorflow.compat.v1 as tf\n\nfrom . import box_list\n\n\ndef _flip_boxes_left_right(boxes):\n \"\"\"Left-right flip the boxes.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Flipped boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_xmin = tf.subtract(1.0, xmax)\n flipped_xmax = tf.subtract(1.0, xmin)\n flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)\n return flipped_boxes\n\n\ndef _flip_masks_left_right(masks):\n \"\"\"Left-right flip masks.\n\n Args:\n masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n\n Returns:\n flipped masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n return masks[:, :, ::-1]\n\n\ndef keypoint_flip_horizontal(keypoints, flip_point, flip_permutation,\n scope=None):\n \"\"\"Flips the keypoints horizontally around the flip_point.\n\n This operation flips the x coordinate for each keypoint around the flip_point\n and also permutes the keypoints in a manner specified by flip_permutation.\n\n Args:\n keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n flip_point: (float) scalar tensor representing the x coordinate to flip the\n keypoints around.\n flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation. This specifies the mapping from original keypoint indices\n to the flipped keypoint indices. This is used primarily for keypoints\n that are not reflection invariant. E.g. 
Suppose there are 3 keypoints\n representing ['head', 'right_eye', 'left_eye'], then a logical choice for\n flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'\n and 'right_eye' after a horizontal flip.\n scope: name scope.\n\n Returns:\n new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n \"\"\"\n with tf.name_scope(scope, 'FlipHorizontal'):\n keypoints = tf.transpose(keypoints, [1, 0, 2])\n keypoints = tf.gather(keypoints, flip_permutation)\n v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)\n u = flip_point * 2.0 - u\n new_keypoints = tf.concat([v, u], 2)\n new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])\n return new_keypoints\n\n\ndef random_horizontal_flip(image,\n boxes=None,\n masks=None,\n keypoints=None,\n keypoint_flip_permutation=None,\n seed=None):\n \"\"\"Randomly flips the image and detections horizontally.\n\n The probability of flipping the image is 50%.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 tensor with shape [N, 4]\n containing the bounding boxes.\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x\n normalized coordinates.\n keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, keypoints, and keypoint_flip_permutation are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: if keypoints are provided but keypoint_flip_permutation is not.\n \"\"\"\n\n def _flip_image(image):\n # flip image\n image_flipped = tf.image.flip_left_right(image)\n return image_flipped\n\n if keypoints is not None and keypoint_flip_permutation is None:\n raise ValueError(\n 'keypoints are provided but keypoints_flip_permutation is not provided')\n\n with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):\n result = []\n # random variable defining whether to do flip or not\n do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),\n lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),\n lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None and keypoint_flip_permutation is not None:\n permutation = keypoint_flip_permutation\n keypoints = tf.cond(\n do_a_flip_random,\n lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),\n lambda: keypoints)\n result.append(keypoints)\n\n return 
tuple(result)\n\n\ndef _compute_new_static_size(image, min_dimension, max_dimension):\n \"\"\"Compute new static shape for resize_to_range method.\"\"\"\n image_shape = image.get_shape().as_list()\n orig_height = image_shape[0]\n orig_width = image_shape[1]\n num_channels = image_shape[2]\n orig_min_dim = min(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n large_scale_factor = min_dimension / float(orig_min_dim)\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = int(round(orig_height * large_scale_factor))\n large_width = int(round(orig_width * large_scale_factor))\n large_size = [large_height, large_width]\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = max(orig_height, orig_width)\n small_scale_factor = max_dimension / float(orig_max_dim)\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors. For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = int(round(orig_height * small_scale_factor))\n small_width = int(round(orig_width * small_scale_factor))\n small_size = [small_height, small_width]\n new_size = large_size\n if max(large_size) > max_dimension:\n new_size = small_size\n else:\n new_size = large_size\n return tf.constant(new_size + [num_channels])\n\n\ndef _compute_new_dynamic_size(image, min_dimension, max_dimension):\n \"\"\"Compute new dynamic shape for resize_to_range method.\"\"\"\n image_shape = tf.shape(image)\n orig_height = tf.to_float(image_shape[0])\n orig_width = tf.to_float(image_shape[1])\n num_channels = image_shape[2]\n orig_min_dim = tf.minimum(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n min_dimension = tf.constant(min_dimension, dtype=tf.float32)\n large_scale_factor = min_dimension / orig_min_dim\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))\n large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))\n large_size = tf.stack([large_height, large_width])\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = tf.maximum(orig_height, orig_width)\n max_dimension = tf.constant(max_dimension, dtype=tf.float32)\n small_scale_factor = max_dimension / orig_max_dim\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors. 
For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))\n small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))\n small_size = tf.stack([small_height, small_width])\n new_size = tf.cond(\n tf.to_float(tf.reduce_max(large_size)) > max_dimension,\n lambda: small_size, lambda: large_size)\n else:\n new_size = large_size\n return tf.stack(tf.unstack(new_size) + [num_channels])\n\n\ndef resize_to_range(image,\n masks=None,\n min_dimension=None,\n max_dimension=None,\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False,\n pad_to_max_dimension=False):\n \"\"\"Resizes an image so its dimensions are within the provided value.\n\n The output size can be described by two cases:\n 1. If the image can be rescaled so its minimum dimension is equal to the\n provided value without the other dimension exceeding max_dimension,\n then do so.\n 2. Otherwise, resize so the largest dimension is equal to max_dimension.\n\n Args:\n image: A 3D tensor of shape [height, width, channels]\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks.\n min_dimension: (optional) (scalar) desired size of the smaller image\n dimension.\n max_dimension: (optional) (scalar) maximum allowed size\n of the larger image dimension.\n method: (optional) interpolation method used in resizing. Defaults to\n BILINEAR.\n align_corners: bool. If true, exactly align all 4 corners of the input\n and output. Defaults to False.\n pad_to_max_dimension: Whether to resize the image and pad it with zeros\n so the resulting image is of the spatial size\n [max_dimension, max_dimension]. If masks are included they are padded\n similarly.\n\n Returns:\n Note that the position of the resized_image_shape changes based on whether\n masks are present.\n resized_image: A 3D tensor of shape [new_height, new_width, channels],\n where the image has been resized (with bilinear interpolation) so that\n min(new_height, new_width) == min_dimension or\n max(new_height, new_width) == max_dimension.\n resized_masks: If masks is not None, also outputs masks. 
A 3D tensor of\n shape [num_instances, new_height, new_width].\n resized_image_shape: A 1D tensor of shape [3] containing shape of the\n resized image.\n\n Raises:\n ValueError: if the image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('ResizeToRange', values=[image, min_dimension]):\n if image.get_shape().is_fully_defined():\n new_size = _compute_new_static_size(image, min_dimension, max_dimension)\n else:\n new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)\n new_image = tf.image.resize_images(\n image, new_size[:-1], method=method, align_corners=align_corners)\n\n if pad_to_max_dimension:\n new_image = tf.image.pad_to_bounding_box(\n new_image, 0, 0, max_dimension, max_dimension)\n\n result = [new_image]\n if masks is not None:\n new_masks = tf.expand_dims(masks, 3)\n new_masks = tf.image.resize_images(\n new_masks,\n new_size[:-1],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n align_corners=align_corners)\n new_masks = tf.squeeze(new_masks, 3)\n if pad_to_max_dimension:\n new_masks = tf.image.pad_to_bounding_box(\n new_masks, 0, 0, max_dimension, max_dimension)\n result.append(new_masks)\n\n result.append(new_size)\n return result\n\n\ndef _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):\n \"\"\"Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.\n\n Args:\n boxlist_to_copy_to: BoxList to which extra fields are copied.\n boxlist_to_copy_from: BoxList from which fields are copied.\n\n Returns:\n boxlist_to_copy_to with extra fields.\n \"\"\"\n for field in boxlist_to_copy_from.get_extra_fields():\n boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))\n return boxlist_to_copy_to\n\n\ndef box_list_scale(boxlist, y_scale, x_scale, scope=None):\n \"\"\"scale box coordinates in x and y dimensions.\n\n Args:\n boxlist: BoxList holding N boxes\n y_scale: (float) scalar tensor\n x_scale: (float) scalar tensor\n scope: name scope.\n\n Returns:\n boxlist: BoxList holding N boxes\n \"\"\"\n with tf.name_scope(scope, 'Scale'):\n y_scale = tf.cast(y_scale, tf.float32)\n x_scale = tf.cast(x_scale, tf.float32)\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n y_min = y_scale * y_min\n y_max = y_scale * y_max\n x_min = x_scale * x_min\n x_max = x_scale * x_max\n scaled_boxlist = box_list.BoxList(\n tf.concat([y_min, x_min, y_max, x_max], 1))\n return _copy_extra_fields(scaled_boxlist, boxlist)\n\n\ndef keypoint_scale(keypoints, y_scale, x_scale, scope=None):\n \"\"\"Scales keypoint coordinates in x and y dimensions.\n\n Args:\n keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n y_scale: (float) scalar tensor\n x_scale: (float) scalar tensor\n scope: name scope.\n\n Returns:\n new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n \"\"\"\n with tf.name_scope(scope, 'Scale'):\n y_scale = tf.cast(y_scale, tf.float32)\n x_scale = tf.cast(x_scale, tf.float32)\n new_keypoints = keypoints * [[[y_scale, x_scale]]]\n return new_keypoints\n\n\ndef scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):\n \"\"\"Scales boxes from normalized to pixel coordinates.\n\n Args:\n image: A 3D float32 tensor of shape [height, width, channels].\n boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding\n boxes in normalized coordinates. 
Each row is of the form\n [ymin, xmin, ymax, xmax].\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized\n coordinates.\n\n Returns:\n image: unchanged input image.\n scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the\n bounding boxes in pixel coordinates.\n scaled_keypoints: a 3D float32 tensor with shape\n [num_instances, num_keypoints, 2] containing the keypoints in pixel\n coordinates.\n \"\"\"\n boxlist = box_list.BoxList(boxes)\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()\n result = [image, scaled_boxes]\n if keypoints is not None:\n scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)\n result.append(scaled_keypoints)\n return tuple(result)\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for MnasNet model.\n\n[1] Mingxing Tan, Bo Chen, Ruoming Pang, Vijay Vasudevan, Quoc V. Le\n MnasNet: Platform-Aware Neural Architecture Search for Mobile.\n arXiv:1807.11626\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\nimport six\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf\n\nfrom . import mnas_utils\n\nGlobalParams = collections.namedtuple('GlobalParams', [\n 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'data_format',\n 'num_classes', 'depth_multiplier', 'depth_divisor', 'min_depth',\n 'stem_size', 'use_keras'\n])\nGlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)\n\n# TODO(hongkuny): Consider rewrite an argument class with encoding/decoding.\nBlockArgs = collections.namedtuple('BlockArgs', [\n 'kernel_size', 'num_repeat', 'input_filters', 'output_filters',\n 'expand_ratio', 'id_skip', 'strides', 'se_ratio'\n])\n# defaults will be a public argument for namedtuple in Python 3.7\n# https://docs.python.org/3/library/collections.html#collections.namedtuple\nBlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)\n\n\ndef conv_kernel_initializer(shape, dtype=None, partition_info=None):\n \"\"\"Initialization for convolutional kernels.\n\n The main difference with tf.variance_scaling_initializer is that\n tf.variance_scaling_initializer uses a truncated normal with an uncorrected\n standard deviation, whereas here we use a normal distribution. 
Similarly,\n tf.contrib.layers.variance_scaling_initializer uses a truncated normal with\n a corrected standard deviation.\n\n Args:\n shape: shape of variable\n dtype: dtype of variable\n partition_info: unused\n\n Returns:\n an initialization for the variable\n \"\"\"\n del partition_info\n kernel_height, kernel_width, _, out_filters = shape\n fan_out = int(kernel_height * kernel_width * out_filters)\n return tf.random_normal(\n shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)\n\n\ndef dense_kernel_initializer(shape, dtype=None, partition_info=None):\n \"\"\"Initialization for dense kernels.\n\n This initialization is equal to\n tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',\n distribution='uniform').\n It is written out explicitly here for clarity.\n\n Args:\n shape: shape of variable\n dtype: dtype of variable\n partition_info: unused\n\n Returns:\n an initialization for the variable\n \"\"\"\n del partition_info\n init_range = 1.0 / np.sqrt(shape[1])\n return tf.random_uniform(shape, -init_range, init_range, dtype=dtype)\n\n\ndef round_filters(filters, global_params):\n \"\"\"Round number of filters based on depth multiplier.\"\"\"\n multiplier = global_params.depth_multiplier\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n if not multiplier:\n return filters\n\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += divisor\n return new_filters\n\n\ndef _get_conv2d(filters,\n kernel_size,\n strides,\n kernel_initializer,\n padding,\n use_bias,\n data_format='channels_last',\n use_keras=True):\n \"\"\"A helper function to create Conv2D layer.\"\"\"\n if use_keras:\n return tf.keras.layers.Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n kernel_initializer=kernel_initializer,\n padding=padding,\n data_format=data_format,\n use_bias=use_bias)\n else:\n return tf.layers.Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n kernel_initializer=kernel_initializer,\n padding=padding,\n data_format=data_format,\n use_bias=use_bias)\n\n\nclass MnasBlock(object):\n \"\"\"A class of MnasNet Inverted Residual Bottleneck.\n\n Attributes:\n has_se: boolean. Whether the block contains a Squeeze and Excitation layer\n inside.\n endpoints: dict. 
A list of internal tensors.\n \"\"\"\n\n def __init__(self, block_args, global_params):\n \"\"\"Initializes a MnasNet block.\n\n Args:\n block_args: BlockArgs, arguments to create a MnasBlock.\n global_params: GlobalParams, a set of global parameters.\n \"\"\"\n self._block_args = block_args\n self._batch_norm_momentum = global_params.batch_norm_momentum\n self._batch_norm_epsilon = global_params.batch_norm_epsilon\n self._use_keras = global_params.use_keras\n self._data_format = global_params.data_format\n if self._data_format == 'channels_first':\n self._channel_axis = 1\n self._spatial_dims = [2, 3]\n else:\n self._channel_axis = -1\n self._spatial_dims = [1, 2]\n self.has_se = (self._block_args.se_ratio is not None) and (\n self._block_args.se_ratio > 0) and (self._block_args.se_ratio <= 1)\n\n self.endpoints = None\n\n # Builds the block accordings to arguments.\n self._build()\n\n def block_args(self):\n return self._block_args\n\n def _build(self):\n \"\"\"Builds MnasNet block according to the arguments.\"\"\"\n filters = self._block_args.input_filters * self._block_args.expand_ratio\n if self._block_args.expand_ratio != 1:\n # Expansion phase:\n self._expand_conv = _get_conv2d(\n filters=filters,\n kernel_size=[1, 1],\n strides=[1, 1],\n kernel_initializer=conv_kernel_initializer,\n padding='same',\n use_bias=False,\n data_format=self._data_format,\n use_keras=self._use_keras)\n # TODO(hongkuny): b/120622234 need to manage update ops directly.\n self._bn0 = tf.layers.BatchNormalization(\n axis=self._channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon,\n fused=True)\n\n kernel_size = self._block_args.kernel_size\n # Depth-wise convolution phase:\n if self._use_keras:\n self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(\n [kernel_size, kernel_size],\n strides=self._block_args.strides,\n depthwise_initializer=conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=False)\n else:\n self._depthwise_conv = mnas_utils.DepthwiseConv2D(\n [kernel_size, kernel_size],\n strides=self._block_args.strides,\n depthwise_initializer=conv_kernel_initializer,\n padding='same',\n data_format=self._data_format,\n use_bias=False)\n self._bn1 = tf.layers.BatchNormalization(\n axis=self._channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon,\n fused=True)\n\n if self.has_se:\n num_reduced_filters = max(\n 1, int(self._block_args.input_filters * self._block_args.se_ratio))\n # Squeeze and Excitation layer.\n self._se_reduce = _get_conv2d(\n num_reduced_filters,\n kernel_size=[1, 1],\n strides=[1, 1],\n kernel_initializer=conv_kernel_initializer,\n padding='same',\n use_bias=True,\n data_format=self._data_format,\n use_keras=self._use_keras)\n self._se_expand = _get_conv2d(\n filters,\n kernel_size=[1, 1],\n strides=[1, 1],\n kernel_initializer=conv_kernel_initializer,\n padding='same',\n use_bias=True,\n data_format=self._data_format,\n use_keras=self._use_keras)\n\n # Output phase:\n filters = self._block_args.output_filters\n self._project_conv = _get_conv2d(\n filters,\n kernel_size=[1, 1],\n strides=[1, 1],\n kernel_initializer=conv_kernel_initializer,\n padding='same',\n use_bias=False,\n data_format=self._data_format,\n use_keras=self._use_keras)\n self._bn2 = tf.layers.BatchNormalization(\n axis=self._channel_axis,\n momentum=self._batch_norm_momentum,\n epsilon=self._batch_norm_epsilon,\n fused=True)\n\n def _call_se(self, input_tensor):\n \"\"\"Call Squeeze and Excitation layer.\n\n Args:\n 
input_tensor: Tensor, a single input tensor for Squeeze/Excitation layer.\n\n Returns:\n An output tensor, which should have the same shape as input.\n \"\"\"\n se_tensor = tf.reduce_mean(input_tensor, self._spatial_dims, keepdims=True)\n se_tensor = self._se_expand(tf.nn.relu(self._se_reduce(se_tensor)))\n tf.logging.info('Built Squeeze and Excitation with tensor shape: %s' %\n (se_tensor.shape))\n return tf.sigmoid(se_tensor) * input_tensor\n\n def call(self, inputs, training=True):\n \"\"\"Implementation of MnasBlock call().\n\n Args:\n inputs: the input tensor.\n training: boolean, whether the model is constructed for training.\n\n Returns:\n An output tensor.\n \"\"\"\n tf.logging.info('Block input: %s shape: %s' % (inputs.name, inputs.shape))\n if self._block_args.expand_ratio != 1:\n x = tf.nn.relu(self._bn0(self._expand_conv(inputs), training=training))\n else:\n x = inputs\n tf.logging.info('Expand: %s shape: %s' % (x.name, x.shape))\n\n x = tf.nn.relu(self._bn1(self._depthwise_conv(x), training=training))\n tf.logging.info('DWConv: %s shape: %s' % (x.name, x.shape))\n\n if self.has_se:\n with tf.variable_scope('se'):\n x = self._call_se(x)\n\n self.endpoints = {'expansion_output': x}\n\n x = self._bn2(self._project_conv(x), training=training)\n if self._block_args.id_skip:\n if all(\n s == 1 for s in self._block_args.strides\n ) and self._block_args.input_filters == self._block_args.output_filters:\n x = tf.add(x, inputs)\n tf.logging.info('Project: %s shape: %s' % (x.name, x.shape))\n return tf.identity(x)\n\n\nclass MnasNetModel(tf.keras.Model):\n \"\"\"A class that implements tf.keras.Model for the MnasNet model.\n\n Reference: https://arxiv.org/abs/1807.11626\n \"\"\"\n\n def __init__(self, blocks_args=None, global_params=None):\n \"\"\"Initializes an `MnasNetModel` instance.\n\n Args:\n blocks_args: A list of BlockArgs to construct MnasNet block modules.\n global_params: GlobalParams, a set of global parameters.\n\n Raises:\n ValueError: when blocks_args is not specified as a list.\n \"\"\"\n super(MnasNetModel, self).__init__()\n if not isinstance(blocks_args, list):\n raise ValueError('blocks_args should be a list.')\n self._global_params = global_params\n self._blocks_args = blocks_args\n self.endpoints = None\n self._build()\n\n def _build(self):\n \"\"\"Builds a MnasNet model.\"\"\"\n self._blocks = []\n # Builds blocks.\n for block_args in self._blocks_args:\n assert block_args.num_repeat > 0\n # Update block input and output filters based on depth multiplier.\n block_args = block_args._replace(\n input_filters=round_filters(block_args.input_filters,\n self._global_params),\n output_filters=round_filters(block_args.output_filters,\n self._global_params))\n\n # The first block needs to take care of stride and filter size increase.\n self._blocks.append(MnasBlock(block_args, self._global_params))\n if block_args.num_repeat > 1:\n # pylint: disable=protected-access\n block_args = block_args._replace(\n input_filters=block_args.output_filters, strides=[1, 1])\n # pylint: enable=protected-access\n for _ in xrange(block_args.num_repeat - 1):\n self._blocks.append(MnasBlock(block_args, self._global_params))\n\n batch_norm_momentum = self._global_params.batch_norm_momentum\n batch_norm_epsilon = self._global_params.batch_norm_epsilon\n if self._global_params.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n\n # Stem part.\n stem_size = self._global_params.stem_size\n self._conv_stem = _get_conv2d(\n filters=round_filters(stem_size, self._global_params),\n 
kernel_size=[3, 3],\n strides=[2, 2],\n kernel_initializer=conv_kernel_initializer,\n padding='same',\n use_bias=False,\n data_format=self._global_params.data_format,\n use_keras=self._global_params.use_keras)\n self._bn0 = tf.layers.BatchNormalization(\n axis=channel_axis,\n momentum=batch_norm_momentum,\n epsilon=batch_norm_epsilon,\n fused=True)\n\n # Head part.\n self._conv_head = _get_conv2d(\n filters=1280,\n kernel_size=[1, 1],\n strides=[1, 1],\n kernel_initializer=conv_kernel_initializer,\n padding='same',\n use_bias=False,\n data_format=self._global_params.data_format,\n use_keras=self._global_params.use_keras)\n self._bn1 = tf.layers.BatchNormalization(\n axis=channel_axis,\n momentum=batch_norm_momentum,\n epsilon=batch_norm_epsilon,\n fused=True)\n\n self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(\n data_format=self._global_params.data_format)\n if self._global_params.use_keras:\n self._fc = tf.keras.layers.Dense(\n self._global_params.num_classes,\n kernel_initializer=dense_kernel_initializer)\n else:\n self._fc = tf.layers.Dense(\n self._global_params.num_classes,\n kernel_initializer=dense_kernel_initializer)\n if self._global_params.dropout_rate > 0:\n self._dropout = tf.keras.layers.Dropout(self._global_params.dropout_rate)\n else:\n self._dropout = None\n\n def call(self, inputs, training=True, features_only=None):\n \"\"\"Implementation of MnasNetModel call().\n\n Args:\n inputs: input tensors.\n training: boolean, whether the model is constructed for training.\n features_only: build the base feature network only.\n\n Returns:\n output tensors.\n \"\"\"\n outputs = None\n self.endpoints = {}\n # Calls Stem layers\n with tf.variable_scope('mnas_stem'):\n outputs = tf.nn.relu(\n self._bn0(self._conv_stem(inputs), training=training))\n tf.logging.info('Built stem layers with output shape: %s' % outputs.shape)\n self.endpoints['stem'] = outputs\n\n # Calls blocks.\n reduction_idx = 0\n for idx, block in enumerate(self._blocks):\n is_reduction = False\n if ((idx == len(self._blocks) - 1) or\n self._blocks[idx + 1].block_args().strides[0] > 1):\n is_reduction = True\n reduction_idx += 1\n\n with tf.variable_scope('mnas_blocks_%s' % idx):\n outputs = block.call(outputs, training=training)\n self.endpoints['block_%s' % idx] = outputs\n if is_reduction:\n self.endpoints['reduction_%s' % reduction_idx] = outputs\n if block.endpoints:\n for k, v in six.iteritems(block.endpoints):\n self.endpoints['block_%s/%s' % (idx, k)] = v\n if is_reduction:\n self.endpoints['reduction_%s/%s' % (reduction_idx, k)] = v\n self.endpoints['global_pool'] = outputs\n\n if not features_only:\n # Calls final layers and returns logits.\n with tf.variable_scope('mnas_head'):\n outputs = tf.nn.relu(\n self._bn1(self._conv_head(outputs), training=training))\n outputs = self._avg_pooling(outputs)\n if self._dropout:\n outputs = self._dropout(outputs, training=training)\n outputs = self._fc(outputs)\n self.endpoints['head'] = outputs\n return outputs\n" ]
[ [ "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.identity" ], [ "tensorflow.python.summary.summary_iterator.summary_iterator", "numpy.asarray", "numpy.argwhere", "numpy.zeros", "numpy.where" ], [ "tensorflow.compat.v1.nn.sigmoid", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.equal", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.stop_gradient", "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.gather_nd" ], [ "tensorflow.compat.v1.concat", "tensorflow.compat.v1.subtract", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.image.resize_images", "tensorflow.compat.v1.round", "tensorflow.compat.v1.maximum", "tensorflow.compat.v1.random_uniform", "tensorflow.compat.v1.unstack", "tensorflow.compat.v1.transpose", "tensorflow.compat.v1.name_scope", "tensorflow.compat.v1.split", "tensorflow.compat.v1.minimum", "tensorflow.compat.v1.image.flip_left_right", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.reduce_max", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.to_float", "tensorflow.compat.v1.gather", "tensorflow.compat.v1.image.pad_to_bounding_box", "tensorflow.compat.v1.squeeze" ], [ "tensorflow.compat.v1.keras.layers.Conv2D", "tensorflow.compat.v1.keras.layers.Dropout", "numpy.sqrt", "tensorflow.compat.v1.keras.layers.GlobalAveragePooling2D", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.sigmoid", "tensorflow.compat.v1.keras.layers.DepthwiseConv2D", "tensorflow.compat.v1.keras.layers.Dense", "tensorflow.compat.v1.layers.Dense", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.random_uniform", "tensorflow.compat.v1.layers.BatchNormalization", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.add", "tensorflow.compat.v1.layers.Conv2D", "tensorflow.compat.v1.identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
payno/silx
[ "13301e61627f98fa837008250ac74a0627a7a560", "13301e61627f98fa837008250ac74a0627a7a560", "13301e61627f98fa837008250ac74a0627a7a560" ]
[ "setup.py", "silx/gui/plot/MaskToolsWidget.py", "silx/io/fabioh5.py" ]
[ "#!/usr/bin/python\n# coding: utf8\n# /*##########################################################################\n#\n# Copyright (c) 2015-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\n__authors__ = [\"Jérôme Kieffer\", \"Thomas Vincent\"]\n__date__ = \"23/04/2018\"\n__license__ = \"MIT\"\n\n\nimport sys\nimport os\nimport platform\nimport shutil\nimport logging\nimport glob\n# io import has to be here also to fix a bug on Debian 7 with python2.7\n# Without this, the system io module is not loaded from numpy.distutils.\n# The silx.io module seems to be loaded instead.\nimport io\n\nlogging.basicConfig(level=logging.INFO)\n\nlogger = logging.getLogger(\"silx.setup\")\n\n\nfrom distutils.command.clean import clean as Clean\nfrom distutils.command.build import build as _build\ntry:\n from setuptools import Command\n from setuptools.command.build_py import build_py as _build_py\n from setuptools.command.build_ext import build_ext\n from setuptools.command.sdist import sdist\n logger.info(\"Use setuptools\")\nexcept ImportError:\n try:\n from numpy.distutils.core import Command\n except ImportError:\n from distutils.core import Command\n from distutils.command.build_py import build_py as _build_py\n from distutils.command.build_ext import build_ext\n from distutils.command.sdist import sdist\n logger.info(\"Use distutils\")\n\ntry:\n import sphinx\n import sphinx.util.console\n sphinx.util.console.color_terminal = lambda: False\n from sphinx.setup_command import BuildDoc\nexcept ImportError:\n sphinx = None\n\n\nPROJECT = \"silx\"\n\nif \"LANG\" not in os.environ and sys.platform == \"darwin\" and sys.version_info[0] > 2:\n print(\"\"\"WARNING: the LANG environment variable is not defined,\nan utf-8 LANG is mandatory to use setup.py, you may face unexpected UnicodeError.\nexport LANG=en_US.utf-8\nexport LC_ALL=en_US.utf-8\n\"\"\")\n\n\ndef get_version():\n \"\"\"Returns current version number from version.py file\"\"\"\n import version\n return version.strictversion\n\n\ndef get_readme():\n \"\"\"Returns content of README.rst file\"\"\"\n dirname = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(dirname, \"README.rst\")\n with io.open(filename, \"r\", encoding=\"utf-8\") as fp:\n long_description = fp.read()\n return long_description\n\n\nclassifiers = [\"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n 
\"Environment :: MacOS X\",\n \"Environment :: Win32 (MS Windows)\",\n \"Environment :: X11 Applications :: Qt\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Cython\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ]\n\n\n# ########## #\n# version.py #\n# ########## #\n\nclass build_py(_build_py):\n \"\"\"\n Enhanced build_py which copies version.py to <PROJECT>._version.py\n \"\"\"\n def find_package_modules(self, package, package_dir):\n modules = _build_py.find_package_modules(self, package, package_dir)\n if package == PROJECT:\n modules.append((PROJECT, '_version', 'version.py'))\n return modules\n\n\n########\n# Test #\n########\n\nclass PyTest(Command):\n \"\"\"Command to start tests running the script: run_tests.py\"\"\"\n user_options = []\n\n description = \"Execute the unittests\"\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import subprocess\n errno = subprocess.call([sys.executable, 'run_tests.py'])\n if errno != 0:\n raise SystemExit(errno)\n\n\n# ################### #\n# build_doc command #\n# ################### #\n\nif sphinx is None:\n class SphinxExpectedCommand(Command):\n \"\"\"Command to inform that sphinx is missing\"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n raise RuntimeError(\n 'Sphinx is required to build or test the documentation.\\n'\n 'Please install Sphinx (http://www.sphinx-doc.org).')\n\n\nclass BuildMan(Command):\n \"\"\"Command to build man pages\"\"\"\n\n description = \"Build man pages of the provided entry points\"\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def entry_points_iterator(self):\n \"\"\"Iterate other entry points available on the project.\"\"\"\n entry_points = self.distribution.entry_points\n console_scripts = entry_points.get('console_scripts', [])\n gui_scripts = entry_points.get('gui_scripts', [])\n scripts = []\n scripts.extend(console_scripts)\n scripts.extend(gui_scripts)\n for script in scripts:\n # Remove ending extra dependencies\n script = script.split(\"[\")[0]\n elements = script.split(\"=\")\n target_name = elements[0].strip()\n elements = elements[1].split(\":\")\n module_name = elements[0].strip()\n function_name = elements[1].strip()\n yield target_name, module_name, function_name\n\n def run_targeted_script(self, target_name, script_name, env, log_output=False):\n \"\"\"Execute targeted script using --help and --version to help checking\n errors. 
help2man is not very helpful to do it for us.\n\n :return: True if both return codes are equal to 0\n :rtype: bool\n \"\"\"\n import subprocess\n\n if log_output:\n extra_args = {}\n else:\n try:\n # Python 3\n from subprocess import DEVNULL\n except ImportError:\n # Python 2\n import os\n DEVNULL = open(os.devnull, 'wb')\n extra_args = {'stdout': DEVNULL, 'stderr': DEVNULL}\n\n succeeded = True\n command_line = [sys.executable, script_name, \"--help\"]\n if log_output:\n logger.info(\"See the following execution of: %s\", \" \".join(command_line))\n p = subprocess.Popen(command_line, env=env, **extra_args)\n status = p.wait()\n if log_output:\n logger.info(\"Return code: %s\", status)\n succeeded = succeeded and status == 0\n command_line = [sys.executable, script_name, \"--version\"]\n if log_output:\n logger.info(\"See the following execution of: %s\", \" \".join(command_line))\n p = subprocess.Popen(command_line, env=env, **extra_args)\n status = p.wait()\n if log_output:\n logger.info(\"Return code: %s\", status)\n succeeded = succeeded and status == 0\n return succeeded\n\n def run(self):\n build = self.get_finalized_command('build')\n path = sys.path\n path.insert(0, os.path.abspath(build.build_lib))\n\n env = dict((str(k), str(v)) for k, v in os.environ.items())\n env[\"PYTHONPATH\"] = os.pathsep.join(path)\n if not os.path.isdir(\"build/man\"):\n os.makedirs(\"build/man\")\n import subprocess\n import tempfile\n import stat\n script_name = None\n\n entry_points = self.entry_points_iterator()\n for target_name, module_name, function_name in entry_points:\n logger.info(\"Build man for entry-point target '%s'\" % target_name)\n # help2man expects a single executable file to extract the help\n # we create it, execute it, and delete it at the end\n\n py3 = sys.version_info >= (3, 0)\n try:\n # create a launcher using the right python interpreter\n script_fid, script_name = tempfile.mkstemp(prefix=\"%s_\" % target_name, text=True)\n script = os.fdopen(script_fid, 'wt')\n script.write(\"#!%s\\n\" % sys.executable)\n script.write(\"import %s as app\\n\" % module_name)\n script.write(\"app.%s()\\n\" % function_name)\n script.close()\n # make it executable\n mode = os.stat(script_name).st_mode\n os.chmod(script_name, mode + stat.S_IEXEC)\n\n # execute help2man\n man_file = \"build/man/%s.1\" % target_name\n command_line = [\"help2man\", script_name, \"-o\", man_file]\n if not py3:\n # Before Python 3.4, ArgParser --version was using\n # stderr to print the version\n command_line.append(\"--no-discard-stderr\")\n # Then we don't know if the documentation will contain\n # dirty things\n succeeded = self.run_targeted_script(target_name, script_name, env, False)\n if not succeeded:\n logger.info(\"Error while generating man file for target '%s'.\", target_name)\n self.run_targeted_script(target_name, script_name, env, True)\n raise RuntimeError(\"Failed to generate '%s' man documentation\" % target_name)\n\n p = subprocess.Popen(command_line, env=env)\n status = p.wait()\n if status != 0:\n logger.info(\"Error while generating man file for target '%s'.\", target_name)\n self.run_targeted_script(target_name, script_name, env, True)\n raise RuntimeError(\"Failed to generate '%s' man documentation\" % target_name)\n finally:\n # clean up the script\n if script_name is not None:\n os.remove(script_name)\n\n\nif sphinx is not None:\n class BuildDocCommand(BuildDoc):\n \"\"\"Command to build documentation using sphinx.\n\n Project should already have been built.\n \"\"\"\n\n def run(self):\n # make sure the 
python path is pointing to the newly built\n # code so that the documentation is built on this and not a\n # previously installed version\n\n build = self.get_finalized_command('build')\n sys.path.insert(0, os.path.abspath(build.build_lib))\n\n # # Copy .ui files to the path:\n # dst = os.path.join(\n # os.path.abspath(build.build_lib), \"silx\", \"gui\")\n # if not os.path.isdir(dst):\n # os.makedirs(dst)\n # for i in os.listdir(\"gui\"):\n # if i.endswith(\".ui\"):\n # src = os.path.join(\"gui\", i)\n # idst = os.path.join(dst, i)\n # if not os.path.exists(idst):\n # shutil.copy(src, idst)\n\n # Build the Users Guide in HTML and TeX format\n for builder in ['html', 'latex']:\n self.builder = builder\n self.builder_target_dir = os.path.join(self.build_dir, builder)\n self.mkpath(self.builder_target_dir)\n BuildDoc.run(self)\n sys.path.pop(0)\nelse:\n BuildDocCommand = SphinxExpectedCommand\n\n\n# ################### #\n# test_doc command #\n# ################### #\n\nif sphinx is not None:\n class TestDocCommand(BuildDoc):\n \"\"\"Command to test the documentation using sphynx doctest.\n\n http://www.sphinx-doc.org/en/1.4.8/ext/doctest.html\n \"\"\"\n def run(self):\n # make sure the python path is pointing to the newly built\n # code so that the documentation is built on this and not a\n # previously installed version\n\n build = self.get_finalized_command('build')\n sys.path.insert(0, os.path.abspath(build.build_lib))\n\n # Build the Users Guide in HTML and TeX format\n for builder in ['doctest']:\n self.builder = builder\n self.builder_target_dir = os.path.join(self.build_dir, builder)\n self.mkpath(self.builder_target_dir)\n BuildDoc.run(self)\n sys.path.pop(0)\n\nelse:\n TestDocCommand = SphinxExpectedCommand\n\n\n# ############################# #\n# numpy.distutils Configuration #\n# ############################# #\n\ndef configuration(parent_package='', top_path=None):\n \"\"\"Recursive construction of package info to be used in setup().\n\n See http://docs.scipy.org/doc/numpy/reference/distutils.html#numpy.distutils.misc_util.Configuration\n \"\"\"\n try:\n from numpy.distutils.misc_util import Configuration\n except ImportError:\n raise ImportError(\n \"To install this package, you must install numpy first\\n\"\n \"(See https://pypi.python.org/pypi/numpy)\")\n config = Configuration(None, parent_package, top_path)\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n config.add_subpackage(PROJECT)\n return config\n\n# ############## #\n# Compiler flags #\n# ############## #\n\n\nclass Build(_build):\n \"\"\"Command to support more user options for the build.\"\"\"\n\n user_options = [\n ('no-openmp', None,\n \"do not use OpenMP for compiled extension modules\"),\n ('openmp', None,\n \"use OpenMP for the compiled extension modules\"),\n ('no-cython', None,\n \"do not compile Cython extension modules (use default compiled c-files)\"),\n ('force-cython', None,\n \"recompile all Cython extension modules\"),\n ]\n user_options.extend(_build.user_options)\n\n boolean_options = ['no-openmp', 'openmp', 'no-cython', 'force-cython']\n boolean_options.extend(_build.boolean_options)\n\n def initialize_options(self):\n _build.initialize_options(self)\n self.no_openmp = None\n self.openmp = None\n self.no_cython = None\n self.force_cython = None\n\n def finalize_options(self):\n _build.finalize_options(self)\n self.finalize_cython_options(min_version='0.21.1')\n self.finalize_openmp_options()\n\n def 
_parse_env_as_bool(self, key):\n content = os.environ.get(key, \"\")\n value = content.lower()\n if value in [\"1\", \"true\", \"yes\", \"y\"]:\n return True\n if value in [\"0\", \"false\", \"no\", \"n\"]:\n return False\n if value in [\"none\", \"\"]:\n return None\n msg = \"Env variable '%s' contains '%s'. But a boolean or an empty \\\n string was expected. Variable ignored.\"\n logger.warning(msg, key, content)\n return None\n\n def finalize_openmp_options(self):\n \"\"\"Check if extensions must be compiled with OpenMP.\n\n The result is stored into the object.\n \"\"\"\n if self.openmp:\n use_openmp = True\n elif self.no_openmp:\n use_openmp = False\n else:\n env_force_cython = self._parse_env_as_bool(\"WITH_OPENMP\")\n if env_force_cython is not None:\n use_openmp = env_force_cython\n else:\n # Use it by default\n use_openmp = True\n\n if use_openmp:\n if platform.system() == \"Darwin\":\n # By default Xcode5 & XCode6 do not support OpenMP, Xcode4 is OK.\n osx = tuple([int(i) for i in platform.mac_ver()[0].split(\".\")])\n if osx >= (10, 8):\n logger.warning(\"OpenMP support ignored. Your platform do not support it\")\n use_openmp = False\n\n # Remove attributes used by distutils parsing\n # use 'use_openmp' instead\n del self.no_openmp\n del self.openmp\n self.use_openmp = use_openmp\n\n def finalize_cython_options(self, min_version=None):\n \"\"\"\n Check if cythonization must be used for the extensions.\n\n The result is stored into the object.\n \"\"\"\n\n if self.force_cython:\n use_cython = \"force\"\n elif self.no_cython:\n use_cython = \"no\"\n else:\n env_force_cython = self._parse_env_as_bool(\"FORCE_CYTHON\")\n env_with_cython = self._parse_env_as_bool(\"WITH_CYTHON\")\n if env_force_cython is True:\n use_cython = \"force\"\n elif env_with_cython is True:\n use_cython = \"yes\"\n elif env_with_cython is False:\n use_cython = \"no\"\n else:\n # Use it by default\n use_cython = \"yes\"\n\n if use_cython in [\"force\", \"yes\"]:\n try:\n import Cython.Compiler.Version\n if min_version and Cython.Compiler.Version.version < min_version:\n msg = \"Cython version is too old. At least version is %s \\\n expected. Cythonization is skipped.\"\n logger.warning(msg, str(min_version))\n use_cython = \"no\"\n except ImportError:\n msg = \"Cython is not available. 
Cythonization is skipped.\"\n logger.warning(msg)\n use_cython = \"no\"\n\n # Remove attribute used by distutils parsing\n # use 'use_cython' and 'force_cython' instead\n del self.no_cython\n self.force_cython = use_cython == \"force\"\n self.use_cython = use_cython in [\"force\", \"yes\"]\n\n\nclass BuildExt(build_ext):\n \"\"\"Handle extension compilation.\n\n Command-line argument and environment can custom:\n\n - The use of cython to cythonize files, else a default version is used\n - Build extension with support of OpenMP (by default it is enabled)\n - If building with MSVC, compiler flags are converted from gcc flags.\n \"\"\"\n\n COMPILE_ARGS_CONVERTER = {'-fopenmp': '/openmp'}\n\n LINK_ARGS_CONVERTER = {'-fopenmp': ''}\n\n description = 'Build silx extensions'\n\n def finalize_options(self):\n build_ext.finalize_options(self)\n build_obj = self.distribution.get_command_obj(\"build\")\n self.use_openmp = build_obj.use_openmp\n self.use_cython = build_obj.use_cython\n self.force_cython = build_obj.force_cython\n\n def patch_with_default_cythonized_files(self, ext):\n \"\"\"Replace cython files by .c or .cpp files in extension's sources.\n\n It replaces the *.pyx and *.py source files of the extensions\n to either *.cpp or *.c source files.\n No compilation is performed.\n\n :param Extension ext: An extension to patch.\n \"\"\"\n new_sources = []\n for source in ext.sources:\n base, file_ext = os.path.splitext(source)\n if file_ext in ('.pyx', '.py'):\n if ext.language == 'c++':\n cythonized = base + '.cpp'\n else:\n cythonized = base + '.c'\n if not os.path.isfile(cythonized):\n raise RuntimeError(\"Source file not found: %s. Cython is needed\" % cythonized)\n print(\"Use default cythonized file for %s\" % source)\n new_sources.append(cythonized)\n else:\n new_sources.append(source)\n ext.sources = new_sources\n\n def patch_extension(self, ext):\n \"\"\"\n Patch an extension according to requested Cython and OpenMP usage.\n\n :param Extension ext: An extension\n \"\"\"\n # Cytonize\n if not self.use_cython:\n self.patch_with_default_cythonized_files(ext)\n else:\n from Cython.Build import cythonize\n patched_exts = cythonize(\n [ext],\n compiler_directives={'embedsignature': True},\n force=self.force_cython\n )\n ext.sources = patched_exts[0].sources\n\n # Remove OpenMP flags if OpenMP is disabled\n if not self.use_openmp:\n ext.extra_compile_args = [\n f for f in ext.extra_compile_args if f != '-fopenmp']\n ext.extra_link_args = [\n f for f in ext.extra_link_args if f != '-fopenmp']\n\n # Convert flags from gcc to MSVC if required\n if self.compiler.compiler_type == 'msvc':\n ext.extra_compile_args = [self.COMPILE_ARGS_CONVERTER.get(f, f)\n for f in ext.extra_compile_args]\n ext.extra_link_args = [self.LINK_ARGS_CONVERTER.get(f, f)\n for f in ext.extra_link_args]\n\n elif self.compiler.compiler_type == 'unix':\n # Avoids runtime symbol collision for manylinux1 platform\n # See issue #1070\n extern = 'extern \"C\" ' if ext.language == 'c++' else ''\n return_type = 'void' if sys.version_info[0] <= 2 else 'PyObject*'\n\n ext.extra_compile_args.append(\n '''-fvisibility=hidden -D'PyMODINIT_FUNC=%s__attribute__((visibility(\"default\"))) %s ' ''' % (extern, return_type))\n\n def is_debug_interpreter(self):\n \"\"\"\n Returns true if the script is executed with a debug interpreter.\n\n It looks to be a non-standard code. It is not working for Windows and\n Mac. 
But it has to work at least for Debian interpreters.\n\n :rtype: bool\n \"\"\"\n if sys.version_info >= (3, 0):\n # It is normalized on Python 3\n # But it is not available on Windows CPython\n if hasattr(sys, \"abiflags\"):\n return \"d\" in sys.abiflags\n else:\n # It's a Python 2 interpreter\n # pydebug is not available on Windows/Mac OS interpreters\n if hasattr(sys, \"pydebug\"):\n return sys.pydebug\n\n # We can't know if we use a debug interpreter\n return False\n\n def patch_compiler(self):\n \"\"\"\n Patch the compiler to:\n - always compile extensions with debug symbols (-g)\n - only compile asserts in debug mode (-DNDEBUG)\n\n Plus numpy.distutils/setuptools/distutils inject a lot of duplicated\n flags. This function tries to clean up default debug options.\n \"\"\"\n build_obj = self.distribution.get_command_obj(\"build\")\n if build_obj.debug:\n debug_mode = build_obj.debug\n else:\n # Force debug_mode also when it uses python-dbg\n # It is needed for Debian packaging\n debug_mode = self.is_debug_interpreter()\n\n if self.compiler.compiler_type == \"unix\":\n args = list(self.compiler.compiler_so)\n # clean up debug flags; -g is included later in another way\n must_be_cleaned = [\"-DNDEBUG\", \"-g\"]\n args = filter(lambda x: x not in must_be_cleaned, args)\n args = list(args)\n\n # always insert symbols\n args.append(\"-g\")\n # only strip asserts in release mode\n if not debug_mode:\n args.append('-DNDEBUG')\n # patch options\n self.compiler.compiler_so = list(args)\n\n def build_extensions(self):\n self.patch_compiler()\n for ext in self.extensions:\n self.patch_extension(ext)\n build_ext.build_extensions(self)\n\n################################################################################\n# Clean command\n################################################################################\n\n\nclass CleanCommand(Clean):\n description = \"Remove build artifacts from the source tree\"\n\n def expand(self, path_list):\n \"\"\"Expand a list of paths using glob magic.\n\n :param list[str] path_list: A list of paths which may contain magic\n :rtype: list[str]\n :returns: A list of paths without magic\n \"\"\"\n path_list2 = []\n for path in path_list:\n if glob.has_magic(path):\n iterator = glob.iglob(path)\n path_list2.extend(iterator)\n else:\n path_list2.append(path)\n return path_list2\n\n def find(self, path_list):\n \"\"\"Find a file pattern in directories.\n\n Could be done using \"**/*.c\" but it is only supported from Python 3.5.\n\n :param list[str] path_list: A list of paths which may contain magic\n :rtype: list[str]\n :returns: A list of paths without magic\n \"\"\"\n import fnmatch\n path_list2 = []\n for pattern in path_list:\n for root, _, filenames in os.walk('.'):\n for filename in fnmatch.filter(filenames, pattern):\n path_list2.append(os.path.join(root, filename))\n return path_list2\n\n def run(self):\n Clean.run(self)\n\n cython_files = self.find([\"*.pyx\"])\n cythonized_files = [path.replace(\".pyx\", \".c\") for path in cython_files]\n cythonized_files += [path.replace(\".pyx\", \".cpp\") for path in cython_files]\n\n # really remove the directories\n # and not only if they are empty\n to_remove = [self.build_base]\n to_remove = self.expand(to_remove)\n to_remove += cythonized_files\n\n if not self.dry_run:\n for path in to_remove:\n try:\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n logger.info(\"removing '%s'\", path)\n except OSError:\n 
pass\n\n################################################################################\n# Source tree\n################################################################################\n\nclass SourceDistWithCython(sdist):\n \"\"\"\n Force cythonization of the extensions before generating the source\n distribution.\n\n To provide the widest compatibility the cythonized files are provided\n without support of OpenMP.\n \"\"\"\n\n description = \"Create a source distribution including cythonized files (tarball, zip file, etc.)\"\n\n def finalize_options(self):\n sdist.finalize_options(self)\n self.extensions = self.distribution.ext_modules\n\n def run(self):\n self.cythonize_extensions()\n sdist.run(self)\n\n def cythonize_extensions(self):\n from Cython.Build import cythonize\n cythonize(\n self.extensions,\n compiler_directives={'embedsignature': True},\n force=True\n )\n\n################################################################################\n# Debian source tree\n################################################################################\n\n\nclass sdist_debian(sdist):\n \"\"\"\n Tailor-made sdist for Debian\n * remove auto-generated doc\n * remove cython generated .c files\n * remove cython generated .cpp files\n * remove .bat files\n * include .l man files\n \"\"\"\n\n description = \"Create a source distribution for Debian (tarball, zip file, etc.)\"\n\n @staticmethod\n def get_debian_name():\n import version\n name = \"%s_%s\" % (PROJECT, version.debianversion)\n return name\n\n def prune_file_list(self):\n sdist.prune_file_list(self)\n to_remove = [\"doc/build\", \"doc/pdf\", \"doc/html\", \"pylint\", \"epydoc\"]\n print(\"Removing files for debian\")\n for rm in to_remove:\n self.filelist.exclude_pattern(pattern=\"*\", anchor=False, prefix=rm)\n\n # this is for Cython files specifically: remove C & html files\n search_root = os.path.dirname(os.path.abspath(__file__))\n for root, _, files in os.walk(search_root):\n for afile in files:\n if os.path.splitext(afile)[1].lower() == \".pyx\":\n base_file = os.path.join(root, afile)[len(search_root) + 1:-4]\n self.filelist.exclude_pattern(pattern=base_file + \".c\")\n self.filelist.exclude_pattern(pattern=base_file + \".cpp\")\n self.filelist.exclude_pattern(pattern=base_file + \".html\")\n\n # do not include third_party/_local files\n self.filelist.exclude_pattern(pattern=\"*\", prefix=\"silx/third_party/_local\")\n\n def make_distribution(self):\n self.prune_file_list()\n sdist.make_distribution(self)\n dest = self.archive_files[0]\n dirname, basename = os.path.split(dest)\n base, ext = os.path.splitext(basename)\n while ext in [\".zip\", \".tar\", \".bz2\", \".gz\", \".Z\", \".lz\", \".orig\"]:\n base, ext = os.path.splitext(base)\n # if ext:\n # dest = \"\".join((base, ext))\n # else:\n # dest = base\n # sp = dest.split(\"-\")\n # base = sp[:-1]\n # nr = sp[-1]\n debian_arch = os.path.join(dirname, self.get_debian_name() + \".orig.tar.gz\")\n os.rename(self.archive_files[0], debian_arch)\n self.archive_files = [debian_arch]\n print(\"Building debian .orig.tar.gz in %s\" % self.archive_files[0])\n\n\n# ##### #\n# setup #\n# ##### #\n\ndef get_project_configuration(dry_run):\n \"\"\"Returns project arguments for setup\"\"\"\n # Use installed numpy version as minimal required version\n # This is useful for wheels to advertise the numpy version they were built with\n if dry_run:\n numpy_requested_version = \"\"\n else:\n from numpy.version import version as numpy_version\n numpy_requested_version = \">=%s\" % 
numpy_version\n logger.info(\"Install requires: numpy %s\", numpy_requested_version)\n\n install_requires = [\n # for most of the computation\n \"numpy%s\" % numpy_requested_version,\n # for the script launcher\n \"setuptools\"]\n\n setup_requires = [\"setuptools\", \"numpy\"]\n\n package_data = {\n # Resources files for silx\n 'silx.resources': [\n 'gui/logo/*.png',\n 'gui/logo/*.svg',\n 'gui/icons/*.png',\n 'gui/icons/*.svg',\n 'gui/icons/*.mng',\n 'gui/icons/*.gif',\n 'gui/icons/*/*.png',\n 'opencl/*.cl',\n 'opencl/image/*.cl',\n 'opencl/sift/*.cl',\n 'opencl/codec/*.cl',\n 'gui/colormaps/*.npy'],\n 'silx.examples': ['*.png'],\n }\n\n entry_points = {\n 'console_scripts': ['silx = silx.__main__:main'],\n # 'gui_scripts': [],\n }\n\n cmdclass = dict(\n build=Build,\n build_py=build_py,\n test=PyTest,\n build_doc=BuildDocCommand,\n test_doc=TestDocCommand,\n build_ext=BuildExt,\n build_man=BuildMan,\n clean=CleanCommand,\n sdist=SourceDistWithCython,\n debian_src=sdist_debian)\n\n if dry_run:\n # DRY_RUN implies actions which do not require NumPy\n #\n # And they are required to succeed without Numpy for example when\n # pip is used to install silx when Numpy is not yet present in\n # the system.\n setup_kwargs = {}\n else:\n config = configuration()\n setup_kwargs = config.todict()\n\n setup_kwargs.update(name=PROJECT,\n version=get_version(),\n url=\"http://www.silx.org/\",\n author=\"data analysis unit\",\n author_email=\"[email protected]\",\n classifiers=classifiers,\n description=\"Software library for X-ray data analysis\",\n long_description=get_readme(),\n install_requires=install_requires,\n setup_requires=setup_requires,\n cmdclass=cmdclass,\n package_data=package_data,\n zip_safe=False,\n entry_points=entry_points,\n )\n return setup_kwargs\n\n\ndef setup_package():\n \"\"\"Run setup(**kwargs)\n\n Depending on the command, it either runs the complete setup which depends on numpy,\n or a *dry run* setup with no dependency on numpy.\n \"\"\"\n\n # Check if action requires build/install\n dry_run = len(sys.argv) == 1 or (len(sys.argv) >= 2 and (\n '--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands', 'egg_info', '--version',\n 'clean', '--name')))\n\n if dry_run:\n # DRY_RUN implies actions which do not require dependencies, like NumPy\n try:\n from setuptools import setup\n logger.info(\"Use setuptools.setup\")\n except ImportError:\n from distutils.core import setup\n logger.info(\"Use distutils.core.setup\")\n else:\n try:\n from setuptools import setup\n except ImportError:\n from numpy.distutils.core import setup\n logger.info(\"Use numpy.distutils.setup\")\n\n setup_kwargs = get_project_configuration(dry_run)\n setup(**setup_kwargs)\n\n\nif __name__ == \"__main__\":\n setup_package()\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 
\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"Widget providing a set of tools to draw masks on a PlotWidget.\n\nThis widget is meant to work with :class:`silx.gui.plot.PlotWidget`.\n\n- :class:`ImageMask`: Handle mask bitmap update and history\n- :class:`MaskToolsWidget`: GUI for :class:`Mask`\n- :class:`MaskToolsDockWidget`: DockWidget to integrate in :class:`PlotWindow`\n\"\"\"\nfrom __future__ import division\n\n\n__authors__ = [\"T. Vincent\", \"P. Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"24/04/2018\"\n\n\nimport os\nimport sys\nimport numpy\nimport logging\nimport collections\n\nfrom silx.image import shapes\n\nfrom ._BaseMaskToolsWidget import BaseMask, BaseMaskToolsWidget, BaseMaskToolsDockWidget\nfrom . import items\nfrom ..colors import cursorColorForColormap, rgba\nfrom .. import qt\n\nfrom silx.third_party.EdfFile import EdfFile\nfrom silx.third_party.TiffIO import TiffIO\n\ntry:\n import fabio\nexcept ImportError:\n fabio = None\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass ImageMask(BaseMask):\n \"\"\"A 2D mask field with update operations.\n\n Coords follows (row, column) convention and are in mask array coords.\n\n This is meant for internal use by :class:`MaskToolsWidget`.\n \"\"\"\n def __init__(self, image=None):\n \"\"\"\n\n :param image: :class:`silx.gui.plot.items.ImageBase` instance\n \"\"\"\n BaseMask.__init__(self, image)\n self.reset(shape=(0, 0)) # Init the mask with a 2D shape\n\n def getDataValues(self):\n \"\"\"Return image data as a 2D or 3D array (if it is a RGBA image).\n\n :rtype: 2D or 3D numpy.ndarray\n \"\"\"\n return self._dataItem.getData(copy=False)\n\n def save(self, filename, kind):\n \"\"\"Save current mask in a file\n\n :param str filename: The file where to save to mask\n :param str kind: The kind of file to save in 'edf', 'tif', 'npy',\n or 'msk' (if FabIO is installed)\n :raise Exception: Raised if the file writing fail\n \"\"\"\n if kind == 'edf':\n edfFile = EdfFile(filename, access=\"w+\")\n edfFile.WriteImage({}, self.getMask(copy=False), Append=0)\n\n elif kind == 'tif':\n tiffFile = TiffIO(filename, mode='w')\n tiffFile.writeImage(self.getMask(copy=False), software='silx')\n\n elif kind == 'npy':\n try:\n numpy.save(filename, self.getMask(copy=False))\n except IOError:\n raise RuntimeError(\"Mask file can't be written\")\n\n elif kind == 'msk':\n if fabio is None:\n raise ImportError(\"Fit2d mask files can't be written: Fabio module is not available\")\n try:\n data = self.getMask(copy=False)\n image = fabio.fabioimage.FabioImage(data=data)\n image = image.convert(fabio.fit2dmaskimage.Fit2dMaskImage)\n image.save(filename)\n except Exception:\n _logger.debug(\"Backtrace\", exc_info=True)\n raise RuntimeError(\"Mask file can't be written\")\n\n else:\n raise ValueError(\"Format '%s' is not supported\" % kind)\n\n # Drawing operations\n def updateRectangle(self, level, row, col, height, width, mask=True):\n \"\"\"Mask/Unmask a rectangle of the given mask level.\n\n :param int level: Mask level to update.\n :param int row: Starting row 
of the rectangle\n :param int col: Starting column of the rectangle\n :param int height:\n :param int width:\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n assert 0 < level < 256\n selection = self._mask[max(0, row):row + height + 1,\n max(0, col):col + width + 1]\n if mask:\n selection[:, :] = level\n else:\n selection[selection == level] = 0\n self._notify()\n\n def updatePolygon(self, level, vertices, mask=True):\n \"\"\"Mask/Unmask a polygon of the given mask level.\n\n :param int level: Mask level to update.\n :param vertices: Nx2 array of polygon corners as (row, col)\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n fill = shapes.polygon_fill_mask(vertices, self._mask.shape)\n if mask:\n self._mask[fill != 0] = level\n else:\n self._mask[numpy.logical_and(fill != 0,\n self._mask == level)] = 0\n self._notify()\n\n def updatePoints(self, level, rows, cols, mask=True):\n \"\"\"Mask/Unmask points with given coordinates.\n\n :param int level: Mask level to update.\n :param rows: Rows of selected points\n :type rows: 1D numpy.ndarray\n :param cols: Columns of selected points\n :type cols: 1D numpy.ndarray\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n valid = numpy.logical_and(\n numpy.logical_and(rows >= 0, cols >= 0),\n numpy.logical_and(rows < self._mask.shape[0],\n cols < self._mask.shape[1]))\n rows, cols = rows[valid], cols[valid]\n\n if mask:\n self._mask[rows, cols] = level\n else:\n inMask = self._mask[rows, cols] == level\n self._mask[rows[inMask], cols[inMask]] = 0\n self._notify()\n\n def updateDisk(self, level, crow, ccol, radius, mask=True):\n \"\"\"Mask/Unmask a disk of the given mask level.\n\n :param int level: Mask level to update.\n :param int crow: Disk center row.\n :param int ccol: Disk center column.\n :param float radius: Radius of the disk in mask array unit\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n rows, cols = shapes.circle_fill(crow, ccol, radius)\n self.updatePoints(level, rows, cols, mask)\n\n def updateLine(self, level, row0, col0, row1, col1, width, mask=True):\n \"\"\"Mask/Unmask a line of the given mask level.\n\n :param int level: Mask level to update.\n :param int row0: Row of the starting point.\n :param int col0: Column of the starting point.\n :param int row1: Row of the end point.\n :param int col1: Column of the end point.\n :param int width: Width of the line in mask array unit.\n :param bool mask: True to mask (default), False to unmask.\n \"\"\"\n rows, cols = shapes.draw_line(row0, col0, row1, col1, width)\n self.updatePoints(level, rows, cols, mask)\n\n\nclass MaskToolsWidget(BaseMaskToolsWidget):\n \"\"\"Widget with tools for drawing mask on an image in a PlotWidget.\"\"\"\n\n _maxLevelNumber = 255\n\n def __init__(self, parent=None, plot=None):\n super(MaskToolsWidget, self).__init__(parent, plot,\n mask=ImageMask())\n self._origin = (0., 0.) # Mask origin in plot\n self._scale = (1., 1.) 
# Mask scale in plot\n self._z = 1 # Mask layer in plot\n self._data = numpy.zeros((0, 0), dtype=numpy.uint8) # Store image\n\n def setSelectionMask(self, mask, copy=True):\n \"\"\"Set the mask to a new array.\n\n :param numpy.ndarray mask:\n The array to use for the mask or None to reset the mask.\n :type mask: numpy.ndarray of uint8 of dimension 2, C-contiguous.\n Array of other types are converted.\n :param bool copy: True (the default) to copy the array,\n False to use it as is if possible.\n :return: None if failed, shape of mask as 2-tuple if successful.\n The mask can be cropped or padded to fit active image,\n the returned shape is that of the active image.\n \"\"\"\n if mask is None:\n self.resetSelectionMask()\n return self._data.shape[:2]\n\n mask = numpy.array(mask, copy=False, dtype=numpy.uint8)\n if len(mask.shape) != 2:\n _logger.error('Not an image, shape: %d', len(mask.shape))\n return None\n\n # if mask has not changed, do nothing\n if numpy.array_equal(mask, self.getSelectionMask()):\n return mask.shape\n\n # ensure all mask attributes are synchronized with the active image\n # and connect listener\n activeImage = self.plot.getActiveImage()\n if activeImage is not None and activeImage.getLegend() != self._maskName:\n self._activeImageChanged()\n self.plot.sigActiveImageChanged.connect(self._activeImageChanged)\n\n if self._data.shape[0:2] == (0, 0) or mask.shape == self._data.shape[0:2]:\n self._mask.setMask(mask, copy=copy)\n self._mask.commit()\n return mask.shape\n else:\n _logger.warning('Mask has not the same size as current image.'\n ' Mask will be cropped or padded to fit image'\n ' dimensions. %s != %s',\n str(mask.shape), str(self._data.shape))\n resizedMask = numpy.zeros(self._data.shape[0:2],\n dtype=numpy.uint8)\n height = min(self._data.shape[0], mask.shape[0])\n width = min(self._data.shape[1], mask.shape[1])\n resizedMask[:height, :width] = mask[:height, :width]\n self._mask.setMask(resizedMask, copy=False)\n self._mask.commit()\n return resizedMask.shape\n\n # Handle mask refresh on the plot\n def _updatePlotMask(self):\n \"\"\"Update mask image in plot\"\"\"\n mask = self.getSelectionMask(copy=False)\n if mask is not None:\n # get the mask from the plot\n maskItem = self.plot.getImage(self._maskName)\n mustBeAdded = maskItem is None\n if mustBeAdded:\n maskItem = items.MaskImageData()\n maskItem._setLegend(self._maskName)\n # update the items\n maskItem.setData(mask, copy=False)\n maskItem.setColormap(self._colormap)\n maskItem.setOrigin(self._origin)\n maskItem.setScale(self._scale)\n maskItem.setZValue(self._z)\n\n if mustBeAdded:\n self.plot._add(maskItem)\n\n elif self.plot.getImage(self._maskName):\n self.plot.remove(self._maskName, kind='image')\n\n def showEvent(self, event):\n try:\n self.plot.sigActiveImageChanged.disconnect(\n self._activeImageChangedAfterCare)\n except (RuntimeError, TypeError):\n pass\n self._activeImageChanged() # Init mask + enable/disable widget\n self.plot.sigActiveImageChanged.connect(self._activeImageChanged)\n\n def hideEvent(self, event):\n try:\n self.plot.sigActiveImageChanged.disconnect(\n self._activeImageChanged)\n except (RuntimeError, TypeError):\n pass\n if not self.browseAction.isChecked():\n self.browseAction.trigger() # Disable drawing tool\n\n if self.getSelectionMask(copy=False) is not None:\n self.plot.sigActiveImageChanged.connect(\n self._activeImageChangedAfterCare)\n\n def _setOverlayColorForImage(self, image):\n \"\"\"Set the color of overlay adapted to image\n\n :param image: 
:class:`.items.ImageBase` object to set color for.\n \"\"\"\n if isinstance(image, items.ColormapMixIn):\n colormap = image.getColormap()\n self._defaultOverlayColor = rgba(\n cursorColorForColormap(colormap['name']))\n else:\n self._defaultOverlayColor = rgba('black')\n\n def _activeImageChangedAfterCare(self, *args):\n \"\"\"Check synchro of active image and mask when mask widget is hidden.\n\n If active image has no more the same size as the mask, the mask is\n removed, otherwise it is adjusted to origin, scale and z.\n \"\"\"\n activeImage = self.plot.getActiveImage()\n if activeImage is None or activeImage.getLegend() == self._maskName:\n # No active image or active image is the mask...\n self._data = numpy.zeros((0, 0), dtype=numpy.uint8)\n self._mask.setDataItem(None)\n self._mask.reset()\n\n if self.plot.getImage(self._maskName):\n self.plot.remove(self._maskName, kind='image')\n\n self.plot.sigActiveImageChanged.disconnect(\n self._activeImageChangedAfterCare)\n else:\n self._setOverlayColorForImage(activeImage)\n self._setMaskColors(self.levelSpinBox.value(),\n self.transparencySlider.value() /\n self.transparencySlider.maximum())\n\n self._origin = activeImage.getOrigin()\n self._scale = activeImage.getScale()\n self._z = activeImage.getZValue() + 1\n self._data = activeImage.getData(copy=False)\n if self._data.shape[:2] != self._mask.getMask(copy=False).shape:\n # Image has not the same size, remove mask and stop listening\n if self.plot.getImage(self._maskName):\n self.plot.remove(self._maskName, kind='image')\n\n self.plot.sigActiveImageChanged.disconnect(\n self._activeImageChangedAfterCare)\n else:\n # Refresh in case origin, scale, z changed\n self._mask.setDataItem(activeImage)\n self._updatePlotMask()\n\n def _activeImageChanged(self, *args):\n \"\"\"Update widget and mask according to active image changes\"\"\"\n activeImage = self.plot.getActiveImage()\n if (activeImage is None or activeImage.getLegend() == self._maskName or\n activeImage.getData(copy=False).size == 0):\n # No active image or active image is the mask or image has no data...\n self.setEnabled(False)\n\n self._data = numpy.zeros((0, 0), dtype=numpy.uint8)\n self._mask.reset()\n self._mask.commit()\n\n else: # There is an active image\n self.setEnabled(True)\n\n self._setOverlayColorForImage(activeImage)\n\n self._setMaskColors(self.levelSpinBox.value(),\n self.transparencySlider.value() /\n self.transparencySlider.maximum())\n\n self._origin = activeImage.getOrigin()\n self._scale = activeImage.getScale()\n self._z = activeImage.getZValue() + 1\n self._data = activeImage.getData(copy=False)\n self._mask.setDataItem(activeImage)\n if self._data.shape[:2] != self._mask.getMask(copy=False).shape:\n self._mask.reset(self._data.shape[:2])\n self._mask.commit()\n else:\n # Refresh in case origin, scale, z changed\n self._updatePlotMask()\n\n # Threshold tools only available for data with colormap\n self.thresholdGroup.setEnabled(self._data.ndim == 2)\n\n self._updateInteractiveMode()\n\n # Handle whole mask operations\n def load(self, filename):\n \"\"\"Load a mask from an image file.\n\n :param str filename: File name from which to load the mask\n :raise Exception: An exception in case of failure\n :raise RuntimeWarning: In case the mask was applied but with some\n import changes to notice\n \"\"\"\n _, extension = os.path.splitext(filename)\n extension = extension.lower()[1:]\n\n if extension == \"npy\":\n try:\n mask = numpy.load(filename)\n except IOError:\n _logger.error(\"Can't load filename '%s'\", 
filename)\n            _logger.debug(\"Backtrace\", exc_info=True)\n            raise RuntimeError('File \"%s\" is not a numpy file.' % filename)\n        elif extension in [\"tif\", \"tiff\"]:\n            try:\n                image = TiffIO(filename, mode=\"r\")\n                mask = image.getImage(0)\n            except Exception as e:\n                _logger.error(\"Can't load filename %s\", filename)\n                _logger.debug(\"Backtrace\", exc_info=True)\n                raise e\n        elif extension == \"edf\":\n            try:\n                mask = EdfFile(filename, access='r').GetData(0)\n            except Exception as e:\n                _logger.error(\"Can't load filename %s\", filename)\n                _logger.debug(\"Backtrace\", exc_info=True)\n                raise e\n        elif extension == \"msk\":\n            if fabio is None:\n                raise ImportError(\"Fit2d mask files can't be read: Fabio module is not available\")\n            try:\n                mask = fabio.open(filename).data\n            except Exception as e:\n                _logger.error(\"Can't load fit2d mask file\")\n                _logger.debug(\"Backtrace\", exc_info=True)\n                raise e\n        else:\n            msg = \"Extension '%s' is not supported.\"\n            raise RuntimeError(msg % extension)\n\n        effectiveMaskShape = self.setSelectionMask(mask, copy=False)\n        if effectiveMaskShape is None:\n            return\n        if mask.shape != effectiveMaskShape:\n            msg = 'Mask was resized from %s to %s'\n            msg = msg % (str(mask.shape), str(effectiveMaskShape))\n            raise RuntimeWarning(msg)\n\n    def _loadMask(self):\n        \"\"\"Open load mask dialog\"\"\"\n        dialog = qt.QFileDialog(self)\n        dialog.setWindowTitle(\"Load Mask\")\n        dialog.setModal(1)\n\n        extensions = collections.OrderedDict()\n        extensions[\"EDF files\"] = \"*.edf\"\n        extensions[\"TIFF files\"] = \"*.tif *.tiff\"\n        extensions[\"NumPy binary files\"] = \"*.npy\"\n        # The Fit2D mask filter is shown whether or not fabio is available,\n        # so that the user can see that the option exists\n        extensions[\"Fit2D mask files\"] = \"*.msk\"\n\n        filters = []\n        filters.append(\"All supported files (%s)\" % \" \".join(extensions.values()))\n        for name, extension in extensions.items():\n            filters.append(\"%s (%s)\" % (name, extension))\n        filters.append(\"All files (*)\")\n\n        dialog.setNameFilters(filters)\n        dialog.setFileMode(qt.QFileDialog.ExistingFile)\n        dialog.setDirectory(self.maskFileDir)\n        if not dialog.exec_():\n            dialog.close()\n            return\n\n        filename = dialog.selectedFiles()[0]\n        dialog.close()\n\n        self.maskFileDir = os.path.dirname(filename)\n        try:\n            self.load(filename)\n        except RuntimeWarning as e:\n            message = e.args[0]\n            msg = qt.QMessageBox(self)\n            msg.setIcon(qt.QMessageBox.Warning)\n            msg.setText(\"Mask loaded but an operation was applied.\\n\" + message)\n            msg.exec_()\n        except Exception as e:\n            message = e.args[0]\n            msg = qt.QMessageBox(self)\n            msg.setIcon(qt.QMessageBox.Critical)\n            msg.setText(\"Cannot load mask from file. 
\" + message)\n msg.exec_()\n\n def _saveMask(self):\n \"\"\"Open Save mask dialog\"\"\"\n dialog = qt.QFileDialog(self)\n dialog.setWindowTitle(\"Save Mask\")\n dialog.setModal(1)\n filters = [\n 'EDF (*.edf)',\n 'TIFF (*.tif)',\n 'NumPy binary file (*.npy)',\n # Fit2D mask is displayed anyway fabio is here or not\n # to show to the user that the option exists\n 'Fit2D mask (*.msk)',\n ]\n dialog.setNameFilters(filters)\n dialog.setFileMode(qt.QFileDialog.AnyFile)\n dialog.setAcceptMode(qt.QFileDialog.AcceptSave)\n dialog.setDirectory(self.maskFileDir)\n if not dialog.exec_():\n dialog.close()\n return\n\n # convert filter name to extension name with the .\n extension = dialog.selectedNameFilter().split()[-1][2:-1]\n filename = dialog.selectedFiles()[0]\n dialog.close()\n\n if not filename.lower().endswith(extension):\n filename += extension\n\n if os.path.exists(filename):\n try:\n os.remove(filename)\n except IOError:\n msg = qt.QMessageBox(self)\n msg.setIcon(qt.QMessageBox.Critical)\n msg.setText(\"Cannot save.\\n\"\n \"Input Output Error: %s\" % (sys.exc_info()[1]))\n msg.exec_()\n return\n\n self.maskFileDir = os.path.dirname(filename)\n try:\n self.save(filename, extension[1:])\n except Exception as e:\n msg = qt.QMessageBox(self)\n msg.setIcon(qt.QMessageBox.Critical)\n msg.setText(\"Cannot save file %s\\n%s\" % (filename, e.args[0]))\n msg.exec_()\n\n def resetSelectionMask(self):\n \"\"\"Reset the mask\"\"\"\n self._mask.reset(shape=self._data.shape[:2])\n self._mask.commit()\n\n def _plotDrawEvent(self, event):\n \"\"\"Handle draw events from the plot\"\"\"\n if (self._drawingMode is None or\n event['event'] not in ('drawingProgress', 'drawingFinished')):\n return\n\n if not len(self._data):\n return\n\n level = self.levelSpinBox.value()\n\n if (self._drawingMode == 'rectangle' and\n event['event'] == 'drawingFinished'):\n # Convert from plot to array coords\n doMask = self._isMasking()\n ox, oy = self._origin\n sx, sy = self._scale\n\n height = int(abs(event['height'] / sy))\n width = int(abs(event['width'] / sx))\n\n row = int((event['y'] - oy) / sy)\n if sy < 0:\n row -= height\n\n col = int((event['x'] - ox) / sx)\n if sx < 0:\n col -= width\n\n self._mask.updateRectangle(\n level,\n row=row,\n col=col,\n height=height,\n width=width,\n mask=doMask)\n self._mask.commit()\n\n elif (self._drawingMode == 'polygon' and\n event['event'] == 'drawingFinished'):\n doMask = self._isMasking()\n # Convert from plot to array coords\n vertices = (event['points'] - self._origin) / self._scale\n vertices = vertices.astype(numpy.int)[:, (1, 0)] # (row, col)\n self._mask.updatePolygon(level, vertices, doMask)\n self._mask.commit()\n\n elif self._drawingMode == 'pencil':\n doMask = self._isMasking()\n # convert from plot to array coords\n col, row = (event['points'][-1] - self._origin) / self._scale\n col, row = int(col), int(row)\n brushSize = self._getPencilWidth()\n\n if self._lastPencilPos != (row, col):\n if self._lastPencilPos is not None:\n # Draw the line\n self._mask.updateLine(\n level,\n self._lastPencilPos[0], self._lastPencilPos[1],\n row, col,\n brushSize,\n doMask)\n\n # Draw the very first, or last point\n self._mask.updateDisk(level, row, col, brushSize / 2., doMask)\n\n if event['event'] == 'drawingFinished':\n self._mask.commit()\n self._lastPencilPos = None\n else:\n self._lastPencilPos = row, col\n\n def _loadRangeFromColormapTriggered(self):\n \"\"\"Set range from active image colormap range\"\"\"\n activeImage = self.plot.getActiveImage()\n if (isinstance(activeImage, 
items.ColormapMixIn) and\n                activeImage.getLegend() != self._maskName):\n            # Update thresholds according to colormap\n            colormap = activeImage.getColormap()\n            if colormap['autoscale']:\n                min_ = numpy.nanmin(activeImage.getData(copy=False))\n                max_ = numpy.nanmax(activeImage.getData(copy=False))\n            else:\n                min_, max_ = colormap['vmin'], colormap['vmax']\n            self.minLineEdit.setText(str(min_))\n            self.maxLineEdit.setText(str(max_))\n\n\nclass MaskToolsDockWidget(BaseMaskToolsDockWidget):\n    \"\"\":class:`MaskToolsWidget` embedded in a QDockWidget.\n\n    For integration in a :class:`PlotWindow`.\n\n    :param parent: See :class:`QDockWidget`\n    :param plot: The PlotWidget this widget is operating on\n    :param str name: The title of this widget\n    \"\"\"\n    def __init__(self, parent=None, plot=None, name='Mask'):\n        widget = MaskToolsWidget(plot=plot)\n        super(MaskToolsDockWidget, self).__init__(parent, name, widget)\n", "# coding: utf-8\n# /*##########################################################################\n# Copyright (C) 2016-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ############################################################################*/\n\"\"\"This module provides functions to read fabio images as an HDF5 file.\n\n    >>> import silx.io.fabioh5\n    >>> f = silx.io.fabioh5.File(\"foobar.edf\")\n\n.. note:: This module has a dependency on the `h5py <http://www.h5py.org/>`_\n    and `fabio <https://github.com/silx-kit/fabio>`_ libraries,\n    which are not mandatory dependencies for `silx`.\n\n\"\"\"\n\nimport collections\nimport datetime\nimport logging\nimport numbers\nimport os\n\nimport fabio.file_series\nimport numpy\n\nfrom . 
import commonh5\nfrom silx.third_party import six\nfrom silx import version as silx_version\nimport silx.utils.number\n\ntry:\n import h5py\nexcept ImportError as e:\n h5py = None\n\n\n_logger = logging.getLogger(__name__)\n\n\n_fabio_extensions = set([])\n\n\ndef supported_extensions():\n \"\"\"Returns all extensions supported by fabio.\n\n :returns: A set containing extensions like \"*.edf\".\n :rtype: Set[str]\n \"\"\"\n global _fabio_extensions\n if len(_fabio_extensions) > 0:\n return _fabio_extensions\n\n formats = fabio.fabioformats.get_classes(reader=True)\n all_extensions = set([])\n\n for reader in formats:\n if not hasattr(reader, \"DEFAULT_EXTENSIONS\"):\n continue\n\n ext = reader.DEFAULT_EXTENSIONS\n ext = [\"*.%s\" % e for e in ext]\n all_extensions.update(ext)\n\n _fabio_extensions = set(all_extensions)\n return _fabio_extensions\n\n\nclass _FileSeries(fabio.file_series.file_series):\n \"\"\"\n .. note:: Overwrite a function to fix an issue in fabio.\n \"\"\"\n def jump(self, num):\n \"\"\"\n Goto a position in sequence\n \"\"\"\n assert num < len(self) and num >= 0, \"num out of range\"\n self._current = num\n return self[self._current]\n\n\nclass FrameData(commonh5.LazyLoadableDataset):\n \"\"\"Expose a cube of image from a Fabio file using `FabioReader` as\n cache.\"\"\"\n\n def __init__(self, name, fabio_reader, parent=None):\n if fabio_reader.is_spectrum():\n attrs = {\"interpretation\": \"spectrum\"}\n else:\n attrs = {\"interpretation\": \"image\"}\n commonh5.LazyLoadableDataset.__init__(self, name, parent, attrs=attrs)\n self.__fabio_reader = fabio_reader\n self._shape = None\n self._dtype = None\n\n def _create_data(self):\n return self.__fabio_reader.get_data()\n\n def _update_cache(self):\n if isinstance(self.__fabio_reader.fabio_file(),\n fabio.file_series.file_series):\n # Reading all the files is taking too much time\n # Reach the information from the only first frame\n first_image = self.__fabio_reader.fabio_file().first_image()\n self._dtype = first_image.data.dtype\n shape0 = self.__fabio_reader.frame_count()\n shape1, shape2 = first_image.data.shape\n self._shape = shape0, shape1, shape2\n else:\n self._dtype = super(commonh5.LazyLoadableDataset, self).dtype\n self._shape = super(commonh5.LazyLoadableDataset, self).shape\n\n @property\n def dtype(self):\n if self._dtype is None:\n self._update_cache()\n return self._dtype\n\n @property\n def shape(self):\n if self._shape is None:\n self._update_cache()\n return self._shape\n\n def __iter__(self):\n for frame in self.__fabio_reader.iter_frames():\n yield frame.data\n\n def __getitem__(self, item):\n # optimization for fetching a single frame if data not already loaded\n if not self._is_initialized:\n if isinstance(item, six.integer_types) and \\\n isinstance(self.__fabio_reader.fabio_file(),\n fabio.file_series.file_series):\n if item < 0:\n # negative indexing\n item += len(self)\n return self.__fabio_reader.fabio_file().jump_image(item).data\n return super(FrameData, self).__getitem__(item)\n\n\nclass RawHeaderData(commonh5.LazyLoadableDataset):\n \"\"\"Lazy loadable raw header\"\"\"\n\n def __init__(self, name, fabio_reader, parent=None):\n commonh5.LazyLoadableDataset.__init__(self, name, parent)\n self.__fabio_reader = fabio_reader\n\n def _create_data(self):\n \"\"\"Initialize hold data by merging all headers of each frames.\n \"\"\"\n headers = []\n types = set([])\n for fabio_frame in self.__fabio_reader.iter_frames():\n header = fabio_frame.header\n\n data = []\n for key, value in header.items():\n 
data.append(\"%s: %s\" % (str(key), str(value)))\n\n data = \"\\n\".join(data)\n try:\n line = data.encode(\"ascii\")\n types.add(numpy.string_)\n except UnicodeEncodeError:\n try:\n line = data.encode(\"utf-8\")\n types.add(numpy.unicode_)\n except UnicodeEncodeError:\n # Fallback in void\n line = numpy.void(data)\n types.add(numpy.void)\n\n headers.append(line)\n\n if numpy.void in types:\n dtype = numpy.void\n elif numpy.unicode_ in types:\n dtype = numpy.unicode_\n else:\n dtype = numpy.string_\n\n if dtype == numpy.unicode_ and h5py is not None:\n # h5py only support vlen unicode\n dtype = h5py.special_dtype(vlen=six.text_type)\n\n return numpy.array(headers, dtype=dtype)\n\n\nclass MetadataGroup(commonh5.LazyLoadableGroup):\n \"\"\"Abstract class for groups containing a reference to a fabio image.\n \"\"\"\n\n def __init__(self, name, metadata_reader, kind, parent=None, attrs=None):\n commonh5.LazyLoadableGroup.__init__(self, name, parent, attrs)\n self.__metadata_reader = metadata_reader\n self.__kind = kind\n\n def _create_child(self):\n keys = self.__metadata_reader.get_keys(self.__kind)\n for name in keys:\n data = self.__metadata_reader.get_value(self.__kind, name)\n dataset = commonh5.Dataset(name, data)\n self.add_node(dataset)\n\n @property\n def _metadata_reader(self):\n return self.__metadata_reader\n\n\nclass DetectorGroup(commonh5.LazyLoadableGroup):\n \"\"\"Define the detector group (sub group of instrument) using Fabio data.\n \"\"\"\n\n def __init__(self, name, fabio_reader, parent=None, attrs=None):\n if attrs is None:\n attrs = {\"NX_class\": \"NXdetector\"}\n commonh5.LazyLoadableGroup.__init__(self, name, parent, attrs)\n self.__fabio_reader = fabio_reader\n\n def _create_child(self):\n data = FrameData(\"data\", self.__fabio_reader)\n self.add_node(data)\n\n # TODO we should add here Nexus informations we can extract from the\n # metadata\n\n others = MetadataGroup(\"others\", self.__fabio_reader, kind=FabioReader.DEFAULT)\n self.add_node(others)\n\n\nclass ImageGroup(commonh5.LazyLoadableGroup):\n \"\"\"Define the image group (sub group of measurement) using Fabio data.\n \"\"\"\n\n def __init__(self, name, fabio_reader, parent=None, attrs=None):\n commonh5.LazyLoadableGroup.__init__(self, name, parent, attrs)\n self.__fabio_reader = fabio_reader\n\n def _create_child(self):\n basepath = self.parent.parent.name\n data = commonh5.SoftLink(\"data\", path=basepath + \"/instrument/detector_0/data\")\n self.add_node(data)\n detector = commonh5.SoftLink(\"info\", path=basepath + \"/instrument/detector_0\")\n self.add_node(detector)\n\n\nclass SampleGroup(commonh5.LazyLoadableGroup):\n \"\"\"Define the image group (sub group of measurement) using Fabio data.\n \"\"\"\n\n def __init__(self, name, fabio_reader, parent=None):\n attrs = {\"NXclass\": \"NXsample\"}\n commonh5.LazyLoadableGroup.__init__(self, name, parent, attrs)\n self.__fabio_reader = fabio_reader\n\n def _create_child(self):\n if self.__fabio_reader.has_ub_matrix():\n scalar = {\"interpretation\": \"scalar\"}\n data = self.__fabio_reader.get_unit_cell_abc()\n data = commonh5.Dataset(\"unit_cell_abc\", data, attrs=scalar)\n self.add_node(data)\n unit_cell_data = numpy.zeros((1, 6), numpy.float32)\n unit_cell_data[0, :3] = data\n data = self.__fabio_reader.get_unit_cell_alphabetagamma()\n data = commonh5.Dataset(\"unit_cell_alphabetagamma\", data, attrs=scalar)\n self.add_node(data)\n unit_cell_data[0, 3:] = data\n data = commonh5.Dataset(\"unit_cell\", unit_cell_data, attrs=scalar)\n self.add_node(data)\n 
data = self.__fabio_reader.get_ub_matrix()\n            data = commonh5.Dataset(\"ub_matrix\", data, attrs=scalar)\n            self.add_node(data)\n\n\nclass MeasurementGroup(commonh5.LazyLoadableGroup):\n    \"\"\"Define the measurement group for a fabio file.\n    \"\"\"\n\n    def __init__(self, name, fabio_reader, parent=None, attrs=None):\n        commonh5.LazyLoadableGroup.__init__(self, name, parent, attrs)\n        self.__fabio_reader = fabio_reader\n\n    def _create_child(self):\n        keys = self.__fabio_reader.get_keys(FabioReader.COUNTER)\n\n        # create the image measurement, taking care that no other metadata\n        # uses this name\n        for i in range(1000):\n            name = \"image_%i\" % i\n            if name not in keys:\n                data = ImageGroup(name, self.__fabio_reader)\n                self.add_node(data)\n                break\n        else:\n            raise Exception(\"image_i for 0..1000 already used\")\n\n        # add all counters\n        for name in keys:\n            data = self.__fabio_reader.get_value(FabioReader.COUNTER, name)\n            dataset = commonh5.Dataset(name, data)\n            self.add_node(dataset)\n\n\nclass FabioReader(object):\n    \"\"\"Class which reads and caches data and metadata from a fabio image.\"\"\"\n\n    DEFAULT = 0\n    COUNTER = 1\n    POSITIONER = 2\n\n    def __init__(self, file_name=None, fabio_image=None, file_series=None):\n        \"\"\"\n        Constructor\n\n        :param str file_name: File name of the image file to read\n        :param fabio.fabioimage.FabioImage fabio_image: An already opened\n            :class:`fabio.fabioimage.FabioImage` instance.\n        :param Union[list[str],fabio.file_series.file_series] file_series: A\n            list of file names or a :class:`fabio.file_series.file_series`\n            instance\n        \"\"\"\n        self.__at_least_32bits = False\n        self.__signed_type = False\n\n        self.__load(file_name, fabio_image, file_series)\n        self.__counters = {}\n        self.__positioners = {}\n        self.__measurements = {}\n        self.__key_filters = set([])\n        self.__data = None\n        self.__frame_count = self.frame_count()\n        self._read()\n\n    def __load(self, file_name=None, fabio_image=None, file_series=None):\n        if file_name is not None and fabio_image:\n            raise TypeError(\"Parameters file_name and fabio_image are mutually exclusive.\")\n        if fabio_image is not None and file_series is not None:\n            raise TypeError(\"Parameters fabio_image and file_series are mutually exclusive.\")\n\n        self.__must_be_closed = False\n\n        if file_name is not None:\n            self.__fabio_file = fabio.open(file_name)\n            self.__must_be_closed = True\n        elif fabio_image is not None:\n            if isinstance(fabio_image, fabio.fabioimage.FabioImage):\n                self.__fabio_file = fabio_image\n            else:\n                raise TypeError(\"FabioImage expected but %s found.\" % fabio_image.__class__)\n        elif file_series is not None:\n            if isinstance(file_series, list):\n                self.__fabio_file = _FileSeries(file_series)\n            elif isinstance(file_series, fabio.file_series.file_series):\n                self.__fabio_file = file_series\n            else:\n                raise TypeError(\"file_series or list expected but %s found.\" % file_series.__class__)\n\n    def close(self):\n        \"\"\"Close the object, and free up associated resources.\n\n        The associated FabioImage is closed only if the object was created from\n        a filename by this class itself.\n\n        After calling this method, attempts to use the object (and children)\n        may fail.\n        \"\"\"\n        if self.__must_be_closed:\n            # It looks like there is no close on FabioImage\n            # self.__fabio_file.close()\n            pass\n        self.__fabio_file = None\n\n    def fabio_file(self):\n        return self.__fabio_file\n\n    def frame_count(self):\n        \"\"\"Returns the number of frames available.\"\"\"\n        if isinstance(self.__fabio_file, fabio.file_series.file_series):\n            return len(self.__fabio_file)\n        elif 
isinstance(self.__fabio_file, fabio.fabioimage.FabioImage):\n return self.__fabio_file.nframes\n else:\n raise TypeError(\"Unsupported type %s\", self.__fabio_file.__class__)\n\n def iter_frames(self):\n \"\"\"Iter all the available frames.\n\n A frame provides at least `data` and `header` attributes.\n \"\"\"\n if isinstance(self.__fabio_file, fabio.file_series.file_series):\n for file_number in range(len(self.__fabio_file)):\n with self.__fabio_file.jump_image(file_number) as fabio_image:\n # return the first frame only\n assert(fabio_image.nframes == 1)\n yield fabio_image\n elif isinstance(self.__fabio_file, fabio.fabioimage.FabioImage):\n for frame_count in range(self.__fabio_file.nframes):\n if self.__fabio_file.nframes == 1:\n yield self.__fabio_file\n else:\n yield self.__fabio_file.getframe(frame_count)\n else:\n raise TypeError(\"Unsupported type %s\", self.__fabio_file.__class__)\n\n def _create_data(self):\n \"\"\"Initialize hold data by merging all frames into a single cube.\n\n Choose the cube size which fit the best the data. If some images are\n smaller than expected, the empty space is set to 0.\n\n The computation is cached into the class, and only done ones.\n \"\"\"\n images = []\n for fabio_frame in self.iter_frames():\n images.append(fabio_frame.data)\n\n # returns the data without extra dim in case of single frame\n if len(images) == 1:\n return images[0]\n\n # get the max size\n max_dim = max([i.ndim for i in images])\n max_shape = [0] * max_dim\n for image in images:\n for dim in range(image.ndim):\n if image.shape[dim] > max_shape[dim]:\n max_shape[dim] = image.shape[dim]\n max_shape = tuple(max_shape)\n\n # fix smallest images\n for index, image in enumerate(images):\n if image.shape == max_shape:\n continue\n location = [slice(0, i) for i in image.shape]\n while len(location) < max_dim:\n location.append(0)\n normalized_image = numpy.zeros(max_shape, dtype=image.dtype)\n normalized_image[location] = image\n images[index] = normalized_image\n\n # create a cube\n return numpy.array(images)\n\n def __get_dict(self, kind):\n \"\"\"Returns a dictionary from according to an expected kind\"\"\"\n if kind == self.DEFAULT:\n return self.__measurements\n elif kind == self.COUNTER:\n return self.__counters\n elif kind == self.POSITIONER:\n return self.__positioners\n else:\n raise Exception(\"Unexpected kind %s\", kind)\n\n def get_data(self):\n \"\"\"Returns a cube from all available data from frames\n\n :rtype: numpy.ndarray\n \"\"\"\n if self.__data is None:\n self.__data = self._create_data()\n return self.__data\n\n def get_keys(self, kind):\n \"\"\"Get all available keys according to a kind of metadata.\n\n :rtype: list\n \"\"\"\n return self.__get_dict(kind).keys()\n\n def get_value(self, kind, name):\n \"\"\"Get a metadata value according to the kind and the name.\n\n :rtype: numpy.ndarray\n \"\"\"\n value = self.__get_dict(kind)[name]\n if not isinstance(value, numpy.ndarray):\n if kind in [self.COUNTER, self.POSITIONER]:\n # Force normalization for counters and positioners\n old = self._set_vector_normalization(at_least_32bits=True, signed_type=True)\n else:\n old = None\n value = self._convert_metadata_vector(value)\n self.__get_dict(kind)[name] = value\n if old is not None:\n self._set_vector_normalization(*old)\n return value\n\n def _set_counter_value(self, frame_id, name, value):\n \"\"\"Set a counter metadata according to the frame id\"\"\"\n if name not in self.__counters:\n self.__counters[name] = [None] * self.__frame_count\n 
self.__counters[name][frame_id] = value\n\n def _set_positioner_value(self, frame_id, name, value):\n \"\"\"Set a positioner metadata according to the frame id\"\"\"\n if name not in self.__positioners:\n self.__positioners[name] = [None] * self.__frame_count\n self.__positioners[name][frame_id] = value\n\n def _set_measurement_value(self, frame_id, name, value):\n \"\"\"Set a measurement metadata according to the frame id\"\"\"\n if name not in self.__measurements:\n self.__measurements[name] = [None] * self.__frame_count\n self.__measurements[name][frame_id] = value\n\n def _enable_key_filters(self, fabio_file):\n self.__key_filters.clear()\n if hasattr(fabio_file, \"RESERVED_HEADER_KEYS\"):\n # Provided in fabio 0.5\n for key in fabio_file.RESERVED_HEADER_KEYS:\n self.__key_filters.add(key.lower())\n\n def _read(self):\n \"\"\"Read all metadata from the fabio file and store it into this\n object.\"\"\"\n\n file_series = isinstance(self.__fabio_file, fabio.file_series.file_series)\n if not file_series:\n self._enable_key_filters(self.__fabio_file)\n\n for frame_id, fabio_frame in enumerate(self.iter_frames()):\n if file_series:\n self._enable_key_filters(fabio_frame)\n self._read_frame(frame_id, fabio_frame.header)\n\n def _is_filtered_key(self, key):\n \"\"\"\n If this function returns True, the :meth:`_read_key` while not be\n called with this `key`while reading the metatdata frame.\n\n :param str key: A key of the metadata\n :rtype: bool\n \"\"\"\n return key.lower() in self.__key_filters\n\n def _read_frame(self, frame_id, header):\n \"\"\"Read all metadata from a frame and store it into this\n object.\"\"\"\n for key, value in header.items():\n if self._is_filtered_key(key):\n continue\n self._read_key(frame_id, key, value)\n\n def _read_key(self, frame_id, name, value):\n \"\"\"Read a key from the metadata and cache it into this object.\"\"\"\n self._set_measurement_value(frame_id, name, value)\n\n def _set_vector_normalization(self, at_least_32bits, signed_type):\n previous = self.__at_least_32bits, self.__signed_type\n self.__at_least_32bits = at_least_32bits\n self.__signed_type = signed_type\n return previous\n\n def _normalize_vector_type(self, dtype):\n \"\"\"Normalize the \"\"\"\n if self.__at_least_32bits:\n if numpy.issubdtype(dtype, numpy.signedinteger):\n dtype = numpy.result_type(dtype, numpy.uint32)\n if numpy.issubdtype(dtype, numpy.unsignedinteger):\n dtype = numpy.result_type(dtype, numpy.uint32)\n elif numpy.issubdtype(dtype, numpy.floating):\n dtype = numpy.result_type(dtype, numpy.float32)\n elif numpy.issubdtype(dtype, numpy.complexfloating):\n dtype = numpy.result_type(dtype, numpy.complex64)\n if self.__signed_type:\n if numpy.issubdtype(dtype, numpy.unsignedinteger):\n signed = numpy.dtype(\"%s%i\" % ('i', dtype.itemsize))\n dtype = numpy.result_type(dtype, signed)\n return dtype\n\n def _convert_metadata_vector(self, values):\n \"\"\"Convert a list of numpy data into a numpy array with the better\n fitting type.\"\"\"\n converted = []\n types = set([])\n has_none = False\n for v in values:\n if v is None:\n converted.append(None)\n has_none = True\n else:\n c = self._convert_value(v)\n converted.append(c)\n types.add(c.dtype)\n\n if has_none and len(types) == 0:\n # That's a list of none values\n return numpy.array([0] * len(values), numpy.int8)\n\n result_type = numpy.result_type(*types)\n\n if issubclass(result_type.type, numpy.string_):\n # use the raw data to create the array\n result = values\n elif issubclass(result_type.type, numpy.unicode_):\n # use 
the raw data to create the array\n result = values\n else:\n result = converted\n\n result_type = self._normalize_vector_type(result_type)\n\n if has_none:\n # Fix missing data according to the array type\n if result_type.kind == \"S\":\n none_value = b\"\"\n elif result_type.kind == \"U\":\n none_value = u\"\"\n elif result_type.kind == \"f\":\n none_value = numpy.float(\"NaN\")\n elif result_type.kind == \"i\":\n none_value = numpy.int(0)\n elif result_type.kind == \"u\":\n none_value = numpy.int(0)\n elif result_type.kind == \"b\":\n none_value = numpy.bool(False)\n else:\n none_value = None\n\n for index, r in enumerate(result):\n if r is not None:\n continue\n result[index] = none_value\n\n return numpy.array(result, dtype=result_type)\n\n def _convert_value(self, value):\n \"\"\"Convert a string into a numpy object (scalar or array).\n\n The value is most of the time a string, but it can be python object\n in case if TIFF decoder for example.\n \"\"\"\n if isinstance(value, list):\n # convert to a numpy array\n return numpy.array(value)\n if isinstance(value, dict):\n # convert to a numpy associative array\n key_dtype = numpy.min_scalar_type(list(value.keys()))\n value_dtype = numpy.min_scalar_type(list(value.values()))\n associative_type = [('key', key_dtype), ('value', value_dtype)]\n assert key_dtype.kind != \"O\" and value_dtype.kind != \"O\"\n return numpy.array(list(value.items()), dtype=associative_type)\n if isinstance(value, numbers.Number):\n dtype = numpy.min_scalar_type(value)\n assert dtype.kind != \"O\"\n return dtype.type(value)\n\n if isinstance(value, six.binary_type):\n try:\n value = value.decode('utf-8')\n except UnicodeDecodeError:\n return numpy.void(value)\n\n if \" \" in value:\n result = self._convert_list(value)\n else:\n result = self._convert_scalar_value(value)\n return result\n\n def _convert_scalar_value(self, value):\n \"\"\"Convert a string into a numpy int or float.\n\n If it is not possible it returns a numpy string.\n \"\"\"\n try:\n numpy_type = silx.utils.number.min_numerical_convertible_type(value)\n converted = numpy_type(value)\n except ValueError:\n converted = numpy.string_(value)\n return converted\n\n def _convert_list(self, value):\n \"\"\"Convert a string into a typed numpy array.\n\n If it is not possible it returns a numpy string.\n \"\"\"\n try:\n numpy_values = []\n values = value.split(\" \")\n types = set([])\n for string_value in values:\n v = self._convert_scalar_value(string_value)\n numpy_values.append(v)\n types.add(v.dtype.type)\n\n result_type = numpy.result_type(*types)\n\n if issubclass(result_type.type, (numpy.string_, six.binary_type)):\n # use the raw data to create the result\n return numpy.string_(value)\n elif issubclass(result_type.type, (numpy.unicode_, six.text_type)):\n # use the raw data to create the result\n return numpy.unicode_(value)\n else:\n return numpy.array(numpy_values, dtype=result_type)\n except ValueError:\n return numpy.string_(value)\n\n def has_sample_information(self):\n \"\"\"Returns true if there is information about the sample in the\n file\n\n :rtype: bool\n \"\"\"\n return self.has_ub_matrix()\n\n def has_ub_matrix(self):\n \"\"\"Returns true if a UB matrix is available.\n\n :rtype: bool\n \"\"\"\n return False\n\n def is_spectrum(self):\n \"\"\"Returns true if the data should be interpreted as\n MCA data.\n\n :rtype: bool\n \"\"\"\n return False\n\n\nclass EdfFabioReader(FabioReader):\n \"\"\"Class which read and cache data and metadata from a fabio image.\n\n It is mostly the same as 
FabioReader, but counter_mne and\n motor_mne are parsed using a special way.\n \"\"\"\n\n def __init__(self, file_name=None, fabio_image=None, file_series=None):\n FabioReader.__init__(self, file_name, fabio_image, file_series)\n self.__unit_cell_abc = None\n self.__unit_cell_alphabetagamma = None\n self.__ub_matrix = None\n\n def _read_frame(self, frame_id, header):\n \"\"\"Overwrite the method to check and parse special keys: counter and\n motors keys.\"\"\"\n self.__catch_keys = set([])\n if \"motor_pos\" in header and \"motor_mne\" in header:\n self.__catch_keys.add(\"motor_pos\")\n self.__catch_keys.add(\"motor_mne\")\n self._read_mnemonic_key(frame_id, \"motor\", header)\n if \"counter_pos\" in header and \"counter_mne\" in header:\n self.__catch_keys.add(\"counter_pos\")\n self.__catch_keys.add(\"counter_mne\")\n self._read_mnemonic_key(frame_id, \"counter\", header)\n FabioReader._read_frame(self, frame_id, header)\n\n def _is_filtered_key(self, key):\n if key in self.__catch_keys:\n return True\n return FabioReader._is_filtered_key(self, key)\n\n def _get_mnemonic_key(self, base_key, header):\n mnemonic_values_key = base_key + \"_mne\"\n mnemonic_values = header.get(mnemonic_values_key, \"\")\n mnemonic_values = mnemonic_values.split()\n pos_values_key = base_key + \"_pos\"\n pos_values = header.get(pos_values_key, \"\")\n pos_values = pos_values.split()\n\n result = collections.OrderedDict()\n nbitems = max(len(mnemonic_values), len(pos_values))\n for i in range(nbitems):\n if i < len(mnemonic_values):\n mnemonic = mnemonic_values[i]\n else:\n # skip the element\n continue\n\n if i < len(pos_values):\n pos = pos_values[i]\n else:\n pos = None\n\n result[mnemonic] = pos\n return result\n\n def _read_mnemonic_key(self, frame_id, base_key, header):\n \"\"\"Parse a mnemonic key\"\"\"\n is_counter = base_key == \"counter\"\n is_positioner = base_key == \"motor\"\n data = self._get_mnemonic_key(base_key, header)\n\n for mnemonic, pos in data.items():\n if is_counter:\n self._set_counter_value(frame_id, mnemonic, pos)\n elif is_positioner:\n self._set_positioner_value(frame_id, mnemonic, pos)\n else:\n raise Exception(\"State unexpected (base_key: %s)\" % base_key)\n\n def _get_first_header(self):\n \"\"\"\n ..note:: This function can be cached\n \"\"\"\n fabio_file = self.fabio_file()\n if isinstance(fabio_file, fabio.file_series.file_series):\n return fabio_file.jump_image(0).header\n return fabio_file.header\n\n def has_ub_matrix(self):\n \"\"\"Returns true if a UB matrix is available.\n\n :rtype: bool\n \"\"\"\n header = self._get_first_header()\n expected_keys = set([\"UB_mne\", \"UB_pos\", \"sample_mne\", \"sample_pos\"])\n return expected_keys.issubset(header)\n\n def parse_ub_matrix(self):\n header = self._get_first_header()\n ub_data = self._get_mnemonic_key(\"UB\", header)\n s_data = self._get_mnemonic_key(\"sample\", header)\n if len(ub_data) > 9:\n _logger.warning(\"UB_mne and UB_pos contains more than expected keys.\")\n if len(s_data) > 6:\n _logger.warning(\"sample_mne and sample_pos contains more than expected keys.\")\n\n data = numpy.array([s_data[\"U0\"], s_data[\"U1\"], s_data[\"U2\"]], dtype=float)\n unit_cell_abc = data\n\n data = numpy.array([s_data[\"U3\"], s_data[\"U4\"], s_data[\"U5\"]], dtype=float)\n unit_cell_alphabetagamma = data\n\n ub_matrix = numpy.array([[\n [ub_data[\"UB0\"], ub_data[\"UB1\"], ub_data[\"UB2\"]],\n [ub_data[\"UB3\"], ub_data[\"UB4\"], ub_data[\"UB5\"]],\n [ub_data[\"UB6\"], ub_data[\"UB7\"], ub_data[\"UB8\"]]]], dtype=float)\n\n 
self.__unit_cell_abc = unit_cell_abc\n self.__unit_cell_alphabetagamma = unit_cell_alphabetagamma\n self.__ub_matrix = ub_matrix\n\n def get_unit_cell_abc(self):\n \"\"\"Get a numpy array data as defined for the dataset unit_cell_abc\n from the NXsample dataset.\n\n :rtype: numpy.ndarray\n \"\"\"\n if self.__unit_cell_abc is None:\n self.parse_ub_matrix()\n return self.__unit_cell_abc\n\n def get_unit_cell_alphabetagamma(self):\n \"\"\"Get a numpy array data as defined for the dataset\n unit_cell_alphabetagamma from the NXsample dataset.\n\n :rtype: numpy.ndarray\n \"\"\"\n if self.__unit_cell_alphabetagamma is None:\n self.parse_ub_matrix()\n return self.__unit_cell_alphabetagamma\n\n def get_ub_matrix(self):\n \"\"\"Get a numpy array data as defined for the dataset ub_matrix\n from the NXsample dataset.\n\n :rtype: numpy.ndarray\n \"\"\"\n if self.__ub_matrix is None:\n self.parse_ub_matrix()\n return self.__ub_matrix\n\n def is_spectrum(self):\n \"\"\"Returns true if the data should be interpreted as\n MCA data.\n EDF files or file series, with two or more header names starting with\n \"MCA\", should be interpreted as MCA data.\n\n :rtype: bool\n \"\"\"\n count = 0\n for key in self._get_first_header():\n if key.lower().startswith(\"mca\"):\n count += 1\n if count >= 2:\n return True\n return False\n\n\nclass File(commonh5.File):\n \"\"\"Class which handle a fabio image as a mimick of a h5py.File.\n \"\"\"\n\n def __init__(self, file_name=None, fabio_image=None, file_series=None):\n \"\"\"\n Constructor\n\n :param str file_name: File name of the image file to read\n :param fabio.fabioimage.FabioImage fabio_image: An already openned\n :class:`fabio.fabioimage.FabioImage` instance.\n :param Union[list[str],fabio.file_series.file_series] file_series: An\n list of file name or a :class:`fabio.file_series.file_series`\n instance\n \"\"\"\n self.__fabio_reader = self.create_fabio_reader(file_name, fabio_image, file_series)\n if fabio_image is not None:\n file_name = fabio_image.filename\n\n attrs = {\"NX_class\": \"NXroot\",\n \"file_time\": datetime.datetime.now().isoformat(),\n \"creator\": \"silx %s\" % silx_version}\n if file_name is not None:\n attrs[\"file_name\"] = file_name\n commonh5.File.__init__(self, name=file_name, attrs=attrs)\n scan = self.create_scan_group(self.__fabio_reader)\n self.add_node(scan)\n\n def create_scan_group(self, fabio_reader):\n \"\"\"Factory to create the scan group.\n\n :param FabioImage fabio_image: A Fabio image\n :param FabioReader fabio_reader: A reader for the Fabio image\n :rtype: commonh5.Group\n \"\"\"\n\n scan = commonh5.Group(\"scan_0\", attrs={\"NX_class\": \"NXentry\"})\n instrument = commonh5.Group(\"instrument\", attrs={\"NX_class\": \"NXinstrument\"})\n measurement = MeasurementGroup(\"measurement\", fabio_reader, attrs={\"NX_class\": \"NXcollection\"})\n file_ = commonh5.Group(\"file\", attrs={\"NX_class\": \"NXcollection\"})\n positioners = MetadataGroup(\"positioners\", fabio_reader, FabioReader.POSITIONER, attrs={\"NX_class\": \"NXpositioner\"})\n raw_header = RawHeaderData(\"scan_header\", fabio_reader, self)\n detector = DetectorGroup(\"detector_0\", fabio_reader)\n\n scan.add_node(instrument)\n instrument.add_node(positioners)\n instrument.add_node(file_)\n instrument.add_node(detector)\n file_.add_node(raw_header)\n scan.add_node(measurement)\n\n if fabio_reader.has_sample_information():\n sample = SampleGroup(\"sample\", fabio_reader)\n scan.add_node(sample)\n\n return scan\n\n def create_fabio_reader(self, file_name, fabio_image, 
file_series):\n \"\"\"Factory to create fabio reader.\n\n :rtype: FabioReader\"\"\"\n use_edf_reader = False\n first_file_name = None\n first_image = None\n\n if isinstance(file_series, list):\n first_file_name = file_series[0]\n elif isinstance(file_series, fabio.file_series.file_series):\n first_image = file_series.first_image()\n elif fabio_image is not None:\n first_image = fabio_image\n else:\n first_file_name = file_name\n\n if first_file_name is not None:\n _, ext = os.path.splitext(first_file_name)\n ext = ext[1:]\n edfimage = fabio.edfimage.EdfImage\n if hasattr(edfimage, \"DEFAULT_EXTENTIONS\"):\n # Typo on fabio 0.5\n edf_extensions = edfimage.DEFAULT_EXTENTIONS\n else:\n edf_extensions = edfimage.DEFAULT_EXTENSIONS\n use_edf_reader = ext in edf_extensions\n elif first_image is not None:\n use_edf_reader = isinstance(first_image, fabio.edfimage.EdfImage)\n else:\n assert(False)\n\n if use_edf_reader:\n reader = EdfFabioReader(file_name, fabio_image, file_series)\n else:\n reader = FabioReader(file_name, fabio_image, file_series)\n return reader\n\n def close(self):\n \"\"\"Close the object, and free up associated resources.\n\n After calling this method, attempts to use the object (and children)\n may fail.\n \"\"\"\n self.__fabio_reader.close()\n self.__fabio_reader = None\n" ]
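The File class that closes the code above exposes a fabio image through an h5py-style tree: create_scan_group() builds a scan_0 NXentry whose instrument/detector_0/data node is the lazy FrameData cube, with measurement/image_0 soft-linking back to it. A minimal usage sketch, assuming a hypothetical EDF file named foobar.edf (the import and constructor follow the module's own docstring; item access by child name is assumed to behave like h5py's):

import silx.io.fabioh5

# Open the fabio image through the h5py-like bridge defined above.
f = silx.io.fabioh5.File("foobar.edf")
try:
    # Layout produced by create_scan_group():
    #   scan_0/instrument/detector_0/data   (FrameData)
    #   scan_0/measurement/image_0/data     (SoftLink to the same data)
    data = f["scan_0"]["instrument"]["detector_0"]["data"]
    # FrameData reports a 2-D shape for a single frame, else a 3-D cube.
    print(data.shape, data.dtype)
finally:
    f.close()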
[ [ "numpy.distutils.misc_util.Configuration", "numpy.distutils.core.setup" ], [ "numpy.load", "numpy.array", "numpy.zeros", "numpy.logical_and" ], [ "numpy.string_", "numpy.bool", "numpy.issubdtype", "numpy.void", "numpy.dtype", "numpy.result_type", "numpy.int", "numpy.float", "numpy.min_scalar_type", "numpy.array", "numpy.zeros", "numpy.unicode_" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
1212Prajwol-Pdl/SmartProcessAnalytics
[ "b25b6e922e19cc61cfb9eb96395ad177af1daf71" ]
[ "Code-SPA/RNN_feedback.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 23 17:52:31 2019\r\n\r\n@author: Weike (Vicky) Sun [email protected]/[email protected]\r\n(c) 2020 Weike Sun, all rights reserved\r\n\"\"\"\r\n\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 4 09:15:46 2018\r\n\r\n@author: weiksun\r\n@comment: this file contains the RNN formulation for regression purpose w/ feedback connection\r\n\"\"\"\r\n\r\n\"\"\"\r\nImport package\r\n\"\"\"\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\n\r\n\"\"\"\r\nGenerate batch data\r\n\"\"\"\r\ndef gen_batch(raw_x, raw_y,raw_yp, batch_size, num_steps, epoch_overlap):\r\n data_length = len(raw_x)\r\n dx = np.shape(raw_x)[1]\r\n dy = np.shape(raw_y)[1]\r\n dyp = np.shape(raw_yp)[1]\r\n \r\n batch_partition_length = data_length // batch_size\r\n data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)\r\n data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)\r\n data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)\r\n \r\n for i in range(batch_size):\r\n data_x[i] = raw_x[batch_partition_length * i : batch_partition_length * (i+1)]\r\n data_y[i] = raw_y[batch_partition_length * i : batch_partition_length * (i+1)] \r\n data_yp[i] = raw_yp[batch_partition_length * i : batch_partition_length * (i+1)] \r\n \r\n if epoch_overlap == None:\r\n epoch_size = batch_partition_length // num_steps\r\n \r\n for i in range(epoch_size):\r\n x = data_x[:, i * num_steps:(i + 1) * num_steps]\r\n y = data_y[:, i * num_steps:(i + 1) * num_steps]\r\n yp = data_yp[:, i * num_steps:(i + 1) * num_steps]\r\n\r\n yield (x, y, yp)\r\n else:\r\n epoch_size = (batch_partition_length - num_steps + 1)//(epoch_overlap+1)\r\n \r\n for i in range(epoch_size):\r\n x = data_x[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n y = data_y[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n yp = data_yp[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n\r\n yield (x, y, yp)\r\n \r\n\r\n\r\n\"\"\"\r\nGenerate batch data for multiple series\r\n\"\"\"\r\ndef gen_batch_multi(raw_x, raw_y, timeindex, batch_size, num_steps, epoch_overlap):\r\n \r\n cum = 0\r\n num_series = len(timeindex)\r\n for s in range(num_series):\r\n \r\n num = np.shape(timeindex[s+1])[0] \r\n x = raw_x[cum:cum+num]\r\n y = raw_y[cum:cum+num]\r\n yp = np.insert(y,0,0,axis=0)[:-1]\r\n data_length = len(x)\r\n dx = np.shape(x)[1]\r\n dy = np.shape(y)[1]\r\n dyp = np.shape(yp)[1]\r\n \r\n batch_partition_length = data_length // batch_size\r\n data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)\r\n data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)\r\n data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)\r\n \r\n for i in range(batch_size):\r\n data_x[i] = x[batch_partition_length * i : batch_partition_length * (i+1)]\r\n data_y[i] = y[batch_partition_length * i : batch_partition_length * (i+1)] \r\n data_yp[i] = yp[batch_partition_length * i : batch_partition_length * (i+1)] \r\n \r\n if epoch_overlap == None:\r\n epoch_size = batch_partition_length // num_steps\r\n \r\n for i in range(epoch_size):\r\n x = data_x[:, i * num_steps:(i + 1) * num_steps]\r\n y = data_y[:, i * num_steps:(i + 1) * num_steps]\r\n yp = data_yp[:, i * num_steps:(i + 1) * num_steps]\r\n \r\n yield (x, y, yp,s)\r\n else:\r\n epoch_size = (batch_partition_length - num_steps + 1)//(epoch_overlap+1)\r\n \r\n for i in range(epoch_size):\r\n x = 
data_x[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n y = data_y[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n yp = data_yp[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n \r\n yield (x, y, yp,s)\r\n \r\n cum += num \r\n\r\n\r\n\"\"\"\r\nGenerate batch data for kstep prediction\r\n\"\"\"\r\ndef gen_batch_kstep(raw_x, raw_y,raw_yp, rnn_state, batch_size, num_steps, epoch_overlap):\r\n data_length = len(raw_x)\r\n dx = np.shape(raw_x)[1]\r\n dy = np.shape(raw_y)[1]\r\n dyp = np.shape(raw_yp)[1]\r\n ds = np.shape(rnn_state)[1]\r\n \r\n \r\n batch_partition_length = data_length // batch_size\r\n data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)\r\n data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)\r\n data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)\r\n data_s = np.zeros([batch_size, batch_partition_length,ds], dtype= np.float32)\r\n \r\n\r\n for i in range(batch_size):\r\n data_x[i] = raw_x[batch_partition_length * i : batch_partition_length * (i+1)]\r\n data_y[i] = raw_y[batch_partition_length * i : batch_partition_length * (i+1)]\r\n data_yp[i] = raw_yp[batch_partition_length * i : batch_partition_length * (i+1)] \r\n data_s[i] = rnn_state[batch_partition_length * i : batch_partition_length * (i+1)] \r\n \r\n if epoch_overlap == None:\r\n epoch_size = batch_partition_length // num_steps\r\n \r\n \r\n for i in range(epoch_size):\r\n x = data_x[:, i * num_steps:(i + 1) * num_steps]\r\n y = data_y[:, i * num_steps:(i + 1) * num_steps]\r\n yp = data_yp[:, i * num_steps:(i + 1) * num_steps]\r\n s = data_s[:, i * num_steps:(i + 1) * num_steps]\r\n\r\n yield (x, y, yp, s)\r\n else:\r\n epoch_size = (batch_partition_length - num_steps + 1)//(epoch_overlap+1)\r\n \r\n for i in range(epoch_size):\r\n x = data_x[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n y = data_y[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n yp = data_yp[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n s = data_s[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]\r\n\r\n yield (x, y, yp, s)\r\n \r\n \r\n \r\n\r\n\"\"\"\r\nGenerate batch data for kstep prediction\r\n\"\"\"\r\ndef gen_batch_kstep_layer(raw_x, raw_y,raw_yp, rnn_state):\r\n data_length = len(raw_x)\r\n dx = np.shape(raw_x)[1]\r\n dy = np.shape(raw_y)[1]\r\n dyp = np.shape(raw_yp)[1]\r\n \r\n num_layers = len(rnn_state)\r\n batch_size = data_length\r\n batch_partition_length = 1\r\n\r\n data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)\r\n data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)\r\n data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)\r\n final_data_s = ()\r\n\r\n for i in range(batch_size):\r\n data_x[i] = raw_x[batch_partition_length * i : batch_partition_length * (i+1)]\r\n data_y[i] = raw_y[batch_partition_length * i : batch_partition_length * (i+1)]\r\n data_yp[i] = raw_yp[batch_partition_length * i : batch_partition_length * (i+1)]\r\n \r\n for l in range(num_layers): \r\n final_data_s += (rnn_state[l][:-1],)\r\n\r\n \r\n yield (data_x, data_y, data_yp, final_data_s)\r\n\r\n \r\n \r\n \r\ndef gen_epochs(raw_data_x,raw_data_y,raw_data_yp, num_epochs, num_steps, batch_size,epoch_overlap):\r\n for i in range(int(num_epochs)):\r\n yield gen_batch(raw_data_x,raw_data_y, raw_data_yp, batch_size, num_steps, epoch_overlap)\r\n\r\n\r\n\r\ndef gen_epochs_multi(raw_data_x,raw_data_y, timeindex, num_epochs, num_steps, 
batch_size,epoch_overlap):\r\n for i in range(int(num_epochs)):\r\n yield gen_batch_multi(raw_data_x,raw_data_y, timeindex, batch_size, num_steps, epoch_overlap)\r\n\r\n\r\ndef reset_graph():\r\n if 'sess' in globals() and sess:\r\n sess.close()\r\n tf.reset_default_graph()\r\n\r\n\r\n\r\n\"\"\"\r\nDefine RNN graph\r\n\"\"\"\r\ndef build_multilayer_rnn_graph_with_dynamic_rnn(cell_type, activation,state_size, num_steps, num_layers, input_size_x, input_size_y , learning_rate, lambda_l2_reg,random_seed=0):\r\n\r\n reset_graph()\r\n tf.set_random_seed(random_seed) #make reproducible results\r\n \r\n input_size_x += input_size_y\r\n \r\n \"\"\"Define the graph inputs\"\"\"\r\n batch_size = tf.placeholder(tf.int32, [], name='batch_size')\r\n x = tf.placeholder(tf.float32, [None, num_steps, input_size_x], name='x')\r\n y = tf.placeholder(tf.float32, [None, num_steps, input_size_y], name='y')\r\n input_prob = tf.placeholder(tf.float32, name='input_prob')\r\n state_prob = tf.placeholder(tf.float32,name='state_prob')\r\n output_prob = tf.placeholder(tf.float32,name='output_prob')\r\n rnn_inputs = x\r\n\r\n \"\"\"Define a single cell with variational dropout\"\"\"\r\n def get_a_cell(state_size,input_prob,state_prob,num_input):\r\n if cell_type == 'LSTM':\r\n if activation == 'linear':\r\n lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.identity, state_is_tuple=True)\r\n cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)\r\n elif activation == 'relu':\r\n lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.nn.relu, state_is_tuple=True)\r\n cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)\r\n else: #tanh by default\r\n lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True)\r\n cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)\r\n\r\n\r\n elif cell_type == 'GRU':\r\n \r\n if activation == 'linear':\r\n gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.identity)\r\n cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)\r\n elif activation == 'relu':\r\n gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.nn.relu)\r\n cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)\r\n else:\r\n gru=tf.nn.rnn_cell.GRUCell(state_size)\r\n cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)\r\n\r\n else:\r\n if activation == 'linear':\r\n cell_basic = tf.contrib.rnn.BasicRNNCell(state_size,activation=tf.identity)\r\n cell_drop=tf.contrib.rnn.DropoutWrapper(cell_basic,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)\r\n\r\n elif activation == 'relu':\r\n cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.nn.relu)\r\n cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,\r\n input_size=num_input, input_keep_prob=input_prob,\r\n state_keep_prob=state_prob)\r\n 
else: #tanh by default\r\n cell_basic = tf.contrib.rnn.BasicRNNCell(state_size)\r\n cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,\r\n input_size=num_input, input_keep_prob=input_prob,\r\n state_keep_prob=state_prob)\r\n\r\n return cell_drop\r\n\r\n\r\n \"\"\"Wrap the cell in multilayer\"\"\"\r\n cell=tf.nn.rnn_cell.MultiRNNCell([get_a_cell(state_size,input_prob,state_prob,input_size_x if layer==0 else state_size) for layer in range(num_layers)],state_is_tuple=True)\r\n cell=tf.nn.rnn_cell.DropoutWrapper(cell,variational_recurrent=True,dtype=tf.float32,input_size=input_size_x,output_keep_prob=output_prob)\r\n init_state = cell.zero_state(batch_size, dtype=tf.float32)\r\n\r\n \"\"\"Build dynamic graph\"\"\"\r\n rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs,initial_state=init_state)\r\n\r\n \"\"\"Add prediction layer\"\"\"\r\n with tf.variable_scope('softmax'):\r\n W = tf.get_variable('W', [state_size, input_size_y])\r\n b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))\r\n\r\n rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])\r\n predictions = tf.matmul(rnn_outputs, W) + b\r\n yy = tf.reshape(y, [-1, input_size_y]) #batch_size*num_steps when yo udefine a placeholder in Tensorflow, the shape of the input during the session should be the same as the shape of the plcae holder\r\n \"Mean squared error loss\"\r\n loss=tf.reduce_mean(tf.square(tf.reshape(predictions,[-1])-tf.reshape(yy,[-1])))\r\n\r\n \"Adding regularization\"\r\n if lambda_l2_reg > 0 :\r\n cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not (\"noreg\" in tf_var.name or \"Bias\" in tf_var.name)])\r\n Predict_l2 = tf.nn.l2_loss(W) #+ tf.nn.l2_loss(b)\r\n total_loss = tf.reduce_sum(loss + lambda_l2_reg* tf.reduce_sum(cell_l2+Predict_l2) )\r\n else:\r\n total_loss = loss\r\n\r\n \"Define the train_step\"\r\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)\r\n\r\n\r\n\r\n return dict(x=x,\r\n y=y,\r\n batch_size=batch_size,\r\n input_prob=input_prob,\r\n state_prob=state_prob,\r\n output_prob=output_prob,\r\n init_state=init_state,\r\n final_state=final_state,\r\n rnn_outputs = rnn_outputs,\r\n total_loss= total_loss,\r\n loss = loss,\r\n train_step=train_step,\r\n preds = predictions,\r\n saver= tf.train.Saver())\r\n \r\n \r\n \r\n \r\n\"\"\"\r\nTrain RNN graph\r\n\"\"\"\r\ndef train_rnn(raw_data_x, raw_data_y, val_data_x, val_data_y,g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob, epoch_before_val = 50, max_checks_without_progress=50,epoch_overlap=None, verbose=True, save=False):\r\n\r\n with tf.Session() as sess:\r\n \"initialize the variables\"\r\n sess.run(tf.global_variables_initializer())\r\n\r\n raw_data_yp = np.insert(raw_data_y,0,0,axis=0)[:-1]\r\n val_data_yp = np.insert(val_data_y,0,0,axis=0)[:-1]\r\n\r\n\r\n \"see the trainable variables\"\r\n# print(\"The trainable variables are:\")\r\n variable_names = [v.name for v in tf.trainable_variables()]\r\n variable_shapes = [v.get_shape() for v in tf.trainable_variables()]\r\n parameter_num = 0\r\n for name, shape in zip(variable_names, variable_shapes):\r\n# print('{}\\nShape: {}'.format(name, shape))\r\n parameter_num += shape[0]*shape[1] if np.size(shape)>1 else shape[0]\r\n\r\n \"train the graph\"\r\n training_losses = []\r\n val_losses = []\r\n #set early_stopping cretirion\r\n checks_without_progress = 0\r\n best_loss = np.infty\r\n \r\n for idx, epoch in 
enumerate(gen_epochs(raw_data_x,raw_data_y,raw_data_yp,num_epochs, num_steps, batch_size,epoch_overlap)):\r\n training_loss = 0\r\n steps = 0\r\n\r\n training_state = None\r\n\r\n for steps,(X, Y, YP) in enumerate(epoch):\r\n feed_dict = {g['x']: np.dstack((X,YP)), g['y']: Y, g['batch_size']:batch_size, g['input_prob']: input_prob ,g['output_prob']: output_prob,g['state_prob']:state_prob}\r\n# feed_dict = {g['x']: X, g['y']: Y, g['batch_size']:batch_size, g['input_prob']: 1 ,g['output_prob']: 1,g['state_prob']:1}\r\n\r\n #continue to feed in if in the same class\r\n if training_state is not None:\r\n feed_dict[g['init_state']] = training_state\r\n\r\n training_loss_, training_state, _ = sess.run([g['loss'],\r\n g['final_state'],\r\n g['train_step']],\r\n feed_dict=feed_dict)\r\n training_loss += training_loss_\r\n\r\n\r\n if np.isnan(training_loss_):\r\n print('Explode!!!!!!!!!')\r\n return (None, None, None)\r\n \r\n \r\n if verbose and idx%100==0:\r\n print(\"Average training total loss for Epoch\", idx, \":\", training_loss/(steps+1))\r\n training_losses.append(training_loss / (steps+1))\r\n \r\n '''Test on validation set'''\r\n if idx > epoch_before_val:\r\n# print('Using validation for early stopping')\r\n '''see performance on validation set and do early stopping'''\r\n val_loss = 0\r\n steps_val = 0\r\n \r\n val_state = None\r\n for steps_val,(X_val, Y_val, YP_val) in enumerate(gen_batch(val_data_x, val_data_y, val_data_yp, batch_size, num_steps,epoch_overlap)):\r\n feed_dict_val = {g['x']: np.dstack((X_val,YP_val)), g['y']: Y_val, g['batch_size']:batch_size, g['input_prob']: 1 ,g['output_prob']: 1,g['state_prob']:1}\r\n \r\n #continue to feed in if in the same class\r\n if val_state is not None:\r\n feed_dict_val[g['init_state']] = val_state\r\n \r\n val_loss_,val_state = sess.run([g['loss'], g['final_state']],feed_dict=feed_dict_val)\r\n \r\n val_loss += val_loss_\r\n \r\n val_loss = val_loss/(steps_val+1)\r\n val_losses.append(val_loss)\r\n\r\n if val_loss < best_loss:\r\n best_loss = val_loss\r\n checks_without_progress = 0\r\n g['saver'].save(sess, save)\r\n else:\r\n checks_without_progress += 1\r\n if checks_without_progress > max_checks_without_progress:\r\n print(\"Early stopping!\")\r\n return (training_losses, val_losses, int(parameter_num))\r\n\r\n \r\n \r\n \r\n \r\n if isinstance(save, str):\r\n g['saver'].save(sess, save)\r\n print(\"Max number train epoch reached\")\r\n \r\n training_losses = np.array(training_losses)\r\n val_losses = np.array(val_losses)\r\n \r\n \r\n return (training_losses,val_losses, int(parameter_num))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nTrain RNN graph for multiple series\r\n\"\"\"\r\ndef train_rnn_multi(raw_data_x, raw_data_y, val_data_x, val_data_y, timeindex_train, timeindex_val, g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob, epoch_before_val = 50, max_checks_without_progress=50,epoch_overlap=None, verbose=True, save=False):\r\n\r\n with tf.Session() as sess:\r\n \"initialize the variables\"\r\n sess.run(tf.global_variables_initializer())\r\n\r\n\r\n \"see the trainable variables\"\r\n# print(\"The trainable variables are:\")\r\n variable_names = [v.name for v in tf.trainable_variables()]\r\n variable_shapes = [v.get_shape() for v in tf.trainable_variables()]\r\n parameter_num = 0\r\n for name, shape in zip(variable_names, variable_shapes):\r\n# print('{}\\nShape: {}'.format(name, shape))\r\n parameter_num += shape[0]*shape[1] if np.size(shape)>1 else shape[0]\r\n\r\n \"train the graph\"\r\n 
training_losses = []\r\n val_losses = []\r\n #set early_stopping cretirion\r\n checks_without_progress = 0\r\n best_loss = np.infty\r\n \r\n for idx, epoch in enumerate(gen_epochs_multi(raw_data_x,raw_data_y, timeindex_train, num_epochs, num_steps, batch_size,epoch_overlap)):\r\n training_loss = 0\r\n steps = 0\r\n s_threshold=0\r\n \r\n training_state = None\r\n\r\n for steps,(X, Y, YP, s) in enumerate(epoch):\r\n feed_dict = {g['x']: np.dstack((X,YP)), g['y']: Y, g['batch_size']:batch_size, g['input_prob']: input_prob ,g['output_prob']: output_prob,g['state_prob']:state_prob}\r\n #start to feed 0 initial for a new set of class\r\n if s == s_threshold:\r\n s_threshold += 1\r\n training_state = None\r\n #continue to feed in if in the same class\r\n if training_state is not None:\r\n feed_dict[g['init_state']] = training_state\r\n\r\n training_loss_, training_state, _ = sess.run([g['loss'],\r\n g['final_state'],\r\n g['train_step']],\r\n feed_dict=feed_dict)\r\n training_loss += training_loss_\r\n\r\n# print(steps)\r\n# print(training_loss_)\r\n if verbose and idx%100==0:\r\n print(\"Average training total loss for Epoch\", idx, \":\", training_loss/(steps+1), steps, training_loss_)\r\n training_losses.append(training_loss / (steps+1))\r\n \r\n '''Test on validation set'''\r\n if idx > epoch_before_val:\r\n# print('Using validation for early stopping')\r\n '''see performance on validation set and do early stopping'''\r\n val_loss = 0\r\n steps_val = 0\r\n s_val_threshold = 0\r\n \r\n val_state = None\r\n for steps_val,(X_val, Y_val, YP_val, s_val) in enumerate(gen_batch_multi(val_data_x, val_data_y, timeindex_val, batch_size, num_steps, epoch_overlap)):\r\n feed_dict_val = {g['x']: np.dstack((X_val,YP_val)), g['y']: Y_val, g['batch_size']:batch_size, g['input_prob']: 1 ,g['output_prob']: 1,g['state_prob']:1}\r\n \r\n\r\n #start to feed 0 initial for a new set of class\r\n if s_val == s_val_threshold:\r\n s_val_threshold += 1\r\n val_state = None\r\n #continue to feed in if in the same class\r\n if val_state is not None:\r\n feed_dict_val[g['init_state']] = val_state\r\n \r\n val_loss_,val_state = sess.run([g['loss'], g['final_state']],feed_dict=feed_dict_val)\r\n \r\n val_loss += val_loss_\r\n print('val')\r\n print(val_loss)\r\n \r\n val_loss = val_loss/(steps_val+1)\r\n val_losses.append(val_loss)\r\n\r\n if val_loss < best_loss:\r\n best_loss = val_loss\r\n checks_without_progress = 0\r\n g['saver'].save(sess, save)\r\n else:\r\n checks_without_progress += 1\r\n if checks_without_progress > max_checks_without_progress:\r\n print(\"Early stopping!\")\r\n return (training_losses, val_losses, int(parameter_num))\r\n\r\n \r\n \r\n \r\n \r\n if isinstance(save, str):\r\n g['saver'].save(sess, save)\r\n print(\"Max number train epoch reached\")\r\n \r\n training_losses = np.array(training_losses)\r\n val_losses = np.array(val_losses)\r\n \r\n return (training_losses,val_losses, int(parameter_num))\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nTest RNN graph 0 step\r\n\"\"\"\r\ndef test_rnn(test_data_x,test_data_y, g, checkpoint, input_prob, output_prob, state_prob, num_test):\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n\r\n test_data_yp = np.insert(test_data_y,0,0,axis=0)[:-1]\r\n\r\n \"read the trained graph\"\r\n g['saver'].restore(sess, checkpoint)\r\n\r\n \r\n \"run the test points\"\r\n #run the whole sequence, one class one total run \r\n for index,(X, Y, YP) in enumerate(gen_batch(test_data_x, test_data_y,test_data_yp, 1, num_test, None)):\r\n feed_dict={g['x']: 
np.dstack((X,YP)), g['y']:Y, g['batch_size']:1, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}\r\n\r\n preds, rnn_outputs = sess.run([g['preds'], g['rnn_outputs']], feed_dict)\r\n \r\n \r\n loss = np.sum((preds[1:]-test_data_y[1:])**2,axis=0)/(test_data_y.shape[0]-1)\r\n \r\n return (preds,loss,rnn_outputs)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nTest RNN graph 0 step for multiplayer afterwards\r\n\"\"\"\r\ndef test_rnn_layer(test_data_x,test_data_y, g, checkpoint, input_prob, output_prob, state_prob, num_test, num_layers):\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n\r\n test_data_yp = np.insert(test_data_y,0,0,axis=0)[:-1]\r\n \r\n final = {}\r\n \r\n \r\n \"read the trained graph\"\r\n g['saver'].restore(sess, checkpoint)\r\n\r\n \"run the test points\"\r\n for index,(X, Y, YP) in enumerate(gen_batch(test_data_x, test_data_y,test_data_yp, 1, 1, None)):\r\n if index >0:\r\n feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y,g['init_state']: rnn_outputs, g['batch_size']:1, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}\r\n else:\r\n feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y, g['batch_size']:1, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}\r\n\r\n preds, rnn_outputs = sess.run([g['preds'],g['final_state']], feed_dict)\r\n \r\n if index>0:\r\n final_preds = np.vstack((final_preds,preds))\r\n else:\r\n final_preds = preds\r\n \r\n \r\n for i in range(num_layers):\r\n if index >0:\r\n final[i] = np.vstack((final[i],rnn_outputs[i]))\r\n\r\n else:\r\n final[i] = rnn_outputs[i]\r\n \r\n \r\n final_inter_state=()\r\n for i in range(num_layers):\r\n final_inter_state += (final[i],) \r\n \r\n loss = np.sum((final_preds[1:]-test_data_y[1:])**2,axis=0)/(test_data_y.shape[0]-1)\r\n\r\n \r\n return (final_preds, loss, final_inter_state)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nTest RNN graph single layer\r\n\"\"\"\r\ndef test_rnn_kstep(test_data_x,test_data_y, preds, rnn_outputs, g, checkpoint, input_prob, output_prob, state_prob, num_test, kstep = 3):\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n result= {}\r\n\r\n \"read the trained graph\"\r\n g['saver'].restore(sess, checkpoint)\r\n\r\n losses = []\r\n for step_num in range(kstep):\r\n k=step_num+1\r\n \r\n for index,(X, Y, YP, S) in enumerate(gen_batch_kstep(test_data_x[k:], test_data_y[k:], preds[:-1],rnn_outputs[:-1], num_test-k,1, None)):\r\n \r\n feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y, g['init_state']: np.squeeze(S), g['batch_size']:num_test, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}\r\n preds, rnn_outputs= sess.run([g['preds'], g['rnn_outputs']], feed_dict)\r\n \r\n loss = np.sum((preds[1:]-test_data_y[1+k:])**2,axis=0)/test_data_y[1+k:].shape[0]\r\n\r\n result[k] = preds\r\n losses.append(loss)\r\n \r\n\r\n return (result,losses)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nTest RNN graph multi layer\r\n\"\"\"\r\ndef test_rnn_kstep_layer(test_data_x,test_data_y, preds, rnn_outputs, g, checkpoint, input_prob, output_prob, state_prob, num_test, kstep = 3):\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n result= {}\r\n\r\n \"read the trained graph\"\r\n g['saver'].restore(sess, checkpoint)\r\n\r\n losses = []\r\n for step_num in range(kstep):\r\n k=step_num+1\r\n \r\n for index,(X, Y, YP, S) in enumerate(gen_batch_kstep_layer(test_data_x[k:], test_data_y[k:], preds[:-1],rnn_outputs)):\r\n \r\n 
feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y, g['init_state']: S, g['batch_size']:num_test, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}\r\n preds, rnn_outputs= sess.run([g['preds'], g['final_state']], feed_dict)\r\n \r\n loss = np.sum((preds[1:]-test_data_y[k+1:])**2,axis=0)/test_data_y[k+1:].shape[0]\r\n\r\n result[k] = preds\r\n losses.append(loss)\r\n \r\n\r\n return (result,losses)\r\n" ]
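The record above ends with k-step rollout helpers built on a stack of variational-dropout RNN cells; the following is a minimal standalone sketch of that cell-stacking pattern, assuming TensorFlow 1.x and illustrative sizes (GRU stands in for the file's selectable cell type):

import tensorflow as tf

state_size, num_layers, input_dim, batch = 32, 2, 4, 8
keep = tf.placeholder_with_default(1.0, shape=[])  # dropout keep-prob, fed at run time

def make_cell(in_size):
    cell = tf.nn.rnn_cell.GRUCell(state_size)
    # variational_recurrent=True reuses one dropout mask across all time steps,
    # which is why the wrapper must be told the per-step input size explicitly.
    return tf.nn.rnn_cell.DropoutWrapper(
        cell, variational_recurrent=True, dtype=tf.float32,
        input_size=tf.TensorShape([in_size]),
        input_keep_prob=keep, state_keep_prob=keep)

# Layer 0 sees the raw inputs; deeper layers see the previous layer's output.
stack = tf.nn.rnn_cell.MultiRNNCell(
    [make_cell(input_dim if l == 0 else state_size) for l in range(num_layers)],
    state_is_tuple=True)

x = tf.placeholder(tf.float32, [batch, None, input_dim])
init = stack.zero_state(batch, dtype=tf.float32)
rnn_outputs, final_state = tf.nn.dynamic_rnn(stack, x, initial_state=init)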
[ [ "tensorflow.nn.dynamic_rnn", "tensorflow.get_variable", "tensorflow.reduce_sum", "numpy.squeeze", "tensorflow.nn.l2_loss", "tensorflow.contrib.rnn.BasicRNNCell", "tensorflow.train.AdamOptimizer", "numpy.size", "tensorflow.reset_default_graph", "numpy.insert", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.trainable_variables", "tensorflow.nn.rnn_cell.GRUCell", "numpy.zeros", "tensorflow.matmul", "numpy.isnan", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.set_random_seed", "numpy.array", "numpy.sum", "tensorflow.nn.rnn_cell.DropoutWrapper", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.reshape", "numpy.dstack", "tensorflow.constant_initializer", "numpy.shape", "tensorflow.variable_scope", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
virtualparadox/bbmap
[ "ea57dba1a1a112de3060793de600da91fa32fbc0" ]
[ "pytools/lib/readqc_utils.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Read qc utilities\n\n Created: Jul 24 2013\n sulsj\n\n\"\"\"\n\nimport os\nimport subprocess\nimport matplotlib\nimport numpy as np\n\nfrom common import checkpoint_step\nfrom os_utility import make_dir, change_mod, run_sh_command, rm_dir\nfrom readqc_constants import RQCReadQcConfig, RQCContamDb, RQCReadQcReferenceDatabases, RQCReadQc\nfrom rqc_utility import safe_basename, get_cat_cmd, localize_file, safe_dirname\nfrom rqc_constants import RQCExitCodes\n\nmatplotlib.use(\"Agg\") ## This needs to skip the DISPLAY env var checking\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport mpld3\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\n\n\"\"\" STEP1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nfast_subsample_fastq_sequences\n\n Title : fast_subsample_fastq_sequences\n\n Function : This function subsamples the data from a specified fastq file.\n\n Usage : fast_subsample_fastq_sequences( $seq_unit, subsampledFile, $subsamplingRate, $max_count, \\$totalBaseCount, $totalReadNum, log)\n\n Args : 1) The source fastq file.\n 2) The destination fastq file.\n 3) The percentage of read subsampling.\n 4) The maximum number of reads at which to stop subsampling.\n 5) A reference to the variable that will store the\n basecount.\n 6) A reference to the variable that will store the\n number of reads.\n 7) A reference to a JGI_Log object.\n\n Returns : JGI_SUCCESS: The fastq data was successfully sampled.\n JGI_FAILURE: The fastq data could not be sampled.\n\n Comments : Pass as parameters both the subsample_rate and the read_count\n in order to stop subsampling at the read_count.\n The read_count can also be null, in which case the number\n of reads corresponding to the percentage subsample_rate will be subsampled.\n\n@param fastq: source fastq file (full path)\n@param outputFileName: subsampled output file name (basename)\n@param subsamplingRate: sample rate < 1.0\n@param isStatGenOnly: boolean -> generate stats output or not\n@param log\n\n@return retCode: success or failure\n@return subsampledFile: subsampled output file name (full path)\n@return totalBaseCount: total #bases (to be added to readqc_stats.txt)\n@return totalReadNum: total #reads (to be added to readqc_stats.txt)\n@return subsampledReadNum: total #reads sampled (to be added to readqc_stats.txt)\n\n\"\"\"\ndef fast_subsample_fastq_sequences(sourceFastq, outputFileName, subsamplingRate, isStatGenOnly, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n bbtoolsReformatShCmd = os.path.join(cdir, '../../reformat.sh') #RQCReadQcCommands.BBTOOLS_REFORMAT_CMD\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n log.info(\"Sampling %s at %.2f rate\", sourceFastq, subsamplingRate)\n\n retCode = None\n totalBaseCount = 0\n totalReadNum = 0\n subsampledReadNum = 0\n bIsPaired = False\n readLength = 0\n\n subsampleDir = \"subsample\"\n subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n make_dir(subsamplePath)\n change_mod(subsamplePath, \"0755\")\n\n subsampledFile = os.path.join(subsamplePath, outputFileName)\n\n fileSize = os.path.getsize(sourceFastq)\n log.info(\"Source fastq file size = %s\", fileSize)\n\n ## Replaced subsampler with reformat.sh\n ## subsample with bbtoolsReformatShCmd:\n ## $ reformat.sh in=7348.8.68143.fastq out=subsample.fastq samplerate=0.01 qout=33\n ## - 21G == 180.399 seconds ~ 6x faster than 
subsample_fastq_pl\n ## new subampler from BBTOOLS\n ## without qin=33 then it uses auto detect, Illumina is phread64 but we need to convert to phred33\n ##reformat.sh in=7257.1.64419.CACATTGTGAG.s1.0.fastq out=temp.out samplerate=0.02 qin=33 qout=33 overwrite\n\n ## 20140820\n ## bhist=<file> Write a base composition histogram to file. ## Cycle Nucleotide Composition\n ## gchist=<file> Write a gc content histogram to file. ## Read GC, mean, std\n ## qhist=<file> Write a quality histogram to file. ## Average Base Position Quality\n ## bqhist=<file> Write a quality histogram designed for box plots. ## Average Base Position Quality Boxplot\n ## obqhist=<file> Write a base quality histogram to file. ## Base quality histogram; *.base_qual.stats\n\n reformatPrefix = os.path.basename(subsampledFile).replace(\".fastq\", \"\")\n\n reformatLogFile = os.path.join(subsamplePath, reformatPrefix + \".reformat.log\")\n reformatGchistFile = os.path.join(subsamplePath, reformatPrefix + \".reformat.gchist.txt\") ## Read GC\n reformatBhistFile = os.path.join(subsamplePath, reformatPrefix + \".reformat.bhist.txt\") ## Cycle Nucleotide Composition\n reformatQhistFile = os.path.join(subsamplePath, reformatPrefix + \".reformat.qhist.txt\") ## Average Base Position Quality\n reformatBqhistFile = os.path.join(subsamplePath, reformatPrefix + \".reformat.bqhist.txt\") ## Average Base Position Quality Boxplot\n reformatObqhistFile = os.path.join(subsamplePath, reformatPrefix + \".reformat.obqhist.txt\") ## Base quality histogram\n\n if not isStatGenOnly: ## if subsampling for blast, do not generate the stat files\n subsampleCmd = \"%s in=%s out=%s samplerate=%s qin=33 qout=33 ow=t > %s 2>&1 \" % \\\n (bbtoolsReformatShCmd, sourceFastq, subsampledFile, subsamplingRate, reformatLogFile)\n else:\n subsampleCmd = \"%s in=%s out=%s samplerate=%s qin=33 qout=33 ow=t gcplot=t bhist=%s qhist=%s gchist=%s gcbins=auto bqhist=%s obqhist=%s > %s 2>&1 \" % \\\n (bbtoolsReformatShCmd, sourceFastq, subsampledFile, subsamplingRate, reformatBhistFile,\n reformatQhistFile, reformatGchistFile, reformatBqhistFile, reformatObqhistFile, reformatLogFile)\n\n _, _, exitCode = run_sh_command(subsampleCmd, True, log, True)\n\n if exitCode == 0:\n ##java -ea -Xmx200m -cp /usr/common/jgi/utilities/bbtools/prod-33.18/lib/BBTools.jar jgi.ReformatReads in=7257.1.64419.CACATTGTGAG.s1.0.fastq out=temp.out samplerate=0.02 qin=33 qout=33 overwrite\n ##Executing jgi.ReformatReads [in=7257.1.64419.CACATTGTGAG.s1.0.fastq, out=temp.out, samplerate=0.02, qin=33, qout=33, overwrite]\n ##\n ##Unspecified format for output temp.out; defaulting to fastq.\n ##Input is being processed as paired\n ##Writing interleaved.\n ##Input: 6661 reads 1671911 bases\n ##Processed: 278 reads 69778 bases\n ##Output: 278 reads (4.17%) 69778 bases (4.17%)\n ##\n ##Time: 0.181 seconds.\n ##Reads Processed: 278 1.54k reads/sec\n ##Bases Processed: 69778 0.39m bases/sec\n\n ## NEW\n if os.path.isfile(reformatLogFile):\n with open(reformatLogFile) as STAT_FH:\n for l in STAT_FH.readlines():\n if l.startswith(\"Input:\"):\n toks = l.split()\n totalBaseCount = int(toks[3])\n totalReadNum = int(toks[1])\n # elif l.startswith(\"Processed:\") or l.startswith(\"Output:\"):\n elif l.startswith(\"Output:\"):\n toks = l.split()\n subsampledReadNum = int(toks[1])\n elif l.startswith(\"Input is being processed as\"):\n toks = l.split()\n if toks[-1].strip() == \"paired\":\n bIsPaired = True\n\n log.info(\"Total base count of input fastq = %s\", totalBaseCount)\n log.info(\"Total num reads of 
input fastq = %s\", totalReadNum)\n log.info(\"Total num reads of sampled = %s\", subsampledReadNum)\n\n readLength = int(totalBaseCount / totalReadNum)\n\n log.info(\"Read length = %d\", readLength)\n log.info(\"Paired = %s\", bIsPaired)\n\n if totalReadNum > 0 and subsampledReadNum > 0:\n ##\n ## TODO: deal with sequnits with small number of reads\n ## How to record the status in readqc.log\n ##\n retCode = RQCExitCodes.JGI_SUCCESS\n log.info(\"illumina_readqc_subsampling complete: output file = %s\", subsampledFile)\n\n elif totalReadNum > 0 and subsampledReadNum <= 0:\n retCode = RQCExitCodes.JGI_FAILURE\n log.error(\"illumina_readqc_subsampling failure. subsampledReadNum <= 0.\")\n\n else:\n retCode = RQCExitCodes.JGI_FAILURE\n log.error(\"illumina_readqc_subsampling failure. totalReadNum <= 0 and subsampledReadNum <= 0.\")\n\n else:\n retCode = RQCExitCodes.JGI_FAILURE\n log.error(\"illumina_readqc_subsampling failure. Can't find stat file from subsampling.\")\n\n else:\n retCode = RQCExitCodes.JGI_FAILURE\n log.error(\"illumina_readqc_subsampling failure. Failed to run bbtoolsReformatShCmd. Exit code != 0\")\n with open(reformatLogFile, 'r') as f:\n log.error(f.read())\n retCode = -2\n\n return retCode, subsampledFile, totalBaseCount, totalReadNum, subsampledReadNum, bIsPaired, readLength\n\n\n\"\"\" STEP2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nwrite_unique_20_mers\n\n Title: write_unique_k_mers (k=20 or 25)\n Function: Given a fastq file, finds unique 20/25 mers from the start\n of the read and along a random position of the read\n Usage: write_unique_20_mers(\\@seq_files, $log)\n Args: 1) ref to an array containing bz2 zipped fastq file path(s)\n 2) output directory\n 3) log file object\n Returns: exitCode, merSamplerOutFile, pngPlotFile, htmlPlotFile\n Comments: Using bbcountunique.sh's output file named merSampler.<fastq_name>.m20.e25000\n create a plot png file, merSampler.<fastq_name>.m20.e25000.png\n\n bbcountunique.sh: Generates a kmer uniqueness histogram, binned by file position.\n There are 3 columns for single reads, 6 columns for paired:\n count number of reads or pairs processed\n r1_first percent unique 1st kmer of read 1\n r1_rand percent unique random kmer of read 1\n r2_first percent unique 1st kmer of read 2\n r2_rand percent unique random kmer of read 2\n pair percent unique concatenated kmer from read 1 and 2\n\n@param fastq: source fastq file (full path)\n@param log\n\n@return retCode: success or failure\n@return mersampler_out_file: plot data (to be added to readqc_files.txt)\n@return pngPlotFile: output plot (to be added to readqc_files.txt)\n@return htmlPlotFile: output d3 interactive plot (to be added to readqc_files.txt)\n\n\"\"\"\ndef write_unique_20_mers(fastq, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n bbcountuniqueShCmd = os.path.join(cdir, '../../bbcountunique.sh') #RQCReadQcCommands.BBCOUNTUNIQUE_SH_CMD\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n uniqMerDir = \"uniqueness\"\n uniqMerPath = os.path.join(READ_OUTPUT_PATH, uniqMerDir)\n make_dir(uniqMerPath)\n change_mod(uniqMerPath, \"0755\")\n\n ## cmd line for merSampler\n uniqMerSize = RQCReadQc.ILLUMINA_MER_SAMPLE_MER_SIZE ## 20 ==> 25 RQC-823 08102016\n reportFreq = RQCReadQc.ILLUMINA_MER_SAMPLE_REPORT_FRQ ## 25000\n\n sequnitFileName, exitCode = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n 
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ## bbcountunique.sh, a new version of sampler from bbtools\n ## bbcountunique.sh in=$FILENAME out=out.txt percent=t count=t cumulative=t\n ## ex) bbcountunique.sh in=$SEQDIR/dna/$SEQFILE.fastq.gz out=7378.1.69281.CGATG-2.txt percent=t count=t cumulative=f int=f\n ## ex2)\n ## cmd: bbcountunique.sh k=20 interval=25000 in=7601.1.77813.CTTGTA.fastq.gz out=7601.1.77813.CTTGTA.merSampler.m20.e25000_2 percent=t count=t cumulative=f int=f\n\n log.info(\"bbcountunique.sh started.\")\n\n merSamplerOutFile = os.path.join(uniqMerPath, sequnitFileNamePrefix + \".merSampler.m\" + str(uniqMerSize) + \".e\" + str(reportFreq) + \"_2\")\n\n ## RQC-823\n ## Adding shuffling before bbcountunique\n ## 08302016 Reverted to no shuffling\n ##\n ## shuffle.sh in=input.fastq.gz out=stdout.fq -Xmx40g | bbcountunique.sh in=stdin.fq -Xmx40g ==> not working\n # shuffledFastqFile = os.path.join(uniqMerPath, sequnitFileNamePrefix + \".shuffled.fq\")\n # suffleCmd = \"%s in=%s out=%s\" % (shuffleShCmd, fastq, shuffledFastqFile)\n # stdOut, stdErr, exitCode = run_sh_command(suffleCmd, True, log, True)\n # if exitCode != 0:\n # log.error(\"failed to suffle fastq for unique mer analysis.\")\n # return RQCExitCodes.JGI_FAILURE, None, None, None\n\n bbcountuniqCmd = \"%s in=%s out=%s k=%s interval=%s percent=t count=t cumulative=f int=f ow=t\" \\\n % (bbcountuniqueShCmd, fastq, merSamplerOutFile, uniqMerSize, reportFreq)\n\n _, _, exitCode = run_sh_command(bbcountuniqCmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"Failed to sample unique %s mers by bbcountunique.sh.\", uniqMerSize)\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n log.info(\"bbcountunique.sh completed.\")\n\n ## Old plotting data\n # nSeq nStartUniMer fracStartUniMer nRandUniMer fracRandUniMer\n ## 0 1 2 3 4\n ##25000 2500 0.1 9704 0.3882\n\n ## New plotting data from bbcountunique\n # count first rand first_cnt rand_cnt\n # 0 1 2 3 4\n # 25000 66.400 76.088 16600 19022\n # 50000 52.148 59.480 13037 14870\n # 75000 46.592 53.444 11648 13361\n # 100000 43.072 49.184 10768 12296 ...\n\n pngPlotFile = None\n htmlPlotFile = None\n\n if os.path.isfile(merSamplerOutFile):\n ## sanity check\n\n ## OLD\n ## #nSeq nStartUniMer fracStartUniMer nRandUniMer fracRandUniMer\n ## ex) 25000 16594 0.6638 18986 0.7594\n ## 50000 29622 0.5211 33822 0.5934\n ## 75000 41263 0.4656 47228 0.5362\n ## 100000 52026 0.4305 59545 0.4927 ...\n \"\"\"\n 2016-09-07\n #count first rand first_cnt rand_cnt avg_quality perfect_prob\n 25000 96.480 98.636 24120 24659 30.36 80.94\n 50000 96.204 97.996 24051 24499 30.41 81.17\n 75000 95.512 97.568 23878 24392 29.99 80.06\n 100000 95.408 97.588 23852 24397 30.24 80.78\n 125000 95.176 97.240 23794 24310 30.23 80.86\n \"\"\"\n\n line = None\n numData = 0\n\n with open(merSamplerOutFile, \"r\") as FH:\n lines = FH.readlines()\n line = lines[-1] ## get the last line\n numData = sum(1 for l in lines)\n\n toks = line.split()\n assert len(toks) == 7, \"ERROR: merSamplerOutFile format error: %s \" % (merSamplerOutFile)\n\n if numData < 3:\n log.error(\"Not enough data in merSamplerOutFile: %s\", merSamplerOutFile)\n return RQCExitCodes.JGI_SUCCESS, None, None, None\n\n\n ## Generating plots\n rawDataMatrix = np.loadtxt(merSamplerOutFile, delimiter='\\t', comments='#')\n # Bryce: 2016-09-07, its 7 now. 
Its failed 622 pipelines ...\n # assert len(rawDataMatrix[1][:]) == 5\n\n fig, ax = plt.subplots()\n\n markerSize = 5.0\n lineWidth = 1.5\n\n ## Note: no need to show all the data points\n ## If the number of data points > 5k, get only 5k data.\n jump = 1\n if len(rawDataMatrix[:, 0]) > 10000:\n jump = int(len(rawDataMatrix[:, 0]) / 5000)\n\n xData = rawDataMatrix[:, 0][0::jump]\n yData = rawDataMatrix[:, 1][0::jump]\n yData2 = rawDataMatrix[:, 2][0::jump]\n\n totalReadNum = rawDataMatrix[-1, 0] ## sampled read num from the last line of the data file\n assert int(totalReadNum) > 0\n maxX = int(totalReadNum) * 3\n\n p1 = ax.plot(xData, yData, 'g', marker='x', markersize=markerSize, linewidth=lineWidth, label=\"Starting %s Mer Uniqueness\" % (str(uniqMerSize)), alpha=0.5)\n p2 = ax.plot(xData, yData2, 'b', marker='x', markersize=markerSize, linewidth=lineWidth, label=\"Random %s Mer Uniqueness\" % (str(uniqMerSize)), alpha=0.5)\n\n ## Curve-fitting\n from scipy.optimize import curve_fit\n\n ## fit function: f(x)=a*log(x)+b\n def fit_func(x, a, b):\n return a * np.log(x) + b\n\n fitpars, _ = curve_fit(fit_func, rawDataMatrix[:, 0], rawDataMatrix[:, 1])\n\n fix_x = [i for i in range(25000, maxX, 25000)]\n ax.plot(fix_x, fit_func(fix_x, *fitpars), 'r', linewidth=lineWidth, label=\"fit\", alpha=0.5)\n\n ax.set_xlabel(\"Read Sampled\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Percentage Unique\", fontsize=12, alpha=0.5)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n fontProp = FontProperties()\n fontProp.set_size(\"small\")\n fontProp.set_family(\"Bitstream Vera Sans\")\n ax.legend(loc=1, prop=fontProp)\n ax.set_xlim([0, maxX])\n ax.set_ylim([0, 100])\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n labels = [\"%.2f\" % i for i in rawDataMatrix[:, 1]]\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=labels))\n\n labels = [\"%.2f\" % i for i in rawDataMatrix[:, 2]]\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p2[0], labels=labels))\n\n ## Create both dynamic and static plots\n pngPlotFile = merSamplerOutFile + \"_mer_sampler_plot.png\"\n plt.savefig(pngPlotFile, dpi=fig.dpi)\n\n htmlPlotFile = merSamplerOutFile + \"_mer_sampler_plot_d3.html\"\n mpld3.save_html(fig, htmlPlotFile)\n\n log.info(\"New data file from bbcountunique: %s\", merSamplerOutFile)\n log.info(\"New png plot: %s\", pngPlotFile)\n log.info(\"New D3 plot: %s\", htmlPlotFile)\n\n else:\n log.error(\"Cannot find merSamplerOutFile by bbcountunique.sh, %s\", merSamplerOutFile)\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n # if os.path.isfile(merSamplerOutFile):\n # line = None\n # numData = 0\n #\n # with open(merSamplerOutFile, \"r\") as FH:\n # line = FH.readlines()[-1] ## get the last line\n # numData = len(line)\n #\n # ## #nSeq nStartUniMer fracStartUniMer nRandUniMer fracRandUniMer\n # ## ex) 25000 16594 0.6638 18986 0.7594\n # ## 50000 29622 0.5211 33822 0.5934\n # ## 75000 41263 0.4656 47228 0.5362\n # ## 100000 52026 0.4305 59545 0.4927 ...\n # \"\"\"\n # 2016-09-07\n # #count first rand first_cnt rand_cnt avg_quality perfect_prob\n # 25000 96.480 98.636 24120 24659 30.36 80.94\n # 50000 96.204 97.996 24051 24499 30.41 81.17\n # 75000 95.512 97.568 23878 24392 29.99 80.06\n # 100000 95.408 97.588 23852 24397 30.24 80.78\n # 125000 95.176 97.240 23794 24310 30.23 80.86\n # \"\"\"\n #\n # 
toks = line.split()\n # assert len(toks)==7, \"ERROR: merSamplerOutFile format error: %s \" % (merSamplerOutFile)\n #\n # # totalReadNum = int(toks[0])\n # # log.info(\"Total number of reads = %s.\" % (totalReadNum))\n # # assert totalReadNum > 0\n #\n # else:\n # log.error(\"cannot find mersampler_out_file, %s\" % (merSamplerOutFile))\n # return RQCExitCodes.JGI_FAILURE, None, None, None\n #\n # if numData < 3:\n # log.error(\"not enough data in %s\" % (merSamplerOutFile))\n # return RQCExitCodes.JGI_FAILURE, None, None, None\n\n ## verify that the mer sampler output file was created\n # if not os.path.isfile(merSamplerOutFile):\n # log.error(\"failed to find output file for %s Mer Uniqueness: %s\" % (str(uniqMerSize), merSamplerOutFile))\n # else:\n # log.info(\"MerSampler output file successfully generated (%s).\" % (merSamplerOutFile))\n\n ## verify that the mer sampler plot png file was created\n if not os.path.isfile(pngPlotFile):\n log.warning(\"Failed to find output plot png file for %s Mer Uniqueness\", str(uniqMerSize))\n else:\n log.info(\"MerSampler output png file successfully generated (%s)\", pngPlotFile)\n\n if not os.path.isfile(htmlPlotFile):\n log.warning(\"Failed to find output d3 plot html file for %s Mer Uniqueness\", str(uniqMerSize))\n else:\n log.info(\"MerSampler output png file successfully generated (%s)\", htmlPlotFile)\n\n\n return RQCExitCodes.JGI_SUCCESS, merSamplerOutFile, pngPlotFile, htmlPlotFile\n\n\n\"\"\" STEP3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_read_gc\n\n Title: illumina_read_gc\n Function: Takes path to fastq file and generates\n read gc histograms (txt and png)\n Usage: illumina_read_gc($fastq_path, $log)\n Args: 1) path to subsampled fastq file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: None.\n\n@param fastq: source fastq file (full path)\n@param log\n\n@return reformatGchistFile: hist text data (to be added to readqc_files.txt)\n@return pngFile: output plot (to be added to readqc_files.txt)\n@return htmlFile: output d3 interactive plot (to be added to readqc_files.txt)\n@return meanVal: gc mean (to be added to readqc_stats.txt)\n@return stdevVal: gc stdev (to be added to readqc_stats.txt)\n\n\"\"\"\ndef illumina_read_gc(fastq, log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, _ = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n subsampleDir = \"subsample\"\n subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n reformatGchistFile = os.path.join(subsamplePath, sequnitFileNamePrefix + \".reformat.gchist.txt\") ## gc hist\n\n log.debug(\"gchist file: %s\", reformatGchistFile)\n\n ## Gen Average Base Position Quality plot\n if not os.path.isfile(reformatGchistFile):\n log.error(\"Gchist file not found: %s\", reformatGchistFile)\n return None, None, None, None, None, None, None\n\n ## File format\n ## #Mean 41.647\n ## #Median 42.000\n ## #Mode 42.000\n ## #STDev 4.541\n ## #GC Count\n ## 0.0 0\n ## 1.0 0\n ## 2.0 0\n ## 3.0 0\n ## 4.0 0\n meanVal = None\n medVal = None\n modeVal = None\n stdevVal = None\n\n with open(reformatGchistFile, \"r\") as STAT_FH:\n for l in STAT_FH.readlines():\n if l.startswith(\"#Mean\"):\n meanVal = l.strip().split('\\t')[1]\n elif l.startswith(\"#Median\"):\n 
medVal = l.strip().split('\\t')[1]\n elif l.startswith(\"#Mode\"):\n modeVal = l.strip().split('\\t')[1]\n elif l.startswith(\"#STDev\"):\n stdevVal = l.strip().split('\\t')[1]\n\n rawDataMatrix = np.loadtxt(reformatGchistFile, comments='#', usecols=(0, 1, 2)) ## only use 3 colums: GC, Count, Cumulative\n\n ## In addition to the %GC and # reads, the cumulative read % is added.\n assert len(rawDataMatrix[1][:]) == 3\n\n fig, ax = plt.subplots()\n\n markerSize = 5.0\n lineWidth = 1.5\n\n p1 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 1], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, alpha=0.5)\n\n ax.set_xlabel(\"%GC\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Read count\", fontsize=12, alpha=0.5)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n toolTipStrReadCnt = [\"Read count=%d\" % i for i in rawDataMatrix[:, 1]]\n toolTipStrGcPerc = [\"GC percent=%.1f\" % i for i in rawDataMatrix[:, 0]]\n toolTipStrReadPerc = [\"Read percent=%.1f\" % (i * 100.0) for i in rawDataMatrix[:, 2]]\n toolTipStr = [\"%s, %s, %s\" % (i, j, k) for (i, j, k) in\n zip(toolTipStrGcPerc, toolTipStrReadCnt, toolTipStrReadPerc)]\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=toolTipStr))\n\n pngFile = os.path.join(qualPath, sequnitFileNamePrefix + \".gchist.png\")\n htmlFile = os.path.join(qualPath, sequnitFileNamePrefix + \".gchist.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFile)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFile, dpi=fig.dpi)\n\n\n return reformatGchistFile, pngFile, htmlFile, meanVal, stdevVal, medVal, modeVal\n\n\n\"\"\" STEP5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nwrite_avg_base_quality_stats\n\n Title: write_base_quality_stats\n Function: Takes path to fastq file and generates\n quality plots for each read\n Usage: write_base_quality_stats($fastq_path, $analysis, $log)\n Args: 1) path to subsampled fastq file\n 2) log file object\n Returns: None.\n Comments: None.\n\n\n@param fastq: source fastq file (full path)\n@param log\n\n@return reformatObqhistFile: output data file (to be added to readqc_files.txt)\n\n\"\"\"\ndef write_avg_base_quality_stats(fastq, log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, _ = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n subsampleDir = \"subsample\"\n subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n reformatObqhistFile = os.path.join(subsamplePath, sequnitFileNamePrefix + \".reformat.obqhist.txt\") ## base composition histogram\n\n log.debug(\"obqhist file: %s\", reformatObqhistFile)\n\n ## Gen base composition histogram\n if not os.path.isfile(reformatObqhistFile):\n log.error(\"Obqhist file not found: %s\", reformatObqhistFile)\n return None\n else:\n return reformatObqhistFile\n\n\n\"\"\" STEP6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_count_q_score\n\n Title: count_q_score\n Function: Given a fastq (bz2 zipped or unzipped)\n file path, creates a histogram of\n the quality scores in the file\n Usage: count_q_score($fastq, $log)\n Args: 1) path to subsampled fastq file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: 
Generates a file named <fastq_path>.qhist that has\n the quality score histogram.\n\n@param fastq: source fastq file (full path)\n@param log\n\n@return reformatObqhistFile: output data file (to be added to readqc_files.txt)\n@return pngFile: output plot file (to be added to readqc_files.txt)\n@return htmlFile: output d3 interactive plot file (to be added to readqc_files.txt)\n\n\"\"\"\ndef illumina_count_q_score(fastq, log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, _ = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n subsampleDir = \"subsample\"\n subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n reformatObqhistFile = os.path.join(subsamplePath, sequnitFileNamePrefix + \".reformat.obqhist.txt\") ## base composition histogram\n\n log.debug(\"obqhist file: %s\", reformatObqhistFile)\n\n rawDataMatrix = np.loadtxt(reformatObqhistFile, delimiter='\\t', comments='#')\n assert len(rawDataMatrix[1][:]) == 3\n\n ## Qavg nrd percent\n ## 0 1 2\n fig, ax = plt.subplots()\n\n markerSize = 5.0\n lineWidth = 1.5\n\n p1 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 2], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, alpha=0.5)\n ax.set_xlabel(\"Average Read Quality\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction of Reads\", fontsize=12, alpha=0.5)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=list(rawDataMatrix[:, 2])))\n\n pngFile = os.path.join(qualPath, sequnitFileNamePrefix + \".avg_read_quality_histogram.png\")\n htmlFile = os.path.join(qualPath, sequnitFileNamePrefix + \".avg_read_quality_histogram.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFile)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFile, dpi=fig.dpi)\n\n return reformatObqhistFile, pngFile, htmlFile\n\n\n\"\"\" STEP7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_calculate_average_quality\n\n Title: illumina_calculate_average_quality\n Function: Given a fastq (subsampled) file, calculates average quality in 21 mer windows.\n Usage: illumina_calculate_average_quality($fastq_path, $log)\n Args: 1) path to subsampled fastq file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: Several output files are generated in the directory that\n the script is run.\n 1) Text output file with 21mer start position, number of mers read,\n total mers, and average accuracy of the bin\n 2) A gnuplot png file named <fastq_name>.21mer.qual.png\n\n The function assumes that the fastq file exists.\n The 21mer qual script was writtten by mli\n\n\n@return retCode: success or failure\n@return stat_file: plot data (to be added to readqc_files.txt)\n@return pngFile: output plot (to be added to readqc_files.txt)\n\n\"\"\"\n## Removed!\n##def illumina_calculate_average_quality(fastq, log):\n\n\n\n\"\"\" STEP8 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_find_common_motifs\n\n Title: illumina_find_common_motifs\n Function: Given a fastq (subsampled) file, finds most common N-string motifs.\n Usage: illumina_find_common_motifs($fastq_path, $log)\n Args: 1) path to subsampled fastq 
file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: An output file is generated in the directory\n 1) Text output summary file most common motifs and perecent total motifs.\n\n The function assumes that the fastq file exists.\n The nstutter script was writtten by jel\n\n\n@param fastq: source fastq file (full path)\n@param log\n\n@return retCode: success or failure\n@return nstutterStatFile: output stutter data file (to be added to readqc_files.txt)\n\n\n\"\"\"\ndef illumina_find_common_motifs(fastq, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n patterNFastqPlCmd = os.path.join(cdir, '../tools/patterN_fastq.pl') #RQCReadQcCommands.PATTERN_FASTQ_PL\n\n sequnitFileName, exitCode = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n stutterDir = \"stutter\"\n stutterPath = os.path.join(READ_OUTPUT_PATH, stutterDir)\n make_dir(stutterPath)\n change_mod(stutterPath, \"0755\")\n\n nstutterStatFile = os.path.join(stutterPath, sequnitFileNamePrefix + \".nstutter.stat\")\n\n ## ex) patterN_fastq.pl -analog -PCT 0.1 -in 7601.1.77813.CTTGTA.s0.01.fastq > 7601.1.77813.CTTGTA.s0.01.nstutter.stat ; wait;\n makeStatFileCmd = \"%s -analog -PCT 0.1 -in %s > %s \" % (patterNFastqPlCmd, fastq, nstutterStatFile)\n combinedCmd = \"%s; wait; \" % (makeStatFileCmd)\n\n _, _, exitCode = run_sh_command(combinedCmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"failed to run patterNFastqPlCmd. Exit code != 0.\")\n return RQCExitCodes.JGI_FAILURE, None\n\n if os.path.isfile(nstutterStatFile):\n log.info(\"N stutter stat file successfully created (%s)\", nstutterStatFile)\n else:\n log.warning(\"Could not locate N stutter stat file %s\", nstutterStatFile)\n nstutterStatFile = \"failed to generate\"\n\n return RQCExitCodes.JGI_SUCCESS, nstutterStatFile\n\n\n\"\"\" STEP9 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_run_bwa\n\n Title: illumina_run_bwa\n Function: Given a fastq (subsampled) file path, runs bwa aligner\n Reads are aligned to each other.\n Usage: illumina_run_bwa($fastq_file_path, $log)\n Args: 1) path to subsampled fastq file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: None.\n\n\n@param fastq: source fastq file (full path)\n@param log\n\n@return retCode: success or failure\n@return summary_file: output bwa summary file (to be added to readqc_files.txt)\n\n\"\"\"\n\n## REMOVED!\n##def illumina_run_bwa(fastq, log):\n\ndef illumina_run_dedupe(fastq, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n bbdedupeShCmd = os.path.join(cdir, '../../bbdedupe.sh') #RQCReadQcCommands.BBDEDUPE_SH\n\n sequnitFileName, exitCode = safe_basename(fastq, log)\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n dupDir = \"dupes\"\n dupPath = os.path.join(READ_OUTPUT_PATH, dupDir)\n make_dir(dupPath)\n change_mod(dupPath, \"0755\")\n\n dedupeSummaryFile = os.path.join(dupPath, sequnitFileName + \".bwa.summary\")\n\n ## dedupe.sh in=reads.fq s=0 ftr=49 ac=f int=f\n xmx = \"-Xmx23G\"\n bbdedupeShCmd = \"%s %s in=%s out=null qin=33 ow=t s=0 ftr=49 ac=f int=f> %s 2>&1 \" % (bbdedupeShCmd, xmx, fastq, dedupeSummaryFile)\n\n _, _, exitCode = run_sh_command(bbdedupeShCmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"Failed to run bbdedupeShCmd.sh\")\n return RQCExitCodes.JGI_FAILURE, None\n\n return RQCExitCodes.JGI_SUCCESS, 
dedupeSummaryFile\n\n\n\"\"\" STEP10 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_run_tagdust\n\n Title: illumina_run_tagdust\n Function: Given a fastq (subsampled) file path, runs tag dust to\n find common illumina artifacts.\n Usage: illumina_run_tagdust($fastq_file_path, $log)\n Args: 1) path to subsampled fastq file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: None.\n\n\n@param fastq: source fastq file (full path)\n@param log\n\n@return retCode: success or failure\n@return tagdust_out: output tagdust file (to be added to readqc_files.txt)\n\n\"\"\"\n## No longer needed!!\n##def illumina_run_tagdust(fastq, log):\n\n\n\n\"\"\" STEP11 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_detect_read_contam\n\n@param fastq: source fastq file (full path)\n@param firstBp: first bp length to cut for read contam detection\n@param log\n\n@return retCode: success or failure\n@return outFileList: output duk stat file list (to be added to readqc_files.txt)\n@return ratioResultDict: output stat value dict (to be added to readqc_stats.txt)\n\n\"\"\"\n##def illumina_detect_read_contam(fastq, log):\n## REMOVED!\n\n# # def illumina_detect_read_contam2(fastq, firstBp, log):\n## REMOVED! 08302016\n\n\n\"\"\"\nContam removal by seal.sh\n\n\"\"\"\ndef illumina_detect_read_contam3(fastq, firstBp, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n sealShCmd = os.path.join(cdir, '../../seal.sh') #RQCReadQcCommands.SEAL_SH_CMD\n\n sequnitFileName, exitCode = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n numBadFiles = 0\n ratio = 0\n outFileDict = {}\n ratioResultDict = {}\n contamStatDict = {}\n\n catCmd, exitCode = get_cat_cmd(fastq, log)\n\n ## TODO: remove JGI Contaminants from CONTAM_DBS\n # CONTAM_DBS['artifact'] = ARTIFACT_FILE_NO_SPIKEIN\n # CONTAM_DBS['artifact_50bp'] = ARTIFACT_FILE_NO_SPIKEIN ## 20131203 Added for 50bp contam\n # CONTAM_DBS['DNA_spikein'] = ARTIFACT_FILE_DNA_SPIKEIN\n # CONTAM_DBS['RNA_spikein'] = ARTIFACT_FILE_RNA_SPIKEIN\n # CONTAM_DBS['contaminants'] = CONTAMINANTS\n # CONTAM_DBS['fosmid'] = FOSMID_VECTOR\n # CONTAM_DBS['mitochondrion'] = MITOCHONDRION_NCBI_REFSEQ\n # CONTAM_DBS['phix'] = PHIX\n # CONTAM_DBS['plastid'] = CHLOROPLAST_NCBI_REFSEQ\n # CONTAM_DBS['rrna'] = GENERAL_RRNA_FILE\n # CONTAM_DBS['microbes'] = MICROBES ## non-synthetic\n # CONTAM_DBS['synthetic'] = SYNTHETIC\n # CONTAM_DBS['adapters'] = ADAPTERS\n for db in RQCContamDb.CONTAM_DBS.iterkeys():\n if db == \"artifact_50bp\" and int(firstBp) == 20:\n sealStatsFile = os.path.join(qualPath, sequnitFileNamePrefix + \".artifact_20bp.seal.stats\")\n else:\n sealStatsFile = os.path.join(qualPath, sequnitFileNamePrefix + \".\" + db + \".seal.stats\")\n\n ## Localization file to /scratch/rqc\n # log.info(\"Contam DB localization started for %s\", db)\n # localizedDb = localize_file(RQCContamDb.CONTAM_DBS[db], log)\n\n ## 04262017 Skip localization temporarily until /scratch can be mounted\n ## in shifter container\n # if os.environ['NERSC_HOST'] == \"genepool\":\n # localizedDb = localize_file(RQCContamDb.CONTAM_DBS[db], log)\n # else:\n # localizedDb = None\n #\n # if localizedDb 
is None:\n # localizedDb = RQCContamDb.CONTAM_DBS[db] ## use the orig location\n # else:\n # log.info(\"Use the localized file, %s\", localizedDb)\n localizedDb = RQCContamDb.CONTAM_DBS[db]\n\n ## 09112017 Manually add -Xmx23G\n xmx = \"-Xmx23G\"\n if db == \"artifact_50bp\":\n cutCmd = \"cut -c 1-%s | \" % (firstBp)\n cmd = \"set -e; %s %s | %s %s in=stdin.fq out=null ref=%s k=22 hdist=0 stats=%s ow=t statscolumns=3 %s \" % \\\n (catCmd, fastq, cutCmd, sealShCmd, localizedDb, sealStatsFile, xmx)\n elif db == \"microbes\":\n cmd = \"%s in=%s out=null ref=%s hdist=0 mm=f mkf=0.5 ambig=random minlen=120 qtrim=rl trimq=10 stats=%s ow=t statscolumns=3 %s \" % \\\n (sealShCmd, fastq, localizedDb, sealStatsFile, xmx)\n else:\n cmd = \"%s in=%s out=null ref=%s k=22 hdist=0 stats=%s ow=t statscolumns=3 %s \" % \\\n (sealShCmd, fastq, localizedDb, sealStatsFile, xmx)\n\n _, _, exitCode = run_sh_command(cmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"Failed to run seal.sh cmd\")\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n ## Parsing seal output\n if not os.path.isfile(sealStatsFile):\n log.warning(\"Cannot open contam output file %s\", sealStatsFile)\n numBadFiles += 1\n continue ## continue to next contam db\n\n maxStatCount = 0\n with open(sealStatsFile, \"r\") as sealFH:\n for line in sealFH:\n line.strip()\n if line.find(\"#Matched\") != -1:\n ## ex) #Matched 1123123 77.31231%\n toks = line.split()\n assert len(toks) == 3\n ratio = toks[-1].replace('%', '')\n\n ## contamintaion stat\n if not line.startswith('#'):\n t = line.rstrip().split('\\t')\n # contamStatDict[\"%s:%s\" % (db, t[0])] = t[2].replace('%', '')\n if maxStatCount < 10 and t[0].startswith(\"gi|\"):\n # contamStatDict[\"contam:%s:%s\" % (db, t[0])] = t[2].replace('%', '')\n contamStatDict[\"contam:%s:%s\" % (db, \"|\".join(t[0].split('|')[:2]))] = t[2].replace('%', '') ## save only gi part (RQC-906)\n maxStatCount += 1\n\n ## RQC-743\n if db == \"artifact_50bp\" and int(firstBp) == 20:\n db = \"artifact_20bp\"\n\n outFileDict[db + \".seal.stats\"] = sealStatsFile\n log.debug(\"Contam db and matched ratio: %s = %f\", RQCContamDb.CONTAM_KEYS[db], float(ratio))\n ratioResultDict[RQCContamDb.CONTAM_KEYS[db]] = float(ratio)\n\n if numBadFiles:\n log.info(\"Number of bad I/O cases = %s\", numBadFiles)\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n\n return RQCExitCodes.JGI_SUCCESS, outFileDict, ratioResultDict, contamStatDict\n\n\n\"\"\" STEP12 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_sciclone_analysis\n\n Title: illumina_sciclone_analysis\n Function: Takes path to fastq file and determines\n if it is from a multiplexed run or not\n Usage: illumina_sciclone_analysis($subfastq, $log)\n Args: 1) fastq file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: None.\n\n\n@param origFastq: source fastq file (full path)\n@param isPairedEnd: pair- or single-ended\n@param log\n\n@return retCode: success or failure\n@return ratioResultDict: output stat value dict (to be added to readqc_stats.txt)\n@return dnaCountFile: output sam stat file (to be added to readqc_files.txt)\n@return rnaCountFile: output sam stat file (to be added to readqc_files.txt)\n\n\n\"\"\"\n\n## Removed!\n##def illumina_sciclone_analysis(origFastq, isPairedEnd, log, libName=None, isRna=None):\n\ndef illumina_sciclone_analysis2(origFastq, isPairedEnd, log, libName=None, isRna=None):\n ## detect lib is rna or not\n sequnitFileName, exitCode = 
safe_basename(origFastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n # if libName is None and isRna is None:\n # _, _, libName, isRna = get_lib_info(sequnitFileNamePrefix, log) ## seqProjectId, ncbiOrganismName not used\n #\n # if isRna == \"N/A\":\n # log.error(\"Failed to get lib info for %s\", sequnitFileNamePrefix)\n # return RQCExitCodes.JGI_FAILURE, -1, -1\n\n if isRna == '1':\n isRna = True\n\n elif isRna == '0':\n isRna = False\n\n if isRna:\n log.debug(\"The lib is RNA (%s)\", libName)\n else:\n log.debug(\"The lib is DNA (%s)\", libName)\n\n if isPairedEnd:\n log.debug(\"It's pair-ended.\")\n else:\n log.debug(\"It's single-ended.\")\n\n ## output dir\n sciclone_dir = \"sciclone_analysis\"\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n sciclone_path = os.path.join(READ_OUTPUT_PATH, sciclone_dir)\n\n ## Define the subdirectory. If it exists already, remove it\n if os.path.isdir(sciclone_path):\n rm_dir(sciclone_path)\n\n make_dir(sciclone_path)\n change_mod(sciclone_path, \"0755\")\n\n ## NOTE: Save count file in analysis object\n dnaCountFile = None\n rnaCountFile = None\n\n if isRna:\n rnaCountFile = os.path.join(sciclone_path, sequnitFileNamePrefix + \"_bbduk_sciclone_rna_count.txt\")\n else:\n dnaCountFile = os.path.join(sciclone_path, sequnitFileNamePrefix + \"_bbduk_sciclone_dna_count.txt\")\n\n cdir = os.path.dirname(__file__)\n bbdukShCmd = os.path.join(cdir, '../../bbduk.sh') #RQCReadQcCommands.BBDUK_SH_CMD\n bbdukRnaDb = RQCReadQcReferenceDatabases.SCICLONE_RNA2\n # bbdukDnaDb = RQCReadQcReferenceDatabases.SCICLONE_DNA2\n\n cmd = None\n\n if isRna:\n ## Localization file to /scratch/rqc\n # log.info(\"Sciclone RNA ref DB localization started for %s\", bbdukRnaDb)\n # localizedDb = localize_file(bbdukRnaDb, log)\n\n ## 04262017 Skip localization temporarily until /scratch can be mounted\n ## in shifter container\n # if os.environ['NERSC_HOST'] == \"genepool\":\n # localizedDb = localize_file(bbdukRnaDb, log)\n # else:\n # localizedDb = None\n #\n # if localizedDb is None:\n # localizedDb = bbdukRnaDb ## use the orig location\n # else:\n # log.info(\"Use the localized file, %s\", localizedDb)\n\n localizedDb = bbdukRnaDb ## use the orig location\n\n ## bbduk.sh in=7365.2.69553.AGTTCC.fastq.gz ref=/global/projectb/sandbox/gaag/bbtools/data/sciclone_rna.fa out=null fbm=t k=31 mbk=0 stats=sciclone2.txt\n cmd = \"%s in=%s ref=%s out=null fbm=t k=31 mbk=0 stats=%s statscolumns=3 \" % (bbdukShCmd, origFastq, localizedDb, rnaCountFile)\n\n else:\n ## Localization file to /scratch/rqc\n # log.info(\"Sciclone DNA ref DB localization started for %s\", bbdukDnaDb)\n # localizedDb = localize_file(bbdukDnaDb, log)\n\n ## 04262017 Skip localization temporarily until /scratch can be mounted\n ## in shifter container\n # if os.environ['NERSC_HOST'] == \"genepool\":\n # localizedDb = localize_file(bbdukRnaDb, log)\n # else:\n # localizedDb = None\n #\n # if localizedDb is None:\n # localizedDb = bbdukRnaDb ## use the orig location\n # else:\n # log.info(\"Use the localized file, %s\", localizedDb)\n\n localizedDb = bbdukRnaDb ## use the orig location\n\n ## bbduk.sh in=7257.1.64419.CACATTGTGAG.fastq.gz ref=/global/projectb/sandbox/gaag/bbtools/data/sciclone_dna.fa out=null fbm=t k=31 mbk=0 stats=sciclone1.txt\n cmd = \"%s in=%s ref=%s out=null fbm=t k=31 mbk=0 stats=%s statscolumns=3 \" % (bbdukShCmd, origFastq, localizedDb, dnaCountFile)\n\n _, _, exitCode = run_sh_command(cmd, True, log, True)\n\n if exitCode != 0:\n 
log.error(\"Failed to run bbduk.sh cmd\")\n return RQCExitCodes.JGI_FAILURE, None, None\n\n log.debug(\"rnaCountFile = %s\", rnaCountFile)\n log.debug(\"dnaCountFile = %s\", dnaCountFile)\n\n\n return RQCExitCodes.JGI_SUCCESS, dnaCountFile, rnaCountFile\n\n\n\"\"\" STEP13 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nillumina_read_megablast\n\n Title: illumina_read_megablast\n Function: Takes path(s) to bz2 zipped or gzipped fastq file\n and runs megablast against the reads.\n Usage: illumina_read_megablast(\\@seq_files, $subsampledFile, $read_length, $log)\n Args: 1) reference to an array containing bz2 zipped or gzipped fastq file path(s);\n the files should all be compressed the same way\n 2) subsampled fastq file\n 3) read length\n 4) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: None.\n\n\n@param subsampledFile: source fastq file (full path)\n@param log\n\n@return retCode: success or failure\n\n\"\"\"\n## No longer needed. Removed.\n##def illumina_read_megablast(subsampledFile, read_num_to_pass, log, blastDbPath=None):\n\n\n\n\"\"\" STEP14 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\n##def illumina_read_blastn_refseq_microbial(subsampledFile, log, blastDbPath=None):\n## 12212015 sulsj REMOVED!\n\n\n\n\"\"\" STEP14 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\ndef illumina_read_blastn_refseq_archaea(subsampledFile, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n bbtoolsReformatShCmd = os.path.join(cdir, '../../reformat.sh') #RQCReadQcCommands.BBTOOLS_REFORMAT_CMD\n\n ## verify the fastq file\n if not os.path.isfile(subsampledFile):\n log.error(\"Failed to find fastq file\")\n return RQCExitCodes.JGI_FAILURE\n\n log.info(\"Read level contamination analysis using blastn\")\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n ## output dir\n megablastDir = \"megablast\"\n megablastPath = os.path.join(READ_OUTPUT_PATH, megablastDir)\n make_dir(megablastPath)\n change_mod(megablastPath, \"0755\")\n\n ## 20140929 Replaced with reformat.sh\n queryFastaFileName = \"reads.fa\"\n\n ## reformat.sh for converting fastq to fasta\n cmd = \"%s in=%s out=%s qin=33 qout=33 ow=t \" % (bbtoolsReformatShCmd, subsampledFile, os.path.join(megablastPath, queryFastaFileName))\n\n _, _, exitCode = run_sh_command(cmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"Failed to run reformat.sh to convert fastq to fasta: %s\", cmd)\n return RQCExitCodes.JGI_FAILURE\n\n megablastOutputFile = None\n db = \"refseq.archaea\"\n\n log.info(\"---------------------------------------------\")\n log.info(\"Start blastn search against %s\", db)\n log.info(\"---------------------------------------------\")\n\n ## final output ==> READ_OUTPUT_PATH/megablast\n retCode, megablastOutputFile = run_blastplus_py(os.path.join(megablastPath, queryFastaFileName), db, log)\n\n if retCode != RQCExitCodes.JGI_SUCCESS:\n if megablastOutputFile is None:\n log.error(\"Failed to run blastn against %s. Ret = %s\", db, retCode)\n retCode = RQCExitCodes.JGI_FAILURE\n elif megablastOutputFile == -143:\n log.warning(\"Blast overtime. 
Skip the search against %s.\", db)\n retCode = -143 ## blast overtime\n else:\n log.info(\"Successfully ran blastn of reads against %s\", db)\n retCode = RQCExitCodes.JGI_SUCCESS\n\n\n return retCode\n\n\n\"\"\" STEP15 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\ndef illumina_read_blastn_refseq_bacteria(subsampledFile, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n bbtoolsReformatShCmd = os.path.join(cdir, '../../reformat.sh') #RQCReadQcCommands.BBTOOLS_REFORMAT_CMD\n\n ## verify the fastq file\n if not os.path.isfile(subsampledFile):\n log.error(\"Failed to find fastq file for blastn\")\n return RQCExitCodes.JGI_FAILURE\n\n log.info(\"Read level contamination analysis using blastn\")\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n ## output dir\n megablastDir = \"megablast\"\n megablastPath = os.path.join(READ_OUTPUT_PATH, megablastDir)\n make_dir(megablastPath)\n change_mod(megablastPath, \"0755\")\n\n ## 20140929 Replaced with reformat.sh\n queryFastaFileName = \"reads.fa\"\n\n ## reformat.sh for converting fastq to fasta\n cmd = \"%s in=%s out=%s qin=33 qout=33 ow=t \" % (bbtoolsReformatShCmd, subsampledFile, os.path.join(megablastPath, queryFastaFileName))\n\n _, _, exitCode = run_sh_command(cmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"Failed to run reformat.sh to convert fastq to fasta: %s\", cmd)\n return RQCExitCodes.JGI_FAILURE\n\n megablastOutputFile = None\n db = \"refseq.bacteria\"\n\n log.info(\"---------------------------------------------\")\n log.info(\"Start blastn search against %s\", db)\n log.info(\"---------------------------------------------\")\n\n ## final output ==> READ_OUTPUT_PATH/megablast\n retCode, megablastOutputFile = run_blastplus_py(os.path.join(megablastPath, queryFastaFileName), db, log)\n\n if retCode != RQCExitCodes.JGI_SUCCESS:\n if megablastOutputFile is None:\n log.error(\"Failed to run blastn against %s. Ret = %s\", db, retCode)\n retCode = RQCExitCodes.JGI_FAILURE\n elif megablastOutputFile == -143:\n log.warning(\"Blast overtime. 
Skip the search against %s.\", db)\n retCode = -143 ## blast overtime\n else:\n log.info(\"Successfully ran blastn of reads against %s\", db)\n retCode = RQCExitCodes.JGI_SUCCESS\n\n return retCode\n\n\n\"\"\" STEP16 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\ndef illumina_read_blastn_nt(subsampledFile, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n bbtoolsReformatShCmd = os.path.join(cdir, '../../reformat.sh') #RQCReadQcCommands.BBTOOLS_REFORMAT_CMD\n\n ## verify the fastq file\n if not os.path.isfile(subsampledFile):\n log.error(\"Failed to find fastq file for blastn\")\n return RQCExitCodes.JGI_FAILURE\n\n log.info(\"Read level contamination analysis using blastn\")\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n ## output dir\n megablastDir = \"megablast\"\n megablastPath = os.path.join(READ_OUTPUT_PATH, megablastDir)\n make_dir(megablastPath)\n change_mod(megablastPath, \"0755\")\n\n ## 20140929 Replaced with reformat.sh\n queryFastaFileName = \"reads.fa\"\n\n ## reformat.sh\n cmd = \"%s in=%s out=%s qin=33 qout=33 ow=t \" % (bbtoolsReformatShCmd, subsampledFile, os.path.join(megablastPath, queryFastaFileName))\n\n _, _, exitCode = run_sh_command(cmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"Failed to run reformat.sh to convert fastq to fasta: %s\", cmd)\n return RQCExitCodes.JGI_FAILURE\n\n megablastOutputFile = None\n db = \"nt\"\n\n log.info(\"----------------------------------\")\n log.info(\"Start blastn search against %s\", db)\n log.info(\"----------------------------------\")\n\n ## final output ==> READ_OUTPUT_PATH/megablast\n retCode, megablastOutputFile = run_blastplus_py(os.path.join(megablastPath, queryFastaFileName), db, log)\n\n if retCode != RQCExitCodes.JGI_SUCCESS:\n if megablastOutputFile is None:\n log.error(\"Failed to run blastn against %s. Ret = %s\", db, retCode)\n retCode = RQCExitCodes.JGI_FAILURE\n elif megablastOutputFile == -143:\n log.warning(\"Blast overtime. 
Skip the search against %s.\", db)\n retCode = -143 ## blast overtime\n else:\n log.info(\"Successfully ran blastn of reads against %s\", db)\n retCode = RQCExitCodes.JGI_SUCCESS\n\n return retCode\n\n\n\"\"\" STEP17 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nillumina_generate_index_sequence_detection_plot\n\n\"\"\"\ndef illumina_generate_index_sequence_detection_plot(fastq, log, isMultiplexed=None): ## TO BE REMOVED!\n isMultiplexed = 0\n if not os.path.isfile(fastq):\n log.error(\"Failed to find the input fastq file, %s\", fastq)\n return RQCExitCodes.JGI_FAILURE, None, None, None\n else:\n log.info(\"fastq file for index sequence analysis: %s\", fastq)\n\n sequnitFileName, exitCode = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n # if isMultiplexed is None:\n # isMultiplexed = get_multiplex_info(sequnitFileNamePrefix, log)\n\n #retCode = None\n\n demultiplexStatsFile = None\n demultiplexPlotDataFile = None\n detectionPlotPngFile = None\n storedDemultiplexStatsFile = None\n\n if int(isMultiplexed) == 1:\n log.info(\"Multiplexed - start analyzing...\")\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n demul_dir = \"demul\"\n demulPath = os.path.join(READ_OUTPUT_PATH, demul_dir)\n make_dir(demulPath)\n change_mod(demulPath, \"0755\")\n\n ## This version is sorted by percent for readability of stats\n demultiplexStatsFile = os.path.join(demulPath, sequnitFileNamePrefix + \".demultiplex_stats\")\n\n ## This version has index column and sort by index for plot\n demultiplexPlotDataFile = os.path.join(demulPath, sequnitFileNamePrefix + \".demultiplex_stats.tmp\")\n\n ## This path is relative to final qual location to be stored in analysis obj.\n detectionPlotPngFile = os.path.join(demulPath, sequnitFileNamePrefix + \".index_sequence_detection.png\")\n\n storedDemultiplexStatsFile = os.path.join(demulPath, sequnitFileNamePrefix + \".demultiplex_stats\")\n\n if not os.path.isfile(demultiplexStatsFile):\n indexSeq = None\n line = None\n header = None\n indexSeqCounter = {}\n\n catCmd, exitCode = get_cat_cmd(fastq, log)\n\n if fastq.endswith(\".gz\"):\n catCmd = \"zcat\" ## pigz does not work with subprocess\n\n seqCount = 0\n\n try:\n proc = subprocess.Popen([catCmd, fastq], bufsize=2 ** 16, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n while 1:\n line = proc.stdout.readline()\n if not line:\n break\n\n ## First line is header\n line.strip()\n header = line\n\n ## Get second line of record - sequence\n line = proc.stdout.readline()\n\n ## Get third line - junk\n line = proc.stdout.readline()\n\n ## Get the final line (4th line) - quality\n line = proc.stdout.readline()\n\n ## Parse the header\n headerFields = header.split(\":\")\n\n ## The last index is the index\n indexSeq = headerFields[-1].strip()\n assert indexSeq\n\n ## Increment the counts and store the index\n if indexSeq in indexSeqCounter:\n indexSeqCounter[indexSeq] += 1\n else:\n indexSeqCounter[indexSeq] = 1\n\n seqCount += 1\n\n except Exception as e:\n if log:\n log.error(\"Exception in file reading: %s\", e)\n log.error(\"Failed to read the given fastq file [%s]\", fastq)\n log.error(\"Fastq header doesn't have the index sequence: %s\", header)\n log.error(\"Index sequence analysis is skipped!\")\n\n return RQCExitCodes.JGI_SUCCESS, None, None, None\n\n ## Open the output file handles for writing\n log.info(\"demultiplexPlotDataFile = %s\", demultiplexPlotDataFile)\n 
log.info(\"detectionPlotPngFile = %s\", detectionPlotPngFile)\n log.info(\"demultiplexStatsFile = %s\", demultiplexStatsFile)\n log.info(\"storedDemultiplexStatsFile = %s\", storedDemultiplexStatsFile)\n\n plotDataFH = open(demultiplexPlotDataFile, \"w\")\n statsFH = open(demultiplexStatsFile, \"w\")\n\n ## Count the total number of indexes found\n numIndexesFound = len(indexSeqCounter)\n\n ## Store the data header information for printing\n reportHeader = \"\"\"# Demultiplexing Summary\n#\n# Seq unit name: %s\n# Total sequences: %s\n# Total indexes found: %s\n# 1=indexSeq 2=index_sequence_count 3=percent_of_total\n#\n\"\"\" % (sequnitFileName, seqCount, numIndexesFound)\n\n statsFH.write(reportHeader)\n\n ## Sort by value, descending\n log.debug(\"Sort by value of indexSeqCounter\")\n for indexSeq in sorted(indexSeqCounter, key=indexSeqCounter.get, reverse=True):\n perc = float(indexSeqCounter[indexSeq]) / float(seqCount) * 100\n l = \"%s\\t%s\\t%.6f\\n\" % (indexSeq, indexSeqCounter[indexSeq], perc)\n statsFH.write(l)\n\n ## Sort by index and add id column for plotting\n log.debug(\"Sort by index of indexSeqCounter\")\n i = 1\n for indexSeq in sorted(indexSeqCounter.iterkeys()):\n perc = float(indexSeqCounter[indexSeq]) / float(seqCount) * 100\n l = \"%s\\t%s\\t%s\\t%.6f\\n\" % (i, indexSeq, indexSeqCounter[indexSeq], perc)\n plotDataFH.write(l)\n i += 1\n\n plotDataFH.close()\n statsFH.close()\n\n log.debug(\"demultiplex plotting...\")\n ## matplotlib plotting\n # data\n # Index_seq_id Index_seq indexSeqCounter percent\n # 1 AAAAAAAAAAAA 320 0.000549\n # 2 AAAAAAAAAAAC 16 0.000027\n # 3 AAAAAAAAAAAG 8 0.000014\n # 4 AAAAAAAAAACA 4 0.000007\n # 5 AAAAAAAAAACG 2 0.000003\n # 6 AAAAAAAAAAGA 6 0.000010\n # 7 AAAAAAAAAATA 6 0.000010\n\n # rawDataMatrix = np.loadtxt(demultiplexPlotDataFile, delimiter='\\t', comments='#')\n # assert len(rawDataMatrix[1][:]) == 4\n\n ## For a textfile with 4000x4000 words this is about 10 times faster than loadtxt.\n ## http://stackoverflow.com/questions/14985233/load-text-file-as-strings-using-numpy-loadtxt\n def load_data_file(fname):\n data = []\n\n with open(fname, 'r') as FH:\n lineCnt = 0\n for line in FH:\n if lineCnt > 10000: ## experienced out of mem with a stat file with 9860976 index sequences\n log.warning(\"Too many index sequences. 
Only 10000 index sequences will be used for plotting.\")\n break\n data.append(line.replace('\\n', '').split('\\t'))\n lineCnt += 1\n\n return data\n\n def column(matrix, i, opt):\n if opt == \"int\":\n return [int(row[i]) for row in matrix]\n elif opt == \"float\":\n return [float(row[i]) for row in matrix]\n else:\n return [row[i] for row in matrix]\n\n rawDataMatrix = load_data_file(demultiplexPlotDataFile)\n\n fig, ax = plt.subplots()\n\n markerSize = 6.0\n lineWidth = 1.5\n\n p1 = ax.plot(column(rawDataMatrix, 0, \"int\"), column(rawDataMatrix, 3, \"float\"), 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='N', alpha=0.5)\n\n ax.set_xlabel(\"Index Sequence ID\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction\", fontsize=12, alpha=0.5)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n labels = [\"%s\" % i for i in column(rawDataMatrix, 1, \"str\")]\n\n ## Show index_seq in the plot\n for i in rawDataMatrix:\n ax.text(i[0], float(i[3]) + .2, \"%s\" % i[1])\n\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=labels))\n\n detectionPlotHtml = os.path.join(demulPath, sequnitFileNamePrefix + \".index_sequence_detection.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, detectionPlotHtml)\n\n ## Save Matplotlib plot in png format\n plt.savefig(detectionPlotPngFile, dpi=fig.dpi)\n\n if exitCode != 0:\n log.error(\"Failed to create demulplex plot\")\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n log.info(\"demulplex stats and plot generation completed!\")\n\n\n else:\n log.info(\"Not multiplexed - skip this analysis.\")\n return RQCExitCodes.JGI_SUCCESS, None, None, None\n\n if detectionPlotPngFile is not None and storedDemultiplexStatsFile is not None and os.path.isfile(\n detectionPlotPngFile) and os.path.isfile(storedDemultiplexStatsFile):\n return RQCExitCodes.JGI_SUCCESS, demultiplexStatsFile, detectionPlotPngFile, detectionPlotHtml\n\n else:\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n\n\"\"\" STEP18 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nend_of_read_illumina_adapter_check\n\n usage: kmercount_pos.py [-h] [-k <int>] [-c <int>] [-t <int>] [-p <file>]\n fastaFile fastqFile [fastqFile ...]\n\n Count occurance of database kmers in reads\n\n positional arguments:\n fastaFile Input FASTA file(s). Text or gzip\n fastqFile Input FASTQ file(s). 
Text or gzip\n\n optional arguments:\n -h, --help show this help message and exit\n -k <int> kmer length (default: 16)\n -c <int> minimum allowed coverage (default: 2)\n -t <int> target coverage (default: 30)\n -p <file>, --plot <file> plot data and save as png to <file> (default: None)\n\n * NOTE\n - RQC-383 04082014: Updated to the newest version of kmercount_pos.py (readqc ver 5.0.4)\n\n\"\"\"\ndef end_of_read_illumina_adapter_check(firstSubsampledFastqFile, log):\n cdir = os.path.dirname(__file__)\n kmercountPosCmd = os.path.join(cdir, 'kmercount_pos.py') #RQCReadQcCommands.KMERCOUNT_POS_CMD\n adapterDbName = RQCReadQcReferenceDatabases.END_OF_READ_ILLUMINA_ADAPTER_CHECK_DB\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, exitCode = safe_basename(firstSubsampledFastqFile, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n plotFile = \"\"\n dataFile = \"\"\n\n adapterCheckDir = \"adapter\"\n adapterCheckPath = os.path.join(READ_OUTPUT_PATH, adapterCheckDir)\n make_dir(adapterCheckPath)\n change_mod(adapterCheckPath, \"0755\")\n\n plotFile = os.path.join(adapterCheckPath, sequnitFileNamePrefix + \".end_of_read_adapter_check.png\") ## ignored\n dataFile = os.path.join(adapterCheckPath, sequnitFileNamePrefix + \".end_of_read_adapter_check.txt\")\n\n ## Localization file to /scratch/rqc\n log.info(\"illumina_adapter_check DB localization started for %s\", adapterDbName)\n\n # if os.environ['NERSC_HOST'] == \"genepool\":\n # localizedDb = localize_file(adapterDbName, log)\n # else:\n # localizedDb = None\n #\n # if localizedDb is None:\n # localizedDb = adapterDbName ## use the orig location\n # else:\n # log.info(\"Use the localized file, %s\", localizedDb)\n\n localizedDb = adapterDbName ## use the orig location\n\n ## ex) kmercount_pos.py --plot plot.png Artifacts.adapters_primers_only.fa subsample.fastq > counts_by_pos.txt\n cmd = \"%s --plot %s %s %s > %s \" % (kmercountPosCmd, plotFile, localizedDb, firstSubsampledFastqFile, dataFile)\n\n ## Run cmd\n _, _, exitCode = run_sh_command(cmd, True, log, True)\n assert exitCode == 0\n\n ## mpld3 plots gen\n rawDataMatrix = np.loadtxt(dataFile, delimiter='\\t', comments='#', skiprows=0)\n assert len(rawDataMatrix[1][:]) == 3 or len(rawDataMatrix[1][:]) == 5\n\n ##pos read1 read2\n ## 0 1 2\n\n ## This output file format is changed on 2013.06.26 (RQC-442)\n ##\n ## pos read1_count read1_perc read2_count read2_perc\n ##\n fig, ax = plt.subplots()\n\n markerSize = 3.5\n lineWidth = 1.0\n\n if len(rawDataMatrix[1][:]) != 5: ## support for old file\n p1 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 1], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label=\"read1\", alpha=0.5)\n p2 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 2], 'g', marker='d', markersize=markerSize, linewidth=lineWidth, label=\"read2\", alpha=0.5)\n ax.set_ylabel(\"Read Count with Database K-mer\", fontsize=12, alpha=0.5)\n\n else:\n p1 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 2], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label=\"read1\", alpha=0.5)\n p2 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 4], 'g', marker='d', markersize=markerSize, linewidth=lineWidth, label=\"read2\", alpha=0.5)\n ax.set_ylim([0, 100])\n ax.set_ylabel(\"Percent Reads with Database K-mer\", fontsize=12, alpha=0.5)\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.yaxis.set_label_coords(-0.095, 0.75)\n fontProp = FontProperties()\n 
fontProp.set_size(\"small\")\n fontProp.set_family(\"Bitstream Vera Sans\")\n ax.legend(loc=1, prop=fontProp)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=list(rawDataMatrix[:, 1])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p2[0], labels=list(rawDataMatrix[:, 2])))\n\n pngFile = os.path.join(adapterCheckPath, sequnitFileNamePrefix + \".end_of_read_adapter_check.png\")\n htmlFile = os.path.join(adapterCheckPath, sequnitFileNamePrefix + \".end_of_read_adapter_check.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFile)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFile, dpi=fig.dpi)\n\n if exitCode != 0:\n log.error(\"Failed to run kmercountPosCmd\")\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n if os.path.isfile(plotFile) and os.path.isfile(dataFile):\n log.info(\"kmercount_pos completed.\")\n return RQCExitCodes.JGI_SUCCESS, dataFile, pngFile, htmlFile\n\n else:\n log.error(\"cannot find the output files from kmercount_pos. kmercount_pos failed.\")\n return RQCExitCodes.JGI_FAILURE, None, None, None\n\n\n\"\"\" STEP19 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ninsert_size_analysis\n\nUsing bbmerge.sh from bbtools, create insert size histogram (static/interactive) plots using D3 and data file\n\ne.q.\n java -ea -Xmx200m -cp /usr/common/jgi/utilities/bbtools/prod-v32.28/lib/BBTools.jar jgi.BBMerge\n in=/global/projectb/scratch/brycef/rqc-dev/staging/00/00/66/26/6626.2.48981.TTCTCC.fastq ihist=ihist.txt\n Executing jgi.BBMerge [in=/global/projectb/scratch/brycef/rqc-dev/staging/00/00/66/26/6626.2.48981.TTCTCC.fastq, ihist=ihist.txt]\n\n e.g.\n bbmerge.sh in=[path-to-fastq] hist=hist.txt\n - you should use the whole fastq, not just the subsampled fastq\n\n\"\"\"\ndef insert_size_analysis(fastq, log):\n ## Tools\n cdir = os.path.dirname(__file__)\n bbmergeShCmd = os.path.join(cdir, '../../bbmerge.sh') #RQCReadQcCommands.BBMERGE_SH_CMD\n\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, exitCode = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n plotFile = \"\"\n dataFile = \"\"\n\n insertSizeOutDir = \"insert_size_analysis\"\n insertSizeOutPath = os.path.join(READ_OUTPUT_PATH, insertSizeOutDir)\n make_dir(insertSizeOutPath)\n change_mod(insertSizeOutPath, \"0755\")\n\n plotFile = os.path.join(insertSizeOutPath, sequnitFileNamePrefix + \".insert_size_histo.png\")\n htmlFile = os.path.join(insertSizeOutPath, sequnitFileNamePrefix + \".insert_size_histo.html\")\n dataFile = os.path.join(insertSizeOutPath, sequnitFileNamePrefix + \".insert_size_histo.txt\")\n\n ## TODO\n ## if it's single ended\n ## 1. rqcfilter.sh for adapter trim\n ## 2. reformat.sh for getting lhist\n ## 3. 
analyze lhist.txt\n\n ## ex) bbmerge.sh in=7601.1.77813.CTTGTA.fastq.gz hist=.../insert_size_analysis/7601.1.77813.CTTGTA.insert_size_histo.txt\n ## reads=1000000 --> 1M reads are enough for insert size analysis\n cmd = \"%s in=%s hist=%s reads=1000000 \" % (bbmergeShCmd, fastq, dataFile)\n\n ## Run cmd\n _, stdErr, exitCode = run_sh_command(cmd, True, log, True)\n\n if exitCode != 0:\n log.error(\"Failed to run bbmerge_sh_cmd.\")\n return RQCExitCodes.JGI_FAILURE, None, None, None, None\n\n retCode = {}\n\n ## File format\n ## BBMerge version 5.0\n ## Finished reading\n ## Total time: 8.410 seconds.\n ##\n ## Pairs: 1000000\n ## Joined: 556805 55.681%\n ## Ambiguous: 9665 0.967%\n ## No Solution: 433474 43.347%\n ## Too Short: 56 0.006%\n ## Avg Insert: 234.6\n ## Standard Deviation: 33.9\n ## Mode: 250\n ##\n ## Insert range: 26 - 290\n ## 90th percentile: 277\n ## 75th percentile: 262\n ## 50th percentile: 238\n ## 25th percentile: 211\n ## 10th percentile: 188\n\n for l in stdErr.split('\\n'):\n toks = l.split()\n if l.startswith(\"Total time\"):\n retCode[\"total_time\"] = toks[2]\n elif l.startswith(\"Reads\"):\n retCode[\"num_reads\"] = toks[1]\n elif l.startswith(\"Pairs\"):\n retCode[\"num_reads\"] = toks[1]\n elif l.startswith(\"Joined\"):\n retCode[\"joined_num\"] = toks[1]\n retCode[\"joined_perc\"] = toks[2]\n elif l.startswith(\"Ambiguous\"):\n retCode[\"ambiguous_num\"] = toks[1]\n retCode[\"ambiguous_perc\"] = toks[2]\n elif l.startswith(\"No Solution\"):\n retCode[\"no_solution_num\"] = toks[2]\n retCode[\"no_solution_perc\"] = toks[3]\n elif l.startswith(\"Too Short\"):\n retCode[\"too_short_num\"] = toks[2]\n retCode[\"too_short_perc\"] = toks[3]\n elif l.startswith(\"Avg Insert\"):\n retCode[\"avg_insert\"] = toks[2]\n elif l.startswith(\"Standard Deviation\"):\n retCode[\"std_insert\"] = toks[2]\n elif l.startswith(\"Mode\"):\n retCode[\"mode_insert\"] = toks[1]\n elif l.startswith(\"Insert range\"):\n retCode[\"insert_range_start\"] = toks[2]\n retCode[\"insert_range_end\"] = toks[4]\n elif l.startswith(\"90th\"):\n retCode[\"perc_90th\"] = toks[2]\n elif l.startswith(\"50th\"):\n retCode[\"perc_50th\"] = toks[2]\n elif l.startswith(\"10th\"):\n retCode[\"perc_10th\"] = toks[2]\n elif l.startswith(\"75th\"):\n retCode[\"perc_75th\"] = toks[2]\n elif l.startswith(\"25th\"):\n retCode[\"perc_25th\"] = toks[2]\n\n log.debug(\"Insert size stats: %s\", str(retCode))\n\n ## -------------------------------------------------------------------------\n ## plotting\n rawDataMatrix = np.loadtxt(dataFile, delimiter='\\t', comments='#')\n\n ## File format\n ## loc val\n ## 1 11\n try:\n rawDataX = rawDataMatrix[:, 0]\n rawDataY = rawDataMatrix[:, 1]\n\n except IndexError:\n log.info(\"No solution from bbmerge.\")\n return RQCExitCodes.JGI_SUCCESS, dataFile, None, None, retCode\n\n fig, ax = plt.subplots()\n\n markerSize = 5.0\n lineWidth = 1.5\n\n p1 = ax.plot(rawDataX, rawDataY, 'r', marker='o', markersize=markerSize, linewidth=lineWidth, alpha=0.5)\n\n ax.set_xlabel(\"Insert Size\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Count\", fontsize=12, alpha=0.5)\n\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=list(rawDataY)))\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFile)\n\n ## Save Matplotlib plot in png format\n plt.savefig(plotFile, dpi=fig.dpi)\n\n ## Checking outputs\n if os.path.isfile(plotFile) and os.path.isfile(dataFile) and os.path.isfile(htmlFile):\n 
log.info(\"insert_size_analysis completed.\")\n return RQCExitCodes.JGI_SUCCESS, dataFile, plotFile, htmlFile, retCode\n\n else:\n log.error(\"cannot find the output files. insert_size_analysis failed.\")\n return RQCExitCodes.JGI_FAILURE, None, None, None, None\n\n\n\"\"\" STEP20 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ngc_divergence_analysis\n\n\n\"\"\"\ndef gc_divergence_analysis(fastq, bIsPaired, srcDir, log):\n ## Tools\n # rscriptCmd = RQCReadQcCommands.RSCRIPT_CMD\n\n READ_FILES_FILE = RQCReadQcConfig.CFG[\"files_file\"]\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, exitCode = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n ## get the bhist.txt produced by reformat.sh\n reformatBhistFile = None\n with open(READ_FILES_FILE, \"r\") as FFH:\n for l in FFH.readlines():\n if l.startswith(\"read base count text 1\"):\n reformatBhistFile = l.split(\"=\")[1].strip()\n assert os.path.isfile(reformatBhistFile), \"reformatBhistFile does not exist: %s\" % (l)\n break\n\n assert reformatBhistFile, \"ERROR: reformatBhistFile cannot be found in %s.\" % (READ_FILES_FILE)\n log.debug(\"gc_divergence_analysis(): bhist file = %s\", reformatBhistFile)\n\n gcDivergenceTransformedFile = os.path.join(qualPath, sequnitFileNamePrefix + \".gc.divergence.transformed.csv\") ## gc divergence transformed csv\n gcDivergenceTransformedPlot = os.path.join(qualPath, sequnitFileNamePrefix + \".gc.divergence.transformed.png\") ## gc divergence transformed plot\n gcDivergenceCoefficientsFile = os.path.join(qualPath, sequnitFileNamePrefix + \".gc.divergence.coefficients.csv\") ## gc divergence coefficients csv\n\n ## Check base composition histogram\n if not os.path.isfile(reformatBhistFile):\n log.error(\"Bhist file not found: %s\", reformatBhistFile)\n return RQCExitCodes.JGI_FAILURE, None, None, None, None\n\n ##\n ## Need R/3.1.2 b/c of library(dplyr) and library(tidyr) are only installed for the R/3.1.2 version\n ##\n\n ## Transform bhist output from reformat.sh to csv\n if bIsPaired:\n # transformCmd = \"module unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; format_signal_data --input %s --output %s --read both --type composition \" %\\\n transformCmd = \"module unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; format_signal_data --input %s --output %s --read both --type composition \" %\\\n (reformatBhistFile, gcDivergenceTransformedFile)\n\n # cmd = \" \".join([\"module load R; Rscript\", \"--vanilla\", os.path.join(SRCDIR, \"tools\", \"jgi-fastq-signal-processing\", \"format_signal_data\"), ])\n # transformCmd = \"module unload R; module load R; module load R/3.3.2; Rscript --vanilla %s --input %s --output %s --read both --type composition\" %\\\n if os.environ['NERSC_HOST'] in (\"denovo\", \"cori\"):\n transformCmd = \"Rscript --vanilla %s --input %s --output %s --read both --type composition\" %\\\n (os.path.join(srcDir, \"tools/jgi-fastq-signal-processing/bin\", \"format_signal_data\"), reformatBhistFile, gcDivergenceTransformedFile)\n else:\n # transformCmd = \"module unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; format_signal_data --input %s --output %s --read 1 --type composition \" %\\\n transformCmd = \"module 
unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; format_signal_data --input %s --output %s --read 1 --type composition \" %\\\n (reformatBhistFile, gcDivergenceTransformedFile)\n\n # transformCmd = \"module unload R; module load R; module load R/3.3.2; Rscript --vanilla %s --input %s --output %s --read 1 --type composition\" %\\\n if os.environ['NERSC_HOST'] in (\"denovo\", \"cori\"):\n transformCmd = \"Rscript --vanilla %s --input %s --output %s --read 1 --type composition\" %\\\n (os.path.join(srcDir, \"tools/jgi-fastq-signal-processing/bin\", \"format_signal_data\"), reformatBhistFile, gcDivergenceTransformedFile)\n\n log.debug(\"Transform cmd = %s\", transformCmd)\n\n _, _, exitCode = run_sh_command(transformCmd, True, log, True)\n\n if exitCode != 0:\n log.info(\"Failed to run GC_DIVERGENCE_TRANSFORM.\")\n return -1, None, None, None, None\n\n ## Compute divergence value\n coeff = []\n # modelCmd = \"module unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; model_read_signal --input %s --output %s \" %\\\n modelCmd = \"module unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; model_read_signal --input %s --output %s \" %\\\n (gcDivergenceTransformedFile, gcDivergenceCoefficientsFile)\n\n # modelCmd = \"module unload python; module unload R; module load R/3.3.2; Rscript --vanilla %s --input %s --output %s\" %\\\n if os.environ['NERSC_HOST'] in (\"denovo\", \"cori\"):\n modelCmd = \"Rscript --vanilla %s --input %s --output %s\" %\\\n (os.path.join(srcDir, \"tools/jgi-fastq-signal-processing/bin\", \"model_read_signal\"), gcDivergenceTransformedFile, gcDivergenceCoefficientsFile)\n\n log.debug(\"Model cmd = %s\", modelCmd)\n\n _, _, exitCode = run_sh_command(modelCmd, True, log, True)\n\n if exitCode != 0:\n log.info(\"Failed to run GC_DIVERGENCE_MODEL.\")\n return -1, None, None, None, None\n\n ## Parsing coefficients.csv\n ## ex)\n ## \"read\",\"variable\",\"coefficient\"\n ## \"Read 1\",\"AT\",2\n ## \"Read 1\",\"AT+CG\",2.6\n ## \"Read 1\",\"CG\",0.6\n ## \"Read 2\",\"AT\",1.7\n ## \"Read 2\",\"AT+CG\",2.3\n ## \"Read 2\",\"CG\",0.7\n assert os.path.isfile(gcDivergenceCoefficientsFile), \"GC divergence coefficient file not found.\"\n\n with open(gcDivergenceCoefficientsFile) as COEFF_FH:\n for l in COEFF_FH.readlines():\n if l.startswith(\"\\\"read\\\"\"):\n continue ## skip header line\n toks = l.strip().split(',')\n assert len(toks) == 3, \"Unexpected GC divergence coefficient file format.\"\n coeff.append({\"read\": toks[0], \"variable\": toks[1], \"coefficient\": toks[2]})\n\n ## Plotting\n # plotCmd = \"module unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; plot_read_signal --input %s --output %s --type composition \" %\\\n plotCmd = \"module unload R; module load R/3.2.4; module load jgi-fastq-signal-processing/2.x; plot_read_signal --input %s --output %s --type composition \" %\\\n (gcDivergenceTransformedFile, gcDivergenceTransformedPlot)\n\n # plotCmd = \"module unload python; module unload R; module load R/3.3.2; Rscript --vanilla %s --input %s --output %s --type composition\" %\\\n if os.environ['NERSC_HOST'] in (\"denovo\", \"cori\"):\n plotCmd = \"Rscript --vanilla %s --input %s --output %s --type composition\" %\\\n (os.path.join(srcDir, \"tools/jgi-fastq-signal-processing/bin\", \"plot_read_signal\"), gcDivergenceTransformedFile, gcDivergenceTransformedPlot)\n\n log.debug(\"Plot cmd = %s\", plotCmd)\n\n _, _, exitCode = run_sh_command(plotCmd, True, log, True)\n\n 
if exitCode != 0:\n log.info(\"Failed to run GC_DIVERGENCE_PLOT.\")\n return -1, None, None, None, None\n\n\n return RQCExitCodes.JGI_SUCCESS, gcDivergenceTransformedFile, gcDivergenceTransformedPlot, gcDivergenceCoefficientsFile, coeff\n\n\n\"\"\" STEP22 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ncleanup_readqc\n\nCleaning up the ReadQC analysis directory with unwanted files.\n\n@param log\n\n@return retCode: always return success\n\n\"\"\"\ndef cleanup_readqc(log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n ## Purge FASTQs\n cmd = \"rm -f %s/%s/*.fastq \" % (READ_OUTPUT_PATH, \"subsample\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n cmd = \"rm -f %s/%s/*.fastq \" % (READ_OUTPUT_PATH, \"qual\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n ## Purge FASTA qual and Fasta file\n cmd = \"rm -f %s/%s/reads.fa \" % (READ_OUTPUT_PATH, \"megablast\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n cmd = \"rm -f %s/%s/reads.qual \" % (READ_OUTPUT_PATH, \"megablast\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n ## Delete Sciclone files\n cmd = \"rm -f %s/%s/*.fastq \" % (READ_OUTPUT_PATH, \"sciclone_analysis\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n cmd = \"rm -f %s/%s/*.fq \" % (READ_OUTPUT_PATH, \"sciclone_analysis\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n cmd = \"rm -f %s/%s/*.sam \" % (READ_OUTPUT_PATH, \"sciclone_analysis\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n cmd = \"rm -f %s/%s/*.sai \" % (READ_OUTPUT_PATH, \"sciclone_analysis\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n ## Purge Blast files\n cmd = \"rm -f %s/%s/megablast*v*JFfTIT \" % (READ_OUTPUT_PATH, \"megablast\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n ## purge megablast.reads.fa.v.nt.FmLD2a10p90E30JFfTITW45; megablast.reads.fa.v.refseq.microbial.FmLD2a10p90E30JFfTITW45\n cmd = \"rm -f %s/%s/megablast*v*FfTITW45 \" % (READ_OUTPUT_PATH, \"megablast\")\n _, _, exitCode = run_sh_command(cmd, True, log)\n\n if exitCode != 0:\n log.error(\"Failed to execute %s; may be already purged.\", cmd)\n\n\n return RQCExitCodes.JGI_SUCCESS\n\n\n## ===========================================================================================================================\n\n\n\"\"\" For NEW STEP4\n\nQC plot generation using the outputs from reformat.sh\n\n\"\"\"\ndef gen_average_base_position_quality_plot(fastq, bIsPaired, log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, _ = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n subsampleDir = \"subsample\"\n 
subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n reformatQhistFile = os.path.join(subsamplePath, sequnitFileNamePrefix + \".reformat.qhist.txt\") ## Average Base Position Quality\n\n log.debug(\"qhist file: %s\", reformatQhistFile)\n\n ## Gen Average Base Position Quality Plot\n if not os.path.isfile(reformatQhistFile):\n log.error(\"Qhist file not found: %s\", reformatQhistFile)\n return None, None, None\n\n ## New data format\n ## Load data from txt\n rawDataMatrix = np.loadtxt(reformatQhistFile, delimiter='\\t', comments='#', skiprows=0)\n assert len(rawDataMatrix[1][:]) == 5 or len(rawDataMatrix[1][:]) == 3\n\n ## New data (paired)\n # BaseNum Read1_linear Read1_log Read2_linear Read2_log\n # 1 33.469 30.347 32.459 29.127\n # 2 33.600 32.236 32.663 29.532\n # 3 33.377 30.759 32.768 29.719\n\n fig, ax = plt.subplots()\n\n markerSize = 3.5\n lineWidth = 1.0\n p1 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 1], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='read1')\n\n if bIsPaired:\n p2 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 3], 'g', marker='d', markersize=markerSize, linewidth=lineWidth, label='read2')\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Average Quality Score\", fontsize=12, alpha=0.5)\n ax.set_ylim([0, 45])\n fontProp = FontProperties()\n fontProp.set_size(\"small\")\n fontProp.set_family(\"Bitstream Vera Sans\")\n ax.legend(loc=1, prop=fontProp)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=list(rawDataMatrix[:, 1])))\n\n if bIsPaired:\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p2[0], labels=list(rawDataMatrix[:, 3])))\n\n pngFile = os.path.join(qualPath, sequnitFileNamePrefix + \".r1_r2_baseposqual.png\")\n htmlFile = os.path.join(qualPath, sequnitFileNamePrefix + \".r1_r2_baseposqual.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFile)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFile, dpi=fig.dpi)\n\n return reformatQhistFile, pngFile, htmlFile\n\n\ndef gen_average_base_position_quality_boxplot(fastq, log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, _ = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n subsampleDir = \"subsample\"\n subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n reformatBqhistFile = os.path.join(subsamplePath, sequnitFileNamePrefix + \".reformat.bqhist.txt\") ## Average Base Position Quality Boxplot\n\n log.debug(\"qhist file: %s\", reformatBqhistFile)\n\n ## Gen Average Base Position Quality Boxplot\n if not os.path.isfile(reformatBqhistFile):\n log.error(\"Bqhist file not found: %s\", reformatBqhistFile)\n return None, None, None, None, None\n\n ## New data format (paired)\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18\n ##BaseNum count_1 min_1 max_1 mean_1 Q1_1 med_1 Q3_1 LW_1 RW_1 count_2 min_2 max_2 mean_2 Q1_2 med_2 Q3_2 LW_2 RW_2\n # 0 6900 0 36 33.48 33 34 34 29 36 6900 0 36 33.48 33 34 34 29 36\n\n rawDataMatrix = np.loadtxt(reformatBqhistFile, delimiter='\\t', comments='#', skiprows=0)\n assert len(rawDataMatrix[1][:]) == 19 or 
len(rawDataMatrix[1][:]) == 10\n\n bIsPaired = True\n\n if len(rawDataMatrix[1][:]) == 10:\n bIsPaired = False\n\n ## create data for boxplot\n boxplot_data_r1 = []\n boxplot_data_r2 = []\n\n for i in rawDataMatrix:\n idx = int(i[0]) - 1 ## read base loc\n\n spread = [rawDataMatrix[idx, 5], rawDataMatrix[idx, 7]] # Q1 ~ Q3\n center = [rawDataMatrix[idx, 6]] # median\n flier_high = [rawDataMatrix[idx, 8]] # whisker lW 2%\n flier_low = [rawDataMatrix[idx, 9]] # whisker rW 98%\n boxplot_data_r1.append(np.concatenate((spread, center, flier_high, flier_low), 0))\n\n if bIsPaired:\n spread = [rawDataMatrix[idx, 14], rawDataMatrix[idx, 16]] # Q1 ~ Q3\n center = [rawDataMatrix[idx, 15]] # median\n flier_high = [rawDataMatrix[idx, 17]] # whisker lW 2%\n flier_low = [rawDataMatrix[idx, 18]] # whisker rW 98%\n boxplot_data_r2.append(np.concatenate((spread, center, flier_high, flier_low), 0))\n\n fig, ax = plt.subplots()\n ax.boxplot(boxplot_data_r1)\n plt.subplots_adjust(left=0.06, right=0.9, top=0.9, bottom=0.1)\n\n F = plt.gcf()\n\n ## How to get the current size?\n ## DPI = F.get_dpi() ## = 80\n ## DefaultSize = F.get_size_inches() ## = (8, 6)\n F.set_size_inches(18, 6) ## plot size (w, h)\n\n ax.set_xlabel(\"Read Position\", fontsize=11, alpha=0.5)\n ax.set_ylabel(\"Quality Score (Solexa Scale: 40=Highest, -15=Lowest)\", fontsize=11, alpha=0.5)\n\n ax.set_ylim([-20, 45])\n ax.yaxis.grid(True, linestyle=':', which=\"major\")\n\n majorLocator_x = MultipleLocator(5)\n majorFormatter = FormatStrFormatter(\"%d\")\n minorLocator = MultipleLocator(1)\n\n ax.xaxis.set_major_locator(majorLocator_x)\n ax.xaxis.set_major_formatter(majorFormatter)\n ax.xaxis.set_minor_locator(minorLocator)\n\n majorLocator_y = MultipleLocator(5)\n minorLocator = MultipleLocator(1)\n ax.yaxis.set_major_locator(majorLocator_y)\n ax.yaxis.set_minor_locator(minorLocator)\n\n pngFileR1 = os.path.join(qualPath, sequnitFileNamePrefix + \".r1_average_base_position_quality_boxplot.png\")\n htmlFileR1 = os.path.join(qualPath, sequnitFileNamePrefix + \".r1_average_base_position_quality_boxplot.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFileR1)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFileR1, dpi=fig.dpi)\n\n plotPngPlotFileR2 = None\n plotHtmlPlotFileR2 = None\n\n if bIsPaired:\n fig, ax = plt.subplots()\n ax.boxplot(boxplot_data_r2)\n plt.subplots_adjust(left=0.06, right=0.9, top=0.9, bottom=0.1)\n\n F = plt.gcf()\n\n ## How to get the current size?\n ## DPI = F.get_dpi() ## = 80\n ## DefaultSize = F.get_size_inches() ## = (8, 6)\n F.set_size_inches(18, 6) ## plot size (w, h)\n\n ax.set_xlabel(\"Read Position\", fontsize=11, alpha=0.5)\n ax.set_ylabel(\"Quality Score (Solexa Scale: 40=Highest, -15=Lowest)\", fontsize=11, alpha=0.5)\n\n ax.set_ylim([-20, 45])\n ax.yaxis.grid(True, linestyle=':', which='major')\n\n majorLocator_x = MultipleLocator(5)\n majorFormatter = FormatStrFormatter('%d')\n minorLocator = MultipleLocator(1)\n\n ax.xaxis.set_major_locator(majorLocator_x)\n ax.xaxis.set_major_formatter(majorFormatter)\n ax.xaxis.set_minor_locator(minorLocator)\n\n majorLocator_y = MultipleLocator(5)\n minorLocator = MultipleLocator(1)\n ax.yaxis.set_major_locator(majorLocator_y)\n ax.yaxis.set_minor_locator(minorLocator)\n\n plotPngPlotFileR2 = os.path.join(qualPath, sequnitFileNamePrefix + \".r2_average_base_position_quality_boxplot.png\")\n plotHtmlPlotFileR2 = os.path.join(qualPath, sequnitFileNamePrefix + \".r2_average_base_position_quality_boxplot.html\")\n\n ## Save D3 
interactive plot in html format\n mpld3.save_html(fig, plotHtmlPlotFileR2)\n\n ## Save Matplotlib plot in png format\n plt.savefig(plotPngPlotFileR2, dpi=fig.dpi)\n\n\n return reformatBqhistFile, pngFileR1, plotPngPlotFileR2, htmlFileR1, plotHtmlPlotFileR2\n\n\ndef gen_cycle_nucleotide_composition_plot(fastq, readLength, isPairedEnd, log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, _ = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n subsampleDir = \"subsample\"\n subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n reformatBhistFile = os.path.join(subsamplePath, sequnitFileNamePrefix + \".reformat.bhist.txt\") ## base composition histogram\n\n log.debug(\"gen_cycle_nucleotide_composition_plot(): bhist file = %s\", reformatBhistFile)\n\n ## Genbase composition histogram\n if not os.path.isfile(reformatBhistFile):\n log.error(\"Bhist file not found: %s\", reformatBhistFile)\n return None, None, None, None, None\n\n ## data\n # 0 1 2 3 4 5\n ##Pos A C G T N\n # 0 0.15111 0.26714 0.51707 0.06412 0.00056\n # 1 0.20822 0.20773 0.25543 0.32795 0.00068\n\n rawDataMatrix = np.loadtxt(reformatBhistFile, delimiter='\\t', comments='#')\n assert len(rawDataMatrix[1][:]) == 6\n\n if isPairedEnd:\n rawDataR1 = rawDataMatrix[:readLength - 1][:]\n rawDataR2 = rawDataMatrix[readLength:][:]\n\n ## r1 ------------------------------------------------------------------\n fig, ax = plt.subplots()\n\n markerSize = 2.5\n lineWidth = 1.0\n p2 = ax.plot(rawDataR1[:, 0], rawDataR1[:, 1], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='A', alpha=0.5)\n p3 = ax.plot(rawDataR1[:, 0], rawDataR1[:, 4], 'g', marker='s', markersize=markerSize, linewidth=lineWidth, label='T', alpha=0.5)\n p4 = ax.plot(rawDataR1[:, 0], rawDataR1[:, 3], 'b', marker='*', markersize=markerSize, linewidth=lineWidth, label='G', alpha=0.5)\n p5 = ax.plot(rawDataR1[:, 0], rawDataR1[:, 2], 'm', marker='d', markersize=markerSize, linewidth=lineWidth, label='C', alpha=0.5)\n p6 = ax.plot(rawDataR1[:, 0], rawDataR1[:, 5], 'c', marker='v', markersize=markerSize, linewidth=lineWidth, label='N', alpha=0.5)\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction\", fontsize=12, alpha=0.5)\n fontProp = FontProperties()\n fontProp.set_size(\"small\")\n fontProp.set_family(\"Bitstream Vera Sans\")\n ax.legend(loc=1, prop=fontProp)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p2[0], labels=list(rawDataR1[:, 1])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p3[0], labels=list(rawDataR1[:, 4])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p4[0], labels=list(rawDataR1[:, 3])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p5[0], labels=list(rawDataR1[:, 2])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p6[0], labels=list(rawDataR1[:, 5])))\n\n pngFileR1 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_nucl_composition_r1.png\")\n htmlFileR1 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_nucl_composition_r1.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFileR1)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFileR1, dpi=fig.dpi)\n\n ## r2 
------------------------------------------------------------------\n fig, ax = plt.subplots()\n\n markerSize = 2.5\n lineWidth = 1.0\n p2 = ax.plot([(x - readLength) for x in rawDataR2[:, 0]], rawDataR2[:, 1], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='A', alpha=0.5)\n p3 = ax.plot([(x - readLength) for x in rawDataR2[:, 0]], rawDataR2[:, 4], 'g', marker='s', markersize=markerSize, linewidth=lineWidth, label='T', alpha=0.5)\n p4 = ax.plot([(x - readLength) for x in rawDataR2[:, 0]], rawDataR2[:, 3], 'b', marker='*', markersize=markerSize, linewidth=lineWidth, label='G', alpha=0.5)\n p5 = ax.plot([(x - readLength) for x in rawDataR2[:, 0]], rawDataR2[:, 2], 'm', marker='d', markersize=markerSize, linewidth=lineWidth, label='C', alpha=0.5)\n p6 = ax.plot([(x - readLength) for x in rawDataR2[:, 0]], rawDataR2[:, 5], 'c', marker='v', markersize=markerSize, linewidth=lineWidth, label='N', alpha=0.5)\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction\", fontsize=12, alpha=0.5)\n fontProp = FontProperties()\n fontProp.set_size(\"small\")\n fontProp.set_family(\"Bitstream Vera Sans\")\n ax.legend(loc=1, prop=fontProp)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p2[0], labels=list(rawDataR2[:, 1])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p3[0], labels=list(rawDataR2[:, 4])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p4[0], labels=list(rawDataR2[:, 3])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p5[0], labels=list(rawDataR2[:, 2])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p6[0], labels=list(rawDataR2[:, 5])))\n\n plotPngPlotFileR2 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_nucl_composition_r2.png\")\n plotHtmlPlotFileR2 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_nucl_composition_r2.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, plotHtmlPlotFileR2)\n\n ## Save Matplotlib plot in png format\n plt.savefig(plotPngPlotFileR2, dpi=fig.dpi)\n\n return reformatBhistFile, pngFileR1, htmlFileR1, plotPngPlotFileR2, plotHtmlPlotFileR2\n\n else:\n fig, ax = plt.subplots()\n\n markerSize = 2.5\n lineWidth = 1.0\n p2 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 1], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='A', alpha=0.5)\n p3 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 4], 'g', marker='s', markersize=markerSize, linewidth=lineWidth, label='T', alpha=0.5)\n p4 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 3], 'b', marker='*', markersize=markerSize, linewidth=lineWidth, label='G', alpha=0.5)\n p5 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 2], 'm', marker='d', markersize=markerSize, linewidth=lineWidth, label='C', alpha=0.5)\n p6 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 5], 'c', marker='v', markersize=markerSize, linewidth=lineWidth, label='N', alpha=0.5)\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction\", fontsize=12, alpha=0.5)\n fontProp = FontProperties()\n fontProp.set_size(\"small\")\n fontProp.set_family(\"Bitstream Vera Sans\")\n ax.legend(loc=1, prop=fontProp)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p2[0], labels=list(rawDataMatrix[:, 1])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p3[0], labels=list(rawDataMatrix[:, 4])))\n 
mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p4[0], labels=list(rawDataMatrix[:, 3])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p5[0], labels=list(rawDataMatrix[:, 2])))\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p6[0], labels=list(rawDataMatrix[:, 5])))\n\n pngFile = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_nucl_composition.png\")\n htmlFile = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_nucl_composition.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFile)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFile, dpi=fig.dpi)\n\n return reformatBhistFile, pngFile, htmlFile, None, None\n\n\ndef gen_cycle_n_base_percent_plot(fastq, readLength, isPairedEnd, log):\n READ_OUTPUT_PATH = RQCReadQcConfig.CFG[\"output_path\"]\n\n sequnitFileName, _ = safe_basename(fastq, log)\n sequnitFileNamePrefix = sequnitFileName.replace(\".fastq\", \"\").replace(\".gz\", \"\")\n\n subsampleDir = \"subsample\"\n subsamplePath = os.path.join(READ_OUTPUT_PATH, subsampleDir)\n\n qualDir = \"qual\"\n qualPath = os.path.join(READ_OUTPUT_PATH, qualDir)\n make_dir(qualPath)\n change_mod(qualPath, \"0755\")\n\n reformatBhistFile = os.path.join(subsamplePath, sequnitFileNamePrefix + \".reformat.bhist.txt\") ## base composition histogram\n\n log.debug(\"gen_cycle_n_base_percent_plot(): bhist file = %s\", reformatBhistFile)\n\n ## Genbase composition histogram\n if not os.path.isfile(reformatBhistFile):\n log.error(\"Bhist file not found: %s\", reformatBhistFile)\n return None, None, None, None, None\n\n ## data\n # 0 1 2 3 4 5\n ##Pos A C G T N\n # 0 0.15111 0.26714 0.51707 0.06412 0.00056\n # 1 0.20822 0.20773 0.25543 0.32795 0.00068\n\n rawDataMatrix = np.loadtxt(reformatBhistFile, delimiter='\\t', comments='#')\n assert len(rawDataMatrix[1][:]) == 6\n\n if isPairedEnd:\n rawDataR1 = rawDataMatrix[:readLength - 1][:]\n rawDataR2 = rawDataMatrix[readLength:][:]\n\n ## r1 ------------------------------------------------------------------\n fig, ax = plt.subplots()\n\n markerSize = 5.0\n lineWidth = 1.5\n p1 = ax.plot(rawDataR1[:, 0], rawDataR1[:, 5], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='N', alpha=0.5)\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction\", fontsize=12, alpha=0.5)\n ax.grid(color=\"gray\", linestyle=':')\n ax.set_xlim([0, readLength])\n\n ## Add tooltip\n labels = [\"%.5f\" % i for i in rawDataR1[:, 5]]\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=labels))\n\n pngFileR1 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_n_base_percent_r1.png\")\n htmlFileR1 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_n_base_percent_r1.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFileR1)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFileR1, dpi=fig.dpi)\n\n ## r2 ------------------------------------------------------------------\n fig, ax = plt.subplots()\n\n markerSize = 5.0\n lineWidth = 1.5\n p1 = ax.plot([(x - readLength) for x in rawDataR2[:, 0]], rawDataR2[:, 5], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='N', alpha=0.5)\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction\", fontsize=12, alpha=0.5)\n ax.grid(color=\"gray\", linestyle=':')\n ax.set_xlim([0, readLength])\n\n ## Add tooltip\n labels = [\"%.5f\" % i for i in rawDataR2[:, 5]]\n mpld3.plugins.connect(fig, 
mpld3.plugins.PointLabelTooltip(p1[0], labels=labels))\n\n plotPngPlotFileR2 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_n_base_percent_r2.png\")\n plotHtmlPlotFileR2 = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_n_base_percent_r2.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, plotHtmlPlotFileR2)\n\n ## Save Matplotlib plot in png format\n plt.savefig(plotPngPlotFileR2, dpi=fig.dpi)\n\n return reformatBhistFile, pngFileR1, htmlFileR1, plotPngPlotFileR2, plotHtmlPlotFileR2\n\n else:\n fig, ax = plt.subplots()\n\n markerSize = 5.0\n lineWidth = 1.5\n p1 = ax.plot(rawDataMatrix[:, 0], rawDataMatrix[:, 5], 'r', marker='o', markersize=markerSize, linewidth=lineWidth, label='N', alpha=0.5)\n\n ax.set_xlabel(\"Read Position\", fontsize=12, alpha=0.5)\n ax.set_ylabel(\"Fraction\", fontsize=12, alpha=0.5)\n ax.grid(color=\"gray\", linestyle=':')\n\n ## Add tooltip\n labels = [\"%.5f\" % i for i in rawDataMatrix[:, 5]]\n mpld3.plugins.connect(fig, mpld3.plugins.PointLabelTooltip(p1[0], labels=labels))\n\n pngFile = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_n_base_percent.png\")\n htmlFile = os.path.join(qualPath, sequnitFileNamePrefix + \".cycle_n_base_percent.html\")\n\n ## Save D3 interactive plot in html format\n mpld3.save_html(fig, htmlFile)\n\n ## Save Matplotlib plot in png format\n plt.savefig(pngFile, dpi=fig.dpi)\n\n return reformatBhistFile, pngFile, htmlFile, None, None\n\n\n\"\"\" For STEP12\nsciclone_sam2summary\n\n@param sam_file: input sam file\n@param count_file: stat file for writing\n\n@return retCode: success or failure\n@return count_file: output count file\n\n\"\"\"\n## Removed!\n##def sciclone_sam2summary(sam_file, log):\n\n\n\n\"\"\" For STEP12\nrun_rna_strandedness\n\n Title: run_rna_strandedness\n Function: Takes sam file generated from rna data set and determines\n mapping to sense and antisense strand\n Usage: run_rna_strandedness($sam_file, $log)\n Args: 1) sam file\n 2) log file object\n Returns: JGI_SUCCESS\n JGI_FAILURE\n Comments: None.\n\n@param sam_file\n\n@return retCode\n@return outputLogFile: log file\n@return outResultDict: stat value dict (to be added to readqc_stats.txt)\n\n\"\"\"\n## Removed!\n\n\n\n\"\"\" For STEP12\nseparate_paired_end_fq\n\n Title : separate_paired_end_fq\n Function : Given a fastq file, this function splits the fastq into\n read1 and read2 fastq file.\n Usage : sequence_lengths( $fastq, $read1_fq, $read2_fq, $log )\n Args : 1) The name of a fastq sequence file.\n 2) The name of the read1 fastq output file\n 3) The name of the read2 fastq output file\n 4) A JGI_Log object.\n Returns : JGI_SUCCESS: The fastq file was successfully separated.\n JGI_FAILURE: The fastq file could not be separated.\n Comments : For paired end fastq files only.\n\n\n sulsj\n\n - Added gzip'd fastq support\n ## TODO: need to write a fastq IO class\n\n@param fastq\n@param read1_outfile\n@param read2_outfile\n@param log\n\n@return retCode\n\n\"\"\"\n## Removed!\n\n\n\n\"\"\" For STEP12\n\nNOTE: Borrowed from alignment.py and updated to get lib name and isRna\n\nget_lib_info\n\nLook up the seqProjectId, bio_name from the library_info table\n- to look up the references\n\n@param sequnitFileName: name of the seq unit in the seq_units.sequnitFileName field\n\n@return seqProjectId\n@return ncbiOrganismName\n@return libraryName\n@return isRna\n\n\"\"\"\n\n\"\"\" For STEP14 & STEP15\nrun_blastplus_py\n\nCall jgi-rqc-pipeline/tools/run_blastplus.py\n\n\"\"\"\ndef run_blastplus_py(queryFastaFile, db, 
log):\n timeoutCmd = 'timeout' #RQCReadQcCommands.TIMEOUT_CMD\n blastOutFileNamePrefix = None\n # retCode = None\n\n outDir, exitCode = safe_dirname(queryFastaFile, log)\n queryFastaFileBaseName, exitCode = safe_basename(queryFastaFile, log)\n dbFileBaseName, exitCode = safe_basename(db, log)\n\n # runBlastnCmd = \"/global/homes/s/sulsj/work/bitbucket-repo/jgi-rqc-pipeline/tools/run_blastplus.py\" ## debug\n # runBlastnCmd = \"/global/dna/projectdirs/PI/rqc/prod/jgi-rqc-pipeline/tools/run_blastplus.py\"\n # runBlastnCmd = \"/global/homes/s/sulsj/work/bitbucket-repo/jgi-rqc-pipeline/tools/run_blastplus_taxserver.py\" ## debug\n runBlastnCmd = \"/global/dna/projectdirs/PI/rqc/prod/jgi-rqc-pipeline/tools/run_blastplus_taxserver.py\"\n\n blastOutFileNamePrefix = outDir + \"/megablast.\" + queryFastaFileBaseName + \".vs.\" + dbFileBaseName\n\n ## Should use jigsaw/2.6.0 for not checking database reference fasta file\n ## 07212016 Added -s to add lineage to the subject field\n cmd = \"%s 21600s %s -d %s -o %s -q %s -s > %s.log 2>&1 \" % (timeoutCmd, runBlastnCmd, db, outDir, queryFastaFile, blastOutFileNamePrefix)\n\n _, _, exitCode = run_sh_command(cmd, True, log, True)\n\n ## Added timeout to terminate blast run manually after 6hrs\n ## If exitCode == 124 or exitCode = 143, this means the process exits with timeout.\n ## Timeout exits with 128 plus the signal number. 143 = 128 + 15 (SGITERM)\n ## Ref) http://stackoverflow.com/questions/4189136/waiting-for-a-command-to-return-in-a-bash-script\n ## timeout man page ==> If the command times out, and --preserve-status is not set, then exit with status 124.\n ##\n if exitCode in (124, 143):\n ## BLAST timeout\n ## Exit with success so that the blast step can be skipped.\n log.warning(\"##################################\")\n log.warning(\"BLAST TIMEOUT. JUST SKIP THE STEP.\")\n log.warning(\"##################################\")\n return RQCExitCodes.JGI_FAILURE, -143\n\n elif exitCode != 0:\n log.error(\"Failed to run_blastplus_py. Exit code != 0\")\n return RQCExitCodes.JGI_FAILURE, None\n\n else:\n log.info(\"run_blastplus_py complete.\")\n\n return RQCExitCodes.JGI_SUCCESS, blastOutFileNamePrefix\n\n\n\"\"\"===========================================================================\n checkpoint_step_wrapper\n\n\"\"\"\ndef checkpoint_step_wrapper(status):\n assert RQCReadQcConfig.CFG[\"status_file\"]\n checkpoint_step(RQCReadQcConfig.CFG[\"status_file\"], status)\n\n\n\"\"\"===========================================================================\n get the file content\n\"\"\"\ndef get_analysis_file_contents(fullPath):\n retCode = \"\"\n\n if os.path.isfile(fullPath):\n with open(fullPath, \"r\") as FH:\n retCode = FH.readlines()\n return retCode\n else:\n return \"file not found\"\n\n\n'''===========================================================================\n file_name_trim\n\n'''\ndef file_name_trim(fname):\n return fname.replace(\".gz\", \"\").replace(\".fastq\", \"\").replace(\".fasta\", \"\")\n\n\n## EOF\n" ]
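Note on the timeout handling in run_blastplus_py above: the helper wraps blastn in GNU timeout and treats exit codes 124 and 143 (128 + SIGTERM's 15) as "blast overtime", skipping the step rather than failing the pipeline. A minimal sketch of that pattern, assuming GNU coreutils timeout is on PATH; the command and limit below are placeholders, not the pipeline's real values:

import subprocess

def run_with_timeout(cmd, limit="2s"):
    """Run cmd under GNU timeout and distinguish overtime from real failure.

    timeout exits with 124 when the limit is hit; 143 (= 128 + 15) appears
    when the child dies from the forwarded SIGTERM, so both mean overtime.
    """
    proc = subprocess.run(["timeout", limit] + list(cmd))
    if proc.returncode in (124, 143):
        return "overtime"       # skip the step, as run_blastplus_py does
    if proc.returncode != 0:
        return "failure"
    return "success"

print(run_with_timeout(["sleep", "30"]))  # -> "overtime" after 2 seconds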
[ [ "matplotlib.ticker.MultipleLocator", "numpy.log", "matplotlib.use", "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf", "numpy.concatenate", "matplotlib.pyplot.subplots_adjust", "matplotlib.ticker.FormatStrFormatter", "scipy.optimize.curve_fit", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
lnfjpt/GraphScope
[ "917146f86d8387302a2e1de6963115e7568bf3ee" ]
[ "python/setup.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\n\nfrom setuptools import Extension\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.develop import develop\nfrom setuptools.command.sdist import sdist\nfrom wheel.bdist_wheel import bdist_wheel\n\nrepo_root = os.path.dirname(os.path.abspath(__file__))\n\n\nclass BuildProto(Command):\n description = \"build protobuf file\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n subprocess.check_call(\n [\n sys.executable,\n os.path.join(\n repo_root,\n \"..\",\n \"python\",\n \"graphscope\",\n \"proto\",\n \"proto_generator.py\",\n ),\n repo_root,\n \"--python\",\n ],\n env=os.environ.copy(),\n )\n\n\nclass FormatAndLint(Command):\n description = \"format and lint code\"\n user_options = []\n\n user_options = [(\"inplace=\", \"i\", \"Run code formatter and linter inplace\")]\n\n def initialize_options(self):\n self.inplace = False\n\n def finalize_options(self):\n if self.inplace or self.inplace == \"True\" or self.inplace == \"true\":\n self.inplace = True\n else:\n self.inplace = False\n\n def run(self):\n if self.inplace:\n subprocess.check_call([sys.executable, \"-m\", \"isort\", \".\"], cwd=repo_root)\n subprocess.check_call([sys.executable, \"-m\", \"black\", \".\"], cwd=repo_root)\n subprocess.check_call([sys.executable, \"-m\", \"flake8\", \".\"], cwd=repo_root)\n else:\n subprocess.check_call(\n [sys.executable, \"-m\", \"isort\", \"--check\", \"--diff\", \".\"], cwd=repo_root\n )\n subprocess.check_call(\n [sys.executable, \"-m\", \"black\", \"--check\", \"--diff\", \".\"], cwd=repo_root\n )\n subprocess.check_call([sys.executable, \"-m\", \"flake8\", \".\"], cwd=repo_root)\n\n\nclass CustomBuildPy(build_py):\n def run(self):\n self.run_command(\"build_proto\")\n build_py.run(self)\n\n\nclass CustomBuildExt(build_ext):\n def run(self):\n self.run_command(\"build_proto\")\n build_ext.run(self)\n\n\nclass CustomDevelop(develop):\n def run(self):\n develop.run(self)\n self.run_command(\"build_proto\")\n\n\nclass CustomSDist(sdist):\n def run(self):\n self.run_command(\"build_proto\")\n sdist.run(self)\n\n\nclass CustomBDistWheel(bdist_wheel):\n def finalize_options(self):\n super(CustomBDistWheel, self).finalize_options()\n self.root_is_pure = False\n\n def run(self):\n if sys.platform == \"darwin\":\n graphlearn_shared_lib = \"libgraphlearn_shared.dylib\"\n else:\n graphlearn_shared_lib = \"libgraphlearn_shared.so\"\n if not os.path.isfile(\n os.path.join(\n repo_root,\n \"..\",\n \"learning_engine\",\n \"graph-learn\",\n \"built\",\n \"lib\",\n graphlearn_shared_lib,\n )\n ):\n raise ValueError(\"You must build the 
graphlearn library at first\")\n self.run_command(\"build_proto\")\n bdist_wheel.run(self)\n\n\nwith open(os.path.join(repo_root, \"..\", \"README.md\"), \"r\", encoding=\"utf-8\") as fp:\n long_description = fp.read()\n\n\ndef parsed_reqs():\n with open(os.path.join(repo_root, \"requirements.txt\"), \"r\", encoding=\"utf-8\") as fp:\n return fp.read().splitlines()\n\n\ndef parsed_dev_reqs():\n with open(\n os.path.join(repo_root, \"requirements-dev.txt\"), \"r\", encoding=\"utf-8\"\n ) as fp:\n return fp.read().splitlines()\n\n\ndef find_graphscope_packages():\n packages = []\n\n # add graphscope\n for pkg in find_packages(\".\"):\n packages.append(pkg)\n\n # add graphlearn\n for pkg in find_packages(\"../learning_engine/graph-learn\"):\n packages.append(\"graphscope.learning.%s\" % pkg)\n\n return packages\n\n\ndef resolve_graphscope_package_dir():\n package_dir = {\n \"graphscope\": \"graphscope\",\n \"graphscope.learning.examples\": \"../learning_engine/graph-learn/examples\",\n \"graphscope.learning.graphlearn\": \"../learning_engine/graph-learn/graphlearn\",\n }\n return package_dir\n\n\ndef parsed_packge_data():\n return {\n \"graphscope\": [\n \"VERSION\",\n ],\n }\n\n\ndef build_learning_engine():\n import numpy\n\n ROOT_PATH = os.path.abspath(\n os.path.join(repo_root, \"..\", \"learning_engine\", \"graph-learn\")\n )\n\n include_dirs = []\n library_dirs = []\n libraries = []\n extra_compile_args = []\n extra_link_args = []\n\n include_dirs.append(\"/usr/local/include\")\n include_dirs.append(ROOT_PATH)\n include_dirs.append(ROOT_PATH + \"/graphlearn/include\")\n include_dirs.append(ROOT_PATH + \"/built\")\n include_dirs.append(ROOT_PATH + \"/third_party/pybind11/pybind11/include\")\n include_dirs.append(ROOT_PATH + \"/third_party/glog/build\")\n include_dirs.append(ROOT_PATH + \"/third_party/protobuf/build/include\")\n include_dirs.append(numpy.get_include())\n # mac M1 support\n include_dirs.append(\"/opt/homebrew/include\")\n\n library_dirs.append(ROOT_PATH + \"/built/lib\")\n\n extra_compile_args.append(\"-D__USE_XOPEN2K8\")\n extra_compile_args.append(\"-std=c++11\")\n extra_compile_args.append(\"-fvisibility=hidden\")\n\n libraries.append(\"graphlearn_shared\")\n\n sources = [\n ROOT_PATH + \"/graphlearn/python/py_export.cc\",\n ROOT_PATH + \"/graphlearn/python/py_client.cc\",\n ]\n ext = Extension(\n \"graphscope.learning.graphlearn.pywrap_graphlearn\",\n sources,\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n )\n return [ext]\n\n\ndef parse_version(root, **kwargs):\n \"\"\"\n Parse function for setuptools_scm that first tries to read '../VERSION' file\n to get a version number.\n \"\"\"\n from setuptools_scm.git import parse\n from setuptools_scm.version import meta\n\n version_file = os.path.join(repo_root, \"..\", \"VERSION\")\n if os.path.isfile(version_file):\n with open(version_file, \"r\", encoding=\"utf-8\") as fp:\n return meta(fp.read().strip())\n return parse(root, **kwargs)\n\n\nsetup(\n name=\"graphscope-client\",\n description=\"GraphScope: A One-Stop Large-Scale Graph Computing System from Alibaba\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Alibaba Damo Academy\",\n author_email=\"[email protected]\",\n url=\"https://github.com/alibaba/GraphScope\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n 
\"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: System :: Distributed Computing\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n keywords=\"Graph, Large-Scale, Distributed Computing\",\n use_scm_version={\n \"root\": repo_root,\n \"parse\": parse_version,\n },\n setup_requires=[\"setuptools_scm>=5.0.0\", \"grpcio\", \"grpcio-tools\", \"numpy\"],\n package_dir=resolve_graphscope_package_dir(),\n packages=find_graphscope_packages(),\n package_data=parsed_packge_data(),\n ext_modules=build_learning_engine(),\n cmdclass={\n \"build_ext\": CustomBuildExt,\n \"build_proto\": BuildProto,\n \"build_py\": CustomBuildPy,\n \"bdist_wheel\": CustomBDistWheel,\n \"sdist\": CustomSDist,\n \"develop\": CustomDevelop,\n \"lint\": FormatAndLint,\n },\n install_requires=parsed_reqs(),\n extras_require={\n \"dev\": parsed_dev_reqs(),\n },\n project_urls={\n \"Documentation\": \"https://graphscope.io/docs\",\n \"Source\": \"https://github.com/alibaba/GraphScope\",\n \"Tracker\": \"https://github.com/alibaba/GraphScope/issues\",\n },\n)\n\nif os.name == \"nt\":\n\n class _ReprableString(str):\n def __repr__(self) -> str:\n return self\n\n raise RuntimeError(\n _ReprableString(\n \"\"\"\n ====================================================================\n\n GraphScope doesn't support Windows natively, please try to install graphscope in WSL\n\n https://docs.microsoft.com/en-us/windows/wsl/install\n\n with pip.\n\n ====================================================================\"\"\"\n )\n )\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
minrk/bokeh
[ "ae4366e508355afc06b5fc62f1ee399635ab909d", "8bf4a6f626c505dcb9c525409c0f74debf4499d7", "ae4366e508355afc06b5fc62f1ee399635ab909d", "ae4366e508355afc06b5fc62f1ee399635ab909d", "ae4366e508355afc06b5fc62f1ee399635ab909d", "8bf4a6f626c505dcb9c525409c0f74debf4499d7" ]
[ "examples/plotting/server/vector.py", "bokeh/tests/web/two.py", "examples/glyphs/glyph1.py", "examples/glyphs/line2.py", "examples/plotting/cloud/line.py", "bokeh/tests/chaco_gg/tools.py" ]
[ "\nimport numpy as np\nfrom scipy.integrate import odeint\n\nfrom bokeh.plotting import *\n\ndef streamlines(x, y, u, v, density=1):\n '''Returns streamlines of a vector flow.\n\n * x and y are 1d arrays defining an *evenly spaced* grid.\n * u and v are 2d arrays (shape [y,x]) giving velocities.\n * density controls the closeness of the streamlines. For different\n densities in each direction, use a tuple or list [densityx, densityy].\n\n '''\n\n ## Set up some constants - size of the grid used.\n NGX = len(x)\n NGY = len(y)\n\n ## Constants used to convert between grid index coords and user coords.\n DX = x[1]-x[0]\n DY = y[1]-y[0]\n XOFF = x[0]\n YOFF = y[0]\n\n ## Now rescale velocity onto axes-coordinates\n u = u / (x[-1]-x[0])\n v = v / (y[-1]-y[0])\n speed = np.sqrt(u*u+v*v)\n ## s (path length) will now be in axes-coordinates, but we must\n ## rescale u for integrations.\n u *= NGX\n v *= NGY\n ## Now u and v in grid-coordinates.\n\n NBX = int(30*density)\n NBY = int(30*density)\n blank = np.zeros((NBY,NBX))\n\n bx_spacing = NGX/float(NBX-1)\n by_spacing = NGY/float(NBY-1)\n\n def blank_pos(xi, yi):\n return int((xi / bx_spacing) + 0.5), \\\n int((yi / by_spacing) + 0.5)\n\n def value_at(a, xi, yi):\n if type(xi) == np.ndarray:\n x = xi.astype(np.int)\n y = yi.astype(np.int)\n else:\n x = np.int(xi)\n y = np.int(yi)\n a00 = a[y,x]\n a01 = a[y,x+1]\n a10 = a[y+1,x]\n a11 = a[y+1,x+1]\n xt = xi - x\n yt = yi - y\n a0 = a00*(1-xt) + a01*xt\n a1 = a10*(1-xt) + a11*xt\n return a0*(1-yt) + a1*yt\n\n def rk4_integrate(x0, y0):\n ## This function does RK4 forward and back trajectories from\n ## the initial conditions, with the odd 'blank array'\n ## termination conditions. TODO tidy the integration loops.\n\n def f(xi, yi):\n dt_ds = 1./value_at(speed, xi, yi)\n ui = value_at(u, xi, yi)\n vi = value_at(v, xi, yi)\n return ui*dt_ds, vi*dt_ds\n\n def g(xi, yi):\n dt_ds = 1./value_at(speed, xi, yi)\n ui = value_at(u, xi, yi)\n vi = value_at(v, xi, yi)\n return -ui*dt_ds, -vi*dt_ds\n\n check = lambda xi, yi: xi>=0 and xi<NGX-1 and yi>=0 and yi<NGY-1\n\n bx_changes = []\n by_changes = []\n\n ## Integrator function\n def rk4(x0, y0, f):\n ds = 0.01 #min(1./NGX, 1./NGY, 0.01)\n stotal = 0\n xi = x0\n yi = y0\n xb, yb = blank_pos(xi, yi)\n xf_traj = []\n yf_traj = []\n while check(xi, yi):\n # Time step. First save the point.\n xf_traj.append(xi)\n yf_traj.append(yi)\n # Next, advance one using RK4\n try:\n k1x, k1y = f(xi, yi)\n k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)\n k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)\n k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)\n except IndexError:\n # Out of the domain on one of the intermediate steps\n break\n xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.\n yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.\n # Final position might be out of the domain\n if not check(xi, yi): break\n stotal += ds\n # Next, if s gets to thres, check blank.\n new_xb, new_yb = blank_pos(xi, yi)\n if new_xb != xb or new_yb != yb:\n # New square, so check and colour. Quit if required.\n if blank[new_yb,new_xb] == 0:\n blank[new_yb,new_xb] = 1\n bx_changes.append(new_xb)\n by_changes.append(new_yb)\n xb = new_xb\n yb = new_yb\n else:\n break\n if stotal > 2:\n break\n return stotal, xf_traj, yf_traj\n\n integrator = rk4\n\n sf, xf_traj, yf_traj = integrator(x0, y0, f)\n sb, xb_traj, yb_traj = integrator(x0, y0, g)\n stotal = sf + sb\n x_traj = xb_traj[::-1] + xf_traj[1:]\n y_traj = yb_traj[::-1] + yf_traj[1:]\n\n ## Tests to check length of traj. 
Remember, s in units of axes.\n if len(x_traj) < 1: return None\n if stotal > .2:\n initxb, inityb = blank_pos(x0, y0)\n blank[inityb, initxb] = 1\n return x_traj, y_traj\n else:\n for xb, yb in zip(bx_changes, by_changes):\n blank[yb, xb] = 0\n return None\n\n ## A quick function for integrating trajectories if blank==0.\n trajectories = []\n def traj(xb, yb):\n if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:\n return\n if blank[yb, xb] == 0:\n t = rk4_integrate(xb*bx_spacing, yb*by_spacing)\n if t != None:\n trajectories.append(t)\n\n ## Now we build up the trajectory set. I've found it best to look\n ## for blank==0 along the edges first, and work inwards.\n for indent in range((max(NBX,NBY))//2):\n for xi in range(max(NBX,NBY)-2*indent):\n traj(xi+indent, indent)\n traj(xi+indent, NBY-1-indent)\n traj(indent, xi+indent)\n traj(NBX-1-indent, xi+indent)\n\n xs = [np.array(t[0])*DX+XOFF for t in trajectories]\n ys = [np.array(t[1])*DY+YOFF for t in trajectories]\n\n return xs, ys\n\n\nxx = np.linspace(-3, 3, 100)\nyy = np.linspace(-3, 3, 100)\n\nY, X = np.meshgrid(xx, yy)\nU = -1 - X**2 + Y\nV = 1 + X - Y**2\nspeed = np.sqrt(U*U + V*V)\ntheta = np.arctan(V/U)\n\nx0 = X[::2, ::2].flatten()\ny0 = Y[::2, ::2].flatten()\nlength = speed[::2, ::2].flatten()/40\nangle = theta[::2, ::2].flatten()\nx1 = x0 + length * np.cos(angle)\ny1 = y0 + length * np.sin(angle)\n\nxs, ys = streamlines(xx, yy, U.T, V.T, density=2)\n\ncm = np.array([\"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#1D91C0\", \"#225EA8\", \"#0C2C84\"])\nix = ((length-length.min())/(length.max()-length.min())*5).astype('int')\ncolors = cm[ix]\n\noutput_server(\"vector\")\n\nfigure(tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\")\n\nsegment(x0, y0, x1, y1,\n line_color=colors, line_width=2,\n)\nmulti_line(xs, ys,\n line_color=\"#ee6666\", line_width=2, line_alpha=0.8,\n name=\"vector example\"\n)\n\n\nshow() # open a browser\n", "import pandas\nimport bokeh\n\nfrom bokeh.functional import *\n\ndf = pandas.read_csv(\"../auto-mpg.csv\")\n\ns = ggplot(df, aes(\"displ\", \"mpg\", color=\"red\")) + geom_point()\n\nu = ggplot(df, aes(\"hp\", \"weight\")) + \\\n aes(color=\"green\") + \\\n geom_point()\n\nwith open(\"two.1.html\", \"w\") as f:\n f.write(s.to_html())\nwith open(\"two.2.html\", \"w\") as f:\n f.write(u.to_html())\n\n", "from __future__ import print_function\n\nfrom numpy import pi, arange, sin, cos\nimport numpy as np\nimport os.path\n\nfrom bokeh.objects import (Plot, DataRange1d, LinearAxis,\n ColumnDataSource, Glyph, PanTool, WheelZoomTool)\nfrom bokeh.glyphs import Circle\nfrom bokeh import session\n\nx = arange(-2*pi, 2*pi, 0.1)\ny = sin(x)\n\n\nsource = ColumnDataSource(\n data=dict(x=x, y=y)\n)\n\nxdr = DataRange1d(sources=[source.columns(\"x\")])\nydr = DataRange1d(sources=[source.columns(\"y\")])\n\ncircle = Circle(x=\"x\", y=\"y\", fill_color=\"red\", size=5, line_color=\"black\")\n\nglyph_renderer = Glyph(\n data_source = source,\n xdata_range = xdr,\n ydata_range = ydr,\n glyph = circle,\n )\n\nplot = Plot(x_range=xdr, y_range=ydr, data_sources=[source],\n border= 80)\nxaxis = LinearAxis(plot=plot, dimension=0, location=\"min\")\nyaxis = LinearAxis(plot=plot, dimension=1, location=\"min\")\n\npantool = PanTool(dataranges = [xdr, ydr], dimensions=[\"width\",\"height\"])\nwheelzoomtool = WheelZoomTool(dataranges=[xdr,ydr], dimensions=(\"width\",\"height\"))\n\nplot.renderers.append(glyph_renderer)\nplot.tools = [pantool,wheelzoomtool]\n\nsess = session.HTMLFileSession(\"glyph1.html\")\nsess.add(plot, 
recursive=True)\nsess.plotcontext.children.append(plot)\nsess.save(js=\"absolute\", css=\"absolute\")\nsess.dumpjson(file=\"glyph1.json\")\nprint(\"Wrote %s\" % sess.filename)\n\nif __name__ == \"__main__\":\n sess.view()\n", "from __future__ import print_function\n\nimport sys\nimport os.path\nimport requests\n\nfrom numpy import pi, arange, sin, cos\nimport numpy as np\n\nfrom bokeh.objects import (Plot, DataRange1d, Range1d, LinearAxis, Grid,\n ColumnDataSource, Glyph, PanTool,\n WheelZoomTool)\nfrom bokeh.glyphs import Line\nfrom bokeh import session\n\n# The Line glyph needs arrays of arrays of X and Y, so use newaxis.\nx = arange(-2*pi, 2*pi, 0.1)\ny = sin(x)\n\nsource = ColumnDataSource(\n data=dict(x=x, y=y)\n)\n\n#xdr = DataRange1d(sources=[source.columns(\"xs\")])\n#ydr = DataRange1d(sources=[source.columns(\"ys\")])\n\nxdr = Range1d(start=-2*pi, end=2*pi)\nydr = Range1d(start=-1, end=1)\n\nline = Line(x=\"x\", y=\"y\", line_color=\"blue\", line_width=2)\nglyph_renderer = Glyph(\n data_source = source,\n xdata_range = xdr,\n ydata_range = ydr,\n glyph = line)\n\npantool = PanTool(dataranges=[xdr, ydr], dimensions=(\"width\",\"height\"))\nwheelzoomtool = WheelZoomTool(dataranges=[xdr,ydr], dimensions=(\"width\",\"height\"))\n\nplot = Plot(x_range=xdr, y_range=ydr, data_sources=[source],\n border= 80)\nxaxis = LinearAxis(plot=plot, dimension=0)\nyaxis = LinearAxis(plot=plot, dimension=1)\nxgrid = Grid(plot=plot, dimension=0)\nygrid = Grid(plot=plot, dimension=1)\n\nplot.renderers.append(glyph_renderer)\nplot.tools = [pantool,wheelzoomtool]\n\ndemo_name = \"line2\"\nif len(sys.argv) > 1 and sys.argv[1] == \"server\":\n try:\n sess = session.PlotServerSession(\n serverloc=\"http://localhost:5006\",\n username=\"defaultuser\",\n userapikey=\"nokey\")\n except requests.exceptions.ConnectionError:\n print(\"ERROR: This example requires the plot server. Please make sure plot server is running, by executing 'bokeh-server'\")\n sys.exit(1)\n\n sess.use_doc(demo_name)\n sess.add(plot, recursive=True)\n sess.plotcontext.children.append(plot)\n sess.plotcontext._dirty = True\n sess.store_all()\n print(\"Stored to document\", demo_name)\nelse:\n sess = session.HTMLFileSession(demo_name + \".html\")\n sess.add(plot, recursive=True)\n sess.plotcontext.children.append(plot)\n sess.save(js=\"absolute\", css=\"absolute\")\n print(\"Wrote %s\" % sess.filename)\n", "import numpy as np\nfrom bokeh.plotting import *\n\nN = 80\n\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\n\noutput_cloud(\"line\")\n\nline(x,y, color=\"#0000FF\", tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\")\n\nshow()\n", "import pandas\nimport bokeh\n\nfrom bokeh.chaco_gg.functional import *\ndf = pandas.read_csv(\"../auto-mpg.csv\")\n\ns = ggplot(df, aes(\"displ\", \"mpg\", color=\"red\")) + geom_point()\ns += tool_pan(\"right\")\ns += tool_zoom()\ns += tool_regression()\n\nu = ggplot(df, aes(\"hp\", \"weight\")) + aes(color=\"green\") + geom_point()\nu += tool_pan()\nu += tool_zoom()\n\n\n" ]
[ [ "numpy.sqrt", "numpy.arctan", "numpy.meshgrid", "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.int", "numpy.array", "numpy.zeros" ], [ "pandas.read_csv" ], [ "numpy.arange", "numpy.sin" ], [ "numpy.arange", "numpy.sin" ], [ "numpy.linspace", "numpy.sin" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
BlenderWang9487/DummyML
[ "42177c45778d79d4200d0e039dafc67ab29b4a8b" ]
[ "tests/test_k_means.py" ]
[ "import dummyml\nimport numpy as np\nimport pytest\n\ndef test_fit_predict():\n input_dim = 784\n k = 10\n dataset_size = 60\n kms = dummyml.k_means(input_dim,k)\n\n x = np.random.rand(dataset_size,input_dim)\n y = np.random.randint(0,k,size=dataset_size, dtype=np.int32)\n\n for i in range(10):\n kms.fit(x,y)\n \n assert kms(np.random.rand(input_dim)) < k" ]
[ [ "numpy.random.rand", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mrinal18/fusion
[ "34e563f2e50139385577c3880c5de11f8a73f220", "34e563f2e50139385577c3880c5de11f8a73f220", "34e563f2e50139385577c3880c5de11f8a73f220", "34e563f2e50139385577c3880c5de11f8a73f220" ]
[ "fusion/model/ae/tests/test_ae.py", "fusion/dataset/utils.py", "fusion/architecture/projection_head/conv_head.py", "fusion/architecture/dcgan/dcgan_decoder.py" ]
[ "import torch\nimport unittest\n\nfrom fusion.model import AE\nclass TestAE(unittest.TestCase):\n\n def test_forward(self):\n # define parameters\n dim_in = 1\n dim_l = 4\n input_size = 32\n architecture = 'DcganAutoEncoder'\n architecture_params = dict(\n input_size = input_size,\n dim_in = [dim_in],\n dim_h = 2,\n dim_l = dim_l\n )\n sources = [0]\n batch_size = 2\n # create model\n model = AE(sources, architecture, architecture_params)\n # create input\n x = [torch.rand(batch_size, dim_in, input_size, input_size)]\n # forward pass\n output = model(x)\n # check outputs\n for _, z in output.z.items():\n self.assertEqual(z.size(1), dim_l)\n self.assertEqual(output.attrs['x'][0].size(), x[0].size())\n self.assertEqual(output.attrs['x_hat'][0].size(), x[0].size())\n\n\nif __name__ == '__main__':\n unittest.main()\n", "import numpy as np\nimport random\nimport torch\n\n\ndef seed_worker(worker_id):\n worker_seed = (torch.initial_seed() + worker_id) % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "from typing import Optional, Type\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom fusion.architecture import ABaseArchitecture\nfrom fusion.architecture.abasearchitecture import TActivation, TConv, TNorm\nfrom fusion.architecture.base_block import BaseConvLayer\n\n\nclass ConvHead(ABaseArchitecture):\n def __init__(\n self,\n dim_in: int,\n dim_l: int,\n dim_h: int,\n num_h_layers: int = 1,\n conv_layer_class: TConv = nn.Conv2d,\n norm_layer_class: TNorm = nn.BatchNorm2d,\n activation_class: TActivation = nn.ReLU,\n weights_initialization_type: str = 'xavier_uniform',\n use_bias: bool = False\n ):\n \"\"\"\n Initialization of Convolution head model\n Args:\n dim_in: The number of input channels\n dim_l: The number of latent dimensions\n dim_h: The number of feature channels for the convolutional layer. 
It is kept fixed for all hidden layers\n num_h_layers: The number of convolutional layers\n conv_layer_class: The type of convolutional layer to use, default=nn.Conv2d\n norm_layer_class: The type of normalization layer to use, default=nn.BatchNorm2d\n activation_class: The type of non-linear activation function to use, default=nn.LeakyReLU\n weights_initialization_type: The weight initialization type to use, default='xavier_uniform'\n use_bias: Flag of use bias in convolutional layer\n\n Returns:\n Convolution head model\n \"\"\"\n super().__init__(\n conv_layer_class=conv_layer_class,\n norm_layer_class=norm_layer_class,\n activation_class=activation_class,\n weights_initialization_type=weights_initialization_type\n )\n self._dim_in = dim_in\n self._dim_l = dim_l\n self._bn_embedding = norm_layer_class(dim_l, affine=True)\n self._convolutional_path = nn.ModuleList([])\n # add first layer\n self._convolutional_path.append(\n BaseConvLayer(\n conv_layer_class, {\n 'in_channels': self._dim_in, 'out_channels': dim_h,\n 'kernel_size': 1, 'bias': use_bias,\n },\n norm_layer_class=norm_layer_class, norm_layer_args={\n 'num_features': dim_h\n },\n activation_class=activation_class, activation_args={\n 'inplace': True\n },\n weights_initialization_type=weights_initialization_type\n )\n )\n for i in range(1, num_h_layers):\n self._convolutional_path.append(\n BaseConvLayer(\n conv_layer_class, {\n 'in_channels': dim_h, 'out_channels': dim_h,\n 'kernel_size': 1, 'bias': use_bias,\n },\n norm_layer_class=norm_layer_class, norm_layer_args={\n 'num_features': dim_h\n },\n activation_class=activation_class, activation_args={\n 'inplace': True\n },\n weights_initialization_type=weights_initialization_type\n )\n )\n # add last layer\n self._convolutional_path.append(\n BaseConvLayer(\n conv_layer_class, {\n 'in_channels': dim_h, 'out_channels': self._dim_l,\n 'kernel_size': 1, 'bias': use_bias,\n },\n weights_initialization_type=weights_initialization_type\n )\n )\n\n self._identity_shortcut = BaseConvLayer(\n conv_layer_class, {\n 'in_channels': dim_in, 'out_channels': dim_l,\n 'kernel_size': 1, 'bias': use_bias,\n },\n weights_initialization_type='skip'\n )\n\n def init_weights(self):\n \"\"\"\n Method for initialization weights\n Return:\n Convolution head model with initialization weights\n \"\"\"\n # initialization of the convolutional path\n for layer in self._convolutional_path:\n layer.init_weights()\n # initialization of identity path\n # according to AMDIM implementation\n # https://github.com/Philip-Bachman/amdim-public/blob/8754ae149ed28da8066f696f95ba4ca0e3ffebd8/model.py#L392\n # initialize shortcut to be like identity (if possible)\n if self._dim_l >= self._dim_in:\n eye_mask = None\n if self._conv_layer_class is nn.Conv3d:\n eye_mask = torch.zeros(\n self._dim_l, self._dim_in, 1, 1, 1, dtype=bool)\n for i in range(self._dim_in):\n eye_mask[i, i, 0, 0, 0] = 1\n elif self._conv_layer_class is nn.Conv2d:\n eye_mask = torch.zeros(\n self._dim_l, self._dim_in, 1, 1, dtype=bool)\n for i in range(self._dim_in):\n eye_mask[i, i, 0, 0] = 1\n else:\n raise NotImplementedError\n self._identity_shortcut._layer[0].weight.data.uniform_(-0.01, 0.01)\n self._identity_shortcut._layer[0].weight.data.masked_fill_(eye_mask, 1.0)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"\n Forward method of Convolution head model\n Args:\n x: input tensor\n Returns:\n x\n \"\"\"\n identity, _ = self._identity_shortcut(x)\n for layer in self._convolutional_path:\n x, _ = layer(x)\n x = self._bn_embedding(x + 
identity)\n return x\n", "from typing import Dict, Tuple, Type\n\nfrom torch import Tensor\nimport torch.nn as nn\n\nfrom fusion.architecture import ABaseArchitecture\nfrom fusion.architecture.base_block import BaseConvLayer, Unflatten\n\n\n\n\nclass DcganDecoder(ABaseArchitecture):\n def __init__(\n self,\n dim_in: int,\n dim_h: int,\n dim_l: int,\n dim_cls=None,\n input_size: int = 32,\n input_dim: int = 2,\n conv_layer_class: Type[nn.modules.conv._ConvNd] = nn.ConvTranspose2d,\n norm_layer_class: Type[nn.modules.batchnorm._BatchNorm] = nn.BatchNorm2d,\n activation_class: Type[nn.Module] = nn.ReLU,\n weights_initialization_type: str = 'xavier_uniform',\n ):\n \"\"\"\n Class of DCGAN Decoder\n Args:\n dim_in: The number of input channels\n dim_h: The number of feature channels for the last transposed convolutional layer,\n the number of feature channels are halved after for each consecutive transposed convolutional layer after the first\n dim_l: The number of latent dimensions\n dim_cls: A list of scalars, where each number should correspond to the output width for one of the convolutional layers.\n The information between latent variable z and the convolutional feature maps width widths in dim_cls are maximized.\n If dim_cls=None, the information between z and none of the convolutional feature maps is maximized, default=None\n input_size: The input width and height of the image, default=32\n input_dim: The number of input dimensions, e.g. an image is 2-dimensional (input_dim=2) and a volume is 3-dimensional (input_dim=3), default=2\n conv_layer_class: The type of transposed convolutional layer to use, default=nn.ConvTranspose2d\n norm_layer_class: The type of normalization layer to use, default=nn.BatchNorm2d\n activation_class: The type of non-linear activation function to use, default=nn.ReLU\n weights_initialization_type: The weight initialization type to use, default='xavier_uniform'\n Return:\n Class of DCGAN decoder model\n \"\"\"\n super().__init__(\n conv_layer_class=conv_layer_class,\n norm_layer_class=norm_layer_class,\n activation_class=activation_class,\n weights_initialization_type=weights_initialization_type\n )\n self._dim_in = dim_in\n self._dim_h = dim_h\n self._dim_l = dim_l\n self._dim_cls = dim_cls\n self._input_size = input_size\n self._unflatten = Unflatten(input_dim=input_dim)\n self._layers: nn.ModuleList = nn.ModuleList([])\n self._construct()\n\n def _construct(self):\n if self._input_size == 64:\n self._layers.append(\n BaseConvLayer(\n self._conv_layer_class, {\n 'in_channels': self._dim_l, 'out_channels': 8 * self._dim_h,\n 'kernel_size': 4, 'stride': 2, 'padding': 0, 'bias': False\n },\n norm_layer_class= self._norm_layer_class, norm_layer_args={\n 'num_features': 8 * self._dim_h\n },\n activation_class= self._activation_class, activation_args={\n 'inplace': True\n }\n )\n )\n self._layers.append(\n BaseConvLayer(\n self._conv_layer_class, {\n 'in_channels': 8 * self._dim_h, 'out_channels': 4 * self._dim_h,\n 'kernel_size': 4, 'stride': 2, 'padding': 1, 'bias': False\n },\n norm_layer_class=self._norm_layer_class, norm_layer_args={\n 'num_features': 4 * self._dim_h\n },\n activation_class=self._activation_class, activation_args={\n 'inplace': True\n }\n )\n )\n elif self._input_size == 32:\n self._layers.append(\n BaseConvLayer(\n self._conv_layer_class, {\n 'in_channels': self._dim_l, 'out_channels': 4 * self._dim_h,\n 'kernel_size': 4, 'stride': 2, 'padding': 0, 'bias': False\n },\n norm_layer_class=self._norm_layer_class, norm_layer_args={\n 
'num_features': 4 * self._dim_h\n },\n activation_class=self._activation_class, activation_args={\n 'inplace': True\n }\n )\n )\n else:\n raise NotImplementedError(\"DCGAN only supports input square images ' + \\\n 'with size 32, 64 in current implementation.\")\n\n self._layers.append(\n BaseConvLayer(\n self._conv_layer_class, {\n 'in_channels': 4 * self._dim_h, 'out_channels': 2 * self._dim_h,\n 'kernel_size': 4, 'stride': 2, 'padding': 1, 'bias': False\n },\n norm_layer_class=self._norm_layer_class, norm_layer_args={\n 'num_features': 2 * self._dim_h\n },\n activation_class=self._activation_class, activation_args={\n 'inplace': True\n }\n )\n )\n self._layers.append(\n BaseConvLayer(\n self._conv_layer_class, {\n 'in_channels': 2 * self._dim_h, 'out_channels': self._dim_h,\n 'kernel_size': 4, 'stride': 2, 'padding': 1, 'bias': False\n },\n norm_layer_class=self._norm_layer_class, norm_layer_args={\n 'num_features': self._dim_h\n },\n activation_class=self._activation_class, activation_args={\n 'inplace': True\n }\n )\n )\n self._layers.append(\n BaseConvLayer(\n self._conv_layer_class, {\n 'in_channels': self._dim_h, 'out_channels': self._dim_in,\n 'kernel_size': 4, 'stride': 2, 'padding': 1, 'bias': False\n },\n activation_class=nn.Tanh\n )\n )\n\n def forward(self, x: Tensor) -> Tuple[Tensor, Dict[int, Tensor]]:\n \"\"\"\n The forward method for the DCGAN autoencoder model\n Args:\n x: The input tensor\n Returns:\n x_hat: A reconstruction of the original input tensor\n latents: The convolutional feature maps, with widths specified by self._dim_cls\n \"\"\"\n x_hat = self._unflatten(x)\n latents = None\n # Adds latent\n if self._dim_cls is not None:\n latents = {}\n latents[1] = x_hat\n for layer in self._layers:\n x_hat, conv_latent = layer(x_hat)\n # Add conv latent\n if self._dim_cls is not None:\n if conv_latent.size()[-1] in self._dim_cls:\n latents[conv_latent.size()[-1]] = conv_latent\n return x_hat, latents\n\n def init_weights(self):\n \"\"\"\n Weight initialization method\n Return:\n DcganDecoder with initialized weights\n\n \"\"\"\n for layer in self._layers:\n layer.init_weights()\n" ]
[ [ "torch.rand" ], [ "torch.initial_seed", "numpy.random.seed" ], [ "torch.nn.ModuleList", "torch.zeros" ], [ "torch.nn.ModuleList" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LewsTherin511/tf_models
[ "6c8b27e43249c84987f7c86749fd4bdf7a606871", "6c8b27e43249c84987f7c86749fd4bdf7a606871" ]
[ "research/object_detection/custom_datasets/OiDtoolkit_csv/OiDtoolkit_csv.py", "research/object_detection/object_detection_webcam.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport cv2\n\n\npath_input_images = 'input_images/'\npath_input_txts = 'input_txts/'\npath_output_labels = 'output_labels/'\n\n\n# configuration\n# maximum resolution\nmax_pixels = 600\n# train/test ratio\nall_images_list = sorted([name for name in os.listdir(path_input_images)])\ntrain_batch = 900\ntest_batch = 100\n# creates list of images using the first 'train_batch', and the last 'test_batch' images\nimages_list = np.append(all_images_list[0:train_batch], all_images_list[-test_batch:])\nprint(f\"Analizing {len(all_images_list)} images, using first {train_batch} for training and last {test_batch} for test\")\n\n\n\nwith open(f\"{path_output_labels}train_labels.csv\", 'w') as output_labels_file_train, open(f\"{path_output_labels}test_labels.csv\", 'w') as output_labels_file_test:\n\toutput_labels_file_train.write(\"filename,width,height,class,xmin,ymin,xmax,ymax\\n\")\n\toutput_labels_file_test.write(\"filename,width,height,class,xmin,ymin,xmax,ymax\\n\")\n\n\n\tfor i, image_name in enumerate(images_list):\n\n\t\tif i%50==0 : print(f\"\\tChecking image nr. {i}\")\n\n\n\t\tif i<train_batch:\n\t\t\tsubset = 'train'\n\t\t\tout_labels_file = output_labels_file_train\n\t\telse:\n\t\t\tsubset = 'test'\n\t\t\tout_labels_file = output_labels_file_test\n\n\n\t\timg = cv2.imread(path_input_images + image_name, 1)\n\t\theight, width, channel = img.shape\n\n\n\t\t# resize images if their biggest size is > 600px\n\t\tfactor = max(width, height)/max_pixels if (max(width, height)) > max_pixels else 1\n\t\twidth, height = int(width/factor), int(height/factor)\n\t\timg = cv2.resize(img, (width, height))\n\n\n\t\tcv2.imwrite(f\"output_images_{subset}/{image_name}\", img)\n\n\t\twith open(f\"{path_input_txts}{os.path.splitext(image_name)[0]}.txt\", 'r') as txt_file:\n\t\t\tdf = pd.read_csv(txt_file, sep='\\s+', header=None, index_col=None)\n\t\t\t# print(df)\n\n\t\t\tfor i in range(df.shape[0]):\n\t\t\t\tlabel = df.iloc[i,0].lower()\n\t\t\t\tx_min = int(df.iloc[i,1]/factor)\n\t\t\t\ty_min = int(df.iloc[i,2]/factor)\n\t\t\t\tx_max = int(df.iloc[i,3]/factor)\n\t\t\t\ty_max = int(df.iloc[i,4]/factor)\n\n\n\n\t\t\t\tout_labels_file.write(f\"{image_name},{width},{height},{label},{x_min},{y_min},{x_max},{y_max}\\n\")\n", "###################\n### Imports ###\n###################\n\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport cv2\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\nfrom utils import ops as utils_ops\n\n# imports from the object detection module.\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n\n\n#---------------------------#\n# Model preparation #\n#---------------------------#\n# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by\n# changing `PATH_TO_CKPT` to point to a new .pb file. By default we use an \"SSD with Mobilenet\" model here. 
See the [\n# detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md\n# ) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.\n\n\n\n#################################################################\n#################################################################\n#################################################################\n# # Setting model name\n# ### COCO\n# # MODEL_NAME = 'models_ZOO/ssd_mobilenet_v1_coco_2018_01_28'\n# # MODEL_NAME = 'models_ZOO/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03'\n# # MODEL_NAME = 'models_ZOO/faster_rcnn_resnet50_lowproposals_coco_2018_01_28'\n# # unbearably slow\n# # MODEL_NAME = 'models_ZOO/faster_rcnn_inception_resnet_v2_atrous_coco_2018_01_28'\n# # slow, but still ok\n# # MODEL_NAME = 'models_ZOO/faster_rcnn_inception_v2_coco_2018_01_28'\n# # MODEL_NAME = 'models_ZOO/faster_rcnn_resnet50_lowproposals_coco_2018_01_28'\n#\n# ### OID4\n# # MODEL_NAME = 'models_ZOO/ssd_mobilenet_v2_oid_v4_2018_12_12'\n# # MODEL_NAME = 'models_ZOO/faster_rcnn_inception_resnet_v2_atrous_oid_v4_2018_12_12'\n#\n#\n# MODEL_FILE = MODEL_NAME + '.tar.gz'\n# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n#\n# # Path to frozen detection graph. This is the actual model that is used for the object detection.\n# PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n#\n# # List of the strings that is used to add correct label for each box.\n# PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n# # PATH_TO_LABELS = os.path.join('data', 'oid_v4_label_map.pbtxt')\n#\n# # 601 classes for OiD, 90 for COCO\n# NUM_CLASSES = 90\n# ################################################################\n#################################################################\n#################################################################\n# What model to download.\nMODEL_NAME = 'custom_inference_graph/900_11'\n# Path to frozen detection graph. 
This is the actual model that is used for the object detection.\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', 'guitar_label_map.pbtxt')\nNUM_CLASSES = 1\n#################################################################\n#################################################################\n#################################################################\n\n\n# Download Model if needed, and extract\nif not os.path.exists(MODEL_NAME + '/frozen_inference_graph.pb'):\n\tprint ('Downloading the model')\n\topener = urllib.request.URLopener()\n\topener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\n\ttar_file = tarfile.open(MODEL_FILE)\n\tfor file in tar_file.getmembers():\n\t\tfile_name = os.path.basename(file.name)\n\t\tif 'frozen_inference_graph.pb' in file_name:\n\t\t\ttar_file.extract(file, os.getcwd())\n\tprint ('Download complete')\nelse:\n\tprint ('Model already exists')\n\n\n\n# ## Load a (frozen) Tensorflow model into memory.\n# normal graph used for detection\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n\t# serialized graph to read frozen model from .pb file\n\tgraph_def = tf.GraphDef()\n\twith tf.gfile.GFile(PATH_TO_CKPT, 'rb') as f:\n\t\tgraph_def.ParseFromString(f.read())\n\t\t# import serialized graph to detection_graph\n\t\ttf.import_graph_def(graph_def, name='')\n\n\n\n\n# ## Loading label map\n# Label maps map indices to category names, so that when our convolution network predicts `5`,\n# we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a\n# dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\n\n# intializing the web camera device\n# USE THIS FOR WEBCAM\n# cap = cv2.VideoCapture(0)\ncap = cv2.VideoCapture(\"video/video_01.mp4\")\n\n\ncount_frame = 0\n# Running the tensorflow session\nwith detection_graph.as_default():\n\twith tf.Session(graph=detection_graph) as sess:\n\t\tret = True\n\t\twhile (ret):\n\t\t\tret,image_np = cap.read()\n\t\t\tif count_frame%25==0:\n\t\t\t\t# Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n\t\t\t\timage_np_expanded = np.expand_dims(image_np, axis=0)\n\t\t\t\timage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\t\t\t\t# Each box represents a part of the image where a particular object was detected.\n\t\t\t\tboxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\t\t\t\t# Each score represent how level of confidence for each of the objects.\n\t\t\t\t# Score is shown on the result image, together with the class label.\n\t\t\t\tscores = detection_graph.get_tensor_by_name('detection_scores:0')\n\t\t\t\tclasses = detection_graph.get_tensor_by_name('detection_classes:0')\n\t\t\t\tnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\t\t\t\t# Actual detection.\n\t\t\t\t(boxes, scores, classes, num_detections) = sess.run([boxes, scores, classes, num_detections],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeed_dict={image_tensor: image_np_expanded})\n\t\t\t\t# print(scores)\n\t\t\t\t# print(classes)\n\t\t\t# Visualization of the results of a 
detection.\n\t\t\tvis_util.visualize_boxes_and_labels_on_image_array(image_np, np.squeeze(boxes),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t np.squeeze(classes).astype(np.int32),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t np.squeeze(scores),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t category_index, use_normalized_coordinates=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t min_score_thresh = .65,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t line_thickness=3)\n\n\t\t\tcv2.imshow('image' ,cv2.resize(image_np ,(1280 ,960)))\n\t\t\tcount_frame +=1\n\n\n\t\t\tif cv2.waitKey(25) & 0xFF == ord('q'):\n\t\t\t\tcv2.destroyAllWindows()\n\t\t\t\tcap.release()\n\t\t\t\tbreak\n" ]
[ [ "numpy.append", "pandas.read_csv" ], [ "tensorflow.Graph", "tensorflow.import_graph_def", "numpy.expand_dims", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.Session", "tensorflow.GraphDef" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hbrylkowski/model-analysis
[ "3ffa05396ca8cbb92755a40f58528a8808f63c5b", "3ffa05396ca8cbb92755a40f58528a8808f63c5b", "3ffa05396ca8cbb92755a40f58528a8808f63c5b" ]
[ "tensorflow_model_analysis/evaluators/metrics_and_plots_evaluator_test.py", "tensorflow_model_analysis/writers/metrics_plots_and_validations_writer.py", "tensorflow_model_analysis/metrics/multi_class_confusion_matrix_plot.py" ]
[ "# Lint as: python3\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for using the MetricsAndPlotsEvaluator API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport apache_beam as beam\nfrom apache_beam.testing import util\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_model_analysis.api import model_eval_lib\nfrom tensorflow_model_analysis.api import tfma_unit\nfrom tensorflow_model_analysis.eval_saved_model import testutil\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator_no_labels\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import linear_classifier\nfrom tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator\nfrom tensorflow_model_analysis.extractors import predict_extractor\nfrom tensorflow_model_analysis.extractors import slice_key_extractor\nfrom tensorflow_model_analysis.post_export_metrics import metric_keys\nfrom tensorflow_model_analysis.post_export_metrics import metrics as metric_fns\nfrom tensorflow_model_analysis.post_export_metrics import post_export_metrics\nfrom tensorflow_model_analysis.slicer import slicer_lib as slicer\n\n\ndef _addExampleCountMetricCallback( # pylint: disable=invalid-name\n features_dict, predictions_dict, labels_dict):\n del features_dict\n del labels_dict\n metric_ops = {}\n value_op, update_op = metric_fns.total(\n tf.shape(input=predictions_dict['logits'])[0])\n metric_ops['added_example_count'] = (value_op, update_op)\n return metric_ops\n\n\ndef _addPyFuncMetricCallback( # pylint: disable=invalid-name\n features_dict, predictions_dict, labels_dict):\n del features_dict\n del predictions_dict\n\n total_value = tf.compat.v1.Variable(\n initial_value=0.0,\n dtype=tf.float64,\n trainable=False,\n collections=[\n tf.compat.v1.GraphKeys.METRIC_VARIABLES,\n tf.compat.v1.GraphKeys.LOCAL_VARIABLES\n ],\n validate_shape=True,\n name='total')\n\n def my_func(x):\n return np.sum(x, dtype=np.float64)\n\n update_op = tf.compat.v1.assign_add(\n total_value, tf.compat.v1.py_func(my_func, [labels_dict], tf.float64))\n value_op = tf.identity(total_value)\n metric_ops = {}\n metric_ops['py_func_label_sum'] = (value_op, update_op)\n return metric_ops\n\n\nclass EvaluateMetricsAndPlotsTest(testutil.TensorflowModelAnalysisTest):\n\n def setUp(self):\n super(EvaluateMetricsAndPlotsTest, self).setUp()\n self.longMessage = True # pylint: disable=invalid-name\n\n def _getEvalExportDir(self):\n return os.path.join(self._getTempDir(), 'eval_export_dir')\n\n def testEvaluateNoSlicing(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = linear_classifier.simple_linear_classifier(\n None, temp_eval_export_dir)\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir,\n 
add_metrics_callbacks=[_addExampleCountMetricCallback])\n extractors = [\n predict_extractor.PredictExtractor(eval_shared_model),\n slice_key_extractor.SliceKeyExtractor()\n ]\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(age=3.0, language='english', label=1.0)\n example2 = self._makeExample(age=3.0, language='chinese', label=0.0)\n example3 = self._makeExample(age=4.0, language='english', label=1.0)\n example4 = self._makeExample(age=5.0, language='chinese', label=0.0)\n\n (metrics, _), _ = (\n pipeline\n | 'Create' >> beam.Create([\n example1.SerializeToString(),\n example2.SerializeToString(),\n example3.SerializeToString(),\n example4.SerializeToString()\n ])\n | 'InputsToExtracts' >> model_eval_lib.InputsToExtracts()\n | 'Extract' >> tfma_unit.Extract(extractors=extractors) # pylint: disable=no-value-for-parameter\n | 'ComputeMetricsAndPlots' >> metrics_and_plots_evaluator\n .ComputeMetricsAndPlots(eval_shared_model=eval_shared_model))\n\n def check_result(got):\n try:\n self.assertEqual(1, len(got), 'got: %s' % got)\n (slice_key, value) = got[0]\n self.assertEqual((), slice_key)\n self.assertDictElementsAlmostEqual(\n value, {\n 'accuracy': 1.0,\n 'label/mean': 0.5,\n 'my_mean_age': 3.75,\n 'my_mean_age_times_label': 1.75,\n 'added_example_count': 4.0\n })\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(metrics, check_result)\n\n def testEvaluateWithSlicingAndDifferentBatchSizes(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = linear_classifier.simple_linear_classifier(\n None, temp_eval_export_dir)\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir,\n add_metrics_callbacks=[_addExampleCountMetricCallback])\n extractors = [\n predict_extractor.PredictExtractor(eval_shared_model),\n slice_key_extractor.SliceKeyExtractor([\n slicer.SingleSliceSpec(),\n slicer.SingleSliceSpec(columns=['slice_key'])\n ])\n ]\n\n for batch_size in [1, 2, 4, 8]:\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(\n age=3.0, language='english', label=1.0, slice_key='first_slice')\n example2 = self._makeExample(\n age=3.0, language='chinese', label=0.0, slice_key='first_slice')\n example3 = self._makeExample(\n age=4.0, language='english', label=0.0, slice_key='second_slice')\n example4 = self._makeExample(\n age=5.0, language='chinese', label=1.0, slice_key='second_slice')\n example5 = self._makeExample(\n age=5.0, language='chinese', label=1.0, slice_key='second_slice')\n\n (metrics, plots), _ = (\n pipeline\n | 'Create' >> beam.Create([\n example1.SerializeToString(),\n example2.SerializeToString(),\n example3.SerializeToString(),\n example4.SerializeToString(),\n example5.SerializeToString(),\n ])\n | 'InputsToExtracts' >> model_eval_lib.InputsToExtracts()\n | 'Extract' >> tfma_unit.Extract(extractors=extractors) # pylint:disable=no-value-for-parameter\n | 'ComputeMetricsAndPlots' >>\n metrics_and_plots_evaluator.ComputeMetricsAndPlots(\n eval_shared_model=eval_shared_model,\n desired_batch_size=batch_size))\n\n def check_result(got):\n try:\n self.assertEqual(3, len(got), 'got: %s' % got)\n slices = {}\n for slice_key, value in got:\n slices[slice_key] = value\n overall_slice = ()\n first_slice = (('slice_key', 'first_slice'),)\n second_slice = (('slice_key', 'second_slice'),)\n self.assertCountEqual(\n list(slices.keys()), [overall_slice, first_slice, second_slice])\n self.assertDictElementsAlmostEqual(\n slices[overall_slice], {\n 'accuracy': 
0.4,\n 'label/mean': 0.6,\n 'my_mean_age': 4.0,\n 'my_mean_age_times_label': 2.6,\n 'added_example_count': 5.0\n })\n self.assertDictElementsAlmostEqual(\n slices[first_slice], {\n 'accuracy': 1.0,\n 'label/mean': 0.5,\n 'my_mean_age': 3.0,\n 'my_mean_age_times_label': 1.5,\n 'added_example_count': 2.0\n })\n self.assertDictElementsAlmostEqual(\n slices[second_slice], {\n 'accuracy': 0.0,\n 'label/mean': 2.0 / 3.0,\n 'my_mean_age': 14.0 / 3.0,\n 'my_mean_age_times_label': 10.0 / 3.0,\n 'added_example_count': 3.0\n })\n\n except AssertionError as err:\n # This function is redefined every iteration, so it will have the\n # right value of batch_size.\n raise util.BeamAssertException('batch_size = %d, error: %s' %\n (batch_size, err)) # pylint: disable=cell-var-from-loop\n\n util.assert_that(metrics, check_result, label='metrics')\n util.assert_that(plots, util.is_empty(), label='plots')\n\n def testEvaluateWithSlicingAndUncertainty(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = linear_classifier.simple_linear_classifier(\n None, temp_eval_export_dir)\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir,\n add_metrics_callbacks=[_addExampleCountMetricCallback])\n extractors = [\n predict_extractor.PredictExtractor(eval_shared_model),\n slice_key_extractor.SliceKeyExtractor([\n slicer.SingleSliceSpec(),\n slicer.SingleSliceSpec(columns=['slice_key'])\n ])\n ]\n\n for batch_size in [1, 2, 4, 8]:\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(\n age=3.0, language='english', label=1.0, slice_key='first_slice')\n example2 = self._makeExample(\n age=3.0, language='chinese', label=0.0, slice_key='first_slice')\n example3 = self._makeExample(\n age=4.0, language='english', label=0.0, slice_key='second_slice')\n example4 = self._makeExample(\n age=5.0, language='chinese', label=1.0, slice_key='second_slice')\n example5 = self._makeExample(\n age=5.0, language='chinese', label=1.0, slice_key='second_slice')\n\n (metrics, _), _ = (\n pipeline\n | 'Create' >> beam.Create([\n example1.SerializeToString(),\n example2.SerializeToString(),\n example3.SerializeToString(),\n example4.SerializeToString(),\n example5.SerializeToString(),\n ])\n | 'InputsToExtracts' >> model_eval_lib.InputsToExtracts()\n | 'Extract' >> tfma_unit.Extract(extractors=extractors) # pylint: disable=no-value-for-parameter\n | 'ComputeMetricsAndPlots' >>\n metrics_and_plots_evaluator.ComputeMetricsAndPlots(\n eval_shared_model=eval_shared_model,\n desired_batch_size=batch_size,\n compute_confidence_intervals=True))\n\n def check_result(got):\n try:\n self.assertEqual(3, len(got), 'got: %s' % got)\n slices = {}\n for slice_key, value in got:\n slices[slice_key] = value\n overall_slice = ()\n first_slice = (('slice_key', 'first_slice'),)\n second_slice = (('slice_key', 'second_slice'),)\n self.assertCountEqual(\n list(slices.keys()), [overall_slice, first_slice, second_slice])\n self.assertDictElementsWithTDistributionAlmostEqual(\n slices[overall_slice], {\n 'accuracy': 0.4,\n 'label/mean': 0.6,\n 'my_mean_age': 4.0,\n 'my_mean_age_times_label': 2.6,\n 'added_example_count': 5.0\n })\n self.assertDictElementsWithTDistributionAlmostEqual(\n slices[first_slice], {\n 'accuracy': 1.0,\n 'label/mean': 0.5,\n 'my_mean_age': 3.0,\n 'my_mean_age_times_label': 1.5,\n 'added_example_count': 2.0\n })\n self.assertDictElementsWithTDistributionAlmostEqual(\n slices[second_slice], {\n 'accuracy': 0.0,\n 'label/mean': 2.0 / 3.0,\n 'my_mean_age': 14.0 / 3.0,\n 
'my_mean_age_times_label': 10.0 / 3.0,\n 'added_example_count': 3.0\n })\n\n except AssertionError as err:\n # This function is redefined every iteration, so it will have the\n # right value of batch_size.\n raise util.BeamAssertException('batch_size = %d, error: %s' %\n (batch_size, err)) # pylint: disable=cell-var-from-loop\n\n util.assert_that(metrics, check_result, label='metrics')\n\n def testEvaluateNoSlicingAddPostExportAndCustomMetrics(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = linear_classifier.simple_linear_classifier(\n None, temp_eval_export_dir)\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir,\n add_metrics_callbacks=[\n _addExampleCountMetricCallback,\n # Note that since everything runs in-process this doesn't\n # actually test that the py_func can be correctly recreated\n # on workers in a distributed context.\n _addPyFuncMetricCallback,\n post_export_metrics.example_count(),\n post_export_metrics.example_weight(example_weight_key='age')\n ])\n extractors = [\n predict_extractor.PredictExtractor(eval_shared_model),\n slice_key_extractor.SliceKeyExtractor()\n ]\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(age=3.0, language='english', label=1.0)\n example2 = self._makeExample(age=3.0, language='chinese', label=0.0)\n example3 = self._makeExample(age=4.0, language='english', label=1.0)\n example4 = self._makeExample(age=5.0, language='chinese', label=0.0)\n\n (metrics, plots), _ = (\n pipeline\n | 'Create' >> beam.Create([\n example1.SerializeToString(),\n example2.SerializeToString(),\n example3.SerializeToString(),\n example4.SerializeToString()\n ])\n | 'InputsToExtracts' >> model_eval_lib.InputsToExtracts()\n | 'Extract' >> tfma_unit.Extract(extractors=extractors) # pylint: disable=no-value-for-parameter\n | 'ComputeMetricsAndPlots' >> metrics_and_plots_evaluator\n .ComputeMetricsAndPlots(eval_shared_model=eval_shared_model))\n\n def check_result(got):\n try:\n self.assertEqual(1, len(got), 'got: %s' % got)\n (slice_key, value) = got[0]\n self.assertEqual((), slice_key)\n self.assertDictElementsAlmostEqual(\n got_values_dict=value,\n expected_values_dict={\n 'accuracy': 1.0,\n 'label/mean': 0.5,\n 'my_mean_age': 3.75,\n 'my_mean_age_times_label': 1.75,\n 'added_example_count': 4.0,\n 'py_func_label_sum': 2.0,\n metric_keys.EXAMPLE_COUNT: 4.0,\n metric_keys.EXAMPLE_WEIGHT: 15.0\n })\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(metrics, check_result, label='metrics')\n util.assert_that(plots, util.is_empty(), label='plots')\n\n def testEvaluateNoSlicingAddPostExportAndCustomMetricsUnsupervisedModel(self):\n # Mainly for testing that the ExampleCount post export metric works with\n # unsupervised models.\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = (\n fixed_prediction_estimator_no_labels\n .simple_fixed_prediction_estimator_no_labels(None,\n temp_eval_export_dir))\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir,\n add_metrics_callbacks=[\n post_export_metrics.example_count(),\n post_export_metrics.example_weight(example_weight_key='prediction')\n ])\n extractors = [\n predict_extractor.PredictExtractor(eval_shared_model),\n slice_key_extractor.SliceKeyExtractor()\n ]\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(prediction=1.0)\n example2 = self._makeExample(prediction=2.0)\n\n (metrics, plots), _ = (\n pipeline\n | 'Create' >> 
beam.Create([\n example1.SerializeToString(),\n example2.SerializeToString(),\n ])\n | 'InputsToExtracts' >> model_eval_lib.InputsToExtracts()\n | 'Extract' >> tfma_unit.Extract(extractors=extractors) # pylint: disable=no-value-for-parameter\n | 'ComputeMetricsAndPlots' >> metrics_and_plots_evaluator\n .ComputeMetricsAndPlots(eval_shared_model=eval_shared_model))\n\n def check_result(got):\n try:\n self.assertEqual(1, len(got), 'got: %s' % got)\n (slice_key, value) = got[0]\n self.assertEqual((), slice_key)\n self.assertDictElementsAlmostEqual(\n got_values_dict=value,\n expected_values_dict={\n 'average_loss': 2.5,\n metric_keys.EXAMPLE_COUNT: 2.0,\n metric_keys.EXAMPLE_WEIGHT: 3.0\n })\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(metrics, check_result, label='metrics')\n util.assert_that(plots, util.is_empty(), label='plots')\n\n def testEvaluateWithPlots(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = (\n fixed_prediction_estimator.simple_fixed_prediction_estimator(\n None, temp_eval_export_dir))\n eval_shared_model = self.createTestEvalSharedModel(\n eval_saved_model_path=eval_export_dir,\n add_metrics_callbacks=[\n post_export_metrics.example_count(),\n post_export_metrics.auc_plots()\n ])\n extractors = [\n predict_extractor.PredictExtractor(eval_shared_model),\n slice_key_extractor.SliceKeyExtractor()\n ]\n\n with beam.Pipeline() as pipeline:\n example1 = self._makeExample(prediction=0.0, label=1.0)\n example2 = self._makeExample(prediction=0.7, label=0.0)\n example3 = self._makeExample(prediction=0.8, label=1.0)\n example4 = self._makeExample(prediction=1.0, label=1.0)\n\n (metrics, plots), _ = (\n pipeline\n | 'Create' >> beam.Create([\n example1.SerializeToString(),\n example2.SerializeToString(),\n example3.SerializeToString(),\n example4.SerializeToString()\n ])\n | 'InputsToExtracts' >> model_eval_lib.InputsToExtracts()\n | 'Extract' >> tfma_unit.Extract(extractors=extractors) # pylint: disable=no-value-for-parameter\n | 'ComputeMetricsAndPlots' >> metrics_and_plots_evaluator\n .ComputeMetricsAndPlots(eval_shared_model=eval_shared_model))\n\n def check_metrics(got):\n try:\n self.assertEqual(1, len(got), 'got: %s' % got)\n (slice_key, value) = got[0]\n self.assertEqual((), slice_key)\n self.assertDictElementsAlmostEqual(\n got_values_dict=value,\n expected_values_dict={\n metric_keys.EXAMPLE_COUNT: 4.0,\n })\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(metrics, check_metrics, label='metrics')\n\n def check_plots(got):\n try:\n self.assertEqual(1, len(got), 'got: %s' % got)\n (slice_key, value) = got[0]\n self.assertEqual((), slice_key)\n self.assertDictMatrixRowsAlmostEqual(\n got_values_dict=value,\n expected_values_dict={\n metric_keys.AUC_PLOTS_MATRICES: [\n (8001, [2, 1, 0, 1, 1.0 / 1.0, 1.0 / 3.0])\n ],\n })\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(plots, check_plots, label='plots')\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Lint as: python3\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 
either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Metrics, plots, and validations writer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Standard __future__ imports\nfrom __future__ import print_function\n\nimport os\n\nfrom typing import Any, Dict, Iterator, List, Optional, Text, Tuple\n\nfrom absl import logging\nimport apache_beam as beam\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow_model_analysis import constants\nfrom tensorflow_model_analysis import math_util\nfrom tensorflow_model_analysis import types\nfrom tensorflow_model_analysis.evaluators import evaluator\nfrom tensorflow_model_analysis.metrics import metric_types\nfrom tensorflow_model_analysis.post_export_metrics import metric_keys\nfrom tensorflow_model_analysis.proto import metrics_for_slice_pb2\nfrom tensorflow_model_analysis.proto import validation_result_pb2\nfrom tensorflow_model_analysis.slicer import slicer_lib as slicer\nfrom tensorflow_model_analysis.writers import writer\n\n\ndef _match_all_files(file_path: Text) -> Text:\n \"\"\"Return expression to match all files at given path.\"\"\"\n return file_path + '*'\n\n\ndef load_and_deserialize_metrics(\n output_path: Text,\n output_file_format: Text = ''\n) -> Iterator[metrics_for_slice_pb2.MetricsForSlice]:\n \"\"\"Read and deserialize the MetricsForSlice records.\n\n Args:\n output_path: Path or pattern to search for metrics files under. If a\n directory is passed, files matching 'metrics*' will be searched for.\n output_file_format: Optional file extension to filter files by.\n\n Yields:\n MetricsForSlice protos found in matching files.\n \"\"\"\n if tf.io.gfile.isdir(output_path):\n output_path = os.path.join(output_path, constants.METRICS_KEY)\n pattern = _match_all_files(output_path)\n if output_file_format:\n pattern = pattern + '.' + output_file_format\n for path in tf.io.gfile.glob(pattern):\n for record in tf.compat.v1.python_io.tf_record_iterator(path):\n yield metrics_for_slice_pb2.MetricsForSlice.FromString(record)\n\n\ndef load_and_deserialize_plots(\n output_path: Text,\n output_file_format: Text = ''\n) -> Iterator[metrics_for_slice_pb2.PlotsForSlice]:\n \"\"\"Read and deserialize the PlotsForSlice records.\n\n Args:\n output_path: Path or pattern to search for plots files under. If a directory\n is passed, files matching 'plots*' will be searched for.\n output_file_format: Optional file extension to filter files by.\n\n Yields:\n PlotsForSlice protos found in matching files.\n \"\"\"\n if tf.io.gfile.isdir(output_path):\n output_path = os.path.join(output_path, constants.PLOTS_KEY)\n pattern = _match_all_files(output_path)\n if output_file_format:\n pattern = pattern + '.' + output_file_format\n for path in tf.io.gfile.glob(pattern):\n for record in tf.compat.v1.python_io.tf_record_iterator(path):\n yield metrics_for_slice_pb2.PlotsForSlice.FromString(record)\n\n\ndef load_and_deserialize_validation_result(\n output_path: Text,\n output_file_format: Text = '') -> validation_result_pb2.ValidationResult:\n \"\"\"Read and deserialize the ValidationResult record.\n\n Args:\n output_path: Path or pattern to search for validation file under. 
If a\n directory is passed, a file matching 'validations*' will be searched for.\n output_file_format: Optional file extension to filter file by.\n\n Returns:\n ValidationResult proto.\n \"\"\"\n if tf.io.gfile.isdir(output_path):\n output_path = os.path.join(output_path, constants.VALIDATIONS_KEY)\n pattern = _match_all_files(output_path)\n if output_file_format:\n pattern = pattern + '.' + output_file_format\n validation_records = []\n for path in tf.io.gfile.glob(pattern):\n for record in tf.compat.v1.python_io.tf_record_iterator(path):\n validation_records.append(\n validation_result_pb2.ValidationResult.FromString(record))\n assert len(validation_records) == 1\n return validation_records[0]\n\n\ndef _convert_to_array_value(\n array: np.ndarray) -> metrics_for_slice_pb2.ArrayValue:\n \"\"\"Converts NumPy array to ArrayValue.\"\"\"\n result = metrics_for_slice_pb2.ArrayValue()\n result.shape[:] = array.shape\n if array.dtype == 'int32':\n result.data_type = metrics_for_slice_pb2.ArrayValue.INT32\n result.int32_values[:] = array.flatten()\n elif array.dtype == 'int64':\n result.data_type = metrics_for_slice_pb2.ArrayValue.INT64\n result.int64_values[:] = array.flatten()\n elif array.dtype == 'float32':\n result.data_type = metrics_for_slice_pb2.ArrayValue.FLOAT32\n result.float32_values[:] = array.flatten()\n elif array.dtype == 'float64':\n result.data_type = metrics_for_slice_pb2.ArrayValue.FLOAT64\n result.float64_values[:] = array.flatten()\n else:\n # For all other types, cast to string and convert to bytes.\n result.data_type = metrics_for_slice_pb2.ArrayValue.BYTES\n result.bytes_values[:] = [\n tf.compat.as_bytes(x) for x in array.astype(six.text_type).flatten()\n ]\n return result\n\n\ndef convert_slice_metrics_to_proto(\n metrics: Tuple[slicer.SliceKeyType, Dict[Any, Any]],\n add_metrics_callbacks: List[types.AddMetricsCallbackType]\n) -> metrics_for_slice_pb2.MetricsForSlice:\n \"\"\"Converts the given slice metrics into serialized proto MetricsForSlice.\n\n Args:\n metrics: The slice metrics.\n add_metrics_callbacks: A list of metric callbacks. 
This should be the same\n list as the one passed to tfma.Evaluate().\n\n Returns:\n The MetricsForSlice proto.\n\n Raises:\n TypeError: If the type of the feature value in slice key cannot be\n recognized.\n \"\"\"\n result = metrics_for_slice_pb2.MetricsForSlice()\n slice_key, slice_metrics = metrics\n\n result.slice_key.CopyFrom(slicer.serialize_slice_key(slice_key))\n\n slice_metrics = slice_metrics.copy()\n\n if metric_keys.ERROR_METRIC in slice_metrics:\n logging.warning('Error for slice: %s with error message: %s ', slice_key,\n slice_metrics[metric_keys.ERROR_METRIC])\n result.metrics[metric_keys.ERROR_METRIC].debug_message = slice_metrics[\n metric_keys.ERROR_METRIC]\n return result\n\n # Convert the metrics from add_metrics_callbacks to the structured output if\n # defined.\n if add_metrics_callbacks and (not any(\n isinstance(k, metric_types.MetricKey) for k in slice_metrics.keys())):\n for add_metrics_callback in add_metrics_callbacks:\n if hasattr(add_metrics_callback, 'populate_stats_and_pop'):\n add_metrics_callback.populate_stats_and_pop(slice_key, slice_metrics,\n result.metrics)\n for key in sorted(slice_metrics.keys()):\n value = slice_metrics[key]\n metric_value = metrics_for_slice_pb2.MetricValue()\n if isinstance(value, metrics_for_slice_pb2.ConfusionMatrixAtThresholds):\n metric_value.confusion_matrix_at_thresholds.CopyFrom(value)\n elif isinstance(value, types.ValueWithTDistribution):\n # Currently we populate both bounded_value and confidence_interval.\n # Avoid populating bounded_value once the UI handles confidence_interval.\n # Convert to a bounded value. 95% confidence level is computed here.\n sample_mean, lower_bound, upper_bound = (\n math_util.calculate_confidence_interval(value))\n metric_value.bounded_value.value.value = sample_mean\n metric_value.bounded_value.lower_bound.value = lower_bound\n metric_value.bounded_value.upper_bound.value = upper_bound\n metric_value.bounded_value.methodology = (\n metrics_for_slice_pb2.BoundedValue.POISSON_BOOTSTRAP)\n # Populate confidence_interval\n metric_value.confidence_interval.lower_bound.value = lower_bound\n metric_value.confidence_interval.upper_bound.value = upper_bound\n t_dist_value = metrics_for_slice_pb2.TDistributionValue()\n t_dist_value.sample_mean.value = value.sample_mean\n t_dist_value.sample_standard_deviation.value = (\n value.sample_standard_deviation)\n t_dist_value.sample_degrees_of_freedom.value = (\n value.sample_degrees_of_freedom)\n # Once the UI handles confidence interval, we will avoid setting this and\n # instead use the double_value.\n t_dist_value.unsampled_value.value = value.unsampled_value\n metric_value.confidence_interval.t_distribution_value.CopyFrom(\n t_dist_value)\n elif isinstance(value, six.binary_type):\n # Convert textual types to string metrics.\n metric_value.bytes_value = value\n elif isinstance(value, six.text_type):\n # Convert textual types to string metrics.\n metric_value.bytes_value = value.encode('utf8')\n elif isinstance(value, np.ndarray):\n # Convert NumPy arrays to ArrayValue.\n metric_value.array_value.CopyFrom(_convert_to_array_value(value))\n else:\n # We try to convert to float values.\n try:\n metric_value.double_value.value = float(value)\n except (TypeError, ValueError) as e:\n metric_value.unknown_type.value = str(value)\n metric_value.unknown_type.error = e.message # pytype: disable=attribute-error\n\n if isinstance(key, metric_types.MetricKey):\n key_and_value = result.metric_keys_and_values.add()\n key_and_value.key.CopyFrom(key.to_proto())\n 
key_and_value.value.CopyFrom(metric_value)\n else:\n result.metrics[key].CopyFrom(metric_value)\n\n return result\n\n\ndef convert_slice_plots_to_proto(\n plots: Tuple[slicer.SliceKeyType, Dict[Any, Any]],\n add_metrics_callbacks: List[types.AddMetricsCallbackType]\n) -> metrics_for_slice_pb2.PlotsForSlice:\n \"\"\"Converts the given slice plots into PlotsForSlice proto.\n\n Args:\n plots: The slice plots.\n add_metrics_callbacks: A list of metric callbacks. This should be the same\n list as the one passed to tfma.Evaluate().\n\n Returns:\n The PlotsForSlice proto.\n \"\"\"\n result = metrics_for_slice_pb2.PlotsForSlice()\n slice_key, slice_plots = plots\n\n result.slice_key.CopyFrom(slicer.serialize_slice_key(slice_key))\n\n slice_plots = slice_plots.copy()\n\n if metric_keys.ERROR_METRIC in slice_plots:\n logging.warning('Error for slice: %s with error message: %s ', slice_key,\n slice_plots[metric_keys.ERROR_METRIC])\n error_metric = slice_plots.pop(metric_keys.ERROR_METRIC)\n result.plots[metric_keys.ERROR_METRIC].debug_message = error_metric\n return result\n\n if add_metrics_callbacks and (not any(\n isinstance(k, metric_types.MetricKey) for k in slice_plots.keys())):\n for add_metrics_callback in add_metrics_callbacks:\n if hasattr(add_metrics_callback, 'populate_plots_and_pop'):\n add_metrics_callback.populate_plots_and_pop(slice_plots, result.plots)\n plots_by_key = {}\n for key in sorted(slice_plots.keys()):\n value = slice_plots[key]\n # Remove plot name from key (multiple plots are combined into a single\n # proto).\n if isinstance(key, metric_types.MetricKey):\n parent_key = key._replace(name=None)\n else:\n continue\n if parent_key not in plots_by_key:\n key_and_value = result.plot_keys_and_values.add()\n key_and_value.key.CopyFrom(parent_key.to_proto())\n plots_by_key[parent_key] = key_and_value.value\n\n if isinstance(value, metrics_for_slice_pb2.CalibrationHistogramBuckets):\n plots_by_key[parent_key].calibration_histogram_buckets.CopyFrom(value)\n slice_plots.pop(key)\n elif isinstance(value, metrics_for_slice_pb2.ConfusionMatrixAtThresholds):\n plots_by_key[parent_key].confusion_matrix_at_thresholds.CopyFrom(value)\n slice_plots.pop(key)\n elif isinstance(\n value, metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds):\n plots_by_key[\n parent_key].multi_class_confusion_matrix_at_thresholds.CopyFrom(value)\n slice_plots.pop(key)\n elif isinstance(\n value, metrics_for_slice_pb2.MultiLabelConfusionMatrixAtThresholds):\n plots_by_key[\n parent_key].multi_label_confusion_matrix_at_thresholds.CopyFrom(value)\n slice_plots.pop(key)\n\n if slice_plots:\n if add_metrics_callbacks is None:\n add_metrics_callbacks = []\n raise NotImplementedError(\n 'some plots were not converted or popped. keys: %s. 
'\n 'add_metrics_callbacks were: %s' % (\n slice_plots.keys(),\n [\n x.name for x in add_metrics_callbacks # pytype: disable=attribute-error\n ]))\n\n return result\n\n\ndef MetricsPlotsAndValidationsWriter( # pylint: disable=invalid-name\n output_paths: Dict[Text, Text],\n add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,\n metrics_key: Text = constants.METRICS_KEY,\n plots_key: Text = constants.PLOTS_KEY,\n validations_key: Text = constants.VALIDATIONS_KEY,\n output_file_format: Text = '') -> writer.Writer:\n \"\"\"Returns metrics and plots writer.\n\n Note, sharding will be enabled by default if a output_file_format is provided.\n The files will be named <output_path>-SSSSS-of-NNNNN.<output_file_format>\n where SSSSS is the shard number and NNNNN is the number of shards.\n\n Args:\n output_paths: Output paths keyed by output key (e.g. 'metrics', 'plots',\n 'validation').\n add_metrics_callbacks: Optional list of metric callbacks (if used).\n metrics_key: Name to use for metrics key in Evaluation output.\n plots_key: Name to use for plots key in Evaluation output.\n validations_key: Name to use for validations key in Evaluation output.\n output_file_format: File format to use when saving files. Currently only\n 'tfrecord' is supported.\n \"\"\"\n return writer.Writer(\n stage_name='WriteMetricsAndPlots',\n ptransform=_WriteMetricsPlotsAndValidations( # pylint: disable=no-value-for-parameter\n output_paths=output_paths,\n add_metrics_callbacks=add_metrics_callbacks or [],\n metrics_key=metrics_key,\n plots_key=plots_key,\n validations_key=validations_key,\n output_file_format=output_file_format))\n\n\[email protected]_input_types(validation_result_pb2.ValidationResult)\[email protected]_output_types(validation_result_pb2.ValidationResult)\nclass _CombineValidations(beam.CombineFn):\n \"\"\"Combines the ValidationResults protos.\n\n Combines PCollection of ValidationResults for different metrics and slices.\n \"\"\"\n\n def create_accumulator(self) -> None:\n return\n\n def add_input(\n self, result: 'Optional[validation_result_pb2.ValidationResult]',\n new_input: 'Optional[validation_result_pb2.ValidationResult]'\n ) -> 'Optional[validation_result_pb2.ValidationResult]':\n if new_input is None:\n return None\n if result is None:\n result = validation_result_pb2.ValidationResult(validation_ok=True)\n result.validation_ok &= new_input.validation_ok\n result.metric_validations_per_slice.extend(\n new_input.metric_validations_per_slice)\n return result\n\n def merge_accumulators(\n self,\n accumulators: 'List[Optional[validation_result_pb2.ValidationResult]]'\n ) -> 'Optional[validation_result_pb2.ValidationResult]':\n accumulators = [accumulator for accumulator in accumulators if accumulator]\n if not accumulators:\n return None\n result = validation_result_pb2.ValidationResult(validation_ok=True)\n for new_input in accumulators:\n result.metric_validations_per_slice.extend(\n new_input.metric_validations_per_slice)\n result.validation_ok &= new_input.validation_ok\n return result\n\n def extract_output(\n self, accumulator: 'Optional[validation_result_pb2.ValidationResult]'\n ) -> 'Optional[validation_result_pb2.ValidationResult]':\n # Verification fails if there is empty input.\n if not accumulator:\n result = validation_result_pb2.ValidationResult(validation_ok=False)\n return result\n return accumulator\n\n\[email protected]_fn\n# TODO(b/157600974): Add typehint.\[email protected]_output_types(beam.pvalue.PDone)\ndef _WriteMetricsPlotsAndValidations( # pylint: 
disable=invalid-name\n evaluation: evaluator.Evaluation, output_paths: Dict[Text, Text],\n add_metrics_callbacks: List[types.AddMetricsCallbackType],\n metrics_key: Text, plots_key: Text, validations_key: Text,\n output_file_format: Text) -> beam.pvalue.PDone:\n \"\"\"PTransform to write metrics and plots.\"\"\"\n\n if output_file_format and output_file_format != 'tfrecord':\n raise ValueError(\n 'only \"{}\" format is currently supported: output_file_format={}'.format(\n 'tfrecord', output_file_format))\n\n if metrics_key in evaluation:\n metrics = (\n evaluation[metrics_key] | 'ConvertSliceMetricsToProto' >> beam.Map(\n convert_slice_metrics_to_proto,\n add_metrics_callbacks=add_metrics_callbacks))\n\n if constants.METRICS_KEY in output_paths:\n _ = metrics | 'WriteMetrics' >> beam.io.WriteToTFRecord(\n file_path_prefix=output_paths[constants.METRICS_KEY],\n shard_name_template=None if output_file_format else '',\n file_name_suffix=('.' +\n output_file_format if output_file_format else ''),\n coder=beam.coders.ProtoCoder(metrics_for_slice_pb2.MetricsForSlice))\n\n if plots_key in evaluation:\n plots = (\n evaluation[plots_key] | 'ConvertSlicePlotsToProto' >> beam.Map(\n convert_slice_plots_to_proto,\n add_metrics_callbacks=add_metrics_callbacks))\n\n if constants.PLOTS_KEY in output_paths:\n _ = plots | 'WritePlots' >> beam.io.WriteToTFRecord(\n file_path_prefix=output_paths[constants.PLOTS_KEY],\n shard_name_template=None if output_file_format else '',\n file_name_suffix=('.' +\n output_file_format if output_file_format else ''),\n coder=beam.coders.ProtoCoder(metrics_for_slice_pb2.PlotsForSlice))\n\n if validations_key in evaluation:\n validations = (\n evaluation[validations_key]\n |\n 'MergeValidationResults' >> beam.CombineGlobally(_CombineValidations()))\n\n if constants.VALIDATIONS_KEY in output_paths:\n # We only use a single shard here because validations are usually single\n # values.\n _ = validations | 'WriteValidations' >> beam.io.WriteToTFRecord(\n file_path_prefix=output_paths[constants.VALIDATIONS_KEY],\n shard_name_template='',\n file_name_suffix=('.' 
+\n output_file_format if output_file_format else ''),\n coder=beam.coders.ProtoCoder(validation_result_pb2.ValidationResult))\n\n return beam.pvalue.PDone(list(evaluation.values())[0].pipeline)\n", "# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Multi-class confusion matrix at thresholds.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Standard __future__ imports\nfrom __future__ import print_function\n\nfrom typing import Dict, List, Optional, NamedTuple, Text\n\nimport apache_beam as beam\nimport numpy as np\nfrom tensorflow_model_analysis import config\nfrom tensorflow_model_analysis.metrics import metric_types\nfrom tensorflow_model_analysis.metrics import metric_util\nfrom tensorflow_model_analysis.proto import metrics_for_slice_pb2\n\nMULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME = ('multi_class_confusion_matrix_plot')\n\n# Class ID used when no prediction was made because a threshold was given and\n# the top prediction was less than the threshold.\nNO_PREDICTED_CLASS_ID = -1\n\n_EPSILON = 1e-7\n\n\nclass MultiClassConfusionMatrixPlot(metric_types.Metric):\n \"\"\"Multi-class confusion matrix plot.\n\n Computes weighted example counts for all combinations of actual / (top)\n predicted classes.\n\n The inputs are assumed to contain a single positive label per example (i.e.\n only one class can be true at a time) while the predictions are assumed to sum\n to 1.0.\n \"\"\"\n\n def __init__(self,\n thresholds: Optional[List[float]] = None,\n num_thresholds: Optional[int] = None,\n name: Text = MULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME):\n \"\"\"Initializes multi-class confusion matrix.\n\n Args:\n thresholds: Optional thresholds. If the top prediction is less than a\n threshold then the associated example will be assumed to have no\n prediction associated with it (the predicted_class_id will be set to\n NO_PREDICTED_CLASS_ID). Only one of either thresholds or num_thresholds\n should be used. If both are unset, then [0.0] will be assumed.\n num_thresholds: Number of thresholds to use. The thresholds will be evenly\n spaced between 0.0 and 1.0 and inclusive of the boundaries (i.e. to\n configure the thresholds to [0.0, 0.25, 0.5, 0.75, 1.0], the parameter\n should be set to 5). 
Only one of either thresholds or num_thresholds\n should be used.\n name: Metric name.\n \"\"\"\n super(MultiClassConfusionMatrixPlot, self).__init__(\n metric_util.merge_per_key_computations(\n _multi_class_confusion_matrix_plot),\n thresholds=thresholds,\n num_thresholds=num_thresholds,\n name=name) # pytype: disable=wrong-arg-types\n\n\nmetric_types.register_metric(MultiClassConfusionMatrixPlot)\n\n\ndef _multi_class_confusion_matrix_plot(\n thresholds: Optional[List[float]] = None,\n num_thresholds: Optional[int] = None,\n name: Text = MULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME,\n eval_config: Optional[config.EvalConfig] = None,\n model_name: Text = '',\n output_name: Text = '',\n) -> metric_types.MetricComputations:\n \"\"\"Returns computations for multi-class confusion matrix at thresholds.\"\"\"\n if num_thresholds is not None and thresholds is not None:\n raise ValueError(\n 'only one of thresholds or num_thresholds can be set at a time')\n if num_thresholds is None and thresholds is None:\n thresholds = [0.0]\n if num_thresholds is not None:\n thresholds = [\n (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)\n ]\n thresholds = [-_EPSILON] + thresholds + [1.0 + _EPSILON]\n\n key = metric_types.PlotKey(\n name=name, model_name=model_name, output_name=output_name)\n return [\n metric_types.MetricComputation(\n keys=[key],\n preprocessor=None,\n combiner=_MultiClassConfusionMatrixPlotCombiner(\n key=key, eval_config=eval_config, thresholds=thresholds))\n ]\n\n\n_MatrixEntryKey = NamedTuple('_MatrixEntryKey', [('actual_class_id', int),\n ('predicted_class_id', int)])\n# Thresholds -> entry -> example_weights\n_Matrices = Dict[float, Dict[_MatrixEntryKey, float]]\n\n\nclass _MultiClassConfusionMatrixPlotCombiner(beam.CombineFn):\n \"\"\"Creates multi-class confusion matrix at thresholds from standard inputs.\"\"\"\n\n def __init__(self, key: metric_types.PlotKey,\n eval_config: Optional[config.EvalConfig],\n thresholds: List[float]):\n self._key = key\n self._eval_config = eval_config\n self._thresholds = thresholds if thresholds else [0.0]\n\n def create_accumulator(self) -> _Matrices:\n return {}\n\n def add_input(self, accumulator: _Matrices,\n element: metric_types.StandardMetricInputs) -> _Matrices:\n label, predictions, example_weight = next(\n metric_util.to_label_prediction_example_weight(\n element,\n eval_config=self._eval_config,\n model_name=self._key.model_name,\n output_name=self._key.output_name,\n flatten=False)) # pytype: disable=wrong-arg-types\n if not label.shape:\n raise ValueError(\n 'Label missing from example: StandardMetricInputs={}'.format(element))\n if predictions.shape in ((), (1,)):\n raise ValueError(\n 'Predictions shape must be > 1 for multi-class confusion matrix: '\n 'shape={}, StandardMetricInputs={}'.format(predictions.shape,\n element))\n if label.size > 1:\n actual_class_id = np.argmax(label)\n else:\n actual_class_id = int(label)\n predicted_class_id = np.argmax(predictions)\n example_weight = float(example_weight)\n for threshold in self._thresholds:\n if threshold not in accumulator:\n accumulator[threshold] = {}\n if predictions[predicted_class_id] < threshold:\n predicted_class_id = NO_PREDICTED_CLASS_ID\n matrix_key = _MatrixEntryKey(actual_class_id, predicted_class_id)\n if matrix_key in accumulator[threshold]:\n accumulator[threshold][matrix_key] += example_weight\n else:\n accumulator[threshold][matrix_key] = example_weight\n return accumulator\n\n def merge_accumulators(self, accumulators: List[_Matrices]) -> 
_Matrices:\n result = {}\n for accumulator in accumulators:\n for threshold, matrix in accumulator.items():\n if threshold not in result:\n result[threshold] = {}\n for k, v in matrix.items():\n if k in result[threshold]:\n result[threshold][k] += v\n else:\n result[threshold][k] = v\n return result\n\n def extract_output(\n self, accumulator: _Matrices\n ) -> Dict[metric_types.PlotKey,\n metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds]:\n pb = metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds()\n for threshold in sorted(accumulator.keys()):\n # Convert -epsilon and 1.0+epsilon back to 0.0 and 1.0.\n if threshold == -_EPSILON:\n t = 0.0\n elif threshold == 1.0 + _EPSILON:\n t = 1.0\n else:\n t = threshold\n matrix = pb.matrices.add(threshold=t)\n for k in sorted(accumulator[threshold].keys()):\n matrix.entries.add(\n actual_class_id=k.actual_class_id,\n predicted_class_id=k.predicted_class_id,\n num_weighted_examples=accumulator[threshold][k])\n return {self._key: pb}\n" ]
[ [ "tensorflow.compat.v1.Variable", "tensorflow.shape", "tensorflow.identity", "tensorflow.test.main", "tensorflow.compat.v1.py_func", "numpy.sum" ], [ "tensorflow.compat.v1.python_io.tf_record_iterator", "tensorflow.io.gfile.isdir", "tensorflow.io.gfile.glob", "tensorflow.compat.as_bytes" ], [ "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lornatang/TensorFlow-MNIST
[ "36a91a62a12726724d9d1d135fd3573d754e9659" ]
[ "v0_1/datasets/kmnist.py" ]
[ "# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\ndef process_image_with_kmnist(image, label, height=32, width=32):\n \"\"\" Resize the images to a fixes input size,\n and rescale the input channels to a range of [-1,1].\n\n Args:\n image: \"tensor, float32\", image input.\n label: \"tensor, int64\", image label.\n height: \"int64\", (224, 224, 3) -> (height, 224, 3).\n width: \"int64\", (224, 224, 3) -> (224, width, 3).\n\n Returns:\n image input, image label.\n\n \"\"\"\n image = tf.cast(image, tf.float32)\n image = image / 255.\n image = tf.image.resize(image, (height, width))\n return image, label\n\n\ndef load_data_kmnist(name='kmnist', train_size=7, test_size=2, val_size=1, buffer_size=1000, batch_size=32):\n \"\"\" load every mnist dataset.\n\n Args:\n name: \"str\", dataset name. default: 'kmnist'.\n train_size: \"int64\", train dataset. default:7\n test_size: \"int64\", test dataset. default:2\n val_size: \"int64\", val dataset. default:1\n buffer_size: \"int64\", dataset size. default:1000.\n batch_size: \"int64\", batch size. default:32\n\n Returns:\n dataset,\n\n \"\"\"\n split_weights = (train_size, test_size, val_size)\n splits = tfds.Split.TRAIN.subsplit(weighted=split_weights)\n (train_dataset, test_dataset, val_dataset) = tfds.load(name,\n split=list(splits),\n as_supervised=True)\n\n train_dataset = train_dataset.map(process_image_with_kmnist).shuffle(buffer_size).batch(batch_size)\n test_dataset = test_dataset.map(process_image_with_kmnist).batch(batch_size)\n val_dataset = val_dataset.map(process_image_with_kmnist).batch(batch_size)\n\n return train_dataset, test_dataset, val_dataset\n" ]
[ [ "tensorflow.cast", "tensorflow.image.resize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eddieschoute/qiskit-terra
[ "bf30f3551e0b163ebcb071a0d8186903703c5c8d", "a87fe61992003e7e7e541f41194c5f0b3e458346" ]
[ "qiskit/circuit/library/iqp.py", "qiskit/circuit/library/standard_gates/u3.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Instantaneous quantum polynomial circuit.\"\"\"\n\nfrom typing import Union, List\n\nimport numpy as np\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit.exceptions import CircuitError\n\n\nclass IQP(QuantumCircuit):\n r\"\"\"Instantaneous quantum polynomial (IQP) circuit.\n\n The circuit consists of a column of Hadamard gates,\n a column of powers of T gates,\n a sequence of powers of CS gates (up to\n :math:`\\frac{n^2-n}{2}` of them),\n and a final column of Hadamard gates, as introduced in [1].\n\n The circuit is parameterized by an n x n interactions matrix.\n The powers of each T gate are given by the diagonal elements\n of the interactions matrix. The powers of the CS gates are\n given by the upper triangle of the interactions matrix.\n\n **Reference Circuit:**\n\n .. jupyter-execute::\n :hide-code:\n\n from qiskit.circuit.library import IQP\n import qiskit.tools.jupyter\n A = [[6, 5, 3], [5, 4, 5], [3, 5, 1]]\n circuit = IQP(A)\n circuit.draw('mpl')\n\n **Expanded Circuit:**\n\n .. jupyter-execute::\n :hide-code:\n\n from qiskit.circuit.library import IQP\n import qiskit.tools.jupyter\n A = [[6, 5, 3], [5, 4, 5], [3, 5, 1]]\n circuit = IQP(A)\n %circuit_library_info circuit.decompose()\n\n **References:**\n\n [1] M. J. Bremner et al. Average-case complexity versus approximate\n simulation of commuting quantum computations,\n Phys. Rev. Lett. 117, 080501 (2016).\n `arXiv:1504.07999 <https://arxiv.org/abs/1504.07999>`_\n \"\"\"\n\n def __init__(self, interactions: Union[List, np.array]) -> None:\n \"\"\"Create IQP circuit.\n\n Args:\n interactions: input n-by-n symetric matrix.\n\n Raises:\n CircuitError: if the inputs is not as symetric matrix.\n \"\"\"\n num_qubits = len(interactions)\n inner = QuantumCircuit(num_qubits)\n interactions = np.array(interactions)\n if not np.allclose(interactions, interactions.transpose()):\n raise CircuitError(\"The interactions matrix is not symetric\")\n\n a_str = np.array_str(interactions)\n a_str.replace('\\n', ';')\n name = \"iqp:\" + a_str.replace('\\n', ';')\n super().__init__(num_qubits, name=name)\n\n inner.h(range(num_qubits))\n for i in range(num_qubits):\n for j in range(i+1, num_qubits):\n if interactions[i][j] % 4 != 0:\n inner.cu1(interactions[i][j] * np.pi / 2, i, j)\n\n for i in range(num_qubits):\n if interactions[i][i] % 8 != 0:\n inner.u1(interactions[i][i] * np.pi / 8, i)\n\n inner.h(range(num_qubits))\n all_qubits = self.qubits\n self.append(inner, all_qubits, label=name)\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Two-pulse single-qubit gate.\"\"\"\n\nimport numpy\nfrom qiskit.circuit.controlledgate import ControlledGate\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister\n\n\nclass U3Gate(Gate):\n r\"\"\"Generic single-qubit rotation gate with 3 Euler angles.\n\n Implemented using two X90 pulses on IBM Quantum systems:\n\n .. math::\n U3(\\theta, \\phi, \\lambda) = RZ(\\phi) RX(-\\frac{\\pi}{2}) RZ(\\theta) RX(\\frac{\\pi}{2}) RZ(\\lambda)\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌───────────┐\n q_0: ┤ U3(ϴ,φ,λ) ├\n └───────────┘\n\n **Matrix Representation:**\n\n .. math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n U3(\\theta, \\phi, \\lambda) =\n \\begin{pmatrix}\n \\cos(\\th) & -e^{i\\lambda}\\sin(\\th) \\\\\n e^{i\\phi}\\sin(\\th) & e^{i(\\phi+\\lambda)}\\cos(\\th)\n \\end{pmatrix}\n\n **Examples:**\n\n .. math::\n\n U3(\\theta, -\\frac{\\pi}{2}, \\frac{\\pi}{2}) = RX(\\theta)\n\n .. math::\n\n U3(\\theta, 0, 0) = RY(\\theta)\n \"\"\"\n\n def __init__(self, theta, phi, lam, label=None):\n \"\"\"Create new U3 gate.\"\"\"\n super().__init__('u3', 1, [theta, phi, lam], label=label)\n\n def inverse(self):\n r\"\"\"Return inverted U3 gate.\n\n :math:`U3(\\theta,\\phi,\\lambda)^{\\dagger} = U3(-\\theta,-\\lambda,-\\phi)`\n \"\"\"\n return U3Gate(-self.params[0], -self.params[2], -self.params[1])\n\n def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):\n \"\"\"Return a (multi-)controlled-U3 gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n if num_ctrl_qubits == 1:\n gate = CU3Gate(*self.params, label=label, ctrl_state=ctrl_state)\n gate.base_gate.label = self.label\n return gate\n return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)\n\n def to_matrix(self):\n \"\"\"Return a Numpy.array for the U3 gate.\"\"\"\n theta, phi, lam = self.params\n theta, phi, lam = float(theta), float(phi), float(lam)\n return numpy.array([\n [\n numpy.cos(theta / 2),\n -numpy.exp(1j * lam) * numpy.sin(theta / 2)\n ],\n [\n numpy.exp(1j * phi) * numpy.sin(theta / 2),\n numpy.exp(1j * (phi + lam)) * numpy.cos(theta / 2)\n ]\n ], dtype=complex)\n\n\nclass CU3Meta(type):\n \"\"\"A metaclass to ensure that Cu3Gate and CU3Gate are of the same type.\n\n Can be removed when Cu3Gate gets removed.\n \"\"\"\n @classmethod\n def __instancecheck__(mcs, inst):\n return type(inst) in {CU3Gate, Cu3Gate} # pylint: disable=unidiomatic-typecheck\n\n\nclass CU3Gate(ControlledGate, metaclass=CU3Meta):\n r\"\"\"Controlled-U3 gate (3-parameter two-qubit gate).\n\n This is a controlled version of the U3 gate (generic single qubit rotation).\n It is restricted to 3 parameters, and so cannot cover generic two-qubit\n controlled gates.\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n q_0: ──────■──────\n ┌─────┴─────┐\n q_1: ┤ U3(ϴ,φ,λ) ├\n └───────────┘\n\n **Matrix representation:**\n\n .. 
math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n CU3(\\theta, \\phi, \\lambda)\\ q_0, q_1 =\n I \\otimes |0\\rangle\\langle 0| +\n U3(\\theta,\\phi,\\lambda) \\otimes |1\\rangle\\langle 1| =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & \\cos(\\th) & 0 & -e^{i\\lambda}\\sin(\\th) \\\\\n 0 & 0 & 1 & 0 \\\\\n 0 & e^{i\\phi}\\sin(\\th) & 0 & e^{i(\\phi+\\lambda)}\\cos(\\th)\n \\end{pmatrix}\n\n .. note::\n\n In Qiskit's convention, higher qubit indices are more significant\n (little endian convention). In many textbooks, controlled gates are\n presented with the assumption of more significant qubits as control,\n which in our case would be q_1. Thus a textbook matrix for this\n gate will be:\n\n .. parsed-literal::\n ┌───────────┐\n q_0: ┤ U3(ϴ,φ,λ) ├\n └─────┬─────┘\n q_1: ──────■──────\n\n .. math::\n\n CU3(\\theta, \\phi, \\lambda)\\ q_1, q_0 =\n |0\\rangle\\langle 0| \\otimes I +\n |1\\rangle\\langle 1| \\otimes U3(\\theta,\\phi,\\lambda) =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 \\\\\n 0 & 0 & \\cos(\\th) & -e^{i\\lambda}\\sin(\\th) \\\\\n 0 & 0 & e^{i\\phi}\\sin(\\th) & e^{i(\\phi+\\lambda)}\\cos(\\th)\n \\end{pmatrix}\n \"\"\"\n\n def __init__(self, theta, phi, lam, label=None, ctrl_state=None):\n \"\"\"Create new CU3 gate.\"\"\"\n super().__init__('cu3', 2, [theta, phi, lam], num_ctrl_qubits=1,\n label=label, ctrl_state=ctrl_state)\n self.base_gate = U3Gate(theta, phi, lam)\n\n def _define(self):\n \"\"\"\n gate cu3(theta,phi,lambda) c, t\n { u1((lambda+phi)/2) c;\n u1((lambda-phi)/2) t;\n cx c,t;\n u3(-theta/2,0,-(phi+lambda)/2) t;\n cx c,t;\n u3(theta/2,phi,0) t;\n }\n \"\"\"\n from .u1 import U1Gate\n from .x import CXGate # pylint: disable=cyclic-import\n definition = []\n q = QuantumRegister(2, 'q')\n rule = [\n (U1Gate((self.params[2] + self.params[1]) / 2), [q[0]], []),\n (U1Gate((self.params[2] - self.params[1]) / 2), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (U3Gate(-self.params[0] / 2, 0, -(self.params[1] + self.params[2]) / 2), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (U3Gate(self.params[0] / 2, self.params[1], 0), [q[1]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition\n\n def inverse(self):\n r\"\"\"Return inverted CU3 gate.\n\n :math:`CU3(\\theta,\\phi,\\lambda)^{\\dagger} = CU3(-\\theta,-\\lambda,-\\phi)`\n \"\"\"\n return CU3Gate(-self.params[0], -self.params[2], -self.params[1])\n\n\nclass Cu3Gate(CU3Gate, metaclass=CU3Meta):\n \"\"\"The deprecated CU3Gate class.\"\"\"\n\n def __init__(self, theta, phi, lam):\n import warnings\n warnings.warn('The class Cu3Gate is deprecated as of 0.14.0, and '\n 'will be removed no earlier than 3 months after that release date. '\n 'You should use the class CU3Gate instead.',\n DeprecationWarning, stacklevel=2)\n super().__init__(theta, phi, lam)\n\n\ndef _generate_gray_code(num_bits):\n \"\"\"Generate the gray code for ``num_bits`` bits.\"\"\"\n if num_bits <= 0:\n raise ValueError('Cannot generate the gray code for less than 1 bit.')\n result = [0]\n for i in range(num_bits):\n result += [x + 2**i for x in reversed(result)]\n return [format(x, '0%sb' % num_bits) for x in result]\n\n\ndef _gray_code_chain(q, num_ctrl_qubits, gate):\n \"\"\"Apply the gate to the last qubit in the register ``q``, controlled on all\n preceding qubits. 
This function uses the gray code to propagate down to the last qubit.\n\n Ported and adapted from Aqua (github.com/Qiskit/qiskit-aqua),\n commit 769ca8d, file qiskit/aqua/circuits/gates/multi_control_u1_gate.py.\n \"\"\"\n from .x import CXGate\n\n rule = []\n q_controls, q_target = q[:num_ctrl_qubits], q[num_ctrl_qubits]\n gray_code = _generate_gray_code(num_ctrl_qubits)\n last_pattern = None\n\n for pattern in gray_code:\n if '1' not in pattern:\n continue\n if last_pattern is None:\n last_pattern = pattern\n # find left most set bit\n lm_pos = list(pattern).index('1')\n\n # find changed bit\n comp = [i != j for i, j in zip(pattern, last_pattern)]\n if True in comp:\n pos = comp.index(True)\n else:\n pos = None\n if pos is not None:\n if pos != lm_pos:\n rule.append(\n (CXGate(), [q_controls[pos], q_controls[lm_pos]], [])\n )\n else:\n indices = [i for i, x in enumerate(pattern) if x == '1']\n for idx in indices[1:]:\n rule.append(\n (CXGate(), [q_controls[idx], q_controls[lm_pos]], [])\n )\n # check parity\n if pattern.count('1') % 2 == 0:\n # inverse\n rule.append(\n (gate.inverse(), [q_controls[lm_pos], q_target], [])\n )\n else:\n rule.append(\n (gate, [q_controls[lm_pos], q_target], [])\n )\n last_pattern = pattern\n\n return rule\n" ]
[ [ "numpy.array_str", "numpy.array" ], [ "numpy.exp", "numpy.cos", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BenjaminETaylor/bjsfm
[ "a952183f5acca8139a1dd8ab2191c8dd3dc14710" ]
[ "tests/test_data.py" ]
[ "import numpy as np\n\n\n####################################################################################################################\n# geometry inputs\n####################################################################################################################\nDIAMETER = 0.15 # hole diameter\nSTEP_DIST = 0.25 # distance to test away from hole edge (must be >0 for tests to pass)\n\n####################################################################################################################\n# test coverage inputs\n####################################################################################################################\nNUM_POINTS = 500 # number of points to test around circumference\n\n####################################################################################################################\n# material inputs\n####################################################################################################################\n# Hexcel 8552 IM7 Unidirectional\n# ref: https://www.wichita.edu/research/NIAR/Documents/Qual-CAM-RP-2009-015-Rev-B-Hexcel-8552-IM7-MPDR-04.16.19.pdf\n# E1c[RTD] = 20.04 Msi\n# E2c[RTD] = 1.41 Msi\n# nu12[RTD] = 0.356\n# nu21[RTD] = 0.024\n# G12[RTD] = 0.68 Msi\n# CPT = 0.0072 in\n# QUASI [25/50/25], [45/0/-45/90]2s\n# HARD [50/40/10], [0/45/0/90/0/-45/0/45/0/-45]s\n# SOFT [10/80/10], [45/-45/0/45/-45/90/45/-45/45/-45]s\nQUASI = np.array(\n [[988374.5, 316116.9, 0.],\n [316116.9, 988374.5, 0.],\n [0., 0., 336128.8]]\n)\nHARD = np.array(\n [[1841084.0, 330697.9, 0.],\n [330697.9, 758748.5, 0.],\n [0., 0., 355712.8]]\n)\nSOFT = np.array(\n [[1042123.5, 588490.7, 0.],\n [588490.7, 1042123.5, 0.],\n [0., 0., 613505.6]]\n)\n\nQUASI_INV = np.linalg.inv(QUASI)\nHARD_INV = np.linalg.inv(HARD)\nSOFT_INV = np.linalg.inv(SOFT)\n\nQUASI_THICK = 0.0072*16 # 0.1152\nHARD_THICK = 0.0072*20 # 0.144\nSOFT_THICK = 0.0072*20 # 0.144\n\nE_QUASI = 1/(QUASI_INV[0, 0]*QUASI_THICK)\nG_QUASI = 1/(QUASI_INV[2, 2]*QUASI_THICK)\nE_HARD = 1/(HARD_INV[0, 0]*HARD_THICK)\nG_HARD = 1/(HARD_INV[2, 2]*HARD_THICK)\nE_SOFT = 1/(SOFT_INV[0, 0]*SOFT_THICK)\nG_SOFT = 1/(SOFT_INV[2, 2]*SOFT_THICK)\n\n####################################################################################################################\n# strength inputs & strain allowables\n####################################################################################################################\n# Hexcel 8552 IM7 Unidirectional\n# ref: https://www.wichita.edu/research/NIAR/Documents/NCP-RP-2009-028-Rev-B-HEXCEL-8552-IM7-Uni-SAR-4-16-2019.pdf\n# mean values (minimum of ETW, RTD and CTD where available)\nSHEAR_STRN = 16.56e3/0.68e6\nQUASI_UNT = 99.35e3/E_QUASI\nQUASI_UNC = 57.68e3/E_QUASI\n\nHARD_UNT = 174.18/E_HARD\nHARD_UNC = 79.42/E_HARD\n\nSOFT_UNT = 54.17/E_HARD\nSOFT_UNC = 40.61/E_HARD\n\n####################################################################################################################\n# test point inputs\n####################################################################################################################\n# to match the original BJSFM output, points to test must be equally spaced around hole, starting at zero degrees\n# there must be two rows of points; one at the hole boundary, and another at step distance\nX_POINTS = [r * np.cos(theta) for r, theta in\n zip([DIAMETER / 2] * NUM_POINTS, np.linspace(0, 2 * np.pi, num=NUM_POINTS, endpoint=False))]\nX_POINTS += [r * np.cos(theta) for r, theta in\n zip([DIAMETER / 2 + STEP_DIST] * NUM_POINTS, 
np.linspace(0, 2 * np.pi, num=NUM_POINTS, endpoint=False))]\nY_POINTS = [r * np.sin(theta) for r, theta in\n zip([DIAMETER / 2] * NUM_POINTS, np.linspace(0, 2 * np.pi, num=NUM_POINTS, endpoint=False))]\nY_POINTS += [r * np.sin(theta) for r, theta in\n zip([DIAMETER / 2 + STEP_DIST] * NUM_POINTS, np.linspace(0, 2 * np.pi, num=NUM_POINTS, endpoint=False))]\n\n\n\n\n" ]
[ [ "numpy.linspace", "numpy.linalg.inv", "numpy.cos", "numpy.sin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ReJackTion/heart_pred
[ "286358bc56beed96032b5a59f29b4aa49badfbb2" ]
[ "app.py" ]
[ "from flask import Flask, render_template, request\nimport jsonify\nimport requests\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport sklearn\nfrom sklearn.preprocessing import StandardScaler\n\n\napp = Flask(__name__, template_folder='templates')\nmodel = pickle.load(open('heart_model.pkl', 'rb'))\n\[email protected]('/',methods=['GET'])\ndef Home():\n return render_template('index.html')\n\n\nscaler = StandardScaler()\ndf_num = pd.read_csv('Datasets/numerical_data.csv')\n\n\[email protected](\"/predict\", methods=['POST'])\ndef predict():\n if request.method == 'POST':\n \n age = int(request.form['age'])\n sex = int(request.form['sex'])\n cp = int(request.form['cp'])\n exang = int(request.form['exang'])\n trestbps = float(request.form['trestbps'])\n thalach = float(request.form['thalach'])\n\n if sex==0:\n sex_0 = 1\n sex_1 = 0\n elif sex==1:\n sex_0 = 0\n sex_1 = 1\n \n if cp==0:\n cp_0 = 1\n cp_1 = 0\n cp_2 = 0\n cp_3 = 0\n elif cp==1:\n cp_0 = 0\n cp_1 = 1\n cp_2 = 0\n cp_3 = 0\n elif cp==2:\n cp_0 = 0\n cp_1 = 0\n cp_2 = 1\n cp_3 = 0\n elif cp==3:\n cp_0 = 0\n cp_1 = 0\n cp_2 = 0\n cp_3 = 1\n \n if exang==0:\n exang_0 = 1\n exang_1 = 0\n elif exang==1:\n exang_0 = 0\n exang_1 = 1\n \n df_num.loc[-1] = [age, trestbps, thalach]\n scaled_data = scaler.fit_transform(df_num)\n scaled_num = scaled_data[-1,:]\n \n output = model.predict([[scaled_num[0], scaled_num[1], scaled_num[2], sex_0, sex_1, cp_0, cp_1, cp_2, cp_3, exang_0, exang_1]])\n \n if output==1:\n return render_template('index.html',prediction_text=\"Warning, you are in high risk of having heart disease!\")\n else:\n return render_template('index.html',prediction_text=\"Congratulations, you are in low risk of having heart disease:)\")\n else:\n return render_template('index.html')\n\nif __name__==\"__main__\":\n app.run(debug=True)\n\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
DL-RenJian/retinanet-keras
[ "2d9ae0a89107f12ffcb6a1f2aa76d86503db96eb" ]
[ "get_dr_txt.py" ]
[ "import os\r\n\r\nimport numpy as np\r\nfrom keras.applications.imagenet_utils import preprocess_input\r\nfrom keras.layers import Input\r\nfrom PIL import Image\r\n\r\nfrom retinanet import Retinanet\r\nfrom utils.utils import BBoxUtility, letterbox_image, retinanet_correct_boxes\r\nfrom tqdm import tqdm\r\n\r\n\r\nclass mAP_Retinanet(Retinanet):\r\n #---------------------------------------------------#\r\n # 检测图片\r\n #---------------------------------------------------#\r\n def detect_image(self,image_id,image):\r\n self.confidence = 0.01\r\n self.bbox_util._nms_thresh = 0.5\r\n f = open(\"./input/detection-results/\"+image_id+\".txt\",\"w\") \r\n image_shape = np.array(np.shape(image)[0:2])\r\n crop_img,x_offset,y_offset = letterbox_image(image, [self.model_image_size[0],self.model_image_size[1]])\r\n photo = np.array(crop_img,dtype = np.float64)\r\n\r\n # 图片预处理,归一化\r\n photo = preprocess_input(np.reshape(photo,[1,self.model_image_size[0],self.model_image_size[1],self.model_image_size[2]]))\r\n preds = self.retinanet_model.predict(photo)\r\n # 将预测结果进行解码\r\n results = self.bbox_util.detection_out(preds,self.prior,confidence_threshold=self.confidence)\r\n if len(results[0])<=0:\r\n return image\r\n results = np.array(results)\r\n\r\n # 筛选出其中得分高于confidence的框\r\n det_label = results[0][:, 5]\r\n det_conf = results[0][:, 4]\r\n det_xmin, det_ymin, det_xmax, det_ymax = results[0][:, 0], results[0][:, 1], results[0][:, 2], results[0][:, 3]\r\n \r\n top_indices = [i for i, conf in enumerate(det_conf) if conf >= self.confidence]\r\n top_conf = det_conf[top_indices]\r\n top_label_indices = det_label[top_indices].tolist()\r\n top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(det_xmin[top_indices],-1),np.expand_dims(det_ymin[top_indices],-1),np.expand_dims(det_xmax[top_indices],-1),np.expand_dims(det_ymax[top_indices],-1)\r\n \r\n # 去掉灰条\r\n boxes = retinanet_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.model_image_size[0],self.model_image_size[1]]),image_shape)\r\n\r\n for i, c in enumerate(top_label_indices):\r\n predicted_class = self.class_names[int(c)]\r\n score = str(top_conf[i])\r\n\r\n top, left, bottom, right = boxes[i]\r\n f.write(\"%s %s %s %s %s %s\\n\" % (predicted_class, score[:6], str(int(left)), str(int(top)), str(int(right)),str(int(bottom))))\r\n\r\n f.close()\r\n return \r\n\r\nretinanet = mAP_Retinanet()\r\nimage_ids = open('VOCdevkit/VOC2007/ImageSets/Main/test.txt').read().strip().split()\r\n\r\nif not os.path.exists(\"./input\"):\r\n os.makedirs(\"./input\")\r\nif not os.path.exists(\"./input/detection-results\"):\r\n os.makedirs(\"./input/detection-results\")\r\nif not os.path.exists(\"./input/images-optional\"):\r\n os.makedirs(\"./input/images-optional\")\r\n\r\n\r\nfor image_id in tqdm(image_ids):\r\n image_path = \"./VOCdevkit/VOC2007/JPEGImages/\"+image_id+\".jpg\"\r\n image = Image.open(image_path)\r\n # image.save(\"./input/images-optional/\"+image_id+\".jpg\")\r\n retinanet.detect_image(image_id,image)\r\n # print(image_id,\" done!\")\r\n \r\n\r\nprint(\"Conversion completed!\")\r\n" ]
[ [ "numpy.reshape", "numpy.array", "numpy.expand_dims", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yancie-yjr/StreamYOLO
[ "46eec034fb1a2f3b1ca211c9dc9703014da75ca3" ]
[ "exps/model/dfp_pafpn.py" ]
[ "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom exps.model.darknet import CSPDarknet\nfrom yolox.models.network_blocks import BaseConv, CSPLayer, DWConv\n\n\nclass DFPPAFPN(nn.Module):\n \"\"\"\n YOLOv3 model. Darknet 53 is the default backbone of this model.\n \"\"\"\n\n def __init__(\n self,\n depth=1.0,\n width=1.0,\n in_features=(\"dark3\", \"dark4\", \"dark5\"),\n in_channels=[256, 512, 1024],\n depthwise=False,\n act=\"silu\",\n ):\n super().__init__()\n self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)\n self.in_features = in_features\n self.in_channels = in_channels\n Conv = DWConv if depthwise else BaseConv\n\n self.lateral_conv0 = BaseConv(\n int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act\n )\n self.C3_p4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n ) # cat\n\n self.reduce_conv1 = BaseConv(\n int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act\n )\n self.C3_p3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[0] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv2 = Conv(\n int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act\n )\n self.C3_n3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv1 = Conv(\n int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act\n )\n self.C3_n4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[2] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n self.jian2 = Conv(\n in_channels=int(in_channels[0] * width),\n out_channels=int(in_channels[0] * width) // 2,\n ksize=1,\n stride=1,\n act=act,\n )\n\n self.jian1 = Conv(\n in_channels=int(in_channels[1] * width),\n out_channels=int(in_channels[1] * width) // 2,\n ksize=1,\n stride=1,\n act=act,\n )\n\n self.jian0 = Conv(\n in_channels=int(in_channels[2] * width),\n out_channels=int(in_channels[2] * width) // 2,\n ksize=1,\n stride=1,\n act=act,\n )\n \n\n\n def off_forward(self, input):\n \"\"\"\n Args:\n inputs: input images.\n\n Returns:\n Tuple[Tensor]: FPN feature.\n \"\"\"\n\n\n # backbone\n rurrent_out_features = self.backbone(torch.split(input, 3, dim=1)[0])\n rurrent_features = [rurrent_out_features[f] for f in self.in_features]\n [rurrent_x2, rurrent_x1, rurrent_x0] = rurrent_features\n\n rurrent_fpn_out0 = self.lateral_conv0(rurrent_x0) # 1024->512/32\n rurrent_f_out0 = F.interpolate(rurrent_fpn_out0, size=rurrent_x1.shape[2:4], mode='nearest') # 512/16\n rurrent_f_out0 = torch.cat([rurrent_f_out0, rurrent_x1], 1) # 512->1024/16\n rurrent_f_out0 = self.C3_p4(rurrent_f_out0) # 1024->512/16\n\n rurrent_fpn_out1 = self.reduce_conv1(rurrent_f_out0) # 512->256/16\n rurrent_f_out1 = F.interpolate(rurrent_fpn_out1, size=rurrent_x2.shape[2:4], mode='nearest') # 256/8\n rurrent_f_out1 = torch.cat([rurrent_f_out1, rurrent_x2], 1) # 256->512/8\n rurrent_pan_out2 = self.C3_p3(rurrent_f_out1) # 512->256/8\n\n rurrent_p_out1 = self.bu_conv2(rurrent_pan_out2) # 256->256/16\n rurrent_p_out1 = torch.cat([rurrent_p_out1, rurrent_fpn_out1], 1) # 256->512/16\n rurrent_pan_out1 = self.C3_n3(rurrent_p_out1) # 512->512/16\n\n 
rurrent_p_out0 = self.bu_conv1(rurrent_pan_out1) # 512->512/32\n rurrent_p_out0 = torch.cat([rurrent_p_out0, rurrent_fpn_out0], 1) # 512->1024/32\n rurrent_pan_out0 = self.C3_n4(rurrent_p_out0) # 1024->1024/32\n\n #####\n\n\n support_out_features = self.backbone(torch.split(input, 3, dim=1)[1])\n support_features = [support_out_features[f] for f in self.in_features]\n [support_x2, support_x1, support_x0] = support_features\n\n support_fpn_out0 = self.lateral_conv0(support_x0) # 1024->512/32\n support_f_out0 = F.interpolate(support_fpn_out0, size=support_x1.shape[2:4], mode='nearest') # 512/16\n support_f_out0 = torch.cat([support_f_out0, support_x1], 1) # 512->1024/16\n support_f_out0 = self.C3_p4(support_f_out0) # 1024->512/16\n\n support_fpn_out1 = self.reduce_conv1(support_f_out0) # 512->256/16\n support_f_out1 = F.interpolate(support_fpn_out1, size=support_x2.shape[2:4], mode='nearest') # 256/8\n support_f_out1 = torch.cat([support_f_out1, support_x2], 1) # 256->512/8\n support_pan_out2 = self.C3_p3(support_f_out1) # 512->256/8\n\n support_p_out1 = self.bu_conv2(support_pan_out2) # 256->256/16\n support_p_out1 = torch.cat([support_p_out1, support_fpn_out1], 1) # 256->512/16\n support_pan_out1 = self.C3_n3(support_p_out1) # 512->512/16\n\n support_p_out0 = self.bu_conv1(support_pan_out1) # 512->512/32\n support_p_out0 = torch.cat([support_p_out0, support_fpn_out0], 1) # 512->1024/32\n support_pan_out0 = self.C3_n4(support_p_out0) # 1024->1024/32\n\n # 0.5 channel\n pan_out2 = torch.cat([self.jian2(rurrent_pan_out2), self.jian2(support_pan_out2)], dim=1) + rurrent_pan_out2\n pan_out1 = torch.cat([self.jian1(rurrent_pan_out1), self.jian1(support_pan_out1)], dim=1) + rurrent_pan_out1\n pan_out0 = torch.cat([self.jian0(rurrent_pan_out0), self.jian0(support_pan_out0)], dim=1) + rurrent_pan_out0\n\n\n outputs = (pan_out2, pan_out1, pan_out0)\n\n return outputs\n\n def online_forward(self, input, buffer=None, node='star'):\n \"\"\"\n Args:\n inputs: input images.\n\n Returns:\n Tuple[Tensor]: FPN feature.\n \"\"\"\n\n\n # backbone\n rurrent_out_features = self.backbone(input)\n rurrent_features = [rurrent_out_features[f] for f in self.in_features]\n [rurrent_x2, rurrent_x1, rurrent_x0] = rurrent_features\n\n rurrent_fpn_out0 = self.lateral_conv0(rurrent_x0) # 1024->512/32\n rurrent_f_out0 = F.interpolate(rurrent_fpn_out0, size=rurrent_x1.shape[2:4], mode='nearest') # 512/16\n rurrent_f_out0 = torch.cat([rurrent_f_out0, rurrent_x1], 1) # 512->1024/16\n rurrent_f_out0 = self.C3_p4(rurrent_f_out0) # 1024->512/16\n\n rurrent_fpn_out1 = self.reduce_conv1(rurrent_f_out0) # 512->256/16\n rurrent_f_out1 = F.interpolate(rurrent_fpn_out1, size=rurrent_x2.shape[2:4], mode='nearest') # 256/8\n rurrent_f_out1 = torch.cat([rurrent_f_out1, rurrent_x2], 1) # 256->512/8\n rurrent_pan_out2 = self.C3_p3(rurrent_f_out1) # 512->256/8\n\n rurrent_p_out1 = self.bu_conv2(rurrent_pan_out2) # 256->256/16\n rurrent_p_out1 = torch.cat([rurrent_p_out1, rurrent_fpn_out1], 1) # 256->512/16\n rurrent_pan_out1 = self.C3_n3(rurrent_p_out1) # 512->512/16\n\n rurrent_p_out0 = self.bu_conv1(rurrent_pan_out1) # 512->512/32\n rurrent_p_out0 = torch.cat([rurrent_p_out0, rurrent_fpn_out0], 1) # 512->1024/32\n rurrent_pan_out0 = self.C3_n4(rurrent_p_out0) # 1024->1024/32\n\n #####\n if node=='star':\n pan_out2 = torch.cat([self.jian2(rurrent_pan_out2), self.jian2(rurrent_pan_out2)], dim=1) + rurrent_pan_out2\n pan_out1 = torch.cat([self.jian1(rurrent_pan_out1), self.jian1(rurrent_pan_out1)], dim=1) + rurrent_pan_out1\n pan_out0 = 
torch.cat([self.jian0(rurrent_pan_out0), self.jian0(rurrent_pan_out0)], dim=1) + rurrent_pan_out0\n elif node=='buffer':\n\n [support_pan_out2, support_pan_out1, support_pan_out0] = buffer\n\n pan_out2 = torch.cat([self.jian2(rurrent_pan_out2), self.jian2(support_pan_out2)], dim=1) + rurrent_pan_out2\n pan_out1 = torch.cat([self.jian1(rurrent_pan_out1), self.jian1(support_pan_out1)], dim=1) + rurrent_pan_out1\n pan_out0 = torch.cat([self.jian0(rurrent_pan_out0), self.jian0(support_pan_out0)], dim=1) + rurrent_pan_out0\n\n\n outputs = (pan_out2, pan_out1, pan_out0)\n\n buffer_ = (rurrent_pan_out2,rurrent_pan_out1,rurrent_pan_out0)\n\n return outputs, buffer_\n \n\n\n def forward(self, input, buffer=None, mode='off_pipe'):\n\n if mode=='off_pipe':\n # Glops caculate mode\n if input.size()[1] == 3:\n input = torch.cat([input, input], dim=1)\n output = self.off_forward(input)\n # offline train mode\n elif input.size()[1] == 6:\n output = self.off_forward(input)\n \n return output\n \n elif mode=='on_pipe':\n # online star state\n if buffer == None:\n output, buffer_ = self.online_forward(input, node='star')\n # online inference\n else:\n assert len(buffer) == 3\n assert input.size()[1] == 3\n output, buffer_ = self.online_forward(input, buffer=buffer, node='buffer')\n \n return output, buffer_\n\n\n\n\n" ]
[ [ "torch.split", "torch.nn.functional.interpolate", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
geyang/dmc_generalization
[ "051fefc470931810c856e89629d481b06cc8d530" ]
[ "dmc_gen/utils.py" ]
[ "import torch\nimport numpy as np\nimport os\nimport json\nimport random\nfrom dmc_gen.algorithms import augmentations\nfrom datetime import datetime\n\n\nclass Eval(object):\n\tdef __init__(self, *models):\n\t\tself.models = models\n\n\tdef __enter__(self):\n\t\tself.prev_states = []\n\t\tfor model in self.models:\n\t\t\tself.prev_states.append(model.is_training)\n\t\t\tmodel.train(False)\n\n\tdef __exit__(self, *args):\n\t\tfor model, state in zip(self.models, self.prev_states):\n\t\t\tmodel.train(state)\n\t\treturn False\n\n\ndef soft_update_params(net, target_net, tau):\n\tfor param, target_param in zip(net.parameters(), target_net.parameters()):\n\t\ttarget_param.data.copy_(\n\t\t\ttau * param.data + (1 - tau) * target_param.data\n\t\t)\n\n\ndef set_seed_everywhere(seed):\n\ttorch.manual_seed(seed)\n\tif torch.cuda.is_available():\n\t\ttorch.cuda.manual_seed_all(seed)\n\tnp.random.seed(seed)\n\trandom.seed(seed)\n\n\ndef write_info(args, fp):\n\tdata = {\n\t\t'timestamp': str(datetime.now()),\n\t\t'args': str(args)\n\t}\n\twith open(fp, 'w') as f:\n\t\tjson.dump(data, f)\n\n\ndef make_dir(dir_path):\n\ttry:\n\t\tos.makedirs(dir_path)\n\texcept OSError:\n\t\tpass\n\treturn dir_path\n\n\ndef array_init(capacity, dims, dtype):\n\t\"\"\"Preallocate array in memory\"\"\"\n\tchunks = 20\n\tzero_dim_size = int(capacity / chunks)\n\tarray = np.zeros((capacity, *dims), dtype=dtype)\n\ttemp = np.ones((zero_dim_size, *dims), dtype=dtype)\n\t\n\tfor i in range(chunks):\n\t\tarray[i*zero_dim_size:(i+1)*zero_dim_size] = temp\n\n\treturn array\n\n\nclass ReplayBuffer(object):\n\t\"\"\"Buffer to store environment transitions\"\"\"\n\tdef __init__(self, obs_shape, action_shape, capacity, batch_size):\n\t\tself.capacity = capacity\n\t\tself.batch_size = batch_size\n\n\t\tself.obs = array_init(capacity, obs_shape, dtype=np.uint8)\n\t\tself.next_obs = array_init(capacity, obs_shape, dtype=np.uint8)\n\t\tself.actions = np.empty((capacity, *action_shape), dtype=np.float32)\n\t\tself.rewards = np.empty((capacity, 1), dtype=np.float32)\n\t\tself.not_dones = np.empty((capacity, 1), dtype=np.float32)\n\n\t\tself.idx = 0\n\t\tself.full = False\n\n\tdef add(self, obs, action, reward, next_obs, done):\n\t\tnp.copyto(self.obs[self.idx], obs)\n\t\tnp.copyto(self.actions[self.idx], action)\n\t\tnp.copyto(self.rewards[self.idx], reward)\n\t\tnp.copyto(self.next_obs[self.idx], next_obs)\n\t\tnp.copyto(self.not_dones[self.idx], not done)\n\n\t\tself.idx = (self.idx + 1) % self.capacity\n\t\tself.full = self.full or self.idx == 0\n\n\tdef _get_idxs(self, n=None):\n\t\tif n is None:\n\t\t\tn = self.batch_size\n\t\treturn np.random.randint(\n\t\t\t0, self.capacity if self.full else self.idx, size=n\n\t\t)\n\n\tdef sample_soda(self, n=None):\n\t\treturn torch.as_tensor(self.obs[self._get_idxs(n)]).cuda().float()\n\n\tdef sample_curl(self, n=None):\n\t\tidxs = self._get_idxs(n)\n\n\t\tobs = torch.as_tensor(self.obs[idxs]).cuda().float()\n\t\tactions = torch.as_tensor(self.actions[idxs]).cuda()\n\t\trewards = torch.as_tensor(self.rewards[idxs]).cuda()\n\t\tnext_obs = torch.as_tensor(self.next_obs[idxs]).cuda().float()\n\t\tnot_dones = torch.as_tensor(self.not_dones[idxs]).cuda()\n\n\t\tpos = augmentations.random_crop(obs.clone())\n\t\tobs = augmentations.random_crop(obs)\n\t\tnext_obs = augmentations.random_crop(next_obs)\n\n\t\treturn obs, actions, rewards, next_obs, not_dones, pos\n\n\tdef sample(self, n=None):\n\t\tidxs = self._get_idxs(n)\n\n\t\tobs = torch.as_tensor(self.obs[idxs]).cuda().float()\n\t\tactions = 
torch.as_tensor(self.actions[idxs]).cuda()\n\t\trewards = torch.as_tensor(self.rewards[idxs]).cuda()\n\t\tnext_obs = torch.as_tensor(self.next_obs[idxs]).cuda().float()\n\t\tnot_dones = torch.as_tensor(self.not_dones[idxs]).cuda()\n\n\t\tobs = augmentations.random_crop(obs)\n\t\tnext_obs = augmentations.random_crop(next_obs)\n\n\t\treturn obs, actions, rewards, next_obs, not_dones\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "numpy.ones", "torch.as_tensor", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "numpy.copyto", "numpy.zeros", "numpy.empty", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pseudoPixels/SourceFlow
[ "e1738c8b838c71b18598ceca29d7c487c76f876b", "e1738c8b838c71b18598ceca29d7c487c76f876b", "019112147a3e6c208c3846ef699fb6ec24a45c30" ]
[ "app_collaborative_sci_workflow/pipeline_modules/Stats_LogisticRegression/Stats_LogisticRegression_main.py", "webpage/lib/python3.5/site-packages/dask/array/tests/test_reductions.py", "webpage/lib/python3.5/site-packages/scipy/special/tests/test_precompute_gammainc.py" ]
[ "\n\nimport matplotlib\n#matplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport pandas as pd\n\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n\n# with open(csv_dataset_path) as module_1_inp:\n# \tlines = module_1_inp.readlines()\n#\n# #only read the first line (in case it has multiples)\n# csv_dataset_path = lines[0]\n\ndataset = pd.read_csv(csv_dataset_path)\n\n# import pip\n# r = pip.main(['install', 'scikit-learn'])\n#\n# print (\"=================================\")\n# print (r)\n# print (\"=================================\")\n\n\n\n# Fitting Logistic Regression to the Training set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(penalty='l2',random_state = randState)\n\n\n#cols = [featureSet]\nX = dataset[featureSet]\ny = dataset[target]\n\n\n\n# Applying k-Fold Cross Validation\nfrom sklearn.model_selection import cross_val_score\naccuracies = cross_val_score(estimator = classifier, X=X , y=y , cv = n)\n\n\nwith open(logisticRegression_classification_stats, \"w+\") as thisModuleOutput:\n thisModuleOutput.write(\"Logistic Regression:\\n========================================\\n\")\n thisModuleOutput.write(\"Classification Accuracy: \" + str( round(accuracies.mean()*100,2) ) + \" %\" )\n\n\n#print(\"Logistic Regression:\\n Accuracy:\", accuracies.mean(), \"+/-\", accuracies.std(),\"\\n\")\n\n\n\n\n\n", "from __future__ import absolute_import, division, print_function\n\nimport pytest\npytest.importorskip('numpy')\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq as _assert_eq\nfrom dask.core import get_deps\nfrom dask.context import set_options\n\nimport numpy as np\n# temporary until numpy functions migrated\ntry:\n from numpy import nanprod\nexcept ImportError: # pragma: no cover\n import dask.array.numpy_compat as npcompat\n nanprod = npcompat.nanprod\n\n\ndef assert_eq(a, b):\n _assert_eq(a, b, equal_nan=True)\n\n\ndef same_keys(a, b):\n def key(k):\n if isinstance(k, str):\n return (k, -1, -1, -1)\n else:\n return k\n return sorted(a.dask, key=key) == sorted(b.dask, key=key)\n\n\ndef reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):\n assert_eq(da_func(darr), np_func(narr))\n assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))\n assert same_keys(da_func(darr), da_func(darr))\n assert same_keys(da_func(darr, keepdims=True), da_func(darr, keepdims=True))\n if use_dtype:\n assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))\n assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))\n assert same_keys(da_func(darr, dtype='i8'), da_func(darr, dtype='i8'))\n if split_every:\n a1 = da_func(darr, split_every=2)\n a2 = da_func(darr, split_every={0: 2})\n assert same_keys(a1, a2)\n assert_eq(a1, np_func(narr))\n assert_eq(a2, np_func(narr))\n assert_eq(da_func(darr, keepdims=True, split_every=2),\n np_func(narr, keepdims=True))\n\n\[email protected]('dtype', ['f4', 'i4'])\ndef test_reductions_1D(dtype):\n x = np.arange(5).astype(dtype)\n a = da.from_array(x, chunks=(2,))\n\n reduction_1d_test(da.sum, a, np.sum, x)\n reduction_1d_test(da.prod, a, np.prod, x)\n reduction_1d_test(da.mean, a, np.mean, x)\n reduction_1d_test(da.var, a, np.var, x)\n reduction_1d_test(da.std, a, np.std, x)\n reduction_1d_test(da.min, a, np.min, x, False)\n reduction_1d_test(da.max, a, np.max, x, False)\n reduction_1d_test(da.any, a, np.any, x, False)\n reduction_1d_test(da.all, a, np.all, x, False)\n\n 
reduction_1d_test(da.nansum, a, np.nansum, x)\n reduction_1d_test(da.nanprod, a, nanprod, x)\n reduction_1d_test(da.nanmean, a, np.mean, x)\n reduction_1d_test(da.nanvar, a, np.var, x)\n reduction_1d_test(da.nanstd, a, np.std, x)\n reduction_1d_test(da.nanmin, a, np.nanmin, x, False)\n reduction_1d_test(da.nanmax, a, np.nanmax, x, False)\n\n\ndef reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True,\n split_every=True):\n assert_eq(da_func(darr), np_func(narr))\n assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))\n assert_eq(da_func(darr, axis=0), np_func(narr, axis=0))\n assert_eq(da_func(darr, axis=1), np_func(narr, axis=1))\n assert_eq(da_func(darr, axis=1, keepdims=True),\n np_func(narr, axis=1, keepdims=True))\n assert_eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))\n\n assert same_keys(da_func(darr, axis=1), da_func(darr, axis=1))\n assert same_keys(da_func(darr, axis=(1, 0)), da_func(darr, axis=(1, 0)))\n\n if use_dtype:\n assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))\n assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))\n\n if split_every:\n a1 = da_func(darr, split_every=4)\n a2 = da_func(darr, split_every={0: 2, 1: 2})\n assert same_keys(a1, a2)\n assert_eq(a1, np_func(narr))\n assert_eq(a2, np_func(narr))\n assert_eq(da_func(darr, keepdims=True, split_every=4),\n np_func(narr, keepdims=True))\n assert_eq(da_func(darr, axis=0, split_every=2), np_func(narr, axis=0))\n assert_eq(da_func(darr, axis=0, keepdims=True, split_every=2),\n np_func(narr, axis=0, keepdims=True))\n assert_eq(da_func(darr, axis=1, split_every=2), np_func(narr, axis=1))\n assert_eq(da_func(darr, axis=1, keepdims=True, split_every=2),\n np_func(narr, axis=1, keepdims=True))\n\n\n@pytest.mark.parametrize('dtype', ['f4', 'i4'])\ndef test_reductions_2D(dtype):\n x = np.arange(1, 122).reshape((11, 11)).astype(dtype)\n a = da.from_array(x, chunks=(4, 4))\n\n b = a.sum(keepdims=True)\n assert b._keys() == [[(b.name, 0, 0)]]\n\n reduction_2d_test(da.sum, a, np.sum, x)\n reduction_2d_test(da.prod, a, np.prod, x)\n reduction_2d_test(da.mean, a, np.mean, x)\n reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo\n reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo\n reduction_2d_test(da.min, a, np.min, x, False)\n reduction_2d_test(da.max, a, np.max, x, False)\n reduction_2d_test(da.any, a, np.any, x, False)\n reduction_2d_test(da.all, a, np.all, x, False)\n\n reduction_2d_test(da.nansum, a, np.nansum, x)\n reduction_2d_test(da.nanprod, a, nanprod, x)\n reduction_2d_test(da.nanmean, a, np.mean, x)\n reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo\n reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo\n reduction_2d_test(da.nanmin, a, np.nanmin, x, False)\n reduction_2d_test(da.nanmax, a, np.nanmax, x, False)\n\n\n@pytest.mark.parametrize(['dfunc', 'func'],\n [(da.argmin, np.argmin), (da.argmax, np.argmax),\n (da.nanargmin, np.nanargmin),\n (da.nanargmax, np.nanargmax)])\ndef test_arg_reductions(dfunc, func):\n x = np.random.random((10, 10, 10))\n a = da.from_array(x, chunks=(3, 4, 5))\n\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n with set_options(split_every=2):\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n\n pytest.raises(ValueError, lambda: dfunc(a, 3))\n 
pytest.raises(TypeError, lambda: dfunc(a, (0, 1)))\n\n x2 = np.arange(10)\n a2 = da.from_array(x2, chunks=3)\n assert_eq(dfunc(a2), func(x2))\n assert_eq(dfunc(a2, 0), func(x2, 0))\n assert_eq(dfunc(a2, 0, split_every=2), func(x2, 0))\n\n\n@pytest.mark.parametrize(['dfunc', 'func'],\n [(da.nanargmin, np.nanargmin),\n (da.nanargmax, np.nanargmax)])\ndef test_nanarg_reductions(dfunc, func):\n x = np.random.random((10, 10, 10))\n x[5] = np.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n with pytest.raises(ValueError):\n dfunc(a, 1).compute()\n\n with pytest.raises(ValueError):\n dfunc(a, 2).compute()\n\n x[:] = np.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n with pytest.raises(ValueError):\n dfunc(a).compute()\n\n\ndef test_reductions_2D_nans():\n # chunks are a mix of some/all/no NaNs\n x = np.full((4, 4), np.nan)\n x[:2, :2] = np.array([[1, 2], [3, 4]])\n x[2, 2] = 5\n x[3, 3] = 6\n a = da.from_array(x, chunks=(2, 2))\n\n reduction_2d_test(da.sum, a, np.sum, x, False, False)\n reduction_2d_test(da.prod, a, np.prod, x, False, False)\n reduction_2d_test(da.mean, a, np.mean, x, False, False)\n reduction_2d_test(da.var, a, np.var, x, False, False)\n reduction_2d_test(da.std, a, np.std, x, False, False)\n reduction_2d_test(da.min, a, np.min, x, False, False)\n reduction_2d_test(da.max, a, np.max, x, False, False)\n reduction_2d_test(da.any, a, np.any, x, False, False)\n reduction_2d_test(da.all, a, np.all, x, False, False)\n\n reduction_2d_test(da.nansum, a, np.nansum, x, False, False)\n reduction_2d_test(da.nanprod, a, nanprod, x, False, False)\n reduction_2d_test(da.nanmean, a, np.nanmean, x, False, False)\n reduction_2d_test(da.nanvar, a, np.nanvar, x, False, False)\n reduction_2d_test(da.nanstd, a, np.nanstd, x, False, False)\n reduction_2d_test(da.nanmin, a, np.nanmin, x, False, False)\n reduction_2d_test(da.nanmax, a, np.nanmax, x, False, False)\n\n assert_eq(da.argmax(a), np.argmax(x))\n assert_eq(da.argmin(a), np.argmin(x))\n assert_eq(da.nanargmax(a), np.nanargmax(x))\n assert_eq(da.nanargmin(a), np.nanargmin(x))\n assert_eq(da.argmax(a, axis=0), np.argmax(x, axis=0))\n assert_eq(da.argmin(a, axis=0), np.argmin(x, axis=0))\n assert_eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))\n assert_eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))\n assert_eq(da.argmax(a, axis=1), np.argmax(x, axis=1))\n assert_eq(da.argmin(a, axis=1), np.argmin(x, axis=1))\n assert_eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))\n assert_eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))\n\n\ndef test_moment():\n def moment(x, n, axis=None):\n return (((x - x.mean(axis=axis, keepdims=True)) ** n).sum(axis=axis) /\n np.ones_like(x).sum(axis=axis))\n\n # Poorly conditioned\n x = np.array([1., 2., 3.] 
* 10).reshape((3, 10)) + 1e8\n a = da.from_array(x, chunks=5)\n assert_eq(a.moment(2), moment(x, 2))\n assert_eq(a.moment(3), moment(x, 3))\n assert_eq(a.moment(4), moment(x, 4))\n\n x = np.arange(1, 122).reshape((11, 11)).astype('f8')\n a = da.from_array(x, chunks=(4, 4))\n assert_eq(a.moment(4, axis=1), moment(x, 4, axis=1))\n assert_eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))\n\n # Tree reduction\n assert_eq(a.moment(order=4, split_every=4), moment(x, 4))\n assert_eq(a.moment(order=4, axis=0, split_every=4), moment(x, 4, axis=0))\n assert_eq(a.moment(order=4, axis=1, split_every=4), moment(x, 4, axis=1))\n\n\ndef test_reductions_with_negative_axes():\n x = np.random.random((4, 4, 4))\n a = da.from_array(x, chunks=2)\n\n assert_eq(a.argmin(axis=-1), x.argmin(axis=-1))\n assert_eq(a.argmin(axis=-1, split_every=2), x.argmin(axis=-1))\n\n assert_eq(a.sum(axis=-1), x.sum(axis=-1))\n assert_eq(a.sum(axis=(0, -1)), x.sum(axis=(0, -1)))\n\n\ndef test_nan():\n x = np.array([[1, np.nan, 3, 4],\n [5, 6, 7, np.nan],\n [9, 10, 11, 12]])\n d = da.from_array(x, chunks=(2, 2))\n\n assert_eq(np.nansum(x), da.nansum(d))\n assert_eq(np.nansum(x, axis=0), da.nansum(d, axis=0))\n assert_eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))\n assert_eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))\n assert_eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))\n assert_eq(np.nanvar(x), da.nanvar(d))\n assert_eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))\n assert_eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))\n assert_eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))\n assert_eq(nanprod(x), da.nanprod(d))\n\n\ndef test_0d_array():\n x = da.mean(da.ones(4, chunks=4), axis=0).compute()\n y = np.mean(np.ones(4))\n assert type(x) == type(y)\n\n x = da.sum(da.zeros(4, chunks=1)).compute()\n y = np.sum(np.zeros(4))\n assert type(x) == type(y)\n\n\ndef test_reduction_on_scalar():\n x = da.from_array(np.array(1.0), chunks=())\n assert (x == x).all()\n\n\ndef assert_max_deps(x, n, eq=True):\n dependencies, dependents = get_deps(x.dask)\n if eq:\n assert max(map(len, dependencies.values())) == n\n else:\n assert max(map(len, dependencies.values())) <= n\n\n\ndef test_tree_reduce_depth():\n # 2D\n x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))\n thresh = {0: 2, 1: 3}\n assert_max_deps(x.sum(split_every=thresh), 2 * 3)\n assert_max_deps(x.sum(axis=0, split_every=thresh), 2)\n assert_max_deps(x.sum(axis=1, split_every=thresh), 3)\n assert_max_deps(x.sum(split_every=20), 20, False)\n assert_max_deps(x.sum(axis=0, split_every=20), 4)\n assert_max_deps(x.sum(axis=1, split_every=20), 6)\n\n # 3D\n x = da.from_array(np.arange(11 * 22 * 29).reshape((11, 22, 29)), chunks=(3, 4, 5))\n thresh = {0: 2, 1: 3, 2: 4}\n assert_max_deps(x.sum(split_every=thresh), 2 * 3 * 4)\n assert_max_deps(x.sum(axis=0, split_every=thresh), 2)\n assert_max_deps(x.sum(axis=1, split_every=thresh), 3)\n assert_max_deps(x.sum(axis=2, split_every=thresh), 4)\n assert_max_deps(x.sum(axis=(0, 1), split_every=thresh), 2 * 3)\n assert_max_deps(x.sum(axis=(0, 2), split_every=thresh), 2 * 4)\n assert_max_deps(x.sum(axis=(1, 2), split_every=thresh), 3 * 4)\n assert_max_deps(x.sum(split_every=20), 20, False)\n assert_max_deps(x.sum(axis=0, split_every=20), 4)\n assert_max_deps(x.sum(axis=1, split_every=20), 6)\n assert_max_deps(x.sum(axis=2, split_every=20), 6)\n assert_max_deps(x.sum(axis=(0, 1), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(0, 2), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(1, 2), 
split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(0, 1), split_every=40), 4 * 6)\n assert_max_deps(x.sum(axis=(0, 2), split_every=40), 4 * 6)\n assert_max_deps(x.sum(axis=(1, 2), split_every=40), 6 * 6)\n\n\ndef test_tree_reduce_set_options():\n x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))\n with set_options(split_every={0: 2, 1: 3}):\n assert_max_deps(x.sum(), 2 * 3)\n assert_max_deps(x.sum(axis=0), 2)\n\n\ndef test_reduction_names():\n x = da.ones(5, chunks=(2,))\n assert x.sum().name.startswith('sum')\n assert 'max' in x.max().name.split('-')[0]\n assert x.var().name.startswith('var')\n assert x.all().name.startswith('all')\n assert any(k[0].startswith('nansum') for k in da.nansum(x).dask)\n assert x.mean().name.startswith('mean')\n", "from __future__ import division, print_function, absolute_import\n\nfrom numpy.testing import dec\n\nfrom scipy._lib._testutils import xslow\nfrom scipy.special._testutils import MissingModule, check_version\nfrom scipy.special._mptestutils import (Arg, mp_assert_allclose,\n assert_mpmath_equal)\nfrom scipy.special._precompute.gammainc_asy import (compute_g, compute_alpha,\n compute_d)\nfrom scipy.special._precompute.gammainc_data import gammainc\n\ntry:\n import sympy\nexcept ImportError:\n sympy = MissingModule('sympy')\n\ntry:\n import mpmath as mp\nexcept ImportError:\n try:\n from sympy import mpmath as mp\n except ImportError:\n mp = MissingModule('mpmath')\n\n\n@check_version(mp, '0.19')\ndef test_g():\n # Test data for the g_k. See DLMF 5.11.4.\n with mp.workdps(30):\n g = [mp.mpf(1), mp.mpf(1)/12, mp.mpf(1)/288,\n -mp.mpf(139)/51840, -mp.mpf(571)/2488320,\n mp.mpf(163879)/209018880, mp.mpf(5246819)/75246796800]\n mp_assert_allclose(compute_g(7), g)\n\n\n@dec.slow\n@check_version(sympy, '0.7')\ndef test_alpha():\n # Test data for the alpha_k. 
See DLMF 8.12.14.\n with mp.workdps(30):\n alpha = [mp.mpf(0), mp.mpf(1), mp.mpf(1)/3, mp.mpf(1)/36,\n -mp.mpf(1)/270, mp.mpf(1)/4320, mp.mpf(1)/17010,\n -mp.mpf(139)/5443200, mp.mpf(1)/204120]\n mp_assert_allclose(compute_alpha(9), alpha)\n\n\n@xslow\n@check_version(sympy, '0.7')\ndef test_d():\n # Compare the d_{k, n} to the results in appendix F of [1].\n # \n # Sources\n # -------\n # [1] DiDonato and Morris, Computation of the Incomplete Gamma\n # Function Ratios and their Inverse, ACM Transactions on\n # Mathematical Software, 1986.\n\n with mp.workdps(50):\n dataset = [(0, 0, -mp.mpf('0.333333333333333333333333333333')),\n (0, 12, mp.mpf('0.102618097842403080425739573227e-7')),\n (1, 0, -mp.mpf('0.185185185185185185185185185185e-2')),\n (1, 12, mp.mpf('0.119516285997781473243076536700e-7')),\n (2, 0, mp.mpf('0.413359788359788359788359788360e-2')),\n (2, 12, -mp.mpf('0.140925299108675210532930244154e-7')),\n (3, 0, mp.mpf('0.649434156378600823045267489712e-3')),\n (3, 12, -mp.mpf('0.191111684859736540606728140873e-7')),\n (4, 0, -mp.mpf('0.861888290916711698604702719929e-3')),\n (4, 12, mp.mpf('0.288658297427087836297341274604e-7')),\n (5, 0, -mp.mpf('0.336798553366358150308767592718e-3')),\n (5, 12, mp.mpf('0.482409670378941807563762631739e-7')),\n (6, 0, mp.mpf('0.531307936463992223165748542978e-3')),\n (6, 12, -mp.mpf('0.882860074633048352505085243179e-7')),\n (7, 0, mp.mpf('0.344367606892377671254279625109e-3')),\n (7, 12, -mp.mpf('0.175629733590604619378669693914e-6')),\n (8, 0, -mp.mpf('0.652623918595309418922034919727e-3')),\n (8, 12, mp.mpf('0.377358774161109793380344937299e-6')),\n (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')),\n (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))]\n d = compute_d(10, 13)\n res = []\n for k, n, std in dataset:\n res.append(d[k][n])\n std = map(lambda x: x[2], dataset)\n mp_assert_allclose(res, std)\n\n\n@check_version(mp, '0.19')\ndef test_gammainc():\n # Quick check that the gammainc in\n # special._precompute.gammainc_data agrees with mpmath's\n # gammainc.\n assert_mpmath_equal(gammainc,\n lambda a, x: mp.gammainc(a, b=x, regularized=True),\n [Arg(0, 100, inclusive_a=False), Arg(0, 100)],\n nan_ok=False, rtol=1e-17, n=50, dps=50)\n" ]
[ [ "matplotlib.pyplot.switch_backend", "pandas.read_csv", "sklearn.model_selection.cross_val_score", "sklearn.linear_model.LogisticRegression" ], [ "numpy.nanmax", "numpy.nanargmax", "numpy.random.random", "numpy.nanprod", "numpy.ones_like", "numpy.arange", "numpy.nanmin", "numpy.nanvar", "numpy.full", "numpy.ones", "numpy.argmax", "numpy.argmin", "numpy.nanargmin", "numpy.nansum", "numpy.nanmean", "numpy.nanstd", "numpy.array", "numpy.zeros" ], [ "scipy.special._precompute.gammainc_asy.compute_g", "scipy.special._testutils.check_version", "scipy.special._precompute.gammainc_asy.compute_d", "scipy.special._mptestutils.Arg", "scipy.special._testutils.MissingModule", "scipy.special._precompute.gammainc_asy.compute_alpha", "scipy.special._mptestutils.mp_assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Datagatherer2357/Gareth-Duffy-GMIT-Problem-sets-Python-scripts
[ "f14f62e5b76dc27c04a616d3fe73abfb5052fc3f" ]
[ "numpy1.py" ]
[ "# Gareth Duffy 13-3-2018\n# basic numpy WTOP\n\nimport numpy as np\nx = np.arange(1, 10)\nprint(x)\n\nprint(x ** 2)\n\nM = x.reshape((3, 3)) # multidimensional\nprint(M)\n\nprint(M.T) # transpose\n\nprint(np.dot(M, [5, 6, 7])) # matrix vector\n\nprint(np.linalg.eigvals(M)) # eigenvalues decompostion\n" ]
[ [ "numpy.arange", "numpy.dot", "numpy.linalg.eigvals" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mozilla-GitHub-Standards/ee089678ec78c1555fc3f1eff2962a95ae31dcf042f14e37b019b4fbb4b13288
[ "5d4d89070dc8da54a716bb3d0db7f394334b3325" ]
[ "src/lpcnet.py" ]
[ "#!/usr/bin/python3\n'''Copyright (c) 2018 Mozilla\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nimport math\nfrom keras.models import Model\nfrom keras.layers import Input, GRU, CuDNNGRU, Dense, Embedding, Reshape, Concatenate, Lambda, Conv1D, Multiply, Add, Bidirectional, MaxPooling1D, Activation\nfrom keras import backend as K\nfrom keras.initializers import Initializer\nfrom keras.callbacks import Callback\nfrom mdense import MDense\nimport numpy as np\nimport h5py\nimport sys\n\nframe_size = 160\npcm_bits = 8\nembed_size = 128\npcm_levels = 2**pcm_bits\n\nclass Sparsify(Callback):\n def __init__(self, t_start, t_end, interval, density):\n super(Sparsify, self).__init__()\n self.batch = 0\n self.t_start = t_start\n self.t_end = t_end\n self.interval = interval\n self.final_density = density\n\n def on_batch_end(self, batch, logs=None):\n #print(\"batch number\", self.batch)\n self.batch += 1\n if self.batch < self.t_start or ((self.batch-self.t_start) % self.interval != 0 and self.batch < self.t_end):\n #print(\"don't constrain\");\n pass\n else:\n #print(\"constrain\");\n layer = self.model.get_layer('gru_a')\n w = layer.get_weights()\n p = w[1]\n nb = p.shape[1]//p.shape[0]\n N = p.shape[0]\n #print(\"nb = \", nb, \", N = \", N);\n #print(p.shape)\n #print (\"density = \", density)\n for k in range(nb):\n density = self.final_density[k]\n if self.batch < self.t_end:\n r = 1 - (self.batch-self.t_start)/(self.t_end - self.t_start)\n density = 1 - (1-self.final_density[k])*(1 - r*r*r)\n A = p[:, k*N:(k+1)*N]\n A = A - np.diag(np.diag(A))\n A = np.transpose(A, (1, 0))\n L=np.reshape(A, (N, N//16, 16))\n S=np.sum(L*L, axis=-1)\n SS=np.sort(np.reshape(S, (-1,)))\n thresh = SS[round(N*N//16*(1-density))]\n mask = (S>=thresh).astype('float32');\n mask = np.repeat(mask, 16, axis=1)\n mask = np.minimum(1, mask + np.diag(np.ones((N,))))\n mask = np.transpose(mask, (1, 0))\n p[:, k*N:(k+1)*N] = p[:, k*N:(k+1)*N]*mask\n #print(thresh, np.mean(mask))\n w[1] = p\n layer.set_weights(w)\n \n\nclass PCMInit(Initializer):\n def __init__(self, gain=.1, seed=None):\n self.gain = gain\n self.seed = seed\n\n def __call__(self, shape, dtype=None):\n num_rows = 1\n for dim in shape[:-1]:\n num_rows *= dim\n num_cols = shape[-1]\n flat_shape = (num_rows, num_cols)\n if self.seed is not None:\n 
np.random.seed(self.seed)\n a = np.random.uniform(-1.7321, 1.7321, flat_shape)\n #a[:,0] = math.sqrt(12)*np.arange(-.5*num_rows+.5,.5*num_rows-.4)/num_rows\n #a[:,1] = .5*a[:,0]*a[:,0]*a[:,0]\n a = a + np.reshape(math.sqrt(12)*np.arange(-.5*num_rows+.5,.5*num_rows-.4)/num_rows, (num_rows, 1))\n return self.gain * a\n\n def get_config(self):\n return {\n 'gain': self.gain,\n 'seed': self.seed\n }\n\ndef new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, training=False, use_gpu=True):\n pcm = Input(shape=(None, 3))\n feat = Input(shape=(None, nb_used_features))\n pitch = Input(shape=(None, 1))\n dec_feat = Input(shape=(None, 128))\n dec_state1 = Input(shape=(rnn_units1,))\n dec_state2 = Input(shape=(rnn_units2,))\n\n padding = 'valid' if training else 'same'\n fconv1 = Conv1D(128, 3, padding=padding, activation='tanh', name='feature_conv1')\n fconv2 = Conv1D(128, 3, padding=padding, activation='tanh', name='feature_conv2')\n\n embed = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_sig')\n cpcm = Reshape((-1, embed_size*3))(embed(pcm))\n\n pembed = Embedding(256, 64, name='embed_pitch')\n cat_feat = Concatenate()([feat, Reshape((-1, 64))(pembed(pitch))])\n \n cfeat = fconv2(fconv1(cat_feat))\n\n fdense1 = Dense(128, activation='tanh', name='feature_dense1')\n fdense2 = Dense(128, activation='tanh', name='feature_dense2')\n\n cfeat = fdense2(fdense1(cfeat))\n \n rep = Lambda(lambda x: K.repeat_elements(x, frame_size, 1))\n\n if use_gpu:\n rnn = CuDNNGRU(rnn_units1, return_sequences=True, return_state=True, name='gru_a')\n rnn2 = CuDNNGRU(rnn_units2, return_sequences=True, return_state=True, name='gru_b')\n else:\n rnn = GRU(rnn_units1, return_sequences=True, return_state=True, recurrent_activation=\"sigmoid\", reset_after=True, name='gru_a')\n rnn2 = GRU(rnn_units2, return_sequences=True, return_state=True, recurrent_activation=\"sigmoid\", reset_after=True, name='gru_b')\n\n rnn_in = Concatenate()([cpcm, rep(cfeat)])\n md = MDense(pcm_levels, activation='softmax', name='dual_fc')\n gru_out1, _ = rnn(rnn_in)\n gru_out2, _ = rnn2(Concatenate()([gru_out1, rep(cfeat)]))\n ulaw_prob = md(gru_out2)\n \n rnn.trainable=False\n rnn2.trainable=False\n md.trainable=False\n embed.trainable=False\n \n model = Model([pcm, feat, pitch], ulaw_prob)\n model.rnn_units1 = rnn_units1\n model.rnn_units2 = rnn_units2\n model.nb_used_features = nb_used_features\n model.frame_size = frame_size\n\n encoder = Model([feat, pitch], cfeat)\n \n dec_rnn_in = Concatenate()([cpcm, dec_feat])\n dec_gru_out1, state1 = rnn(dec_rnn_in, initial_state=dec_state1)\n dec_gru_out2, state2 = rnn2(Concatenate()([dec_gru_out1, dec_feat]), initial_state=dec_state2)\n dec_ulaw_prob = md(dec_gru_out2)\n\n decoder = Model([pcm, dec_feat, dec_state1, dec_state2], [dec_ulaw_prob, state1, state2])\n return model, encoder, decoder\n" ]
[ [ "numpy.diag", "numpy.random.seed", "numpy.reshape", "numpy.arange", "numpy.ones", "numpy.transpose", "numpy.random.uniform", "numpy.repeat", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jmontgom10/Mimir_pyPol
[ "cb45e78c5ee7b24233cc154c0f3666cd34e2420a" ]
[ "oldCode/03b_examineHWPbkgImages.py" ]
[ "# Allows the user to view the constructed HWP bacxground images. Shows all four\n# HWP rotations associated with a single IPPA angle\n#\n\nimport os\nimport sys\nimport numpy as np\nfrom astropy.io import ascii\nfrom astropy.table import Table as Table\nfrom astropy.table import Column as Column\nfrom astropy.convolution import convolve, convolve_fft, Gaussian2DKernel\nfrom astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats\nfrom photutils import detect_threshold, detect_sources\nfrom scipy.ndimage.filters import median_filter, gaussian_filter\nimport matplotlib.pyplot as plt\n\n# Add the AstroImage class\nimport astroimage as ai\n\n# Add the header handler to the BaseImage class\nfrom Mimir_header_handler import Mimir_header_handler\nai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)\nai.set_instrument('mimir')\n\n# This is the location of all PPOL reduction directory\nPPOL_dir = 'C:\\\\Users\\\\Jordan\\\\FITS_data\\\\Mimir_data\\\\PPOL_reduced\\\\201611'\n\n# Build the path to the S3_Asotrometry files\nS3_dir = os.path.join(PPOL_dir, 'S3_Astrometry')\n\n# This is the location where all pyPol data will be saved\npyPol_data = 'C:\\\\Users\\\\Jordan\\\\FITS_data\\\\Mimir_data\\\\pyPol_Reduced\\\\201611\\\\'\n\n# Build the path to the supersky directory\nbkgImagesDir = os.path.join(pyPol_data, 'bkgImages')\n\n# Read in the indexFile data and select the filenames\nindexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')\nfileIndex = Table.read(indexFile, format='csv')\n\n################################################################################\n# Determine which parts of the fileIndex pertain to science images\nuseFiles = np.where(fileIndex['USE'] == 1)\n\n# Cull the file index to only include files selected for use\nfileIndex = fileIndex[useFiles]\n\n# Group the index by GROUP_ID and IPPA\nfileIndexByGroup = fileIndex.group_by(['GROUP_ID', 'AB'])\n\n\n# Define a dictionary for translating HWP rotation into IPPA\nHWPlist = (\n 4*(np.arange(16, dtype=int).reshape((4,4))%4) +\n np.arange(4,dtype=int).reshape((4,1)) + 1\n)\nIPPAlist = np.array([0, 45, 90, 135])\nIPPA_to_HWP = dict(zip(IPPAlist, HWPlist))\n\ngroupDict = {}\n# Loop through each group\nfor group in fileIndexByGroup.groups:\n # Test if it was an ABBA/BAAB dither\n thisDither = str(np.unique(group['DITHER_TYPE'].data)[0])\n if thisDither != 'ABBA': continue\n\n thisGroupName = str(np.unique(group['OBJECT'].data)[0])\n thisGroupID = str(np.unique(group['GROUP_ID'].data)[0])\n ippaDict = {}\n # Loop through each IPPA/HWP pairing in this group\n for ippa, hwps in IPPA_to_HWP.items():\n # Loop through all the HWPs in this group and build a list of filenames\n hwpImageFiles = []\n for hwp in hwps:\n hwpFile = os.path.join(\n bkgImagesDir,\n '{}_G{}_HWP{}.fits'.format(thisGroupName, thisGroupID, str(hwp))\n )\n hwpImageFiles.append(hwpFile)\n\n # Store the HWPs in the corresponding ippaDict entry\n ippaDict[ippa] = hwpImageFiles\n\n # Add the IPPA dictionary to the groupDict\n groupKey = '{}_G{}'.format(thisGroupName, thisGroupID)\n groupDict[groupKey] = ippaDict\n\n# Grab all the keys and alphabetically sort them for consistency\ngroupKeyList = list(groupDict.keys())\ngroupKeyList.sort()\n\n################################################################################\n# Define a function to handle what sholud be done whenever a key is pressed\n################################################################################\ndef on_key(event):\n global groupDict, groupKeyList, IPPAlist, fig, 
IPPA_num, group_num\n global HWP_0_img, HWP_1_img, HWP_2_img, HWP_3_img\n global HWP_0_AxImg, HWP_1_AxImg, HWP_2_AxImg, HWP_3_AxImg\n # global prevTarget, thisTarget, nextTarget\n global HWP_0_Label, HWP_1_Label, HWP_2_Label, HWP_3_Label\n\n # Increment or decrement the image number\n if event.key == 'right' or event.key == 'left':\n if event.key == 'right':\n #Advance to the next IPPA\n IPPA_num += 1\n\n # If there are no more IPPAs left in this group, then move to the\n # next group.\n if IPPA_num > IPPAlist.size - 1:\n IPPA_num = 0\n group_num += 1\n\n # If there are no more groups left, reloop back to zero\n if group_num > (len(groupKeyList) - 1):\n group_num = 0\n\n if group_num < (1 - len(groupKeyList)):\n group_num = 0\n\n # Having incremented the group index and the IPPA index, it's time\n # to grab the current groupDict entry\n thisGroupKey = groupKeyList[group_num]\n thisIPPA = IPPAlist[IPPA_num]\n thisHWPfiles = groupDict[thisGroupKey][thisIPPA]\n\n # Loop through all the HWP files for this group/IPPA and read them in\n try:\n HWP_0_img = ai.reduced.ReducedScience.read(thisHWPfiles[0])\n except:\n HWP_0_img = ai.reduced.ReducedScience(np.ones(HWP_0_img.shape))\n\n try:\n HWP_1_img = ai.reduced.ReducedScience.read(thisHWPfiles[1])\n except:\n HWP_1_img = ai.reduced.ReducedScience(np.ones(HWP_1_img.shape))\n\n try:\n HWP_2_img = ai.reduced.ReducedScience.read(thisHWPfiles[2])\n except:\n HWP_2_img = ai.reduced.ReducedScience(np.ones(HWP_2_img.shape))\n\n try:\n HWP_3_img = ai.reduced.ReducedScience.read(thisHWPfiles[3])\n except:\n HWP_3_img = ai.reduced.ReducedScience(np.ones(HWP_3_img.shape))\n\n if event.key == 'left':\n #Advance to the previous IPPA\n IPPA_num -= 1\n\n # If there are no more IPPAs left in this group, then move to the\n # next group.\n if IPPA_num < 0:\n IPPA_num = IPPAlist.size - 1\n group_num -= 1\n\n # Having decremented the group index and the IPPA index, it's time\n # to grab the current groupDict entry\n thisGroupKey = groupKeyList[group_num]\n thisIPPA = IPPAlist[IPPA_num]\n thisHWPfiles = groupDict[thisGroupKey][thisIPPA]\n\n # Loop through all the HWP files for this group/IPPA and read them in\n try:\n HWP_0_img = ai.reduced.ReducedScience.read(thisHWPfiles[0])\n except:\n HWP_0_img = ai.reduced.ReducedScience(np.ones(HWP_0_img.shape))\n\n try:\n HWP_1_img = ai.reduced.ReducedScience.read(thisHWPfiles[1])\n except:\n HWP_1_img = ai.reduced.ReducedScience(np.ones(HWP_1_img.shape))\n\n try:\n HWP_2_img = ai.reduced.ReducedScience.read(thisHWPfiles[2])\n except:\n HWP_2_img = ai.reduced.ReducedScience(np.ones(HWP_2_img.shape))\n\n try:\n HWP_3_img = ai.reduced.ReducedScience.read(thisHWPfiles[3])\n except:\n HWP_3_img = ai.reduced.ReducedScience(np.ones(HWP_3_img.shape))\n\n ###############################\n # Update the displayed images\n ###############################\n # Display the new images\n HWP_0_AxImg.set_data(HWP_0_img.data)\n HWP_1_AxImg.set_data(HWP_1_img.data)\n HWP_2_AxImg.set_data(HWP_2_img.data)\n HWP_3_AxImg.set_data(HWP_3_img.data)\n\n # Update the annotation\n thisTitle = fig.suptitle('{}: IPPA {}'.format(thisGroupKey, thisIPPA))\n\n # Construct the label strings\n HWP_0_str, HWP_1_str, HWP_2_str, HWP_3_str = (\n ['HWP {}'.format(hwp) for hwp in IPPA_to_HWP[thisIPPA]]\n )\n\n # Update the labels\n HWP_0_Label.set_text(HWP_0_str)\n HWP_1_Label.set_text(HWP_1_str)\n HWP_2_Label.set_text(HWP_2_str)\n HWP_3_Label.set_text(HWP_3_str)\n\n ###############################\n # Update time series plot\n ###############################\n # Now plot the timeseries for this dataset\n 
# Grab the group ID for the current group/IPPA\n thisGroupID = int(thisGroupKey.split('_')[-1][1:])\n\n # Locate the MJD and background values for the current group/IPPA\n thisGroupIPPAbool = np.logical_and(\n fileIndex['GROUP_ID'] == thisGroupID,\n fileIndex['IPPA'] == thisIPPA\n )\n thisAbool = np.logical_and(\n thisGroupIPPAbool,\n fileIndex['AB'] == 'A'\n )\n thisBbool = np.logical_and(\n thisGroupIPPAbool,\n fileIndex['AB'] == 'B'\n )\n thisAinds = np.where(thisAbool)\n thisBinds = np.where(thisBbool)\n thisAmjd = fileIndex['MJD'][thisAinds]\n thisBmjd = fileIndex['MJD'][thisBinds]\n\n # Make an estimate of the first time stamp\n try:\n mjd0 = np.min([np.min(thisAmjd), np.min(thisBmjd)])\n except:\n mjd0 = 0\n\n thisAmjd -= mjd0\n thisBmjd -= mjd0\n thisAmjd *= 24*60*60\n thisBmjd *= 24*60*60\n thisAbkg = fileIndex['BACKGROUND'][thisAinds]\n thisBbkg = fileIndex['BACKGROUND'][thisBinds]\n\n # Identify filter\n thisFilter = np.unique(fileIndex[thisGroupIPPAbool]['FILTER'].data)[0]\n if thisFilter == 'H':\n ylims = (600, 2100)\n if thisFilter == 'Ks':\n ylims = (400, 1000)\n\n # Plot the background values\n ax4.cla()\n ax4.plot(thisBmjd, thisBbkg, marker='o', color='b')#, facecolor='b', edgecolor='k')\n ax4.plot(thisAmjd, thisAbkg, marker='o', color='r')#, facecolor='r', edgecolor='k')\n plt.setp(ax4.get_xticklabels(), fontsize = 6)\n plt.setp(ax4.get_yticklabels(), fontsize = 6)\n ax4.set_ylim((ylims))\n ax4.set_ylabel('Background Counts [ADU]')\n ax4.set_xlabel('Time [sec]')\n\n # Update the display\n fig.canvas.draw()\n\n\n#******************************************************************************\n# This script allows the user to examine the HWP background images from the pyPol reduction\n#******************************************************************************\nfig = plt.figure(figsize=(18,9))\n\n# Create the first axis and make the x-axis labels invisible\nax0 = plt.subplot(2,4,1)\nplt.setp(ax0.get_xticklabels(), visible = False)\nplt.setp(ax0.get_yticklabels(), fontsize = 6)\n\n# Create the second axis and make the x- and y-axis labels invisible\nax1 = plt.subplot(2,4,2, sharey=ax0, sharex=ax0)\nplt.setp(ax1.get_xticklabels(), visible = False)\nplt.setp(ax1.get_yticklabels(), visible = False)\n\n# Create the third axis and make both axis labels visible\nax2 = plt.subplot(2,4,5, sharey=ax0, sharex=ax0)\nplt.setp(ax2.get_xticklabels(), fontsize = 6)\nplt.setp(ax2.get_yticklabels(), fontsize = 6)\n\n# Create the fourth axis and make y-axis labels invisible\nax3 = plt.subplot(2,4,6, sharey=ax0, sharex=ax0)\nplt.setp(ax3.get_xticklabels(), fontsize = 6)\nplt.setp(ax3.get_yticklabels(), visible = False)\n\n# Create the final plot for the time-series\nax4 = plt.subplot(1,2,2)\nax4.yaxis.set_label_position('right')\nax4.tick_params(axis='y',\n labelleft=False, labelright=True,\n )\n\n# Rescale the figure and setup the spacing between images\nplt.subplots_adjust(left = 0.04, bottom = 0.04, right = 0.95, top = 0.96,\n wspace = 0.02, hspace = 0.02)\n\naxarr = [ax0, ax1, ax2, ax3, ax4]\n\n# Initialize the group and IPPA index at zero\nIPPA_num, group_num = 0, 0\n\n# Start by grabbing the corresponding group names and IPPAs for those indices\nthisGroupKey = groupKeyList[group_num]\nthisIPPA = IPPAlist[IPPA_num]\nthisHWPfiles = groupDict[thisGroupKey][thisIPPA]\n\n# Loop through all the HWP files for this group/IPPA and read them in\nHWP_0_img = ai.reduced.ReducedScience.read(thisHWPfiles[0])\nHWP_1_img = ai.reduced.ReducedScience.read(thisHWPfiles[1])\nHWP_2_img = 
ai.reduced.ReducedScience.read(thisHWPfiles[2])\nHWP_3_img = ai.reduced.ReducedScience.read(thisHWPfiles[3])\n\n# Populate each axis with its image\nHWP_0_AxImg = HWP_0_img.show(axes = axarr[0], cmap='viridis',\n vmin = 0.95, vmax = 1.05, noShow = True)\nHWP_1_AxImg = HWP_1_img.show(axes = axarr[1], cmap='viridis',\n vmin = 0.95, vmax = 1.05, noShow = True)\nHWP_2_AxImg = HWP_2_img.show(axes = axarr[2], cmap='viridis',\n vmin = 0.95, vmax = 1.05, noShow = True)\nHWP_3_AxImg = HWP_3_img.show(axes = axarr[3], cmap='viridis',\n vmin = 0.95, vmax = 1.05, noShow = True)\n\n# Now plot the timeseries for this dataset\n# Grab the group ID for the current group/IPPA\nthisGroupID = int(thisGroupKey.split('_')[-1][1:])\n\n# Locate the MJD and background values for the current group/IPPA\nthisGroupIPPAbool = np.logical_and(\n fileIndex['GROUP_ID'] == thisGroupID,\n fileIndex['IPPA'] == thisIPPA\n)\nthisAbool = np.logical_and(\n thisGroupIPPAbool,\n fileIndex['AB'] == 'A'\n)\nthisBbool = np.logical_and(\n thisGroupIPPAbool,\n fileIndex['AB'] == 'B'\n)\nthisAinds = np.where(thisAbool)\nthisBinds = np.where(thisBbool)\nthisAmjd = fileIndex['MJD'][thisAinds]\nthisBmjd = fileIndex['MJD'][thisBinds]\nmjd0 = np.min([np.min(thisAmjd), np.min(thisBmjd)])\nthisAmjd -= mjd0\nthisBmjd -= mjd0\nthisAmjd *= 24*60*60\nthisBmjd *= 24*60*60\nthisAbkg = fileIndex['BACKGROUND'][thisAinds]\nthisBbkg = fileIndex['BACKGROUND'][thisBinds]\n\n# Identify filter\nthisFilter = np.unique(fileIndex[thisGroupIPPAbool]['FILTER'].data)[0]\nif thisFilter == 'H':\n ylims = (600, 2100)\nif thisFilter == 'Ks':\n ylims = (400, 1000)\n\n# Plot the background values\nax4.plot(thisBmjd, thisBbkg, marker='o', color='b')#, facecolor='b', edgecolor='k')\nax4.plot(thisAmjd, thisAbkg, marker='o', color='r')#, facecolor='r', edgecolor='k')\nplt.setp(ax4.get_xticklabels(), fontsize = 6)\nplt.setp(ax4.get_yticklabels(), fontsize = 6)\nax4.set_ylim((ylims))\n\n# Add timeseries axis labels\nax4.set_ylabel('Background Counts [ADU]')\nax4.set_xlabel('Time [sec]')\n\n# Add some figure annotation\nthisTitle = fig.suptitle('{}: IPPA {}'.format(thisGroupKey, thisIPPA))\n\n# Construct the label strings\nHWP_0_str, HWP_1_str, HWP_2_str, HWP_3_str = (\n ['HWP {}'.format(hwp) for hwp in IPPA_to_HWP[thisIPPA]]\n)\n\n# Update the labels\nHWP_0_Label = axarr[0].text(20, 875, HWP_0_str,\n color = 'black', backgroundcolor = 'white', size = 'medium')\nHWP_1_Label = axarr[1].text(20, 875, HWP_1_str,\n color = 'black', backgroundcolor = 'white', size = 'medium')\nHWP_2_Label = axarr[2].text(20, 875, HWP_2_str,\n color = 'black', backgroundcolor = 'white', size = 'medium')\nHWP_3_Label = axarr[3].text(20, 875, HWP_3_str,\n color = 'black', backgroundcolor = 'white', size = 'medium')\n\n# Connect the event manager...\ncid1 = fig.canvas.mpl_connect('key_press_event', on_key)\n\n# NOW show the image (without continuing execution)\n# plt.ion()\nplt.show()\n# plt.ioff()\n#\n# pdb.set_trace()\n# Disconnect the event manager and close the figure\nfig.canvas.mpl_disconnect(cid1)\n\n# Close the plot\nplt.close()\n\nprint('Done!')\n" ]
[ [ "numpy.logical_and", "numpy.unique", "numpy.min", "numpy.arange", "numpy.ones", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust", "numpy.array", "numpy.where", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HeqingZhang/mmsegmentation
[ "93301b33d7b7634b018386681be3a640f5979957", "90d8038e909be9f2154b49d15f95a648ceb75120" ]
[ "mmseg/datasets/builder.py", "mmseg/models/backbones/cgnet.py" ]
[ "import copy\nimport platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader\nfrom torch.utils.data import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef _concat_dataset(cfg, default_args=None):\n \"\"\"Build :obj:`ConcatDataset by.\"\"\"\n from .dataset_wrappers import ConcatDataset\n img_dir = cfg['img_dir']\n ann_dir = cfg.get('ann_dir', None)\n split = cfg.get('split', None)\n num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1\n if ann_dir is not None:\n num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1\n else:\n num_ann_dir = 0\n if split is not None:\n num_split = len(split) if isinstance(split, (list, tuple)) else 1\n else:\n num_split = 0\n if num_img_dir > 1:\n assert num_img_dir == num_ann_dir or num_ann_dir == 0\n assert num_img_dir == num_split or num_split == 0\n else:\n assert num_split == num_ann_dir or num_ann_dir <= 1\n num_dset = max(num_split, num_img_dir)\n\n datasets = []\n for i in range(num_dset):\n data_cfg = copy.deepcopy(cfg)\n if isinstance(img_dir, (list, tuple)):\n data_cfg['img_dir'] = img_dir[i]\n if isinstance(ann_dir, (list, tuple)):\n data_cfg['ann_dir'] = ann_dir[i]\n if isinstance(split, (list, tuple)):\n data_cfg['split'] = split[i]\n datasets.append(build_dataset(data_cfg, default_args))\n\n return ConcatDataset(datasets)\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build datasets.\"\"\"\n from .dataset_wrappers import ConcatDataset, RepeatDataset\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(\n cfg.get('split', None), (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=False,\n pin_memory=True,\n dataloader_type='PoolDataLoader',\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n seed (int | None): Seed to be used. 
Default: None.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: False\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n assert dataloader_type in (\n 'DataLoader',\n 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'\n\n if dataloader_type == 'PoolDataLoader':\n dataloader = PoolDataLoader\n elif dataloader_type == 'DataLoader':\n dataloader = DataLoader\n\n data_loader = dataloader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Worker init func for dataloader.\n\n The seed of each worker equals num_workers * rank + worker_id + user_seed\n\n Args:\n worker_id (int): Worker id.\n num_workers (int): Number of workers.\n rank (int): The rank of current process.\n seed (int): The random seed to use.\n \"\"\"\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "import torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,\n constant_init, kaiming_init)\nfrom mmcv.runner import load_checkpoint\nfrom mmcv.utils.parrots_wrapper import _BatchNorm\n\nfrom mmseg.utils import get_root_logger\nfrom ..builder import BACKBONES\n\n\nclass GlobalContextExtractor(nn.Module):\n \"\"\"Global Context Extractor for CGNet.\n\n This class is employed to refine the joint feature of both local feature\n and surrounding context.\n\n Args:\n channel (int): Number of input feature channels.\n reduction (int): Reductions for global context extractor. Default: 16.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. 
Default: False.\n \"\"\"\n\n def __init__(self, channel, reduction=16, with_cp=False):\n super(GlobalContextExtractor, self).__init__()\n self.channel = channel\n self.reduction = reduction\n assert reduction >= 1 and channel >= reduction\n self.with_cp = with_cp\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel), nn.Sigmoid())\n\n def forward(self, x):\n\n def _inner_forward(x):\n num_batch, num_channel = x.size()[:2]\n y = self.avg_pool(x).view(num_batch, num_channel)\n y = self.fc(y).view(num_batch, num_channel, 1, 1)\n return x * y\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\nclass ContextGuidedBlock(nn.Module):\n \"\"\"Context Guided Block for CGNet.\n\n This class consists of four components: local feature extractor,\n surrounding feature extractor, joint feature extractor and global\n context extractor.\n\n Args:\n in_channels (int): Number of input feature channels.\n out_channels (int): Number of output feature channels.\n dilation (int): Dilation rate for surrounding context extractor.\n Default: 2.\n reduction (int): Reduction for global context extractor. Default: 16.\n skip_connect (bool): Add input to output or not. Default: True.\n downsample (bool): Downsample the input to 1/2 or not. Default: False.\n conv_cfg (dict): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN', requires_grad=True).\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='PReLU').\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. 
Default: False.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n dilation=2,\n reduction=16,\n skip_connect=True,\n downsample=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN', requires_grad=True),\n act_cfg=dict(type='PReLU'),\n with_cp=False):\n super(ContextGuidedBlock, self).__init__()\n self.with_cp = with_cp\n self.downsample = downsample\n\n channels = out_channels if downsample else out_channels // 2\n if 'type' in act_cfg and act_cfg['type'] == 'PReLU':\n act_cfg['num_parameters'] = channels\n kernel_size = 3 if downsample else 1\n stride = 2 if downsample else 1\n padding = (kernel_size - 1) // 2\n\n self.conv1x1 = ConvModule(\n in_channels,\n channels,\n kernel_size,\n stride,\n padding,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n\n self.f_loc = build_conv_layer(\n conv_cfg,\n channels,\n channels,\n kernel_size=3,\n padding=1,\n groups=channels,\n bias=False)\n self.f_sur = build_conv_layer(\n conv_cfg,\n channels,\n channels,\n kernel_size=3,\n padding=dilation,\n groups=channels,\n dilation=dilation,\n bias=False)\n\n self.bn = build_norm_layer(norm_cfg, 2 * channels)[1]\n self.activate = nn.PReLU(2 * channels)\n\n if downsample:\n self.bottleneck = build_conv_layer(\n conv_cfg,\n 2 * channels,\n out_channels,\n kernel_size=1,\n bias=False)\n\n self.skip_connect = skip_connect and not downsample\n self.f_glo = GlobalContextExtractor(out_channels, reduction, with_cp)\n\n def forward(self, x):\n\n def _inner_forward(x):\n out = self.conv1x1(x)\n loc = self.f_loc(out)\n sur = self.f_sur(out)\n\n joi_feat = torch.cat([loc, sur], 1) # the joint feature\n joi_feat = self.bn(joi_feat)\n joi_feat = self.activate(joi_feat)\n if self.downsample:\n joi_feat = self.bottleneck(joi_feat) # channel = out_channels\n # f_glo is employed to refine the joint feature\n out = self.f_glo(joi_feat)\n\n if self.skip_connect:\n return x + out\n else:\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\nclass InputInjection(nn.Module):\n \"\"\"Downsampling module for CGNet.\"\"\"\n\n def __init__(self, num_downsampling):\n super(InputInjection, self).__init__()\n self.pool = nn.ModuleList()\n for i in range(num_downsampling):\n self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))\n\n def forward(self, x):\n for pool in self.pool:\n x = pool(x)\n return x\n\n\n@BACKBONES.register_module()\nclass CGNet(nn.Module):\n \"\"\"CGNet backbone.\n\n A Light-weight Context Guided Network for Semantic Segmentation\n arXiv: https://arxiv.org/abs/1811.08201\n\n Args:\n in_channels (int): Number of input image channels. Normally 3.\n num_channels (tuple[int]): Numbers of feature channels at each stage.\n Default: (32, 64, 128).\n num_blocks (tuple[int]): Numbers of CG blocks at stage 1 and stage 2.\n Default: (3, 21).\n dilations (tuple[int]): Dilation rate for surrounding context\n extractors at stage 1 and stage 2. Default: (2, 4).\n reductions (tuple[int]): Reductions for global context extractors at\n stage 1 and stage 2. Default: (8, 16).\n conv_cfg (dict): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN', requires_grad=True).\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='PReLU').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. 
Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n \"\"\"\n\n def __init__(self,\n in_channels=3,\n num_channels=(32, 64, 128),\n num_blocks=(3, 21),\n dilations=(2, 4),\n reductions=(8, 16),\n conv_cfg=None,\n norm_cfg=dict(type='BN', requires_grad=True),\n act_cfg=dict(type='PReLU'),\n norm_eval=False,\n with_cp=False):\n\n super(CGNet, self).__init__()\n self.in_channels = in_channels\n self.num_channels = num_channels\n assert isinstance(self.num_channels, tuple) and len(\n self.num_channels) == 3\n self.num_blocks = num_blocks\n assert isinstance(self.num_blocks, tuple) and len(self.num_blocks) == 2\n self.dilations = dilations\n assert isinstance(self.dilations, tuple) and len(self.dilations) == 2\n self.reductions = reductions\n assert isinstance(self.reductions, tuple) and len(self.reductions) == 2\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n if 'type' in self.act_cfg and self.act_cfg['type'] == 'PReLU':\n self.act_cfg['num_parameters'] = num_channels[0]\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n\n cur_channels = in_channels\n self.stem = nn.ModuleList()\n for i in range(3):\n self.stem.append(\n ConvModule(\n cur_channels,\n num_channels[0],\n 3,\n 2 if i == 0 else 1,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg))\n cur_channels = num_channels[0]\n\n self.inject_2x = InputInjection(1) # down-sample for Input, factor=2\n self.inject_4x = InputInjection(2) # down-sample for Input, factor=4\n\n cur_channels += in_channels\n self.norm_prelu_0 = nn.Sequential(\n build_norm_layer(norm_cfg, cur_channels)[1],\n nn.PReLU(cur_channels))\n\n # stage 1\n self.level1 = nn.ModuleList()\n for i in range(num_blocks[0]):\n self.level1.append(\n ContextGuidedBlock(\n cur_channels if i == 0 else num_channels[1],\n num_channels[1],\n dilations[0],\n reductions[0],\n downsample=(i == 0),\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n with_cp=with_cp)) # CG block\n\n cur_channels = 2 * num_channels[1] + in_channels\n self.norm_prelu_1 = nn.Sequential(\n build_norm_layer(norm_cfg, cur_channels)[1],\n nn.PReLU(cur_channels))\n\n # stage 2\n self.level2 = nn.ModuleList()\n for i in range(num_blocks[1]):\n self.level2.append(\n ContextGuidedBlock(\n cur_channels if i == 0 else num_channels[2],\n num_channels[2],\n dilations[1],\n reductions[1],\n downsample=(i == 0),\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n with_cp=with_cp)) # CG block\n\n cur_channels = 2 * num_channels[2]\n self.norm_prelu_2 = nn.Sequential(\n build_norm_layer(norm_cfg, cur_channels)[1],\n nn.PReLU(cur_channels))\n\n def forward(self, x):\n output = []\n\n # stage 0\n inp_2x = self.inject_2x(x)\n inp_4x = self.inject_4x(x)\n for layer in self.stem:\n x = layer(x)\n x = self.norm_prelu_0(torch.cat([x, inp_2x], 1))\n output.append(x)\n\n # stage 1\n for i, layer in enumerate(self.level1):\n x = layer(x)\n if i == 0:\n down1 = x\n x = self.norm_prelu_1(torch.cat([x, down1, inp_4x], 1))\n output.append(x)\n\n # stage 2\n for i, layer in enumerate(self.level2):\n x = layer(x)\n if i == 0:\n down2 = x\n x = self.norm_prelu_2(torch.cat([down2, x], 1))\n output.append(x)\n\n return output\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n if isinstance(pretrained, str):\n logger = 
get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n kaiming_init(m)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n elif isinstance(m, nn.PReLU):\n constant_init(m, 0)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keeping the normalization\n layers frozen.\"\"\"\n super(CGNet, self).train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval() has an effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()\n" ]
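The channel gating in GlobalContextExtractor above is the squeeze-and-excitation idea: global-average-pool to one value per channel, pass it through a small bottleneck MLP ending in a sigmoid, and multiply the result back onto the feature map. A minimal standalone sketch in plain PyTorch, with no mmcv dependency; the ChannelGate name and the toy sizes are illustrative, not part of the code above:

import torch
from torch import nn

class ChannelGate(nn.Module):
    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel), nn.Sigmoid())

    def forward(self, x):
        n, c = x.size()[:2]
        y = self.avg_pool(x).view(n, c)    # squeeze: (N, C, H, W) -> (N, C)
        y = self.fc(y).view(n, c, 1, 1)    # excite: per-channel gate in (0, 1)
        return x * y                       # reweight the input channels

x = torch.randn(2, 64, 32, 32)
out = ChannelGate(channel=64)(x)
assert out.shape == x.shape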
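Likewise, the local/surrounding split in ContextGuidedBlock reduces to two depthwise 3x3 convolutions over the same input, one dilated to widen the receptive field, whose outputs are concatenated into the joint feature. A sketch under the same assumptions (toy channel count and dilation):

import torch
from torch import nn

channels, dilation = 32, 2
# f_loc: plain depthwise 3x3; f_sur: dilated depthwise 3x3 for surrounding context
f_loc = nn.Conv2d(channels, channels, 3, padding=1, groups=channels, bias=False)
f_sur = nn.Conv2d(channels, channels, 3, padding=dilation, dilation=dilation,
                  groups=channels, bias=False)

x = torch.randn(1, channels, 64, 64)
joi_feat = torch.cat([f_loc(x), f_sur(x)], dim=1)
print(joi_feat.shape)  # torch.Size([1, 64, 64, 64]): 2 * channels, same spatial size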
[ [ "torch.utils.data.DistributedSampler", "numpy.random.seed" ], [ "torch.cat", "torch.nn.PReLU", "torch.nn.ModuleList", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.utils.checkpoint.checkpoint", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sfox14/butterfly
[ "13cc15cee5bdb7adaf376219aaf20fab0459e9ef", "13cc15cee5bdb7adaf376219aaf20fab0459e9ef", "13cc15cee5bdb7adaf376219aaf20fab0459e9ef", "13cc15cee5bdb7adaf376219aaf20fab0459e9ef" ]
[ "learning_transforms/training.py", "tests/test_complex_utils.py", "learning_transforms/speed_training_plot.py", "learning_transforms/print_results.py" ]
[ "import copy\nimport os\n\nimport torch\nfrom torch import nn\nfrom torch import optim\n\nfrom ray.tune import Trainable\n\n\nN_LBFGS_STEPS_VALIDATION = 15\n\n\nclass PytorchTrainable(Trainable):\n \"\"\"Abstract Trainable class for Pytorch models, which checkpoints the model\n and the optimizer.\n Subclass must initialize self.model and self.optimizer in _setup.\n \"\"\"\n\n def _save(self, checkpoint_dir):\n checkpoint_path = os.path.join(checkpoint_dir, \"model_optimizer.pth\")\n state = {'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict()}\n torch.save(state, checkpoint_path)\n return checkpoint_path\n\n def _restore(self, checkpoint_path):\n if hasattr(self, 'device'):\n checkpoint = torch.load(checkpoint_path, self.device)\n else:\n checkpoint = torch.load(checkpoint_path)\n self.model.load_state_dict(checkpoint['model'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n\nclass TrainableFixedData(PytorchTrainable):\n \"\"\"Abstract Trainable class for Pytorch models with fixed data.\n Subclass must initialize self.model, self.optimizer, and\n self.n_steps_per_epoch in _setup, and have to implement self.loss().\n \"\"\"\n def loss(self):\n raise NotImplementedError\n\n def _train(self):\n for _ in range(self.n_steps_per_epoch):\n self.optimizer.zero_grad()\n loss = self.loss()\n loss.backward()\n self.optimizer.step()\n return {'negative_loss': -loss.item()}\n\n\nclass TrainableMatrixFactorization(TrainableFixedData):\n \"\"\"Abstract Trainable class for Pytorch models that factor a target matrix.\n Subclass must initialize self.model, self.optimizer,\n self.n_steps_per_epoch, self.n_epochs_per_validation, self.target_matrix,\n and self.input in _setup, and may override self.freeze() to freeze model\n (e.g. 
taking argmax of logit instead of logit).\n\n \"\"\"\n def forward(self):\n return self.model(self.input)\n\n def loss(self):\n # Take transpose since the transform acts on the rows of the input\n output = self.forward().transpose(0, 1)\n if self.target_matrix.dim() == 2 and output.dim() == 3: # Real target matrix, take real part\n output = output[:, :, 0]\n return nn.functional.mse_loss(output, self.target_matrix)\n\n def freeze(self):\n pass\n\n def polish(self, nmaxsteps=50, patience=5, threshold=1e-10, save_to_self_model=False):\n if not save_to_self_model:\n model_bak = self.model\n self.model = copy.deepcopy(self.model)\n self.freeze()\n optimizer = optim.LBFGS(filter(lambda p: p.requires_grad, self.model.parameters()))\n def closure():\n optimizer.zero_grad()\n loss = self.loss()\n loss.backward()\n return loss\n n_bad_steps = 0\n best_loss = float('inf')\n for i in range(nmaxsteps):\n loss = optimizer.step(closure)\n if loss.item() < best_loss - threshold:\n best_loss = loss.item()\n n_bad_steps = 0\n else:\n n_bad_steps += 1\n if n_bad_steps > patience:\n break\n if not save_to_self_model:\n self.model = model_bak\n return loss.item()\n\n def _train(self):\n for _ in range(self.n_steps_per_epoch):\n self.optimizer.zero_grad()\n loss = self.loss()\n loss.backward()\n self.optimizer.step()\n loss = loss.item()\n if (self._iteration + 1) % self.n_epochs_per_validation == 0:\n loss = min(loss, self.polish(N_LBFGS_STEPS_VALIDATION, save_to_self_model=False))\n return {'negative_loss': -loss, 'mean_loss': loss, 'nparameters': self.nparameters}\n", "import copy\nimport itertools\nimport unittest\n\nimport torch\n\nimport torch_butterfly\nfrom torch_butterfly.complex_utils import complex_mul, complex_matmul, index_last_dim\n\n\nclass ButterflyComplexUtilsTest(unittest.TestCase):\n\n def setUp(self):\n self.rtol = 1e-3\n self.atol = 1e-5\n\n def test_complex_matmul(self):\n \"\"\"Check that our index_last_dim backward is also correct for real input\n \"\"\"\n bs = (3, 5)\n for device in ['cpu', 'cuda']:\n X = torch.randn(*bs, 128, 16, dtype=torch.complex64, device=device, requires_grad=True)\n Y = torch.randn(*bs, 16, 32, dtype=torch.complex64, device=device, requires_grad=True)\n prod = complex_matmul(X, Y)\n prod_sum = complex_mul(X.unsqueeze(-1), Y.unsqueeze(-3)).sum(dim=-2)\n self.assertTrue(torch.allclose(prod, prod_sum, self.rtol, self.atol))\n g = torch.randn_like(prod)\n grad_X, grad_Y = torch.autograd.grad(prod, (X, Y), g)\n grad_X_sum, grad_Y_sum = torch.autograd.grad(prod_sum, (X, Y), g)\n self.assertTrue(torch.allclose(grad_X, grad_X_sum, self.rtol, self.atol))\n self.assertTrue(torch.allclose(grad_Y, grad_Y_sum, self.rtol, self.atol))\n\n X = torch.randn(5, 3, 32, 32, dtype=torch.complex64, device=device, requires_grad=True)\n Y = torch.randn(6, 3, 32, 32, dtype=torch.complex64, device=device, requires_grad=True)\n prod = complex_matmul(X.permute(2, 3, 0, 1), Y.permute(2, 3, 1, 0)).permute(2, 3, 0, 1)\n prod_sum = complex_mul(X.unsqueeze(1), Y).sum(dim=2)\n self.assertTrue(torch.allclose(prod, prod_sum, self.rtol, self.atol))\n g = torch.randn_like(prod)\n grad_X, grad_Y = torch.autograd.grad(prod, (X, Y), g)\n grad_X_sum, grad_Y_sum = torch.autograd.grad(prod_sum, (X, Y), g)\n self.assertTrue(torch.allclose(grad_X, grad_X_sum, self.rtol, self.atol))\n self.assertTrue(torch.allclose(grad_Y, grad_Y_sum, self.rtol, self.atol))\n\n def test_index_last_dim(self):\n \"\"\"Check that our index_last_dim backward is also correct for real input\n \"\"\"\n sizes = (2, 3, 17)\n p = 
torch.randperm(sizes[-1])\n X = torch.randn(sizes, requires_grad=True)\n out_torch = X[..., p]\n out = index_last_dim(X, p)\n self.assertTrue(torch.allclose(out, out_torch))\n g = torch.randn_like(out)\n grad_x_torch, = torch.autograd.grad(out_torch, X, g)\n grad_x, = torch.autograd.grad(out, X, g)\n self.assertTrue(torch.allclose(grad_x, grad_x_torch))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport matplotlib.patches as mpatches\nplt.rcParams['font.family'] = 'serif'\n\nrs = [1]\nmarkers = ['o', 'v', 'D', 'p', 's', '>']\nloc = 'speed_training_data.pkl'\ndata = pickle.load(open(loc,'rb'))\n\ncolors = ['red', 'orange', 'green', 'blue']\n\nspeedups_fft = data['speedup_fft']\nspeedups_butterfly = data['speedup_butterfly']\nsizes = data['sizes']\nlw = 3\nmsize = 6\n\nprint('data: ', data)\n\nstart_idx = 0\n\nprint('fft speedup: ', speedups_fft[start_idx:])\nprint('butterfly speedup: ', speedups_butterfly[start_idx:])\n\nprint('sizes, speedups: ', sizes.size, speedups_fft.shape)\nplt.plot(sizes[start_idx:],speedups_fft[start_idx:], linewidth=lw, label='FFT',marker=markers[0],color=colors[0],\n markeredgecolor=colors[0],markersize=msize)\nplt.plot(sizes[start_idx:],speedups_butterfly[start_idx:], linewidth=lw, label='Butterfly',marker=markers[0],color=colors[3],\n markeredgecolor=colors[3],markersize=msize)\n\nplt.axhline(y=1.0, color='black',linewidth=3)\nplt.xscale('log', basex=2)\nplt.yscale('log')\nplt.xlabel(r'$N$',fontsize=14)\n# plt.ylabel(\"Speedup over GEMM\", fontsize=14)\nplt.ylabel(\"Speedup over dense multiply\", fontsize=18)\n\nclasses = [mpatches.Patch(color=colors[0], label='FFT'),\n mpatches.Patch(color=colors[3], label='Butterfly')]\n\nplt.legend(handles=classes, ncol=4, bbox_to_anchor=(0.75, -0.15))#, loc='upper left')\n\nplt.savefig('speed_training_plot.pdf', bbox_inches='tight')\n", "import pickle\nfrom pathlib import Path\nimport numpy as np\n\nresult_dir = 'results_new'\nexperiment_names = []\nexperiment_names += [[f'dft_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\nexperiment_names += [[f'dct_factorization_TrainableBPP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\nexperiment_names += [[f'dst_factorization_TrainableBPP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\nexperiment_names += [[f'convolution_factorization_TrainableBPBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\nexperiment_names += [[f'hadamard_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\nexperiment_names += [[f'hartley_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\nexperiment_names += [[f'legendre_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\nexperiment_names += [[f'randn_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]\n\nall_rmse = []\nfor experiment_names_ in experiment_names:\n print(experiment_names_[0])\n best_loss = []\n best_polished_loss = []\n for experiment_name in experiment_names_:\n checkpoint_path = Path(result_dir) / experiment_name / 'trial.pkl'\n with checkpoint_path.open('rb') as f:\n trials = pickle.load(f)\n losses = [-trial.last_result['negative_loss'] for trial in trials]\n polished_losses = [-trial.last_result.get('polished_negative_loss', float('-inf')) for trial in trials]\n # 
best_loss.append(min(losses))\n best_loss.append(np.sort(losses)[0]) # to deal with NaN\n best_polished_loss.append(np.sort(polished_losses)[0]) # to deal with NaN\n # print(np.array(losses))\n # print(np.sort(losses))\n # best_trial = max(trials, key=lambda trial: trial.last_result['negative_loss'])\n # train_model = best_trial._get_trainable_cls()(best_trial.config)\n # train_model = TrainableHadamardFactorFixedOrder(best_trial.config)\n # train_model = TrainableHadamardFactorSoftmax(best_trial.config)\n # train_model = TrainableHadamardFactorSparsemax(best_trial.config)\n # train_model.restore(str(Path(best_trial.logdir) / best_trial._checkpoint.value))\n # model = train_model.model\n # best_rmse = np.sqrt(best_loss)\n # print(best_rmse)\n print(np.sqrt(best_polished_loss))\n all_rmse.append(np.sqrt(best_polished_loss))\n\nprint(np.array(all_rmse))\ntransform_names = ['DFT', 'DCT', 'DST', 'Conv', 'Hadamard', 'Hartley', 'Legendre', 'Rand']\n\nimport pickle\n\nwith open('rmse.pkl', 'wb') as f:\n pickle.dump({'names': transform_names, 'rmse': all_rmse}, f)\n" ]
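The polish step in TrainableMatrixFactorization above refines a solution with L-BFGS and stops once `patience` consecutive steps pass without at least `threshold` improvement. A self-contained sketch of that loop; the standalone polish function and the quadratic toy objective are stand-ins of mine, not the repo's API:

import torch
from torch import optim

def polish(params, loss_fn, nmaxsteps=50, patience=5, threshold=1e-10):
    optimizer = optim.LBFGS([p for p in params if p.requires_grad])

    def closure():
        optimizer.zero_grad()
        loss = loss_fn()
        loss.backward()
        return loss

    best_loss, n_bad_steps = float('inf'), 0
    for _ in range(nmaxsteps):
        loss = optimizer.step(closure)  # returns the closure's loss
        if loss.item() < best_loss - threshold:
            best_loss, n_bad_steps = loss.item(), 0
        else:
            n_bad_steps += 1
        if n_bad_steps > patience:
            break
    return best_loss

w = torch.randn(4, requires_grad=True)
target = torch.arange(4.0)
print(polish([w], lambda: ((w - target) ** 2).sum()))  # ~0 on this toy quadratic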
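The complex-arithmetic tests above validate a fused product against an explicit broadcast-multiply-and-sum. The same reference identity can be checked with stock PyTorch complex tensors; this sketch uses torch.matmul rather than the repo's complex_matmul kernel:

import torch

X = torch.randn(3, 5, 128, 16, dtype=torch.complex64)
Y = torch.randn(3, 5, 16, 32, dtype=torch.complex64)

prod = torch.matmul(X, Y)                              # batched complex matmul
prod_sum = (X.unsqueeze(-1) * Y.unsqueeze(-3)).sum(dim=-2)  # explicit sum over k
print(torch.allclose(prod, prod_sum, rtol=1e-3, atol=1e-5))  # True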
[ [ "torch.nn.functional.mse_loss", "torch.load", "torch.save" ], [ "torch.randn_like", "torch.randperm", "torch.randn", "torch.allclose", "torch.autograd.grad" ], [ "matplotlib.pyplot.legend", "matplotlib.patches.Patch", "matplotlib.pyplot.axhline", "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.yscale", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xscale", "matplotlib.pyplot.ylabel" ], [ "numpy.array", "numpy.sqrt", "numpy.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DataLab-CQU/stellargraph
[ "5ca1e59e91cb6ac470bf19ff3da39b3a1a68650e", "5ca1e59e91cb6ac470bf19ff3da39b3a1a68650e", "5ca1e59e91cb6ac470bf19ff3da39b3a1a68650e" ]
[ "tests/layer/test_graph_attention.py", "stellargraph/core/element_data.py", "tests/utils/test_hyperbolic.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nGAT tests\n\"\"\"\nimport pytest\nimport scipy.sparse as sps\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Input\nfrom stellargraph.mapper import (\n FullBatchNodeGenerator,\n FullBatchLinkGenerator,\n GraphSAGENodeGenerator,\n)\nfrom stellargraph.layer import *\nfrom ..test_utils.graphs import example_graph\nfrom .. import test_utils\n\n\npytestmark = test_utils.ignore_stellargraph_experimental_mark\n\n\nclass Test_GraphAttention:\n \"\"\"\n Tests of GraphAttention layer\n \"\"\"\n\n N = 10\n F_in = 5\n F_out = 2\n attn_heads = 8\n activation = \"relu\"\n layer = GraphAttention\n\n def get_inputs(self):\n x_inp = [\n Input(batch_shape=(1, self.N, self.F_in)),\n Input(batch_shape=(1, self.N, self.N)),\n ]\n\n # duplicate input here for Test_GraphAttentionSparse to work\n return x_inp, x_inp\n\n def get_matrix(self, edges=[]):\n # adjacency matrix with self-loops only\n A = np.eye(self.N)\n for e, v in edges:\n A[e[0], e[1]] = v\n return [A[None, :, :]]\n\n def test_constructor(self):\n # attn_heads_reduction = \"concat\":\n layer = self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"concat\",\n activation=self.activation,\n )\n assert layer.units == self.F_out\n assert layer.attn_heads == self.attn_heads\n assert layer.output_dim == self.F_out * self.attn_heads\n assert layer.activation == keras.activations.get(self.activation)\n\n # attn_heads_reduction = \"average\":\n layer = self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"average\",\n activation=self.activation,\n )\n assert layer.output_dim == self.F_out\n\n # attn_heads_reduction = \"ave\":\n with pytest.raises(ValueError):\n self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"ave\",\n activation=self.activation,\n )\n\n def test_apply_concat(self):\n gat = self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"concat\",\n activation=self.activation,\n kernel_initializer=\"ones\",\n )\n x_inp, layer_inp = self.get_inputs()\n\n # Instantiate layer with squeezed matrix\n x_out = gat(layer_inp)\n\n model = keras.Model(inputs=x_inp, outputs=x_out)\n\n assert model.output_shape[-1] == self.F_out * self.attn_heads\n\n As = self.get_matrix()\n X = np.ones((1, self.N, self.F_in)) # features\n\n expected = np.ones((self.N, self.F_out * self.attn_heads)) * self.F_in\n actual = model.predict([X] + As)\n\n np.testing.assert_allclose(actual.squeeze(), expected)\n\n def test_apply_average(self):\n gat = self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"average\",\n activation=self.activation,\n kernel_initializer=\"ones\",\n attn_kernel_initializer=\"zeros\",\n bias_initializer=\"zeros\",\n )\n x_inp, layer_inp = self.get_inputs()\n\n # Instantiate layer with 
squeezed matrix\n x_out = gat(layer_inp)\n\n model = keras.Model(inputs=x_inp, outputs=x_out)\n assert model.output_shape[-1] == self.F_out\n\n X = np.ones((1, self.N, self.F_in)) # features\n for i in range(self.N):\n X[:, i, :] = i + 1\n\n As = self.get_matrix()\n\n expected = (X * self.F_in)[..., : self.F_out]\n actual = model.predict([X] + As)\n\n np.testing.assert_allclose(actual.squeeze(), expected.squeeze())\n\n def test_apply_average_with_neighbours(self):\n gat_saliency = self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"average\",\n activation=self.activation,\n kernel_initializer=\"ones\",\n attn_kernel_initializer=\"zeros\",\n bias_initializer=\"zeros\",\n saliency_map_support=True,\n )\n\n gat_origin = self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"average\",\n activation=self.activation,\n kernel_initializer=\"ones\",\n attn_kernel_initializer=\"zeros\",\n bias_initializer=\"zeros\",\n saliency_map_support=False,\n )\n\n x_inp, layer_inp = self.get_inputs()\n\n # Instantiate layer with squeezed matrix\n x_out_saliency = gat_saliency(layer_inp)\n x_out_origin = gat_origin(layer_inp)\n\n model_origin = keras.Model(inputs=x_inp, outputs=x_out_origin)\n model_saliency = keras.Model(inputs=x_inp, outputs=x_out_saliency)\n assert model_origin.output_shape[-1] == self.F_out\n assert model_saliency.output_shape[-1] == self.F_out\n\n X = np.zeros((1, self.N, self.F_in)) # features\n for i in range(self.N):\n X[:, i, :] = i\n\n As = self.get_matrix([((0, 1), 1), ((1, 0), 1)])\n\n expected = (X * self.F_in)[..., : self.F_out]\n expected[:, :2] = self.F_in / 2\n actual_origin = model_origin.predict([X] + As)\n actual_saliency = model_saliency.predict([X] + As)\n np.testing.assert_allclose(expected, actual_origin)\n np.testing.assert_allclose(expected, actual_saliency)\n\n def test_layer_config(self):\n layer = self.layer(\n units=self.F_out,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"concat\",\n activation=self.activation,\n )\n conf = layer.get_config()\n\n assert conf[\"units\"] == self.F_out\n assert conf[\"attn_heads\"] == self.attn_heads\n assert conf[\"attn_heads_reduction\"] == \"concat\"\n assert conf[\"activation\"] == self.activation\n assert conf[\"use_bias\"] == True\n assert conf[\"kernel_initializer\"][\"class_name\"] == \"GlorotUniform\"\n assert conf[\"bias_initializer\"][\"class_name\"] == \"Zeros\"\n assert conf[\"kernel_regularizer\"] == None\n assert conf[\"bias_regularizer\"] == None\n assert conf[\"kernel_constraint\"] == None\n assert conf[\"bias_constraint\"] == None\n\n\nclass Test_GraphAttentionSparse(Test_GraphAttention):\n \"\"\"\n Tests of GraphAttentionSparse layer\n \"\"\"\n\n N = 10\n F_in = 5\n F_out = 2\n attn_heads = 8\n activation = \"relu\"\n layer = GraphAttentionSparse\n\n def get_inputs(self):\n x_inp = [\n Input(batch_shape=(1, self.N, self.F_in)),\n Input(batch_shape=(1, None, 2), dtype=\"int64\"),\n Input(batch_shape=(1, None), dtype=\"float32\"),\n ]\n\n A_mat = SqueezedSparseConversion(shape=(self.N, self.N))(x_inp[1:])\n\n # For dense matrix, remove batch dimension\n layer_inp = x_inp[:1] + [A_mat]\n\n return x_inp, layer_inp\n\n def get_matrix(self, edges=[]):\n # adjacency matrix with self-loops + edges\n A_sparse = sps.eye(self.N, format=\"lil\")\n for e, v in edges:\n A_sparse[e[0], e[1]] = v\n # Extract indices & values to feed to tensorflow\n A_sparse = A_sparse.tocoo()\n A_indices = np.expand_dims(\n np.hstack((A_sparse.row[:, None], 
A_sparse.col[:, None])), 0\n )\n A_values = np.expand_dims(A_sparse.data, 0)\n return [A_indices, A_values]\n\n\nclass Test_GAT:\n \"\"\"\n Tests of GAT class\n \"\"\"\n\n N = 10\n F_in = 5\n F_out = 2\n attn_heads = 8\n layer_sizes = [4, 16]\n activations = [\"relu\", \"linear\"]\n sparse = False\n method = \"gat\"\n\n def test_constructor(self):\n G = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)\n # test default if no activations are passed:\n gat = GAT(layer_sizes=self.layer_sizes, generator=gen, bias=True)\n assert gat.activations == [\"elu\", \"elu\"]\n\n # test error if too many activations:\n with pytest.raises(ValueError):\n gat = GAT(layer_sizes=[10], activations=self.activations, generator=gen)\n\n # test error if too few activations:\n with pytest.raises(ValueError):\n gat = GAT(layer_sizes=[10, 10], activations=[\"relu\"], generator=gen)\n\n # test error where layer_sizes is not a list:\n with pytest.raises(TypeError):\n gat = GAT(\n layer_sizes=10,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n )\n\n # test error where layer_sizes values are not valid\n with pytest.raises(ValueError):\n gat = GAT(\n layer_sizes=[4, 0],\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n )\n\n # test for incorrect length of att_heads list:\n with pytest.raises(ValueError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=[8, 8, 1],\n generator=gen,\n bias=True,\n )\n\n # test for invalid values in att_heads list:\n with pytest.raises(ValueError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=[8, 0],\n generator=gen,\n bias=True,\n )\n\n # test for invalid type of att_heads argument:\n with pytest.raises(TypeError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=8.0,\n generator=gen,\n bias=True,\n )\n\n # test error where activations is not a list:\n with pytest.raises(TypeError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=\"relu\",\n generator=gen,\n bias=True,\n )\n\n # test attn_heads_reduction errors:\n with pytest.raises(TypeError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n attn_heads_reduction=\"concat\",\n generator=gen,\n bias=True,\n )\n with pytest.raises(ValueError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n attn_heads_reduction=[\"concat\", \"concat\", \"average\"],\n generator=gen,\n bias=True,\n )\n with pytest.raises(ValueError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n attn_heads_reduction=[\"concat\", \"sum\"],\n generator=gen,\n bias=True,\n )\n\n # test error where len(activations) is not equal to len(layer_sizes):\n with pytest.raises(ValueError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=[\"relu\"],\n generator=gen,\n bias=True,\n )\n\n # Default attention heads reductions:\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n )\n\n assert gat.activations == self.activations\n assert gat.attn_heads_reduction == [\"concat\", \"average\"]\n assert gat.generator == gen\n\n # User-specified attention heads reductions:\n gat = GAT(\n layer_sizes=self.layer_sizes,\n 
activations=self.activations,\n attn_heads=self.attn_heads,\n attn_heads_reduction=[\"concat\", \"concat\"],\n generator=gen,\n bias=True,\n )\n\n assert gat.attn_heads_reduction == [\"concat\", \"concat\"]\n\n def test_gat_build_constructor(self):\n G = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n )\n\n assert len(gat.in_out_tensors()) == 2\n x_in, x_out = gat.in_out_tensors()\n assert len(x_in) == 4 if self.sparse else 3\n assert int(x_in[0].shape[-1]) == self.F_in\n assert K.int_shape(x_in[-1]) == (1, G.number_of_nodes(), G.number_of_nodes())\n assert int(x_out.shape[-1]) == self.layer_sizes[-1]\n\n def test_gat_build_linkmodel_constructor(self):\n G = example_graph(feature_size=self.F_in)\n gen = FullBatchLinkGenerator(G, sparse=self.sparse, method=self.method)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n )\n\n assert len(gat.in_out_tensors()) == 2\n x_in, x_out = gat.in_out_tensors()\n assert len(x_in) == 4 if self.sparse else 3\n assert int(x_in[0].shape[-1]) == self.F_in\n assert int(x_out.shape[-1]) == self.layer_sizes[-1]\n\n def test_gat_build_constructor_no_generator(self):\n G = example_graph(feature_size=self.F_in)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n bias=True,\n num_nodes=1000,\n num_features=self.F_in,\n multiplicity=1,\n )\n assert gat.use_sparse == False\n\n x_in, x_out = gat.in_out_tensors()\n assert len(x_in) == 4 if self.sparse else 3\n assert int(x_in[0].shape[-1]) == self.F_in\n assert int(x_out.shape[-1]) == self.layer_sizes[-1]\n\n def test_gat_build_constructor_wrong_generator(self):\n G = example_graph(feature_size=self.F_in)\n gen = GraphSAGENodeGenerator(G, self.N, [5, 10])\n\n # test error where generator is of the wrong type for GAT:\n with pytest.raises(TypeError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n bias=True,\n generator=gen,\n )\n\n def test_gat_build_l2norm(self):\n G = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n normalize=\"l2\",\n kernel_initializer=\"ones\",\n attn_kernel_initializer=\"ones\",\n )\n\n x_in, x_out = gat.in_out_tensors()\n\n model = keras.Model(inputs=x_in, outputs=x_out)\n\n ng = gen.flow(G.nodes())\n actual = model.predict(ng)\n expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (\n 1.0 / G.number_of_nodes()\n )\n\n np.testing.assert_allclose(expected, actual[0])\n\n def test_gat_build_no_norm(self):\n G = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n normalize=None,\n kernel_initializer=\"ones\",\n attn_kernel_initializer=\"ones\",\n )\n\n x_in, x_out = gat.in_out_tensors()\n\n model = keras.Model(inputs=x_in, outputs=x_out)\n\n ng = gen.flow(G.nodes())\n actual = model.predict(ng)\n\n expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (\n self.F_in\n * 
self.layer_sizes[0]\n * self.attn_heads\n * np.max(G.node_features(G.nodes()))\n )\n np.testing.assert_allclose(expected, actual[0])\n\n def test_gat_build_wrong_norm(self):\n G = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(G)\n with pytest.raises(ValueError):\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n normalize=\"whatever\",\n )\n\n def test_gat_serialize(self):\n G = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n bias=True,\n normalize=\"l2\",\n )\n\n x_in, x_out = gat.in_out_tensors()\n model = keras.Model(inputs=x_in, outputs=x_out)\n\n ng = gen.flow(G.nodes())\n\n # Save model\n model_json = model.to_json()\n\n # Set all weights to one\n model_weights = [np.ones_like(w) for w in model.get_weights()]\n\n # Load model from json & set all weights\n model2 = keras.models.model_from_json(\n model_json,\n custom_objects={\n \"GraphAttention\": GraphAttention,\n \"GatherIndices\": GatherIndices,\n },\n )\n model2.set_weights(model_weights)\n\n # Test deserialized model\n actual = model2.predict(ng)\n expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (\n 1.0 / G.number_of_nodes()\n )\n np.testing.assert_allclose(expected, actual[0])\n\n def test_kernel_and_bias_defaults(self):\n graph = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(graph, sparse=self.sparse, method=self.method)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n )\n for layer in gat._layers:\n if isinstance(layer, GraphAttention):\n assert isinstance(\n layer.kernel_initializer, tf.initializers.GlorotUniform\n )\n assert isinstance(layer.bias_initializer, tf.initializers.Zeros)\n assert isinstance(\n layer.attn_kernel_initializer, tf.initializers.GlorotUniform\n )\n assert layer.kernel_regularizer is None\n assert layer.bias_regularizer is None\n assert layer.attn_kernel_regularizer is None\n assert layer.kernel_constraint is None\n assert layer.bias_constraint is None\n assert layer.attn_kernel_constraint is None\n\n def test_save_load(self, tmpdir):\n graph = example_graph(feature_size=self.F_in)\n gen = FullBatchNodeGenerator(graph, sparse=self.sparse, method=self.method)\n gat = GAT(\n layer_sizes=self.layer_sizes,\n activations=self.activations,\n attn_heads=self.attn_heads,\n generator=gen,\n )\n\n test_utils.model_save_load(tmpdir, gat)\n\n\ndef TestGATsparse(Test_GAT):\n sparse = True\n method = \"gat\"\n", "# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as sps\n\nfrom ..globalvar import SOURCE, TARGET, WEIGHT, 
TYPE_ATTR_NAME\nfrom .validation import require_dataframe_has_columns, comma_sep\n\n\nclass ExternalIdIndex:\n \"\"\"\n An ExternalIdIndex maps between \"external IDs\" and \"integer locations\" or \"internal locations\"\n (ilocs).\n\n It is designed to allow handling only efficient integers internally, but easily convert between\n them and the user-facing IDs.\n \"\"\"\n\n def __init__(self, ids):\n self._index = pd.Index(ids)\n # reserve 2 ^ (n-bits) - 1 for sentinel\n self.dtype = np.min_scalar_type(len(self._index))\n\n if not self._index.is_unique:\n # had some duplicated IDs, which is an error\n duplicated = self._index[self._index.duplicated()].unique()\n raise ValueError(\n f\"expected IDs to appear once, found some that appeared more: {comma_sep(duplicated)}\"\n )\n\n @property\n def pandas_index(self) -> pd.Index:\n \"\"\"\n Return a collection of all the elements contained in this index.\n \"\"\"\n return self._index\n\n def __len__(self):\n return len(self._index)\n\n def contains_external(self, id):\n \"\"\"\n Whether the external ID is indexed by this ``ExternalIdIndex``.\n \"\"\"\n return id in self._index\n\n def is_valid(self, ilocs: np.ndarray) -> np.ndarray:\n \"\"\"\n Flags the locations of all the ilocs that are valid (that is, where to_iloc didn't fail).\n \"\"\"\n return (0 <= ilocs) & (ilocs < len(self))\n\n def require_valid(self, query_ids, ilocs: np.ndarray) -> np.ndarray:\n valid = self.is_valid(ilocs)\n\n if not valid.all():\n missing_values = np.asarray(query_ids)[~valid]\n\n if len(missing_values) == 1:\n raise KeyError(missing_values[0])\n\n raise KeyError(missing_values)\n\n def to_iloc(self, ids, smaller_type=True, strict=False) -> np.ndarray:\n \"\"\"\n Convert external IDs ``ids`` to integer locations.\n\n Args:\n ids: a collection of external IDs\n smaller_type: if True, convert the ilocs to the smallest type that can hold them, to reduce storage\n strict: if True, check that all IDs are known and throw a KeyError if not\n\n Returns:\n A numpy array of the integer locations for each id that exists, with missing IDs\n represented by either the largest value of the dtype (if smaller_type is True) or -1 (if\n smaller_type is False)\n \"\"\"\n internal_ids = self._index.get_indexer(ids)\n if strict:\n self.require_valid(ids, internal_ids)\n\n # reduce the storage required (especially useful if this is going to be stored rather than\n # just transient)\n if smaller_type:\n return internal_ids.astype(self.dtype)\n return internal_ids\n\n def from_iloc(self, internal_ids) -> np.ndarray:\n \"\"\"\n Convert integer locations to their corresponding external ID.\n \"\"\"\n return self._index.to_numpy()[internal_ids]\n\n\nclass ElementData:\n \"\"\"\n An ``ElementData`` stores \"shared\" information about a set of a graph elements (nodes or\n edges). Elements of every type must have this information, such as the type itself or the\n source, target and weight for edges.\n\n It indexes these in terms of ilocs (see :class:`.ExternalIdIndex`). 
The data is stored as columns\n of raw numpy arrays, because indexing such arrays is significantly (orders of magnitude) faster\n than indexing pandas dataframes, series or indices.\n\n Args:\n ids (sequence): the IDs of each element\n type_info (list of tuple of type name, numpy array): the associated feature vectors of each type, where the size of the first dimension defines the elements of that type\n \"\"\"\n\n def __init__(self, ids, type_info):\n if not isinstance(type_info, list):\n raise TypeError(\n f\"type_info: expected list, found {type(type_info).__name__}\"\n )\n\n type_ranges = {}\n features = {}\n all_types = []\n type_sizes = []\n\n rows_so_far = 0\n\n # validation\n for type_name, data in type_info:\n if not isinstance(data, np.ndarray):\n raise TypeError(\n f\"type_info (for {type_name!r}): expected numpy array, found {type(data).__name__}\"\n )\n\n if len(data.shape) < 2:\n raise ValueError(\n f\"type_info (for {type_name!r}): expected at least 2 dimensions, found {len(data.shape)}\"\n )\n\n rows = data.shape[0]\n start = rows_so_far\n\n rows_so_far += rows\n stop = rows_so_far\n\n all_types.append(type_name)\n type_sizes.append(stop - start)\n type_ranges[type_name] = range(start, stop)\n features[type_name] = data\n\n if rows_so_far != len(ids):\n raise ValueError(\n f\"type_info: expected features for each of the {len(ids)} IDs, found a total of {rows_so_far} features\"\n )\n\n self._id_index = ExternalIdIndex(ids)\n\n # there's typically a small number of types, so we can map them down to a small integer type\n # (usually uint8) for minimum storage requirements\n self._type_index = ExternalIdIndex(all_types)\n self._type_column = self._type_index.to_iloc(all_types).repeat(type_sizes)\n self._type_element_ilocs = type_ranges\n\n self._features = features\n\n def __len__(self) -> int:\n return len(self._id_index)\n\n def __contains__(self, item) -> bool:\n return self._id_index.contains_external(item)\n\n @property\n def ids(self) -> ExternalIdIndex:\n \"\"\"\n Returns:\n All of the IDs of these elements.\n \"\"\"\n return self._id_index\n\n @property\n def types(self) -> ExternalIdIndex:\n \"\"\"\n Returns:\n All the type names of these elements.\n \"\"\"\n return self._type_index\n\n def type_range(self, type_name):\n \"\"\"\n Returns:\n A range over the ilocs of the given type name\n \"\"\"\n return self._type_element_ilocs[type_name]\n\n @property\n def type_ilocs(self) -> np.ndarray:\n \"\"\"\n Returns:\n A numpy array with the type of each element, stores as the raw iloc of that type.\n \"\"\"\n return self._type_column\n\n def type_of_iloc(self, id_ilocs) -> np.ndarray:\n \"\"\"\n Return the types of the ID(s).\n\n Args:\n id_ilocs: a \"selector\" based on the element ID integer locations\n\n Returns:\n A sequence of types, corresponding to each of the ID(s) integer locations\n \"\"\"\n type_codes = self._type_column[id_ilocs]\n return self._type_index.from_iloc(type_codes)\n\n def features_of_type(self, type_name) -> np.ndarray:\n \"\"\"\n Returns all features for a given type.\n\n Args:\n type_name (hashable): the name of the type\n \"\"\"\n return self._features[type_name]\n\n def features(self, type_name, id_ilocs) -> np.ndarray:\n \"\"\"\n Return features for a set of IDs within a given type.\n\n Args:\n type_name (hashable): the name of the type for all of the IDs\n ids (iterable of IDs): a sequence of IDs of elements of type type_name\n\n Returns:\n A 2D numpy array, where the rows correspond to the ids\n \"\"\"\n start = 
self._type_element_ilocs[type_name].start\n feature_ilocs = id_ilocs - start\n\n # FIXME: better error messages\n if (feature_ilocs < 0).any():\n # ids were < start, e.g. from an earlier type, or unknown (-1)\n raise ValueError(\"unknown IDs\")\n\n try:\n return self._features[type_name][feature_ilocs, :]\n except IndexError:\n # some of the indices were too large (from a later type)\n raise ValueError(\"unknown IDs\")\n\n def feature_info(self):\n \"\"\"\n Returns:\n A dictionary of type_name to a tuple of an integer representing the size of the\n features of that type, and the dtype of the features.\n \"\"\"\n return {\n type_name: (type_features.shape[1:], type_features.dtype)\n for type_name, type_features in self._features.items()\n }\n\n\nclass NodeData(ElementData):\n # nodes don't have extra functionality at the moment\n pass\n\n\nclass FlatAdjacencyList:\n \"\"\"\n Stores an adjacency list in one contiguous numpy array in a format similar\n to a ragged tensor (https://www.tensorflow.org/guide/ragged_tensor).\n \"\"\"\n\n def __init__(self, flat_array, splits):\n self.splits = splits\n self.flat = flat_array\n\n def __getitem__(self, idx):\n if idx < 0:\n raise KeyError(\"node ilocs must be non-negative.\")\n start = self.splits[idx]\n stop = self.splits[idx + 1]\n return self.flat[start:stop]\n\n def items(self):\n for idx in range(len(self.splits) - 1):\n yield (idx, self[idx])\n\n\nclass EdgeData(ElementData):\n \"\"\"\n Args:\n ids (sequence): the IDs of each element\n sources (numpy.ndarray): the ilocs of the source of each edge\n targets (numpy.ndarray): the ilocs of the target of each edge\n weight (numpy.ndarray): the weight of each edge\n type_info (list of tuple of type name, numpy array): the associated feature vectors of each type, where the size of the first dimension defines the elements of that type\n number_of_nodes (int): the total number of nodes in the graph\n \"\"\"\n\n def __init__(self, ids, sources, targets, weights, type_info, number_of_nodes):\n super().__init__(ids, type_info)\n\n for name, column in {\n \"sources\": sources,\n \"targets\": targets,\n \"weights\": weights,\n }.items():\n if not isinstance(column, np.ndarray):\n raise TypeError(\n f\"{name}: expected a NumPy ndarray, found {type(column).__name__}\"\n )\n\n if len(column.shape) != 1:\n raise TypeError(\n f\"{name}: expected rank-1 array, found shape {column.shape}\"\n )\n\n if len(column) != len(self._id_index):\n raise TypeError(\n f\"{name}: expected length {len(self._id_index)} to match IDs, found length {len(column)}\"\n )\n\n self.sources = sources\n self.targets = targets\n self.weights = weights\n self.number_of_nodes = number_of_nodes\n\n # These are lazily initialized, to only pay the (construction) time and memory cost when\n # actually using them\n self._edges_dict = self._edges_in_dict = self._edges_out_dict = None\n\n # when there's no neighbors for something, an empty array should be returned; this uses a\n # tiny dtype to minimise unnecessary type promotion (e.g. 
if this is used with an int32\n # array, the result will still be int32).\n self._empty_ilocs = np.array([], dtype=np.uint8)\n\n def _init_directed_adj_lists(self):\n self._edges_in_dict, self._edges_out_dict = self._create_directed_adj_lists()\n\n def _create_directed_adj_lists(self):\n # record the edge ilocs of incoming and outgoing edges\n\n def _to_dir_adj_list(arr):\n neigh_counts = np.bincount(arr, minlength=self.number_of_nodes)\n splits = np.zeros(len(neigh_counts) + 1, dtype=self._id_index.dtype)\n splits[1:] = np.cumsum(neigh_counts, dtype=self._id_index.dtype)\n flat = np.argsort(arr).astype(self._id_index.dtype, copy=False)\n return FlatAdjacencyList(flat, splits)\n\n return _to_dir_adj_list(self.targets), _to_dir_adj_list(self.sources)\n\n def _init_undirected_adj_lists(self):\n self._edges_dict = self._create_undirected_adj_lists()\n\n def _create_undirected_adj_lists(self):\n # record the edge ilocs of both-direction edges\n num_edges = len(self.targets)\n\n # the dtype of the edge_ilocs\n # the argsort results in integers in [0, 2 * num_edges),\n # so the dtype potentially needs to be slightly larger\n dtype = np.min_scalar_type(2 * len(self.sources))\n\n # sentinel masks out node_ilocs so must be the same type as node_ilocs node edge_ilocs\n sentinel = np.cast[np.min_scalar_type(self.number_of_nodes)](-1)\n self_loops = self.sources == self.targets\n num_self_loops = self_loops.sum()\n\n combined = np.concatenate([self.sources, self.targets])\n # mask out duplicates of self loops\n combined[num_edges:][self_loops] = sentinel\n\n flat_array = np.argsort(combined).astype(dtype, copy=False)\n\n # get targets without self loops inplace\n # sentinels are sorted to the end\n filtered_targets = combined[num_edges:]\n filtered_targets.sort()\n\n # remove the sentinels if there are any (the full array will be retained\n # forever; we're assume that there's self loops are a small fraction\n # of the total number of edges)\n if num_self_loops > 0:\n flat_array = flat_array[:-num_self_loops]\n filtered_targets = filtered_targets[:-num_self_loops]\n\n flat_array %= num_edges\n neigh_counts = np.bincount(self.sources, minlength=self.number_of_nodes)\n neigh_counts += np.bincount(filtered_targets, minlength=self.number_of_nodes)\n splits = np.zeros(len(neigh_counts) + 1, dtype=dtype)\n splits[1:] = np.cumsum(neigh_counts, dtype=dtype)\n\n return FlatAdjacencyList(flat_array, splits)\n\n def _adj_lookup(self, *, ins, outs):\n if ins and outs:\n if self._edges_dict is None:\n self._init_undirected_adj_lists()\n return self._edges_dict\n if ins:\n if self._edges_in_dict is None:\n self._init_directed_adj_lists()\n return self._edges_in_dict\n if outs:\n if self._edges_out_dict is None:\n self._init_directed_adj_lists()\n return self._edges_out_dict\n\n raise ValueError(\n \"expected at least one of 'ins' or 'outs' to be True, found neither\"\n )\n\n def degrees(self, *, ins=True, outs=True):\n \"\"\"\n Compute the degrees of every non-isolated node.\n\n Args:\n ins (bool): count incoming edges\n outs (bool): count outgoing edges\n\n Returns:\n The in-, out- or total (summed) degree of all non-isolated nodes as a numpy array (if\n ``ret`` is the return value, ``ret[i]`` is the degree of the node with iloc ``i``)\n \"\"\"\n adj = self._adj_lookup(ins=ins, outs=outs)\n return defaultdict(int, ((key, len(value)) for key, value in adj.items()))\n\n def edge_ilocs(self, node_id, *, ins, outs) -> np.ndarray:\n \"\"\"\n Return the integer locations of the edges for the given node_id\n\n Args:\n 
node_id: the ID of the node\n\n\n Returns:\n The integer locations of the edges for the given node_id.\n \"\"\"\n\n return self._adj_lookup(ins=ins, outs=outs)[node_id]\n", "# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport numpy as np\nfrom stellargraph.utils.hyperbolic import *\n\n\[email protected]\ndef seeded():\n seed = np.random.randint(2 ** 32, dtype=np.uint32)\n # log for reproducibility\n print(\"seed:\", seed)\n np.random.seed(seed)\n\n\ndef _generate(num_vectors, norm_range=(0, 1), euclidean_max_norm=None):\n if euclidean_max_norm:\n c = None\n normaliser = euclidean_max_norm\n else:\n c = np.random.random() * 10\n normaliser = c ** -0.5\n\n n = np.random.choice([1, 2, 5, 10, 30, 100])\n extra_axes = np.random.choice([0, 1, 3])\n\n small, big = norm_range\n norms = (small + np.random.random(size=num_vectors) * (big - small)) * normaliser\n raws = 2 * np.random.random(size=(num_vectors, n)) - 1\n scale = norms / np.linalg.norm(raws, axis=1)\n vs = raws * scale[:, None]\n\n return c, vs.astype(np.float32)[(None,) * extra_axes]\n\n\ndef test_poincare_ball_exp_specialisation(seeded):\n for _ in range(100):\n # curvature\n c, _vs = _generate(0)\n # tangent space (Euclidean) vectors\n _c, vs = _generate(17, euclidean_max_norm=1000)\n\n specialised = poincare_ball_exp(c, None, vs)\n assert specialised.shape == vs.shape\n\n actual = poincare_ball_exp(c, np.zeros_like(vs), vs)\n np.testing.assert_allclose(specialised.numpy(), actual.numpy())\n\n\ndef test_poincare_ball_distance_self(seeded):\n # the distance between a point and itself should be 0\n for _ in range(100):\n c, vs = _generate(17)\n\n d = poincare_ball_distance(c, vs, vs)\n assert d.shape == vs.shape[:-1]\n np.testing.assert_allclose(d, 0, atol=1e-5)\n\n\ndef test_poincare_ball_distance_exp(seeded):\n # d(0, exp_0(v)) is approximately 2||v||_2, for sufficiently short v\n for _ in range(100):\n c, _vs = _generate(0)\n _c, tangents = _generate(17, euclidean_max_norm=1e-10)\n\n tangent_lengths = np.linalg.norm(tangents, axis=-1)\n\n def check(x, y):\n d = poincare_ball_distance(c, x, y)\n assert d.shape == tangents.shape[:-1]\n np.testing.assert_allclose(d, 2 * tangent_lengths, rtol=1e-3)\n\n zeros = np.zeros_like(tangents)\n zero_moved = poincare_ball_exp(c, None, tangents)\n assert zero_moved.shape == tangents.shape\n check(zeros, zero_moved)\n check(zero_moved, zeros)\n\n\ndef test_poincare_ball_distance_vs_euclidean(seeded):\n for _ in range(100):\n # d_c(0, x) is approximtely 2||x||_2 for sufficiently small x\n c, vs = _generate(17, norm_range=(0, 0.01))\n\n zeros = np.zeros_like(vs)\n hyperbolic = poincare_ball_distance(c, zeros, vs)\n assert hyperbolic.shape == vs.shape[:-1]\n\n euclidean = np.linalg.norm(vs, axis=-1)\n np.testing.assert_allclose(hyperbolic, 2 * euclidean, rtol=1e-3, atol=1e-15)\n\n # d_c(0, x) is much larger than 2||x||_2 for sufficiently large x\n c, vs = _generate(17, norm_range=(0.99, 1))\n zeros = np.zeros_like(vs)\n 
hyperbolic = poincare_ball_distance(c, zeros, vs)\n assert hyperbolic.shape == vs.shape[:-1]\n\n euclidean = np.linalg.norm(vs, axis=-1)\n np.testing.assert_array_less(4 * euclidean, hyperbolic)\n" ]
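ExternalIdIndex above leans on two facts worth seeing in isolation: pandas' Index.get_indexer returns -1 for unknown IDs, and casting ilocs to np.min_scalar_type(len(index)) makes that -1 wrap to the reserved all-ones sentinel. A minimal sketch with toy IDs, not the stellargraph API itself:

import numpy as np
import pandas as pd

ids = pd.Index(["a", "b", "c"])
dtype = np.min_scalar_type(len(ids))          # uint8: 2**8 - 1 reserved as sentinel
ilocs = ids.get_indexer(["c", "a", "zzz"])    # array([ 2,  0, -1])
valid = (0 <= ilocs) & (ilocs < len(ids))     # unknown "zzz" flagged as invalid
print(ilocs.astype(dtype))                    # [  2   0 255]: -1 wraps to the sentinel
print(valid)                                  # [ True  True False]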
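The hyperbolic tests at the end assert two properties of the Poincare-ball distance from the origin: it matches 2 * ||x||_2 for small x, and it dwarfs the Euclidean norm near the boundary. Both follow from the closed form d_c(0, x) = (2 / sqrt(c)) * artanh(sqrt(c) * ||x||_2), sketched here in NumPy; this is my restatement of the standard formula, not stellargraph's poincare_ball_distance:

import numpy as np

def dist_from_origin(c, x):
    # Poincare ball of curvature -c: d_c(0, x) = (2 / sqrt(c)) * artanh(sqrt(c) * ||x||)
    norm = np.linalg.norm(x, axis=-1)
    return 2.0 / np.sqrt(c) * np.arctanh(np.sqrt(c) * norm)

c = 1.0
small = np.array([1e-4, 0.0])
near_boundary = np.array([0.999, 0.0])
print(dist_from_origin(c, small), 2 * np.linalg.norm(small))  # nearly equal
print(dist_from_origin(c, near_boundary))                     # ~7.6, far above 2 * 0.999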
[ [ "scipy.sparse.eye", "tensorflow.keras.backend.int_shape", "tensorflow.keras.Model", "tensorflow.keras.models.model_from_json", "tensorflow.keras.activations.get", "tensorflow.keras.layers.Input" ], [ "numpy.asarray", "numpy.cumsum", "pandas.Index", "numpy.concatenate", "numpy.bincount", "numpy.argsort", "numpy.min_scalar_type", "numpy.array" ], [ "numpy.random.random", "numpy.random.seed", "numpy.random.choice", "numpy.linalg.norm", "numpy.testing.assert_array_less", "numpy.zeros_like", "numpy.testing.assert_allclose", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhaojuanmao/fairscale
[ "61ece000bd1b70029270e2dccab66ffa2ca16d51", "61ece000bd1b70029270e2dccab66ffa2ca16d51", "61ece000bd1b70029270e2dccab66ffa2ca16d51" ]
[ "fairscale/nn/pipe/checkpoint.py", "fairscale/nn/pipe/rpc.py", "tests/nn/model_parallel/test_random.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Copyright 2019 Kakao Brain\n#\n\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Checkpointing with preceding recomputation.\n\nPyTorch already provides the official checkpointing utilities in\n:mod:`torch.utils.checkpoint`. The official checkpointing combines\nrecomputation and recursive backpropagation into one autograd function named\n``CheckpointFunction``. Hence, the recomputation can be started only when the\ngradients arrive to the function. In Pipe, the recomputation needs to precede\nthe gradient arrival to minimize the GPU idle time.\n\nWe solve this problem by introducing separate autograd functions named\n:class:`Recompute` and :class:`Checkpoint`. Each function represents\nrecomputation and recursive backpropagation, respectively. We can manipulate\nthe control flow in aspect of both the autograd engine and CUDA with a pair of\nthe functions.\n\nSpecifically, we place CUDA stream synchronization between :class:`Recompute`\nand :class:`Checkpoint` to delay only :class:`Checkpoint` until the gradient is\ncopied entirely.\n\n\"\"\"\nfrom collections import deque\nfrom contextlib import contextmanager\nimport threading\nfrom typing import TYPE_CHECKING, Deque, Generator, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import ByteTensor, Tensor\nimport torch.autograd\n\nfrom .dependency import fork, join\nfrom .microbatch import Batch\nfrom .phony import get_phony\n\n__all__ = [\"is_checkpointing\", \"is_recomputing\"]\n\n\nTensors = Tuple[Tensor, ...]\nTensorOrTensors = Union[Tensor, Tensors]\n\n# Types for shared memory between Checkpoint and Recompute.\nRecomputed = Tuple[TensorOrTensors, Tensors] # (output, input_leaf)\nRNGStates = Tuple[ByteTensor, Optional[ByteTensor]] # (cpu_rng_state, gpu_rng_state)\n\n\nif TYPE_CHECKING:\n from typing_extensions import Protocol\nelse:\n Protocol = object\n\n\n# Protocol with __call__ instead of Callable can be used as an attribute type.\n# See: https://github.com/python/mypy/issues/708#issuecomment-561735949\nclass Function(Protocol):\n def __call__(self, input: TensorOrTensors) -> TensorOrTensors:\n ...\n\n\nclass Checkpointing:\n \"\"\"Generates a pair of :class:`Checkpoint` and :class:`Recompute`.\"\"\"\n\n def __init__(self, function: Function, batch: Batch) -> None:\n self.function = function\n self.batch = batch\n\n # Shared memory between Checkpoint and Recompute. 
1-length deque is\n # used for mutability and length limitation.\n self.recomputed: Deque[Recomputed] = deque(maxlen=1)\n self.rng_states: Deque[RNGStates] = deque(maxlen=1)\n\n def checkpoint(self) -> Batch:\n \"\"\"Returns a batch applied by :class:`Checkpoint`.\"\"\"\n input_atomic = self.batch.atomic\n input = tuple(self.batch)\n\n # Use a phony which requires grad to ensure that Checkpoint can be\n # tracked by the autograd engine even when none of the input tensors\n # require grad.\n phony = get_phony(self.batch[0].device, requires_grad=True)\n\n output = Checkpoint.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *input)\n\n # Gradients are only supported for float Tensors.\n if isinstance(output, tuple):\n output = tuple([x if x.is_floating_point() else x.detach() for x in output])\n\n return Batch(output, self.batch.index)\n\n def recompute(self, batch: Batch) -> None:\n \"\"\"Applies :class:`Recompute` to the batch in place.\"\"\"\n input_atomic = self.batch.atomic\n input = tuple(self.batch)\n\n # batch[0] is always requiring grad, because it has been passed\n # checkpoint with a phony requiring grad.\n batch[0], phony = fork(batch[0])\n phony = Recompute.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *input)\n batch[0] = join(batch[0], phony)\n\n\nclass ThreadLocal(threading.local):\n def __init__(self) -> None:\n self.is_checkpointing = False\n self.is_recomputing = False\n\n\nthread_local = ThreadLocal()\n\n\n@contextmanager\ndef enable_checkpointing() -> Generator[None, None, None]:\n \"\"\"Makes :func:`is_checkpointing` return :data:`True` within a context.\"\"\"\n orig = thread_local.is_checkpointing\n thread_local.is_checkpointing = True\n try:\n yield\n finally:\n thread_local.is_checkpointing = orig\n\n\n@contextmanager\ndef enable_recomputing() -> Generator[None, None, None]:\n \"\"\"Makes :func:`is_recomputing` return :data:`True` within a context.\"\"\"\n orig = thread_local.is_recomputing\n thread_local.is_recomputing = True\n try:\n yield\n finally:\n thread_local.is_recomputing = orig\n\n\ndef is_checkpointing() -> bool:\n \"\"\"Whether the current forward propagation is under checkpointing.\n\n Returns:\n bool: :data:`True` if it's under checkpointing.\n\n \"\"\"\n return thread_local.is_checkpointing\n\n\ndef is_recomputing() -> bool:\n \"\"\"Whether the current forward propagation is under checkpoint\n recomputation. Use this to prevent duplicated side-effects at forward\n propagation::\n\n class Counter(nn.Module):\n def __init__(self):\n super().__init__()\n self.counter = 0\n\n def forward(self, input):\n if not is_recomputing():\n self.counter += 1\n return input\n\n Returns:\n bool: :data:`True` if it's under checkpoint recomputation.\n\n .. seealso:: :ref:`Detecting Recomputation`\n\n \"\"\"\n return thread_local.is_recomputing\n\n\nclass Context:\n \"\"\"The common interface between the :class:`Checkpoint` and\n :class:`Recompute` context.\n \"\"\"\n\n recomputed: Deque[Recomputed]\n rng_states: Deque[RNGStates]\n function: Function\n input_atomic: bool\n\n saved_tensors: Tuple[Tensor, ...]\n\n def save_for_backward(self, *tensors: Tensor) -> None: # pragma: no cover\n pass\n\n\ndef save_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> None:\n \"\"\":meth:`Checkpoint.forward` captures the current PyTorch's random number\n generator states at CPU and GPU to reuse in :meth:`Recompute.backward`.\n\n .. 
seealso:: :ref:`Referential Transparency`\n\n \"\"\"\n cpu_rng_state = torch.get_rng_state()\n\n gpu_rng_state: Optional[ByteTensor]\n if device.type == \"cuda\":\n gpu_rng_state = torch.cuda.get_rng_state(device)\n else:\n gpu_rng_state = None\n\n rng_states.clear()\n rng_states.append((cpu_rng_state, gpu_rng_state))\n\n\n@contextmanager\ndef restore_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> Generator[None, None, None]:\n \"\"\":meth:`Recompute.backward` restores the random number generator states\n captured by :func:`save_rng_states` within its context.\n\n .. seealso:: :ref:`Referential Transparency`\n\n \"\"\"\n cpu_rng_state, gpu_rng_state = rng_states[0]\n\n gpu_devices: List[torch.device] = []\n if device.type == \"cuda\":\n gpu_devices.append(device)\n\n with torch.random.fork_rng(gpu_devices):\n torch.set_rng_state(cpu_rng_state)\n if gpu_rng_state is not None:\n torch.cuda.set_rng_state(gpu_rng_state, device)\n yield\n\n\nclass Checkpoint(torch.autograd.Function):\n @staticmethod\n # type: ignore\n def forward(\n ctx: Context,\n phony: Tensor,\n recomputed: Deque[Recomputed],\n rng_states: Deque[RNGStates],\n function: Function,\n input_atomic: bool,\n *input: Tensor,\n ) -> TensorOrTensors:\n ctx.recomputed = recomputed\n ctx.rng_states = rng_states\n\n save_rng_states(input[0].device, ctx.rng_states)\n\n ctx.function = function\n ctx.input_atomic = input_atomic\n ctx.save_for_backward(*input)\n\n with torch.no_grad(), enable_checkpointing():\n output = function(input[0] if input_atomic else input)\n\n return output\n\n @staticmethod\n def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: # pragma: no cover\n output, input_leaf = ctx.recomputed.pop()\n\n if isinstance(output, tuple):\n tensors = output\n else:\n tensors = (output,)\n if any(y.requires_grad for y in tensors):\n tensors = tuple([x for x in tensors if x.requires_grad])\n torch.autograd.backward(tensors, grad_output)\n\n grad_input: List[Optional[Tensor]] = [None, None, None, None, None]\n grad_input.extend(x.grad for x in input_leaf)\n return tuple(grad_input)\n\n\nclass Recompute(torch.autograd.Function):\n @staticmethod\n # type: ignore\n def forward(\n ctx: Context,\n phony: Tensor,\n recomputed: Deque[Recomputed],\n rng_states: Deque[RNGStates],\n function: Function,\n input_atomic: bool,\n *input: Tensor,\n ) -> Tensor:\n ctx.recomputed = recomputed\n ctx.rng_states = rng_states\n\n ctx.function = function\n ctx.input_atomic = input_atomic\n ctx.save_for_backward(*input)\n\n return phony\n\n @staticmethod\n def backward(ctx: Context, *grad_output: Tensor) -> Tuple[None, ...]: # pragma: no cover\n input = ctx.saved_tensors\n input_leaf = tuple(x.detach().requires_grad_(x.requires_grad) for x in input)\n\n with restore_rng_states(input[0].device, ctx.rng_states):\n with torch.enable_grad(), enable_recomputing():\n output = ctx.function(input_leaf[0] if ctx.input_atomic else input_leaf)\n\n ctx.recomputed.append((output, input_leaf))\n\n grad_input: List[None] = [None, None, None, None, None]\n grad_input.extend(None for _ in ctx.saved_tensors)\n return tuple(grad_input)\n", "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom threading import Event, Lock, Thread\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n\nimport torch\nfrom torch import nn\nfrom torch.distributed import ProcessGroup, rpc\nfrom torch.distributed.distributed_c10d import _get_global_rank\n\nfrom fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group\n\nfrom .async_pipe import AsyncPipe\nfrom .types import EVENT_LOOP_QUEUE, PipeMessage, TensorOrTensors\n\nDEFAULT_MAX_SOURCE_POSITIONS = 1024\nDEFAULT_MAX_TARGET_POSITIONS = 1024\n\nPipeModel: AsyncPipe\nPipeResult: TensorOrTensors\n\n\nSizeOrSizes = Union[torch.Size, List[torch.Size]]\nDtypeOrDtypes = Union[torch.dtype, List[torch.dtype]]\n\n\ndef set_device_based_on_group(group: ProcessGroup) -> None:\n # torch.cuda.set_device(group.rank() % torch.cuda.device_count())\n torch.cuda.set_device(torch.distributed.get_rank() % torch.cuda.device_count())\n\n\ndef get_shapes(tensor: TensorOrTensors) -> SizeOrSizes:\n if isinstance(tensor, torch.Tensor):\n return tensor.shape\n else:\n return [t.shape for t in tensor]\n\n\ndef get_dtype(tensor: TensorOrTensors) -> DtypeOrDtypes:\n if isinstance(tensor, torch.Tensor):\n return tensor.dtype\n else:\n return [t.dtype for t in tensor]\n\n\ndef get_global_ranks_from_group(group: ProcessGroup) -> List[int]:\n return [_get_global_rank(group, r) for r in range(group.size())]\n\n\nclass PipeBackRedirect(torch.autograd.Function):\n @staticmethod\n # type: ignore\n def forward(ctx, inputs, dest, event, message, transport, futures):\n ctx.dest = dest\n ctx.event = event\n ctx.message = message\n ctx.transport = transport\n ctx.futures = futures\n return inputs\n\n @staticmethod\n # type: ignore\n def backward(ctx, *grad):\n ctx.message.tensors = tuple(grad)\n ctx.transport.send_message(ctx.message, sync=False, skip_header=True)\n ctx.event.set()\n # torch.futures.wait_all(ctx.futures)\n return (None, None, None, None, None, None)\n\n\ndef callback_with_model(callback: Callable[[Any, AsyncPipe], None], ctx: Any) -> None:\n try:\n group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group\n set_device_based_on_group(group)\n\n with PipeModel.lock:\n callback(ctx, PipeModel)\n except Exception as e:\n print(f\"callback_with_model got {e}\")\n\n\nclass PipeRPCWrapper(nn.Module):\n \"\"\"A wrapper for Pipe to control the entire pipeline from a single process.\n Typical usecase would have rank 0 construct `PipeRPCWrapper` and run the\n training loop as normal, and all other ranks would call\n `torch.distributed.rpc.shutdown()`\n\n To run code on each worker, e.g. 
to run the optimizer, use `foreach_worker`\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any):\n super().__init__()\n self.group = cast(ProcessGroup, kwargs.get(\"group\")) or get_pipeline_parallel_group()\n assert self.group.rank() == 0\n self.lock = Lock()\n\n if True:\n assert (\n self.group == get_pipeline_parallel_group()\n ), \"Can't pickle groups, so group must be `get_pipeline_parallel_group()`\"\n kwargs[\"group\"] = None\n else:\n kwargs[\"group\"] = self.group\n\n kwargs[\"input_device\"] = torch.device(\"cuda\", torch.cuda.current_device())\n\n self.model = AsyncPipe(*args, **kwargs)\n self.worker_map = kwargs[\"worker_map\"]\n self._foreach_worker(self._register_remote_model, args=(args, kwargs))\n self.model.cuda()\n\n def _get_rpc_name(self, rank: int) -> str:\n return self.worker_map[_get_global_rank(self.group, rank)]\n\n def _foreach_worker(self, callback: Callable, args: Any = None) -> None:\n futures = [rpc.rpc_async(self._get_rpc_name(rank), callback, args=args) for rank in range(1, self.group.size())]\n futures = [f.wait() for f in futures]\n\n def foreach_worker(\n self, callback: Callable[[Any, AsyncPipe], None], ctx: Any = None, *, include_self: bool = False\n ) -> None:\n \"\"\"Call `callback` on each worker with the `ctx` and model local to that\n worker. e.g.\n def register_optimizer(ctx, model):\n args, kwargs = ctx\n model.optimizer = torch.optim.SGD(model.parameters(), *args, **kwargs)\n\n pipe_model = PipeRPCWrapper( ... )\n\n pipe_model.foreach_worker(\n register_optimizer,\n ([], {\"lr\" : 0.01, \"momentum\" : 0.9})\n )\n \"\"\"\n\n self._foreach_worker(callback_with_model, args=(callback, ctx))\n\n if include_self:\n with self.model.lock:\n callback(ctx, self.model)\n\n def forward(self, tensor: TensorOrTensors) -> TensorOrTensors: # type: ignore\n shape = get_shapes(tensor)\n dtype = get_dtype(tensor)\n\n if isinstance(tensor, torch.Tensor):\n num_tensors = 1\n else:\n num_tensors = len(tensor)\n\n futures = [\n rpc.rpc_async(self._get_rpc_name(rank), self._model_forward, args=(self.model.training, shape, dtype))\n for rank in range(1, self.group.size())\n ]\n\n if self.model.final_stage:\n return self.model(tensor)\n else:\n event = Event()\n t = Thread(target=self._model_forward_first_stage, args=(tensor, event))\n t.start()\n\n shape, dtype = futures.pop().wait()\n dest_rank = self.group.size() - 1\n dest = self._get_rpc_name(dest_rank)\n dest_global_rank = _get_global_rank(self.group, dest_rank)\n src_global_rank = torch.distributed.get_rank()\n queue = EVENT_LOOP_QUEUE\n\n activations = PipeMessage(dest_global_rank, src_global_rank, queue_name=queue, tensor_count=num_tensors)\n grads = PipeMessage(src_global_rank, dest_global_rank, queue_name=queue, tensor_count=num_tensors)\n\n back_fut = rpc.rpc_async(\n dest, self._send_result_and_do_backwards, args=(self.model.training, activations, grads)\n )\n futures.append(back_fut)\n\n result = self._recv_result(self.model, shape, dtype, activations)\n if isinstance(result, torch.Tensor):\n result.requires_grad_()\n else:\n for r in result:\n r.requires_grad_()\n\n assert self.model.pipeline\n return PipeBackRedirect.apply(\n result, dest_global_rank, event, grads, self.model.pipeline.transport, futures\n )\n\n @property\n def final_stage(self) -> bool:\n return self.model.final_stage\n\n @staticmethod\n def _recv_result(\n model: AsyncPipe, shapes: SizeOrSizes, dtypes: DtypeOrDtypes, message: PipeMessage\n ) -> TensorOrTensors:\n group = get_pipeline_parallel_group()\n 
set_device_based_on_group(group)\n\n assert model.pipeline\n transport = model.pipeline.transport\n\n if isinstance(shapes, torch.Size):\n message.tensor_shapes = [cast(torch.Size, shapes)]\n message.tensor_dtypes = [cast(torch.dtype, dtypes)]\n message = transport.recv_message_tensors(message)\n return message.tensors[0]\n else:\n message.tensor_shapes = cast(List[torch.Size], shapes)\n message.tensor_dtypes = cast(List[torch.dtype], dtypes)\n message = transport.recv_message_tensors(message)\n return message.tensors\n\n @staticmethod\n def _send_result_and_do_backwards(training: bool, message: PipeMessage, grads_message: PipeMessage) -> None:\n group = get_pipeline_parallel_group()\n set_device_based_on_group(group)\n result = PipeResult\n model = PipeModel\n\n if isinstance(result, torch.Tensor):\n result = tuple([result])\n\n message.tensors = tuple(result)\n assert model.pipeline\n transport = model.pipeline.transport\n transport.send_message(message, sync=False, skip_header=True)\n\n if training:\n grads_message.tensor_shapes = [r.shape for r in result]\n grads_message.tensor_dtypes = [r.dtype for r in result]\n grads_message = transport.recv_message_tensors(grads_message)\n\n with model.lock:\n torch.autograd.backward(result, grads_message.tensors, retain_graph=True)\n\n @staticmethod\n def _register_remote_model(args: List[Any], kwargs: Dict[str, Any]) -> None:\n group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group\n set_device_based_on_group(group)\n kwargs[\"group\"] = group\n kwargs[\"input_device\"] = torch.device(\"cuda\", torch.cuda.current_device())\n model = AsyncPipe(*args, **kwargs)\n model.cuda()\n global PipeModel\n PipeModel = model\n\n @staticmethod\n def _model_forward(\n training: bool, shape: torch.Size, dtype: torch.dtype\n ) -> Optional[Tuple[SizeOrSizes, DtypeOrDtypes]]:\n try:\n if isinstance(shape, torch.Size):\n tensor = torch.empty(shape, dtype=dtype)\n else:\n tensor = tuple([torch.empty(s, dtype=d) for s, d in zip(shape, dtype)])\n\n model = PipeModel\n assert model.group\n set_device_based_on_group(model.group)\n\n model.train(training)\n result = model(tensor)\n if model.final_stage:\n global PipeResult\n PipeResult = result\n return (get_shapes(result), get_dtype(result))\n\n return None\n except Exception as e:\n print(f\"_model_forward got {e}\")\n raise e\n\n def _model_forward_first_stage(self, tensor: TensorOrTensors, event: Event) -> None:\n try:\n assert self.model.group\n set_device_based_on_group(self.model.group)\n self.model(tensor, event=event)\n except Exception as e:\n print(f\"_model_forward got {e}\")\n raise e\n", "# coding=utf-8\n\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom fairscale.nn.model_parallel import initialize as mpu\nfrom fairscale.nn.model_parallel import random\nfrom fairscale.nn.model_parallel.random import get_cuda_rng_tracker, model_parallel_cuda_manual_seed\nfrom fairscale.utils.testing import dist_init, spawn_for_all_world_sizes\n\n\ndef run_test_set_cuda_rng_state(rank, model_parallel_size, filename, filename_rpc):\n dist_init(rank, model_parallel_size, filename, filename_rpc)\n\n if torch.distributed.get_rank() == 0:\n print(\"> testing set_rng_state with size {} ...\".format(model_parallel_size))\n\n mpu.initialize_model_parallel(model_parallel_size)\n model_parallel_size = mpu.get_model_parallel_world_size()\n\n size = 123\n seed = 1234\n torch.cuda.manual_seed(1234)\n tensor = torch.cuda.FloatTensor(size)\n\n # Get the state\n rng_state = torch.cuda.get_rng_state()\n rng_state_copy = rng_state.clone()\n\n # Do some stuff.\n for _ in range(5):\n torch.randn(size, out=tensor)\n result_1 = tensor.clone()\n\n assert rng_state.sub(rng_state_copy).max() == 0\n assert torch.cuda.get_rng_state().sub(rng_state_copy).max() > 0\n\n # State should be different.\n new_rng_state = torch.cuda.get_rng_state()\n max_diff = new_rng_state.sub(rng_state).max()\n print(\n \" max diff in rng state (should be non-zero) on global rank {}: {}\".format(\n torch.distributed.get_rank(), max_diff\n )\n )\n assert max_diff > 0\n\n # Reset the rng state and do the same stuff.\n random._set_cuda_rng_state(rng_state)\n for _ in range(5):\n torch.randn(size, out=tensor)\n random._set_cuda_rng_state(rng_state)\n for _ in range(5):\n torch.randn(size, out=tensor)\n result_2 = tensor.clone()\n\n # Results should be the same\n error = result_2.sub(result_1).abs().max()\n print(\n \" max error in generated tensors (should be zero) on global rank {}: {}\".format(\n torch.distributed.get_rank(), error\n )\n )\n assert error < 1.0e-6\n\n # Input state should have remained intact.\n error = rng_state.sub(rng_state_copy).max()\n print(\n \" max error in rng state (should be zero) on global rank {}: {}\".format(torch.distributed.get_rank(), error)\n )\n assert error == 0\n\n # Reset groups\n mpu.destroy_model_parallel()\n\n torch.distributed.barrier()\n if torch.distributed.get_rank() == 0:\n print(\">> passed the test :-)\")\n\n\ndef run_test_cuda_rng_tracker(rank, model_parallel_size, filename, filename_rpc):\n dist_init(rank, model_parallel_size, filename, filename_rpc)\n\n if torch.distributed.get_rank() == 0:\n print(\"> testing cuda rng tracker with size {} ...\".format(model_parallel_size))\n\n mpu.initialize_model_parallel(model_parallel_size)\n model_parallel_size = mpu.get_model_parallel_world_size()\n\n seed_1 = 1234\n seed_2 = 4321\n size = [12, 21]\n tensor = torch.cuda.FloatTensor(size)\n\n # Set to seed_1 and generate two tensors.\n torch.cuda.manual_seed(seed_1)\n torch.randn(size, out=tensor)\n target_11 = tensor.clone()\n torch.randn(size, out=tensor)\n target_12 = 
tensor.clone()\n\n # Set to seed_2 and generate two tensors.\n torch.cuda.manual_seed(seed_2)\n torch.randn(size, out=tensor)\n target_21 = tensor.clone()\n torch.randn(size, out=tensor)\n target_22 = tensor.clone()\n\n # Now if we interleave seed_1 and seed_2,\n # we should still get the same tensors\n torch.cuda.manual_seed(seed_1)\n get_cuda_rng_tracker().add(\"test\", seed_2)\n\n torch.randn(size, out=tensor)\n result_11 = tensor.clone()\n\n with get_cuda_rng_tracker().fork(\"test\"):\n torch.randn(size, out=tensor)\n result_21 = tensor.clone()\n\n torch.randn(size, out=tensor)\n result_12 = tensor.clone()\n\n with get_cuda_rng_tracker().fork(\"test\"):\n torch.randn(size, out=tensor)\n result_22 = tensor.clone()\n\n diff = result_11.sub(result_21).abs().max()\n diff = min(diff, result_12.sub(result_22).abs().max())\n print(\n \" max diff in generated tensors (should be non-zero) on global rank {}: {}\".format(\n torch.distributed.get_rank(), diff\n )\n )\n assert diff > 1.0e-6\n error = max(result_11.sub(target_11).abs().max(), result_12.sub(target_12).abs().max())\n error = max(error, result_21.sub(target_21).abs().max())\n error = max(error, result_22.sub(target_22).abs().max())\n print(\n \" max error in generated tensors (should be zero) on global rank {}: {}\".format(\n torch.distributed.get_rank(), error\n )\n )\n assert error < 1.0e-6\n\n # Reset the tracker\n get_cuda_rng_tracker().reset()\n\n # Reset groups\n mpu.destroy_model_parallel()\n\n torch.distributed.barrier()\n if torch.distributed.get_rank() == 0:\n print(\">> passed the test :-)\")\n\n\ndef run_test_model_parallel_cuda_manual_seed(rank, model_parallel_size, filename, filename_rpc):\n dist_init(rank, model_parallel_size, filename, filename_rpc)\n\n if torch.distributed.get_rank() == 0:\n print(\"> testing model parallel cuda manual seed with size {} ...\".format(model_parallel_size))\n\n mpu.initialize_model_parallel(model_parallel_size)\n model_parallel_size = mpu.get_model_parallel_world_size()\n\n model_parallel_cuda_manual_seed(12345)\n assert torch.cuda.initial_seed() == 12345\n with get_cuda_rng_tracker().fork():\n assert torch.cuda.initial_seed() == (12345 + 2718 + mpu.get_model_parallel_rank())\n\n # Reset the tracker\n get_cuda_rng_tracker().reset()\n\n # Reset groups\n mpu.destroy_model_parallel()\n\n torch.distributed.barrier()\n if torch.distributed.get_rank() == 0:\n print(\">> passed the test :-)\")\n\n\ndef test_set_cuda_rng_state():\n spawn_for_all_world_sizes(run_test_set_cuda_rng_state)\n\n\ndef test_cuda_rng_tracker():\n spawn_for_all_world_sizes(run_test_cuda_rng_tracker)\n\n\ndef test_model_parallel_cuda_manual_seed():\n spawn_for_all_world_sizes(run_test_model_parallel_cuda_manual_seed)\n" ]
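A minimal, self-contained sketch of the RNG capture/replay pattern that save_rng_states and restore_rng_states in the checkpoint.py record above rely on. It assumes only the public torch API and, for brevity, exercises only the CPU generator (the record additionally snapshots the per-device CUDA state):

import torch

snapshot = torch.get_rng_state()       # capture the CPU RNG state, as save_rng_states does
first = torch.randn(3)                 # consume randomness, advancing the generator

with torch.random.fork_rng():          # sandbox the generator, as restore_rng_states does
    torch.set_rng_state(snapshot)      # rewind to the captured state
    replayed = torch.randn(3)          # replays exactly the same draw

assert torch.equal(first, replayed)    # recomputation sees identical randomness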
[ [ "torch.set_rng_state", "torch.enable_grad", "torch.autograd.backward", "torch.random.fork_rng", "torch.get_rng_state", "torch.no_grad", "torch.cuda.get_rng_state", "torch.cuda.set_rng_state" ], [ "torch.empty", "torch.cuda.current_device", "torch.distributed.distributed_c10d._get_global_rank", "torch.autograd.backward", "torch.distributed.rpc.rpc_async", "torch.distributed.get_rank", "torch.cuda.device_count" ], [ "torch.cuda.initial_seed", "torch.cuda.manual_seed", "torch.randn", "torch.distributed.get_rank", "torch.distributed.barrier", "torch.cuda.FloatTensor", "torch.cuda.get_rng_state" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
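For contrast, a hedged sketch of the stock torch.utils.checkpoint API that the checkpoint.py docstring in this record compares itself against: there, recomputation can only begin once gradients reach CheckpointFunction during backward. Newer PyTorch releases may also expect an explicit use_reentrant argument, omitted here for brevity:

import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Linear(4, 4)
x = torch.randn(2, 4, requires_grad=True)

y = checkpoint(layer, x)    # forward runs without storing intermediate activations
y.sum().backward()          # the forward pass is recomputed here, inside backward
assert x.grad is not None   # gradients still reach the checkpointed input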
vrom7632/vrom_live_rep
[ "e56ec45a821f98c40bfa520387a4e895923c4241" ]
[ "Deep-Reinforcement-Learning-Book/meirotansaku/katihanpukuhou_Q/my_meirotansaku_katihanpukuhou_Q.py" ]
[ "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import animation\r\n\r\ndef init_byouga(plt):\r\n \"\"\" 迷路の初期描画\"\"\"\r\n # 迷路を描画\r\n fig = plt.figure(figsize=(5, 5))\r\n \r\n ax = plt.gca()\r\n\r\n # 壁の描画\r\n plt.plot([1,1], [0,1], color='red', linewidth=2)\r\n plt.plot([1,2], [2,2], color='red', linewidth=2)\r\n plt.plot([2,2], [2,1], color='red', linewidth=2)\r\n plt.plot([2,3], [1,1], color='red', linewidth=2)\r\n\r\n # 状態を示す文字の描画\r\n plt.text(0.5, 2.5, 'S0', size=14, ha='center')\r\n plt.text(1.5, 2.5, 'S1', size=14, ha='center')\r\n plt.text(2.5, 2.5, 'S2', size=14, ha='center')\r\n plt.text(0.5, 1.5, 'S3', size=14, ha='center')\r\n plt.text(1.5, 1.5, 'S4', size=14, ha='center')\r\n plt.text(2.5, 1.5, 'S5', size=14, ha='center')\r\n plt.text(0.5, 0.5, 'S6', size=14, ha='center')\r\n plt.text(1.5, 0.5, 'S7', size=14, ha='center')\r\n plt.text(2.5, 0.5, 'S8', size=14, ha='center')\r\n plt.text(0.5, 2.3, 'START', ha='center')\r\n plt.text(2.5, 0.3, 'GOAL', ha='center')\r\n\r\n # 描画範囲の設定\r\n ax.set_xlim(0, 3)\r\n ax.set_ylim(0, 3)\r\n\r\n # 現在地S0に緑丸を描画する\r\n line, = ax.plot([0.5], [2.5], marker=\"o\", color='g', markersize=60)\r\n return fig,line\r\n\r\ndef init_theta(np):\r\n \"\"\" thetaの初期値の決定 \"\"\"\r\n # 行は状態0~7、列は移動方向で↑、→、↓、←を表す\r\n return np.array([[np.nan, 1, 1, np.nan], # snp.nan\r\n [np.nan, 1, np.nan, 1], # s1\r\n [np.nan, np.nan, 1, 1], # s2\r\n [1, 1, 1, np.nan], # s3\r\n [np.nan, np.nan, 1, 1], # s4\r\n [1, np.nan, np.nan, np.nan], # s5\r\n [1, np.nan, np.nan, np.nan], # s6\r\n [1, 1, np.nan, np.nan], # s7、※s8はゴールなので、方策はなし\r\n ],dtype=float)\r\n\r\ndef simple_convert_into_pi_from_theta(theta):\r\n \"\"\"方策パラメータtheta_0をランダム方策piに変換する関数の定義\"\"\"\r\n \"\"\"単純に割合を計算する\"\"\"\r\n theta = np.array(list(map(lambda th:th/np.nansum(th),theta))) # 割合の計算\r\n theta = np.nan_to_num(theta) # nanを0に変換\r\n return theta\r\n\r\ndef get_action(s, Q, epsilon, pi_0):\r\n \"\"\"次の行動を決定する関数\"\"\"\r\n # action:[up, right, down, left]\r\n action = [0, 1, 2, 3]\r\n # 行動を決める\r\n if np.random.rand() < epsilon:\r\n # εの確率でランダムに動く\r\n next_action = np.random.choice(action, p=pi_0[s])\r\n else:\r\n # Qの最大値の行動を採用する\r\n next_action = action[np.nanargmax(Q[s])]\r\n\r\n return next_action\r\n\r\n# def get_s_next(s, a, Q, epsilon, pi_0):\r\ndef get_s_next(s, a):\r\n \"\"\"次の状態を決定する関数\"\"\"\r\n # action:[up, right, down, left]\r\n direction = [-3, 1, 3, -1]\r\n s_next = s + direction[a]\r\n return s_next\r\n\r\ndef q_learning(s, a, r, s_next, Q, eta, gamma):\r\n \"\"\"Q学習による行動価値関数Qの更新\"\"\"\r\n if s_next == 8: # ゴールした場合\r\n Q[s, a] = Q[s, a] + eta * (r - Q[s, a])\r\n else:\r\n Q[s, a] = Q[s, a] + eta * (r + gamma * np.nanmax(Q[s_next]) - Q[s, a])\r\n\r\n return Q\r\n\r\ndef goal_maze_ret_s_a_Q(Q, epsilon, eta, gamma, pi):\r\n \"\"\"Q_learningで迷路を解く関数の定義、状態と行動の履歴および更新したQを出力\"\"\"\r\n s = 0 # スタート地点\r\n a_next = get_action(s, Q, epsilon, pi) # 初期の行動\r\n s_a_history = [[0, np.nan]] # エージェントの移動を記録するリスト\r\n\r\n while True: # ゴールするまでループ\r\n a = a_next # 行動更新\r\n\r\n s_a_history[-1][1] = a\r\n # 現在の状態(つまり一番最後なのでindex=-1)に行動を代入\r\n\r\n s_next = get_s_next(s, a)\r\n # 次の状態を格納\r\n\r\n s_a_history.append([s_next, np.nan])\r\n # 次の状態を代入。行動はまだ分からないのでnanにしておく\r\n\r\n # 報酬を与え, 次の行動を求めます\r\n if s_next == 8:\r\n r = 1 # ゴールにたどり着いたなら報酬を与える\r\n a_next = np.nan\r\n else:\r\n r = 0\r\n a_next = get_action(s_next, Q, epsilon, pi)\r\n # 次の行動a_nextを求めます。\r\n\r\n # 価値関数を更新\r\n Q = q_learning(s, a, r, s_next, Q, eta, gamma)\r\n\r\n # 終了判定\r\n if s_next == 8: # ゴール地点なら終了\r\n break\r\n else:\r\n s = 
s_next\r\n\r\n return [s_a_history, Q]\r\n\r\ndef agent_animation(s_a_history, fig, line):\r\n \"\"\"初期化関数とフレームごとの描画関数を用いて動画を作成する\"\"\"\r\n def init_background():\r\n \"\"\"背景画像の初期化\"\"\"\r\n line.set_data([], [])\r\n return (line,)\r\n\r\n def animate(i):\r\n \"\"\"フレームごとの描画内容\"\"\"\r\n state = s_a_history[i][0] # 現在の場所を描く\r\n x = (state % 3) + 0.5 # 状態のx座標は、3で割った余り+0.5\r\n y = 2.5 - int(state / 3) # y座標は3で割った商を2.5から引く\r\n line.set_data(x, y)\r\n return (line,)\r\n\r\n return animation.FuncAnimation(fig, animate, init_func=init_background, frames=len(\r\n s_a_history), interval=400, repeat=False)\r\n\r\nif __name__ == \"__main__\":\r\n # 迷路の初期描画\r\n fig,line = init_byouga(plt)\r\n\r\n # 探索可能位置の初期化\r\n theta_0 = init_theta(np)\r\n\r\n # ランダム行動方策pi_0を求める\r\n pi_0 = simple_convert_into_pi_from_theta(theta_0) \r\n \r\n # 初期の行動価値関数Qを設定\r\n [a, b] = theta_0.shape # 行と列の数をa, bに格納\r\n Q = np.random.rand(a, b) * theta_0\r\n # * theta0をすることで要素ごとに掛け算をし、Qの壁方向の値がnanになる\r\n eta = 0.1 # 学習率\r\n gamma = 0.9 # 時間割引率\r\n epsilon = 0.5 # ε-greedy法の初期値\r\n v = np.nanmax(Q, axis=1) # 状態ごとに価値の最大値を求める\r\n episode_count = 100\r\n for i in range(0,episode_count):\r\n print(\"episode:{}\".format(i))\r\n\r\n # ε-greedyの値を少しずつ小さくする\r\n epsilon = epsilon / 2\r\n # Q_learningで迷路を解き、移動した履歴と更新したQを求める\r\n [s_a_history, Q] = goal_maze_ret_s_a_Q(Q, epsilon, eta, gamma, pi_0)\r\n\r\n # 状態価値の変化\r\n new_v = np.nanmax(Q, axis=1) # 状態ごとに価値の最大値を求める\r\n print(np.sum(np.abs(new_v - v))) # 状態価値の変化を出力\r\n print(\"Q:{}\".format(Q))\r\n v = new_v\r\n print(\"step:{}\".format(len(s_a_history) - 1))\r\n # if i == 0:\r\n # ani = agent_animation(s_a_history, fig, line)\r\n # ani.save(\"meirotansaku_katihanpukuhou_Q_1_gakushumae.mp4\", writer=\"ffmpeg\")\r\n # elif (i + 1) % 10 == 0:\r\n # ani = agent_animation(s_a_history, fig, line)\r\n # ani.save(\"meirotansaku_katihanpukuhou_Q_2_gakushuchu_{}.mp4\".format(i), writer=\"ffmpeg\")\r\n \r\n # print(\"count:{}\".format(count))\r\n # 迷路探索のアニメーション描画\r\n # ani = agent_animation(s_a_history, fig, line)\r\n # ani.save(\"meirotansaku_katihanpukuhou_Q_3_gakushugo.mp4\", writer=\"ffmpeg\")\r\n" ]
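The core of q_learning() in the record above is the tabular temporal-difference update Q[s, a] += eta * (r + gamma * max Q[s_next] - Q[s, a]), with the bootstrap term dropped once s_next is the goal. A minimal sketch on one made-up, non-goal transition (the indices are illustrative, not taken from a real episode):

import numpy as np

Q = np.random.rand(9, 4)        # 9 maze states x 4 actions, as in the record
eta, gamma = 0.1, 0.9           # learning rate and discount, as in the record
s, a, r, s_next = 0, 1, 0, 1    # hypothetical non-goal transition

Q[s, a] = Q[s, a] + eta * (r + gamma * np.nanmax(Q[s_next]) - Q[s, a])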
[ [ "numpy.nanmax", "matplotlib.pyplot.gca", "numpy.nanargmax", "numpy.abs", "numpy.random.choice", "numpy.nan_to_num", "matplotlib.pyplot.plot", "numpy.nansum", "numpy.random.rand", "matplotlib.pyplot.text", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
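get_action() in the same record is plain epsilon-greedy selection, and the record's main loop halves epsilon every episode, so exploration decays geometrically. A stand-alone restatement under those assumptions (epsilon_greedy is a hypothetical name, not defined in the record):

import numpy as np

def epsilon_greedy(s, Q, epsilon, pi_0):
    # explore: sample one of the four moves, weighted by the random policy pi_0
    if np.random.rand() < epsilon:
        return np.random.choice([0, 1, 2, 3], p=pi_0[s])
    # exploit: greedy action under Q; nanargmax skips walled (nan) directions
    return int(np.nanargmax(Q[s]))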
icdm2021submission/Continual-Neural-Network-Model-Retraining
[ "7a84f211c7750b862fa5e31293d22d4d0dabed23", "7a84f211c7750b862fa5e31293d22d4d0dabed23", "7a84f211c7750b862fa5e31293d22d4d0dabed23", "7a84f211c7750b862fa5e31293d22d4d0dabed23" ]
[ "src_rnn_rl_group/collect_old.py", "src_fc_rl_gradient/collect.py", "src_rnn_rl/table.py", "src_rl_gradient/model/modeling.py" ]
[ "# rm *.txt & ./bash.sh\n# experiments/base_model/params.json\n# cd /Users/xiaofengzhu/Documents/continual_learning/src\n# tensorboard --logdir\nimport argparse\nimport logging\nimport os\nimport time\nimport glob\nimport tensorflow as tf\nfrom model.utils import Params\nfrom model.utils import set_logger\nfrom model.utils import cal_train_size\nfrom model.reader import load_dataset_from_tfrecords\nfrom model.reader import input_fn\nfrom model.modeling import model_fn\nfrom model.training import evaluate_on_train\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_dir', default='experiments/base_model',\n help=\"Directory containing params.json\")\n# loss functions\n# cnn, boost, retrain_regu\nparser.add_argument('--loss_fn', default='cnn', help=\"model loss function\")\n# tf data folder for\n# mnist\nparser.add_argument('--data_dir', default='../data/imdb',\n help=\"Directory containing the dataset\")\n# test.tfrecords\nparser.add_argument('--tfrecords_filename', default='.tfrecords',\n help=\"Dataset-filename for the tfrecords\")\n# usage: python main.py --restore_dir experiments/base_model/best_weights\nparser.add_argument('--restore_dir', default=None, # experimens/base_model/best_weights\n help=\"Optional, directory containing weights to reload\")\nparser.add_argument('--train_range', default='[0-4]',\n help=\"training tf range\")\n# using pretrained weights and gradient boosting on datasets A and B\n# params.num_learners > 1\n# parser.add_argument('--retrain', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n# help=\"try on augmented test dataset\")\n# train on datasets A and B\n# parser.add_argument('--collect', default=True, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n# help=\"try on augmented test dataset\")\nparser.add_argument('--finetune', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n help=\"try on augmented test dataset\")\nparser.add_argument('--use_kfac', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n help=\"usek fac true gradient\")\nparser.add_argument('--log', default='',\n help=\"train log postfix\")\n\nif __name__ == '__main__':\n # Train the model \n tf.reset_default_graph()\n # Set the random seed for the whole graph for reproductible experiments\n tf.set_random_seed(230)\n # Load the parameters from the experiment params.json file in model_dir\n args = parser.parse_args()\n json_path = os.path.join(args.model_dir, 'params.json')\n assert os.path.isfile(json_path), \"No json configuration file found at {}\".format(json_path)\n # Set the logger\n set_logger(os.path.join(args.model_dir, 'collect{}.log'.format(args.log)))\n params = Params(json_path)\n params.dict['loss_fn'] = args.loss_fn\n params.dict['collect'] = True\n params.dict['finetune'] = args.finetune\n params.dict['use_kfac'] = args.use_kfac\n if 'reuters' in args.data_dir:\n params.dict['num_classes'] = 46 \n # Load the parameters from the dataset, that gives the size etc. 
into params\n json_path = os.path.join(args.data_dir, 'dataset_params.json')\n assert os.path.isfile(json_path), \"No json file found at {}, \\\n please generate tfrecords\".format(json_path)\n params.update(json_path)\n params.dict['train_size'] = cal_train_size(params.train_size, args.train_range)\n # print(params.dict)\n global_epoch = 0\n args.restore_dir = 'best_weights'\n path_train_tfrecords = os.path.join(args.data_dir, 'train-{}'.format(args.train_range) + args.tfrecords_filename)\n # Create the input data pipeline\n logging.info(\"Creating the datasets...\")\n train_dataset = load_dataset_from_tfrecords(glob.glob(path_train_tfrecords))\n #########################################################\n params.dict['training_keep_prob'] = 1.0\n start_time = time.time()\n train_dataset = load_dataset_from_tfrecords(glob.glob(path_train_tfrecords))\n # Specify other parameters for the dataset and the model\n # Create the two iterators over the two datasets\n train_inputs = input_fn('vali', train_dataset, params)\n evaluate_on_train_model_spec = model_fn('vali', train_inputs, params, reuse=True)\n logging.info(\"- done.\")\n args.restore_dir = 'best_weights'\n global_epoch = evaluate_on_train(evaluate_on_train_model_spec,\n args.model_dir, params, restore_from=args.restore_dir,\\\n global_epoch=global_epoch)\n logging.info(\"global_epoch: {} epoch(s)\".format(global_epoch))\n logging.info(\"total time: %s seconds ---\" % (time.time() - start_time))\n", "# rm *.txt & ./bash.sh\n# experiments/base_model/params.json\n# cd /Users/xiaofengzhu/Documents/continual_learning/src\n# tensorboard --logdir\nimport argparse\nimport logging\nimport os\nimport time\nimport glob\nimport tensorflow as tf\nfrom model.utils import Params\nfrom model.utils import set_logger\nfrom model.utils import cal_train_size\nfrom model.reader import load_dataset_from_tfrecords\nfrom model.reader import input_fn\nfrom model.modeling import model_fn\nfrom model.training import evaluate_on_train\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_dir', default='experiments/base_model',\n help=\"Directory containing params.json\")\n# loss functions\n# cnn, boost, retrain_regu\nparser.add_argument('--loss_fn', default='cnn', help=\"model loss function\")\n# tf data folder for\n# mnist\nparser.add_argument('--data_dir', default='../data/sea',\n help=\"Directory containing the dataset\")\n# test.tfrecords\nparser.add_argument('--tfrecords_filename', default='.tfrecords',\n help=\"Dataset-filename for the tfrecords\")\n# usage: python main.py --restore_dir experiments/base_model/best_weights\nparser.add_argument('--restore_dir', default=None, # experimens/base_model/best_weights\n help=\"Optional, directory containing weights to reload\")\nparser.add_argument('--train_range', default='[0-4]',\n help=\"training tf range\")\n# using pretrained weights and gradient boosting on datasets A and B\n# params.num_learners > 1\n# parser.add_argument('--retrain', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n# help=\"try on augmented test dataset\")\n# train on datasets A and B\n# parser.add_argument('--collect', default=True, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n# help=\"try on augmented test dataset\")\nparser.add_argument('--finetune', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n help=\"try on augmented test dataset\")\nparser.add_argument('--use_kfac', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \\\n 
help=\"usek fac true gradient\")\nparser.add_argument('--log', default='',\n help=\"train log postfix\")\n\nif __name__ == '__main__':\n # Train the model \n tf.reset_default_graph()\n # Set the random seed for the whole graph for reproductible experiments\n tf.set_random_seed(230)\n # Load the parameters from the experiment params.json file in model_dir\n args = parser.parse_args()\n json_path = os.path.join(args.model_dir, 'params.json')\n assert os.path.isfile(json_path), \"No json configuration file found at {}\".format(json_path)\n # Set the logger\n set_logger(os.path.join(args.model_dir, 'collect{}.log'.format(args.log)))\n params = Params(json_path)\n params.dict['loss_fn'] = args.loss_fn\n params.dict['collect'] = True\n params.dict['finetune'] = args.finetune\n params.dict['use_kfac'] = args.use_kfac\n # Load the parameters from the dataset, that gives the size etc. into params\n json_path = os.path.join(args.data_dir, 'dataset_params.json')\n assert os.path.isfile(json_path), \"No json file found at {}, \\\n please generate tfrecords\".format(json_path)\n params.update(json_path)\n global_epoch = 0\n args.restore_dir = 'best_weights'\n path_train_tfrecords = os.path.join(args.data_dir, 'train-{}'.format(args.train_range) + args.tfrecords_filename)\n path_sample_train_tfrecords = os.path.join(args.data_dir, 'sample' + args.tfrecords_filename) \n training_files = glob.glob(path_train_tfrecords)\n if args.train_range == '[0-4]':\n params.dict['train_size'] = cal_train_size(params.train_size, args.train_range)\n else:\n params.dict['train_size'] = cal_train_size(params.train_size, args.train_range) *2 # sample.tfrecords is around the same size\n training_files.append(path_sample_train_tfrecords)\n # params.dict['train_size'] = cal_train_size(params.train_size, '[0-' + args.train_range + ']')\n # Create the input data pipeline\n logging.info(\"Creating the datasets...\")\n #########################################################\n params.dict['training_keep_prob'] = 1.0\n start_time = time.time()\n train_dataset = load_dataset_from_tfrecords(training_files)\n # Specify other parameters for the dataset and the model\n # Create the two iterators over the two datasets\n train_inputs = input_fn('vali', train_dataset, params)\n evaluate_on_train_model_spec = model_fn('vali', train_inputs, params, reuse=True)\n logging.info(\"- done.\")\n args.restore_dir = 'best_weights'\n global_epoch = evaluate_on_train(evaluate_on_train_model_spec,\n args.model_dir, params, restore_from=args.restore_dir,\\\n global_epoch=global_epoch)\n logging.info(\"global_epoch: {} epoch(s)\".format(global_epoch))\n logging.info(\"total time: %s seconds ---\" % (time.time() - start_time))\n", "import glob\nimport pandas as pd\nimport os\n\nlog_model = {'fisher': 'EWC', 'mas': 'MAS', 'mine3': 'New3', \\\n'mine': 'New', 'retrain': 'Fine-tune', 'regu': 'Fine-tune2', 'selfless': 'Selfless'}\ndef getPaths(folder, prefix):\n\ttxt_files = glob.glob(os.path.join(folder, '{}_*.log'.format(prefix)))\n\treturn txt_files\n\ndef get_data_model(file_path):\n\tfields = os.path.basename(file_path)\n\tdataset_model = fields.split('.')[0].split('_')\n\treturn dataset_model[0], dataset_model[1]\n\ndef get_accs(file_path):\n\twith open(file_path, 'r') as f:\n\t\taccs = []\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\tif '- Eval metrics:' in line:\n\t\t\t# if line.startswith('- Eval metrics:'):\n\t\t\t\t# print(line)\n\t\t\t\tfields = line.split(' ')\n\t\t\t\tacc = float(fields[-1])\n\t\t\t\taccs.append(acc)\n\t\tif 
(len(accs) != 5):\n\t\t\tprint('ERROR! in ', file_path)\n\t\treturn accs\n\ndef get_time(file_path):\n\twith open(file_path, 'r') as f:\n\t\taccs = []\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\tif 'total time:' in line:\n\t\t\t# if line.startswith('total time:'):\n\t\t\t\t# print(line)\n\t\t\t\tfields = line.split(' ')\n\t\t\t\tacc = float(fields[-3])\n\t\t\t\taccs.append(acc)\n\t\tif (len(accs) != 5):\n\t\t\tprint('ERROR! in {} with length: {}'.format(file_path, len(accs)))\n\t\treturn accs\n\ndef print_tables(folder, talbe_prefix, caption):\n\t# print('-----------Results for {}'.format(folder))\n\tprint(talbe_prefix)\n\ttxt_files = getPaths(folder, 'test')\n\ttxt_files.sort()\n\tlines = {}\n\tfor file_path in txt_files:\n\t\tdataset, model = get_data_model(file_path)\n\t\tif 'kfac' in model:\n\t\t\tcontinue\n\t\tif model == 'Arms':\n\t\t\tmodel = 'New'\n\t\t# model = log_model[model]\n\t\taccs = get_accs(file_path)\n\t\tlines[model] = accs\n\t\t# accs = [\"{:.4f}\".format(v) for v in accs]\n\t\t# acc_str = ' &'.join(accs)\n\t\t# print('{} &{}'.format(model, acc_str))\n\tdf = pd.DataFrame(lines)\n\t# print(df)\n\trun_model = df.idxmax(axis=1)\n\t# print(run_model)\n\tfor model in lines:\n\t\taccs = lines[model]\n\t\taccs = [\"{:.2f}\".format(v*100) for v in accs]\n\t\tfor i in range(len(accs)):\n\t\t\tif run_model[i] == model:\n\t\t\t\taccs[i] = '\\\\textbf{' + accs[i] + '}'\n\t\tacc_str = ' &'.join(accs)\n\t\tprint('{} &{}\\\\\\\\ \\\\hline'.format(model, acc_str))\n\ttabel_suffix = generate_tabel_suffix(caption, 'accuracy (\\\\%)')\n\tprint(tabel_suffix)\n\tprint()\n\t# print(talbe_prefix)\n\t# txt_files = getPaths(folder, 'train')\n\t# txt_files.sort()\n\t# lines = {}\n\t# for file_path in txt_files:\n\t# \tdataset, model = get_data_model(file_path)\n\t# \tif 'kfac' in model:\n\t# \t\tcontinue\t\t\n\t# \tif model == 'Arms':\n\t# \t\tmodel = 'New'\t\t\n\t# \t# model = log_model[model]\n\t# \taccs = get_time(file_path)\n\t# \tlines[model] = accs\n\t# \t# accs = [\"{:.2f}\".format(v) for v in accs]\n\t# \t# acc_str = ' &'.join(accs)\n\t# \t# print('{} &{}'.format(model, acc_str))\n\t# df = pd.DataFrame(lines)\n\t# # print(df)\n\t# run_model = df.idxmin(axis=1)\n\t# # print(run_model)\n\t# for model in lines:\n\t# \taccs = lines[model]\n\t# \taccs = [\"{:.2f}\".format(v) for v in accs]\n\t# \tfor i in range(len(accs)):\n\t# \t\tif run_model[i] == model:\n\t# \t\t\taccs[i] = '\\\\textbf{' + accs[i] + '}'\n\t# \tacc_str = ' &'.join(accs)\n\t# \tprint('{} &{}\\\\\\\\ \\\\hline'.format(model, acc_str))\n\t# tabel_suffix = generate_tabel_suffix(caption, 'time (s)')\n\t# print(tabel_suffix)\n\ntalbe_prefix = '\\\\begin{table}[t]\\n'+\\\n'\\\\centering\\n'+\\\n'\\\\begin{tabular}{|l|r|r|r|r|r|}\\n'+\\\n'& R 1 & R 2 & R 3 & R 4 & R 5 \\\\\\\\ \\\\hline\\n' \n\ndef generate_tabel_suffix(caption, r_type):\n\tcaption = r_type + ' for ' + caption\n\ttalbe_suffix = '\\\\end{tabular}\\n'+\\\n\t'\\\\caption{'+caption+'}\\n'+\\\n\t'\\\\label{'+caption+'}\\n'+\\\n\t'\\\\end{table}\\n'\n\treturn talbe_suffix\n\n\n# datasets = ['imdb', 'reuters']\nprefix = 'result_'\ndatasets = ['reuters']\nfolders = ['Greedy', 'EI', 'Reservoir']\n\nfor dataset in datasets:\n\tprint('\\\\subsection{' + dataset.upper() +' Results}')\n\tprint()\n\tfor folder in folders:\n\t\t# print('-------------------------')\n\t\tcaption = '{} with the {} setting'.format(dataset, folder)\n\t\tfolder = '{}{}_{}'.format(prefix, dataset, folder)\n\t\t# print(folder)\n\t\t# folder = '{}{}_{}'.format(prefix, folder, 
dataset)\n\t\tprint_tables(folder,talbe_prefix,caption)\n\t\tprint()\n\t\t# print('-------------------------')\n", "\"\"\"Define the model.\"\"\"\nimport sys, random, logging\nimport tensorflow as tf\nimport numpy as np\nfrom util import loss_fns, search_metrics\nfrom tensorflow.python.ops import array_ops\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom matplotlib import pyplot\nimport os\nimport math\nimport functools\nimport time\nimport kfac\n\ndef lenet_mine3(X, is_training, params=None, var_scope='cnn', trainable=True):\n l2_loss = tf.constant(0.0, dtype=tf.float32)\n neurons = []\n weights = []\n old_neurons = []\n gradients_n = []\n gradients_w = []\n # CONVOLUTION 1 - 1\n with tf.name_scope('conv1_1'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n trainable=trainable)\n biases1_1 = tf.get_variable('biases1_1', shape=[32], \\\n trainable=trainable)\n # filter1_1 = tf.stop_gradient(filter1_1)\n # biases1_1 = tf.stop_gradient(biases1_1)\n stride = [1,1,1,1]\n conv1_1 = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n out1_1 = tf.nn.bias_add(conv1_1, biases1_1)\n conv1_1 = tf.nn.relu(out1_1)\n old_neurons.append(conv1_1)\n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n filter1_1 = tf.multiply(mask_filter1_1, filter1_1)\n mask_biases1_1 = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.zeros_initializer())\n biases1_1 = tf.multiply(mask_biases1_1, biases1_1)\n # filter1_1 = tf.nn.tanh(filter1_1)\n # filter1_1 = tf.nn.relu(filter1_1)\n stride = [1,1,1,1]\n conv1_1 = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n out1_1 = tf.nn.bias_add(conv1_1, biases1_1)\n conv1_1 = tf.nn.relu(out1_1)\n neurons.append(conv1_1)\n l2_loss += tf.nn.l2_loss(mask_filter1_1-1)\n l2_loss += tf.nn.l2_loss(mask_biases1_1)\n # with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out1_1 = tf.layers.batch_normalization(out1_1, training=is_training, name='bn_conv1_1')\n \n weights.extend([filter1_1, biases1_1])\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n trainable=trainable)\n biases1_2 = tf.get_variable('biases1_2', shape=[64], \\\n trainable=trainable)\n # filter1_2 = tf.stop_gradient(filter1_2)\n # biases1_2 = tf.stop_gradient(biases1_2)\n conv1_2 = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n out1_2 = tf.nn.bias_add(conv1_2, biases1_2)\n conv1_2 = tf.nn.relu(out1_2) \n old_neurons.append(conv1_2)\n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n filter1_2 = tf.multiply(mask_filter1_2, filter1_2)\n mask_biases1_2 = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.zeros_initializer())\n biases1_2 = tf.multiply(mask_biases1_2, biases1_2) \n # filter1_2 = tf.nn.tanh(filter1_2)\n # filter1_2 = tf.nn.relu(filter1_2)\n conv1_2 = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], 
padding='SAME')\n out1_2 = tf.nn.bias_add(conv1_2, biases1_2)\n conv1_2 = tf.nn.relu(out1_2)\n neurons.append(conv1_2)\n l2_loss += tf.nn.l2_loss(mask_filter1_2-1)\n l2_loss += tf.nn.l2_loss(mask_biases1_2)\n weights.extend([filter1_2, biases1_2])\n # with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out1_2 = tf.layers.batch_normalization(out1_2, training=is_training, name='bn_conv1_2')\n \n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n trainable=trainable)\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n trainable=trainable)\n # fc1w = tf.stop_gradient(fc1w)\n # fc1b = tf.stop_gradient(fc1b)\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(out)\n old_neurons.append(fc1) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n fc1w = tf.multiply(mask_fc1w, fc1w)\n mask_fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.zeros_initializer())\n fc1b = tf.multiply(mask_fc1b, fc1b)\n # fc1w = tf.nn.tanh(fc1w)\n # fc1w = tf.nn.relu(fc1w)\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(out)\n neurons.append(fc1)\n # with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out = tf.layers.batch_normalization(out, training=is_training, name='bn_fc1w')\n l2_loss += tf.nn.l2_loss(mask_fc1w-1)\n l2_loss += tf.nn.l2_loss(mask_fc1b)\n \n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n weights.extend([fc1w, fc1b])\n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n trainable=trainable)\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n trainable=trainable)\n # fc2w = tf.stop_gradient(fc2w)\n # fc2b = tf.stop_gradient(fc2b)\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n old_neurons.append(Ylogits) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n fc2w = tf.multiply(mask_fc2w, fc2w)\n mask_fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.zeros_initializer())\n fc2b = tf.multiply(mask_fc2b, fc2b)\n # fc2w = tf.nn.relu(fc2w)\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n neurons.append(Ylogits) \n l2_loss += tf.nn.l2_loss(mask_fc2w-1)\n l2_loss += tf.nn.l2_loss(mask_fc2b)\n weights.extend([fc2w, fc2b])\n l2_Ylogits = tf.nn.l2_loss(Ylogits)\n gradients_w = []\n for w in weights:\n gradients_w.append(tf.math.abs(tf.gradients(l2_Ylogits, w)))\n for n in neurons:\n gradients_n.append(tf.math.abs(tf.gradients(l2_Ylogits, n))) \n return Ylogits, (neurons, old_neurons), (gradients_n, gradients_w)\n\ndef collect_lenet(inputs, is_training, params=None, var_scope='cnn', trainable=True):\n X = inputs['features']\n labels = inputs['labels']\n # trainable = var_scope == 'cnn'\n # 
CONVOLUTION 1 - 1\n with tf.name_scope('conv1_1'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n biases1_1 = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_1 = tf.get_variable('mweights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.zeros_initializer(), )\n mask_biases1_1 = tf.get_variable('mbias_1', shape=[32], \\\n initializer=tf.zeros_initializer(), )\n stride = [1,1,1,1]\n conv1_1 = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n out1_1 = tf.nn.bias_add(conv1_1, biases1_1)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out1_1 = tf.layers.batch_normalization(out1_1, training=is_training, name='bn_conv1_1')\n conv1_1 = tf.nn.relu(out1_1)\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n biases1_2 = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_2 = tf.get_variable('mweights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.zeros_initializer(), )\n mask_biases1_2 = tf.get_variable('mbiases1_2', shape=[64], \\\n initializer=tf.zeros_initializer(), ) \n conv1_2 = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n out1_2 = tf.nn.bias_add(conv1_2, biases1_2)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out1_2 = tf.layers.batch_normalization(out1_2, training=is_training, name='bn_conv1_2')\n conv1_2 = tf.nn.relu(out1_2)\n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc1w = tf.get_variable('mweights3_1', shape=[dim, 1024], \\\n initializer=tf.zeros_initializer(), ) \n mask_fc1b = tf.get_variable('mbiases3_1', shape=[1024], \\\n initializer=tf.zeros_initializer(), ) \n # fc1w = tf.multiply(mask_fc1w, fc1w)\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out = tf.layers.batch_normalization(out, training=is_training, name='bn_fc1w')\n fc1 = tf.nn.relu(out)\n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc2w = tf.get_variable('weights3_2', shape=[1024, 
params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.constant_initializer(0.0), \\\n ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc2w = tf.get_variable('mweights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n )\n mask_fc2b = tf.get_variable('mbiases3_2', shape=[params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n )\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels,\n logits=Ylogits)\n loss = tf.reduce_mean(cross_entropy)\n loss = tf.Print(loss, [loss], 'loss in evaluate on train~~\\n')\n mask_fc2w_op = tf.assign(mask_fc2w, mask_fc2w + cal_gradient(loss, fc2w))\n mask_fc2b_op = tf.assign(mask_fc2b, mask_fc2b + cal_gradient(loss, fc2b))\n mask_fc1w_op = tf.assign(mask_fc1w, mask_fc1w + cal_gradient(loss, fc1w))\n mask_fc1b_op = tf.assign(mask_fc1b, mask_fc1b + cal_gradient(loss, fc1b))\n mask_filter1_2_op = tf.assign(mask_filter1_2, mask_filter1_2 + cal_gradient(loss, filter1_2))\n mask_biases1_2_op = tf.assign(mask_biases1_2, mask_biases1_2 + cal_gradient(loss, biases1_2))\n mask_filter1_1_op = tf.assign(mask_filter1_1, mask_filter1_1 + cal_gradient(loss, filter1_1))\n mask_biases1_1_op = tf.assign(mask_biases1_1, mask_biases1_1 + cal_gradient(loss, biases1_1))\n masks_ops = []\n masks_ops.append(mask_fc2w_op)\n masks_ops.append(mask_fc2b_op)\n masks_ops.append(mask_fc1w_op)\n masks_ops.append(mask_fc1b_op)\n masks_ops.append(mask_filter1_2_op)\n masks_ops.append(mask_biases1_2_op)\n masks_ops.append(mask_filter1_1_op)\n masks_ops.append(mask_biases1_1_op)\n return Ylogits, masks_ops\n\ndef collect_lenet_bn(inputs, is_training, params=None, var_scope='cnn', trainable=False):\n X = inputs['features']\n labels = inputs['labels']\n # trainable = var_scope == 'cnn'\n # CONVOLUTION 1 - 1\n with tf.name_scope('conv1_1'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n biases1_1 = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_1 = tf.get_variable('mweights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.zeros_initializer(), )\n mask_biases1_1 = tf.get_variable('mbias_1', shape=[32], \\\n initializer=tf.zeros_initializer(), ) \n stride = [1,1,1,1]\n conv1_1 = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n out1_1 = tf.nn.bias_add(conv1_1, biases1_1)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n out1_1 = tf.layers.batch_normalization(out1_1, training=is_training, name='bn_conv1_1')\n conv1_1 = tf.nn.relu(out1_1)\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n biases1_2 = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', 
reuse=tf.AUTO_REUSE):\n mask_filter1_2 = tf.get_variable('mweights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.zeros_initializer(), )\n mask_biases1_2 = tf.get_variable('mbiases1_2', shape=[64], \\\n initializer=tf.zeros_initializer(), ) \n conv1_2 = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n out1_2 = tf.nn.bias_add(conv1_2, biases1_2)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n out1_2 = tf.layers.batch_normalization(out1_2, training=is_training, name='bn_conv1_2')\n conv1_2 = tf.nn.relu(out1_2)\n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc1w = tf.get_variable('mweights3_1', shape=[dim, 1024], \\\n initializer=tf.zeros_initializer(), ) \n mask_fc1b = tf.get_variable('mbiases3_1', shape=[1024], \\\n initializer=tf.zeros_initializer(), )\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n out = tf.layers.batch_normalization(out, training=is_training, name='bn_fc1w')\n fc1 = tf.nn.relu(out)\n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.constant_initializer(0.0), \\\n ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc2w = tf.get_variable('mweights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n )\n mask_fc2b = tf.get_variable('mbiases3_2', shape=[params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n )\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels,\n logits=Ylogits)\n loss = tf.reduce_mean(cross_entropy)\n loss = tf.Print(loss, [loss], 'loss in evaluate on train~~\\n')\n mask_fc2w_op = tf.assign(mask_fc2w, mask_fc2w + cal_gradient(loss, fc2w))\n mask_fc2w_op = tf.assign(mask_fc2w, mask_fc2w + cal_gradient(loss, fc2w))\n mask_fc2b_op = tf.assign(mask_fc2b, mask_fc2b + cal_gradient(loss, fc2b))\n mask_fc1w_op = tf.assign(mask_fc1w, mask_fc1w + cal_gradient(loss, fc1w))\n mask_fc1b_op = tf.assign(mask_fc1b, mask_fc1b + cal_gradient(loss, fc1b))\n mask_filter1_2_op = tf.assign(mask_filter1_2, mask_filter1_2 + cal_gradient(loss, filter1_2))\n mask_biases1_2_op = tf.assign(mask_biases1_2, mask_biases1_2 + cal_gradient(loss, biases1_2))\n mask_filter1_1_op = tf.assign(mask_filter1_1, mask_filter1_1 + cal_gradient(loss, filter1_1))\n mask_biases1_1_op = tf.assign(mask_biases1_1, mask_biases1_1 + cal_gradient(loss, biases1_1))\n masks_ops = []\n masks_ops.append(mask_fc2w_op)\n masks_ops.append(mask_fc2b_op)\n 
masks_ops.append(mask_fc1w_op)\n masks_ops.append(mask_fc1b_op)\n masks_ops.append(mask_filter1_2_op)\n masks_ops.append(mask_biases1_2_op)\n masks_ops.append(mask_filter1_1_op)\n masks_ops.append(mask_biases1_1_op) \n return Ylogits, masks_ops\n\ndef cal_gradient(loss, weight):\n gradient = tf.math.square(tf.gradients(loss, weight))\n return tf.reshape(gradient, tf.shape(weight))\n\ndef retrain_lenet(inputs, is_training, params=None, var_scope='cnn'):\n X = inputs['features']\n labels = inputs['labels'] \n trainable = var_scope=='cnn'\n neurons = []\n weights = []\n gradients_n = []\n gradients_w = []\n # CONVOLUTION 1 - 1\n with tf.name_scope('conv1_1'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n biases1_1 = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_1 = tf.get_variable('mweights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.zeros_initializer(), trainable=False)\n mask_biases1_1 = tf.get_variable('mbias_1', shape=[32], \\\n initializer=tf.zeros_initializer(), trainable=False)\n \n # filter1_1 = tf.multiply(mask_filter1_1, filter1_1)\n stride = [1,1,1,1]\n conv1_1 = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n out1_1 = tf.nn.bias_add(conv1_1, biases1_1)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out1_1 = tf.layers.batch_normalization(out1_1, training=is_training, name='bn_conv1_1')\n conv1_1 = tf.nn.relu(out1_1)\n weights.extend([filter1_1, biases1_1])\n neurons.append(conv1_1)\n gradients_w.extend([mask_filter1_1, mask_biases1_1])\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n biases1_2 = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_2 = tf.get_variable('mweights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.zeros_initializer(), trainable=False)\n mask_biases1_2 = tf.get_variable('mbiases1_2', shape=[64], \\\n initializer=tf.zeros_initializer(), trainable=False) \n # filter1_2 = tf.multiply(mask_filter1_2, filter1_2) \n conv1_2 = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n out1_2 = tf.nn.bias_add(conv1_2, biases1_2)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out1_2 = tf.layers.batch_normalization(out1_2, training=is_training, name='bn_conv1_2')\n conv1_2 = tf.nn.relu(out1_2)\n weights.extend([filter1_2, biases1_2])\n neurons.append(conv1_2)\n gradients_w.extend([mask_filter1_2, mask_biases1_2]) \n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n with 
tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc1w = tf.get_variable('mweights3_1', shape=[dim, 1024], \\\n initializer=tf.zeros_initializer(), trainable=False) \n mask_fc1b = tf.get_variable('mbiases3_1', shape=[1024], \\\n initializer=tf.zeros_initializer(), trainable=False) \n # fc1w = tf.multiply(mask_fc1w, fc1w)\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # out = tf.layers.batch_normalization(out, training=is_training, name='bn_fc1w')\n fc1 = tf.nn.relu(out)\n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n weights.extend([fc1w, fc1b])\n neurons.append(fc1)\n gradients_w.extend([mask_fc1w, mask_fc1b])\n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.constant_initializer(0.0), \\\n ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc2w = tf.get_variable('mweights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n trainable=False)\n mask_fc2b = tf.get_variable('mbiases3_2', shape=[params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n trainable=False) \n # fc2w = tf.multiply(mask_fc2w, fc2w)\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n weights.extend([fc2w, fc2b])\n neurons.append(Ylogits)\n gradients_w.extend([mask_fc2w, mask_fc2b])\n\n if 'fisher' in params.loss_fn:\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels,\n logits=Ylogits)\n loss = tf.reduce_mean(cross_entropy)\n for n in neurons:\n gradients_n.append(tf.math.square(tf.gradients(loss, n)))\n else:# for retrain_regu_mas and retrain_regu_mine\n l2_Ylogits = tf.nn.l2_loss(Ylogits)\n gradients_w = []\n for w in weights:\n gradients_w.append(tf.math.abs(tf.gradients(l2_Ylogits, w)))\n for n in neurons:\n gradients_n.append(tf.math.abs(tf.gradients(l2_Ylogits, n)))\n return Ylogits, (neurons, weights), (gradients_n, gradients_w)\n\ndef retrain_lenet_bn(inputs, is_training, params=None, var_scope='cnn'):\n X = inputs['features']\n labels = inputs['labels']\n trainable = var_scope=='cnn'\n neurons = []\n weights = []\n gradients_n = []\n gradients_w = []\n # CONVOLUTION 1 - 1\n with tf.name_scope('conv1_1'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n biases1_1 = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_1 = tf.get_variable('mweights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.zeros_initializer(), trainable=False)\n mask_biases1_1 = tf.get_variable('mbias_1', shape=[32], \\\n initializer=tf.zeros_initializer(), trainable=False)\n # filter1_1 = tf.multiply(mask_filter1_1, filter1_1)\n stride = [1,1,1,1]\n conv1_1 = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n out1_1 = 
tf.nn.bias_add(conv1_1, biases1_1)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n out1_1 = tf.layers.batch_normalization(out1_1, training=is_training, name='bn_conv1_1')\n conv1_1 = tf.nn.relu(out1_1)\n weights.extend([filter1_1, biases1_1])\n neurons.append(conv1_1)\n gradients_w.extend([mask_filter1_1, mask_biases1_1])\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n biases1_2 = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_filter1_2 = tf.get_variable('mweights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.zeros_initializer(), trainable=False)\n mask_biases1_2 = tf.get_variable('mbiases1_2', shape=[64], \\\n initializer=tf.zeros_initializer(), trainable=False) \n # filter1_2 = tf.multiply(mask_filter1_2, filter1_2) \n conv1_2 = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n out1_2 = tf.nn.bias_add(conv1_2, biases1_2)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n out1_2 = tf.layers.batch_normalization(out1_2, training=is_training, name='bn_conv1_2')\n conv1_2 = tf.nn.relu(out1_2)\n weights.extend([filter1_2, biases1_2])\n neurons.append(conv1_2)\n gradients_w.extend([mask_filter1_2, mask_biases1_2]) \n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.constant_initializer(0.0), ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc1w = tf.get_variable('mweights3_1', shape=[dim, 1024], \\\n initializer=tf.zeros_initializer(), trainable=False) \n mask_fc1b = tf.get_variable('mbiases3_1', shape=[1024], \\\n initializer=tf.zeros_initializer(), trainable=False) \n # fc1w = tf.multiply(mask_fc1w, fc1w)\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n out = tf.layers.batch_normalization(out, training=is_training, name='bn_fc1w')\n fc1 = tf.nn.relu(out)\n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n weights.extend([fc1w, fc1b])\n neurons.append(fc1)\n gradients_w.extend([mask_fc1w, mask_fc1b]) \n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), \\\n )\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.constant_initializer(0.0), \\\n ) \n with tf.variable_scope('mask', reuse=tf.AUTO_REUSE):\n mask_fc2w = 
tf.get_variable('mweights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n trainable=False)\n mask_fc2b = tf.get_variable('mbiases3_2', shape=[params.num_classes], \\\n initializer=tf.zeros_initializer(), \\\n trainable=False) \n # fc2w = tf.multiply(mask_fc2w, fc2w)\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n weights.extend([fc2w, fc2b])\n neurons.append(Ylogits)\n gradients_w.extend([mask_fc2w, mask_fc2b])\n\n if 'fisher' in params.loss_fn:\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels,\n logits=Ylogits)\n loss = tf.reduce_mean(cross_entropy)\n for n in neurons:\n gradients_n.append(tf.math.square(tf.gradients(loss, n)))\n else:# for retrain_regu_mas and retrain_regu_mine\n l2_Ylogits = tf.nn.l2_loss(Ylogits)\n gradients_w = []\n for w in weights:\n gradients_w.append(tf.math.abs(tf.gradients(l2_Ylogits, w)))\n for n in neurons:\n gradients_n.append(tf.math.abs(tf.gradients(l2_Ylogits, n)))\n return Ylogits, (neurons, weights), (gradients_n, gradients_w)\n\ndef lenet(X, params=None, var_scope='cnn'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # CONVOLUTION 1 - 1\n with tf.name_scope('conv1_1'):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n stride = [1,1,1,1]\n conv = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n biases = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.constant_initializer(0.0))\n out = tf.nn.bias_add(conv, biases)\n conv1_1 = tf.nn.relu(out)\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n conv = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n biases = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.constant_initializer(0.0))\n out = tf.nn.bias_add(conv, biases)\n conv1_2 = tf.nn.relu(out)\n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.constant_initializer(0.0))\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(out)\n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.constant_initializer(0.0))\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n return Ylogits, fc1_drop\n\ndef lenet_bn(X, is_training, params=None, var_scope='cnn'):\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # CONVOLUTION 1 - 1\n with 
tf.name_scope('conv1_1'):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n stride = [1,1,1,1]\n conv = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n biases = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.constant_initializer(0.0))\n out = tf.nn.bias_add(conv, biases)\n out = tf.layers.batch_normalization(out, training=is_training, name='bn_conv1_1')\n conv1_1 = tf.nn.relu(out)\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n filter1_2 = tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n conv = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n biases = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.constant_initializer(0.0))\n out = tf.nn.bias_add(conv, biases)\n out = tf.layers.batch_normalization(out, training=is_training, name='bn_conv1_2')\n conv1_2 = tf.nn.relu(out)\n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.constant_initializer(0.0))\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n out = tf.layers.batch_normalization(out, training=is_training, name='bn_fc1w')\n fc1 = tf.nn.relu(out)\n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.constant_initializer(0.0))\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n return Ylogits, fc1_drop\n\ndef retrain_lenet_mini(inputs, params=None, var_scope='cnn'):\n X = inputs['features']\n labels = inputs['labels']\n trainable = var_scope=='cnn'\n neurons = []\n weights = []\n gradients_w = []\n gradients_n = []\n with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):\n # CONVOLUTION 1 - 1\n with tf.name_scope('conv1_1'):\n filter1_1 = tf.get_variable('weights1_1', shape=[5, 5, int(params.depth), 32], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n stride = [1,1,1,1]\n conv = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')\n biases = tf.get_variable('biases1_1', shape=[32], \\\n initializer=tf.constant_initializer(0.0), )\n out = tf.nn.bias_add(conv, biases)\n conv1_1 = tf.nn.relu(out)\n weights.extend([filter1_1, biases])\n neurons.append(conv1_1)\n # POOL 1\n with tf.name_scope('pool1'):\n pool1_1 = tf.nn.max_pool(conv1_1,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1_1')\n pool1_1_drop = tf.nn.dropout(pool1_1, params.training_keep_prob)\n # CONVOLUTION 1 - 2\n with tf.name_scope('conv1_2'):\n filter1_2 = 
tf.get_variable('weights1_2', shape=[5, 5, 32, 64], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n conv = tf.nn.conv2d(pool1_1_drop, filter1_2, [1,1,1,1], padding='SAME')\n biases = tf.get_variable('biases1_2', shape=[64], \\\n initializer=tf.constant_initializer(0.0), )\n out = tf.nn.bias_add(conv, biases)\n conv1_2 = tf.nn.relu(out)\n weights.extend([filter1_2, biases])\n neurons.append(conv1_2)\n # POOL 2\n with tf.name_scope('pool2'):\n pool2_1 = tf.nn.max_pool(conv1_2,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2_1')\n pool2_1_drop = tf.nn.dropout(pool2_1, params.training_keep_prob)\n #FULLY CONNECTED 1\n with tf.name_scope('fc1') as scope:\n pool2_flat = tf.layers.Flatten()(pool2_1_drop)\n dim = pool2_flat.get_shape()[1].value\n fc1w = tf.get_variable('weights3_1', shape=[dim, 1024], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n fc1b = tf.get_variable('biases3_1', shape=[1024], \\\n initializer=tf.constant_initializer(0.0), )\n out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(out)\n fc1_drop = tf.nn.dropout(fc1, params.training_keep_prob)\n weights.extend([fc1w, fc1b])\n neurons.append(fc1)\n #FULLY CONNECTED 2\n with tf.name_scope('fc2') as scope:\n fc2w = tf.get_variable('weights3_2', shape=[1024, params.num_classes], \\\n initializer=tf.truncated_normal_initializer(stddev=1e-1), )\n fc2b = tf.get_variable('biases3_2', shape=[params.num_classes], \\\n initializer=tf.constant_initializer(0.0), )\n Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)\n weights.extend([fc2w, fc2b])\n neurons.append(Ylogits)\n if 'fisher' in params.loss_fn or 'mine' in params.loss_fn:\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels,\n logits=Ylogits)\n loss = tf.reduce_mean(cross_entropy)\n for w in weights:\n gradients_w.append(tf.math.square(tf.gradients(loss, w)))\n for n in neurons:\n gradients_n.append(tf.math.square(tf.gradients(loss, n)))\n else:#retrain_regu_mas\n l2_Ylogits = tf.nn.l2_loss(Ylogits)\n for w in weights:\n gradients_w.append(tf.math.abs(tf.gradients(l2_Ylogits, w)))\n for n in neurons:\n gradients_n.append(tf.math.abs(tf.gradients(l2_Ylogits, n))) \n return Ylogits, (neurons, weights), (gradients_n, gradients_w)\n\ndef build_model(mode, inputs, params):\n \"\"\"Compute logits of the model\n Args:\n mode: (string) 'train', 'eval', etc.\n inputs: (dict) contains the inputs of the graph (features, labels...)\n this can be `tf.placeholder` or outputs of `tf.data`\n params: (Params) contains hyperparameters of the model (ex: `params.learning_rate`)\n Returns:\n output: (tf.Tensor) output of the model\n Notice:\n !!! 
when using the build_model mdprank needs a learning_rate around 1e-5 - 1e-7\n \"\"\"\n is_training = (mode == 'train')\n is_test = (mode == 'test')\n features = inputs['features']\n if params.collect:\n if params.use_bn and not params.use_kfac:\n y_conv, masks = collect_lenet_bn(inputs, False, params, var_scope='cnn', trainable=True)\n inputs['masks'] = masks\n _, _ = lenet_bn(features, False, params, var_scope='c_cnn')\n else:\n y_conv, masks = collect_lenet(inputs, False, params, var_scope='cnn', trainable=True)\n inputs['masks'] = masks\n _, _ = lenet(features, params, var_scope='c_cnn')\n return y_conv, 0.0\n \n if params.use_kfac and params.loss_fn != 'retrain_regu_mine3':\n y_conv, _ = lenet(features, params, var_scope='cnn')\n return y_conv, None\n if is_test:\n # if params.loss_fn=='retrain_regu_mine3':\n # return lenet_mine3(features, False, params, var_scope='cnn')\n if params.use_bn and not params.use_kfac:\n return lenet_bn(features, False, params, var_scope='cnn')\n else:\n return lenet(features, params, var_scope='cnn')\n # not is_test\n if '2' in params.loss_fn:\n if params.use_bn:\n _, (old_neurons, old_weights), (gradients_o_n, gradients_o_w) = retrain_lenet_mini_bn(inputs, params, var_scope='c_cnn')\n y_conv, (neurons, weights), _ = retrain_lenet_mini_bn(inputs, params, var_scope='cnn')\n else:\n _, (old_neurons, old_weights), (gradients_o_n, gradients_o_w) = retrain_lenet_mini(inputs, params, var_scope='c_cnn')\n y_conv, (neurons, weights), _ = retrain_lenet_mini(inputs, params, var_scope='cnn')\n # weight regulization\n var_mse_list = [(old_var - var) * (old_var - var) for (old_var, var) \\\n in zip(old_weights, weights)]\n var_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_w, var_mse_list)]\n var_mses = functools.reduce(lambda x,y:x+y, var_mse_list) / len(var_mse_list)\n if params.loss_fn=='retrain_regu_mine2':\n if not is_test:\n neuron_mse_list = [(old_neuron - neuron) * (old_neuron - neuron) for (old_neuron, neuron) \\\n in zip(old_neurons, neurons)]\n neuron_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_n, neuron_mse_list)]\n neuron_mses = functools.reduce(lambda x,y:x+y, neuron_mse_list) / len(neuron_mse_list)\n # # weight regulization\n # var_mse_list = [(old_var - var) * (old_var - var) for (old_var, var) \\\n # in zip(old_weights, weights)]\n # var_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_w, var_mse_list)]\n # var_mses = functools.reduce(lambda x,y:x+y, var_mse_list) / len(var_mse_list)\n regulization_loss = 0.001 * neuron_mses + 0.001 * var_mses\n return y_conv, regulization_loss\n if params.loss_fn=='retrain_regu_fisher2':\n regulization_loss = 0.001 * var_mses\n return y_conv, regulization_loss \n elif 'retrain' in params.loss_fn:\n if params.loss_fn=='retrain_regu_mine3':\n # y_conv, regulization_loss = lenet_mine3(features, is_training, params, var_scope='cnn')\n y_conv, (old_neurons, neurons), (gradients_o_n, gradients_o_w) = lenet_mine3(features, is_training, params, var_scope='cnn')\n neuron_mse_list = [(old_neuron - neuron) * (old_neuron - neuron) for (old_neuron, neuron) \\\n in zip(old_neurons, neurons)]\n neuron_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_n, neuron_mse_list)]\n neuron_mses = functools.reduce(lambda x,y:x+y, neuron_mse_list) / len(neuron_mse_list)\n # # weight regulization\n # var_mse_list = [(old_var - var) * (old_var - var) for (old_var, var) \\\n # in zip(old_weights, weights)]\n # var_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_w, var_mse_list)]\n 
# var_mses = functools.reduce(lambda x,y:x+y, var_mse_list) / len(var_mse_list)\n regulization_loss = 0.001 * neuron_mses \n return y_conv, regulization_loss\n if params.use_bn:\n _, (old_neurons, old_weights), (gradients_o_n, gradients_o_w) = retrain_lenet_bn(inputs, is_training, params, var_scope='c_cnn')\n y_conv, (neurons, weights), _ = retrain_lenet_bn(inputs, is_training, params, var_scope='cnn')\n else: \n _, (old_neurons, old_weights), (gradients_o_n, gradients_o_w) = retrain_lenet(inputs, is_training, params, var_scope='c_cnn')\n y_conv, (neurons, weights), _ = retrain_lenet(inputs, is_training, params, var_scope='cnn')\n if params.loss_fn=='retrain':\n return y_conv, None\n if params.loss_fn=='retrain_regu':\n neuron_mse_list = [tf.losses.mean_squared_error(old_neuron, neuron) for (old_neuron, neuron) \\\n in zip(old_neurons, neurons)]\n neuron_mses = functools.reduce(lambda x,y:x+y, neuron_mse_list) / len(neuron_mse_list)\n # weight regulization\n var_mse_list = [tf.losses.mean_squared_error(old_var, var) for (old_var, var) \\\n in zip(old_weights, weights)]\n var_mses = functools.reduce(lambda x,y:x+y, var_mse_list) / len(var_mse_list)\n regulization_loss = 0.001 * neuron_mses + 0.001 * var_mses\n return y_conv, regulization_loss\n if params.loss_fn=='retrain_regu_mine':\n neuron_mse_list = [(old_neuron - neuron) * (old_neuron - neuron) for (old_neuron, neuron) \\\n in zip(old_neurons, neurons)]\n neuron_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_n, neuron_mse_list)]\n neuron_mses = functools.reduce(lambda x,y:x+y, neuron_mse_list) / len(neuron_mse_list)\n # weight regulization\n var_mse_list = [(old_var - var) * (old_var - var) for (old_var, var) \\\n in zip(old_weights, weights)]\n var_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_w, var_mse_list)]\n var_mses = functools.reduce(lambda x,y:x+y, var_mse_list) / len(var_mse_list)\n regulization_loss = 0.001 * neuron_mses + 0.001 * var_mses\n return y_conv, regulization_loss\n if params.loss_fn=='retrain_regu_minen':\n neuron_mse_list = [(old_neuron - neuron) * (old_neuron - neuron) for (old_neuron, neuron) \\\n in zip(old_neurons, neurons)]\n neuron_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_n, neuron_mse_list)]\n neuron_mses = functools.reduce(lambda x,y:x+y, neuron_mse_list) / len(neuron_mse_list)\n regulization_loss = 0.001 * neuron_mses\n return y_conv, regulization_loss \n if params.loss_fn=='retrain_regu_mas':\n var_mse_list = [(old_var - var) * (old_var - var) for (old_var, var) \\\n in zip(old_weights, weights)]\n var_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_w, var_mse_list)]\n var_mses = functools.reduce(lambda x,y:x+y, var_mse_list) / len(var_mse_list)\n regulization_loss = 0.001 * var_mses \n return y_conv, regulization_loss \n if params.loss_fn=='retrain_regu_fishern':\n neuron_mse_list = [(old_neuron - neuron) * (old_neuron - neuron) for (old_neuron, neuron) \\\n in zip(old_neurons, neurons)]\n neuron_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_n, neuron_mse_list)]\n neuron_mses = functools.reduce(lambda x,y:x+y, neuron_mse_list) / len(neuron_mse_list)\n regulization_loss = 0.001 * neuron_mses\n return y_conv, regulization_loss \n if params.loss_fn=='retrain_regu_fisher':\n # weight regulization\n var_mse_list = [(old_var - var) * (old_var - var) for (old_var, var) \\\n in zip(old_weights, weights)]\n var_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_w, var_mse_list)]\n var_mses = functools.reduce(lambda x,y:x+y, 
var_mse_list) / len(var_mse_list)\n regulization_loss = 0.001 * var_mses\n return y_conv, regulization_loss\n if params.loss_fn=='retrain_regu_selfless':\n num_samples = tf.shape(features)[0]\n Rssl = tf.constant(0.0, dtype=tf.float32)\n for layer in range(0, len(neurons)-1):\n neurons_l = tf.reshape(tf.multiply(tf.exp(-gradients_o_n[layer]), neurons[layer]), [num_samples, -1])/1000\n num_neuron = tf.shape(neurons_l)[-1]\n coefficient = tf.range(num_neuron)\n coefficient = tf.cast(coefficient, dtype=tf.float32)\n coefficient = coefficient - tf.transpose(coefficient)\n coefficient = tf.exp(-tf.square(coefficient))\n hihj = tf.reduce_sum(tf.multiply(coefficient, tf.matmul(neurons_l, neurons_l, transpose_a=True)))\n hihj -= tf.reduce_sum(tf.matmul(neurons_l, neurons_l, transpose_b=True))#tf.reduce_sum(tf.square(neurons_l))\n Rssl += hihj\n # weight regulization\n var_mse_list = [(old_var - var) * (old_var - var) for (old_var, var) \\\n in zip(old_weights, weights)]\n var_mse_list = [tf.reduce_sum(g*n) for (g, n) in zip(gradients_o_w, var_mse_list)]\n var_mses = functools.reduce(lambda x,y:x+y, var_mse_list) / len(var_mse_list)\n regulization_loss = 0.0005 * Rssl + 0.001 * var_mses \n return y_conv, regulization_loss\n # cnn models including kfac\n y_conv = None\n if params.use_bn:\n y_conv, _ = lenet_bn(features, is_training, params, var_scope='cnn')\n if is_training:\n _, _ = lenet_bn(features, False, params, var_scope='c_cnn')\n else:\n y_conv, _ = lenet(features, params, var_scope='cnn')\n if is_training:\n _, _ = lenet(features, params, var_scope='c_cnn')\n return y_conv, None\n\ndef model_fn(mode, inputs, params, reuse=False):\n \"\"\"Model function defining the graph operations.\n Args:\n mode: (string) 'train', 'eval', etc.\n inputs: (dict) contains the inputs of the graph (features, labels...)\n this can be `tf.placeholder` or outputs of `tf.data`\n params: (Params) contains hyperparameters of the model (ex: `params.learning_rate`)\n reuse: (bool) whether to reuse the weights\n Returns:\n model_spec: (dict) contains the graph operations or nodes needed for training / evaluation\n \"\"\"\n is_training = (mode == 'train')\n is_test = (mode == 'test')\n labels = inputs['labels']\n loss = None\n gradients = None\n # -----------------------------------------------------------\n # MODEL: define the layers of the model\n with tf.variable_scope('model', reuse=tf.AUTO_REUSE):\n # Compute the output distribution of the model and the predictions\n predictions, calcualted_loss = build_model(mode, inputs, params)\n if not is_test:\n with tf.name_scope('loss'):\n # calcualted_loss = tf.Print(calcualted_loss, [calcualted_loss], message='calcualted_loss is \\n')\n loss = get_loss(predictions, labels, params, calcualted_loss)\n if params.use_regularization:\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n loss += tf.reduce_sum(reg_losses)\n if is_training:\n if params.use_kfac:\n with tf.name_scope('kfac_optimizer'):\n # Register loss\n layer_collection = kfac.LayerCollection()\n layer_collection.register_softmax_cross_entropy_loss(predictions, reuse=False)\n # Register layers\n layer_collection.auto_register_layers()\n # Construct training ops\n global_step = tf.train.get_or_create_global_step()\n optimizer = kfac.PeriodicInvCovUpdateKfacOpt(learning_rate=params.learning_rate, damping=0.001, \\\n batch_size=params.batch_size, layer_collection=layer_collection)\n train_op = optimizer.minimize(loss, global_step=global_step)\n elif params.use_bn:\n with 
tf.name_scope('adam_optimizer'):\n with tf.variable_scope(params.loss_fn, reuse=tf.AUTO_REUSE):\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n global_step = tf.train.get_or_create_global_step()\n optimizer = tf.train.AdamOptimizer(params.learning_rate)\n gradients, variables = zip(*optimizer.compute_gradients(loss))\n gradients, _ = tf.clip_by_global_norm(gradients, params.gradient_clip_value)\n valid_gradients = [g for g in gradients if g is not None]\n # gradients_0 = gradients[0][0]\n train_op = optimizer.apply_gradients(zip(gradients, variables), global_step=global_step)\n else:\n with tf.name_scope('adam_optimizer'): \n global_step = tf.train.get_or_create_global_step()\n optimizer = tf.train.AdamOptimizer(params.learning_rate)\n gradients, variables = zip(*optimizer.compute_gradients(loss))\n gradients, _ = tf.clip_by_global_norm(gradients, params.gradient_clip_value)\n valid_gradients = [g for g in gradients if g is not None]\n train_op = optimizer.apply_gradients(zip(gradients, variables), global_step=global_step)\n with tf.name_scope('accuracy'):\n argmax_predictions = tf.argmax(predictions, 1)\n argmax_labels = tf.argmax(labels, 1)\n correct_prediction = tf.equal(argmax_predictions, argmax_labels)\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n # -----------------------------------------------------------\n # METRICS AND SUMMARIES\n # Metrics for evaluation using tf.metrics (average over whole dataset)\n #######################################################################\n # During training loss is actually \n #######################################################################\n # if is_training and gradients is not None:\n # # loss = tf.nn.l2_loss(gradients)\n # loss = tf.math.reduce_sum(gradients)\n # loss *= loss\n with tf.variable_scope(\"metrics\"):\n metrics = {\n 'accuracy': tf.metrics.mean(accuracy),\n }\n tf.summary.scalar('accuracy', accuracy)\n if not is_test:\n # Summaries for training and validation\n metrics['loss'] = tf.metrics.mean(loss)\n # metrics['calculated_loss'] = tf.reduce_mean(inputs['calculated_loss'])\n tf.summary.scalar('loss', loss)\n\n # if is_training and gradients is not None:\n # loss = gradients\n # loss = tf.nn.l2_loss(gradients)\n # loss = tf.math.reduce_sum(gradients)\n # loss *= loss \n # Group the update ops for the tf.metrics\n update_metrics_op = tf.group(*[op for _, op in metrics.values()])\n # Get the op to reset the local variables used in tf.metrics\n metric_variables = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope=\"metrics\")\n metrics_init_op = tf.variables_initializer(metric_variables)\n # -----------------------------------------------------------\n\n # MODEL SPECIFICATION\n # Create the model specification and return it\n # It contains nodes or operations in the graph that will be used for training and evaluation\n model_spec = inputs\n variable_init_op = tf.group(*[tf.global_variables_initializer(), \\\n tf.local_variables_initializer(), \\\n tf.tables_initializer()])\n model_spec['variable_init_op'] = variable_init_op\n model_spec['metrics_init_op'] = metrics_init_op\n model_spec[\"predictions\"] = predictions\n model_spec[\"accuracy\"] = accuracy\n model_spec['metrics'] = metrics\n model_spec['update_metrics'] = update_metrics_op\n model_spec['summary_op'] = tf.summary.merge_all()\n if is_training:\n model_spec['train_op'] = train_op\n model_spec['loss'] = loss\n\n model_spec['gradients'] = 
valid_gradients\n return model_spec\n\ndef get_loss(predicted_scores, labels,\n params, calcualted_loss=None):\n \"\"\"\n Return loss based on loss_function_str\n Note: this is for models that have real loss functions\n \"\"\"\n def _cnn():\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels,\n logits=predicted_scores)\n loss = tf.reduce_mean(cross_entropy)\n return loss\n def _retrain_regu():\n return _cnn() + calcualted_loss\n\n options = {\n 'cnn': _cnn,\n 'retrain': _cnn,\n 'retrain_regu': _retrain_regu\n }\n loss_function_str = params.loss_fn\n if 'retrain_regu' in params.loss_fn:\n loss_function_str = 'retrain_regu'\n return options[loss_function_str]()\n" ]
[ [ "tensorflow.set_random_seed", "tensorflow.reset_default_graph" ], [ "tensorflow.set_random_seed", "tensorflow.reset_default_graph" ], [ "pandas.DataFrame" ], [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.get_variable", "tensorflow.control_dependencies", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.equal", "tensorflow.variables_initializer", "tensorflow.cast", "tensorflow.nn.l2_loss", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.truncated_normal_initializer", "tensorflow.train.get_or_create_global_step", "tensorflow.name_scope", "tensorflow.square", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.Print", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.exp", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.layers.Flatten", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.losses.mean_squared_error", "tensorflow.multiply", "tensorflow.constant", "tensorflow.local_variables_initializer", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.transpose", "tensorflow.constant_initializer", "tensorflow.clip_by_global_norm", "tensorflow.variable_scope", "tensorflow.tables_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "1.2" ] } ]
mirishkarganesh/espresso
[ "874e04e4c564f80cb5bb12caedbe17a09e9fa2d1" ]
[ "espresso/models/speech_transformer.py" ]
[ "# Copyright (c) Yiming Wang\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Any, Dict, List, Optional\n\nimport torch\nfrom torch import Tensor\nimport torch.nn as nn\n\nfrom fairseq import utils\nfrom fairseq.models import (\n register_model,\n register_model_architecture,\n)\nfrom fairseq.models.transformer import (\n Linear,\n TransformerModel,\n TransformerEncoder,\n TransformerDecoder,\n)\nfrom fairseq.modules import (\n FairseqDropout,\n LayerDropModuleList,\n LayerNorm,\n PositionalEmbedding,\n TransformerDecoderLayer,\n)\nfrom fairseq.modules.quant_noise import quant_noise as apply_quant_noise_\n\nfrom espresso.models.speech_lstm import ConvBNReLU\nfrom espresso.tools.scheduled_sampling_rate_scheduler import ScheduledSamplingRateScheduler\nimport espresso.tools.utils as speech_utils\n\n\nDEFAULT_MAX_SOURCE_POSITIONS = 10240\nDEFAULT_MAX_TARGET_POSITIONS = 1024\n\n\nlogger = logging.getLogger(__name__)\n\n\n@register_model(\"speech_transformer\")\nclass SpeechTransformerModel(TransformerModel):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_. It adds 2D convolutions before\n transformer layers in the encoder to process speech input.\n\n Args:\n encoder (SpeechTransformerEncoder): the encoder\n decoder (SpeechTransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n\n @classmethod\n def hub_models(cls):\n raise NotImplementedError\n\n def __init__(self, args, encoder, decoder):\n super().__init__(args, encoder, decoder)\n self.num_updates = 0\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n TransformerModel.add_args(parser)\n parser.add_argument(\"--encoder-conv-channels\", type=str, metavar=\"EXPR\",\n help=\"list of encoder convolution\\'s out channels\")\n parser.add_argument(\"--encoder-conv-kernel-sizes\", type=str, metavar=\"EXPR\",\n help=\"list of encoder convolution\\'s kernel sizes\")\n parser.add_argument(\"--encoder-conv-strides\", type=str, metavar=\"EXPR\",\n help=\"list of encoder convolution\\'s strides\")\n parser.add_argument(\"--encoder-transformer-context\", type=str, metavar=\"EXPR\",\n help=\"left/right context for time-restricted self-attention; \"\n \"can be None or a tuple of two non-negative integers/None\")\n parser.add_argument(\"--decoder-input-dim\", type=int, metavar=\"N\",\n help=\"decoder input dimension (extra linear layer \"\n \"if different from decoder embed dim)\")\n\n # Scheduled sampling options\n parser.add_argument(\"--scheduled-sampling-probs\", type=lambda p: utils.eval_str_list(p),\n metavar=\"P_1,P_2,...,P_N\", default=[1.0],\n help=\"scheduled sampling probabilities of sampling the truth \"\n \"labels for N epochs starting from --start-schedule-sampling-epoch; \"\n \"all later epochs using P_N\")\n parser.add_argument(\"--start-scheduled-sampling-epoch\", type=int,\n metavar=\"N\", default=1,\n help=\"start scheduled sampling from the specified epoch\")\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if args.encoder_layers_to_keep:\n args.encoder_layers = 
len(args.encoder_layers_to_keep.split(\",\"))\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_source_positions\", None) is None:\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n tgt_dict = task.target_dictionary\n\n decoder_embed_tokens = cls.build_embedding(\n args, tgt_dict, args.decoder_input_dim, args.decoder_embed_path\n )\n\n out_channels = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_channels, type=int)\n kernel_sizes = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_kernel_sizes, type=int)\n strides = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_strides, type=int)\n logger.info(\"input feature dimension: {}, channels: {}\".format(task.feat_dim, task.feat_in_channels))\n assert task.feat_dim % task.feat_in_channels == 0\n conv_layers = ConvBNReLU(\n out_channels, kernel_sizes, strides, in_channels=task.feat_in_channels,\n ) if out_channels is not None else None\n\n transformer_encoder_input_size = task.feat_dim // task.feat_in_channels\n if conv_layers is not None:\n for stride in strides:\n if isinstance(stride, (list, tuple)):\n assert len(stride) > 0\n s = stride[1] if len(stride) > 1 else stride[0]\n else:\n assert isinstance(stride, int)\n s = stride\n transformer_encoder_input_size = (transformer_encoder_input_size + s - 1) // s\n transformer_encoder_input_size *= out_channels[-1]\n else:\n transformer_encoder_input_size = task.feat_dim\n\n encoder_transformer_context = speech_utils.eval_str_nested_list_or_tuple(\n args.encoder_transformer_context, type=int,\n )\n if encoder_transformer_context is not None:\n assert len(encoder_transformer_context) == 2\n for i in range(2):\n assert (\n encoder_transformer_context[i] is None\n or (\n isinstance(encoder_transformer_context[i], int)\n and encoder_transformer_context[i] >= 0\n )\n )\n\n scheduled_sampling_rate_scheduler = ScheduledSamplingRateScheduler(\n args.scheduled_sampling_probs, args.start_scheduled_sampling_epoch,\n )\n\n encoder = cls.build_encoder(\n args, conv_layers_before=conv_layers, input_size=transformer_encoder_input_size,\n transformer_context=encoder_transformer_context,\n )\n decoder = cls.build_decoder(\n args, tgt_dict, decoder_embed_tokens,\n scheduled_sampling_rate_scheduler=scheduled_sampling_rate_scheduler,\n )\n return cls(args, encoder, decoder)\n\n def set_num_updates(self, num_updates):\n self.num_updates = num_updates\n super().set_num_updates(num_updates)\n\n @classmethod\n def build_encoder(cls, args, conv_layers_before=None, input_size=83, transformer_context=None):\n return SpeechTransformerEncoder(\n args, conv_layers_before=conv_layers_before, input_size=input_size,\n transformer_context=transformer_context,\n )\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens, scheduled_sampling_rate_scheduler=None):\n return SpeechTransformerDecoder(\n args,\n tgt_dict,\n embed_tokens,\n no_encoder_attn=getattr(args, \"no_cross_attention\", False),\n scheduled_sampling_rate_scheduler=scheduled_sampling_rate_scheduler,\n )\n\n # TorchScript doesn't support optional arguments with variable length (**kwargs).\n # Current workaround is to add union of all arguments in child classes.\n def forward(\n self,\n src_tokens,\n src_lengths,\n prev_output_tokens,\n return_all_hiddens: bool = True,\n features_only: bool = False,\n 
alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n epoch=1,\n ):\n \"\"\"\n Run the forward pass for an encoder-decoder model.\n\n Copied from the base class, but without ``**kwargs``,\n which are not supported by TorchScript.\n \"\"\"\n encoder_out = self.encoder(\n src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens\n )\n decoder_out = self.decoder(\n prev_output_tokens,\n encoder_out=encoder_out,\n features_only=features_only,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n src_lengths=src_lengths,\n return_all_hiddens=return_all_hiddens,\n epoch=epoch,\n )\n return decoder_out\n\n\nclass SpeechTransformerEncoder(TransformerEncoder):\n \"\"\"\n Transformer encoder consisting of 2D convolution layers and\n *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n conv_layers_before (~fairseq.speech_lstm.ConvBNReLU): convolutions before\n transformer layers\n input_size (int, optional): dimension of the input to the transformer\n before being projected to args.encoder_embed_dim\n \"\"\"\n\n def __init__(self, args, conv_layers_before=None, input_size=83, transformer_context=None):\n super(TransformerEncoder, self).__init__(None) # no src dictionary\n self.register_buffer(\"version\", torch.Tensor([3]))\n\n self.dropout_module = FairseqDropout(\n args.dropout, module_name=self.__class__.__name__\n )\n self.encoder_layerdrop = args.encoder_layerdrop\n\n embed_dim = args.encoder_embed_dim\n self.max_source_positions = args.max_source_positions\n\n self.conv_layers_before = conv_layers_before\n self.fc0 = Linear(input_size, embed_dim) if input_size != embed_dim else None\n\n self.embed_positions = (\n PositionalEmbedding(\n self.output_lengths(self.max_source_positions),\n embed_dim,\n 0,\n learned=args.encoder_learned_pos,\n )\n if not args.no_token_positional_embeddings\n else None\n )\n\n if getattr(args, \"layernorm_embedding\", False):\n self.layernorm_embedding = LayerNorm(embed_dim)\n else:\n self.layernorm_embedding = None\n\n if not args.adaptive_input and args.quant_noise_pq > 0:\n self.quant_noise = apply_quant_noise_(\n nn.Linear(embed_dim, embed_dim, bias=False),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n self.quant_noise = None\n\n if self.encoder_layerdrop > 0.0:\n self.layers = LayerDropModuleList(p=self.encoder_layerdrop)\n else:\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [self.build_encoder_layer(args) for i in range(args.encoder_layers)]\n )\n self.num_layers = len(self.layers)\n\n if args.encoder_normalize_before:\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n self.transformer_context = transformer_context\n\n def output_lengths(self, in_lengths):\n return (\n in_lengths if self.conv_layers_before is None\n else self.conv_layers_before.output_lengths(in_lengths)\n )\n\n def get_attn_mask(self, in_lengths):\n \"\"\"\n Create attention mask according to sequence lengths and transformer context.\n\n Args:\n in_lengths (LongTensor): lengths of each input sequence of shape `(batch)`\n\n Returns:\n attn_mask (ByteTensor|BoolTensor, optional): self-attention mask of shape\n `(tgt_len, src_len)`, where `tgt_len` is the length of output and `src_len`\n is the length of input, though here both are equal to `seq_len`.\n `attn_mask[tgt_i, src_j] = 1` means that when calculating the\n embedding for `tgt_i`, we exclude (mask out) `src_j`.\n 
\"\"\"\n if (\n self.transformer_context is None\n or (self.transformer_context[0] is None and self.transformer_context[1] is None)\n ):\n return None\n max_len = in_lengths.data.max()\n all_ones = in_lengths.ones([max_len, max_len], dtype=torch.bool)\n # at this point left and right context cannot be both None\n if self.transformer_context[0] is None: # mask is a triu matrix\n return all_ones.triu(self.transformer_context[1] + 1)\n if self.transformer_context[1] is None: # mask is a tril matrix\n return all_ones.tril(-self.transformer_context[0] - 1)\n return (\n all_ones.triu(self.transformer_context[1] + 1) | all_ones.tril(-self.transformer_context[0] - 1)\n )\n\n def forward(\n self,\n src_tokens,\n src_lengths,\n return_all_hiddens: bool = False,\n ):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (torch.LongTensor): lengths of each source sentence of\n shape `(batch)`\n return_all_hiddens (bool, optional): also return all of the\n intermediate hidden states (default: False).\n\n Returns:\n namedtuple:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n - **encoder_embedding** (Tensor): the (scaled) embedding lookup\n of shape `(batch, src_len, embed_dim)`\n - **encoder_states** (List[Tensor]): all intermediate\n hidden states of shape `(src_len, batch, embed_dim)`.\n Only populated if *return_all_hiddens* is True.\n \"\"\"\n if self.conv_layers_before is not None:\n x, src_lengths, encoder_padding_mask = self.conv_layers_before(src_tokens, src_lengths)\n else:\n x, encoder_padding_mask = (\n src_tokens,\n ~speech_utils.sequence_mask(src_lengths, src_tokens.size(1))\n )\n\n x = self.dropout_module(x)\n if self.fc0 is not None:\n x = self.fc0(x)\n if self.embed_positions is not None:\n # 0s in `~encoder_padding_mask` are used as pad_idx for positional embeddings\n x = x + self.embed_positions((~encoder_padding_mask).int())\n if self.layernorm_embedding is not None:\n x = self.layernorm_embedding(x)\n x = self.dropout_module(x)\n elif self.embed_positions is not None:\n # 0s in `~encoder_padding_mask` are used as pad_idx for positional embeddings\n x = x + self.embed_positions((~encoder_padding_mask).int())\n if self.layernorm_embedding is not None:\n x = self.layernorm_embedding(x)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n attn_mask = self.get_attn_mask(src_lengths)\n\n encoder_states = []\n\n # encoder layers\n for layer in self.layers:\n x = layer(x, encoder_padding_mask, attn_mask=attn_mask)\n if return_all_hiddens:\n assert encoder_states is not None\n encoder_states.append(x)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n\n # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in\n # `foward` so we use a dictionary instead.\n # TorchScript does not support mixed values so the values are all lists.\n # The empty list is equivalent to None.\n return {\n \"encoder_out\": [x], # T x B x C\n \"encoder_padding_mask\": [encoder_padding_mask] if encoder_padding_mask.any()\n else [], # B x T\n \"encoder_embedding\": [],\n \"encoder_states\": encoder_states, # List[T x B x C]\n \"src_tokens\": [],\n \"src_lengths\": [],\n }\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return self.max_source_positions\n\n\nclass SpeechTransformerDecoder(TransformerDecoder):\n def 
__init__(\n self, args, dictionary, embed_tokens, no_encoder_attn=False,\n scheduled_sampling_rate_scheduler=None,\n ):\n super().__init__(args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn)\n\n self.scheduled_sampling_rate_scheduler = scheduled_sampling_rate_scheduler\n for layer in self.layers:\n if isinstance(layer, TransformerDecoderLayer):\n layer.need_attn = False # make validation fast\n\n def forward(\n self,\n prev_output_tokens,\n encoder_out: Optional[Dict[str, List[Tensor]]] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n features_only: bool = False,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n src_lengths: Optional[Any] = None,\n return_all_hiddens: bool = False,\n **kwargs,\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing\n encoder_out (optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n features_only (bool, optional): only return features without\n applying output layer (default: False).\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n if self.training and alignment_layer is None: # no attention tensors during training to save memory\n alignment_layer = self.num_layers # can be any value no less than this\n if self.training and self.scheduled_sampling_rate_scheduler is not None:\n epoch = kwargs.get(\"epoch\", 1)\n sampling_prob = self.scheduled_sampling_rate_scheduler.step(epoch)\n if sampling_prob < 1.0: # apply scheduled sampling\n assert not features_only\n return self._forward_with_scheduled_sampling(\n prev_output_tokens, sampling_prob, encoder_out=encoder_out,\n incremental_state={}, # use empty dict to preserve forward state\n full_context_alignment=full_context_alignment,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n src_lengths=src_lengths,\n return_all_hiddens=return_all_hiddens,\n )\n\n x, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n incremental_state=incremental_state,\n full_context_alignment=full_context_alignment,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n )\n if not features_only:\n x = self.output_layer(x)\n return x, extra\n\n def _forward_with_scheduled_sampling(\n self,\n prev_output_tokens,\n sampling_prob,\n encoder_out: Optional[Dict[str, List[Tensor]]] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n features_only: bool = False,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n src_lengths: Optional[Any] = None,\n return_all_hiddens: bool = False,\n ):\n bsz, seqlen = prev_output_tokens.size()\n outs = []\n pred = None\n for step in range(seqlen):\n if step > 0:\n sampling_mask = torch.rand(\n [bsz, 1], device=prev_output_tokens.device,\n ).lt(sampling_prob)\n feed_tokens = torch.where(\n sampling_mask, prev_output_tokens[:, step:step + 1], pred,\n )\n else:\n feed_tokens = prev_output_tokens[:, step:step + 1] # B x 1\n x, _ = self.extract_features(\n feed_tokens,\n encoder_out=encoder_out,\n 
incremental_state=incremental_state,\n full_context_alignment=full_context_alignment,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n )\n x = self.output_layer(x) # B x 1 x V\n outs.append(x)\n pred = x.argmax(-1) # B x 1\n x = torch.cat(outs, dim=1) # B x T x V\n return x, None\n\n def masked_copy_incremental_state(self, incremental_state, another_cached_state, mask):\n raise NotImplementedError\n\n\n@register_model_architecture(\"speech_transformer\", \"speech_transformer\")\ndef base_architecture(args):\n args.encoder_conv_channels = getattr(\n args, \"encoder_conv_channels\", \"[64, 64, 128, 128]\",\n )\n args.encoder_conv_kernel_sizes = getattr(\n args, \"encoder_conv_kernel_sizes\", \"[(3, 3), (3, 3), (3, 3), (3, 3)]\",\n )\n args.encoder_conv_strides = getattr(\n args, \"encoder_conv_strides\", \"[(1, 1), (2, 2), (1, 1), (2, 2)]\",\n )\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 256)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 1024)\n args.encoder_layers = getattr(args, \"encoder_layers\", 12)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 4)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", True)\n args.encoder_learned_pos = getattr(args, \"encoder_learned_pos\", False)\n args.encoder_transformer_context = getattr(args, \"encoder_transformer_context\", None)\n args.decoder_embed_path = getattr(args, \"decoder_embed_path\", None)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(\n args, \"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 4)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", True)\n args.decoder_learned_pos = getattr(args, \"decoder_learned_pos\", False)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.2)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.2)\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\n args.dropout = getattr(args, \"dropout\", 0.2)\n args.adaptive_softmax_cutoff = getattr(args, \"adaptive_softmax_cutoff\", None)\n args.adaptive_softmax_dropout = getattr(args, \"adaptive_softmax_dropout\", 0)\n args.share_decoder_input_output_embed = getattr(\n args, \"share_decoder_input_output_embed\", False\n )\n args.no_token_positional_embeddings = getattr(\n args, \"no_token_positional_embeddings\", False\n )\n args.adaptive_input = getattr(args, \"adaptive_input\", False)\n args.no_cross_attention = getattr(args, \"no_cross_attention\", False)\n args.cross_self_attention = getattr(args, \"cross_self_attention\", False)\n\n args.decoder_output_dim = getattr(\n args, \"decoder_output_dim\", args.decoder_embed_dim\n )\n args.decoder_input_dim = getattr(args, \"decoder_input_dim\", args.decoder_embed_dim)\n\n args.no_scale_embedding = getattr(args, \"no_scale_embedding\", False)\n args.layernorm_embedding = getattr(args, \"layernorm_embedding\", False)\n args.tie_adaptive_weights = getattr(args, \"tie_adaptive_weights\", False)\n args.checkpoint_activations = getattr(args, \"checkpoint_activations\", False)\n\n args.encoder_layers_to_keep = getattr(args, \"encoder_layers_to_keep\", None)\n args.decoder_layers_to_keep = getattr(args, \"decoder_layers_to_keep\", None)\n args.encoder_layerdrop = getattr(args, \"encoder_layerdrop\", 0)\n 
args.decoder_layerdrop = getattr(args, \"decoder_layerdrop\", 0)\n args.quant_noise_pq = getattr(args, \"quant_noise_pq\", 0)\n args.quant_noise_pq_block_size = getattr(args, \"quant_noise_pq_block_size\", 8)\n args.quant_noise_scalar = getattr(args, \"quant_noise_scalar\", 0)\n\n\n@register_model_architecture(\"speech_transformer\", \"speech_transformer_wsj\")\ndef speech_transformer_wsj(args):\n base_architecture(args)\n\n\n@register_model_architecture(\"speech_transformer\", \"speech_transformer_librispeech\")\ndef speech_transformer_librispeech(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 2048)\n args.encoder_layers = getattr(args, \"encoder_layers\", 12)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 8)\n args.encoder_transformer_context = getattr(args, \"encoder_transformer_context\", None)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(\n args, \"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 8)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.1)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.1)\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.decoder_output_dim = getattr(\n args, \"decoder_output_dim\", args.decoder_embed_dim\n )\n args.decoder_input_dim = getattr(args, \"decoder_input_dim\", args.decoder_embed_dim)\n base_architecture(args)\n\n\n@register_model_architecture(\"speech_transformer\", \"speech_transformer_swbd\")\ndef speech_transformer_swbd(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 2048)\n args.encoder_layers = getattr(args, \"encoder_layers\", 12)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 4)\n args.encoder_transformer_context = getattr(args, \"encoder_transformer_context\", None)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(\n args, \"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 4)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.25)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.25)\n args.dropout = getattr(args, \"dropout\", 0.25)\n args.decoder_output_dim = getattr(\n args, \"decoder_output_dim\", args.decoder_embed_dim\n )\n args.decoder_input_dim = getattr(args, \"decoder_input_dim\", args.decoder_embed_dim)\n base_architecture(args)\n" ]
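The `_forward_with_scheduled_sampling` method in the code above decodes one step at a time, mixing gold tokens with the model's own previous predictions via `torch.rand(...).lt(sampling_prob)` and `torch.where`. A minimal, self-contained sketch of that token-mixing step, assuming toy `B x 1` tensors (the helper name `mix_gold_and_predicted` is illustrative, not from the source):

```python
import torch

def mix_gold_and_predicted(gold_step, pred_step, sampling_prob):
    """With probability `sampling_prob`, feed the gold token at this step;
    otherwise feed the model's own prediction from the previous step.
    Both inputs are B x 1 LongTensors, mirroring the source's decode loop."""
    sampling_mask = torch.rand(
        gold_step.size(0), 1, device=gold_step.device,
    ).lt(sampling_prob)  # True -> take gold, False -> take model prediction
    return torch.where(sampling_mask, gold_step, pred_step)

if __name__ == "__main__":
    torch.manual_seed(0)
    gold = torch.tensor([[5], [7], [9]])  # gold tokens at step t, B x 1
    pred = torch.tensor([[1], [2], [3]])  # argmax predictions from step t-1
    print(mix_gold_and_predicted(gold, pred, sampling_prob=0.5))
```

As `sampling_prob` decays toward 0 over training (driven by the rate scheduler passed to the decoder), the loop feeds the model its own predictions more often, narrowing the train/inference mismatch.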
[ [ "torch.Tensor", "torch.cat", "torch.nn.ModuleList", "torch.nn.Linear", "torch.rand", "torch.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
indiewebconsulting/mind-tensorflow
[ "15ac248a9b883033b723d6fd6eb4335102c5780e", "15ac248a9b883033b723d6fd6eb4335102c5780e" ]
[ "tensorflow/python/training/tracking/util.py", "tensorflow/python/keras/layers/wrappers.py" ]
[ "\"\"\"Utilities for saving/loading Trackable objects.\"\"\"\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport os\nimport weakref\n\nfrom tensorflow.core.protobuf import trackable_object_graph_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_io_ops as io_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import saver as v1_saver_lib\nfrom tensorflow.python.training.saving import functional_saver\nfrom tensorflow.python.training.saving import saveable_object_util\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import graph_view as graph_view_lib\nfrom tensorflow.python.training.tracking import object_identity\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nclass _CheckpointRestoreCoordinator(object):\n \"\"\"Holds the status of an object-based checkpoint load.\"\"\"\n\n def __init__(self, object_graph_proto, save_path, save_path_tensor,\n restore_op_cache, graph_view):\n \"\"\"Specify the checkpoint being loaded.\n\n Args:\n object_graph_proto: The TrackableObjectGraph protocol buffer associated\n with this checkpoint.\n save_path: A string, the path to the checkpoint, as returned by\n `tf.train.latest_checkpoint`.\n save_path_tensor: A string `Tensor` which contains or will be fed the save\n path.\n restore_op_cache: A dictionary shared between\n `_CheckpointRestoreCoordinator`s for the same Python objects, used to\n look up restore ops by name to avoid re-creating them across multiple\n `restore()` calls.\n graph_view: A graph_view_lib.ObjectGraphView object for the restored\n objects.\n \"\"\"\n self.object_graph_proto = object_graph_proto\n self.restore_uid = ops.uid()\n # Maps from objects to lists of attributes which were in 
the checkpoint but\n # not loaded into any object, for error checking.\n self.unused_attributes = weakref.WeakKeyDictionary()\n # Dictionary mapping from an id in the protocol buffer flat array to\n # Trackable Python objects. This mapping may be deferred if a\n # checkpoint is restored before all dependencies have been tracked. Uses\n # weak references so that partial restorations don't create reference cycles\n # (as objects with deferred dependencies will generally have references to\n # this object).\n self.object_by_proto_id = weakref.WeakValueDictionary()\n # A set of all Python objects we've seen as dependencies, even if we didn't\n # use them (for example because of inconsistent references when\n # loading). Used to make status assertions fail when loading checkpoints\n # that don't quite match.\n self.all_python_objects = object_identity.ObjectIdentityWeakSet()\n self.save_path_tensor = save_path_tensor\n self.save_path_string = save_path\n self.dtype_map = pywrap_tensorflow.NewCheckpointReader(\n save_path).get_variable_to_dtype_map()\n # A NewCheckpointReader for the most recent checkpoint, for streaming Python\n # state restoration.\n # When graph building, contains a list of ops to run to restore objects from\n # this checkpoint.\n self.restore_ops = []\n self.restore_ops_by_name = restore_op_cache\n self.graph_view = graph_view\n self.new_restore_ops_callback = None\n # A mapping from optimizer proto ids to lists of slot variables to be\n # restored when the optimizer is tracked. Only includes slot variables whose\n # regular variables have already been created, and only for optimizer\n # objects which have not yet been created/tracked.\n self.deferred_slot_restorations = {}\n # A mapping from variable proto ids to lists of slot variables to be\n # restored when the variable is created/tracked. 
These get shifted over to\n # deferred_slot_restorations if the optimizer hasn't been created when that\n # happens.\n self.slot_restorations = {}\n for node_index, node in enumerate(self.object_graph_proto.nodes):\n for slot_reference in node.slot_variables:\n # `node` refers to an `Optimizer`, since only these have slot variables.\n self.slot_restorations.setdefault(\n slot_reference.original_variable_node_id, []).append(\n base._SlotVariableRestoration( # pylint: disable=protected-access\n optimizer_id=node_index,\n slot_variable_id=slot_reference.slot_variable_node_id,\n slot_name=slot_reference.slot_name))\n\n def new_restore_ops(self, new_ops):\n self.restore_ops.extend(new_ops)\n if self.new_restore_ops_callback:\n self.new_restore_ops_callback(new_ops) # pylint: disable=not-callable\n\n def restore_saveables(self, tensor_saveables, python_saveables):\n \"\"\"Run or build restore operations for SaveableObjects.\n\n Args:\n tensor_saveables: `SaveableObject`s which correspond to Tensors.\n python_saveables: `PythonStateSaveable`s which correspond to Python\n values.\n\n Returns:\n When graph building, a list of restore operations, either cached or newly\n created, to restore `tensor_saveables`.\n \"\"\"\n restore_ops = []\n # Eagerly run restorations for Python state.\n reader = pywrap_tensorflow.NewCheckpointReader(self.save_path_string)\n for saveable in python_saveables:\n spec_names = [spec.name for spec in saveable.specs]\n saveable.python_restore([reader.get_tensor(name) for name in spec_names])\n\n # If we have new SaveableObjects, extract and cache restore ops.\n if tensor_saveables:\n validated_saveables = saveable_object_util.validate_and_slice_inputs(\n tensor_saveables)\n validated_names = set(saveable.name for saveable in validated_saveables)\n if set(tensor_saveables.keys()) != validated_names:\n raise AssertionError(\n (\"Saveable keys changed when validating. Got back %s, was \"\n \"expecting %s\") % (tensor_saveables.keys(), validated_names))\n new_restore_ops = functional_saver.MultiDeviceSaver(\n validated_saveables).restore(self.save_path_tensor)\n if not context.executing_eagerly():\n for name, restore_op in sorted(new_restore_ops.items()):\n restore_ops.append(restore_op)\n assert name not in self.restore_ops_by_name\n self.restore_ops_by_name[name] = restore_op\n return restore_ops\n\n\nclass _NameBasedRestoreCoordinator(object):\n \"\"\"Keeps the status of a name-based checkpoint restore.\"\"\"\n\n def __init__(self, save_path, dtype_map=None):\n self.save_path = save_path\n self.dtype_map = dtype_map\n self.unused_attributes = weakref.WeakKeyDictionary()\n self.restore_uid = ops.uid()\n\n def globally_named_object_attributes(self, trackable):\n \"\"\"Create globally named SaveableObjects from attributes.\n\n If an object's attribute has no global name specified (default construction\n for the SaveableObject factory), records the failure in\n `self.unused_attributes` (which can then be used to make status assertions\n fail; see `NameBasedSaverStatus`).\n\n Args:\n trackable: An object to save.\n\n Yields:\n SaveableObjects for `trackable`'s attributes.\n \"\"\"\n for attribute_name, saveable_factory in (\n trackable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access\n if callable(saveable_factory):\n try:\n # This saveable object factory does not have a default name= argument,\n # which means there's no way to save/restore it using a name-based\n # checkpoint. 
Ignore the error now and make sure assert_consumed()\n # fails.\n saveable = saveable_factory()\n except TypeError:\n # Even if we can't name this object, we should construct it and check\n # whether it's optional to restore it. If it's optional we don't need\n # to make assertions fail.\n if not saveable_factory(\"\").optional_restore:\n self.unused_attributes.setdefault(trackable,\n []).append(attribute_name)\n continue\n else:\n saveable = saveable_factory\n names_to_saveables = saveable_object_util.op_list_to_dict(\n [saveable], convert_variable_to_tensor=False)\n for name, op in names_to_saveables.items():\n for saveable_object in saveable_object_util.saveable_objects_for_op(\n op=op, name=name):\n yield saveable_object\n\n def eager_restore(self, trackable):\n \"\"\"Runs restore ops for `trackable`'s attributes.\"\"\"\n # When graph building, we don't add any restore ops to the graph until\n # run_restore_ops/initialize_or_restore on the status object for name-based\n # checkpoints.\n assert context.executing_eagerly()\n for saveable in self.globally_named_object_attributes(trackable):\n restored_tensors = []\n tensor_missing = False\n for spec in saveable.specs:\n if spec.name in self.dtype_map:\n with ops.device(\"cpu:0\"):\n restored, = io_ops.restore_v2(\n prefix=self.save_path,\n tensor_names=[spec.name],\n shape_and_slices=[\"\"],\n dtypes=[self.dtype_map[spec.name]],\n name=\"%s_checkpoint_read\" % (spec.name,))\n restored_tensors.append(array_ops.identity(restored))\n else:\n tensor_missing = True\n\n if tensor_missing:\n # Record that this variable didn't match so assertions will fail.\n self.unused_attributes.setdefault(trackable, []).append(saveable.name)\n else:\n # Ignores values missing from the checkpoint, as with object-based\n # restore. 
Status assertions can be used to check exact matches,\n # although it's unlikely to ever happen for name-based checkpoints.\n saveable.restore(\n restored_tensors=restored_tensors, restored_shapes=None)\n\n\n# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange\n# or consolidating the implementation with get_variable.\ndef _default_getter(name,\n shape,\n dtype,\n initializer=None,\n partition_info=None,\n **kwargs):\n \"\"\"A pared-down version of get_variable which does not reuse variables.\"\"\"\n dtype = dtypes.as_dtype(dtype)\n shape_object = tensor_shape.as_shape(shape)\n with ops.init_scope():\n if initializer is None:\n initializer, initializing_from_value = (\n variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access\n name=name,\n shape=shape_object,\n dtype=dtype))\n else:\n initializing_from_value = not callable(initializer)\n # Same logic as get_variable\n variable_dtype = dtype.base_dtype\n if initializing_from_value:\n if shape is not None:\n raise ValueError(\"If initializer is a constant, do not specify shape.\")\n initial_value = initializer\n else:\n # Instantiate initializer if provided initializer is a type object.\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n\n def initial_value():\n return initializer(\n shape_object.as_list(), dtype=dtype, partition_info=partition_info)\n\n return variables.VariableV1(\n initial_value=initial_value,\n name=name,\n dtype=variable_dtype,\n use_resource=True,\n **kwargs)\n\n\ndef add_variable(trackable,\n name,\n shape=None,\n dtype=dtypes.float32,\n initializer=None,\n trainable=True):\n \"\"\"Add a variable to a Trackable with no scope influence.\"\"\"\n return trackable._add_variable_with_custom_getter( # pylint: disable=protected-access\n name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n getter=_default_getter,\n trainable=trainable)\n\n\ndef object_metadata(save_path):\n \"\"\"Retrieves information about the objects in a checkpoint.\n\n Example usage:\n\n ```python\n object_graph = tf.contrib.checkpoint.object_metadata(\n tf.train.latest_checkpoint(checkpoint_directory))\n ckpt_variable_names = set()\n for node in object_graph.nodes:\n for attribute in node.attributes:\n ckpt_variable_names.add(attribute.full_name)\n ```\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`.\n\n Returns:\n A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.\n Raises:\n ValueError: If an object graph was not found in the checkpoint.\n \"\"\"\n reader = pywrap_tensorflow.NewCheckpointReader(save_path)\n try:\n object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)\n except errors_impl.NotFoundError:\n raise ValueError(\n ('The specified checkpoint \"%s\" does not appear to be object-based (it '\n 'is missing the key \"%s\"). Likely it was created with a name-based '\n \"saver and does not contain an object dependency graph.\") %\n (save_path, base.OBJECT_GRAPH_PROTO_KEY))\n object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())\n object_graph_proto.ParseFromString(object_graph_string)\n return object_graph_proto\n\n\ndef list_objects(root_trackable):\n \"\"\"Traverse the object graph and list all accessible objects.\n\n Looks for `Trackable` objects which are dependencies of\n `root_trackable`. 
Includes slot variables only if the variable they are\n slotting for and the optimizer are dependencies of `root_trackable`\n (i.e. if they would be saved with a checkpoint).\n\n Args:\n root_trackable: A `Trackable` object whose dependencies should be flattened.\n\n Returns:\n A flat list of objects.\n \"\"\"\n return graph_view_lib.ObjectGraphView(root_trackable).list_objects()\n\n\ndef gather_initializers(root_trackable):\n \"\"\"Traverse the object graph and find initialization ops.\n\n Looks for `Trackable` objects which are dependencies of\n `root_trackable` and which have an `initializer` property. Includes\n initializers for slot variables only if the variable they are slotting for and\n the optimizer are dependencies of `root_trackable` (i.e. if they would be\n saved with a checkpoint).\n\n Args:\n root_trackable: A `Trackable` object to gather initializers for.\n\n Returns:\n A list of initialization ops.\n \"\"\"\n trackable_objects = list_objects(root_trackable)\n return [\n c.initializer\n for c in trackable_objects\n if hasattr(c, \"initializer\") and c.initializer is not None\n ]\n\n\n@tf_contextlib.contextmanager\ndef capture_dependencies(template):\n \"\"\"Capture variables created within this scope as `Template` dependencies.\n\n Requires that `template.variable_scope` is active.\n\n This scope is intended as a compatibility measure, allowing a trackable\n object to add dependencies on variables created in a block of code which is\n not aware of object-based saving (and instead uses variable names\n heavily). This is how `Template` objects add dependencies on variables and\n sub-`Template`s. Where possible, use `tf.compat.v1.make_template` directly.\n\n Args:\n template: The `Template` object to register dependencies with.\n\n Yields:\n None (when used as a context manager).\n \"\"\"\n name_prefix = template.variable_scope.name\n\n def _trackable_custom_creator(next_creator,\n name,\n initial_value,\n trackable_parent=None,\n **kwargs):\n \"\"\"A variable creation hook which adds Trackable dependencies.\n\n Set for example during a `Template`'s first wrapped function\n execution. Ensures that (a) `template` depends on any trackable\n objects using their own `capture_dependencies` scope inside this scope which\n create variables, and (b) that any variables not in a more deeply nested\n scope are added as dependencies directly.\n\n The `trackable_parent` argument is passed between custom creators but\n ignored when the variable object itself is created. This argument indicates\n (if not `None`) that a more deeply nested scope has already added the\n variable as a dependency, and that parent scopes should add a dependency on\n that object rather than on the variable directly.\n\n Args:\n next_creator: See `variable_scope.variable_creator_scope`; the next\n creator in the chain.\n name: The (full, scope-influenced) name of the variable. The `name_prefix`\n itself is stripped for the purposes of object-based dependency tracking,\n but scopes opened within this scope are respected.\n initial_value: See `variable_scope.variable_creator_scope`. 
Taken\n explicitly so the argument can be re-named and used with\n `Trackable._add_variable_with_custom_getter`.\n trackable_parent: If not None, a more deeply nested trackable object and\n its name prefix which were passed to `capture_dependencies` to add a\n dependency on (rather than depending on the variable directly).\n **kwargs: Passed through to the next creator.\n\n Returns:\n The output of `next_creator`: the fetched/created variable object.\n \"\"\"\n\n def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):\n inner_kwargs.pop(\"name\") # Ignored; this is the scope-stripped name which\n # we don't want to propagate.\n return next_creator(initial_value=initializer, name=name, **inner_kwargs)\n\n if name is not None and name.startswith(name_prefix):\n scope_stripped_name = name[len(name_prefix) + 1:]\n if not trackable_parent:\n return template._add_variable_with_custom_getter( # pylint: disable=protected-access\n initializer=initial_value,\n name=scope_stripped_name,\n getter=_call_next_creator_renaming_initializer,\n # Disable error checking for Trackable. Exceptions are instead\n # raised if necessary when the object-based saver tries to\n # save/restore the object.\n overwrite=True,\n trackable_parent=(template, name_prefix),\n **kwargs)\n else:\n parent_object, parent_name_prefix = trackable_parent\n template._track_trackable( # pylint: disable=protected-access\n parent_object,\n name=parent_name_prefix[len(name_prefix) + 1:],\n overwrite=True)\n return next_creator(\n name=name,\n initial_value=initial_value,\n trackable_parent=(template, name_prefix),\n **kwargs)\n\n with variable_scope.variable_creator_scope(_trackable_custom_creator):\n yield\n\n\nclass _LoadStatus(object):\n \"\"\"Abstract base for load status callbacks.\"\"\"\n\n @abc.abstractmethod\n def assert_consumed(self):\n \"\"\"Raises an exception unless a non-trivial restoration has completed.\"\"\"\n pass\n\n @abc.abstractmethod\n def assert_existing_objects_matched(self):\n \"\"\"Raises an exception unless existing Python objects have been matched.\"\"\"\n pass\n\n @abc.abstractmethod\n def assert_nontrivial_match(self):\n \"\"\"Raises an exception if only the root object matched.\"\"\"\n pass\n\n @abc.abstractmethod\n def run_restore_ops(self, session=None):\n \"\"\"Runs restore ops from the checkpoint. Requires a valid checkpoint.\"\"\"\n pass\n\n @abc.abstractmethod\n def initialize_or_restore(self, session=None):\n \"\"\"Runs restore ops from the checkpoint, or initializes variables.\"\"\"\n pass\n\n\ndef streaming_restore(status, session=None):\n \"\"\"When graph building, runs restore ops as soon as they come in.\n\n Args:\n status: A _LoadStatus objects from an object-based saver's restore().\n Streaming restore from name-based checkpoints is not currently supported.\n session: A session to run new restore ops in.\n \"\"\"\n if context.executing_eagerly():\n # Streaming restore is the default/only behavior when executing eagerly.\n return\n if session is None:\n session = ops.get_default_session()\n if isinstance(status, NameBasedSaverStatus):\n raise NotImplementedError(\n \"Streaming restore not supported from name-based checkpoints. 
File a \"\n \"feature request if this limitation bothers you.\")\n status.run_restore_ops(session=session)\n # pylint: disable=protected-access\n status._checkpoint.new_restore_ops_callback = (\n lambda ops: session.run(ops, feed_dict=status._feed_dict))\n # pylint: enable=protected-access\n\n\nclass CheckpointLoadStatus(_LoadStatus):\n \"\"\"Checks the status of checkpoint loading and manages restore ops.\n\n Returned from `Saver.restore`. Since `restore` may defer the loading of values\n in the checkpoint which don't yet have corresponding Python objects,\n `CheckpointLoadStatus` provides a callback to verify that checkpoint loading\n is complete (`assert_consumed`).\n\n When graph building, `restore` does not run restore ops itself since their\n creation may be deferred. The `run_restore_ops` method must be called once all\n Python objects with values to restore have been created and added to the\n dependency graph (this does not necessarily have to be the whole checkpoint;\n calling `run_restore_ops` while `assert_consumed` fails is supported and will\n partially restore the checkpoint).\n\n See `Saver.restore` for usage examples.\n \"\"\"\n\n def __init__(self, checkpoint, feed_dict, graph_view):\n self._checkpoint = checkpoint\n self._feed_dict = feed_dict\n self._graph_view = graph_view\n\n def assert_consumed(self):\n \"\"\"Asserts that all objects in the checkpoint have been created/matched.\n\n Returns:\n `self` for chaining.\n Raises:\n AssertionError: If there are any Python objects in the dependency graph\n which have not been restored from this checkpoint or a later `restore`,\n or if there are any checkpointed values which have not been matched to\n Python objects.\n \"\"\"\n self.assert_existing_objects_matched()\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n if trackable is None:\n raise AssertionError(\"Unresolved object in checkpoint: %s\" % (node,))\n if self._checkpoint.slot_restorations:\n # Sanity check; this collection should be clear if everything has been\n # restored.\n raise AssertionError(\"Unresolved slot restorations: %s\" %\n (self._checkpoint.slot_restorations,))\n if self._checkpoint.unused_attributes:\n raise AssertionError(\n (\"Unused attributes in these objects (the attributes exist in the \"\n \"checkpoint but not in the objects): %s\") %\n (list(self._checkpoint.unused_attributes.items()),))\n return self\n\n def assert_existing_objects_matched(self):\n \"\"\"Asserts that trackable Python objects have been matched.\n\n Note that this is a weaker assertion than `assert_consumed`. 
It will only\n fail for existing Python objects which are (transitive) dependencies of the\n root object and which do not have an entry in the checkpoint.\n\n It will not fail, for example, if a `tf.keras.Layer` object has not yet been\n built and so has not created any `tf.Variable` objects.\n\n Returns:\n `self` for chaining.\n\n Raises:\n AssertionError: If a Python object exists in the transitive dependencies\n of the root object but does not have a value in the checkpoint.\n \"\"\"\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n if (trackable is not None and\n trackable._update_uid < self._checkpoint.restore_uid): # pylint: disable=protected-access\n raise AssertionError(\"Object not assigned a value from checkpoint: %s\" %\n (node,))\n for trackable_object in self._graph_view.list_objects():\n # Remove data structures that do not contain any variables from\n # restoration checks.\n if (isinstance(trackable_object,\n data_structures.TrackableDataStructure) and\n not trackable_object._checkpoint_dependencies):\n continue\n self._checkpoint.all_python_objects.add(trackable_object)\n unused_python_objects = (\n object_identity.ObjectIdentitySet(self._checkpoint.all_python_objects) -\n object_identity.ObjectIdentitySet(\n self._checkpoint.object_by_proto_id.values()))\n if unused_python_objects:\n raise AssertionError(\n (\"Some Python objects were not bound to checkpointed values, likely \"\n \"due to changes in the Python program: %s\") %\n (list(unused_python_objects),))\n return self\n\n def assert_nontrivial_match(self):\n \"\"\"Raises an exception if only the root object matched.\"\"\"\n for trackable_object in self._graph_view.list_objects():\n self._checkpoint.all_python_objects.add(trackable_object)\n if len(self._checkpoint.object_by_proto_id) <= 1:\n unused_python_objects = (\n object_identity.ObjectIdentitySet(self._checkpoint.all_python_objects)\n - object_identity.ObjectIdentitySet(\n self._checkpoint.object_by_proto_id.values()))\n if unused_python_objects:\n raise AssertionError(\n (\"Nothing except the root object matched a checkpointed value. \"\n \"Typically this means that the checkpoint does not match the \"\n \"Python program. The following objects have no matching \"\n \"checkpointed value: %s\") % (list(unused_python_objects),))\n else:\n raise AssertionError(\n \"Nothing to load. No dependencies have been added to %s yet.\" %\n (self._graph_view.root,))\n return self\n\n def run_restore_ops(self, session=None):\n \"\"\"Run operations to restore objects in the dependency graph.\"\"\"\n if context.executing_eagerly():\n return # Run eagerly\n if session is None:\n session = ops.get_default_session()\n session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)\n\n def initialize_or_restore(self, session=None):\n \"\"\"Run operations to initialize or restore objects in the dependency graph.\n\n Any objects in the dependency graph which have initializers but are not in\n the checkpoint will have those initializers run, unless those variables are\n being restored by a later call to `tf.train.Checkpoint.restore()`.\n\n This method has a sibling in `InitializationOnlyStatus` which instead\n initializes variables. That type is returned if no checkpoint is specified\n in `Saver.restore`.\n\n Args:\n session: The session to run init/restore ops in. 
If `None`, uses the\n default session.\n \"\"\"\n if context.executing_eagerly():\n return # Initialization and restoration ops are run eagerly\n if session is None:\n session = ops.get_default_session()\n all_objects = self._graph_view.list_objects()\n already_initialized_objects = object_identity.ObjectIdentitySet(\n self._checkpoint.object_by_proto_id.values())\n initializers_for_non_restored_variables = [\n c.initializer for c in all_objects\n if hasattr(c, \"initializer\")\n and c not in already_initialized_objects\n and (getattr(c, \"_update_uid\", self._checkpoint.restore_uid - 1)\n < self._checkpoint.restore_uid)]\n self.run_restore_ops(session=session)\n session.run(initializers_for_non_restored_variables)\n\n\nclass InitializationOnlyStatus(_LoadStatus):\n \"\"\"Returned from `Saver.restore` when no checkpoint has been specified.\n\n Objects of this type have the same `assert_consumed` method as\n `CheckpointLoadStatus`, but it always fails. However,\n `initialize_or_restore` works on objects of both types, and will\n initialize variables in `InitializationOnlyStatus` objects or restore them\n otherwise.\n \"\"\"\n\n def __init__(self, graph_view, restore_uid):\n self._restore_uid = restore_uid\n self._graph_view = graph_view\n\n def assert_consumed(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\n raise AssertionError(\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\n\n def assert_existing_objects_matched(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\n raise AssertionError(\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\n\n def assert_nontrivial_match(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\n raise AssertionError(\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\n\n def run_restore_ops(self, session=None):\n \"\"\"For consistency with `CheckpointLoadStatus`.\n\n Use `initialize_or_restore` for initializing if no checkpoint was passed\n to `Saver.restore` and restoring otherwise.\n\n Args:\n session: Not used.\n \"\"\"\n raise AssertionError(\n \"No checkpoint specified, so no restore ops are available \"\n \"(save_path=None to Saver.restore).\")\n\n def initialize_or_restore(self, session=None):\n \"\"\"Runs initialization ops for variables.\n\n Objects which would be saved by `Saver.save` will be initialized, unless\n those variables are being restored by a later call to\n `tf.train.Checkpoint.restore()`.\n\n This method does nothing when executing eagerly (initializers get run\n eagerly).\n\n Args:\n session: The session to run initialization ops in. If `None`, uses the\n default session.\n \"\"\"\n if context.executing_eagerly():\n return # run eagerly\n if session is None:\n session = ops.get_default_session()\n trackable_objects = self._graph_view.list_objects()\n initializers = [\n c.initializer for c in trackable_objects\n if hasattr(c, \"initializer\") and c.initializer is not None\n and (getattr(c, \"_update_uid\", self._restore_uid - 1)\n < self._restore_uid)]\n session.run(initializers)\n\n\n_DEPRECATED_RESTORE_INSTRUCTIONS = (\n \"Restoring a name-based tf.train.Saver checkpoint using the object-based \"\n \"restore API. This mode uses global names to match variables, and so is \"\n \"somewhat fragile. It also adds new restore ops to the graph each time it \"\n \"is called when graph building. 
Prefer re-encoding training checkpoints in \"\n \"the object-based format: run save() on the object-based saver (the same \"\n \"one this message is coming from) and use that checkpoint in the future.\")\n\n\nclass NameBasedSaverStatus(_LoadStatus):\n \"\"\"Status for loading a name-based training checkpoint.\"\"\"\n\n # Ideally this deprecation decorator would be on the class, but that\n # interferes with isinstance checks.\n @deprecation.deprecated(\n date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)\n def __init__(self, checkpoint, graph_view):\n self._checkpoint = checkpoint\n self._graph_view = graph_view\n\n def assert_consumed(self):\n \"\"\"Raises an exception if any variables/objects are unmatched.\"\"\"\n unused_attributes = dict(self._checkpoint.unused_attributes)\n if unused_attributes:\n raise AssertionError(\n \"Some objects had attributes which were not restored: %s\" %\n (unused_attributes,))\n for trackable in self._graph_view.list_objects():\n # pylint: disable=protected-access\n trackable._maybe_initialize_trackable()\n if trackable._update_uid < self._checkpoint.restore_uid:\n raise AssertionError(\"Object not restored: %s\" % (trackable,))\n # pylint: enable=protected-access\n return self\n\n def assert_existing_objects_matched(self):\n \"\"\"Raises an exception if currently created objects are unmatched.\"\"\"\n # For name-based checkpoints there's no object information in the\n # checkpoint, so there's no distinction between\n # assert_existing_objects_matched and assert_consumed (and both are less\n # useful since we don't touch Python objects or Python state).\n return self.assert_consumed()\n\n def assert_nontrivial_match(self):\n \"\"\"Raises an exception if currently created objects are unmatched.\"\"\"\n # For name-based checkpoints there's no object information in the\n # checkpoint, so there's no distinction between\n # assert_nontrivial_match and assert_consumed (and both are less\n # useful since we don't touch Python objects or Python state).\n return self.assert_consumed()\n\n def _gather_saveable_objects(self):\n \"\"\"Walk the object graph, using global names for SaveableObjects.\"\"\"\n objects = self._graph_view.list_objects()\n saveable_objects = []\n for trackable in objects:\n # pylint: disable=protected-access\n trackable._maybe_initialize_trackable()\n if trackable._update_uid < self._checkpoint.restore_uid:\n trackable._update_uid = self._checkpoint.restore_uid\n else:\n continue\n # pylint: enable=protected-access\n saveable_objects.extend(\n self._checkpoint.globally_named_object_attributes(trackable))\n return saveable_objects\n\n def run_restore_ops(self, session=None):\n \"\"\"Load the name-based checkpoint using a new `tf.compat.v1.train.Saver`.\"\"\"\n if context.executing_eagerly():\n return # Nothing to do, variables are restored on creation.\n if session is None:\n session = ops.get_default_session()\n with ops.device(\"/cpu:0\"):\n saveables = self._gather_saveable_objects()\n v1_saver_lib.Saver(saveables).restore(\n sess=session, save_path=self._checkpoint.save_path)\n\n def initialize_or_restore(self, session=None):\n \"\"\"Alias for `run_restore_ops`.\"\"\"\n self.run_restore_ops(session=session)\n\n\nclass _SessionWithFeedDictAdditions(session_lib.SessionInterface):\n \"\"\"Pretends to be a session, inserts extra feeds on run().\"\"\"\n\n def __init__(self, session, feed_additions):\n self._wrapped_session = session\n self._feed_additions = feed_additions\n\n def run(self, fetches, feed_dict=None, **kwargs):\n if feed_dict 
is None:\n feed_dict = {}\n else:\n feed_dict = feed_dict.copy()\n feed_dict.update(self._feed_additions)\n return self._wrapped_session.run(\n fetches=fetches, feed_dict=feed_dict, **kwargs)\n\n\nclass TrackableSaver(object):\n \"\"\"Saves and restores a `Trackable` object and its dependencies.\n\n See `Trackable` for details of dependency management. `Saver` wraps\n `tf.compat.v1.train.Saver` for saving, including extra information about the\n graph of\n dependencies between Python objects. When restoring, it uses this information\n about the save-time dependency graph to more robustly match objects with their\n checkpointed values. When executing eagerly, it supports restoring variables\n on object creation (see `Saver.restore`).\n\n Values in a checkpoint are mapped to `Trackable` Python objects\n (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the\n checkpoint was written. To avoid breaking existing checkpoints when modifying\n a class, dependency names (the names of attributes to which `Trackable`\n objects are assigned) may not change. These names are local to objects, in\n contrast to the `Variable.name`-based save/restore from\n `tf.compat.v1.train.Saver`, and\n so allow additional program transformations.\n \"\"\"\n\n def __init__(self, graph_view):\n \"\"\"Configure saving.\n\n Args:\n graph_view: A `GraphView` object containing a description of the object\n graph to save.\n \"\"\"\n # The file prefix placeholder is created lazily when graph building (and not\n # at all when executing eagerly) to avoid creating ops in the constructor\n # (when they may never be necessary).\n self._file_prefix_placeholder = None\n\n # Op caching for save\n self._object_graph_feed_tensor = None\n self._last_save_object_graph = None\n self._file_prefix_feed_tensor = None\n self._cached_save_operation = None\n\n # Op caching for restore, shared between _CheckpointRestoreCoordinators\n self._restore_op_cache = {}\n self._graph_view = graph_view\n\n def _gather_saveables(self, object_graph_tensor=None):\n \"\"\"Wraps _serialize_object_graph to include the object graph proto.\"\"\"\n (named_saveable_objects, graph_proto,\n feed_additions) = self._graph_view.serialize_object_graph()\n if object_graph_tensor is None:\n with ops.device(\"/cpu:0\"):\n object_graph_tensor = constant_op.constant(\n graph_proto.SerializeToString(), dtype=dtypes.string)\n else:\n feed_additions.update(\n {object_graph_tensor: graph_proto.SerializeToString()})\n assert base.OBJECT_GRAPH_PROTO_KEY not in named_saveable_objects\n named_saveable_objects.append(\n base.NoRestoreSaveable(\n tensor=object_graph_tensor, name=base.OBJECT_GRAPH_PROTO_KEY))\n return named_saveable_objects, graph_proto, feed_additions\n\n def _save_cached_when_graph_building(self,\n file_prefix,\n object_graph_tensor=None):\n \"\"\"Create or retrieve save ops.\n\n Args:\n file_prefix: The prefix for saved checkpoint files.\n object_graph_tensor: A `Tensor` to which the current object graph will be\n fed.\n\n Returns:\n A two-element tuple with a filename tensor and a feed_dict of tensors to\n feed when running it (if graph building). The feed dict contains the\n current object graph and any Python state to be saved in the\n checkpoint. 
When executing eagerly only the first argument is meaningful.\n \"\"\"\n (named_saveable_objects, graph_proto,\n feed_additions) = self._gather_saveables(\n object_graph_tensor=object_graph_tensor)\n if (self._last_save_object_graph != graph_proto\n # When executing eagerly, we need to re-create SaveableObjects each time\n # save() is called so they pick up new Tensors passed to their\n # constructors. That means the Saver needs to be copied with a new\n # var_list.\n or context.executing_eagerly() or ops.inside_function()):\n saver = functional_saver.MultiDeviceSaver(named_saveable_objects)\n save_op = saver.save(file_prefix)\n with ops.device(\"/cpu:0\"):\n with ops.control_dependencies([save_op]):\n self._cached_save_operation = array_ops.identity(file_prefix)\n self._last_save_object_graph = graph_proto\n return self._cached_save_operation, feed_additions\n\n def save(self, file_prefix, checkpoint_number=None, session=None):\n \"\"\"Save a training checkpoint.\n\n The saved checkpoint includes variables created by this object and any\n Trackable objects it depends on at the time `Saver.save()` is called.\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `checkpoint_number`, if provided.\n checkpoint_number: An integer variable or Tensor, used to number\n checkpoints. Typically this value is saved along with other variables in\n training checkpoints, which will happen automatically if it was created\n by `root_trackable` or one of its dependencies (via\n `Trackable._add_variable`).\n session: The session to evaluate variables in. Ignored when executing\n eagerly. If not provided when graph building, the default session is\n used.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n feed_dict = {}\n use_session = (not context.executing_eagerly() and\n not ops.inside_function())\n if checkpoint_number:\n file_prefix = \"%s-%d\" % (file_prefix, checkpoint_number)\n if use_session:\n if self._object_graph_feed_tensor is None:\n with ops.device(\"/cpu:0\"):\n self._object_graph_feed_tensor = constant_op.constant(\n \"\", dtype=dtypes.string)\n self._file_prefix_feed_tensor = constant_op.constant(\n \"\", dtype=dtypes.string)\n object_graph_tensor = self._object_graph_feed_tensor\n file_prefix_tensor = self._file_prefix_feed_tensor\n feed_dict[file_prefix_tensor] = file_prefix\n else:\n with ops.device(\"/cpu:0\"):\n file_prefix_tensor = constant_op.constant(\n file_prefix, dtype=dtypes.string)\n object_graph_tensor = None\n\n file_io.recursive_create_dir(os.path.dirname(file_prefix))\n save_path, new_feed_additions = self._save_cached_when_graph_building(\n file_prefix=file_prefix_tensor, object_graph_tensor=object_graph_tensor)\n if new_feed_additions:\n feed_dict.update(new_feed_additions)\n if not use_session:\n session = None\n elif session is None:\n session = ops.get_default_session()\n\n if session:\n return session.run(save_path, feed_dict=feed_dict)\n else:\n return save_path\n\n def restore(self, save_path):\n \"\"\"Restore a training checkpoint.\n\n Restores `root_trackable` and any objects that it tracks\n (transitive). Either assigns values immediately if variables to restore have\n been created already, or defers restoration until the variables are\n created. 
Dependencies added to the `root_trackable` passed to the\n constructor after this call will be matched if they have a corresponding\n object in the checkpoint.\n\n When building a graph, restorations are added to the graph but not run.\n\n To disallow deferred loading, assert immediately that all checkpointed\n variables have been matched to variable objects:\n\n ```python\n saver = Saver(root)\n saver.restore(path).assert_consumed()\n ```\n\n An exception will be raised unless every object was matched and its\n variables already exist.\n\n When graph building, `assert_consumed()` indicates that all of the restore\n ops which will be created for this checkpoint have been created. They can be\n run via the `run_restore_ops()` function of the status object:\n\n ```python\n saver.restore(path).assert_consumed().run_restore_ops()\n ```\n\n If the checkpoint has not been consumed completely, then the list of restore\n ops will grow as more objects are added to the dependency graph.\n\n Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this\n method. There is no deferred loading, and names are used to match\n variables. No restore ops are created/run until `run_restore_ops()` or\n `initialize_or_restore()` are called on the returned status object, even\n when executing eagerly. Re-encode name-based checkpoints using this\n object-based `Saver.save` as soon as possible.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. If None (as when there is no latest\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\n object which may run initializers for objects in the dependency graph.\n If the checkpoint was written by the name-based\n `tf.compat.v1.train.Saver`, names are used to match variables.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of checkpoint restoration and run initialization/restore ops\n (of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if\n `save_path` is `None`).\n\n If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`\n object is returned which runs restore ops from a name-based saver.\n \"\"\"\n if save_path is None:\n return InitializationOnlyStatus(self._graph_view, ops.uid())\n reader = pywrap_tensorflow.NewCheckpointReader(save_path)\n graph_building = not context.executing_eagerly()\n if graph_building:\n dtype_map = None\n else:\n dtype_map = reader.get_variable_to_dtype_map()\n try:\n object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)\n except errors_impl.NotFoundError:\n # The object graph proto does not exist in this checkpoint. 
Try the\n # name-based compatibility mode.\n restore_coordinator = _NameBasedRestoreCoordinator(\n save_path=save_path, dtype_map=dtype_map)\n if not graph_building:\n for existing_trackable in self._graph_view.list_objects():\n # pylint: disable=protected-access\n existing_trackable._maybe_initialize_trackable()\n existing_trackable._name_based_restores.add(restore_coordinator)\n existing_trackable._name_based_attribute_restore(restore_coordinator)\n # pylint: enable=protected-access\n return NameBasedSaverStatus(\n restore_coordinator, graph_view=self._graph_view)\n\n if graph_building:\n if self._file_prefix_placeholder is None:\n with ops.device(\"/cpu:0\"):\n self._file_prefix_placeholder = constant_op.constant(\"model\")\n file_prefix_tensor = self._file_prefix_placeholder\n file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}\n else:\n with ops.device(\"/cpu:0\"):\n file_prefix_tensor = constant_op.constant(save_path)\n file_prefix_feed_dict = None\n object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())\n object_graph_proto.ParseFromString(object_graph_string)\n checkpoint = _CheckpointRestoreCoordinator(\n object_graph_proto=object_graph_proto,\n save_path=save_path,\n save_path_tensor=file_prefix_tensor,\n restore_op_cache=self._restore_op_cache,\n graph_view=self._graph_view)\n base.CheckpointPosition(\n checkpoint=checkpoint, proto_id=0).restore(self._graph_view.root)\n load_status = CheckpointLoadStatus(\n checkpoint,\n graph_view=self._graph_view,\n feed_dict=file_prefix_feed_dict)\n return load_status\n\n\ndef frozen_saver(root_trackable):\n \"\"\"Creates a static `tf.compat.v1.train.Saver` from a trackable object.\n\n The returned `Saver` saves object-based checkpoints, but these checkpoints\n will no longer reflect structural changes to the object graph, only changes to\n the values of `Variable`s added as dependencies of the root object before\n `freeze` was called.\n\n `restore` works on the returned `Saver`, but requires that the object graph of\n the checkpoint being loaded exactly matches the object graph when `freeze` was\n called. This is in contrast the object-based restore performed by\n `tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's\n object graph and the current Python object graph.\n\n Args:\n root_trackable: A trackable object to save.\n\n Returns:\n A saver which saves object-based checkpoints for the object graph frozen at\n the time `frozen_saver` was called.\n \"\"\"\n named_saveable_objects = graph_view_lib.ObjectGraphView(\n root_trackable).frozen_saveable_objects()\n return functional_saver.MultiDeviceSaver(named_saveable_objects)\n\n\ndef saver_with_op_caching(obj):\n \"\"\"A TrackableSaver with a SaveableObject cache when graph building.\"\"\"\n if context.executing_eagerly():\n saveables_cache = None\n else:\n saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()\n return TrackableSaver(\n graph_view_lib.ObjectGraphView(\n weakref.ref(obj), saveables_cache=saveables_cache))\n\n\n# Mentions graph building / Sessions. The v2 version is below.\n@tf_export(v1=[\"train.Checkpoint\"])\nclass CheckpointV1(tracking.AutoTrackable):\n \"\"\"Groups trackable objects, saving and restoring them.\n\n `Checkpoint`'s constructor accepts keyword arguments whose values are types\n that contain trackable state, such as `tf.compat.v1.train.Optimizer`\n implementations, `tf.Variable`, `tf.keras.Layer` implementations, or\n `tf.keras.Model` implementations. 
It saves these values with a checkpoint, and\n maintains a `save_counter` for numbering checkpoints.\n\n Example usage when graph building:\n\n ```python\n import tensorflow as tf\n import os\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n train_op = optimizer.minimize( ... )\n status.assert_consumed() # Optional sanity checks.\n with tf.compat.v1.Session() as session:\n # Use the Session to restore variables, or initialize them if\n # tf.train.latest_checkpoint returned None.\n status.initialize_or_restore(session)\n for _ in range(num_training_steps):\n session.run(train_op)\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n Example usage with eager execution enabled:\n\n ```python\n import tensorflow as tf\n import os\n\n tf.compat.v1.enable_eager_execution()\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n for _ in range(num_training_steps):\n optimizer.minimize( ... ) # Variables will be restored on creation.\n status.assert_consumed() # Optional sanity checks.\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n `Checkpoint.save` and `Checkpoint.restore` write and read object-based\n checkpoints, in contrast to `tf.compat.v1.train.Saver` which writes and reads\n `variable.name` based checkpoints. Object-based checkpointing saves a graph of\n dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,\n etc.) with named edges, and this graph is used to match variables when\n restoring a checkpoint. It can be more robust to changes in the Python\n program, and helps to support restore-on-create for variables when executing\n eagerly. Prefer `tf.train.Checkpoint` over `tf.compat.v1.train.Saver` for new\n code.\n\n `Checkpoint` objects have dependencies on the objects passed as keyword\n arguments to their constructors, and each dependency is given a name that is\n identical to the name of the keyword argument for which it was created.\n TensorFlow classes like `Layer`s and `Optimizer`s will automatically add\n dependencies on their variables (e.g. \"kernel\" and \"bias\" for\n `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing\n dependencies easy in user-defined classes, since `Model` hooks into attribute\n assignment. For example:\n\n ```python\n class Regress(tf.keras.Model):\n\n def __init__(self):\n super(Regress, self).__init__()\n self.input_transform = tf.keras.layers.Dense(10)\n # ...\n\n def call(self, inputs):\n x = self.input_transform(inputs)\n # ...\n ```\n\n This `Model` has a dependency named \"input_transform\" on its `Dense` layer,\n which in turn depends on its variables. As a result, saving an instance of\n `Regress` using `tf.train.Checkpoint` will also save all the variables created\n by the `Dense` layer.\n\n When variables are assigned to multiple workers, each worker writes its own\n section of the checkpoint. These sections are then merged/re-indexed to behave\n as a single checkpoint. This avoids copying all variables to one worker, but\n does require that all workers see a common filesystem.\n\n Attributes:\n save_counter: Incremented when `save()` is called. 
Used to number\n checkpoints.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Group objects into a training checkpoint.\n\n Args:\n **kwargs: Keyword arguments are set as attributes of this object, and are\n saved with the checkpoint. Values must be trackable objects.\n\n Raises:\n ValueError: If objects in `kwargs` are not trackable.\n \"\"\"\n super(CheckpointV1, self).__init__()\n for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\n if not isinstance(v, (base.Trackable, def_function.Function)):\n raise ValueError(\n (\"`Checkpoint` was expecting a trackable object (an object \"\n \"derived from `TrackableBase`), got %s. If you believe this \"\n \"object should be trackable (i.e. it is part of the \"\n \"TensorFlow Python API and manages state), please open an issue.\")\n % (v,))\n setattr(self, k, v)\n self._save_counter = None # Created lazily for restore-on-create.\n self._save_assign_op = None\n self._saver = saver_with_op_caching(self)\n\n def _maybe_create_save_counter(self):\n \"\"\"Create a save counter if it does not yet exist.\"\"\"\n if self._save_counter is None:\n # Initialized to 0 and incremented before saving.\n with ops.device(\"/cpu:0\"):\n # add_variable creates a dependency named \"save_counter\"; NoDependency\n # prevents creating a second dependency named \"_save_counter\".\n self._save_counter = data_structures.NoDependency(\n add_variable(\n self,\n name=\"save_counter\",\n initializer=0,\n dtype=dtypes.int64,\n trainable=False))\n\n def write(self, file_prefix, session=None):\n \"\"\"Writes a training checkpoint.\n\n The checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.write()` is\n called.\n\n `write` does not number checkpoints, increment `save_counter`, or update the\n metadata used by `tf.train.latest_checkpoint`. It is primarily intended for\n use by higher level checkpoint management utilities. `save` provides a very\n basic implementation of these features.\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix).\n session: The session to evaluate variables in. Ignored when executing\n eagerly. If not provided when graph building, the default session is\n used.\n\n Returns:\n The full path to the checkpoint (i.e. `file_prefix`).\n \"\"\"\n output = self._saver.save(file_prefix=file_prefix, session=session)\n if tensor_util.is_tensor(output):\n if context.executing_eagerly():\n return compat.as_str(output.numpy())\n else:\n # Function building\n return output\n else:\n # Graph + Session, so we already session.ran it.\n return compat.as_str(output)\n\n @property\n def save_counter(self):\n \"\"\"An integer variable which starts at zero and is incremented on save.\n\n Used to number checkpoints.\n\n Returns:\n The save counter variable.\n \"\"\"\n self._maybe_create_save_counter()\n return self._save_counter\n\n def save(self, file_prefix, session=None):\n \"\"\"Saves a training checkpoint and provides basic checkpoint management.\n\n The saved checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.save()` is\n called.\n\n `save` is a basic convenience wrapper around the `write` method,\n sequentially numbering checkpoints using `save_counter` and updating the\n metadata used by `tf.train.latest_checkpoint`. 
More advanced checkpoint\n management, for example garbage collection and custom numbering, may be\n provided by other utilities which also wrap `write`\n (`tf.contrib.checkpoint.CheckpointManager` for example).\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `Checkpoint.save_counter`.\n session: The session to evaluate variables in. Ignored when executing\n eagerly. If not provided when graph building, the default session is\n used.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n graph_building = not context.executing_eagerly()\n if graph_building:\n if ops.inside_function():\n raise NotImplementedError(\n \"Calling tf.train.Checkpoint.save() from a function is not \"\n \"supported, as save() modifies saving metadata in ways not \"\n \"supported by TensorFlow Operations. Consider using \"\n \"tf.train.Checkpoint.write(), a lower-level API which does not \"\n \"update metadata. tf.train.latest_checkpoint and related APIs will \"\n \"not see this checkpoint.\")\n if session is None:\n session = ops.get_default_session()\n if self._save_counter is None:\n # When graph building, if this is a new save counter variable then it\n # needs to be initialized before assign_add. This is only an issue if\n # restore() has not been called first.\n session.run(self.save_counter.initializer)\n if not graph_building or self._save_assign_op is None:\n with ops.colocate_with(self.save_counter):\n assign_op = self.save_counter.assign_add(1, read_value=True)\n if graph_building:\n self._save_assign_op = data_structures.NoDependency(assign_op)\n if graph_building:\n checkpoint_number = session.run(self._save_assign_op)\n else:\n checkpoint_number = assign_op.numpy()\n file_path = self.write(\n \"%s-%d\" % (file_prefix, checkpoint_number), session=session)\n checkpoint_management.update_checkpoint_state_internal(\n save_dir=os.path.dirname(file_prefix),\n model_checkpoint_path=file_path,\n all_model_checkpoint_paths=[file_path],\n save_relative_paths=True)\n return file_path\n\n def restore(self, save_path):\n \"\"\"Restore a training checkpoint.\n\n Restores this `Checkpoint` and any objects it depends on.\n\n When executing eagerly, either assigns values immediately if variables to\n restore have been created already, or defers restoration until the variables\n are created. Dependencies added after this call will be matched if they have\n a corresponding object in the checkpoint (the restore request will queue in\n any trackable object waiting for the expected dependency to be added).\n\n When graph building, restoration ops are added to the graph but not run\n immediately.\n\n To ensure that loading is complete and no more assignments will take place,\n use the `assert_consumed()` method of the status object returned by\n `restore`:\n\n ```python\n checkpoint = tf.train.Checkpoint( ... )\n checkpoint.restore(path).assert_consumed()\n ```\n\n An exception will be raised if any Python objects in the dependency graph\n were not found in the checkpoint, or if any checkpointed values do not have\n a matching Python object.\n\n When graph building, `assert_consumed()` indicates that all of the restore\n ops that will be created for this checkpoint have been created. 
They can be\n run via the `run_restore_ops()` method of the status object:\n\n ```python\n checkpoint.restore(path).assert_consumed().run_restore_ops()\n ```\n\n If the checkpoint has not been consumed completely, then the list of restore\n ops will grow as more objects are added to the dependency graph.\n\n Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this\n method. Names are used to match variables. No restore ops are created/run\n until `run_restore_ops()` or `initialize_or_restore()` are called on the\n returned status object when graph building, but there is restore-on-creation\n when executing eagerly. Re-encode name-based checkpoints using\n `tf.train.Checkpoint.save` as soon as possible.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. If None (as when there is no latest\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\n object which may run initializers for objects in the dependency graph.\n If the checkpoint was written by the name-based\n `tf.compat.v1.train.Saver`, names are used to match variables.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of a checkpoint restoration and run initialization/restore ops.\n\n The returned status object has the following methods:\n\n * `assert_consumed()`:\n Raises an exception if any variables/objects are unmatched: either\n checkpointed values which don't have a matching Python object or\n Python objects in the dependency graph with no values in the\n checkpoint. This method returns the status object, and so may be\n chained with `initialize_or_restore` or `run_restore_ops`.\n\n * `assert_existing_objects_matched()`:\n Raises an exception if any existing Python objects in the dependency\n graph are unmatched. Unlike `assert_consumed`, this assertion will\n pass if values in the checkpoint have no corresponding Python\n objects. For example a `tf.keras.Layer` object which has not yet been\n built, and so has not created any variables, will pass this assertion\n but fail `assert_consumed`. Useful when loading part of a larger\n checkpoint into a new Python program, e.g. a training checkpoint with\n a `tf.compat.v1.train.Optimizer` was saved but only the state required\n for\n inference is being loaded. This method returns the status object, and\n so may be chained with `initialize_or_restore` or `run_restore_ops`.\n\n * `assert_nontrivial_match()`: Asserts that something aside from the root\n object was matched. This is a very weak assertion, but is useful for\n sanity checking in library code where objects may exist in the\n checkpoint which haven't been created in Python and some Python\n objects may not have a checkpointed value.\n\n * `initialize_or_restore(session=None)`:\n When graph building, runs variable initializers if `save_path` is\n `None`, but otherwise runs restore operations. If no `session` is\n explicitly specified, the default session is used. No effect when\n executing eagerly (variables are initialized or restored eagerly).\n\n * `run_restore_ops(session=None)`:\n When graph building, runs restore operations. If no `session` is\n explicitly specified, the default session is used. No effect when\n executing eagerly (restore operations are run eagerly). May only be\n called when `save_path` is not `None`.\n \"\"\"\n status = self._saver.restore(save_path=save_path)\n # Create the save counter now so it gets initialized with other variables\n # when graph building. 
Creating it earlier would lead to double\n # initialization when executing eagerly.\n self._maybe_create_save_counter()\n return status\n\n\n@tf_export(\"train.Checkpoint\", v1=[])\nclass Checkpoint(tracking.AutoTrackable):\n \"\"\"Groups trackable objects, saving and restoring them.\n\n `Checkpoint`'s constructor accepts keyword arguments whose values are types\n that contain trackable state, such as `tf.keras.optimizers.Optimizer`\n implementations, `tf.Variable`, `tf.keras.Layer` implementations, or\n `tf.keras.Model` implementations. It saves these values with a checkpoint, and\n maintains a `save_counter` for numbering checkpoints.\n\n Example usage:\n\n ```python\n import tensorflow as tf\n import os\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n for _ in range(num_training_steps):\n optimizer.minimize( ... ) # Variables will be restored on creation.\n status.assert_consumed() # Optional sanity checks.\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n `Checkpoint.save` and `Checkpoint.restore` write and read object-based\n checkpoints, in contrast to TensorFlow 1.x's `tf.compat.v1.train.Saver` which\n writes and\n reads `variable.name` based checkpoints. Object-based checkpointing saves a\n graph of dependencies between Python objects (`Layer`s, `Optimizer`s,\n `Variable`s, etc.) with named edges, and this graph is used to match variables\n when restoring a checkpoint. It can be more robust to changes in the Python\n program, and helps to support restore-on-create for variables.\n\n `Checkpoint` objects have dependencies on the objects passed as keyword\n arguments to their constructors, and each dependency is given a name that is\n identical to the name of the keyword argument for which it was created.\n TensorFlow classes like `Layer`s and `Optimizer`s will automatically add\n dependencies on their variables (e.g. \"kernel\" and \"bias\" for\n `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing\n dependencies easy in user-defined classes, since `Model` hooks into attribute\n assignment. For example:\n\n ```python\n class Regress(tf.keras.Model):\n\n def __init__(self):\n super(Regress, self).__init__()\n self.input_transform = tf.keras.layers.Dense(10)\n # ...\n\n def call(self, inputs):\n x = self.input_transform(inputs)\n # ...\n ```\n\n This `Model` has a dependency named \"input_transform\" on its `Dense` layer,\n which in turn depends on its variables. As a result, saving an instance of\n `Regress` using `tf.train.Checkpoint` will also save all the variables created\n by the `Dense` layer.\n\n When variables are assigned to multiple workers, each worker writes its own\n section of the checkpoint. These sections are then merged/re-indexed to behave\n as a single checkpoint. This avoids copying all variables to one worker, but\n does require that all workers see a common filesystem.\n\n Attributes:\n save_counter: Incremented when `save()` is called. Used to number\n checkpoints.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Group objects into a training checkpoint.\n\n Args:\n **kwargs: Keyword arguments are set as attributes of this object, and are\n saved with the checkpoint. 
Values must be trackable objects.\n\n Raises:\n ValueError: If objects in `kwargs` are not trackable.\n \"\"\"\n super(Checkpoint, self).__init__()\n for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\n if not isinstance(v, (base.Trackable, def_function.Function)):\n raise ValueError(\n (\"`Checkpoint` was expecting a trackable object (an object \"\n \"derived from `TrackableBase`), got %s. If you believe this \"\n \"object should be trackable (i.e. it is part of the \"\n \"TensorFlow Python API and manages state), please open an issue.\")\n % (v,))\n setattr(self, k, v)\n self._save_counter = None # Created lazily for restore-on-create.\n self._save_assign_op = None\n self._saver = saver_with_op_caching(self)\n\n def _maybe_create_save_counter(self):\n \"\"\"Create a save counter if it does not yet exist.\"\"\"\n if self._save_counter is None:\n # Initialized to 0 and incremented before saving.\n with ops.device(\"/cpu:0\"):\n # add_variable creates a dependency named \"save_counter\"; NoDependency\n # prevents creating a second dependency named \"_save_counter\".\n self._save_counter = data_structures.NoDependency(\n add_variable(\n self,\n name=\"save_counter\",\n initializer=0,\n dtype=dtypes.int64,\n trainable=False))\n\n def write(self, file_prefix):\n \"\"\"Writes a training checkpoint.\n\n The checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.write()` is\n called.\n\n `write` does not number checkpoints, increment `save_counter`, or update the\n metadata used by `tf.train.latest_checkpoint`. It is primarily intended for\n use by higher level checkpoint management utilities. `save` provides a very\n basic implementation of these features.\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix).\n\n Returns:\n The full path to the checkpoint (i.e. `file_prefix`).\n \"\"\"\n output = self._saver.save(file_prefix=file_prefix)\n if tensor_util.is_tensor(output):\n if context.executing_eagerly():\n return compat.as_str(output.numpy())\n else:\n # Function building\n return output\n else:\n # Graph + Session, so we already session.ran it.\n return compat.as_str(output)\n\n @property\n def save_counter(self):\n \"\"\"An integer variable which starts at zero and is incremented on save.\n\n Used to number checkpoints.\n\n Returns:\n The save counter variable.\n \"\"\"\n self._maybe_create_save_counter()\n return self._save_counter\n\n def save(self, file_prefix):\n \"\"\"Saves a training checkpoint and provides basic checkpoint management.\n\n The saved checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.save()` is\n called.\n\n `save` is a basic convenience wrapper around the `write` method,\n sequentially numbering checkpoints using `save_counter` and updating the\n metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint\n management, for example garbage collection and custom numbering, may be\n provided by other utilities which also wrap `write`\n (`tf.contrib.checkpoint.CheckpointManager` for example).\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). 
Names are generated based on this\n prefix and `Checkpoint.save_counter`.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n graph_building = not context.executing_eagerly()\n if graph_building:\n if ops.inside_function():\n raise NotImplementedError(\n \"Calling tf.train.Checkpoint.save() from a function is not \"\n \"supported, as save() modifies saving metadata in ways not \"\n \"supported by TensorFlow Operations. Consider using \"\n \"tf.train.Checkpoint.write(), a lower-level API which does not \"\n \"update metadata. tf.train.latest_checkpoint and related APIs will \"\n \"not see this checkpoint.\")\n session = ops.get_default_session()\n if self._save_counter is None:\n # When graph building, if this is a new save counter variable then it\n # needs to be initialized before assign_add. This is only an issue if\n # restore() has not been called first.\n session.run(self.save_counter.initializer)\n if not graph_building or self._save_assign_op is None:\n with ops.colocate_with(self.save_counter):\n assign_op = self.save_counter.assign_add(1, read_value=True)\n if graph_building:\n self._save_assign_op = data_structures.NoDependency(assign_op)\n if graph_building:\n checkpoint_number = session.run(self._save_assign_op)\n else:\n checkpoint_number = assign_op.numpy()\n file_path = self.write(\"%s-%d\" % (file_prefix, checkpoint_number))\n checkpoint_management.update_checkpoint_state_internal(\n save_dir=os.path.dirname(file_prefix),\n model_checkpoint_path=file_path,\n all_model_checkpoint_paths=[file_path],\n save_relative_paths=True)\n return file_path\n\n def restore(self, save_path):\n \"\"\"Restore a training checkpoint.\n\n Restores this `Checkpoint` and any objects it depends on.\n\n Either assigns values immediately if variables to restore have been created\n already, or defers restoration until the variables are created. Dependencies\n added after this call will be matched if they have a corresponding object in\n the checkpoint (the restore request will queue in any trackable object\n waiting for the expected dependency to be added).\n\n To ensure that loading is complete and no more assignments will take place,\n use the `assert_consumed()` method of the status object returned by\n `restore`:\n\n ```python\n checkpoint = tf.train.Checkpoint( ... )\n checkpoint.restore(path).assert_consumed()\n ```\n\n An exception will be raised if any Python objects in the dependency graph\n were not found in the checkpoint, or if any checkpointed values do not have\n a matching Python object.\n\n Name-based `tf.compat.v1.train.Saver` checkpoints from TensorFlow 1.x can be\n loaded\n using this method. Names are used to match variables. Re-encode name-based\n checkpoints using `tf.train.Checkpoint.save` as soon as possible.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. 
If None (as when there is no latest\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\n object which may run initializers for objects in the dependency graph.\n If the checkpoint was written by the name-based\n `tf.compat.v1.train.Saver`, names are used to match variables.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of a checkpoint restoration.\n\n The returned status object has the following methods:\n\n * `assert_consumed()`:\n Raises an exception if any variables/objects are unmatched: either\n checkpointed values which don't have a matching Python object or\n Python objects in the dependency graph with no values in the\n checkpoint. This method returns the status object, and so may be\n chained with other assertions.\n\n * `assert_existing_objects_matched()`:\n Raises an exception if any existing Python objects in the dependency\n graph are unmatched. Unlike `assert_consumed`, this assertion will\n pass if values in the checkpoint have no corresponding Python\n objects. For example a `tf.keras.Layer` object which has not yet been\n built, and so has not created any variables, will pass this assertion\n but fail `assert_consumed`. Useful when loading part of a larger\n checkpoint into a new Python program, e.g. a training checkpoint with\n a `tf.compat.v1.train.Optimizer` was saved but only the state required\n for\n inference is being loaded. This method returns the status object, and\n so may be chained with other assertions.\n\n * `assert_nontrivial_match()`: Asserts that something aside from the root\n object was matched. This is a very weak assertion, but is useful for\n sanity checking in library code where objects may exist in the\n checkpoint which haven't been created in Python and some Python\n objects may not have a checkpointed value.\n \"\"\"\n status = self._saver.restore(save_path=save_path)\n # Create the save counter now so it gets initialized with other variables\n # when graph building. Creating it earlier would lead to double\n # initialization when executing eagerly.\n self._maybe_create_save_counter()\n return status\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Wrapper layers: layers that augment the functionality of another layer.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\nfrom tensorflow.python.keras.layers.recurrent import _standardize_args\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.Wrapper')\nclass Wrapper(Layer):\n \"\"\"Abstract wrapper base class.\n\n Wrappers take another layer and augment it in various ways.\n Do not use this class as a layer, it is only an abstract base class.\n Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.\n\n Arguments:\n layer: The layer to be wrapped.\n \"\"\"\n\n def __init__(self, layer, **kwargs):\n assert isinstance(layer, Layer)\n self.layer = layer\n # Tracks mapping of Wrapper inputs to inner layer inputs. 
Useful when\n # the inner layer has update ops that depend on its inputs (as opposed\n # to the inputs to the Wrapper layer).\n self._input_map = {}\n super(Wrapper, self).__init__(**kwargs)\n\n def build(self, input_shape=None):\n if not self.layer.built:\n self.layer.build(input_shape)\n self.built = True\n\n @property\n def activity_regularizer(self):\n if hasattr(self.layer, 'activity_regularizer'):\n return self.layer.activity_regularizer\n else:\n return None\n\n def get_config(self):\n config = {\n 'layer': {\n 'class_name': self.layer.__class__.__name__,\n 'config': self.layer.get_config()\n }\n }\n base_config = super(Wrapper, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n layer = deserialize_layer(\n config.pop('layer'), custom_objects=custom_objects)\n return cls(layer, **config)\n\n\n@keras_export('keras.layers.TimeDistributed')\nclass TimeDistributed(Wrapper):\n \"\"\"This wrapper allows to apply a layer to every temporal slice of an input.\n\n The input should be at least 3D, and the dimension of index one\n will be considered to be the temporal dimension.\n\n Consider a batch of 32 samples,\n where each sample is a sequence of 10 vectors of 16 dimensions.\n The batch input shape of the layer is then `(32, 10, 16)`,\n and the `input_shape`, not including the samples dimension, is `(10, 16)`.\n\n You can then use `TimeDistributed` to apply a `Dense` layer\n to each of the 10 timesteps, independently:\n\n ```python\n # as the first layer in a model\n model = Sequential()\n model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))\n # now model.output_shape == (None, 10, 8)\n ```\n\n The output will then have shape `(32, 10, 8)`.\n\n In subsequent layers, there is no need for the `input_shape`:\n\n ```python\n model.add(TimeDistributed(Dense(32)))\n # now model.output_shape == (None, 10, 32)\n ```\n\n The output will then have shape `(32, 10, 32)`.\n\n `TimeDistributed` can be used with arbitrary layers, not just `Dense`,\n for instance with a `Conv2D` layer:\n\n ```python\n model = Sequential()\n model.add(TimeDistributed(Conv2D(64, (3, 3)),\n input_shape=(10, 299, 299, 3)))\n ```\n\n Arguments:\n layer: a layer instance.\n\n Call arguments:\n inputs: Input tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the\n wrapped layer (only if the layer supports this argument).\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked. This argument is passed to the\n wrapped layer (only if the layer supports this argument).\n\n Raises:\n ValueError: If not initialized with a `Layer` instance.\n \"\"\"\n\n def __init__(self, layer, **kwargs):\n if not isinstance(layer, Layer):\n raise ValueError(\n 'Please initialize `TimeDistributed` layer with a '\n '`Layer` instance. 
You passed: {input}'.format(input=layer))\n super(TimeDistributed, self).__init__(layer, **kwargs)\n self.supports_masking = True\n\n # It is safe to use the fast, reshape-based approach with all of our\n # built-in Layers.\n self._always_use_reshape = (\n layer_utils.is_builtin_layer(layer) and\n not getattr(layer, 'stateful', False))\n\n def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):\n \"\"\"Finds non-specific dimensions in the static shapes.\n\n The static shapes are replaced with the corresponding dynamic shapes of the\n tensor.\n\n Arguments:\n init_tuple: a tuple, the first part of the output shape\n tensor: the tensor from which to get the (static and dynamic) shapes\n as the last part of the output shape\n start_idx: int, which indicate the first dimension to take from\n the static shape of the tensor\n int_shape: an alternative static shape to take as the last part\n of the output shape\n\n Returns:\n The new int_shape with the first part from init_tuple\n and the last part from either `int_shape` (if provided)\n or `tensor.shape`, where every `None` is replaced by\n the corresponding dimension from `tf.shape(tensor)`.\n \"\"\"\n # replace all None in int_shape by K.shape\n if int_shape is None:\n int_shape = K.int_shape(tensor)[start_idx:]\n if not any(not s for s in int_shape):\n return init_tuple + tuple(int_shape)\n shape = K.shape(tensor)\n int_shape = list(int_shape)\n for i, s in enumerate(int_shape):\n if not s:\n int_shape[i] = shape[start_idx + i]\n return init_tuple + tuple(int_shape)\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if len(input_shape) < 3:\n raise ValueError(\n '`TimeDistributed` Layer should be passed an `input_shape ` '\n 'with at least 3 dimensions, received: ' + str(input_shape))\n # Don't enforce the batch or time dimension.\n self.input_spec = InputSpec(shape=[None, None] + input_shape[2:])\n child_input_shape = [input_shape[0]] + input_shape[2:]\n super(TimeDistributed, self).build(tuple(child_input_shape))\n self.built = True\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n child_input_shape = tensor_shape.TensorShape([input_shape[0]] +\n input_shape[2:])\n child_output_shape = self.layer.compute_output_shape(child_input_shape)\n if not isinstance(child_output_shape, tensor_shape.TensorShape):\n child_output_shape = tensor_shape.TensorShape(child_output_shape)\n child_output_shape = child_output_shape.as_list()\n timesteps = input_shape[1]\n return tensor_shape.TensorShape([child_output_shape[0], timesteps] +\n child_output_shape[1:])\n\n def call(self, inputs, training=None, mask=None):\n kwargs = {}\n if generic_utils.has_arg(self.layer.call, 'training'):\n kwargs['training'] = training\n\n input_shape = K.int_shape(inputs)\n if input_shape[0] and not self._always_use_reshape:\n # batch size matters, use rnn-based implementation\n def step(x, _):\n output = self.layer.call(x, **kwargs)\n return output, []\n\n _, outputs, _ = K.rnn(\n step,\n inputs,\n initial_states=[],\n input_length=input_shape[1],\n unroll=False)\n y = outputs\n else:\n # No batch size specified, therefore the layer will be able\n # to process batches of any size.\n # We can go with reshape-based implementation for performance.\n input_length = input_shape[1]\n if not input_length:\n input_length = array_ops.shape(inputs)[1]\n inner_input_shape = self._get_shape_tuple((-1,), inputs, 2)\n # Shape: (num_samples * timesteps, ...). 
And track the\n # transformation in self._input_map.\n input_uid = generic_utils.object_list_uid(inputs)\n inputs = array_ops.reshape(inputs, inner_input_shape)\n self._input_map[input_uid] = inputs\n # (num_samples * timesteps, ...)\n if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None:\n inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)\n kwargs['mask'] = K.reshape(mask, inner_mask_shape)\n y = self.layer.call(inputs, **kwargs)\n # Shape: (num_samples, timesteps, ...)\n output_shape = self.compute_output_shape(input_shape).as_list()\n output_shape = self._get_shape_tuple(\n (-1, input_length), y, 1, output_shape[2:])\n y = array_ops.reshape(y, output_shape)\n\n # Apply activity regularizer if any:\n if (hasattr(self.layer, 'activity_regularizer') and\n self.layer.activity_regularizer is not None):\n regularization_loss = self.layer.activity_regularizer(y)\n self.add_loss(regularization_loss, inputs)\n return y\n\n def compute_mask(self, inputs, mask=None):\n \"\"\"Computes an output mask tensor for Embedding layer.\n\n This is based on the inputs, mask, and the inner layer.\n If batch size is specified:\n Simply return the input `mask`. (An rnn-based implementation with\n more than one rnn inputs is required but not supported in tf.keras yet.)\n Otherwise we call `compute_mask` of the inner layer at each time step.\n If the output mask at each time step is not `None`:\n (E.g., inner layer is Masking or RNN)\n Concatenate all of them and return the concatenation.\n If the output mask at each time step is `None` and the input mask is not\n `None`:(E.g., inner layer is Dense)\n Reduce the input_mask to 2 dimensions and return it.\n Otherwise (both the output mask and the input mask are `None`):\n (E.g., `mask` is not used at all)\n Return `None`.\n\n Arguments:\n inputs: Tensor with shape [batch size, timesteps, ...] indicating the\n input to TimeDistributed. If static shape information is available for\n \"batch size\", `mask` is returned unmodified.\n mask: Either None (indicating no masking) or a Tensor indicating the\n input mask for TimeDistributed. The shape can be static or dynamic.\n\n Returns:\n Either None (no masking), or a [batch size, timesteps, ...] Tensor with\n an output mask for the TimeDistributed layer with the shape beyond the\n second dimension being the value of the input mask shape(if the computed\n output mask is none), an output mask with the shape beyond the first\n dimension being the value of the mask shape(if mask is not None) or\n output mask with the shape beyond the first dimension being the\n value of the computed output shape.\n\n \"\"\"\n # cases need to call the layer.compute_mask when input_mask is None:\n # Masking layer and Embedding layer with mask_zero\n input_shape = K.int_shape(inputs)\n if input_shape[0]:\n # batch size matters, we currently do not handle mask explicitly\n return mask\n inner_mask = mask\n if inner_mask is not None:\n inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)\n inner_mask = K.reshape(inner_mask, inner_mask_shape)\n input_uid = generic_utils.object_list_uid(inputs)\n inner_inputs = self._input_map.get(input_uid, inputs)\n output_mask = self.layer.compute_mask(inner_inputs, inner_mask)\n if output_mask is None:\n if mask is None:\n return None\n # input_mask is not None, and output_mask is None:\n # we should return a not-None mask\n output_mask = mask\n for _ in range(2, len(K.int_shape(mask))):\n output_mask = K.any(output_mask, axis=-1)\n else:\n # output_mask is not None. 
We need to reshape it\n input_length = input_shape[1]\n if not input_length:\n input_length = K.shape(inputs)[1]\n output_mask_int_shape = K.int_shape(output_mask)\n if output_mask_int_shape is None:\n # if the output_mask does not have a static shape,\n # its shape must be the same as mask's\n if mask is not None:\n output_mask_int_shape = K.int_shape(mask)\n else:\n output_mask_int_shape = K.compute_output_shape(input_shape)[:-1]\n output_mask_shape = self._get_shape_tuple(\n (-1, input_length), output_mask, 1, output_mask_int_shape[1:])\n output_mask = K.reshape(output_mask, output_mask_shape)\n return output_mask\n\n\n@keras_export('keras.layers.Bidirectional')\nclass Bidirectional(Wrapper):\n \"\"\"Bidirectional wrapper for RNNs.\n\n Arguments:\n layer: `Recurrent` instance.\n merge_mode: Mode by which outputs of the\n forward and backward RNNs will be combined.\n One of {'sum', 'mul', 'concat', 'ave', None}.\n If None, the outputs will not be combined,\n they will be returned as a list.\n\n Call arguments:\n The call arguments for this layer are the same as those of the wrapped RNN\n layer.\n\n Raises:\n ValueError: If not initialized with a `Layer` instance or\n In case of invalid `merge_mode` argument.\n\n Examples:\n\n ```python\n model = Sequential()\n model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5,\n 10)))\n model.add(Bidirectional(LSTM(10)))\n model.add(Dense(5))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n ```\n \"\"\"\n\n def __init__(self, layer, merge_mode='concat', weights=None, **kwargs):\n if not isinstance(layer, Layer):\n raise ValueError(\n 'Please initialize `Bidirectional` layer with a '\n '`Layer` instance. You passed: {input}'.format(input=layer))\n if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:\n raise ValueError('Invalid merge mode. 
'\n 'Merge mode should be one of '\n '{\"sum\", \"mul\", \"ave\", \"concat\", None}')\n if getattr(layer, 'zero_output_for_mask', None) is not None:\n # Force the zero_output_for_mask to be True if returning sequences.\n layer.zero_output_for_mask = layer.return_sequences\n\n self.forward_layer = copy.copy(layer)\n config = layer.get_config()\n config['go_backwards'] = not config['go_backwards']\n self.backward_layer = layer.__class__.from_config(config)\n self.forward_layer._name = 'forward_' + self.forward_layer.name\n self.backward_layer._name = 'backward_' + self.backward_layer.name\n self.merge_mode = merge_mode\n if weights:\n nw = len(weights)\n self.forward_layer.initial_weights = weights[:nw // 2]\n self.backward_layer.initial_weights = weights[nw // 2:]\n self.stateful = layer.stateful\n self.return_sequences = layer.return_sequences\n self.return_state = layer.return_state\n self.supports_masking = True\n self._trainable = True\n self._num_constants = None\n # We don't want to track `layer` since we're already tracking the two copies\n # of it we actually run.\n self._setattr_tracking = False\n super(Bidirectional, self).__init__(layer, **kwargs)\n self._setattr_tracking = True\n self.input_spec = layer.input_spec\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n output_shape = self.forward_layer.compute_output_shape(input_shape)\n if not isinstance(output_shape, tensor_shape.TensorShape):\n output_shape = tensor_shape.TensorShape(output_shape)\n output_shape = tuple(output_shape.as_list())\n if self.return_state:\n state_shape = output_shape[1:]\n output_shape = output_shape[0]\n\n if self.merge_mode == 'concat':\n output_shape = list(output_shape)\n output_shape[-1] *= 2\n output_shape = tuple(output_shape)\n elif self.merge_mode is None:\n output_shape = [output_shape, copy.copy(output_shape)]\n\n if self.return_state:\n if self.merge_mode is None:\n return output_shape + state_shape + copy.copy(state_shape)\n return [output_shape] + state_shape + copy.copy(state_shape)\n return output_shape\n\n def __call__(self, inputs, initial_state=None, constants=None, **kwargs):\n \"\"\"`Bidirectional.__call__` implements the same API as the wrapped `RNN`.\"\"\"\n inputs, initial_state, constants = _standardize_args(\n inputs, initial_state, constants, self._num_constants)\n\n if isinstance(inputs, list):\n if len(inputs) > 1:\n initial_state = inputs[1:]\n inputs = inputs[0]\n\n if initial_state is None and constants is None:\n return super(Bidirectional, self).__call__(inputs, **kwargs)\n\n # Applies the same workaround as in `RNN.__call__`\n additional_inputs = []\n additional_specs = []\n if initial_state is not None:\n # Check if `initial_state` can be splitted into half\n num_states = len(initial_state)\n if num_states % 2 > 0:\n raise ValueError(\n 'When passing `initial_state` to a Bidirectional RNN, '\n 'the state should be a list containing the states of '\n 'the underlying RNNs. 
'\n 'Found: ' + str(initial_state))\n\n kwargs['initial_state'] = initial_state\n additional_inputs += initial_state\n state_specs = [InputSpec(shape=K.int_shape(state))\n for state in initial_state]\n self.forward_layer.state_spec = state_specs[:num_states // 2]\n self.backward_layer.state_spec = state_specs[num_states // 2:]\n additional_specs += state_specs\n if constants is not None:\n kwargs['constants'] = constants\n additional_inputs += constants\n constants_spec = [InputSpec(shape=K.int_shape(constant))\n for constant in constants]\n self.forward_layer.constants_spec = constants_spec\n self.backward_layer.constants_spec = constants_spec\n additional_specs += constants_spec\n\n self._num_constants = len(constants)\n self.forward_layer._num_constants = self._num_constants\n self.backward_layer._num_constants = self._num_constants\n\n is_keras_tensor = K.is_keras_tensor(additional_inputs[0])\n for tensor in additional_inputs:\n if K.is_keras_tensor(tensor) != is_keras_tensor:\n raise ValueError('The initial state of a Bidirectional'\n ' layer cannot be specified with a mix of'\n ' Keras tensors and non-Keras tensors'\n ' (a \"Keras tensor\" is a tensor that was'\n ' returned by a Keras layer, or by `Input`)')\n\n if is_keras_tensor:\n # Compute the full input spec, including state\n full_input = [inputs] + additional_inputs\n # The original input_spec is None since there could be a nested tensor\n # input. Update the input_spec to match the inputs.\n full_input_spec = [None for _ in range(len(nest.flatten(inputs)))\n ] + additional_specs\n\n # Perform the call with temporarily replaced input_spec\n original_input_spec = self.input_spec\n self.input_spec = full_input_spec\n output = super(Bidirectional, self).__call__(full_input, **kwargs)\n self.input_spec = original_input_spec\n return output\n else:\n return super(Bidirectional, self).__call__(inputs, **kwargs)\n\n def call(self,\n inputs,\n training=None,\n mask=None,\n initial_state=None,\n constants=None):\n \"\"\"`Bidirectional.call` implements the same API as the wrapped `RNN`.\"\"\"\n kwargs = {}\n if generic_utils.has_arg(self.layer.call, 'training'):\n kwargs['training'] = training\n if generic_utils.has_arg(self.layer.call, 'mask'):\n kwargs['mask'] = mask\n if generic_utils.has_arg(self.layer.call, 'constants'):\n kwargs['constants'] = constants\n\n if initial_state is not None and generic_utils.has_arg(\n self.layer.call, 'initial_state'):\n forward_inputs = [inputs[0]]\n backward_inputs = [inputs[0]]\n pivot = len(initial_state) // 2 + 1\n # add forward initial state\n forward_state = inputs[1:pivot]\n forward_inputs += forward_state\n if self._num_constants is None:\n # add backward initial state\n backward_state = inputs[pivot:]\n backward_inputs += backward_state\n else:\n # add backward initial state\n backward_state = inputs[pivot:-self._num_constants]\n backward_inputs += backward_state\n # add constants for forward and backward layers\n forward_inputs += inputs[-self._num_constants:]\n backward_inputs += inputs[-self._num_constants:]\n y = self.forward_layer.call(forward_inputs,\n initial_state=forward_state, **kwargs)\n y_rev = self.backward_layer.call(backward_inputs,\n initial_state=backward_state, **kwargs)\n else:\n y = self.forward_layer.call(inputs, **kwargs)\n y_rev = self.backward_layer.call(inputs, **kwargs)\n\n if self.return_state:\n states = y[1:] + y_rev[1:]\n y = y[0]\n y_rev = y_rev[0]\n\n if self.return_sequences:\n y_rev = K.reverse(y_rev, 1)\n if self.merge_mode == 'concat':\n output = 
K.concatenate([y, y_rev])\n elif self.merge_mode == 'sum':\n output = y + y_rev\n elif self.merge_mode == 'ave':\n output = (y + y_rev) / 2\n elif self.merge_mode == 'mul':\n output = y * y_rev\n elif self.merge_mode is None:\n output = [y, y_rev]\n else:\n raise ValueError(\n 'Unrecognized value for `merge_mode`: %s' % (self.merge_mode))\n\n if self.return_state:\n if self.merge_mode is None:\n return output + states\n return [output] + states\n return output\n\n def reset_states(self):\n self.forward_layer.reset_states()\n self.backward_layer.reset_states()\n\n def build(self, input_shape):\n with K.name_scope(self.forward_layer.name):\n self.forward_layer.build(input_shape)\n with K.name_scope(self.backward_layer.name):\n self.backward_layer.build(input_shape)\n self.built = True\n\n def compute_mask(self, inputs, mask):\n if isinstance(mask, list):\n mask = mask[0]\n if self.return_sequences:\n if not self.merge_mode:\n output_mask = [mask, mask]\n else:\n output_mask = mask\n else:\n output_mask = [None, None] if not self.merge_mode else None\n\n if self.return_state:\n states = self.forward_layer.states\n state_mask = [None for _ in states]\n if isinstance(output_mask, list):\n return output_mask + state_mask * 2\n return [output_mask] + state_mask * 2\n return output_mask\n\n @property\n def constraints(self):\n constraints = {}\n if hasattr(self.forward_layer, 'constraints'):\n constraints.update(self.forward_layer.constraints)\n constraints.update(self.backward_layer.constraints)\n return constraints\n\n def get_config(self):\n config = {'merge_mode': self.merge_mode}\n if self._num_constants is not None:\n config['num_constants'] = self._num_constants\n base_config = super(Bidirectional, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n num_constants = config.pop('num_constants', None)\n layer = super(Bidirectional, cls).from_config(config,\n custom_objects=custom_objects)\n layer._num_constants = num_constants\n return layer\n" ]
[ [ "tensorflow.python.training.tracking.base._SlotVariableRestoration", "tensorflow.python.ops.variable_scope.variable_creator_scope", "tensorflow.python.ops.variable_scope._get_default_variable_store", "tensorflow.python.training.saving.saveable_object_util.saveable_objects_for_op", "tensorflow.python.training.tracking.object_identity.ObjectIdentityWeakKeyDictionary", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.gen_io_ops.restore_v2", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.training.saving.saveable_object_util.op_list_to_dict", "tensorflow.python.training.tracking.object_identity.ObjectIdentityWeakSet", "tensorflow.python.framework.ops.inside_function", "tensorflow.python.training.saving.functional_saver.MultiDeviceSaver", "tensorflow.python.training.saving.saveable_object_util.validate_and_slice_inputs", "tensorflow.python.training.tracking.base.CheckpointPosition", "tensorflow.python.training.tracking.graph_view.ObjectGraphView", "tensorflow.python.pywrap_tensorflow.NewCheckpointReader", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.framework.ops.get_default_session", "tensorflow.python.util.compat.as_str", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.training.tracking.base.NoRestoreSaveable", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.training.tracking.data_structures.NoDependency", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.variables.VariableV1", "tensorflow.core.protobuf.trackable_object_graph_pb2.TrackableObjectGraph", "tensorflow.python.framework.ops.uid", "tensorflow.python.training.tracking.object_identity.ObjectIdentitySet", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.training.saver.Saver", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.keras.backend.name_scope", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.keras.backend.int_shape", "tensorflow.python.keras.backend.reshape", "tensorflow.python.keras.backend.shape", "tensorflow.python.keras.utils.generic_utils.has_arg", "tensorflow.python.keras.backend.reverse", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.backend.is_keras_tensor", "tensorflow.python.keras.utils.generic_utils.object_list_uid", "tensorflow.python.keras.backend.concatenate", "tensorflow.python.keras.layers.recurrent._standardize_args", "tensorflow.python.keras.backend.compute_output_shape", "tensorflow.python.keras.backend.rnn", "tensorflow.python.keras.backend.any", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.keras.engine.input_spec.InputSpec", "tensorflow.python.keras.utils.layer_utils.is_builtin_layer", "tensorflow.python.util.nest.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
ZakMiller/gpt-2
[ "7d9b41dbabc3beaeef3d62b52a409fe712e24f8d" ]
[ "encode.py" ]
[ "#!/usr/bin/env python3\n# Usage:\n# PYTHONPATH=src ./encode.py <file|directory|glob> /path/to/output.npz\n# PYTHONPATH=src ./train --dataset /path/to/output.npz\n\nimport argparse\nimport numpy as np\n\nimport encoder\nfrom load_dataset import load_dataset\n\nparser = argparse.ArgumentParser(\n description='Pre-encode text files into tokenized training set.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--model_name', metavar='MODEL', type=str, default='117M', help='Pretrained model name')\nparser.add_argument('--model_dir', metavar='MODELDIR', type=str, default='models', help='Models directory')\nparser.add_argument('--combine', metavar='CHARS', type=int, default=50000, help='Concatenate files with <|endoftext|> separator into chunks of this minimum size')\nparser.add_argument('--encoding', type=str, default='utf-8', help='Set the encoding for reading and writing files.')\nparser.add_argument('in_text', metavar='PATH', type=str, help='Input file, directory, or glob pattern (utf-8 text).')\nparser.add_argument('out_npz', metavar='OUT.npz', type=str, help='Output file path')\n\ndef main():\n args = parser.parse_args()\n enc = encoder.get_encoder(args.model_name, args.model_dir)\n print('Reading files')\n chunks = load_dataset(enc, args.in_text, args.combine, encoding=args.encoding)\n print('Writing', args.out_npz)\n np.savez_compressed(args.out_npz, *chunks)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.savez_compressed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
doc-doc/NExT-OE
[ "a45d81a48ab5ccc45ff6f7bea60597cc59bc546e" ]
[ "networks/VQAModel/HME.py" ]
[ "import torch\nimport torch.nn as nn\nimport random as rd\nimport sys\nsys.path.insert(0, 'networks')\nfrom Attention import TempAttention\nfrom memory_rand import MemoryRamTwoStreamModule, MemoryRamModule, MMModule\n\n\nclass HME(nn.Module):\n def __init__(self, vid_encoder, qns_encoder, ans_decoder, max_len_v, max_len_q, device, input_drop_p=0.2):\n \"\"\"\n Heterogeneous memory enhanced multimodal attention model for video question answering (CVPR19)\n\n \"\"\"\n super(HME, self).__init__()\n self.vid_encoder = vid_encoder\n self.qns_encoder = qns_encoder\n self.ans_decoder = ans_decoder\n\n dim = qns_encoder.dim_hidden\n\n self.temp_att_a = TempAttention(dim * 2, dim * 2, hidden_dim=256)\n self.temp_att_m = TempAttention(dim * 2, dim * 2, hidden_dim=256)\n self.mrm_vid = MemoryRamTwoStreamModule(dim, dim, max_len_v, device)\n self.mrm_txt = MemoryRamModule(dim, dim, max_len_q, device)\n\n self.mm_module_v1 = MMModule(dim, input_drop_p, device)\n\n self.linear_vid = nn.Linear(dim*2, dim)\n self.linear_qns = nn.Linear(dim*2, dim)\n self.linear_mem = nn.Linear(dim*2, dim)\n self.vq2word_hme = nn.Linear(dim*3, dim*2)\n self._init_weights()\n self.device = device\n\n def _init_weights(self):\n \"\"\"\n initialize the linear weights\n :return:\n \"\"\"\n nn.init.xavier_normal_(self.linear_vid.weight)\n nn.init.xavier_normal_(self.linear_qns.weight)\n nn.init.xavier_normal_(self.linear_mem.weight)\n nn.init.xavier_normal_(self.vq2word_hme.weight)\n\n\n def forward(self, vid_feats, qns, qns_lengths, ans, ans_lengths, teacher_force_ratio=0.5, iter_num=3, mode='train'):\n \"\"\"\n \"\"\"\n\n outputs_app_l1, outputs_app_l2, outputs_motion_l1, outputs_motion_l2 = self.vid_encoder(vid_feats) #(batch_size, fnum, feat_dim)\n\n outputs_app = torch.cat((outputs_app_l1, outputs_app_l2), dim=-1)\n outputs_motion = torch.cat((outputs_motion_l1, outputs_motion_l2), dim=-1)\n\n batch_size, fnum, vid_feat_dim = outputs_app.size()\n\n qns_output, qns_hidden = self.qns_encoder(qns, qns_lengths)\n # print(qns_output.shape, qns_hidden[0].shape) #torch.Size([10, 23, 256]) torch.Size([2, 10, 256])\n\n # qns_output = qns_output.permute(1, 0, 2)\n batch_size, seq_len, qns_feat_dim = qns_output.size()\n\n qns_embed = qns_hidden[0].permute(1, 0, 2).contiguous().view(batch_size, -1) #(batch_size, feat_dim)\n\n # Apply temporal attention\n att_app, beta_app = self.temp_att_a(qns_embed, outputs_app)\n att_motion, beta_motion = self.temp_att_m(qns_embed, outputs_motion)\n tmp_app_motion = torch.cat((outputs_app_l2[:, -1, :], outputs_motion_l2[:, -1, :]), dim=-1)\n\n mem_output = torch.zeros(batch_size, vid_feat_dim).to(self.device)\n\n for bs in range(batch_size):\n mem_ram_vid = self.mrm_vid(outputs_app_l2[bs], outputs_motion_l2[bs], fnum)\n cur_qns = qns_output[bs][:qns_lengths[bs]]\n mem_ram_txt = self.mrm_txt(cur_qns, qns_lengths[bs]) #should remove padded zeros\n mem_output[bs] = self.mm_module_v1(tmp_app_motion[bs].unsqueeze(0), mem_ram_vid, mem_ram_txt, iter_num)\n \"\"\"\n (64, 256) (22, 256) (1, 512)\n \"\"\"\n app_trans = torch.tanh(self.linear_vid(att_app))\n motion_trans = torch.tanh(self.linear_vid(att_motion))\n mem_trans = torch.tanh(self.linear_mem(mem_output))\n\n encoder_outputs = torch.cat((app_trans, motion_trans, mem_trans), dim=1)\n decoder_inputs = self.vq2word_hme(encoder_outputs)\n hidden = qns_hidden\n if mode == 'train':\n vocab_size = self.ans_decoder.vocab_size\n ans_len = ans.shape[1]\n input = ans[:, 0]\n\n outputs = torch.zeros(batch_size, ans_len, vocab_size).to(self.device)\n\n for t in 
range(0, ans_len):\n output, hidden = self.ans_decoder(decoder_inputs, hidden, input)\n outputs[:, t] = output\n teacher_force = rd.random() < teacher_force_ratio\n top1 = output.argmax(1)\n input = ans[:, t] if teacher_force else top1\n else:\n start = torch.LongTensor([1] * batch_size).to(self.device)\n outputs = self.ans_decoder.sample(decoder_inputs, hidden, start)\n\n return outputs" ]
[ [ "torch.LongTensor", "torch.zeros", "torch.cat", "torch.nn.init.xavier_normal_", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mkelley/mskpy
[ "41f41fd69bae71853abdfd2afbd535cd0b79c530" ]
[ "mskpy/models/surfaces.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nsurfaces --- Models for surfaces\n================================\n\n.. autosummary::\n :toctree: generated/\n\n Surface Models\n --------------\n SurfaceRadiation\n DAp\n DApColor\n HG\n NEATM\n\n Phase functions\n ---------------\n phaseHG\n lambertian\n\n Convenience functions\n ---------------------\n neatm\n\n\"\"\"\n\nimport numpy as np\nimport astropy.units as u\nfrom astropy.units import Quantity\n\n__all__ = [\n 'SurfaceRadiation',\n 'DAp',\n 'DApColor',\n 'HG',\n 'NEATM',\n\n 'phaseHG',\n 'lambertian',\n\n 'neatm'\n]\n\nclass SurfaceRadiation(object):\n \"\"\"An abstract class for light from a surface in the Solar System.\n\n Methods\n -------\n fluxd : Total flux density from the object.\n\n Notes\n -----\n Inheriting classes should override `fluxd`, and `__init__`\n functions, and should only take D and Ap as arguments (if\n possible), remaining parameters should be keywords.\n\n As much as possible, share the same keyword arguments between\n reflected and thermal models.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n pass\n\n def fluxd(self, geom, wave, unit=None):\n pass\n\nclass NEATM(SurfaceRadiation):\n \"\"\"The Near Earth Asteroid Thermal Model.\n\n If you use this model, please reference Harris (1998, Icarus, 131,\n 291-301).\n\n If unknown, use `eta=1.03` `epsilon=0.95` for a comet (Fernandez\n et al., 2013, Icarus 226, 1138-1170), and `eta=0.96` `epsilon=0.9`\n (or `eta=0.91` with `epsilon=0.95`) for an asteroid (Mainzer et\n al. 2011, ApJ 736, 100).\n\n Parameters\n ----------\n D : Quantity\n The diameter of the asteroid.\n Ap : float\n The geometric albedo.\n eta : float, optional\n The IR-beaming parameter.\n epsilon : float, optional\n The mean IR emissivity.\n G : float, optional\n The slope parameter of the Bowell H, G magnitude system, used to\n estimate the phase integral when `phaseint` is `None`.\n phaseint : float, optional\n Use this phase integral instead of that from the HG system.\n tol : float, optional\n The relative error tolerance in the result.\n\n Attributes\n ----------\n A : float\n Bond albedo.\n R : Quantity\n Radius.\n\n Methods\n -------\n T0 : Sub-solar point temperature.\n fluxd : Total flux density from the asteroid.\n\n \"\"\"\n\n def __init__(self, D, Ap, eta=1.0, epsilon=0.95, G=0.15,\n phaseint=None, tol=1e-3, **kwargs):\n self.D = D.to(u.km)\n self.Ap = Ap\n self.eta = eta\n self.epsilon = epsilon\n self.G = G\n self.phaseint = phaseint\n self.tol = tol\n\n def fluxd(self, geom, wave, unit=u.Jy):\n \"\"\"Flux density.\n\n Parameters\n ----------\n geom : dict of Quantities\n A dictionary-like object with the keys 'rh' (heliocentric\n distance), 'delta' (observer-target distance), and 'phase'\n (phase angle).\n wave : Quantity\n The wavelengths at which to compute the emission.\n unit : astropy Units, optional\n The return units. 
Must be spectral flux density.\n\n Returns\n -------\n fluxd : Quantity\n The flux density from the whole asteroid.\n\n \"\"\"\n\n from numpy import pi\n from scipy.integrate import quad\n\n phase = geom['phase']\n if not np.iterable(wave):\n wave = np.array([wave.value]) * wave.unit\n T0 = self.T0(geom['rh']).to(u.Kelvin).value\n fluxd = np.zeros(len(wave))\n\n # Integrate theta from -pi/2 to pi/2: emission is emitted from\n # the daylit hemisphere: theta = (phase - pi/2) to (phase +\n # pi/2), therfore the theta limits become [-pi/2, pi/2 -\n # phase]\n #\n # Integrate phi from -pi/2 to pi/2 (or 2 * integral from 0 to\n # pi/2)\n #\n # Drop some units for efficiency\n phase_r = np.abs(phase.to(u.rad).value)\n wave_um = wave.to(u.um).value\n for i in range(len(wave_um)):\n fluxd[i] = quad(self._latitude_emission,\n -pi / 2.0 + phase_r, pi / 2.0,\n args=(wave_um[i], T0, phase_r),\n epsrel=self.tol)[0]\n\n fluxd *= (self.epsilon * (self.D / geom['delta'])**2\n / pi / 2.0).decompose().value # W/m^2/Hz\n\n fluxd = fluxd * u.Unit('W / (m2 Hz)')\n equiv = u.spectral_density(u.um, wave.to(u.um).value)\n fluxd = fluxd.to(unit, equivalencies=equiv)\n if len(fluxd) == 1:\n return fluxd[0]\n else:\n return fluxd\n\n @property\n def A(self):\n \"\"\"Bond albedo.\n\n A = geometric albedo * phase integral = p * q\n p = 0.04 (default)\n G = slope parameter = 0.15 (mean val.)\n -> q = 0.290 + 0.684 * G = 0.3926\n -> A = 0.0157\n\n \"\"\"\n if self.phaseint is None:\n A = self.Ap * (0.290 + 0.684 * self.G)\n else:\n A = self.Ap * self.phaseint\n return A\n\n @property\n def R(self):\n \"\"\"Radius.\"\"\"\n return self.D / 2.0\n\n def T0(self, rh):\n \"\"\"Sub-solar point temperature.\n\n Parameters\n ----------\n rh : Quantity\n Heliocentric distance.\n\n Returns\n -------\n T0 : Quantity\n Temperature.\n\n \"\"\"\n\n Fsun = 1367.567 / rh.to(u.au).value**2 # W / m2\n sigma = 5.670373e-08 # W / (K4 m2)\n T0 = (((1.0 - self.A) * Fsun) / abs(self.eta) / self.epsilon\n / sigma)**0.25\n return T0 * u.K\n\n def fit(self, g, wave, fluxd, unc, **kwargs):\n \"\"\"Least-squares fit to a spectrum, varying `D` and `eta`.\n\n Uses the object's current state as the initial parameter set.\n\n Parameters\n ----------\n g : dict-like\n A dictionary-like object with the keys 'rh' (heliocentric\n distance), 'delta' (observer-target distance), and 'phase'\n (phase angle) as Quantities.\n wave : Quantity\n The spectrum wavelengths.\n fluxd : Quantity\n The spectrum flux density.\n unc : Quantity\n The uncertainties on `fluxd`.\n **kwargs\n Any keyword arguments for `scipy.optimize.leastsq`.\n\n Returns\n -------\n fit : NEATM\n Best-fit parameters.\n fiterr : tuple\n `(D, eta)` fit errors (assuming independent variables) or\n `None` if they cannot be computed.\n result : tuple\n The full output from `scipy.optimize.leastsq`.\n\n \"\"\"\n\n from copy import copy\n from scipy.optimize import leastsq\n\n def chi(p, neatm, g, wave, fluxd, unc):\n neatm.D = u.Quantity(abs(p[0]), u.km)\n neatm.eta = abs(p[1])\n model = neatm.fluxd(g, wave, unit=fluxd.unit).value\n chi = (model - fluxd.value) / unc.value\n rchisq = (chi**2).sum() / (len(wave) - 2.0)\n print(neatm.D, neatm.eta, rchisq)\n return chi\n\n neatm = copy(self)\n args = (neatm, g, wave, fluxd, unc)\n kwargs['epsfcn'] = kwargs.get('epsfcn', 1e-5)\n\n kwargs['full_output'] = True\n result = leastsq(chi, (self.D.value, self.eta), args, **kwargs)\n\n neatm.D = u.Quantity(result[0][0], u.km)\n neatm.eta = result[0][1]\n cov = result[1]\n if cov is None:\n err = None\n else:\n 
err = np.sqrt(np.diagonal(cov))\n\n return neatm, err, result\n\n def _point_emission(self, phi, theta, wave, T0):\n \"\"\"The emission from a single point.\n\n phi, theta : float [radians]\n wave : float [um]\n\n \"\"\"\n\n from numpy import pi\n from ..util import planck\n\n T = T0 * np.cos(phi)**0.25 * np.cos(theta)**0.25\n B = planck(wave, T, unit=None) # W / (m2 sr Hz)\n return (B * pi * np.cos(phi)**2) # W / (m2 Hz)\n\n def _latitude_emission(self, theta, wave, T0, phase):\n \"\"\"The emission from a single latitude.\n\n The function does not consider day vs. night, so make sure the\n integration limits are correctly set.\n\n theta : float [radians]\n wave : float [um]\n\n \"\"\"\n\n from scipy.integrate import quad\n from numpy import pi\n\n if not np.iterable(theta):\n theta = np.array([theta])\n\n fluxd = np.zeros_like(theta)\n for i in range(len(theta)):\n integral = quad(self._point_emission, 0.0, pi / 2.0,\n args=(theta[i], wave, T0),\n epsrel=self.tol / 10.0)\n fluxd[i] = (integral[0] * np.cos(theta[i] - phase))\n\n i = np.isnan(fluxd)\n if any(i):\n fluxd[i] = 0.0\n return fluxd\n\nclass HG(SurfaceRadiation):\n \"\"\"The IAU HG system for reflected light from asteroids.\n\n Parameters\n ----------\n H : float\n Absolute magnitude.\n G : float\n The slope parameter.\n mzp : Quantity, optional\n Flux density of magnitude 0.\n\n Attributes\n ----------\n\n Methods\n -------\n R : Radius.\n D : Diameter.\n fluxd : Total flux density.\n\n \"\"\"\n\n def __init__(self, H, G, mzp=3.51e-8 * u.Unit('W / (m2 um)'), **kwargs):\n self.H = H\n self.G = G\n self.mzp = mzp\n\n def fluxd(self, geom, wave, unit=u.Unit('W / (m2 um)')):\n \"\"\"Flux density.\n\n Parameters\n ----------\n geom : dict of Quantities\n A dictionary-like object with the keys 'rh' (heliocentric\n distance), 'delta' (observer-target distance), and 'phase'\n (phase angle).\n wave : Quantity\n The wavelengths at which to compute the emission.\n unit : astropy Units, optional\n The return units. Must be spectral flux density.\n\n Returns\n -------\n fluxd : Quantity\n The flux density from the whole asteroid.\n\n \"\"\"\n\n from ..calib import solar_flux\n\n if not np.iterable(wave):\n wave = np.array([wave.value]) * wave.unit\n\n rhdelta = geom['rh'].to(u.au).value * geom['delta'].to(u.au).value\n phase = geom['phase']\n\n mv = (self.H + 5.0 * np.log10(rhdelta)\n - 2.5 * np.log10(phaseHG(np.abs(phase.to(u.deg).value), self.G)))\n\n wave_v = np.linspace(0.5, 0.6) * u.um\n fsun_v = solar_flux(wave_v, unit=unit).value.mean()\n fsun = solar_flux(wave, unit=unit)\n\n fluxd = self.mzp * 10**(-0.4 * mv) * fsun / fsun_v\n\n if len(fluxd) == 1:\n return fluxd[0]\n else:\n return fluxd\n\n def D(self, Ap, Msun=-26.75):\n \"\"\"Diameter.\n\n Parameters\n ----------\n Ap : float\n Geometric albedo.\n Msun : float, optional\n Absolute magnitude of the Sun.\n\n Returns\n -------\n D : Quantity\n Diameter of the asteroid.\n \n \"\"\"\n D = 2 / np.sqrt(Ap) * 10**(0.2 * (Msun - self.H)) * u.au\n return D.to(u.km)\n\n def R(self, *args, **kwargs):\n \"\"\"Radius via D().\"\"\"\n return self.D(*args, **kwargs) / 2.0\n\nclass DAp(SurfaceRadiation):\n \"\"\"Reflected light from asteroids given D, Ap.\n\n Parameters\n ----------\n D : Quantity\n Diameter.\n Ap : float\n Geometric albedo.\n G : float, optional\n If `phasef` is None, generate an IAU HG system phase function.\n phasef : function, optional\n Phase function. 
It must only take one parameter, phase angle,\n in units of degrees.\n\n Attributes\n ----------\n R : radius\n\n Methods\n -------\n H : Absolute magnitude\n\n \"\"\"\n\n def __init__(self, D, Ap, G=0.15, phasef=None, **kwargs):\n self.D = D\n self.Ap = Ap\n\n if phasef is None:\n def phi_g(phase):\n return phaseHG(phase, G)\n self.phasef = phi_g\n else: \n self.phasef = phasef\n\n def fluxd(self, geom, wave, unit=u.Unit('W / (m2 um)')):\n \"\"\"Flux density.\n\n Parameters\n ----------\n geom : dict of Quantities\n A dictionary-like object with the keys 'rh' (heliocentric\n distance), 'delta' (observer-target distance), and 'phase'\n (phase angle).\n wave : Quantity\n The wavelengths at which to compute the emission.\n unit : astropy Units, optional\n The return units. Must be spectral flux density.\n\n Returns\n -------\n fluxd : Quantity\n The flux density from the whole asteroid.\n\n \"\"\"\n\n from numpy import pi\n from ..calib import solar_flux\n\n if not np.iterable(wave):\n wave = np.array([wave.value]) * wave.unit\n\n delta = geom['delta']\n phase = geom['phase']\n fsun = solar_flux(wave, unit=unit) / geom['rh'].to(u.au).value**2\n\n #fsca = fsun * Ap * phasef(phase) * pi * R**2 / pi / delta**2\n fsca = (fsun * self.Ap * self.phasef(np.abs(phase.to(u.deg).value))\n * (self.R / delta).decompose()**2)\n\n if unit != fsca.unit:\n fsca = fsca.to(unit, equivalencies=u.spectral_density(u.um, wave))\n\n return fsca\n\n def H(self, Msun=-26.75):\n \"\"\"Absolute (V) magnitude.\n\n Parameters\n ----------\n Msun : float, optional\n Absolute magnitude of the Sun.\n\n Returns\n -------\n H : float\n\n \"\"\"\n\n return 5 * np.log10(self.R.to(u.au).value * np.sqrt(self.Ap)) - Msun\n\n @property\n def R(self):\n \"\"\"Radius.\"\"\"\n return self.D / 2.0\n\nclass DApColor(DAp):\n \"\"\"Reflected light from asteroids given D, Ap, and a color.\n\n Parameters\n ----------\n D : Quantity\n Diameter.\n Ap : float\n Geometric albedo at 0.55 um.\n S : float\n Spectral slope for reflected light:\n `refl = 1 + (lambda - lambda0) * S / 10`\n where `S` has units of % per 0.1 um, `lambda` has units of um,\n and `lambda0` is 0.55 um. `R` is limited to `0 <= refl <=\n refl_max`.\n refl_max : float\n Use this value as the maximum reflectance.\n G : float, optional\n If `phasef` is None, generate an IAU HG system phase function.\n phasef : function, optional\n Phase function. 
It must only take one parameter, phase angle,\n in units of degrees.\n\n Attributes\n ----------\n R : radius\n\n Methods\n -------\n H : Absolute magnitude\n\n \"\"\"\n\n def __init__(self, D, Ap, S, refl_max=2.5, **kwargs):\n self.S = S\n self.refl_max = refl_max\n DAp.__init__(self, D, Ap, **kwargs)\n\n def fluxd(self, geom, wave, unit=u.Unit('W / (m2 um)')):\n from numpy import pi\n from ..calib import solar_flux\n\n if not np.iterable(wave):\n wave = np.array([wave.value]) * wave.unit\n\n delta = geom['delta']\n phase = geom['phase']\n fsun = solar_flux(wave, unit=unit) / geom['rh'].to(u.au).value**2\n\n refl = 1 + (wave - 0.55 * u.um).value * self.S / 10.\n if np.any(refl > self.refl_max):\n refl[refl > self.refl_max] = self.refl_max\n if np.any(refl < 0.0):\n refl[refl < 0.0] = 0.0\n\n #fsca = fsun * Ap * phasef(phase) * pi * R**2 / pi / delta**2\n fsca = (fsun * self.Ap * refl\n * self.phasef(np.abs(phase.to(u.deg).value))\n * (self.R / delta).decompose()**2)\n\n if unit != fsca.unit:\n fsca = fsca.to(unit, equivalencies=u.spectral_density(u.um, wave))\n\n return fsca\n\n fluxd.__doc__ = DAp.__doc__\n\ndef _phaseHG_i(i, phase):\n \"\"\"Helper function for phaseHG.\n\n i: integer\n phase : float, radians\n\n \"\"\"\n\n A = [3.332, 1.862]\n B = [0.631, 1.218]\n C = [0.986, 0.238]\n Phi_S = 1.0 - C[i] * np.sin(phase) / \\\n (0.119 + 1.341 * np.sin(phase) - 0.754 * np.sin(phase)**2)\n Phi_L = np.exp(-A[i] * np.tan(0.5 * phase)**B[i])\n W = np.exp(-90.56 * np.tan(0.5 * phase)**2)\n return W * Phi_S + (1.0 - W) * Phi_L\n\ndef phaseHG(phase, G):\n \"\"\"IAU HG system phase function.\n\n Parameters\n ----------\n phase : float\n Phase angle. [deg]\n\n Returns\n -------\n phi : float\n\n \"\"\"\n phase = np.radians(phase)\n return ((1.0 - G) * _phaseHG_i(0, phase) + G * _phaseHG_i(1, phase))\n\ndef lambertian(phase):\n \"\"\"Return the phase function from an Lambert disc computed at a\n specific phase.\n\n Parameters\n ----------\n phase : float or array\n The phase or phases in question. [degrees]\n\n Returns\n -------\n phi : float or array_like\n The ratio of light observed at the requested phase to that\n observed at phase = 0 degrees (full disc).\n\n Notes\n -----\n Uses the analytic form found in Brown 2004, ApJ 610, 1079.\n\n \"\"\"\n phase = np.radians(np.abs(phase))\n return (np.sin(phase) + (pi - phase) * np.cos(phase)) / pi\n\ndef neatm(D, Ap, geom, wave, unit=u.Jy, **kwargs):\n \"\"\"Convenience function for NEATM.\n\n Parameters\n ----------\n D : Quantity\n Diameter.\n Ap : float\n Geometric albedo.\n geom : dict of Quantities\n Geometry of observation: rh, Delta, phase.\n wave : Quantity\n Wavelengths at which to evaluate the model.\n unit : astropy Units, optional\n The return units. Must be spectral flux density.\n **kwargs\n Any `models.NEATM` keyword argument.\n\n Returns\n -------\n fluxd : Quantity\n The flux density from the whole asteroid.\n\n \"\"\"\n\n return NEATM(D, Ap, **kwargs).fluxd(geom, wave, unit=unit)\n\n# update module docstring\nfrom ..util import autodoc\nautodoc(globals())\ndel autodoc\n\n" ]
[ [ "numpy.radians", "numpy.abs", "numpy.linspace", "numpy.sqrt", "numpy.isnan", "numpy.cos", "scipy.integrate.quad", "numpy.sin", "numpy.tan", "scipy.optimize.leastsq", "numpy.log10", "numpy.zeros_like", "numpy.any", "numpy.iterable", "numpy.array", "numpy.diagonal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
ARF1/numba
[ "8a6d09b15f0090144161158d01550847f15fc1c8", "8a6d09b15f0090144161158d01550847f15fc1c8" ]
[ "numba/cuda/tests/cudapy/test_alignment.py", "numba/cuda/tests/cudapy/test_device_func.py" ]
[ "import numpy as np\nfrom numba import from_dtype, cuda\nfrom numba.cuda.testing import skip_on_cudasim, SerialMixin\nimport unittest\n\nclass TestAlignment(SerialMixin, unittest.TestCase):\n def test_record_alignment(self):\n rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')], align=True)\n rec = from_dtype(rec_dtype)\n\n @cuda.jit((rec[:],))\n def foo(a):\n i = cuda.grid(1)\n a[i].a = a[i].b\n\n a_recarray = np.recarray(3, dtype=rec_dtype)\n for i in range(a_recarray.size):\n a_rec = a_recarray[i]\n a_rec.a = 0\n a_rec.b = (i + 1) * 123\n\n foo[1, 3](a_recarray)\n\n self.assertTrue(np.all(a_recarray.a == a_recarray.b))\n\n @skip_on_cudasim('Simulator does not check alignment')\n def test_record_alignment_error(self):\n rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')])\n rec = from_dtype(rec_dtype)\n\n with self.assertRaises(Exception) as raises:\n @cuda.jit((rec[:],))\n def foo(a):\n i = cuda.grid(1)\n a[i].a = a[i].b\n\n self.assertTrue('type float64 is not aligned' in str(raises.exception))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "import re\nimport types\n\nimport numpy as np\n\nfrom numba.cuda.testing import unittest, skip_on_cudasim, SerialMixin\nfrom numba import cuda, jit, int32\nfrom numba.core.errors import TypingError\n\n\nclass TestDeviceFunc(SerialMixin, unittest.TestCase):\n\n def test_use_add2f(self):\n\n @cuda.jit(\"float32(float32, float32)\", device=True)\n def add2f(a, b):\n return a + b\n\n def use_add2f(ary):\n i = cuda.grid(1)\n ary[i] = add2f(ary[i], ary[i])\n\n compiled = cuda.jit(\"void(float32[:])\")(use_add2f)\n\n nelem = 10\n ary = np.arange(nelem, dtype=np.float32)\n exp = ary + ary\n compiled[1, nelem](ary)\n\n self.assertTrue(np.all(ary == exp), (ary, exp))\n\n def test_indirect_add2f(self):\n\n @cuda.jit(\"float32(float32, float32)\", device=True)\n def add2f(a, b):\n return a + b\n\n @cuda.jit(\"float32(float32, float32)\", device=True)\n def indirect(a, b):\n return add2f(a, b)\n\n def indirect_add2f(ary):\n i = cuda.grid(1)\n ary[i] = indirect(ary[i], ary[i])\n\n compiled = cuda.jit(\"void(float32[:])\")(indirect_add2f)\n\n nelem = 10\n ary = np.arange(nelem, dtype=np.float32)\n exp = ary + ary\n compiled[1, nelem](ary)\n\n self.assertTrue(np.all(ary == exp), (ary, exp))\n\n def _check_cpu_dispatcher(self, add):\n @cuda.jit\n def add_kernel(ary):\n i = cuda.grid(1)\n ary[i] = add(ary[i], 1)\n\n ary = np.arange(10)\n expect = ary + 1\n add_kernel[1, ary.size](ary)\n np.testing.assert_equal(expect, ary)\n\n def test_cpu_dispatcher(self):\n # Test correct usage\n @jit\n def add(a, b):\n return a + b\n\n self._check_cpu_dispatcher(add)\n\n @skip_on_cudasim('not supported in cudasim')\n def test_cpu_dispatcher_invalid(self):\n # Test invalid usage\n # Explicit signature disables compilation, which also disable\n # compiling on CUDA.\n @jit('(i4, i4)')\n def add(a, b):\n return a + b\n\n # Check that the right error message is provided.\n with self.assertRaises(TypingError) as raises:\n self._check_cpu_dispatcher(add)\n msg = \"Untyped global name 'add':.*using cpu function on device\"\n expected = re.compile(msg)\n self.assertTrue(expected.search(str(raises.exception)) is not None)\n\n def test_cpu_dispatcher_other_module(self):\n @jit\n def add(a, b):\n return a + b\n\n mymod = types.ModuleType(name='mymod')\n mymod.add = add\n del add\n\n @cuda.jit\n def add_kernel(ary):\n i = cuda.grid(1)\n ary[i] = mymod.add(ary[i], 1)\n\n ary = np.arange(10)\n expect = ary + 1\n add_kernel[1, ary.size](ary)\n np.testing.assert_equal(expect, 
ary)\n\n @skip_on_cudasim('not supported in cudasim')\n def test_inspect_ptx(self):\n @cuda.jit(device=True)\n def foo(x, y):\n return x + y\n\n args = (int32, int32)\n cres = foo.compile(args)\n\n fname = cres.fndesc.mangled_name\n # Verify that the function name has \"foo\" in it as in the python name\n self.assertIn('foo', fname)\n\n ptx = foo.inspect_ptx(args)\n # Check that the compiled function name is in the PTX.\n self.assertIn(fname, ptx.decode('ascii'))\n\n @skip_on_cudasim('not supported in cudasim')\n def test_inspect_llvm(self):\n @cuda.jit(device=True)\n def foo(x, y):\n return x + y\n\n args = (int32, int32)\n cres = foo.compile(args)\n\n fname = cres.fndesc.mangled_name\n # Verify that the function name has \"foo\" in it as in the python name\n self.assertIn('foo', fname)\n\n llvm = foo.inspect_llvm(args)\n # Check that the compiled function name is in the LLVM.\n self.assertIn(fname, llvm)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.all", "numpy.recarray", "numpy.dtype" ], [ "numpy.all", "numpy.arange", "numpy.testing.assert_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neelabh17/mrf-calibration
[ "a5edcb257719d6ca787aca66e27dd86bb0dbbab6" ]
[ "calibration_library/models/resnet.py" ]
[ "import torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nimport torch.nn.init as init\n\n\ndef _weights_init(m):\n classname = m.__class__.__name__\n #print(classname)\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight)\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, option='A'):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n if option == 'A':\n \"\"\"\n For CIFAR10 ResNet paper uses option A.\n \"\"\"\n self.shortcut = LambdaLayer(lambda x:\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), \"constant\", 0))\n elif option == 'B':\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 16\n\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n self.linear = nn.Linear(64, num_classes)\n\n self.apply(_weights_init)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, out.size()[3])\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.BatchNorm2d", "torch.nn.functional.pad", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xuli2020sj/Intelligent-Vehicle
[ "d69d26147ed4f0adbbd4bff83a953f9ef3509131" ]
[ "motion/server.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 10 21:52:21 2021\n\n@author: qq735\n\"\"\"\n\nfrom multiprocessing import Process\nfrom socket import *\n# import wiringpi\n\nimport RPi.GPIO as GPIO\nimport time\nimport string\nimport threading\nimport timeout_decorator\n\nimport serial\nimport pynmea2\n\nserialPort = serial.Serial(\"/dev/ttyUSB0\", 9600, timeout=0.5)\n\nfrom ctypes import *\nimport numpy as np\n\nmlx90640 = cdll.LoadLibrary('./libmlx90640.so')\n\n# 初始化上下左右角度为90度\nServoLeftRightPos = 90\nServoUpDownPos = 90\ng_frontServoPos = 90\nnowfrontPos = 0\n\n# 小车电机引脚定义\nIN1 = 20\nIN2 = 21\nIN3 = 19\nIN4 = 26\nENA = 16\nENB = 13\n\n# 超声波引脚定义\nEchoPin = 0\nTrigPin = 1\n\n# RGB三色灯引脚定义\nLED_R = 22\nLED_G = 27\nLED_B = 24\n\n# 舵机引脚定义\nFrontServoPin = 23\nServoUpDownPin = 9\nServoLeftRightPin = 11\n\n# 红外避障引脚定义\nAvoidSensorLeft = 12\nAvoidSensorRight = 17\n\n# 蜂鸣器引脚定义\nbuzzer = 8\n\n# 变量的定义\n# 七彩灯RGB三色变量定义\nred = 0\ngreen = 0\nblue = 0\n# TCP通信数据包标志位以及接受和发送数据变量\nNewLineReceived = 0\nInputString = ''\nrecvbuf = ''\nReturnTemp = ''\n# 小车和舵机状态变量\ng_CarState = 0\ng_ServoState = 0\n# 小车速度变量,20表示40cm每秒\nCarSpeedControl = 20\n# 寻迹,避障,寻光变量\ninfrared_track_value = ''\ninfrared_avoid_value = ''\nLDR_value = ''\ng_lednum = 0\n\n# 设置GPIO口为BCM编码方式\nGPIO.setmode(GPIO.BCM)\n\n# 忽略警告信息\nGPIO.setwarnings(False)\n\nimport eventlet\nimport time\n\neventlet.monkey_patch()\n\n\n# 电机引脚初始化操作\ndef init():\n global pwm_ENA\n global pwm_ENB\n global delaytime\n global CarSpeedControl\n global pwm_FrontServo\n global pwm_UpDownServo\n global pwm_LeftRightServo\n global nowfrontPos\n\n GPIO.setup(ENA, GPIO.OUT, initial=GPIO.HIGH)\n GPIO.setup(IN1, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(IN2, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(ENB, GPIO.OUT, initial=GPIO.HIGH)\n GPIO.setup(IN3, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(IN4, GPIO.OUT, initial=GPIO.LOW)\n\n GPIO.setup(buzzer, GPIO.OUT, initial=GPIO.HIGH)\n\n GPIO.setup(EchoPin, GPIO.IN)\n GPIO.setup(TrigPin, GPIO.OUT)\n GPIO.setup(FrontServoPin, GPIO.OUT)\n GPIO.setup(ServoUpDownPin, GPIO.OUT)\n GPIO.setup(ServoLeftRightPin, GPIO.OUT)\n\n GPIO.setup(LED_R, GPIO.OUT)\n GPIO.setup(LED_G, GPIO.OUT)\n GPIO.setup(LED_B, GPIO.OUT)\n\n # 设置pwm引脚和频率为2000hz\n pwm_ENA = GPIO.PWM(ENA, 2000)\n pwm_ENB = GPIO.PWM(ENB, 2000)\n # pwm_ENA.start(0)\n # pwm_ENB.start(0)\n\n pwm_FrontServo = GPIO.PWM(FrontServoPin, 50)\n pwm_UpDownServo = GPIO.PWM(ServoUpDownPin, 50)\n pwm_LeftRightServo = GPIO.PWM(ServoLeftRightPin, 50)\n pwm_FrontServo.start(0)\n pwm_UpDownServo.start(0)\n pwm_LeftRightServo.start(0)\n\n\n# 红外\ndef tcam():\n temp = (c_float * 768)()\n ptemp = pointer(temp)\n mlx90640.get_mlx90640_temp(ptemp)\n my_nparray = np.frombuffer(temp, dtype=np.float32)\n\n t = my_nparray.reshape((32, 24))\n\n # print(np.max(t))\n # print(np.argmax(t))\n return np.max(t), np.argmax(t) % 32\n\n\n# GPS 经纬高\ndef GetGPS():\n lat = -1\n lon = -1\n alt = -1\n s = serialPort.readline()\n # print(s)\n # print(type(s.decode()))\n # print(s.find(b'GGA'))\n s = s.decode()\n if s.find('GGA') > -1:\n msg = pynmea2.parse(s)\n lat = msg.lat\n lon = msg.lon\n alt = msg.altitude\n return (lat, lon, alt)\n\n\n# 超声波测距函数\ndef Distance_test():\n GPIO.output(TrigPin, GPIO.HIGH)\n time.sleep(0.000015)\n GPIO.output(TrigPin, GPIO.LOW)\n while not GPIO.input(EchoPin):\n pass\n t1 = time.time()\n while GPIO.input(EchoPin):\n pass\n t2 = time.time()\n # print (\"distance is %d \" % (((t2 - t1)* 340 / 2) * 100))\n time.sleep(0.01)\n return ((t2 - t1) * 340 / 2) * 100\n\n\n# 根据转动的角度来点亮相应的颜色\ndef 
corlor_light(pos):\n if pos > 150:\n GPIO.output(LED_R, GPIO.HIGH)\n GPIO.output(LED_G, GPIO.LOW)\n GPIO.output(LED_B, GPIO.LOW)\n elif pos > 125:\n GPIO.output(LED_R, GPIO.LOW)\n GPIO.output(LED_G, GPIO.HIGH)\n GPIO.output(LED_B, GPIO.LOW)\n elif pos > 100:\n GPIO.output(LED_R, GPIO.LOW)\n GPIO.output(LED_G, GPIO.LOW)\n GPIO.output(LED_B, GPIO.HIGH)\n elif pos > 75:\n GPIO.output(LED_R, GPIO.HIGH)\n GPIO.output(LED_G, GPIO.HIGH)\n GPIO.output(LED_B, GPIO.LOW)\n elif pos > 50:\n GPIO.output(LED_R, GPIO.LOW)\n GPIO.output(LED_G, GPIO.HIGH)\n GPIO.output(LED_B, GPIO.HIGH)\n elif pos > 25:\n GPIO.output(LED_R, GPIO.HIGH)\n GPIO.output(LED_G, GPIO.LOW)\n GPIO.output(LED_B, GPIO.HIGH)\n elif pos > 0:\n GPIO.output(LED_R, GPIO.HIGH)\n GPIO.output(LED_G, GPIO.HIGH)\n GPIO.output(LED_B, GPIO.HIGH)\n else:\n GPIO.output(LED_R, GPIO.LOW)\n GPIO.output(LED_G, GPIO.LOW)\n GPIO.output(LED_B, GPIO.LOW)\n\n\n# 舵机来回转动\ndef servo_control_color():\n for pos in range(19):\n frontservo_appointed_detection(pos * 10)\n time.sleep(0.02)\n updownservo_appointed_detection(pos * 10)\n time.sleep(0.02)\n leftrightservo_appointed_detection(pos * 10)\n time.sleep(0.02)\n corlor_light(pos)\n for pos in reversed(range(19)):\n frontservo_appointed_detection(pos * 10)\n time.sleep(0.02)\n updownservo_appointed_detection(pos * 10)\n time.sleep(0.02)\n leftrightservo_appointed_detection(pos * 10)\n time.sleep(0.02)\n corlor_light(pos)\n\n\n# 小车前进\ndef run():\n GPIO.output(IN1, GPIO.HIGH)\n GPIO.output(IN2, GPIO.LOW)\n GPIO.output(IN3, GPIO.HIGH)\n GPIO.output(IN4, GPIO.LOW)\n # 启动PWM设置占空比为100(0--100)\n pwm_ENA.start(CarSpeedControl)\n pwm_ENB.start(CarSpeedControl)\n # pwm_ENA.ChangeDutyCycle(CarSpeedControl)\n # pwm_ENB.ChangeDutyCycle(CarSpeedControl)\n\n\n# 小车后退\ndef back():\n GPIO.output(IN1, GPIO.LOW)\n GPIO.output(IN2, GPIO.HIGH)\n GPIO.output(IN3, GPIO.LOW)\n GPIO.output(IN4, GPIO.HIGH)\n pwm_ENA.ChangeDutyCycle(CarSpeedControl)\n pwm_ENB.ChangeDutyCycle(CarSpeedControl)\n\n\n# 小车左转\ndef left():\n pwm_ENA.ChangeDutyCycle(CarSpeedControl)\n pwm_ENB.ChangeDutyCycle(CarSpeedControl)\n GPIO.output(IN1, GPIO.LOW)\n GPIO.output(IN2, GPIO.LOW)\n GPIO.output(IN3, GPIO.HIGH)\n GPIO.output(IN4, GPIO.LOW)\n pwm_ENA.start(CarSpeedControl)\n pwm_ENB.start(CarSpeedControl)\n\n\n# 小车右转\ndef right():\n pwm_ENA.ChangeDutyCycle(CarSpeedControl)\n pwm_ENB.ChangeDutyCycle(CarSpeedControl)\n GPIO.output(IN1, GPIO.HIGH)\n GPIO.output(IN2, GPIO.LOW)\n GPIO.output(IN3, GPIO.LOW)\n GPIO.output(IN4, GPIO.LOW)\n pwm_ENA.start(CarSpeedControl)\n pwm_ENB.start(CarSpeedControl)\n\n\n# 小车原地左转\ndef spin_left():\n pwm_ENA.ChangeDutyCycle(CarSpeedControl)\n pwm_ENB.ChangeDutyCycle(CarSpeedControl)\n GPIO.output(IN1, GPIO.LOW)\n GPIO.output(IN2, GPIO.HIGH)\n GPIO.output(IN3, GPIO.HIGH)\n GPIO.output(IN4, GPIO.LOW)\n pwm_ENA.start(CarSpeedControl)\n pwm_ENB.start(CarSpeedControl)\n\n\n# 小车原地右转\ndef spin_right():\n pwm_ENA.ChangeDutyCycle(CarSpeedControl)\n pwm_ENB.ChangeDutyCycle(CarSpeedControl)\n GPIO.output(IN1, GPIO.HIGH)\n GPIO.output(IN2, GPIO.LOW)\n GPIO.output(IN3, GPIO.LOW)\n GPIO.output(IN4, GPIO.HIGH)\n pwm_ENA.start(CarSpeedControl)\n pwm_ENB.start(CarSpeedControl)\n\n\n# 小车停止\ndef brake():\n GPIO.output(IN1, GPIO.LOW)\n GPIO.output(IN2, GPIO.LOW)\n GPIO.output(IN3, GPIO.LOW)\n GPIO.output(IN4, GPIO.LOW)\n\n\n#\ndef whistle():\n GPIO.output(buzzer, GPIO.LOW)\n time.sleep(0.1)\n GPIO.output(buzzer, GPIO.HIGH)\n time.sleep(0.001)\n\n\n# 前舵机旋转到指定角度\ndef frontservo_appointed_detection(pos):\n pulsewidth = (pos * 11) + 500\n 
GPIO.output(FrontServoPin, GPIO.HIGH)\n time.sleep(pulsewidth / 1000000.0)\n GPIO.output(FrontServoPin, GPIO.LOW)\n time.sleep(20.0 / 1000 - pulsewidth / 1000000.0)\n global nowfrontPos\n nowfrontPos = pos\n\n\ndef leftrightservo_appointed_detection(pos):\n pulsewidth = (pos * 11) + 500\n GPIO.output(ServoLeftRightPin, GPIO.HIGH)\n time.sleep(pulsewidth / 1000000.0)\n GPIO.output(ServoLeftRightPin, GPIO.LOW)\n time.sleep(20.0 / 1000 - pulsewidth / 1000000.0)\n global nowfrontPos\n nowfrontPos = pos\n\n\n# 摄像头舵机上下旋转到指定角度\ndef updownservo_appointed_detection(pos):\n pulsewidth = (pos * 11) + 500\n GPIO.output(ServoUpDownPin, GPIO.HIGH)\n time.sleep(pulsewidth / 1000000.0)\n GPIO.output(ServoUpDownPin, GPIO.LOW)\n time.sleep(20.0 / 1000 - pulsewidth / 1000000.0)\n global nowfrontPos\n nowfrontPos = pos\n\n\ndef servo_init():\n servoinitpos = 90\n for i in range(18):\n frontservo_appointed_detection(servoinitpos)\n time.sleep(0.02)\n updownservo_appointed_detection(servoinitpos)\n time.sleep(0.02)\n leftrightservo_appointed_detection(servoinitpos)\n time.sleep(0.02)\n # pwm_FrontServo.ChangeDutyCycle(0) # 归零信号\n # pwm_LeftRightServo.ChangeDutyCycle(0) # 归零信号\n # 0pwm_UpDownServo.ChangeDutyCycle(0) # 归零信号\n\n\ndef auto():\n init()\n # servo_init()\n taxishu = 0.008\n FindNum = 0\n\n while FindNum == 0:\n distance = []\n temperature = []\n angle = []\n for i in range(7):\n for ii in range(9):\n frontservo_appointed_detection(i * 30)\n time.sleep(0.01)\n time.sleep(0.8)\n distance.append(Distance_test())\n t, k = tcam()\n temperature.append(t)\n k = int((k - 15.5) / 31 * 55)\n angle.append(k)\n # 正前方为0,右侧为负数,左为正\n for i in range(18):\n frontservo_appointed_detection(90)\n time.sleep(0.02)\n print(distance)\n print(temperature)\n print(angle)\n\n index = temperature.index(max(temperature))\n target_angle = angle[index] + index * 30\n print(index)\n print(target_angle)\n\n # 温度过高,找到火源\n if temperature[index] > 100:\n FindNum = FindNum + 1\n lat, lon, alt = GetGPS()\n print(\"-- Lat: %s -- Lon: %s -- Altitude: %s\" % (lat, lon, alt))\n for i in range(3):\n servo_control_color()\n break\n\n if target_angle <= 90:\n # 目标在右\n needtime = (90 - target_angle) * taxishu\n spin_right()\n time.sleep(needtime)\n brake()\n elif target_angle > 90:\n # 目标在左\n needtime = (target_angle - 90) * taxishu\n spin_left()\n time.sleep(needtime)\n brake()\n\n if distance[index] > 60:\n run()\n time.sleep(2)\n brake()\n elif distance[index] < 60 and distance[index] > 40 or temperature[index] > 35:\n run()\n time.sleep(1)\n print(\"快了\")\n brake()\n elif (distance[index] < 50 or distance[min(index + 1, 6)] < 50 or distance[max(0, index - 1)] < 50) and (\n temperature[index] < 38):\n print('避障')\n left()\n time.sleep(1)\n brake()\n time.sleep(0.2)\n run()\n time.sleep(1.5)\n brake()\n time.sleep(0.2)\n right()\n time.sleep(2)\n brake()\n\n#\ndef whistle():\n GPIO.output(buzzer, GPIO.LOW)\n time.sleep(0.1)\n GPIO.output(buzzer, GPIO.HIGH)\n time.sleep(0.001)\n\n\n# 前舵机旋转到指定角度\ndef frontservo_appointed_detection(pos):\n pulsewidth = (pos * 11) + 500\n GPIO.output(FrontServoPin, GPIO.HIGH)\n time.sleep(pulsewidth / 1000000.0)\n GPIO.output(FrontServoPin, GPIO.LOW)\n time.sleep(20.0 / 1000 - pulsewidth / 1000000.0)\n global nowfrontPos\n nowfrontPos = pos\n # time.sleep(0.02)\n # for i in range(18):\n # pwm_FrontServo.start(2.5 + 10 * pos / 180)\n # time.sleep(0.02) # 等待20ms周期结束\n # # pwm_FrontServo.ChangeDutyCycle(0) # 归零信号\n\n\n# 前舵机向左\ndef front_servo0():\n for i in range(18):\n frontservo_appointed_detection(0)\n 
time.sleep(0.02)\n\n\ndef front_servo45():\n for i in range(18):\n frontservo_appointed_detection(45)\n time.sleep(0.02)\n\n\ndef front_servo90():\n for i in range(18):\n frontservo_appointed_detection(90)\n time.sleep(0.02)\n\n\ndef front_servo135():\n for i in range(18):\n frontservo_appointed_detection(135)\n time.sleep(0.02)\n\n\ndef front_servo180():\n for i in range(18):\n frontservo_appointed_detection(180)\n time.sleep(0.02)\n\n # 摄像头舵机左右旋转到指定角度\n\n\ndef leftrightservo_appointed_detection(pos):\n for i in range(18):\n pwm_LeftRightServo.ChangeDutyCycle(2.5 + 10 * pos / 180)\n time.sleep(0.2) # 等待20ms周期结束\n # pwm_LeftRightServo.ChangeDutyCycle(0)\t#归零信号\n\n\n# 摄像头舵机上下旋转到指定角度\ndef updownservo_appointed_detection(pos):\n for i in range(18):\n pwm_UpDownServo.ChangeDutyCycle(2.5 + 10 * pos / 180)\n time.sleep(0.2) # 等待20ms周期结束\n # pwm_UpDownServo.ChangeDutyCycle(0)\t#归零信号\n\n\ndef servo_init():\n servoflag = 0\n servoinitpos = 90\n if servoflag != servoinitpos:\n frontservo_appointed_detection(servoinitpos)\n updownservo_appointed_detection(servoinitpos)\n leftrightservo_appointed_detection(servoinitpos)\n time.sleep(0.5)\n pwm_FrontServo.ChangeDutyCycle(0) # 归零信号\n pwm_LeftRightServo.ChangeDutyCycle(0) # 归零信号\n pwm_UpDownServo.ChangeDutyCycle(0) # 归零信号\n\n\n################################################################ 需要为客户端提供服务\ndef do_service(connect_socket):\n while True:\n recv_data = connect_socket.recv(1024)\n if len(recv_data) == 0:\n # 发送方关闭tcp的连接,recv()不会阻塞,而是直接返回''\n # print('client %s close' % str(client_addr)) \n # s.getpeername() s.getsockname()\n # wiringpi.digitalWrite(0,0)\n print('client %s close' % str(connect_socket.getpeername()))\n break\n\n if (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'w'):\n with eventlet.Timeout(1, False):\n run()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 's'):\n with eventlet.Timeout(1, False):\n back()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'a'):\n with eventlet.Timeout(1, False):\n left()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'd'):\n with eventlet.Timeout(1, False):\n right()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'x'):\n with eventlet.Timeout(1, False):\n brake()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'e'):\n with eventlet.Timeout(1, False):\n spin_right()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'q'):\n with eventlet.Timeout(1, False):\n spin_left()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'y'):\n with eventlet.Timeout(1, False):\n front_servo0()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'u'):\n with eventlet.Timeout(1, False):\n front_servo45()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'i'):\n with eventlet.Timeout(1, False):\n front_servo90()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'o'):\n with eventlet.Timeout(1, False):\n front_servo135()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'p'):\n with eventlet.Timeout(1, False):\n front_servo180()\n elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'n'):\n with eventlet.Timeout(1, False):\n front_servo180()\n # # else:\n # wiringpi.digitalWrite(0,0)\n # if len(recv_data) > 1:\n # wiringpi.digitalWrite(0,0)\n\n print('recv: %s' % recv_data.decode('gbk'))\n\n\ndef main():\n init()\n servo_init()\n # 0.init wiringpi\n # wiringpi.wiringPiSetup()\n # wiringpi.pinMode(0,1)\n # 1.创建socket\n listen_socket = 
socket(AF_INET, SOCK_STREAM)\n # stream流式套接字,对应tcp\n\n # 设置允许复用地址,当建立连接之后服务器先关闭,设置地址复用\n # 设置socket层属性 复用地址,不用等2msl, 允许\n listen_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\n # 2.绑定端口\n my_addr = ('192.168.146.107', 8888)\n listen_socket.bind(my_addr)\n\n # 3,接听状态\n listen_socket.listen(4) # 设置套接字成监听,4表示一个己连接队列长度\n print('listening...')\n\n # 4.等待客户端来请求\n\n # 父进程只专注接受连接请求\n while True:\n # 接受连接请求,创建连接套接字,用于客户端间通信\n connect_socket, client_addr = listen_socket.accept() # accept默认会引起阻塞\n # 新创建连接用的socket, 客户端的地址\n # print(connect_socket)\n\n\n # 每当来新的客户端连接,创建子进程,由子进程和客户端通信\n process_do_service = Process(target=do_service, args=(connect_socket,))\n process_do_service.start()\n\n # 父进程,关闭connect_socket\n connect_socket.close()\n\n" ]
[ [ "numpy.frombuffer", "numpy.max", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hjkim-haga/TF-OD-API
[ "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "22ac477ff4dfb93fe7a32c94b5f0b1e74330902b", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6" ]
[ "research/object_detection/exporter_lib_tf2_test.py", "research/slim/export_inference_graph_test.py", "research/slim/nets/mobilenet_v1.py", "official/vision/beta/modeling/decoders/fpn_test.py", "research/autoaugment/data_utils.py", "research/object_detection/utils/variables_helper_tf1_test.py", "official/vision/beta/projects/volumetric_models/modeling/backbones/unet_3d_test.py", "official/vision/image_classification/mnist_test.py", "research/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py", "official/vision/beta/modeling/backbones/revnet_test.py", "research/efficient-hrl/environments/ant.py", "official/modeling/activations/sigmoid_test.py", "research/object_detection/predictors/heads/mask_head.py", "official/nlp/keras_nlp/layers/position_embedding.py", "official/vision/beta/tasks/retinanet.py", "official/nlp/modeling/networks/mobile_bert_encoder_test.py", "official/nlp/modeling/models/dual_encoder_test.py", "research/deeplab/evaluation/test_utils.py", "research/slim/nets/lenet.py", "research/lfads/synth_data/generate_itb_data.py", "research/object_detection/predictors/heads/keras_box_head_tf2_test.py", "research/slim/nets/mobilenet/mobilenet.py", "research/cvt_text/task_specific/word_level/word_level_data.py", "official/nlp/tools/export_tfhub_lib.py", "research/object_detection/builders/graph_rewriter_builder_tf1_test.py", "official/vision/image_classification/dataset_factory.py", "research/efficient-hrl/train.py", "research/adversarial_text/gen_vocab.py", "research/object_detection/export_tflite_ssd_graph.py", "official/vision/beta/projects/yolo/dataloaders/classification_tfds_decoder.py" ]
[ "# Lint as: python2, python3\r\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Test for exporter_lib_v2.py.\"\"\"\r\n\r\nfrom __future__ import division\r\nimport io\r\nimport os\r\nimport unittest\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport six\r\n\r\nimport tensorflow.compat.v2 as tf\r\n\r\nfrom object_detection import exporter_lib_v2\r\nfrom object_detection.builders import model_builder\r\nfrom object_detection.core import model\r\nfrom object_detection.core import standard_fields as fields\r\nfrom object_detection.protos import pipeline_pb2\r\nfrom object_detection.utils import dataset_util\r\nfrom object_detection.utils import tf_version\r\n\r\nif six.PY2:\r\n import mock # pylint: disable=g-importing-member,g-import-not-at-top\r\nelse:\r\n from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top\r\n\r\n\r\nclass FakeModel(model.DetectionModel):\r\n\r\n def __init__(self, conv_weight_scalar=1.0):\r\n super(FakeModel, self).__init__(num_classes=2)\r\n self._conv = tf.keras.layers.Conv2D(\r\n filters=1, kernel_size=1, strides=(1, 1), padding='valid',\r\n kernel_initializer=tf.keras.initializers.Constant(\r\n value=conv_weight_scalar))\r\n\r\n def preprocess(self, inputs):\r\n return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)\r\n\r\n def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):\r\n return_dict = {'image': self._conv(preprocessed_inputs)}\r\n if 'side_inp_1' in side_inputs:\r\n return_dict['image'] += side_inputs['side_inp_1']\r\n return return_dict\r\n\r\n def postprocess(self, prediction_dict, true_image_shapes):\r\n predict_tensor_sum = tf.reduce_sum(prediction_dict['image'])\r\n with tf.control_dependencies(list(prediction_dict.values())):\r\n postprocessed_tensors = {\r\n 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],\r\n [0.5, 0.5, 0.8, 0.8]],\r\n [[0.5, 0.5, 1.0, 1.0],\r\n [0.0, 0.0, 0.0, 0.0]]], tf.float32),\r\n 'detection_scores': predict_tensor_sum + tf.constant(\r\n [[0.7, 0.6], [0.9, 0.0]], tf.float32),\r\n 'detection_classes': tf.constant([[0, 1],\r\n [1, 0]], tf.float32),\r\n 'num_detections': tf.constant([2, 1], tf.float32),\r\n }\r\n return postprocessed_tensors\r\n\r\n def predict_masks_from_boxes(self, prediction_dict, true_image_shapes, boxes):\r\n output_dict = self.postprocess(prediction_dict, true_image_shapes)\r\n output_dict.update({\r\n 'detection_masks': tf.ones(shape=(1, 2, 16), dtype=tf.float32),\r\n })\r\n return output_dict\r\n\r\n def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):\r\n pass\r\n\r\n def restore_from_objects(self, fine_tune_checkpoint_type):\r\n pass\r\n\r\n def loss(self, prediction_dict, true_image_shapes):\r\n pass\r\n\r\n def regularization_losses(self):\r\n pass\r\n\r\n def updates(self):\r\n 
pass\r\n\r\n\r\[email protected](tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase):\r\n\r\n def _save_checkpoint_from_mock_model(\r\n self, checkpoint_dir, conv_weight_scalar=6.0):\r\n mock_model = FakeModel(conv_weight_scalar)\r\n fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)\r\n preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)\r\n predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)\r\n mock_model.postprocess(predictions, true_image_shapes)\r\n\r\n ckpt = tf.train.Checkpoint(model=mock_model)\r\n exported_checkpoint_manager = tf.train.CheckpointManager(\r\n ckpt, checkpoint_dir, max_to_keep=1)\r\n exported_checkpoint_manager.save(checkpoint_number=0)\r\n\r\n @parameterized.parameters(\r\n {'input_type': 'image_tensor'},\r\n {'input_type': 'encoded_image_string_tensor'},\r\n {'input_type': 'tf_example'},\r\n )\r\n def test_export_yields_correct_directory_structure(\r\n self, input_type='image_tensor'):\r\n tmp_dir = self.get_temp_dir()\r\n self._save_checkpoint_from_mock_model(tmp_dir)\r\n with mock.patch.object(\r\n model_builder, 'build', autospec=True) as mock_builder:\r\n mock_builder.return_value = FakeModel()\r\n exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder\r\n output_directory = os.path.join(tmp_dir, 'output')\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n exporter_lib_v2.export_inference_graph(\r\n input_type=input_type,\r\n pipeline_config=pipeline_config,\r\n trained_checkpoint_dir=tmp_dir,\r\n output_directory=output_directory)\r\n self.assertTrue(os.path.exists(os.path.join(\r\n output_directory, 'saved_model', 'saved_model.pb')))\r\n self.assertTrue(os.path.exists(os.path.join(\r\n output_directory, 'saved_model', 'variables', 'variables.index')))\r\n self.assertTrue(os.path.exists(os.path.join(\r\n output_directory, 'saved_model', 'variables',\r\n 'variables.data-00000-of-00001')))\r\n self.assertTrue(os.path.exists(os.path.join(\r\n output_directory, 'checkpoint', 'ckpt-0.index')))\r\n self.assertTrue(os.path.exists(os.path.join(\r\n output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001')))\r\n self.assertTrue(os.path.exists(os.path.join(\r\n output_directory, 'pipeline.config')))\r\n\r\n def get_dummy_input(self, input_type):\r\n \"\"\"Get dummy input for the given input type.\"\"\"\r\n\r\n if input_type == 'image_tensor':\r\n return np.zeros((1, 20, 20, 3), dtype=np.uint8)\r\n if input_type == 'float_image_tensor':\r\n return np.zeros((1, 20, 20, 3), dtype=np.float32)\r\n elif input_type == 'encoded_image_string_tensor':\r\n image = Image.new('RGB', (20, 20))\r\n byte_io = io.BytesIO()\r\n image.save(byte_io, 'PNG')\r\n return [byte_io.getvalue()]\r\n elif input_type == 'tf_example':\r\n image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)\r\n encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()\r\n example = tf.train.Example(\r\n features=tf.train.Features(\r\n feature={\r\n 'image/encoded':\r\n dataset_util.bytes_feature(encoded_jpeg),\r\n 'image/format':\r\n dataset_util.bytes_feature(six.b('jpeg')),\r\n 'image/source_id':\r\n dataset_util.bytes_feature(six.b('image_id')),\r\n })).SerializeToString()\r\n return [example]\r\n\r\n @parameterized.parameters(\r\n {'input_type': 'image_tensor'},\r\n {'input_type': 'encoded_image_string_tensor'},\r\n {'input_type': 'tf_example'},\r\n {'input_type': 'float_image_tensor'},\r\n )\r\n def 
test_export_saved_model_and_run_inference(\r\n self, input_type='image_tensor'):\r\n tmp_dir = self.get_temp_dir()\r\n self._save_checkpoint_from_mock_model(tmp_dir)\r\n with mock.patch.object(\r\n model_builder, 'build', autospec=True) as mock_builder:\r\n mock_builder.return_value = FakeModel()\r\n exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder\r\n output_directory = os.path.join(tmp_dir, 'output')\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n exporter_lib_v2.export_inference_graph(\r\n input_type=input_type,\r\n pipeline_config=pipeline_config,\r\n trained_checkpoint_dir=tmp_dir,\r\n output_directory=output_directory)\r\n\r\n saved_model_path = os.path.join(output_directory, 'saved_model')\r\n detect_fn = tf.saved_model.load(saved_model_path)\r\n image = self.get_dummy_input(input_type)\r\n detections = detect_fn(tf.constant(image))\r\n\r\n detection_fields = fields.DetectionResultFields\r\n self.assertAllClose(detections[detection_fields.detection_boxes],\r\n [[[0.0, 0.0, 0.5, 0.5],\r\n [0.5, 0.5, 0.8, 0.8]],\r\n [[0.5, 0.5, 1.0, 1.0],\r\n [0.0, 0.0, 0.0, 0.0]]])\r\n self.assertAllClose(detections[detection_fields.detection_scores],\r\n [[0.7, 0.6], [0.9, 0.0]])\r\n self.assertAllClose(detections[detection_fields.detection_classes],\r\n [[1, 2], [2, 1]])\r\n self.assertAllClose(detections[detection_fields.num_detections], [2, 1])\r\n\r\n @parameterized.parameters(\r\n {'use_default_serving': True},\r\n {'use_default_serving': False}\r\n )\r\n def test_export_saved_model_and_run_inference_with_side_inputs(\r\n self, input_type='image_tensor', use_default_serving=True):\r\n tmp_dir = self.get_temp_dir()\r\n self._save_checkpoint_from_mock_model(tmp_dir)\r\n with mock.patch.object(\r\n model_builder, 'build', autospec=True) as mock_builder:\r\n mock_builder.return_value = FakeModel()\r\n exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder\r\n output_directory = os.path.join(tmp_dir, 'output')\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n exporter_lib_v2.export_inference_graph(\r\n input_type=input_type,\r\n pipeline_config=pipeline_config,\r\n trained_checkpoint_dir=tmp_dir,\r\n output_directory=output_directory,\r\n use_side_inputs=True,\r\n side_input_shapes='1/2,2',\r\n side_input_names='side_inp_1,side_inp_2',\r\n side_input_types='tf.float32,tf.uint8')\r\n\r\n saved_model_path = os.path.join(output_directory, 'saved_model')\r\n detect_fn = tf.saved_model.load(saved_model_path)\r\n detect_fn_sig = detect_fn.signatures['serving_default']\r\n image = tf.constant(self.get_dummy_input(input_type))\r\n side_input_1 = np.ones((1,), dtype=np.float32)\r\n side_input_2 = np.ones((2, 2), dtype=np.uint8)\r\n if use_default_serving:\r\n detections = detect_fn_sig(input_tensor=image,\r\n side_inp_1=tf.constant(side_input_1),\r\n side_inp_2=tf.constant(side_input_2))\r\n else:\r\n detections = detect_fn(image,\r\n tf.constant(side_input_1),\r\n tf.constant(side_input_2))\r\n\r\n detection_fields = fields.DetectionResultFields\r\n self.assertAllClose(detections[detection_fields.detection_boxes],\r\n [[[0.0, 0.0, 0.5, 0.5],\r\n [0.5, 0.5, 0.8, 0.8]],\r\n [[0.5, 0.5, 1.0, 1.0],\r\n [0.0, 0.0, 0.0, 0.0]]])\r\n self.assertAllClose(detections[detection_fields.detection_scores],\r\n [[400.7, 400.6], [400.9, 400.0]])\r\n self.assertAllClose(detections[detection_fields.detection_classes],\r\n [[1, 2], [2, 1]])\r\n self.assertAllClose(detections[detection_fields.num_detections], [2, 1])\r\n\r\n def 
test_export_checkpoint_and_run_inference_with_image(self):\r\n tmp_dir = self.get_temp_dir()\r\n self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0)\r\n with mock.patch.object(\r\n model_builder, 'build', autospec=True) as mock_builder:\r\n mock_builder.return_value = FakeModel()\r\n exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder\r\n output_directory = os.path.join(tmp_dir, 'output')\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n exporter_lib_v2.export_inference_graph(\r\n input_type='image_tensor',\r\n pipeline_config=pipeline_config,\r\n trained_checkpoint_dir=tmp_dir,\r\n output_directory=output_directory)\r\n\r\n mock_model = FakeModel()\r\n ckpt = tf.compat.v2.train.Checkpoint(\r\n model=mock_model)\r\n checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint')\r\n manager = tf.compat.v2.train.CheckpointManager(\r\n ckpt, checkpoint_dir, max_to_keep=7)\r\n ckpt.restore(manager.latest_checkpoint).expect_partial()\r\n\r\n fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32)\r\n preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)\r\n predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)\r\n detections = mock_model.postprocess(predictions, true_image_shapes)\r\n\r\n # 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3.\r\n self.assertAllClose(detections['detection_scores'],\r\n [[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]])\r\n\r\n\r\nclass DetectionFromImageAndBoxModuleTest(tf.test.TestCase):\r\n\r\n def get_dummy_input(self, input_type):\r\n \"\"\"Get dummy input for the given input type.\"\"\"\r\n\r\n if input_type == 'image_tensor' or input_type == 'image_and_boxes_tensor':\r\n return np.zeros((1, 20, 20, 3), dtype=np.uint8)\r\n if input_type == 'float_image_tensor':\r\n return np.zeros((1, 20, 20, 3), dtype=np.float32)\r\n elif input_type == 'encoded_image_string_tensor':\r\n image = Image.new('RGB', (20, 20))\r\n byte_io = io.BytesIO()\r\n image.save(byte_io, 'PNG')\r\n return [byte_io.getvalue()]\r\n elif input_type == 'tf_example':\r\n image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)\r\n encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()\r\n example = tf.train.Example(\r\n features=tf.train.Features(\r\n feature={\r\n 'image/encoded':\r\n dataset_util.bytes_feature(encoded_jpeg),\r\n 'image/format':\r\n dataset_util.bytes_feature(six.b('jpeg')),\r\n 'image/source_id':\r\n dataset_util.bytes_feature(six.b('image_id')),\r\n })).SerializeToString()\r\n return [example]\r\n\r\n def _save_checkpoint_from_mock_model(self,\r\n checkpoint_dir,\r\n conv_weight_scalar=6.0):\r\n mock_model = FakeModel(conv_weight_scalar)\r\n fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)\r\n preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)\r\n predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)\r\n mock_model.postprocess(predictions, true_image_shapes)\r\n\r\n ckpt = tf.train.Checkpoint(model=mock_model)\r\n exported_checkpoint_manager = tf.train.CheckpointManager(\r\n ckpt, checkpoint_dir, max_to_keep=1)\r\n exported_checkpoint_manager.save(checkpoint_number=0)\r\n\r\n def test_export_saved_model_and_run_inference_for_segmentation(\r\n self, input_type='image_and_boxes_tensor'):\r\n tmp_dir = self.get_temp_dir()\r\n self._save_checkpoint_from_mock_model(tmp_dir)\r\n\r\n with mock.patch.object(\r\n model_builder, 'build', autospec=True) as mock_builder:\r\n mock_builder.return_value = 
FakeModel()\r\n exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder\r\n output_directory = os.path.join(tmp_dir, 'output')\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n exporter_lib_v2.export_inference_graph(\r\n input_type=input_type,\r\n pipeline_config=pipeline_config,\r\n trained_checkpoint_dir=tmp_dir,\r\n output_directory=output_directory)\r\n\r\n saved_model_path = os.path.join(output_directory, 'saved_model')\r\n detect_fn = tf.saved_model.load(saved_model_path)\r\n image = self.get_dummy_input(input_type)\r\n boxes = tf.constant([\r\n [\r\n [0.0, 0.0, 0.5, 0.5],\r\n [0.5, 0.5, 0.8, 0.8],\r\n ],\r\n ])\r\n detections = detect_fn(tf.constant(image), boxes)\r\n\r\n detection_fields = fields.DetectionResultFields\r\n self.assertIn(detection_fields.detection_masks, detections)\r\n self.assertListEqual(\r\n list(detections[detection_fields.detection_masks].shape), [1, 2, 16])\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.enable_v2_behavior()\r\n tf.test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for export_inference_graph.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\n\r\n\r\nimport tensorflow.compat.v1 as tf\r\n\r\nfrom tensorflow.python.platform import gfile\r\nimport export_inference_graph\r\n\r\n\r\nclass ExportInferenceGraphTest(tf.test.TestCase):\r\n\r\n def testExportInferenceGraph(self):\r\n tmpdir = self.get_temp_dir()\r\n output_file = os.path.join(tmpdir, 'inception_v3.pb')\r\n flags = tf.app.flags.FLAGS\r\n flags.output_file = output_file\r\n flags.model_name = 'inception_v3'\r\n flags.dataset_dir = tmpdir\r\n export_inference_graph.main(None)\r\n self.assertTrue(gfile.Exists(output_file))\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# =============================================================================\r\n\"\"\"MobileNet v1.\r\n\r\nMobileNet is a general architecture and can be used for multiple use cases.\r\nDepending on the use case, it can use different input layer size and different\r\nhead (for example: embeddings, localization and classification).\r\n\r\nAs described in https://arxiv.org/abs/1704.04861.\r\n\r\n MobileNets: Efficient Convolutional Neural Networks for\r\n Mobile Vision Applications\r\n Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,\r\n Tobias Weyand, Marco Andreetto, Hartwig Adam\r\n\r\n100% Mobilenet V1 (base) with input size 224x224:\r\n\r\nSee mobilenet_v1()\r\n\r\nLayer params macs\r\n--------------------------------------------------------------------------------\r\nMobilenetV1/Conv2d_0/Conv2D: 864 10,838,016\r\nMobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672\r\nMobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112\r\nMobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336\r\nMobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112\r\nMobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672\r\nMobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224\r\nMobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168\r\nMobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112\r\nMobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336\r\nMobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224\r\nMobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584\r\nMobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112\r\nMobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168\r\nMobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224\r\nMobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168\r\nMobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224\r\nMobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168\r\nMobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224\r\nMobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168\r\nMobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224\r\nMobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168\r\nMobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224\r\nMobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792\r\nMobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112\r\nMobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584\r\nMobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224\r\n--------------------------------------------------------------------------------\r\nTotal: 3,185,088 567,716,352\r\n\r\n\r\n75% Mobilenet V1 (base) with input size 128x128:\r\n\r\nSee mobilenet_v1_075()\r\n\r\nLayer params macs\r\n--------------------------------------------------------------------------------\r\nMobilenetV1/Conv2d_0/Conv2D: 648 2,654,208\r\nMobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736\r\nMobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592\r\nMobilenetV1/Conv2d_2_depthwise/depthwise: 432 
442,368\r\nMobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592\r\nMobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736\r\nMobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184\r\nMobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184\r\nMobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592\r\nMobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368\r\nMobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184\r\nMobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592\r\nMobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592\r\nMobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184\r\nMobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184\r\nMobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184\r\nMobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184\r\nMobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184\r\nMobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184\r\nMobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184\r\nMobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184\r\nMobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184\r\nMobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184\r\nMobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296\r\nMobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592\r\nMobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592\r\nMobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184\r\n--------------------------------------------------------------------------------\r\nTotal: 1,800,144 106,002,432\r\n\r\n\"\"\"\r\n\r\n# Tensorflow mandates these.\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom collections import namedtuple\r\nimport functools\r\n\r\nimport tensorflow.compat.v1 as tf\r\nimport tf_slim as slim\r\n\r\n# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture\r\n# Conv defines 3x3 convolution layers\r\n# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.\r\n# stride is the stride of the convolution\r\n# depth is the number of channels or filters in a layer\r\nConv = namedtuple('Conv', ['kernel', 'stride', 'depth'])\r\nDepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])\r\n\r\n# MOBILENETV1_CONV_DEFS specifies the MobileNet body\r\nMOBILENETV1_CONV_DEFS = [\r\n Conv(kernel=[3, 3], stride=2, depth=32),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=64),\r\n DepthSepConv(kernel=[3, 3], stride=2, depth=128),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=128),\r\n DepthSepConv(kernel=[3, 3], stride=2, depth=256),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=256),\r\n DepthSepConv(kernel=[3, 3], stride=2, depth=512),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\r\n DepthSepConv(kernel=[3, 3], stride=2, depth=1024),\r\n DepthSepConv(kernel=[3, 3], stride=1, depth=1024)\r\n]\r\n\r\n\r\ndef _fixed_padding(inputs, kernel_size, rate=1):\r\n \"\"\"Pads the input along the spatial dimensions independently of input size.\r\n\r\n Pads the input such that if it was used in a convolution with 'VALID' padding,\r\n the output would have the same dimensions as if the unpadded input was used\r\n in a convolution with 'SAME' padding.\r\n\r\n Args:\r\n inputs: A tensor of size [batch, height_in, width_in, channels].\r\n kernel_size: The kernel to be used in 
the conv2d or max_pool2d operation.\r\n rate: An integer, rate for atrous convolution.\r\n\r\n Returns:\r\n output: A tensor of size [batch, height_out, width_out, channels] with the\r\n input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).\r\n \"\"\"\r\n kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),\r\n kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]\r\n pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]\r\n pad_beg = [pad_total[0] // 2, pad_total[1] // 2]\r\n pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]\r\n padded_inputs = tf.pad(\r\n tensor=inputs,\r\n paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],\r\n [0, 0]])\r\n return padded_inputs\r\n\r\n\r\ndef mobilenet_v1_base(inputs,\r\n final_endpoint='Conv2d_13_pointwise',\r\n min_depth=8,\r\n depth_multiplier=1.0,\r\n conv_defs=None,\r\n output_stride=None,\r\n use_explicit_padding=False,\r\n scope=None):\r\n \"\"\"Mobilenet v1.\r\n\r\n Constructs a Mobilenet v1 network from inputs to the given final endpoint.\r\n\r\n Args:\r\n inputs: a tensor of shape [batch_size, height, width, channels].\r\n final_endpoint: specifies the endpoint to construct the network up to. It\r\n can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',\r\n 'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise,\r\n 'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',\r\n 'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',\r\n 'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].\r\n min_depth: Minimum depth value (number of channels) for all convolution ops.\r\n Enforced when depth_multiplier < 1, and not an active constraint when\r\n depth_multiplier >= 1.\r\n depth_multiplier: Float multiplier for the depth (number of channels)\r\n for all convolution ops. The value must be greater than zero. Typical\r\n usage will be to set this value in (0, 1) to reduce the number of\r\n parameters or computation cost of the model.\r\n conv_defs: A list of ConvDef namedtuples specifying the net architecture.\r\n output_stride: An integer that specifies the requested ratio of input to\r\n output spatial resolution. If not None, then we invoke atrous convolution\r\n if necessary to prevent the network from reducing the spatial resolution\r\n of the activation maps. 
Allowed values are 8 (accurate fully convolutional\r\n mode), 16 (fast fully convolutional mode), 32 (classification mode).\r\n use_explicit_padding: Use 'VALID' padding for convolutions, but prepad\r\n inputs so that the output dimensions are the same as if 'SAME' padding\r\n were used.\r\n scope: Optional variable_scope.\r\n\r\n Returns:\r\n tensor_out: output tensor corresponding to the final_endpoint.\r\n end_points: a set of activations for external use, for example summaries or\r\n losses.\r\n\r\n Raises:\r\n ValueError: if final_endpoint is not set to one of the predefined values,\r\n or depth_multiplier <= 0, or the target output_stride is not\r\n allowed.\r\n \"\"\"\r\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\r\n end_points = {}\r\n\r\n # Used to find thinned depths for each layer.\r\n if depth_multiplier <= 0:\r\n raise ValueError('depth_multiplier is not greater than zero.')\r\n\r\n if conv_defs is None:\r\n conv_defs = MOBILENETV1_CONV_DEFS\r\n\r\n if output_stride is not None and output_stride not in [8, 16, 32]:\r\n raise ValueError('Only allowed output_stride values are 8, 16, 32.')\r\n\r\n padding = 'SAME'\r\n if use_explicit_padding:\r\n padding = 'VALID'\r\n with tf.variable_scope(scope, 'MobilenetV1', [inputs]):\r\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):\r\n # The current_stride variable keeps track of the output stride of the\r\n # activations, i.e., the running product of convolution strides up to the\r\n # current network layer. This allows us to invoke atrous convolution\r\n # whenever applying the next convolution would result in the activations\r\n # having output stride larger than the target output_stride.\r\n current_stride = 1\r\n\r\n # The atrous convolution rate parameter.\r\n rate = 1\r\n\r\n net = inputs\r\n for i, conv_def in enumerate(conv_defs):\r\n end_point_base = 'Conv2d_%d' % i\r\n\r\n if output_stride is not None and current_stride == output_stride:\r\n # If we have reached the target output_stride, then we need to employ\r\n # atrous convolution with stride=1 and multiply the atrous rate by the\r\n # current unit's stride for use in subsequent layers.\r\n layer_stride = 1\r\n layer_rate = rate\r\n rate *= conv_def.stride\r\n else:\r\n layer_stride = conv_def.stride\r\n layer_rate = 1\r\n current_stride *= conv_def.stride\r\n\r\n if isinstance(conv_def, Conv):\r\n end_point = end_point_base\r\n if use_explicit_padding:\r\n net = _fixed_padding(net, conv_def.kernel)\r\n net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,\r\n stride=conv_def.stride,\r\n scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint:\r\n return net, end_points\r\n\r\n elif isinstance(conv_def, DepthSepConv):\r\n end_point = end_point_base + '_depthwise'\r\n\r\n # By passing filters=None\r\n # separable_conv2d produces only a depthwise convolution layer\r\n if use_explicit_padding:\r\n net = _fixed_padding(net, conv_def.kernel, layer_rate)\r\n net = slim.separable_conv2d(net, None, conv_def.kernel,\r\n depth_multiplier=1,\r\n stride=layer_stride,\r\n rate=layer_rate,\r\n scope=end_point)\r\n\r\n end_points[end_point] = net\r\n if end_point == final_endpoint:\r\n return net, end_points\r\n\r\n end_point = end_point_base + '_pointwise'\r\n\r\n net = slim.conv2d(net, depth(conv_def.depth), [1, 1],\r\n stride=1,\r\n scope=end_point)\r\n\r\n end_points[end_point] = net\r\n if end_point == final_endpoint:\r\n return net, end_points\r\n else:\r\n raise ValueError('Unknown convolution type 
%s for layer %d'\r\n % (conv_def.ltype, i))\r\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\r\n\r\n\r\ndef mobilenet_v1(inputs,\r\n num_classes=1000,\r\n dropout_keep_prob=0.999,\r\n is_training=True,\r\n min_depth=8,\r\n depth_multiplier=1.0,\r\n conv_defs=None,\r\n prediction_fn=slim.softmax,\r\n spatial_squeeze=True,\r\n reuse=None,\r\n scope='MobilenetV1',\r\n global_pool=False):\r\n \"\"\"Mobilenet v1 model for classification.\r\n\r\n Args:\r\n inputs: a tensor of shape [batch_size, height, width, channels].\r\n num_classes: number of predicted classes. If 0 or None, the logits layer\r\n is omitted and the input features to the logits layer (before dropout)\r\n are returned instead.\r\n dropout_keep_prob: the percentage of activation values that are retained.\r\n is_training: whether is training or not.\r\n min_depth: Minimum depth value (number of channels) for all convolution ops.\r\n Enforced when depth_multiplier < 1, and not an active constraint when\r\n depth_multiplier >= 1.\r\n depth_multiplier: Float multiplier for the depth (number of channels)\r\n for all convolution ops. The value must be greater than zero. Typical\r\n usage will be to set this value in (0, 1) to reduce the number of\r\n parameters or computation cost of the model.\r\n conv_defs: A list of ConvDef namedtuples specifying the net architecture.\r\n prediction_fn: a function to get predictions out of logits.\r\n spatial_squeeze: if True, logits is of shape is [B, C], if false logits is\r\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\r\n reuse: whether or not the network and its variables should be reused. To be\r\n able to reuse 'scope' must be given.\r\n scope: Optional variable_scope.\r\n global_pool: Optional boolean flag to control the avgpooling before the\r\n logits layer. If false or unset, pooling is done with a fixed window\r\n that reduces default-sized inputs to 1x1, while larger inputs lead to\r\n larger outputs. 
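# A dry run of the stride/rate bookkeeping in mobilenet_v1_base, assuming
# only the per-layer strides matter; `plan_strides` is a hypothetical
# helper, not part of this module. Once the running product of strides
# reaches output_stride, later strided layers run with stride 1 and the
# atrous rate absorbs their stride instead.
def plan_strides(strides, output_stride):
  current_stride, rate, plan = 1, 1, []
  for s in strides:
    if output_stride is not None and current_stride == output_stride:
      plan.append((1, rate))  # (layer_stride, layer_rate)
      rate *= s
    else:
      plan.append((s, 1))
      current_stride *= s
  return plan

# Strides of the 14 layers in MOBILENETV1_CONV_DEFS, with output_stride=16:
# the last stride-2 layer becomes (1, 1) and the final layer (1, 2), so the
# feature map stays at 1/16 of the input resolution.
print(plan_strides([2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1], 16))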
If true, any input size is pooled down to 1x1.\r\n\r\n Returns:\r\n net: a 2D Tensor with the logits (pre-softmax activations) if num_classes\r\n is a non-zero integer, or the non-dropped-out input to the logits layer\r\n if num_classes is 0 or None.\r\n end_points: a dictionary from components of the network to the corresponding\r\n activation.\r\n\r\n Raises:\r\n ValueError: Input rank is invalid.\r\n \"\"\"\r\n input_shape = inputs.get_shape().as_list()\r\n if len(input_shape) != 4:\r\n raise ValueError('Invalid input tensor rank, expected 4, was: %d' %\r\n len(input_shape))\r\n\r\n with tf.variable_scope(\r\n scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:\r\n with slim.arg_scope([slim.batch_norm, slim.dropout],\r\n is_training=is_training):\r\n net, end_points = mobilenet_v1_base(inputs, scope=scope,\r\n min_depth=min_depth,\r\n depth_multiplier=depth_multiplier,\r\n conv_defs=conv_defs)\r\n with tf.variable_scope('Logits'):\r\n if global_pool:\r\n # Global average pooling.\r\n net = tf.reduce_mean(\r\n input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')\r\n end_points['global_pool'] = net\r\n else:\r\n # Pooling with a fixed kernel size.\r\n kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])\r\n net = slim.avg_pool2d(net, kernel_size, padding='VALID',\r\n scope='AvgPool_1a')\r\n end_points['AvgPool_1a'] = net\r\n if not num_classes:\r\n return net, end_points\r\n # 1 x 1 x 1024\r\n net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')\r\n logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\r\n normalizer_fn=None, scope='Conv2d_1c_1x1')\r\n if spatial_squeeze:\r\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\r\n end_points['Logits'] = logits\r\n if prediction_fn:\r\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\r\n return logits, end_points\r\n\r\nmobilenet_v1.default_image_size = 224\r\n\r\n\r\ndef wrapped_partial(func, *args, **kwargs):\r\n partial_func = functools.partial(func, *args, **kwargs)\r\n functools.update_wrapper(partial_func, func)\r\n return partial_func\r\n\r\n\r\nmobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)\r\nmobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)\r\nmobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)\r\n\r\n\r\ndef _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\r\n \"\"\"Define kernel size which is automatically reduced for small input.\r\n\r\n If the shape of the input images is unknown at graph construction time this\r\n function assumes that the input images are large enough.\r\n\r\n Args:\r\n input_tensor: input tensor of size [batch_size, height, width, channels].\r\n kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]\r\n\r\n Returns:\r\n a tensor with the kernel size.\r\n \"\"\"\r\n shape = input_tensor.get_shape().as_list()\r\n if shape[1] is None or shape[2] is None:\r\n kernel_size_out = kernel_size\r\n else:\r\n kernel_size_out = [min(shape[1], kernel_size[0]),\r\n min(shape[2], kernel_size[1])]\r\n return kernel_size_out\r\n\r\n\r\ndef mobilenet_v1_arg_scope(\r\n is_training=True,\r\n weight_decay=0.00004,\r\n stddev=0.09,\r\n regularize_depthwise=False,\r\n batch_norm_decay=0.9997,\r\n batch_norm_epsilon=0.001,\r\n batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,\r\n normalizer_fn=slim.batch_norm):\r\n \"\"\"Defines the default MobilenetV1 arg scope.\r\n\r\n Args:\r\n is_training: Whether or not we're training the model. 
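# A sketch of typical usage of the pieces defined in this file, assuming a
# hypothetical NHWC placeholder `images`; the imports mirror the ones at
# the top of this module.
import tensorflow.compat.v1 as tf
import tf_slim as slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
  # The 0.50-width variant built by wrapped_partial above.
  logits, end_points = mobilenet_v1_050(images, num_classes=1000,
                                        is_training=False)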
If this is set to\r\n None, the parameter is not added to the batch_norm arg_scope.\r\n weight_decay: The weight decay to use for regularizing the model.\r\n stddev: The standard deviation of the trunctated normal weight initializer.\r\n regularize_depthwise: Whether or not apply regularization on depthwise.\r\n batch_norm_decay: Decay for batch norm moving average.\r\n batch_norm_epsilon: Small float added to variance to avoid dividing by zero\r\n in batch norm.\r\n batch_norm_updates_collections: Collection for the update ops for\r\n batch norm.\r\n normalizer_fn: Normalization function to apply after convolution.\r\n\r\n Returns:\r\n An `arg_scope` to use for the mobilenet v1 model.\r\n \"\"\"\r\n batch_norm_params = {\r\n 'center': True,\r\n 'scale': True,\r\n 'decay': batch_norm_decay,\r\n 'epsilon': batch_norm_epsilon,\r\n 'updates_collections': batch_norm_updates_collections,\r\n }\r\n if is_training is not None:\r\n batch_norm_params['is_training'] = is_training\r\n\r\n # Set weight_decay for weights in Conv and DepthSepConv layers.\r\n weights_init = tf.truncated_normal_initializer(stddev=stddev)\r\n regularizer = slim.l2_regularizer(weight_decay)\r\n if regularize_depthwise:\r\n depthwise_regularizer = regularizer\r\n else:\r\n depthwise_regularizer = None\r\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\r\n weights_initializer=weights_init,\r\n activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn):\r\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\r\n with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):\r\n with slim.arg_scope([slim.separable_conv2d],\r\n weights_regularizer=depthwise_regularizer) as sc:\r\n return sc\r\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# Lint as: python3\r\n\"\"\"Tests for FPN.\"\"\"\r\n\r\n# Import libraries\r\nfrom absl.testing import parameterized\r\nimport tensorflow as tf\r\n\r\nfrom official.vision.beta.modeling.backbones import mobilenet\r\nfrom official.vision.beta.modeling.backbones import resnet\r\nfrom official.vision.beta.modeling.decoders import fpn\r\n\r\n\r\nclass FPNTest(parameterized.TestCase, tf.test.TestCase):\r\n\r\n @parameterized.parameters(\r\n (256, 3, 7, False),\r\n (256, 3, 7, True),\r\n )\r\n def test_network_creation(self, input_size, min_level, max_level,\r\n use_separable_conv):\r\n \"\"\"Test creation of FPN.\"\"\"\r\n tf.keras.backend.set_image_data_format('channels_last')\r\n\r\n inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)\r\n\r\n backbone = resnet.ResNet(model_id=50)\r\n network = fpn.FPN(\r\n input_specs=backbone.output_specs,\r\n min_level=min_level,\r\n max_level=max_level,\r\n use_separable_conv=use_separable_conv)\r\n\r\n endpoints = backbone(inputs)\r\n feats = network(endpoints)\r\n\r\n for level in range(min_level, max_level + 1):\r\n self.assertIn(str(level), feats)\r\n self.assertAllEqual(\r\n [1, input_size // 2**level, input_size // 
2**level, 256],\r\n feats[str(level)].shape.as_list())\r\n\r\n @parameterized.parameters(\r\n (256, 3, 7, False),\r\n (256, 3, 7, True),\r\n )\r\n def test_network_creation_with_mobilenet(self, input_size, min_level,\r\n max_level, use_separable_conv):\r\n \"\"\"Test creation of FPN with mobilenet backbone.\"\"\"\r\n tf.keras.backend.set_image_data_format('channels_last')\r\n\r\n inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)\r\n\r\n backbone = mobilenet.MobileNet(model_id='MobileNetV2')\r\n network = fpn.FPN(\r\n input_specs=backbone.output_specs,\r\n min_level=min_level,\r\n max_level=max_level,\r\n use_separable_conv=use_separable_conv)\r\n\r\n endpoints = backbone(inputs)\r\n feats = network(endpoints)\r\n\r\n for level in range(min_level, max_level + 1):\r\n self.assertIn(str(level), feats)\r\n self.assertAllEqual(\r\n [1, input_size // 2**level, input_size // 2**level, 256],\r\n feats[str(level)].shape.as_list())\r\n\r\n def test_serialize_deserialize(self):\r\n # Create a network object that sets all of its config options.\r\n kwargs = dict(\r\n input_specs=resnet.ResNet(model_id=50).output_specs,\r\n min_level=3,\r\n max_level=7,\r\n num_filters=256,\r\n use_separable_conv=False,\r\n use_sync_bn=False,\r\n activation='relu',\r\n norm_momentum=0.99,\r\n norm_epsilon=0.001,\r\n kernel_initializer='VarianceScaling',\r\n kernel_regularizer=None,\r\n bias_regularizer=None,\r\n )\r\n network = fpn.FPN(**kwargs)\r\n\r\n expected_config = dict(kwargs)\r\n self.assertEqual(network.get_config(), expected_config)\r\n\r\n # Create another network object from the first object's config.\r\n new_network = fpn.FPN.from_config(network.get_config())\r\n\r\n # Validate that the config can be forced to JSON.\r\n _ = new_network.to_json()\r\n\r\n # If the serialization was successful, the new config should match the old.\r\n self.assertAllEqual(network.get_config(), new_network.get_config())\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n", "# Copyright 2018 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Data utils for CIFAR-10 and CIFAR-100.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport copy\r\nimport cPickle\r\nimport os\r\nimport augmentation_transforms\r\nimport numpy as np\r\nimport policies as found_policies\r\nimport tensorflow as tf\r\n\r\n\r\n# pylint:disable=logging-format-interpolation\r\n\r\n\r\nclass DataSet(object):\r\n \"\"\"Dataset object that produces augmented training and eval data.\"\"\"\r\n\r\n def __init__(self, hparams):\r\n self.hparams = hparams\r\n self.epochs = 0\r\n self.curr_train_index = 0\r\n\r\n all_labels = []\r\n\r\n self.good_policies = found_policies.good_policies()\r\n\r\n # Determine how many databatched to load\r\n num_data_batches_to_load = 5\r\n total_batches_to_load 
= num_data_batches_to_load\r\n train_batches_to_load = total_batches_to_load\r\n assert hparams.train_size + hparams.validation_size <= 50000\r\n if hparams.eval_test:\r\n total_batches_to_load += 1\r\n # Determine how many images we have loaded\r\n total_dataset_size = 10000 * num_data_batches_to_load\r\n train_dataset_size = total_dataset_size\r\n if hparams.eval_test:\r\n total_dataset_size += 10000\r\n\r\n if hparams.dataset == 'cifar10':\r\n all_data = np.empty((total_batches_to_load, 10000, 3072), dtype=np.uint8)\r\n elif hparams.dataset == 'cifar100':\r\n assert num_data_batches_to_load == 5\r\n all_data = np.empty((1, 50000, 3072), dtype=np.uint8)\r\n if hparams.eval_test:\r\n test_data = np.empty((1, 10000, 3072), dtype=np.uint8)\r\n if hparams.dataset == 'cifar10':\r\n tf.logging.info('Cifar10')\r\n datafiles = [\r\n 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',\r\n 'data_batch_5']\r\n\r\n datafiles = datafiles[:train_batches_to_load]\r\n if hparams.eval_test:\r\n datafiles.append('test_batch')\r\n num_classes = 10\r\n elif hparams.dataset == 'cifar100':\r\n datafiles = ['train']\r\n if hparams.eval_test:\r\n datafiles.append('test')\r\n num_classes = 100\r\n else:\r\n raise NotImplementedError('Unimplemented dataset: ', hparams.dataset)\r\n if hparams.dataset != 'test':\r\n for file_num, f in enumerate(datafiles):\r\n d = unpickle(os.path.join(hparams.data_path, f))\r\n if f == 'test':\r\n test_data[0] = copy.deepcopy(d['data'])\r\n all_data = np.concatenate([all_data, test_data], axis=1)\r\n else:\r\n all_data[file_num] = copy.deepcopy(d['data'])\r\n if hparams.dataset == 'cifar10':\r\n labels = np.array(d['labels'])\r\n else:\r\n labels = np.array(d['fine_labels'])\r\n nsamples = len(labels)\r\n for idx in range(nsamples):\r\n all_labels.append(labels[idx])\r\n\r\n all_data = all_data.reshape(total_dataset_size, 3072)\r\n all_data = all_data.reshape(-1, 3, 32, 32)\r\n all_data = all_data.transpose(0, 2, 3, 1).copy()\r\n all_data = all_data / 255.0\r\n mean = augmentation_transforms.MEANS\r\n std = augmentation_transforms.STDS\r\n tf.logging.info('mean:{} std: {}'.format(mean, std))\r\n\r\n all_data = (all_data - mean) / std\r\n all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]\r\n assert len(all_data) == len(all_labels)\r\n tf.logging.info(\r\n 'In CIFAR10 loader, number of images: {}'.format(len(all_data)))\r\n\r\n # Break off test data\r\n if hparams.eval_test:\r\n self.test_images = all_data[train_dataset_size:]\r\n self.test_labels = all_labels[train_dataset_size:]\r\n\r\n # Shuffle the rest of the data\r\n all_data = all_data[:train_dataset_size]\r\n all_labels = all_labels[:train_dataset_size]\r\n np.random.seed(0)\r\n perm = np.arange(len(all_data))\r\n np.random.shuffle(perm)\r\n all_data = all_data[perm]\r\n all_labels = all_labels[perm]\r\n\r\n # Break into train and val\r\n train_size, val_size = hparams.train_size, hparams.validation_size\r\n assert 50000 >= train_size + val_size\r\n self.train_images = all_data[:train_size]\r\n self.train_labels = all_labels[:train_size]\r\n self.val_images = all_data[train_size:train_size + val_size]\r\n self.val_labels = all_labels[train_size:train_size + val_size]\r\n self.num_train = self.train_images.shape[0]\r\n\r\n def next_batch(self):\r\n \"\"\"Return the next minibatch of augmented data.\"\"\"\r\n next_train_index = self.curr_train_index + self.hparams.batch_size\r\n if next_train_index > self.num_train:\r\n # Increase epoch number\r\n epoch = self.epochs + 1\r\n self.reset()\r\n 
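      # Note: reset() reshuffles the training data and zeroes both
      # self.epochs and self.curr_train_index, which is why the epoch
      # count saved above is restored on the next line.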
self.epochs = epoch\r\n batched_data = (\r\n self.train_images[self.curr_train_index:\r\n self.curr_train_index + self.hparams.batch_size],\r\n self.train_labels[self.curr_train_index:\r\n self.curr_train_index + self.hparams.batch_size])\r\n final_imgs = []\r\n\r\n images, labels = batched_data\r\n for data in images:\r\n epoch_policy = self.good_policies[np.random.choice(\r\n len(self.good_policies))]\r\n final_img = augmentation_transforms.apply_policy(\r\n epoch_policy, data)\r\n final_img = augmentation_transforms.random_flip(\r\n augmentation_transforms.zero_pad_and_crop(final_img, 4))\r\n # Apply cutout\r\n final_img = augmentation_transforms.cutout_numpy(final_img)\r\n final_imgs.append(final_img)\r\n batched_data = (np.array(final_imgs, np.float32), labels)\r\n self.curr_train_index += self.hparams.batch_size\r\n return batched_data\r\n\r\n def reset(self):\r\n \"\"\"Reset training data and index into the training data.\"\"\"\r\n self.epochs = 0\r\n # Shuffle the training data\r\n perm = np.arange(self.num_train)\r\n np.random.shuffle(perm)\r\n assert self.num_train == self.train_images.shape[\r\n 0], 'Error incorrect shuffling mask'\r\n self.train_images = self.train_images[perm]\r\n self.train_labels = self.train_labels[perm]\r\n self.curr_train_index = 0\r\n\r\n\r\ndef unpickle(f):\r\n tf.logging.info('loading file: {}'.format(f))\r\n fo = tf.gfile.Open(f, 'r')\r\n d = cPickle.load(fo)\r\n fo.close()\r\n return d\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for object_detection.utils.variables_helper.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport unittest\r\nimport tensorflow.compat.v1 as tf\r\n\r\nfrom object_detection.utils import test_case\r\nfrom object_detection.utils import tf_version\r\nfrom object_detection.utils import variables_helper\r\n\r\n\r\[email protected](tf_version.is_tf2(), 'Skipping TF1.X only test.')\r\nclass FilterVariablesTest(test_case.TestCase):\r\n\r\n def _create_variables(self):\r\n return [tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights'),\r\n tf.Variable(1.0, name='FeatureExtractor/InceptionV3/biases'),\r\n tf.Variable(1.0, name='StackProposalGenerator/weights'),\r\n tf.Variable(1.0, name='StackProposalGenerator/biases')]\r\n\r\n def test_return_all_variables_when_empty_regex(self):\r\n variables = self._create_variables()\r\n out_variables = variables_helper.filter_variables(variables, [''])\r\n self.assertCountEqual(out_variables, variables)\r\n\r\n def test_return_variables_which_do_not_match_single_regex(self):\r\n variables = self._create_variables()\r\n out_variables = variables_helper.filter_variables(variables,\r\n ['FeatureExtractor/.*'])\r\n self.assertCountEqual(out_variables, variables[2:])\r\n\r\n def 
test_return_variables_which_do_not_match_any_regex_in_list(self):\r\n variables = self._create_variables()\r\n out_variables = variables_helper.filter_variables(variables, [\r\n 'FeatureExtractor.*biases', 'StackProposalGenerator.*biases'\r\n ])\r\n self.assertCountEqual(out_variables, [variables[0], variables[2]])\r\n\r\n def test_return_variables_matching_empty_regex_list(self):\r\n variables = self._create_variables()\r\n out_variables = variables_helper.filter_variables(\r\n variables, [''], invert=True)\r\n self.assertCountEqual(out_variables, [])\r\n\r\n def test_return_variables_matching_some_regex_in_list(self):\r\n variables = self._create_variables()\r\n out_variables = variables_helper.filter_variables(\r\n variables,\r\n ['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'],\r\n invert=True)\r\n self.assertCountEqual(out_variables, [variables[1], variables[3]])\r\n\r\n\r\[email protected](tf_version.is_tf2(), 'Skipping TF1.X only test.')\r\nclass MultiplyGradientsMatchingRegexTest(tf.test.TestCase):\r\n\r\n def _create_grads_and_vars(self):\r\n return [(tf.constant(1.0),\r\n tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),\r\n (tf.constant(2.0),\r\n tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),\r\n (tf.constant(3.0),\r\n tf.Variable(3.0, name='StackProposalGenerator/weights')),\r\n (tf.constant(4.0),\r\n tf.Variable(4.0, name='StackProposalGenerator/biases'))]\r\n\r\n def test_multiply_all_feature_extractor_variables(self):\r\n grads_and_vars = self._create_grads_and_vars()\r\n regex_list = ['FeatureExtractor/.*']\r\n multiplier = 0.0\r\n grads_and_vars = variables_helper.multiply_gradients_matching_regex(\r\n grads_and_vars, regex_list, multiplier)\r\n exp_output = [(0.0, 1.0), (0.0, 2.0), (3.0, 3.0), (4.0, 4.0)]\r\n init_op = tf.global_variables_initializer()\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n output = sess.run(grads_and_vars)\r\n self.assertCountEqual(output, exp_output)\r\n\r\n def test_multiply_all_bias_variables(self):\r\n grads_and_vars = self._create_grads_and_vars()\r\n regex_list = ['.*/biases']\r\n multiplier = 0.0\r\n grads_and_vars = variables_helper.multiply_gradients_matching_regex(\r\n grads_and_vars, regex_list, multiplier)\r\n exp_output = [(1.0, 1.0), (0.0, 2.0), (3.0, 3.0), (0.0, 4.0)]\r\n init_op = tf.global_variables_initializer()\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n output = sess.run(grads_and_vars)\r\n self.assertCountEqual(output, exp_output)\r\n\r\n\r\[email protected](tf_version.is_tf2(), 'Skipping TF1.X only test.')\r\nclass FreezeGradientsMatchingRegexTest(test_case.TestCase):\r\n\r\n def _create_grads_and_vars(self):\r\n return [(tf.constant(1.0),\r\n tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),\r\n (tf.constant(2.0),\r\n tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),\r\n (tf.constant(3.0),\r\n tf.Variable(3.0, name='StackProposalGenerator/weights')),\r\n (tf.constant(4.0),\r\n tf.Variable(4.0, name='StackProposalGenerator/biases'))]\r\n\r\n def test_freeze_all_feature_extractor_variables(self):\r\n grads_and_vars = self._create_grads_and_vars()\r\n regex_list = ['FeatureExtractor/.*']\r\n grads_and_vars = variables_helper.freeze_gradients_matching_regex(\r\n grads_and_vars, regex_list)\r\n exp_output = [(3.0, 3.0), (4.0, 4.0)]\r\n init_op = tf.global_variables_initializer()\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n output = sess.run(grads_and_vars)\r\n self.assertCountEqual(output, 
exp_output)\r\n\r\n\r\[email protected](tf_version.is_tf2(), 'Skipping TF1.X only test.')\r\nclass GetVariablesAvailableInCheckpointTest(test_case.TestCase):\r\n\r\n def test_return_all_variables_from_checkpoint(self):\r\n with tf.Graph().as_default():\r\n variables = [\r\n tf.Variable(1.0, name='weights'),\r\n tf.Variable(1.0, name='biases')\r\n ]\r\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\r\n init_op = tf.global_variables_initializer()\r\n saver = tf.train.Saver(variables)\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n saver.save(sess, checkpoint_path)\r\n out_variables = variables_helper.get_variables_available_in_checkpoint(\r\n variables, checkpoint_path)\r\n self.assertCountEqual(out_variables, variables)\r\n\r\n def test_return_all_variables_from_checkpoint_with_partition(self):\r\n with tf.Graph().as_default():\r\n partitioner = tf.fixed_size_partitioner(2)\r\n variables = [\r\n tf.get_variable(\r\n name='weights', shape=(2, 2), partitioner=partitioner),\r\n tf.Variable([1.0, 2.0], name='biases')\r\n ]\r\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\r\n init_op = tf.global_variables_initializer()\r\n saver = tf.train.Saver(variables)\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n saver.save(sess, checkpoint_path)\r\n out_variables = variables_helper.get_variables_available_in_checkpoint(\r\n variables, checkpoint_path)\r\n self.assertCountEqual(out_variables, variables)\r\n\r\n def test_return_variables_available_in_checkpoint(self):\r\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\r\n with tf.Graph().as_default():\r\n weight_variable = tf.Variable(1.0, name='weights')\r\n global_step = tf.train.get_or_create_global_step()\r\n graph1_variables = [\r\n weight_variable,\r\n global_step\r\n ]\r\n init_op = tf.global_variables_initializer()\r\n saver = tf.train.Saver(graph1_variables)\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n saver.save(sess, checkpoint_path)\r\n\r\n with tf.Graph().as_default():\r\n graph2_variables = graph1_variables + [tf.Variable(1.0, name='biases')]\r\n out_variables = variables_helper.get_variables_available_in_checkpoint(\r\n graph2_variables, checkpoint_path, include_global_step=False)\r\n self.assertCountEqual(out_variables, [weight_variable])\r\n\r\n def test_return_variables_available_an_checkpoint_with_dict_inputs(self):\r\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\r\n with tf.Graph().as_default():\r\n graph1_variables = [\r\n tf.Variable(1.0, name='ckpt_weights'),\r\n ]\r\n init_op = tf.global_variables_initializer()\r\n saver = tf.train.Saver(graph1_variables)\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n saver.save(sess, checkpoint_path)\r\n\r\n with tf.Graph().as_default():\r\n graph2_variables_dict = {\r\n 'ckpt_weights': tf.Variable(1.0, name='weights'),\r\n 'ckpt_biases': tf.Variable(1.0, name='biases')\r\n }\r\n out_variables = variables_helper.get_variables_available_in_checkpoint(\r\n graph2_variables_dict, checkpoint_path)\r\n\r\n self.assertIsInstance(out_variables, dict)\r\n self.assertCountEqual(list(out_variables.keys()), ['ckpt_weights'])\r\n self.assertEqual(out_variables['ckpt_weights'].op.name, 'weights')\r\n\r\n def test_return_variables_with_correct_sizes(self):\r\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\r\n with tf.Graph().as_default():\r\n bias_variable = tf.Variable(3.0, name='biases')\r\n global_step = 
tf.train.get_or_create_global_step()\r\n graph1_variables = [\r\n tf.Variable([[1.0, 2.0], [3.0, 4.0]], name='weights'),\r\n bias_variable,\r\n global_step\r\n ]\r\n init_op = tf.global_variables_initializer()\r\n saver = tf.train.Saver(graph1_variables)\r\n with self.test_session() as sess:\r\n sess.run(init_op)\r\n saver.save(sess, checkpoint_path)\r\n\r\n with tf.Graph().as_default():\r\n graph2_variables = [\r\n tf.Variable([1.0, 2.0], name='weights'), # New variable shape.\r\n bias_variable,\r\n global_step\r\n ]\r\n\r\n out_variables = variables_helper.get_variables_available_in_checkpoint(\r\n graph2_variables, checkpoint_path, include_global_step=True)\r\n self.assertCountEqual(out_variables, [bias_variable, global_step])\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# Lint as: python3\r\n\"\"\"Tests for 3D UNet backbone.\"\"\"\r\n\r\n# Import libraries\r\nfrom absl.testing import parameterized\r\nimport tensorflow as tf\r\n\r\nfrom official.vision.beta.projects.volumetric_models.modeling.backbones import unet_3d\r\n\r\n\r\nclass UNet3DTest(parameterized.TestCase, tf.test.TestCase):\r\n\r\n @parameterized.parameters(\r\n ([128, 64], 4),\r\n ([256, 128], 6),\r\n )\r\n def test_network_creation(self, input_size, model_id):\r\n \"\"\"Test creation of UNet3D family models.\"\"\"\r\n tf.keras.backend.set_image_data_format('channels_last')\r\n network = unet_3d.UNet3D(model_id=model_id)\r\n inputs = tf.keras.Input(\r\n shape=(input_size[0], input_size[0], input_size[1], 3), batch_size=1)\r\n endpoints = network(inputs)\r\n\r\n for layer_depth in range(model_id):\r\n self.assertAllEqual([\r\n 1, input_size[0] / 2**layer_depth, input_size[0] / 2**layer_depth,\r\n input_size[1] / 2**layer_depth, 64 * 2**layer_depth\r\n ], endpoints[str(layer_depth + 1)].shape.as_list())\r\n\r\n def test_serialize_deserialize(self):\r\n # Create a network object that sets all of its config options.\r\n kwargs = dict(\r\n model_id=4,\r\n pool_size=(2, 2, 2),\r\n kernel_size=(3, 3, 3),\r\n activation='relu',\r\n base_filters=32,\r\n kernel_regularizer=None,\r\n norm_momentum=0.99,\r\n norm_epsilon=0.001,\r\n use_sync_bn=False,\r\n use_batch_normalization=True)\r\n network = unet_3d.UNet3D(**kwargs)\r\n\r\n expected_config = dict(kwargs)\r\n self.assertEqual(network.get_config(), expected_config)\r\n\r\n # Create another network object from the first object's config.\r\n new_network = unet_3d.UNet3D.from_config(network.get_config())\r\n\r\n # Validate that the config can be forced to JSON.\r\n _ = new_network.to_json()\r\n\r\n # If the serialization was successful, the new config should match the old.\r\n self.assertAllEqual(network.get_config(), new_network.get_config())\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Test the Keras MNIST model on GPU.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport functools\r\n\r\nfrom absl.testing import parameterized\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.distribute import combinations\r\nfrom tensorflow.python.distribute import strategy_combinations\r\nfrom official.utils.testing import integration\r\nfrom official.vision.image_classification import mnist_main\r\n\r\n\r\nmnist_main.define_mnist_flags()\r\n\r\n\r\ndef eager_strategy_combinations():\r\n return combinations.combine(\r\n distribution=[\r\n strategy_combinations.default_strategy,\r\n strategy_combinations.cloud_tpu_strategy,\r\n strategy_combinations.one_device_strategy_gpu,\r\n ],)\r\n\r\n\r\nclass KerasMnistTest(tf.test.TestCase, parameterized.TestCase):\r\n \"\"\"Unit tests for sample Keras MNIST model.\"\"\"\r\n _tempdir = None\r\n\r\n @classmethod\r\n def setUpClass(cls): # pylint: disable=invalid-name\r\n super(KerasMnistTest, cls).setUpClass()\r\n\r\n def tearDown(self):\r\n super(KerasMnistTest, self).tearDown()\r\n tf.io.gfile.rmtree(self.get_temp_dir())\r\n\r\n @combinations.generate(eager_strategy_combinations())\r\n def test_end_to_end(self, distribution):\r\n \"\"\"Test Keras MNIST model with `strategy`.\"\"\"\r\n\r\n extra_flags = [\r\n \"-train_epochs\",\r\n \"1\",\r\n # Let TFDS find the metadata folder automatically\r\n \"--data_dir=\"\r\n ]\r\n\r\n dummy_data = (\r\n tf.ones(shape=(10, 28, 28, 1), dtype=tf.int32),\r\n tf.range(10),\r\n )\r\n datasets = (\r\n tf.data.Dataset.from_tensor_slices(dummy_data),\r\n tf.data.Dataset.from_tensor_slices(dummy_data),\r\n )\r\n\r\n run = functools.partial(\r\n mnist_main.run,\r\n datasets_override=datasets,\r\n strategy_override=distribution)\r\n\r\n integration.run_synthetic(\r\n main=run,\r\n synth=False,\r\n tmp_root=self.create_tempdir().full_path,\r\n extra_flags=extra_flags)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tf.test.main()\r\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Converts data from CSV format to the VRDDetectionEvaluator format.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom object_detection.core import standard_fields\r\nfrom object_detection.utils import vrd_evaluation\r\n\r\n\r\ndef build_groundtruth_vrd_dictionary(data, class_label_map,\r\n relationship_label_map):\r\n \"\"\"Builds a groundtruth dictionary from groundtruth data in CSV file.\r\n\r\n Args:\r\n data: Pandas DataFrame with the groundtruth data for a single image.\r\n class_label_map: Class labelmap from string label name to an integer.\r\n relationship_label_map: Relationship type labelmap from string name to an\r\n integer.\r\n\r\n Returns:\r\n A dictionary with keys suitable for passing to\r\n VRDDetectionEvaluator.add_single_ground_truth_image_info:\r\n standard_fields.InputDataFields.groundtruth_boxes: A numpy array\r\n of structures with the shape [M, 1], representing M tuples, each tuple\r\n containing the same number of named bounding boxes.\r\n Each box is of the format [y_min, x_min, y_max, x_max] (see\r\n datatype vrd_box_data_type, single_box_data_type above).\r\n standard_fields.InputDataFields.groundtruth_classes: A numpy array of\r\n structures shape [M, 1], representing the class labels of the\r\n corresponding bounding boxes and possibly additional classes (see\r\n datatype label_data_type above).\r\n standard_fields.InputDataFields.verified_labels: numpy array\r\n of shape [K] containing verified labels.\r\n \"\"\"\r\n data_boxes = data[data.LabelName.isnull()]\r\n data_labels = data[data.LabelName1.isnull()]\r\n\r\n boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type)\r\n boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1',\r\n 'XMax1']].to_numpy()\r\n boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].to_numpy()\r\n\r\n labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type)\r\n labels['subject'] = data_boxes['LabelName1'].map(\r\n lambda x: class_label_map[x]).to_numpy()\r\n labels['object'] = data_boxes['LabelName2'].map(\r\n lambda x: class_label_map[x]).to_numpy()\r\n labels['relation'] = data_boxes['RelationshipLabel'].map(\r\n lambda x: relationship_label_map[x]).to_numpy()\r\n\r\n return {\r\n standard_fields.InputDataFields.groundtruth_boxes:\r\n boxes,\r\n standard_fields.InputDataFields.groundtruth_classes:\r\n labels,\r\n standard_fields.InputDataFields.groundtruth_image_classes:\r\n data_labels['LabelName'].map(lambda x: class_label_map[x])\r\n .to_numpy(),\r\n }\r\n\r\n\r\ndef build_predictions_vrd_dictionary(data, class_label_map,\r\n relationship_label_map):\r\n \"\"\"Builds a predictions dictionary from predictions data in CSV file.\r\n\r\n Args:\r\n data: Pandas DataFrame with the 
predictions data for a single image.\r\n class_label_map: Class labelmap from string label name to an integer.\r\n relationship_label_map: Relationship type labelmap from string name to an\r\n integer.\r\n\r\n Returns:\r\n Dictionary with keys suitable for passing to\r\n VRDDetectionEvaluator.add_single_detected_image_info:\r\n standard_fields.DetectionResultFields.detection_boxes: A numpy array of\r\n structures with shape [N, 1], representing N tuples, each tuple\r\n containing the same number of named bounding boxes.\r\n Each box is of the format [y_min, x_min, y_max, x_max] (as an example\r\n see datatype vrd_box_data_type, single_box_data_type above).\r\n standard_fields.DetectionResultFields.detection_scores: float32 numpy\r\n array of shape [N] containing detection scores for the boxes.\r\n standard_fields.DetectionResultFields.detection_classes: A numpy array\r\n of structures shape [N, 1], representing the class labels of the\r\n corresponding bounding boxes and possibly additional classes (see\r\n datatype label_data_type above).\r\n \"\"\"\r\n data_boxes = data\r\n\r\n boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type)\r\n boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1',\r\n 'XMax1']].to_numpy()\r\n boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].to_numpy()\r\n\r\n labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type)\r\n labels['subject'] = data_boxes['LabelName1'].map(\r\n lambda x: class_label_map[x]).to_numpy()\r\n labels['object'] = data_boxes['LabelName2'].map(\r\n lambda x: class_label_map[x]).to_numpy()\r\n labels['relation'] = data_boxes['RelationshipLabel'].map(\r\n lambda x: relationship_label_map[x]).to_numpy()\r\n\r\n return {\r\n standard_fields.DetectionResultFields.detection_boxes:\r\n boxes,\r\n standard_fields.DetectionResultFields.detection_classes:\r\n labels,\r\n standard_fields.DetectionResultFields.detection_scores:\r\n data_boxes['Score'].to_numpy()\r\n }\r\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# Lint as: python3\r\n\"\"\"Tests for RevNet.\"\"\"\r\n\r\n# Import libraries\r\nfrom absl.testing import parameterized\r\nimport tensorflow as tf\r\n\r\nfrom official.vision.beta.modeling.backbones import revnet\r\n\r\n\r\nclass RevNetTest(parameterized.TestCase, tf.test.TestCase):\r\n\r\n @parameterized.parameters(\r\n (128, 56, 4),\r\n (128, 104, 4),\r\n )\r\n def test_network_creation(self, input_size, model_id,\r\n endpoint_filter_scale):\r\n \"\"\"Test creation of RevNet family models.\"\"\"\r\n tf.keras.backend.set_image_data_format('channels_last')\r\n\r\n network = revnet.RevNet(model_id=model_id)\r\n inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)\r\n endpoints = network(inputs)\r\n network.summary()\r\n\r\n self.assertAllEqual(\r\n [1, input_size / 2**2, input_size / 2**2, 128 * endpoint_filter_scale],\r\n endpoints['2'].shape.as_list())\r\n self.assertAllEqual(\r\n [1, input_size / 2**3, input_size / 2**3, 256 * endpoint_filter_scale],\r\n endpoints['3'].shape.as_list())\r\n self.assertAllEqual(\r\n [1, input_size / 2**4, input_size / 2**4, 512 * endpoint_filter_scale],\r\n endpoints['4'].shape.as_list())\r\n self.assertAllEqual(\r\n [1, input_size / 2**5, input_size / 2**5, 832 * endpoint_filter_scale],\r\n endpoints['5'].shape.as_list())\r\n\r\n @parameterized.parameters(1, 3, 4)\r\n def test_input_specs(self, input_dim):\r\n \"\"\"Test different input feature dimensions.\"\"\"\r\n tf.keras.backend.set_image_data_format('channels_last')\r\n\r\n input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])\r\n network = revnet.RevNet(model_id=56, input_specs=input_specs)\r\n\r\n inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)\r\n _ = network(inputs)\r\n\r\n def test_serialize_deserialize(self):\r\n # Create a network object that sets all of its config options.\r\n kwargs = dict(\r\n model_id=56,\r\n activation='relu',\r\n use_sync_bn=False,\r\n norm_momentum=0.99,\r\n norm_epsilon=0.001,\r\n kernel_initializer='VarianceScaling',\r\n kernel_regularizer=None,\r\n )\r\n network = revnet.RevNet(**kwargs)\r\n\r\n expected_config = dict(kwargs)\r\n self.assertEqual(network.get_config(), expected_config)\r\n\r\n # Create another network object from the first object's config.\r\n new_network = revnet.RevNet.from_config(network.get_config())\r\n\r\n # Validate that the config can be forced to JSON.\r\n _ = new_network.to_json()\r\n\r\n # If the serialization was successful, the new config should match the old.\r\n self.assertAllEqual(network.get_config(), new_network.get_config())\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n", "# Copyright 2018 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# 
http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Wrapper for creating the ant environment in gym_mujoco.\"\"\"\r\n\r\nimport math\r\nimport numpy as np\r\nimport mujoco_py\r\nfrom gym import utils\r\nfrom gym.envs.mujoco import mujoco_env\r\n\r\n\r\ndef q_inv(a):\r\n return [a[0], -a[1], -a[2], -a[3]]\r\n\r\n\r\ndef q_mult(a, b): # multiply two quaternion\r\n w = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3]\r\n i = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2]\r\n j = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1]\r\n k = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0]\r\n return [w, i, j, k]\r\n\r\n\r\nclass AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):\r\n FILE = \"ant.xml\"\r\n ORI_IND = 3\r\n\r\n def __init__(self, file_path=None, expose_all_qpos=True,\r\n expose_body_coms=None, expose_body_comvels=None):\r\n self._expose_all_qpos = expose_all_qpos\r\n self._expose_body_coms = expose_body_coms\r\n self._expose_body_comvels = expose_body_comvels\r\n self._body_com_indices = {}\r\n self._body_comvel_indices = {}\r\n\r\n mujoco_env.MujocoEnv.__init__(self, file_path, 5)\r\n utils.EzPickle.__init__(self)\r\n\r\n @property\r\n def physics(self):\r\n # check mujoco version is greater than version 1.50 to call correct physics\r\n # model containing PyMjData object for getting and setting position/velocity\r\n # check https://github.com/openai/mujoco-py/issues/80 for updates to api\r\n if mujoco_py.get_version() >= '1.50':\r\n return self.sim\r\n else:\r\n return self.model\r\n\r\n def _step(self, a):\r\n return self.step(a)\r\n\r\n def step(self, a):\r\n xposbefore = self.get_body_com(\"torso\")[0]\r\n self.do_simulation(a, self.frame_skip)\r\n xposafter = self.get_body_com(\"torso\")[0]\r\n forward_reward = (xposafter - xposbefore) / self.dt\r\n ctrl_cost = .5 * np.square(a).sum()\r\n survive_reward = 1.0\r\n reward = forward_reward - ctrl_cost + survive_reward\r\n state = self.state_vector()\r\n done = False\r\n ob = self._get_obs()\r\n return ob, reward, done, dict(\r\n reward_forward=forward_reward,\r\n reward_ctrl=-ctrl_cost,\r\n reward_survive=survive_reward)\r\n\r\n def _get_obs(self):\r\n # No cfrc observation\r\n if self._expose_all_qpos:\r\n obs = np.concatenate([\r\n self.physics.data.qpos.flat[:15], # Ensures only ant obs.\r\n self.physics.data.qvel.flat[:14],\r\n ])\r\n else:\r\n obs = np.concatenate([\r\n self.physics.data.qpos.flat[2:15],\r\n self.physics.data.qvel.flat[:14],\r\n ])\r\n\r\n if self._expose_body_coms is not None:\r\n for name in self._expose_body_coms:\r\n com = self.get_body_com(name)\r\n if name not in self._body_com_indices:\r\n indices = range(len(obs), len(obs) + len(com))\r\n self._body_com_indices[name] = indices\r\n obs = np.concatenate([obs, com])\r\n\r\n if self._expose_body_comvels is not None:\r\n for name in self._expose_body_comvels:\r\n comvel = self.get_body_comvel(name)\r\n if name not in self._body_comvel_indices:\r\n indices = range(len(obs), len(obs) + len(comvel))\r\n self._body_comvel_indices[name] = indices\r\n obs = np.concatenate([obs, comvel])\r\n return obs\r\n\r\n def 
reset_model(self):\r\n qpos = self.init_qpos + self.np_random.uniform(\r\n size=self.model.nq, low=-.1, high=.1)\r\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\r\n\r\n # Set everything other than ant to original position and 0 velocity.\r\n qpos[15:] = self.init_qpos[15:]\r\n qvel[14:] = 0.\r\n self.set_state(qpos, qvel)\r\n return self._get_obs()\r\n\r\n def viewer_setup(self):\r\n self.viewer.cam.distance = self.model.stat.extent * 0.5\r\n\r\n def get_ori(self):\r\n ori = [0, 1, 0, 0]\r\n rot = self.physics.data.qpos[self.__class__.ORI_IND:self.__class__.ORI_IND + 4] # take the quaternion\r\n ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3] # project onto x-y plane\r\n ori = math.atan2(ori[1], ori[0])\r\n return ori\r\n\r\n def set_xy(self, xy):\r\n qpos = np.copy(self.physics.data.qpos)\r\n qpos[0] = xy[0]\r\n qpos[1] = xy[1]\r\n\r\n qvel = self.physics.data.qvel\r\n self.set_state(qpos, qvel)\r\n\r\n def get_xy(self):\r\n return self.physics.data.qpos[:2]\r\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Tests for the customized Sigmoid activation.\"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.keras import \\\r\n keras_parameterized # pylint: disable=g-direct-tensorflow-import\r\nfrom official.modeling import activations\r\n\r\n\r\n@keras_parameterized.run_all_keras_modes\r\nclass CustomizedSigmoidTest(keras_parameterized.TestCase):\r\n\r\n def _hard_sigmoid_nn(self, x):\r\n x = np.float32(x)\r\n return tf.nn.relu6(x + 3.) * 0.16667\r\n\r\n def test_hard_sigmoid(self):\r\n features = [[.25, 0, -.25], [-1, -2, 3]]\r\n customized_hard_sigmoid_data = activations.hard_sigmoid(features)\r\n sigmoid_data = self._hard_sigmoid_nn(features)\r\n self.assertAllClose(customized_hard_sigmoid_data, sigmoid_data)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n", "# Lint as: python2, python3\r\n# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Mask Head.\r\n\r\nContains Mask prediction head classes for different meta architectures.\r\nAll the mask prediction heads have a predict function that receives the\r\n`features` as the first argument and returns `mask_predictions`.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport math\r\nfrom six.moves import range\r\nimport tensorflow.compat.v1 as tf\r\nimport tf_slim as slim\r\n\r\nfrom object_detection.predictors.heads import head\r\nfrom object_detection.utils import ops\r\n\r\n\r\nclass MaskRCNNMaskHead(head.Head):\r\n \"\"\"Mask RCNN mask prediction head.\r\n\r\n Please refer to Mask RCNN paper:\r\n https://arxiv.org/abs/1703.06870\r\n \"\"\"\r\n\r\n def __init__(self,\r\n num_classes,\r\n conv_hyperparams_fn=None,\r\n mask_height=14,\r\n mask_width=14,\r\n mask_prediction_num_conv_layers=2,\r\n mask_prediction_conv_depth=256,\r\n masks_are_class_agnostic=False,\r\n convolve_then_upsample=False):\r\n \"\"\"Constructor.\r\n\r\n Args:\r\n num_classes: number of classes. Note that num_classes *does not*\r\n include the background category, so if groundtruth labels take values\r\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\r\n assigned classification targets can range from {0,... K}).\r\n conv_hyperparams_fn: A function to generate tf-slim arg_scope with\r\n hyperparameters for convolution ops.\r\n mask_height: Desired output mask height. The default value is 14.\r\n mask_width: Desired output mask width. The default value is 14.\r\n mask_prediction_num_conv_layers: Number of convolution layers applied to\r\n the image_features in mask prediction branch.\r\n mask_prediction_conv_depth: The depth for the first conv2d_transpose op\r\n applied to the image_features in the mask prediction branch. If set\r\n to 0, the depth of the convolution layers will be automatically chosen\r\n based on the number of object classes and the number of channels in the\r\n image features.\r\n masks_are_class_agnostic: Boolean determining if the mask-head is\r\n class-agnostic or not.\r\n convolve_then_upsample: Whether to apply convolutions on mask features\r\n before upsampling using nearest neighbor resizing. 
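# The log-space weighted average used by _get_mask_predictor_conv_depth
# (defined later in this class), reproduced as a minimal standalone sketch;
# `conv_depth` and the 1024-channel / 90-class example are hypothetical.
import math

def conv_depth(num_feature_channels, num_classes,
               class_weight=3.0, feature_weight=2.0):
  avg_log2 = (math.log(float(num_feature_channels), 2.0) * feature_weight +
              math.log(float(num_classes), 2.0) * class_weight) / (
                  class_weight + feature_weight)
  return int(math.pow(2.0, round(avg_log2)))

assert conv_depth(1024, 90) == 256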
Otherwise, mask\r\n features are resized to [`mask_height`, `mask_width`] using bilinear\r\n resizing before applying convolutions.\r\n\r\n Raises:\r\n ValueError: conv_hyperparams_fn is None.\r\n \"\"\"\r\n super(MaskRCNNMaskHead, self).__init__()\r\n self._num_classes = num_classes\r\n self._conv_hyperparams_fn = conv_hyperparams_fn\r\n self._mask_height = mask_height\r\n self._mask_width = mask_width\r\n self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers\r\n self._mask_prediction_conv_depth = mask_prediction_conv_depth\r\n self._masks_are_class_agnostic = masks_are_class_agnostic\r\n self._convolve_then_upsample = convolve_then_upsample\r\n if conv_hyperparams_fn is None:\r\n raise ValueError('conv_hyperparams_fn is None.')\r\n\r\n def _get_mask_predictor_conv_depth(self,\r\n num_feature_channels,\r\n num_classes,\r\n class_weight=3.0,\r\n feature_weight=2.0):\r\n \"\"\"Computes the depth of the mask predictor convolutions.\r\n\r\n Computes the depth of the mask predictor convolutions given feature channels\r\n and number of classes by performing a weighted average of the two in\r\n log space to compute the number of convolution channels. The weights that\r\n are used for computing the weighted average do not need to sum to 1.\r\n\r\n Args:\r\n num_feature_channels: An integer containing the number of feature\r\n channels.\r\n num_classes: An integer containing the number of classes.\r\n class_weight: Class weight used in computing the weighted average.\r\n feature_weight: Feature weight used in computing the weighted average.\r\n\r\n Returns:\r\n An integer containing the number of convolution channels used by mask\r\n predictor.\r\n \"\"\"\r\n num_feature_channels_log = math.log(float(num_feature_channels), 2.0)\r\n num_classes_log = math.log(float(num_classes), 2.0)\r\n weighted_num_feature_channels_log = (\r\n num_feature_channels_log * feature_weight)\r\n weighted_num_classes_log = num_classes_log * class_weight\r\n total_weight = feature_weight + class_weight\r\n num_conv_channels_log = round(\r\n (weighted_num_feature_channels_log + weighted_num_classes_log) /\r\n total_weight)\r\n return int(math.pow(2.0, num_conv_channels_log))\r\n\r\n def predict(self, features, num_predictions_per_location=1):\r\n \"\"\"Performs mask prediction.\r\n\r\n Args:\r\n features: A float tensor of shape [batch_size, height, width, channels]\r\n containing features for a batch of images.\r\n num_predictions_per_location: Int containing number of predictions per\r\n location.\r\n\r\n Returns:\r\n instance_masks: A float tensor of shape\r\n [batch_size, 1, num_classes, mask_height, mask_width].\r\n\r\n Raises:\r\n ValueError: If num_predictions_per_location is not 1.\r\n \"\"\"\r\n if num_predictions_per_location != 1:\r\n raise ValueError('Only num_predictions_per_location=1 is supported')\r\n num_conv_channels = self._mask_prediction_conv_depth\r\n if num_conv_channels == 0:\r\n num_feature_channels = features.get_shape().as_list()[3]\r\n num_conv_channels = self._get_mask_predictor_conv_depth(\r\n num_feature_channels, self._num_classes)\r\n with slim.arg_scope(self._conv_hyperparams_fn()):\r\n if not self._convolve_then_upsample:\r\n features = tf.image.resize_bilinear(\r\n features, [self._mask_height, self._mask_width],\r\n align_corners=True)\r\n for _ in range(self._mask_prediction_num_conv_layers - 1):\r\n features = slim.conv2d(\r\n features,\r\n num_outputs=num_conv_channels,\r\n kernel_size=[3, 3])\r\n if self._convolve_then_upsample:\r\n # Replace Transposed 
Convolution with a Nearest Neighbor upsampling step\r\n        # followed by 3x3 convolution.\r\n        height_scale = self._mask_height // features.shape[1].value\r\n        width_scale = self._mask_width // features.shape[2].value\r\n        features = ops.nearest_neighbor_upsampling(\r\n            features, height_scale=height_scale, width_scale=width_scale)\r\n        features = slim.conv2d(\r\n            features,\r\n            num_outputs=num_conv_channels,\r\n            kernel_size=[3, 3])\r\n\r\n      num_masks = 1 if self._masks_are_class_agnostic else self._num_classes\r\n      mask_predictions = slim.conv2d(\r\n          features,\r\n          num_outputs=num_masks,\r\n          activation_fn=None,\r\n          normalizer_fn=None,\r\n          kernel_size=[3, 3])\r\n      return tf.expand_dims(\r\n          tf.transpose(mask_predictions, perm=[0, 3, 1, 2]),\r\n          axis=1,\r\n          name='MaskPredictor')\r\n\r\n\r\n
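# --- Added note: worked example (standalone sketch, illustrative numbers) of
# the log-space weighted average used by _get_mask_predictor_conv_depth above,
# here for 256 feature channels and 90 classes with the default weights.
import math

num_feature_channels, num_classes = 256, 90
feature_weight, class_weight = 2.0, 3.0
weighted_sum = (math.log(num_feature_channels, 2.0) * feature_weight  # 8.00 * 2.0
                + math.log(num_classes, 2.0) * class_weight)          # 6.49 * 3.0
depth = int(math.pow(2.0, round(weighted_sum / (feature_weight + class_weight))))
# round(35.48 / 5.0) == 7, so the head would use 2**7 == 128 conv channels.
assert depth == 128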
class ConvolutionalMaskHead(head.Head):\r\n  \"\"\"Convolutional mask prediction head.\"\"\"\r\n\r\n  def __init__(self,\r\n               is_training,\r\n               num_classes,\r\n               use_dropout,\r\n               dropout_keep_prob,\r\n               kernel_size,\r\n               use_depthwise=False,\r\n               mask_height=7,\r\n               mask_width=7,\r\n               masks_are_class_agnostic=False):\r\n    \"\"\"Constructor.\r\n\r\n    Args:\r\n      is_training: Indicates whether the BoxPredictor is in training mode.\r\n      num_classes: Number of classes.\r\n      use_dropout: Option to use dropout or not. Note that a single dropout\r\n        op is applied here prior to both box and class predictions, which stands\r\n        in contrast to the ConvolutionalBoxPredictor below.\r\n      dropout_keep_prob: Keep probability for dropout.\r\n        This is only used if use_dropout is True.\r\n      kernel_size: Size of final convolution kernel. If the\r\n        spatial resolution of the feature map is smaller than the kernel size,\r\n        then the kernel size is automatically set to be\r\n        min(feature_width, feature_height).\r\n      use_depthwise: Whether to use depthwise convolutions for prediction\r\n        steps. Default is False.\r\n      mask_height: Desired output mask height. The default value is 7.\r\n      mask_width: Desired output mask width. The default value is 7.\r\n      masks_are_class_agnostic: Boolean determining if the mask-head is\r\n        class-agnostic or not.\r\n\r\n    Raises:\r\n      ValueError: if min_depth > max_depth.\r\n    \"\"\"\r\n    super(ConvolutionalMaskHead, self).__init__()\r\n    self._is_training = is_training\r\n    self._num_classes = num_classes\r\n    self._use_dropout = use_dropout\r\n    self._dropout_keep_prob = dropout_keep_prob\r\n    self._kernel_size = kernel_size\r\n    self._use_depthwise = use_depthwise\r\n    self._mask_height = mask_height\r\n    self._mask_width = mask_width\r\n    self._masks_are_class_agnostic = masks_are_class_agnostic\r\n\r\n  def predict(self, features, num_predictions_per_location):\r\n    \"\"\"Predicts masks.\r\n\r\n    Args:\r\n      features: A float tensor of shape [batch_size, height, width, channels]\r\n        containing image features.\r\n      num_predictions_per_location: Number of mask predictions to be made per\r\n        spatial location.\r\n\r\n    Returns:\r\n      mask_predictions: A float tensor of shape\r\n        [batch_size, num_anchors, num_masks, mask_height, mask_width]\r\n        representing the mask predictions for the proposals.\r\n    \"\"\"\r\n    image_feature = features\r\n    if self._masks_are_class_agnostic:\r\n      num_masks = 1\r\n    else:\r\n      num_masks = self._num_classes\r\n    num_mask_channels = num_masks * self._mask_height * self._mask_width\r\n    net = image_feature\r\n    if self._use_dropout:\r\n      net = slim.dropout(net, keep_prob=self._dropout_keep_prob)\r\n    if self._use_depthwise:\r\n      mask_predictions = slim.separable_conv2d(\r\n          net, None, [self._kernel_size, self._kernel_size],\r\n          padding='SAME', depth_multiplier=1, stride=1,\r\n          rate=1, scope='MaskPredictor_depthwise')\r\n      mask_predictions = slim.conv2d(\r\n          mask_predictions,\r\n          num_predictions_per_location * num_mask_channels,\r\n          [1, 1],\r\n          activation_fn=None,\r\n          normalizer_fn=None,\r\n          normalizer_params=None,\r\n          scope='MaskPredictor')\r\n    else:\r\n      mask_predictions = slim.conv2d(\r\n          net,\r\n          num_predictions_per_location * num_mask_channels,\r\n          [self._kernel_size, self._kernel_size],\r\n          activation_fn=None,\r\n          normalizer_fn=None,\r\n          normalizer_params=None,\r\n          scope='MaskPredictor')\r\n    batch_size = features.get_shape().as_list()[0]\r\n    if batch_size is None:\r\n      batch_size = tf.shape(features)[0]\r\n    mask_predictions = tf.reshape(\r\n        mask_predictions,\r\n        [batch_size, -1, num_masks, self._mask_height, self._mask_width])\r\n    return mask_predictions\r\n\r\n\r\n
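# --- Added note: shape sketch (standalone, illustrative numbers) of the final
# reshape in ConvolutionalMaskHead.predict above: the conv packs every mask
# into the channel axis, and tf.reshape unpacks one mask set per location.
import tensorflow as tf

batch, h, w = 2, 10, 10
num_masks, mask_h, mask_w = 3, 7, 7      # e.g. 3 classes with 7x7 masks
preds_per_loc = 1
packed = tf.zeros([batch, h, w, preds_per_loc * num_masks * mask_h * mask_w])
unpacked = tf.reshape(packed, [batch, -1, num_masks, mask_h, mask_w])
assert unpacked.shape.as_list() == [2, 100, 3, 7, 7]  # 100 = h * w * preds_per_loc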
# TODO(alirezafathi): See if possible to unify Weight Shared with regular\r\n# convolutional mask head.\r\nclass WeightSharedConvolutionalMaskHead(head.Head):\r\n  \"\"\"Weight shared convolutional mask prediction head.\"\"\"\r\n\r\n  def __init__(self,\r\n               num_classes,\r\n               kernel_size=3,\r\n               use_dropout=False,\r\n               dropout_keep_prob=0.8,\r\n               mask_height=7,\r\n               mask_width=7,\r\n               masks_are_class_agnostic=False):\r\n    \"\"\"Constructor.\r\n\r\n    Args:\r\n      num_classes: number of classes. Note that num_classes *does not*\r\n        include the background category, so if groundtruth labels take values\r\n        in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\r\n        assigned classification targets can range from {0,... K}).\r\n      kernel_size: Size of final convolution kernel.\r\n      use_dropout: Whether to apply dropout to the mask prediction head.\r\n      dropout_keep_prob: Probability of keeping activations.\r\n      mask_height: Desired output mask height. The default value is 7.\r\n      mask_width: Desired output mask width. The default value is 7.\r\n      masks_are_class_agnostic: Boolean determining if the mask-head is\r\n        class-agnostic or not.\r\n    \"\"\"\r\n    super(WeightSharedConvolutionalMaskHead, self).__init__()\r\n    self._num_classes = num_classes\r\n    self._kernel_size = kernel_size\r\n    self._use_dropout = use_dropout\r\n    self._dropout_keep_prob = dropout_keep_prob\r\n    self._mask_height = mask_height\r\n    self._mask_width = mask_width\r\n    self._masks_are_class_agnostic = masks_are_class_agnostic\r\n\r\n  def predict(self, features, num_predictions_per_location):\r\n    \"\"\"Predicts masks.\r\n\r\n    Args:\r\n      features: A float tensor of shape [batch_size, height, width, channels]\r\n        containing image features.\r\n      num_predictions_per_location: Number of mask predictions to be made per\r\n        spatial location.\r\n\r\n    Returns:\r\n      mask_predictions: A tensor of shape\r\n        [batch_size, num_anchors, num_masks, mask_height, mask_width]\r\n        representing the mask predictions for the proposals.\r\n    \"\"\"\r\n    mask_predictions_net = features\r\n    if self._masks_are_class_agnostic:\r\n      num_masks = 1\r\n    else:\r\n      num_masks = self._num_classes\r\n    num_mask_channels = num_masks * self._mask_height * self._mask_width\r\n    if self._use_dropout:\r\n      mask_predictions_net = slim.dropout(\r\n          mask_predictions_net, keep_prob=self._dropout_keep_prob)\r\n    mask_predictions = slim.conv2d(\r\n        mask_predictions_net,\r\n        num_predictions_per_location * num_mask_channels,\r\n        [self._kernel_size, self._kernel_size],\r\n        activation_fn=None, stride=1, padding='SAME',\r\n        normalizer_fn=None,\r\n        scope='MaskPredictor')\r\n    batch_size = features.get_shape().as_list()[0]\r\n    if batch_size is None:\r\n      batch_size = tf.shape(features)[0]\r\n    mask_predictions = tf.reshape(\r\n        mask_predictions,\r\n        [batch_size, -1, num_masks, self._mask_height, self._mask_width])\r\n    return mask_predictions\r\n", "
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Keras-based positional embedding layer.\"\"\"\r\n# pylint: disable=g-classes-have-attributes\r\nimport tensorflow as tf\r\n\r\n\r\n@tf.keras.utils.register_keras_serializable(package=\"keras_nlp\")\r\nclass PositionEmbedding(tf.keras.layers.Layer):\r\n  \"\"\"Creates a positional embedding.\r\n\r\n  Example:\r\n  ```python\r\n  position_embedding = PositionEmbedding(max_length=100)\r\n  inputs = tf.keras.Input((100, 32), dtype=tf.float32)\r\n  outputs = position_embedding(inputs)\r\n  ```\r\n\r\n  Args:\r\n    max_length: The maximum size of the dynamic sequence.\r\n    initializer: The initializer to use for the embedding weights. Defaults to\r\n      \"glorot_uniform\".\r\n    seq_axis: The axis of the input tensor where we add the embeddings.\r\n\r\n  Reference: This layer creates a positional embedding as described in\r\n  [BERT: Pre-training of Deep Bidirectional Transformers for Language\r\n  Understanding](https://arxiv.org/abs/1810.04805).\r\n  \"\"\"\r\n\r\n  def __init__(self,\r\n               max_length,\r\n               initializer=\"glorot_uniform\",\r\n               seq_axis=1,\r\n               **kwargs):\r\n    super(PositionEmbedding, self).__init__(**kwargs)\r\n    if max_length is None:\r\n      raise ValueError(\r\n          \"`max_length` must be an integer, not `None`.\")\r\n    self._max_length = max_length\r\n    self._initializer = tf.keras.initializers.get(initializer)\r\n    self._seq_axis = seq_axis\r\n\r\n  def get_config(self):\r\n    config = {\r\n        \"max_length\": self._max_length,\r\n        \"initializer\": tf.keras.initializers.serialize(self._initializer),\r\n        \"seq_axis\": self._seq_axis,\r\n    }\r\n    base_config = super(PositionEmbedding, self).get_config()\r\n    return dict(list(base_config.items()) + list(config.items()))\r\n\r\n  def build(self, input_shape):\r\n    dimension_list = input_shape.as_list()\r\n\r\n    seq_length = dimension_list[self._seq_axis]\r\n    width = dimension_list[-1]\r\n\r\n    if self._max_length is not None:\r\n      weight_sequence_length = self._max_length\r\n    else:\r\n      weight_sequence_length = seq_length\r\n\r\n    self._position_embeddings = self.add_weight(\r\n        \"embeddings\",\r\n        shape=[weight_sequence_length, width],\r\n        initializer=self._initializer)\r\n\r\n    super(PositionEmbedding, self).build(input_shape)\r\n\r\n  def call(self, inputs):\r\n    input_shape = tf.shape(inputs)\r\n    actual_seq_len = input_shape[self._seq_axis]\r\n    position_embeddings = self._position_embeddings[:actual_seq_len, :]\r\n    new_shape = [1 for _ in inputs.get_shape().as_list()]\r\n    new_shape[self._seq_axis] = actual_seq_len\r\n    new_shape[-1] = position_embeddings.get_shape().as_list()[-1]\r\n    position_embeddings = tf.reshape(position_embeddings, new_shape)\r\n    return tf.broadcast_to(position_embeddings, input_shape)\r\n", "
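# --- Added note: minimal usage sketch of the PositionEmbedding layer above.
# The learned [max_length, width] table is cropped to the runtime sequence
# length in call(), then broadcast across the batch dimension.
import tensorflow as tf

layer = PositionEmbedding(max_length=100)
x = tf.zeros([4, 20, 32])   # batch=4, seq_len=20 (<= max_length), width=32
pos = layer(x)              # table cropped to [20, 32], broadcast to x's shape
assert pos.shape.as_list() == [4, 20, 32]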
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"RetinaNet task definition.\"\"\"\r\nfrom typing import Any, Optional, List, Tuple, Mapping\r\n\r\nfrom absl import logging\r\nimport tensorflow as tf\r\nfrom official.common import dataset_fn\r\nfrom official.core import base_task\r\nfrom official.core import task_factory\r\nfrom official.vision import keras_cv\r\nfrom official.vision.beta.configs import retinanet as exp_cfg\r\nfrom official.vision.beta.dataloaders import input_reader_factory\r\nfrom official.vision.beta.dataloaders import retinanet_input\r\nfrom official.vision.beta.dataloaders import tf_example_decoder\r\nfrom official.vision.beta.dataloaders import tfds_factory\r\nfrom official.vision.beta.dataloaders import tf_example_label_map_decoder\r\nfrom official.vision.beta.evaluation import coco_evaluator\r\nfrom official.vision.beta.modeling import factory\r\n\r\n\r\n@task_factory.register_task_cls(exp_cfg.RetinaNetTask)\r\nclass RetinaNetTask(base_task.Task):\r\n  \"\"\"A single-replica view of training procedure.\r\n\r\n  RetinaNet task provides artifacts for training/evaluation procedures,\r\n  including loading/iterating over Datasets, initializing the model,\r\n  calculating the loss, post-processing, and customized metrics with\r\n  reduction.\r\n  \"\"\"\r\n\r\n  def build_model(self):\r\n    \"\"\"Builds the RetinaNet model.\"\"\"\r\n\r\n    input_specs = tf.keras.layers.InputSpec(\r\n        shape=[None] + self.task_config.model.input_size)\r\n\r\n    l2_weight_decay = self.task_config.losses.l2_weight_decay\r\n    # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.\r\n    # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)\r\n    # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)\r\n    l2_regularizer = (tf.keras.regularizers.l2(\r\n        l2_weight_decay / 2.0) if l2_weight_decay else None)\r\n\r\n    model = factory.build_retinanet(\r\n        input_specs=input_specs,\r\n        model_config=self.task_config.model,\r\n        l2_regularizer=l2_regularizer)\r\n    return model\r\n\r\n  def initialize(self, model: tf.keras.Model):\r\n    \"\"\"Loads a pretrained checkpoint.\"\"\"\r\n    if not self.task_config.init_checkpoint:\r\n      return\r\n\r\n    ckpt_dir_or_file = self.task_config.init_checkpoint\r\n    if tf.io.gfile.isdir(ckpt_dir_or_file):\r\n      ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)\r\n\r\n    # Restoring checkpoint.\r\n    if self.task_config.init_checkpoint_modules == 'all':\r\n      ckpt = tf.train.Checkpoint(**model.checkpoint_items)\r\n      status = ckpt.restore(ckpt_dir_or_file)\r\n      status.assert_consumed()\r\n    elif self.task_config.init_checkpoint_modules == 'backbone':\r\n      ckpt = tf.train.Checkpoint(backbone=model.backbone)\r\n      status = ckpt.restore(ckpt_dir_or_file)\r\n      status.expect_partial().assert_existing_objects_matched()\r\n    else:\r\n      raise ValueError(\r\n          \"Only 'all' or 'backbone' can be used to initialize the model.\")\r\n\r\n    logging.info('Finished loading pretrained checkpoint from %s',\r\n                 ckpt_dir_or_file)\r\n\r\n
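# --- Added note: numerical check (standalone sketch) of the weight-decay
# comment in build_model above. tf.nn.l2_loss(w) = sum(w**2) / 2 already
# carries the 1/2 factor, so l2(weight_decay / 2) matches weight_decay * l2_loss.
import tensorflow as tf

w = tf.constant([1.0, 2.0, 3.0])
weight_decay = 1e-4
a = weight_decay * tf.nn.l2_loss(w)                  # 1e-4 * (14 / 2) = 7e-4
b = tf.keras.regularizers.l2(weight_decay / 2.0)(w)  # 5e-5 * 14      = 7e-4
tf.debugging.assert_near(a, b)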
def build_inputs(self,\r\n params: exp_cfg.DataConfig,\r\n input_context: Optional[tf.distribute.InputContext] = None):\r\n \"\"\"Build input dataset.\"\"\"\r\n\r\n if params.tfds_name:\r\n decoder = tfds_factory.get_detection_decoder(params.tfds_name)\r\n else:\r\n decoder_cfg = params.decoder.get()\r\n if params.decoder.type == 'simple_decoder':\r\n decoder = tf_example_decoder.TfExampleDecoder(\r\n regenerate_source_id=decoder_cfg.regenerate_source_id)\r\n elif params.decoder.type == 'label_map_decoder':\r\n decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(\r\n label_map=decoder_cfg.label_map,\r\n regenerate_source_id=decoder_cfg.regenerate_source_id)\r\n else:\r\n raise ValueError('Unknown decoder type: {}!'.format(\r\n params.decoder.type))\r\n\r\n parser = retinanet_input.Parser(\r\n output_size=self.task_config.model.input_size[:2],\r\n min_level=self.task_config.model.min_level,\r\n max_level=self.task_config.model.max_level,\r\n num_scales=self.task_config.model.anchor.num_scales,\r\n aspect_ratios=self.task_config.model.anchor.aspect_ratios,\r\n anchor_size=self.task_config.model.anchor.anchor_size,\r\n dtype=params.dtype,\r\n match_threshold=params.parser.match_threshold,\r\n unmatched_threshold=params.parser.unmatched_threshold,\r\n aug_rand_hflip=params.parser.aug_rand_hflip,\r\n aug_scale_min=params.parser.aug_scale_min,\r\n aug_scale_max=params.parser.aug_scale_max,\r\n skip_crowd_during_training=params.parser.skip_crowd_during_training,\r\n max_num_instances=params.parser.max_num_instances)\r\n\r\n reader = input_reader_factory.input_reader_generator(\r\n params,\r\n dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),\r\n decoder_fn=decoder.decode,\r\n parser_fn=parser.parse_fn(params.is_training))\r\n dataset = reader.read(input_context=input_context)\r\n\r\n return dataset\r\n\r\n def build_attribute_loss(self,\r\n attribute_heads: List[exp_cfg.AttributeHead],\r\n outputs: Mapping[str, Any],\r\n labels: Mapping[str, Any],\r\n box_sample_weight: tf.Tensor) -> float:\r\n \"\"\"Computes attribute loss.\r\n\r\n Args:\r\n attribute_heads: a list of attribute head configs.\r\n outputs: RetinaNet model outputs.\r\n labels: RetinaNet labels.\r\n box_sample_weight: normalized bounding box sample weights.\r\n\r\n Returns:\r\n Attribute loss of all attribute heads.\r\n \"\"\"\r\n attribute_loss = 0.0\r\n for head in attribute_heads:\r\n if head.name not in labels['attribute_targets']:\r\n raise ValueError(f'Attribute {head.name} not found in label targets.')\r\n if head.name not in outputs['attribute_outputs']:\r\n raise ValueError(f'Attribute {head.name} not found in model outputs.')\r\n\r\n y_true_att = keras_cv.losses.multi_level_flatten(\r\n labels['attribute_targets'][head.name], last_dim=head.size)\r\n y_pred_att = keras_cv.losses.multi_level_flatten(\r\n outputs['attribute_outputs'][head.name], last_dim=head.size)\r\n if head.type == 'regression':\r\n att_loss_fn = tf.keras.losses.Huber(\r\n 1.0, reduction=tf.keras.losses.Reduction.SUM)\r\n att_loss = att_loss_fn(\r\n y_true=y_true_att,\r\n y_pred=y_pred_att,\r\n sample_weight=box_sample_weight)\r\n else:\r\n raise ValueError(f'Attribute type {head.type} not supported.')\r\n attribute_loss += att_loss\r\n\r\n return attribute_loss\r\n\r\n def build_losses(self,\r\n outputs: Mapping[str, Any],\r\n labels: Mapping[str, Any],\r\n aux_losses: Optional[Any] = None):\r\n \"\"\"Build RetinaNet losses.\"\"\"\r\n params = self.task_config\r\n attribute_heads = self.task_config.model.head.attribute_heads\r\n\r\n 
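# --- Added note: standalone sketch (not the task's own code path) of the
# sample-weight normalization used just below in build_losses: box/class
# weights are divided by (number of positive anchors + 1), so SUM-reduced
# losses behave like averages, and the +1 avoids division by zero on images
# with no positive anchors.
import tensorflow as tf

box_weights = tf.constant([1.0, 1.0, 0.0, 0.0])    # two positive anchors
num_positives = tf.reduce_sum(box_weights) + 1.0   # 3.0, never zero
box_sample_weight = box_weights / num_positives    # [1/3, 1/3, 0, 0]

huber = tf.keras.losses.Huber(1.0, reduction=tf.keras.losses.Reduction.SUM)
y_true = tf.constant([[0.1], [0.2], [0.0], [0.0]])
y_pred = tf.zeros([4, 1])
loss = huber(y_true, y_pred, sample_weight=box_sample_weight)  # scalar loss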
cls_loss_fn = keras_cv.losses.FocalLoss(\r\n alpha=params.losses.focal_loss_alpha,\r\n gamma=params.losses.focal_loss_gamma,\r\n reduction=tf.keras.losses.Reduction.SUM)\r\n box_loss_fn = tf.keras.losses.Huber(\r\n params.losses.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)\r\n\r\n # Sums all positives in a batch for normalization and avoids zero\r\n # num_positives_sum, which would lead to inf loss during training\r\n cls_sample_weight = labels['cls_weights']\r\n box_sample_weight = labels['box_weights']\r\n num_positives = tf.reduce_sum(box_sample_weight) + 1.0\r\n cls_sample_weight = cls_sample_weight / num_positives\r\n box_sample_weight = box_sample_weight / num_positives\r\n y_true_cls = keras_cv.losses.multi_level_flatten(\r\n labels['cls_targets'], last_dim=None)\r\n y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes)\r\n y_pred_cls = keras_cv.losses.multi_level_flatten(\r\n outputs['cls_outputs'], last_dim=params.model.num_classes)\r\n y_true_box = keras_cv.losses.multi_level_flatten(\r\n labels['box_targets'], last_dim=4)\r\n y_pred_box = keras_cv.losses.multi_level_flatten(\r\n outputs['box_outputs'], last_dim=4)\r\n\r\n cls_loss = cls_loss_fn(\r\n y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight)\r\n box_loss = box_loss_fn(\r\n y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight)\r\n\r\n model_loss = cls_loss + params.losses.box_loss_weight * box_loss\r\n\r\n if attribute_heads:\r\n model_loss += self.build_attribute_loss(attribute_heads, outputs, labels,\r\n box_sample_weight)\r\n\r\n total_loss = model_loss\r\n if aux_losses:\r\n reg_loss = tf.reduce_sum(aux_losses)\r\n total_loss = model_loss + reg_loss\r\n\r\n return total_loss, cls_loss, box_loss, model_loss\r\n\r\n def build_metrics(self, training: bool = True):\r\n \"\"\"Build detection metrics.\"\"\"\r\n metrics = []\r\n metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss']\r\n for name in metric_names:\r\n metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))\r\n\r\n if not training:\r\n if self.task_config.validation_data.tfds_name and self.task_config.annotation_file:\r\n raise ValueError(\r\n \"Can't evaluate using annotation file when TFDS is used.\")\r\n self.coco_metric = coco_evaluator.COCOEvaluator(\r\n annotation_file=self.task_config.annotation_file,\r\n include_mask=False,\r\n per_category_metrics=self.task_config.per_category_metrics)\r\n\r\n return metrics\r\n\r\n def train_step(self,\r\n inputs: Tuple[Any, Any],\r\n model: tf.keras.Model,\r\n optimizer: tf.keras.optimizers.Optimizer,\r\n metrics: Optional[List[Any]] = None):\r\n \"\"\"Does forward and backward.\r\n\r\n Args:\r\n inputs: a dictionary of input tensors.\r\n model: the model, forward pass definition.\r\n optimizer: the optimizer for this training step.\r\n metrics: a nested structure of metrics objects.\r\n\r\n Returns:\r\n A dictionary of logs.\r\n \"\"\"\r\n features, labels = inputs\r\n num_replicas = tf.distribute.get_strategy().num_replicas_in_sync\r\n with tf.GradientTape() as tape:\r\n outputs = model(features, training=True)\r\n outputs = tf.nest.map_structure(\r\n lambda x: tf.cast(x, tf.float32), outputs)\r\n\r\n # Computes per-replica loss.\r\n loss, cls_loss, box_loss, model_loss = self.build_losses(\r\n outputs=outputs, labels=labels, aux_losses=model.losses)\r\n scaled_loss = loss / num_replicas\r\n\r\n # For mixed_precision policy, when LossScaleOptimizer is used, loss is\r\n # scaled for numerical stability.\r\n if isinstance(optimizer, 
tf.keras.mixed_precision.LossScaleOptimizer):\r\n        scaled_loss = optimizer.get_scaled_loss(scaled_loss)\r\n\r\n    tvars = model.trainable_variables\r\n    grads = tape.gradient(scaled_loss, tvars)\r\n    # Scales back gradient when LossScaleOptimizer is used.\r\n    if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\r\n      grads = optimizer.get_unscaled_gradients(grads)\r\n    optimizer.apply_gradients(list(zip(grads, tvars)))\r\n\r\n    logs = {self.loss: loss}\r\n\r\n    all_losses = {\r\n        'total_loss': loss,\r\n        'cls_loss': cls_loss,\r\n        'box_loss': box_loss,\r\n        'model_loss': model_loss,\r\n    }\r\n    if metrics:\r\n      for m in metrics:\r\n        m.update_state(all_losses[m.name])\r\n        logs.update({m.name: m.result()})\r\n\r\n    return logs\r\n\r\n  def validation_step(self,\r\n                      inputs: Tuple[Any, Any],\r\n                      model: tf.keras.Model,\r\n                      metrics: Optional[List[Any]] = None):\r\n    \"\"\"Validation step.\r\n\r\n    Args:\r\n      inputs: a dictionary of input tensors.\r\n      model: the keras.Model.\r\n      metrics: a nested structure of metrics objects.\r\n\r\n    Returns:\r\n      A dictionary of logs.\r\n    \"\"\"\r\n    features, labels = inputs\r\n\r\n    outputs = model(features, anchor_boxes=labels['anchor_boxes'],\r\n                    image_shape=labels['image_info'][:, 1, :],\r\n                    training=False)\r\n    loss, cls_loss, box_loss, model_loss = self.build_losses(\r\n        outputs=outputs, labels=labels, aux_losses=model.losses)\r\n    logs = {self.loss: loss}\r\n\r\n    all_losses = {\r\n        'total_loss': loss,\r\n        'cls_loss': cls_loss,\r\n        'box_loss': box_loss,\r\n        'model_loss': model_loss,\r\n    }\r\n\r\n    coco_model_outputs = {\r\n        'detection_boxes': outputs['detection_boxes'],\r\n        'detection_scores': outputs['detection_scores'],\r\n        'detection_classes': outputs['detection_classes'],\r\n        'num_detections': outputs['num_detections'],\r\n        'source_id': labels['groundtruths']['source_id'],\r\n        'image_info': labels['image_info']\r\n    }\r\n    logs.update({self.coco_metric.name: (labels['groundtruths'],\r\n                                         coco_model_outputs)})\r\n    if metrics:\r\n      for m in metrics:\r\n        m.update_state(all_losses[m.name])\r\n        logs.update({m.name: m.result()})\r\n    return logs\r\n\r\n  def aggregate_logs(self, state=None, step_outputs=None):\r\n    if state is None:\r\n      self.coco_metric.reset_states()\r\n      state = self.coco_metric\r\n    self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],\r\n                                  step_outputs[self.coco_metric.name][1])\r\n    return state\r\n\r\n  def reduce_aggregated_logs(self, aggregated_logs, global_step=None):\r\n    return self.coco_metric.result()\r\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nfrom absl.testing import parameterized\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom official.nlp.modeling import models\r\nfrom official.nlp.modeling.networks import mobile_bert_encoder\r\n\r\n\r\ndef generate_fake_input(batch_size=1, seq_len=5, vocab_size=10000, seed=0):\r\n \"\"\"Generate consistent fake integer input sequences.\"\"\"\r\n np.random.seed(seed)\r\n fake_input = []\r\n for _ in range(batch_size):\r\n fake_input.append([])\r\n for _ in range(seq_len):\r\n fake_input[-1].append(np.random.randint(0, vocab_size))\r\n fake_input = np.asarray(fake_input)\r\n return fake_input\r\n\r\n\r\nclass MobileBertEncoderTest(parameterized.TestCase, tf.test.TestCase):\r\n\r\n @parameterized.named_parameters(\r\n ('default_setting', 'relu', True, 'no_norm', False),\r\n ('gelu', 'gelu', True, 'no_norm', False),\r\n ('kq_not_shared', 'relu', False, 'no_norm', False),\r\n ('layer_norm', 'relu', True, 'layer_norm', False),\r\n ('use_pooler', 'relu', True, 'no_norm', True),\r\n ('with_pooler_layer', 'relu', True, 'layer_norm', False))\r\n def test_mobilebert_encoder(self, act_fn, kq_shared_bottleneck,\r\n normalization_type, use_pooler):\r\n hidden_size = 32\r\n sequence_length = 16\r\n num_blocks = 3\r\n test_network = mobile_bert_encoder.MobileBERTEncoder(\r\n word_vocab_size=100,\r\n hidden_size=hidden_size,\r\n num_blocks=num_blocks,\r\n intermediate_act_fn=act_fn,\r\n key_query_shared_bottleneck=kq_shared_bottleneck,\r\n normalization_type=normalization_type,\r\n classifier_activation=use_pooler)\r\n\r\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n outputs = test_network([word_ids, mask, type_ids])\r\n layer_output, pooler_output = outputs['sequence_output'], outputs[\r\n 'pooled_output']\r\n\r\n self.assertIsInstance(test_network.transformer_layers, list)\r\n self.assertLen(test_network.transformer_layers, num_blocks)\r\n\r\n layer_output_shape = [None, sequence_length, hidden_size]\r\n self.assertAllEqual(layer_output.shape.as_list(), layer_output_shape)\r\n pooler_output_shape = [None, hidden_size]\r\n self.assertAllEqual(pooler_output.shape.as_list(), pooler_output_shape)\r\n self.assertAllEqual(tf.float32, layer_output.dtype)\r\n\r\n def test_mobilebert_encoder_return_all_layer_output(self):\r\n hidden_size = 32\r\n sequence_length = 16\r\n num_blocks = 3\r\n test_network = mobile_bert_encoder.MobileBERTEncoder(\r\n word_vocab_size=100,\r\n hidden_size=hidden_size,\r\n num_blocks=num_blocks)\r\n\r\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n outputs = test_network([word_ids, mask, type_ids])\r\n all_layer_output = 
outputs['encoder_outputs']\r\n\r\n self.assertIsInstance(all_layer_output, list)\r\n self.assertLen(all_layer_output, num_blocks + 1)\r\n\r\n @parameterized.parameters('int32', 'float32')\r\n def test_mobilebert_encoder_invocation(self, input_mask_dtype):\r\n vocab_size = 100\r\n hidden_size = 32\r\n sequence_length = 16\r\n num_blocks = 3\r\n test_network = mobile_bert_encoder.MobileBERTEncoder(\r\n word_vocab_size=vocab_size,\r\n hidden_size=hidden_size,\r\n num_blocks=num_blocks,\r\n input_mask_dtype=input_mask_dtype)\r\n\r\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n mask = tf.keras.Input(shape=(sequence_length,), dtype=input_mask_dtype)\r\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n outputs = test_network([word_ids, mask, type_ids])\r\n model = tf.keras.Model([word_ids, mask, type_ids], outputs)\r\n\r\n input_seq = generate_fake_input(\r\n batch_size=1, seq_len=sequence_length, vocab_size=vocab_size)\r\n input_mask = generate_fake_input(\r\n batch_size=1, seq_len=sequence_length, vocab_size=2)\r\n token_type = generate_fake_input(\r\n batch_size=1, seq_len=sequence_length, vocab_size=2)\r\n outputs = model.predict([input_seq, input_mask, token_type])\r\n\r\n sequence_output_shape = [1, sequence_length, hidden_size]\r\n self.assertAllEqual(outputs['sequence_output'].shape, sequence_output_shape)\r\n pooled_output_shape = [1, hidden_size]\r\n self.assertAllEqual(outputs['pooled_output'].shape, pooled_output_shape)\r\n\r\n def test_mobilebert_encoder_invocation_with_attention_score(self):\r\n vocab_size = 100\r\n hidden_size = 32\r\n sequence_length = 16\r\n num_blocks = 3\r\n test_network = mobile_bert_encoder.MobileBERTEncoder(\r\n word_vocab_size=vocab_size,\r\n hidden_size=hidden_size,\r\n num_blocks=num_blocks)\r\n\r\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n outputs = test_network([word_ids, mask, type_ids])\r\n model = tf.keras.Model([word_ids, mask, type_ids], outputs)\r\n\r\n input_seq = generate_fake_input(\r\n batch_size=1, seq_len=sequence_length, vocab_size=vocab_size)\r\n input_mask = generate_fake_input(\r\n batch_size=1, seq_len=sequence_length, vocab_size=2)\r\n token_type = generate_fake_input(\r\n batch_size=1, seq_len=sequence_length, vocab_size=2)\r\n outputs = model.predict([input_seq, input_mask, token_type])\r\n self.assertLen(outputs['attention_scores'], num_blocks)\r\n\r\n @parameterized.named_parameters(\r\n ('sequence_classification', models.BertClassifier, [None, 5]),\r\n ('token_classification', models.BertTokenClassifier, [None, 16, 5]))\r\n def test_mobilebert_encoder_for_downstream_task(self, task, prediction_shape):\r\n hidden_size = 32\r\n sequence_length = 16\r\n mobilebert_encoder = mobile_bert_encoder.MobileBERTEncoder(\r\n word_vocab_size=100, hidden_size=hidden_size)\r\n num_classes = 5\r\n classifier = task(network=mobilebert_encoder, num_classes=num_classes)\r\n\r\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n prediction = classifier([word_ids, mask, type_ids])\r\n if task == models.BertTokenClassifier:\r\n prediction = prediction['logits']\r\n self.assertAllEqual(prediction.shape.as_list(), prediction_shape)\r\n\r\n\r\nif __name__ == '__main__':\r\n 
tf.test.main()\r\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Tests for dual encoder network.\"\"\"\r\n\r\nfrom absl.testing import parameterized\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\r\nfrom official.nlp.modeling import networks\r\nfrom official.nlp.modeling.models import dual_encoder\r\n\r\n\r\n# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It\r\n# guarantees forward compatibility of this code for the V2 switchover.\r\n@keras_parameterized.run_all_keras_modes\r\nclass DualEncoderTest(keras_parameterized.TestCase):\r\n\r\n @parameterized.parameters((192, 'logits'), (768, 'predictions'))\r\n def test_dual_encoder(self, hidden_size, output):\r\n \"\"\"Validate that the Keras object can be created.\"\"\"\r\n # Build a transformer network to use within the dual encoder model.\r\n vocab_size = 100\r\n sequence_length = 512\r\n test_network = networks.BertEncoder(\r\n vocab_size=vocab_size,\r\n num_layers=2,\r\n hidden_size=hidden_size,\r\n sequence_length=sequence_length,\r\n dict_outputs=True)\r\n\r\n # Create a dual encoder model with the created network.\r\n dual_encoder_model = dual_encoder.DualEncoder(\r\n test_network, max_seq_length=sequence_length, output=output)\r\n\r\n # Create a set of 2-dimensional inputs (the first dimension is implicit).\r\n left_word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n left_mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n left_type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n\r\n right_word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n right_mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n right_type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\r\n\r\n if output == 'logits':\r\n outputs = dual_encoder_model([\r\n left_word_ids, left_mask, left_type_ids, right_word_ids, right_mask,\r\n right_type_ids\r\n ])\r\n _ = outputs['left_logits']\r\n elif output == 'predictions':\r\n outputs = dual_encoder_model([left_word_ids, left_mask, left_type_ids])\r\n # Validate that the outputs are of the expected shape.\r\n expected_sequence_shape = [None, sequence_length, 768]\r\n self.assertAllEqual(expected_sequence_shape,\r\n outputs['sequence_output'].shape.as_list())\r\n left_encoded = outputs['pooled_output']\r\n expected_encoding_shape = [None, 768]\r\n self.assertAllEqual(expected_encoding_shape, left_encoded.shape.as_list())\r\n\r\n @parameterized.parameters((192, 'logits'), (768, 'predictions'))\r\n def test_dual_encoder_tensor_call(self, hidden_size, output):\r\n \"\"\"Validate that the Keras object can be invoked.\"\"\"\r\n # Build a transformer network to use within the dual encoder model. 
(Here,\r\n    # we use a short sequence_length for convenience.)\r\n    sequence_length = 2\r\n    test_network = networks.BertEncoder(\r\n        vocab_size=100, num_layers=2, sequence_length=sequence_length)\r\n\r\n    # Create a dual encoder model with the created network.\r\n    dual_encoder_model = dual_encoder.DualEncoder(\r\n        test_network, max_seq_length=sequence_length, output=output)\r\n\r\n    # Create a set of 2-dimensional data tensors to feed into the model.\r\n    word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)\r\n    mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)\r\n    type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)\r\n\r\n    # Invoke the model on the tensors. In Eager mode, this does the\r\n    # actual calculation. (We can't validate the outputs, since the network is\r\n    # too complex: this simply ensures we're not hitting runtime errors.)\r\n    if output == 'logits':\r\n      _ = dual_encoder_model(\r\n          [word_ids, mask, type_ids, word_ids, mask, type_ids])\r\n    elif output == 'predictions':\r\n      _ = dual_encoder_model([word_ids, mask, type_ids])\r\n\r\n  def test_serialize_deserialize(self):\r\n    \"\"\"Validate that the dual encoder model can be serialized / deserialized.\"\"\"\r\n    # Build a transformer network to use within the dual encoder model. (Here,\r\n    # we use a short sequence_length for convenience.)\r\n    sequence_length = 32\r\n    test_network = networks.BertEncoder(\r\n        vocab_size=100, num_layers=2, sequence_length=sequence_length)\r\n\r\n    # Create a dual encoder model with the created network. (Note that all the\r\n    # args are different, so we can catch any serialization mismatches.)\r\n    dual_encoder_model = dual_encoder.DualEncoder(\r\n        test_network, max_seq_length=sequence_length, output='predictions')\r\n\r\n    # Create another dual encoder model via serialization and deserialization.\r\n    config = dual_encoder_model.get_config()\r\n    new_dual_encoder = dual_encoder.DualEncoder.from_config(config)\r\n\r\n    # Validate that the config can be forced to JSON.\r\n    _ = new_dual_encoder.to_json()\r\n\r\n    # If the serialization was successful, the new config should match the old.\r\n    self.assertAllEqual(dual_encoder_model.get_config(),\r\n                        new_dual_encoder.get_config())\r\n\r\n\r\nif __name__ == '__main__':\r\n  tf.test.main()\r\n", "# Lint as: python2, python3\r\n# Copyright 2019 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Utility functions to set up unit tests on Panoptic Segmentation code.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\n\r\nfrom absl import flags\r\nimport numpy as np\r\nimport scipy.misc\r\nimport six\r\nfrom six.moves import map\r\n\r\nFLAGS = flags.FLAGS\r\n\r\n_TEST_DIR = 'deeplab/evaluation/testdata'\r\n\r\n\r\ndef read_test_image(testdata_path, *args, **kwargs):\r\n  \"\"\"Loads a test 
image.\r\n\r\n  Args:\r\n    testdata_path: Image path relative to panoptic_segmentation/testdata as a\r\n      string.\r\n    *args: Additional positional arguments passed to `imread`.\r\n    **kwargs: Additional keyword arguments passed to `imread`.\r\n\r\n  Returns:\r\n    The image, as a numpy array.\r\n  \"\"\"\r\n  image_path = os.path.join(_TEST_DIR, testdata_path)\r\n  return scipy.misc.imread(image_path, *args, **kwargs)\r\n\r\n\r\ndef read_segmentation_with_rgb_color_map(image_testdata_path,\r\n                                         rgb_to_semantic_label,\r\n                                         output_dtype=None):\r\n  \"\"\"Reads a test segmentation as an image and a map from colors to labels.\r\n\r\n  Args:\r\n    image_testdata_path: Image path relative to panoptic_segmentation/testdata\r\n      as a string.\r\n    rgb_to_semantic_label: Mapping from RGB colors to integer labels as a\r\n      dictionary.\r\n    output_dtype: Type of the output labels. If None, defaults to the type of\r\n      the provided color map.\r\n\r\n  Returns:\r\n    A 2D numpy array of labels.\r\n\r\n  Raises:\r\n    ValueError: On an incomplete `rgb_to_semantic_label`.\r\n  \"\"\"\r\n  rgb_image = read_test_image(image_testdata_path, mode='RGB')\r\n  if len(rgb_image.shape) != 3 or rgb_image.shape[2] != 3:\r\n    raise AssertionError(\r\n        'Expected RGB image, actual shape is %s' % rgb_image.shape)\r\n\r\n  num_pixels = rgb_image.shape[0] * rgb_image.shape[1]\r\n  unique_colors = np.unique(np.reshape(rgb_image, [num_pixels, 3]), axis=0)\r\n  if not set(map(tuple, unique_colors)).issubset(\r\n      six.viewkeys(rgb_to_semantic_label)):\r\n    raise ValueError('RGB image has colors not in color map.')\r\n\r\n  output_dtype = output_dtype or type(\r\n      next(six.itervalues(rgb_to_semantic_label)))\r\n  output_labels = np.empty(rgb_image.shape[:2], dtype=output_dtype)\r\n  for rgb_color, int_label in six.iteritems(rgb_to_semantic_label):\r\n    color_array = np.array(rgb_color, ndmin=3)\r\n    output_labels[np.all(rgb_image == color_array, axis=2)] = int_label\r\n  return output_labels\r\n\r\n\r\n
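# --- Added note: illustrative sketch (hypothetical colors/labels) of the
# color-map lookup in read_segmentation_with_rgb_color_map above: each RGB
# color is compared against every pixel with np.all(..., axis=2) and the
# matching pixels receive that color's integer label.
import numpy as np

rgb_image = np.array([[[255, 0, 0], [0, 255, 0]],
                      [[0, 255, 0], [255, 0, 0]]], dtype=np.uint8)
rgb_to_semantic_label = {(255, 0, 0): 1, (0, 255, 0): 2}

output_labels = np.empty(rgb_image.shape[:2], dtype=np.int32)
for rgb_color, int_label in rgb_to_semantic_label.items():
  color_array = np.array(rgb_color, ndmin=3)  # shape (1, 1, 3), broadcasts
  output_labels[np.all(rgb_image == color_array, axis=2)] = int_label
# output_labels is now [[1, 2], [2, 1]]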
def panoptic_segmentation_with_class_map(instance_testdata_path,\r\n                                         instance_label_to_semantic_label):\r\n  \"\"\"Reads in a panoptic segmentation with an instance map and a map to classes.\r\n\r\n  Args:\r\n    instance_testdata_path: Path to a grayscale instance map, given as a string\r\n      and relative to panoptic_segmentation/testdata.\r\n    instance_label_to_semantic_label: A map from instance labels to class\r\n      labels.\r\n\r\n  Returns:\r\n    A tuple `(instance_labels, class_labels)` of numpy arrays.\r\n\r\n  Raises:\r\n    ValueError: On a mismatched set of instances in the\r\n      `instance_label_to_semantic_label`.\r\n  \"\"\"\r\n  instance_labels = read_test_image(instance_testdata_path, mode='L')\r\n  if set(np.unique(instance_labels)) != set(\r\n      six.iterkeys(instance_label_to_semantic_label)):\r\n    raise ValueError('Provided class map does not match present instance ids.')\r\n\r\n  class_labels = np.empty_like(instance_labels)\r\n  for instance_id, class_id in six.iteritems(instance_label_to_semantic_label):\r\n    class_labels[instance_labels == instance_id] = class_id\r\n\r\n  return instance_labels, class_labels\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Contains a variant of the LeNet model definition.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow.compat.v1 as tf\r\nimport tf_slim as slim\r\n\r\n\r\ndef lenet(images, num_classes=10, is_training=False,\r\n          dropout_keep_prob=0.5,\r\n          prediction_fn=slim.softmax,\r\n          scope='LeNet'):\r\n  \"\"\"Creates a variant of the LeNet model.\r\n\r\n  Note that since the output is a set of 'logits', the values fall in the\r\n  interval of (-infinity, infinity). Consequently, to convert the outputs to a\r\n  probability distribution over the characters, one will need to convert them\r\n  using the softmax function:\r\n\r\n        logits = lenet.lenet(images, is_training=False)\r\n        probabilities = tf.nn.softmax(logits)\r\n        predictions = tf.argmax(logits, 1)\r\n\r\n  Args:\r\n    images: A batch of `Tensors` of size [batch_size, height, width, channels].\r\n    num_classes: the number of classes in the dataset. If 0 or None, the logits\r\n      layer is omitted and the input features to the logits layer are returned\r\n      instead.\r\n    is_training: specifies whether or not we're currently training the model.\r\n      This variable will determine the behaviour of the dropout layer.\r\n    dropout_keep_prob: the percentage of activation values that are retained.\r\n    prediction_fn: a function to get predictions out of logits.\r\n    scope: Optional variable_scope.\r\n\r\n  Returns:\r\n    net: a 2D Tensor with the logits (pre-softmax activations) if num_classes\r\n      is a non-zero integer, or the non-dropped-out input to the logits layer\r\n      if num_classes is 0 or None.\r\n    end_points: a dictionary from components of the network to the corresponding\r\n      activation.\r\n  \"\"\"\r\n  end_points = {}\r\n\r\n  with tf.variable_scope(scope, 'LeNet', [images]):\r\n    net = end_points['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1')\r\n    net = end_points['pool1'] = slim.max_pool2d(net, [2, 2], 2, scope='pool1')\r\n    net = end_points['conv2'] = slim.conv2d(net, 64, [5, 5], scope='conv2')\r\n    net = end_points['pool2'] = slim.max_pool2d(net, [2, 2], 2, scope='pool2')\r\n    net = slim.flatten(net)\r\n    end_points['Flatten'] = net\r\n\r\n    net = end_points['fc3'] = slim.fully_connected(net, 1024, scope='fc3')\r\n    if not num_classes:\r\n      return net, end_points\r\n    net = end_points['dropout3'] = slim.dropout(\r\n        net, dropout_keep_prob, is_training=is_training, scope='dropout3')\r\n    logits = end_points['Logits'] = slim.fully_connected(\r\n        net, num_classes, activation_fn=None, scope='fc4')\r\n\r\n  end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\r\n\r\n  return logits, end_points\r\nlenet.default_image_size = 28\r\n\r\n\r\n
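# --- Added note: shape walk-through (standalone sketch, using the lenet
# function above) for the default 28x28 single-channel input:
#   conv1 5x5x32 (SAME) -> 28x28x32; pool1 2x2/2 -> 14x14x32
#   conv2 5x5x64 (SAME) -> 14x14x64; pool2 2x2/2 -> 7x7x64
#   flatten -> 3136; fc3 -> 1024; dropout; fc4 -> num_classes logits
with tf.Graph().as_default():
  images = tf.placeholder(tf.float32, [None, 28, 28, 1])
  logits, end_points = lenet(images, num_classes=10, is_training=True)
  probabilities = tf.nn.softmax(logits)   # as the docstring above suggests
  assert logits.shape.as_list() == [None, 10]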
def lenet_arg_scope(weight_decay=0.0):\r\n  \"\"\"Defines the default lenet argument scope.\r\n\r\n  Args:\r\n    weight_decay: The weight decay to use for regularizing the model.\r\n\r\n  Returns:\r\n    An `arg_scope` to use for the lenet model.\r\n  \"\"\"\r\n  with slim.arg_scope(\r\n      [slim.conv2d, slim.fully_connected],\r\n      weights_regularizer=slim.l2_regularizer(weight_decay),\r\n      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),\r\n      activation_fn=tf.nn.relu) as sc:\r\n    return sc\r\n", "# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n# ==============================================================================\r\nfrom __future__ import print_function\r\n\r\nimport h5py\r\nimport numpy as np\r\nimport os\r\nfrom six.moves import xrange\r\nimport tensorflow as tf\r\n\r\nfrom utils import write_datasets\r\nfrom synthetic_data_utils import normalize_rates\r\nfrom synthetic_data_utils import get_train_n_valid_inds, nparray_and_transpose\r\nfrom synthetic_data_utils import spikify_data, split_list_by_inds\r\n\r\nDATA_DIR = \"rnn_synth_data_v1.0\"\r\n\r\nflags = tf.app.flags\r\nflags.DEFINE_string(\"save_dir\", \"/tmp/\" + DATA_DIR + \"/\",\r\n                    \"Directory for saving data.\")\r\nflags.DEFINE_string(\"datafile_name\", \"itb_rnn\",\r\n                    \"Name of data file for input case.\")\r\nflags.DEFINE_integer(\"synth_data_seed\", 5, \"Random seed for RNN generation.\")\r\nflags.DEFINE_float(\"T\", 1.0, \"Time in seconds to generate.\")\r\nflags.DEFINE_integer(\"C\", 800, \"Number of conditions\")\r\nflags.DEFINE_integer(\"N\", 50, \"Number of units for the RNN\")\r\nflags.DEFINE_float(\"train_percentage\", 4.0/5.0,\r\n                   \"Percentage of train vs validation trials\")\r\nflags.DEFINE_integer(\"nreplications\", 5,\r\n                     \"Number of spikifications of the same underlying rates.\")\r\nflags.DEFINE_float(\"tau\", 0.025, \"Time constant of RNN\")\r\nflags.DEFINE_float(\"dt\", 0.010, \"Time bin\")\r\nflags.DEFINE_float(\"max_firing_rate\", 30.0,\r\n                   \"Map 1.0 of RNN to a spikes per second\")\r\nflags.DEFINE_float(\"u_std\", 0.25,\r\n                   \"Std dev of input to integration to bound model\")\r\nflags.DEFINE_string(\"checkpoint_path\", \"SAMPLE_CHECKPOINT\",\r\n                    \"\"\"Path to directory with checkpoints of model\r\n                    trained on integration to bound task. Currently this\r\n                    is a placeholder which tells the code to grab the\r\n                    checkpoint that is provided with the code\r\n                    (in /trained_itb/..). 
If you have your own checkpoint\r\n you would like to restore, you would point it to\r\n that path.\"\"\")\r\nFLAGS = flags.FLAGS\r\n\r\n\r\nclass IntegrationToBoundModel:\r\n def __init__(self, N):\r\n scale = 0.8 / float(N**0.5)\r\n self.N = N\r\n self.Wh_nxn = tf.Variable(tf.random_normal([N, N], stddev=scale))\r\n self.b_1xn = tf.Variable(tf.zeros([1, N]))\r\n self.Bu_1xn = tf.Variable(tf.zeros([1, N]))\r\n self.Wro_nxo = tf.Variable(tf.random_normal([N, 1], stddev=scale))\r\n self.bro_o = tf.Variable(tf.zeros([1]))\r\n\r\n def call(self, h_tm1_bxn, u_bx1):\r\n act_t_bxn = tf.matmul(h_tm1_bxn, self.Wh_nxn) + self.b_1xn + u_bx1 * self.Bu_1xn\r\n h_t_bxn = tf.nn.tanh(act_t_bxn)\r\n z_t = tf.nn.xw_plus_b(h_t_bxn, self.Wro_nxo, self.bro_o)\r\n return z_t, h_t_bxn\r\n\r\ndef get_data_batch(batch_size, T, rng, u_std):\r\n u_bxt = rng.randn(batch_size, T) * u_std\r\n running_sum_b = np.zeros([batch_size])\r\n labels_bxt = np.zeros([batch_size, T])\r\n for t in xrange(T):\r\n running_sum_b += u_bxt[:, t]\r\n labels_bxt[:, t] += running_sum_b\r\n labels_bxt = np.clip(labels_bxt, -1, 1)\r\n return u_bxt, labels_bxt\r\n\r\n\r\nrng = np.random.RandomState(seed=FLAGS.synth_data_seed)\r\nu_rng = np.random.RandomState(seed=FLAGS.synth_data_seed+1)\r\nT = FLAGS.T\r\nC = FLAGS.C\r\nN = FLAGS.N # must be same N as in trained model (provided example is N = 50)\r\nnreplications = FLAGS.nreplications\r\nE = nreplications * C # total number of trials\r\ntrain_percentage = FLAGS.train_percentage\r\nntimesteps = int(T / FLAGS.dt)\r\nbatch_size = 1 # gives one example per ntrial\r\n\r\nmodel = IntegrationToBoundModel(N)\r\ninputs_ph_t = [tf.placeholder(tf.float32,\r\n shape=[None, 1]) for _ in range(ntimesteps)]\r\nstate = tf.zeros([batch_size, N])\r\nsaver = tf.train.Saver()\r\n\r\nP_nxn = rng.randn(N,N) / np.sqrt(N) # random projections\r\n\r\n# unroll RNN for T timesteps\r\noutputs_t = []\r\nstates_t = []\r\n\r\nfor inp in inputs_ph_t:\r\n output, state = model.call(state, inp)\r\n outputs_t.append(output)\r\n states_t.append(state)\r\n\r\nwith tf.Session() as sess:\r\n # restore the latest model ckpt\r\n if FLAGS.checkpoint_path == \"SAMPLE_CHECKPOINT\":\r\n dir_path = os.path.dirname(os.path.realpath(__file__))\r\n model_checkpoint_path = os.path.join(dir_path, \"trained_itb/model-65000\")\r\n else:\r\n model_checkpoint_path = FLAGS.checkpoint_path\r\n try:\r\n saver.restore(sess, model_checkpoint_path)\r\n print ('Model restored from', model_checkpoint_path)\r\n except:\r\n assert False, (\"No checkpoints to restore from, is the path %s correct?\"\r\n %model_checkpoint_path)\r\n\r\n # generate data for trials\r\n data_e = []\r\n u_e = []\r\n outs_e = []\r\n for c in range(C):\r\n u_1xt, outs_1xt = get_data_batch(batch_size, ntimesteps, u_rng, FLAGS.u_std)\r\n\r\n feed_dict = {}\r\n for t in xrange(ntimesteps):\r\n feed_dict[inputs_ph_t[t]] = np.reshape(u_1xt[:,t], (batch_size,-1))\r\n\r\n states_t_bxn, outputs_t_bxn = sess.run([states_t, outputs_t],\r\n feed_dict=feed_dict)\r\n states_nxt = np.transpose(np.squeeze(np.asarray(states_t_bxn)))\r\n outputs_t_bxn = np.squeeze(np.asarray(outputs_t_bxn))\r\n r_sxt = np.dot(P_nxn, states_nxt)\r\n\r\n for s in xrange(nreplications):\r\n data_e.append(r_sxt)\r\n u_e.append(u_1xt)\r\n outs_e.append(outputs_t_bxn)\r\n\r\n truth_data_e = normalize_rates(data_e, E, N)\r\n\r\nspiking_data_e = spikify_data(truth_data_e, rng, dt=FLAGS.dt,\r\n max_firing_rate=FLAGS.max_firing_rate)\r\ntrain_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,\r\n 
nreplications)\r\n\r\ndata_train_truth, data_valid_truth = split_list_by_inds(truth_data_e,\r\n train_inds,\r\n valid_inds)\r\ndata_train_spiking, data_valid_spiking = split_list_by_inds(spiking_data_e,\r\n train_inds,\r\n valid_inds)\r\n\r\ndata_train_truth = nparray_and_transpose(data_train_truth)\r\ndata_valid_truth = nparray_and_transpose(data_valid_truth)\r\ndata_train_spiking = nparray_and_transpose(data_train_spiking)\r\ndata_valid_spiking = nparray_and_transpose(data_valid_spiking)\r\n\r\n# save down the inputs used to generate this data\r\ntrain_inputs_u, valid_inputs_u = split_list_by_inds(u_e,\r\n train_inds,\r\n valid_inds)\r\ntrain_inputs_u = nparray_and_transpose(train_inputs_u)\r\nvalid_inputs_u = nparray_and_transpose(valid_inputs_u)\r\n\r\n# save down the network outputs (may be useful later)\r\ntrain_outputs_u, valid_outputs_u = split_list_by_inds(outs_e,\r\n train_inds,\r\n valid_inds)\r\ntrain_outputs_u = np.array(train_outputs_u)\r\nvalid_outputs_u = np.array(valid_outputs_u)\r\n\r\n\r\ndata = { 'train_truth': data_train_truth,\r\n 'valid_truth': data_valid_truth,\r\n 'train_data' : data_train_spiking,\r\n 'valid_data' : data_valid_spiking,\r\n 'train_percentage' : train_percentage,\r\n 'nreplications' : nreplications,\r\n 'dt' : FLAGS.dt,\r\n 'u_std' : FLAGS.u_std,\r\n 'max_firing_rate': FLAGS.max_firing_rate,\r\n 'train_inputs_u': train_inputs_u,\r\n 'valid_inputs_u': valid_inputs_u,\r\n 'train_outputs_u': train_outputs_u,\r\n 'valid_outputs_u': valid_outputs_u,\r\n 'conversion_factor' : FLAGS.max_firing_rate/(1.0/FLAGS.dt) }\r\n\r\n# just one dataset here\r\ndatasets = {}\r\ndataset_name = 'dataset_N' + str(N)\r\ndatasets[dataset_name] = data\r\n\r\n# write out the dataset\r\nwrite_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)\r\nprint ('Saved to ', os.path.join(FLAGS.save_dir,\r\n FLAGS.datafile_name + '_' + dataset_name))\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Tests for object_detection.predictors.heads.box_head.\"\"\"\r\nimport unittest\r\nimport tensorflow.compat.v1 as tf\r\n\r\nfrom google.protobuf import text_format\r\nfrom object_detection.builders import hyperparams_builder\r\nfrom object_detection.predictors.heads import keras_box_head\r\nfrom object_detection.protos import hyperparams_pb2\r\nfrom object_detection.utils import test_case\r\nfrom object_detection.utils import tf_version\r\n\r\n\r\n@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass ConvolutionalKerasBoxHeadTest(test_case.TestCase):\r\n\r\n  def _build_conv_hyperparams(self):\r\n    conv_hyperparams = hyperparams_pb2.Hyperparams()\r\n    conv_hyperparams_text_proto = \"\"\"\r\n      activation: NONE\r\n      regularizer {\r\n        l2_regularizer {\r\n        }\r\n      }\r\n      initializer {\r\n        truncated_normal_initializer {\r\n        }\r\n      }\r\n    \"\"\"\r\n    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\r\n    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)\r\n\r\n  def test_prediction_size_depthwise_false(self):\r\n    conv_hyperparams = self._build_conv_hyperparams()\r\n    box_prediction_head = keras_box_head.ConvolutionalBoxHead(\r\n        is_training=True,\r\n        box_code_size=4,\r\n        kernel_size=3,\r\n        conv_hyperparams=conv_hyperparams,\r\n        freeze_batchnorm=False,\r\n        num_predictions_per_location=1,\r\n        use_depthwise=False)\r\n    def graph_fn():\r\n      image_feature = tf.random_uniform(\r\n          [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n      box_encodings = box_prediction_head(image_feature)\r\n      return box_encodings\r\n    box_encodings = self.execute(graph_fn, [])\r\n    self.assertAllEqual([64, 323, 1, 4], box_encodings.shape)\r\n\r\n  def test_prediction_size_depthwise_true(self):\r\n    conv_hyperparams = self._build_conv_hyperparams()\r\n    box_prediction_head = keras_box_head.ConvolutionalBoxHead(\r\n        is_training=True,\r\n        box_code_size=4,\r\n        kernel_size=3,\r\n        conv_hyperparams=conv_hyperparams,\r\n        freeze_batchnorm=False,\r\n        num_predictions_per_location=1,\r\n        use_depthwise=True)\r\n    def graph_fn():\r\n      image_feature = tf.random_uniform(\r\n          [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n      box_encodings = box_prediction_head(image_feature)\r\n      return box_encodings\r\n    box_encodings = self.execute(graph_fn, [])\r\n    self.assertAllEqual([64, 323, 1, 4], box_encodings.shape)\r\n\r\n\r\n@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass MaskRCNNKerasBoxHeadTest(test_case.TestCase):\r\n\r\n  def _build_fc_hyperparams(\r\n      self, op_type=hyperparams_pb2.Hyperparams.FC):\r\n    hyperparams = hyperparams_pb2.Hyperparams()\r\n    hyperparams_text_proto = \"\"\"\r\n      activation: NONE\r\n      regularizer {\r\n        l2_regularizer {\r\n        }\r\n      }\r\n      initializer {\r\n        truncated_normal_initializer {\r\n        }\r\n      }\r\n    \"\"\"\r\n    
text_format.Merge(hyperparams_text_proto, hyperparams)\r\n    hyperparams.op = op_type\r\n    return hyperparams_builder.KerasLayerHyperparams(hyperparams)\r\n\r\n  def test_prediction_size(self):\r\n    box_prediction_head = keras_box_head.MaskRCNNBoxHead(\r\n        is_training=False,\r\n        num_classes=20,\r\n        fc_hyperparams=self._build_fc_hyperparams(),\r\n        freeze_batchnorm=False,\r\n        use_dropout=True,\r\n        dropout_keep_prob=0.5,\r\n        box_code_size=4,\r\n        share_box_across_classes=False)\r\n    def graph_fn():\r\n      roi_pooled_features = tf.random_uniform(\r\n          [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n      prediction = box_prediction_head(roi_pooled_features)\r\n      return prediction\r\n    prediction = self.execute(graph_fn, [])\r\n    self.assertAllEqual([64, 1, 20, 4], prediction.shape)\r\n\r\n\r\n@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass WeightSharedConvolutionalKerasBoxHead(test_case.TestCase):\r\n\r\n  def _build_conv_hyperparams(self):\r\n    conv_hyperparams = hyperparams_pb2.Hyperparams()\r\n    conv_hyperparams_text_proto = \"\"\"\r\n      activation: NONE\r\n      regularizer {\r\n        l2_regularizer {\r\n        }\r\n      }\r\n      initializer {\r\n        truncated_normal_initializer {\r\n        }\r\n      }\r\n    \"\"\"\r\n    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\r\n    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)\r\n\r\n  def test_prediction_size_depthwise_false(self):\r\n    conv_hyperparams = self._build_conv_hyperparams()\r\n    box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(\r\n        box_code_size=4,\r\n        conv_hyperparams=conv_hyperparams,\r\n        num_predictions_per_location=1,\r\n        use_depthwise=False)\r\n    def graph_fn():\r\n      image_feature = tf.random_uniform(\r\n          [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n      box_encodings = box_prediction_head(image_feature)\r\n      return box_encodings\r\n    box_encodings = self.execute(graph_fn, [])\r\n    self.assertAllEqual([64, 323, 4], box_encodings.shape)\r\n\r\n  def test_prediction_size_depthwise_true(self):\r\n    conv_hyperparams = self._build_conv_hyperparams()\r\n    box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(\r\n        box_code_size=4,\r\n        conv_hyperparams=conv_hyperparams,\r\n        num_predictions_per_location=1,\r\n        use_depthwise=True)\r\n    def graph_fn():\r\n      image_feature = tf.random_uniform(\r\n          [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n      box_encodings = box_prediction_head(image_feature)\r\n      return box_encodings\r\n    box_encodings = self.execute(graph_fn, [])\r\n    self.assertAllEqual([64, 323, 4], box_encodings.shape)\r\n\r\n  def test_variable_count_depth_wise_true(self):\r\n    conv_hyperparams = self._build_conv_hyperparams()\r\n    box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(\r\n        box_code_size=4,\r\n        conv_hyperparams=conv_hyperparams,\r\n        num_predictions_per_location=1,\r\n        use_depthwise=True)\r\n    image_feature = tf.random_uniform(\r\n        [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n    box_prediction_head(image_feature)\r\n    self.assertEqual(len(box_prediction_head.variables), 3)\r\n\r\n  def test_variable_count_depth_wise_False(self):\r\n    conv_hyperparams = self._build_conv_hyperparams()\r\n    box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(\r\n        box_code_size=4,\r\n        conv_hyperparams=conv_hyperparams,\r\n        num_predictions_per_location=1,\r\n        use_depthwise=False)\r\n    image_feature = tf.random_uniform(\r\n        [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\r\n    box_prediction_head(image_feature)\r\n    self.assertEqual(len(box_prediction_head.variables), 2)\r\n\r\n\r\n
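# --- Added note: standalone tf.keras illustration (not the head's own code)
# of the variable counts asserted above: a depthwise-separable conv carries
# three variables (depthwise kernel, pointwise kernel, bias) versus two
# (kernel, bias) for a plain conv.
import tensorflow as tf

sep = tf.keras.layers.SeparableConv2D(4, 3, padding='same')
sep.build((None, 17, 19, 1024))
assert len(sep.variables) == 3

conv = tf.keras.layers.Conv2D(4, 3, padding='same')
conv.build((None, 17, 19, 1024))
assert len(conv.variables) == 2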
box_prediction_head(image_feature)\r\n self.assertEqual(len(box_prediction_head.variables), 2)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Mobilenet Base Class.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nimport collections\r\nimport contextlib\r\nimport copy\r\nimport os\r\n\r\nimport tensorflow.compat.v1 as tf\r\nimport tf_slim as slim\r\n\r\n\r\[email protected]_arg_scope\r\ndef apply_activation(x, name=None, activation_fn=None):\r\n return activation_fn(x, name=name) if activation_fn else x\r\n\r\n\r\ndef _fixed_padding(inputs, kernel_size, rate=1):\r\n \"\"\"Pads the input along the spatial dimensions independently of input size.\r\n\r\n Pads the input such that if it was used in a convolution with 'VALID' padding,\r\n the output would have the same dimensions as if the unpadded input was used\r\n in a convolution with 'SAME' padding.\r\n\r\n Args:\r\n inputs: A tensor of size [batch, height_in, width_in, channels].\r\n kernel_size: The kernel to be used in the conv2d or max_pool2d operation.\r\n rate: An integer, rate for atrous convolution.\r\n\r\n Returns:\r\n output: A tensor of size [batch, height_out, width_out, channels] with the\r\n input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).\r\n \"\"\"\r\n kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),\r\n kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]\r\n pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]\r\n pad_beg = [pad_total[0] // 2, pad_total[1] // 2]\r\n pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]\r\n padded_inputs = tf.pad(\r\n tensor=inputs,\r\n paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],\r\n [0, 0]])\r\n return padded_inputs\r\n\r\n\r\ndef _make_divisible(v, divisor, min_value=None):\r\n if min_value is None:\r\n min_value = divisor\r\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\r\n # Make sure that round down does not go down by more than 10%.\r\n if new_v < 0.9 * v:\r\n new_v += divisor\r\n return int(new_v)\r\n\r\n\r\[email protected]\r\ndef _set_arg_scope_defaults(defaults):\r\n \"\"\"Sets arg scope defaults for all items present in defaults.\r\n\r\n Args:\r\n defaults: dictionary/list of pairs, containing a mapping from\r\n function to a dictionary of default args.\r\n\r\n Yields:\r\n context manager where all defaults are set.\r\n \"\"\"\r\n if hasattr(defaults, 'items'):\r\n items = list(defaults.items())\r\n else:\r\n items = defaults\r\n if not items:\r\n yield\r\n else:\r\n func, default_arg = items[0]\r\n with slim.arg_scope(func, **default_arg):\r\n with _set_arg_scope_defaults(items[1:]):\r\n yield\r\n\r\n\r\[email 
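# A quick sketch (not part of the test suite) of the shape arithmetic the
# assertions above rely on: with a 17x19 feature map and one prediction per
# spatial location, every location emits a single box encoding.
#
#   num_locations = 17 * 19              # = 323
#   ConvolutionalBoxHead             -> [batch, 323, 1, box_code_size]
#   WeightSharedConvolutionalBoxHead -> [batch, 323, box_code_size]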
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os

import tensorflow.compat.v1 as tf
import tf_slim as slim


@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
  return activation_fn(x, name=name) if activation_fn else x


def _fixed_padding(inputs, kernel_size, rate=1):
  """Pads the input along the spatial dimensions independently of input size.

  Pads the input such that if it was used in a convolution with 'VALID'
  padding, the output would have the same dimensions as if the unpadded input
  was used in a convolution with 'SAME' padding.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
    rate: An integer, rate for atrous convolution.

  Returns:
    output: A tensor of size [batch, height_out, width_out, channels] with the
      input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
  pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
  pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
  pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
  padded_inputs = tf.pad(
      tensor=inputs,
      paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
                [0, 0]])
  return padded_inputs


def _make_divisible(v, divisor, min_value=None):
  """Rounds `v` to a multiple of `divisor`, never going below `min_value`."""
  if min_value is None:
    min_value = divisor
  new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
  # Make sure that rounding down does not go down by more than 10%.
  if new_v < 0.9 * v:
    new_v += divisor
  return int(new_v)


@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
  """Sets arg scope defaults for all items present in defaults.

  Args:
    defaults: dictionary/list of pairs, containing a mapping from
      function to a dictionary of default args.

  Yields:
    context manager where all defaults are set.
  """
  if hasattr(defaults, 'items'):
    items = list(defaults.items())
  else:
    items = defaults
  if not items:
    yield
  else:
    func, default_arg = items[0]
    with slim.arg_scope(func, **default_arg):
      with _set_arg_scope_defaults(items[1:]):
        yield


@slim.add_arg_scope
def depth_multiplier(output_params,
                     multiplier,
                     divisible_by=8,
                     min_depth=8,
                     **unused_kwargs):
  """Scales 'num_outputs' in `output_params` by `multiplier`, in place."""
  if 'num_outputs' not in output_params:
    return
  d = output_params['num_outputs']
  output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
                                                 min_depth)


_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])


def op(opfunc, multiplier_func=depth_multiplier, **params):
  multiplier = params.pop('multiplier_transform', multiplier_func)
  return _Op(opfunc, params=params, multiplier_func=multiplier)


class NoOpScope(object):
  """No-op context manager."""

  def __enter__(self):
    return None

  def __exit__(self, exc_type, exc_value, traceback):
    return False


def safe_arg_scope(funcs, **kwargs):
  """Returns `slim.arg_scope` with all None arguments removed.

  Args:
    funcs: Functions to pass to `arg_scope`.
    **kwargs: Arguments to pass to `arg_scope`.

  Returns:
    arg_scope or No-op context manager.

  Note: can be useful if a None value should be interpreted as "do not
  overwrite this parameter value".
  """
  filtered_args = {name: value for name, value in kwargs.items()
                   if value is not None}
  if filtered_args:
    return slim.arg_scope(funcs, **filtered_args)
  else:
    return NoOpScope()
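def _example_depth_multiplier_sketch():
  """Illustrative only: how an op spec interacts with depth_multiplier.

  The layer parameters here are hypothetical, not taken from a real conv_defs.
  """
  spec = op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3])
  params = dict(spec.params)
  spec.multiplier_func(params, 0.5)    # scales the channel count in place
  # _make_divisible(32 * 0.5, 8) == 16, so the scaled layer keeps 16 outputs.
  assert params['num_outputs'] == 16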
@slim.add_arg_scope
def mobilenet_base(  # pylint: disable=invalid-name
    inputs,
    conv_defs,
    multiplier=1.0,
    final_endpoint=None,
    output_stride=None,
    use_explicit_padding=False,
    scope=None,
    is_training=False):
  """Mobilenet base network.

  Constructs a network from inputs to the given final endpoint. By default
  the network is constructed in inference mode. To create a network in
  training mode use:

    with slim.arg_scope(mobilenet.training_scope()):
      logits, endpoints = mobilenet_base(...)

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    conv_defs: A list of op(...) layers specifying the net architecture.
    multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    final_endpoint: The name of the last layer, for early termination. For
      V1-based networks the last layer is "layer_14"; for V2 it is "layer_20".
    output_stride: An integer that specifies the requested ratio of input to
      output spatial resolution. If not None, then we invoke atrous
      convolution if necessary to prevent the network from reducing the
      spatial resolution of the activation maps. Allowed values are 1 or any
      even number, excluding zero. Typical values are 8 (accurate fully
      convolutional mode), 16 (fast fully convolutional mode), and 32
      (classification mode).

      NOTE- output_stride relies on all subsequent operators to support
      dilated operators via the "rate" parameter. This might require wrapping
      non-conv operators to operate properly.

    use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
      inputs so that the output dimensions are the same as if 'SAME' padding
      were used.
    scope: optional variable scope.
    is_training: How to set up batch_norm and other ops. Note: most of the
      time this does not need to be set directly. Use
      mobilenet.training_scope() to set up training instead. This parameter
      is here for backward compatibility only. It is safe to set it to the
      value matching training_scope(is_training=...). It is also safe to
      explicitly set it to False, even if there is an outer training_scope
      set to training (the network will then be built in inference mode). If
      this is set to None, no arg_scope is added for slim.batch_norm's
      is_training parameter.

  Returns:
    tensor_out: output tensor.
    end_points: a set of activations for external use, for example summaries
      or losses.

  Raises:
    ValueError: multiplier <= 0, or the target output_stride is not allowed.
  """
  if multiplier <= 0:
    raise ValueError('multiplier is not greater than zero.')

  # Set conv defs defaults and overrides.
  conv_defs_defaults = conv_defs.get('defaults', {})
  conv_defs_overrides = conv_defs.get('overrides', {})
  if use_explicit_padding:
    conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
    conv_defs_overrides[
        (slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}

  if output_stride is not None:
    if output_stride == 0 or (output_stride > 1 and output_stride % 2):
      raise ValueError('Output stride must be None, 1 or a multiple of 2.')

  # a) Set the tensorflow scope
  # b) set padding to default: note we might consider removing this
  #    since it is also set by mobilenet_scope
  # c) set all defaults
  # d) set all extra overrides.
  # pylint: disable=g-backslash-continuation
  with _scope_all(scope, default_scope='Mobilenet'), \
      safe_arg_scope([slim.batch_norm], is_training=is_training), \
      _set_arg_scope_defaults(conv_defs_defaults), \
      _set_arg_scope_defaults(conv_defs_overrides):
    # The current_stride variable keeps track of the output stride of the
    # activations, i.e., the running product of convolution strides up to the
    # current network layer. This allows us to invoke atrous convolution
    # whenever applying the next convolution would result in the activations
    # having output stride larger than the target output_stride.
    current_stride = 1

    # The atrous convolution rate parameter.
    rate = 1

    net = inputs
    # Insert default parameters before the base scope which includes
    # any custom overrides set in mobilenet.
    end_points = {}
    scopes = {}
    for i, opdef in enumerate(conv_defs['spec']):
      params = dict(opdef.params)
      opdef.multiplier_func(params, multiplier)
      stride = params.get('stride', 1)
      if output_stride is not None and current_stride == output_stride:
        # If we have reached the target output_stride, then we need to employ
        # atrous convolution with stride=1 and multiply the atrous rate by the
        # current unit's stride for use in subsequent layers.
        layer_stride = 1
        layer_rate = rate
        rate *= stride
      else:
        layer_stride = stride
        layer_rate = 1
        current_stride *= stride
      # Update params.
      params['stride'] = layer_stride
      # Only insert rate to params if rate > 1 and kernel size is not [1, 1].
      if layer_rate > 1:
        if tuple(params.get('kernel_size', [])) != (1, 1):
          # We will apply atrous rate in the following cases:
          # 1) When kernel_size is not in params, the operation then uses
          #    default kernel size 3x3.
          # 2) When kernel_size is in params, and if the kernel_size is not
          #    equal to (1, 1) (there is no need to apply atrous convolution
          #    to any 1x1 convolution).
          params['rate'] = layer_rate
      # Set padding
      if use_explicit_padding:
        if 'kernel_size' in params:
          net = _fixed_padding(net, params['kernel_size'], layer_rate)
        else:
          params['use_explicit_padding'] = True

      end_point = 'layer_%d' % (i + 1)
      try:
        net = opdef.op(net, **params)
      except Exception:
        print('Failed to create op %i: %r params: %r' % (i, opdef, params))
        raise
      end_points[end_point] = net
      scope = os.path.dirname(net.name)
      scopes[scope] = end_point
      if final_endpoint is not None and end_point == final_endpoint:
        break

    # Add all tensors that end with 'output' to endpoints.
    for t in net.graph.get_operations():
      scope = os.path.dirname(t.name)
      bn = os.path.basename(t.name)
      if scope in scopes and t.name.endswith('output'):
        end_points[scopes[scope] + '/' + bn] = t.outputs[0]
    return net, end_points


@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
  with tf.variable_scope(scope, default_name=default_scope) as s,\
       tf.name_scope(s.original_name_scope):
    yield s


@slim.add_arg_scope
def mobilenet(inputs,
              num_classes=1001,
              prediction_fn=slim.softmax,
              reuse=None,
              scope='Mobilenet',
              base_only=False,
              use_reduce_mean_for_pooling=False,
              **mobilenet_args):
  """Mobilenet model for classification, supports both V1 and V2.

  Note: default mode is inference; use mobilenet.training_scope to create a
  training network.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    prediction_fn: a function to get predictions out of logits
      (default softmax).
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse, 'scope' must be given.
    scope: Optional variable_scope.
    base_only: if True will only create the base of the network (no pooling
      and no logits).
    use_reduce_mean_for_pooling: if True use reduce_mean for pooling. If
      False use the global_pool function that provides some optimization.
    **mobilenet_args: passed to mobilenet_base verbatim.
      - conv_defs: list of conv defs
      - multiplier: Float multiplier for the depth (number of channels)
        for all convolution ops. The value must be greater than zero. Typical
        usage will be to set this value in (0, 1) to reduce the number of
        parameters or computation cost of the model.
      - output_stride: will ensure that the last layer has at most total
        stride. If the architecture calls for more stride than that provided
        (e.g. output_stride=16, but the architecture has 5 stride=2
        operators), it will replace output_stride with fractional
        convolutions using Atrous Convolutions.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the
      corresponding activation tensor.

  Raises:
    ValueError: Input rank is invalid.
  """
  is_training = mobilenet_args.get('is_training', False)
  input_shape = inputs.get_shape().as_list()
  if len(input_shape) != 4:
    raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))

  with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
    inputs = tf.identity(inputs, 'input')
    net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
    if base_only:
      return net, end_points

    net = tf.identity(net, name='embedding')

    with tf.variable_scope('Logits'):
      net = global_pool(net, use_reduce_mean_for_pooling)
      end_points['global_pool'] = net
      if not num_classes:
        return net, end_points
      net = slim.dropout(net, scope='Dropout', is_training=is_training)
      # 1 x 1 x num_classes
      # Note: legacy scope name.
      logits = slim.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          biases_initializer=tf.zeros_initializer(),
          scope='Conv2d_1c_1x1')

      logits = tf.squeeze(logits, [1, 2])

      logits = tf.identity(logits, name='output')
    end_points['Logits'] = logits
    if prediction_fn:
      end_points['Predictions'] = prediction_fn(logits, 'Predictions')
  return logits, end_points


def global_pool(input_tensor,
                use_reduce_mean_for_pooling=False,
                pool_op=tf.nn.avg_pool2d):
  """Applies avg pool to produce a 1x1 output.

  NOTE: This function is functionally equivalent to reduce_mean, but it has
  baked-in average pooling, which has better support across hardware.

  Args:
    input_tensor: input tensor
    use_reduce_mean_for_pooling: if True use reduce_mean for pooling
    pool_op: pooling op (avg pool is default)

  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  if use_reduce_mean_for_pooling:
    return tf.reduce_mean(
        input_tensor, [1, 2], keepdims=True, name='ReduceMean')
  else:
    shape = input_tensor.get_shape().as_list()
    if shape[1] is None or shape[2] is None:
      kernel_size = tf.convert_to_tensor(value=[
          1,
          tf.shape(input=input_tensor)[1],
          tf.shape(input=input_tensor)[2], 1
      ])
    else:
      kernel_size = [1, shape[1], shape[2], 1]
    output = pool_op(
        input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1],
        padding='VALID')
    # Recover output shape, for unknown shape.
    output.set_shape([None, 1, 1, None])
    return output


def training_scope(is_training=True,
                   weight_decay=0.00004,
                   stddev=0.09,
                   dropout_keep_prob=0.8,
                   bn_decay=0.997):
  """Defines Mobilenet training scope.

  Usage:
    with slim.arg_scope(mobilenet.training_scope()):
      logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

    # the network created will be trainable with dropout/batch norm
    # initialized appropriately.

  Args:
    is_training: if set to False this will ensure that all customizations are
      set to non-training mode. This might be helpful for code that is reused
      across both training/evaluation, but most of the time training_scope
      with value False is not needed. If this is set to None, the parameter
      is not added to the batch_norm arg_scope.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: Standard deviation for initialization; if negative, xavier
      initialization is used.
    dropout_keep_prob: dropout keep probability (not set if equal to None).
    bn_decay: decay for the batch norm moving averages (not set if equal to
      None).

  Returns:
    An argument scope to use via arg_scope.
  """
  # Note: do not introduce parameters that would change the inference
  # model here (for example whether to use bias), modify conv_def instead.
  batch_norm_params = {
      'decay': bn_decay,
      'is_training': is_training
  }
  if stddev < 0:
    weight_initializer = slim.initializers.xavier_initializer()
  else:
    weight_initializer = tf.truncated_normal_initializer(stddev=stddev)

  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected, slim.separable_conv2d],
      weights_initializer=weight_initializer,
      normalizer_fn=slim.batch_norm), \
      slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training), \
      safe_arg_scope([slim.batch_norm], **batch_norm_params), \
      safe_arg_scope([slim.dropout], is_training=is_training,
                     keep_prob=dropout_keep_prob), \
      slim.arg_scope([slim.conv2d],
                     weights_regularizer=slim.l2_regularizer(weight_decay)), \
      slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
    return s
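def _example_training_scope_sketch(images, conv_defs):
  """Illustrative only: builds a trainable Mobilenet from a hypothetical
  `images` tensor and `conv_defs` (a real caller would pass e.g.
  mobilenet_v2.V2_DEF), following the training_scope docstring above.
  """
  with slim.arg_scope(training_scope(is_training=True, weight_decay=4e-5)):
    logits, endpoints = mobilenet(images, num_classes=1001,
                                  conv_defs=conv_defs)
  # Omitting the scope builds the same network in inference mode instead.
  return logits, endpoints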
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Utilities for processing word-level datasets."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os
import random
import tensorflow as tf

from base import embeddings
from base import utils
from corpus_processing import example
from corpus_processing import minibatching
from task_specific.word_level import tagging_utils


class TaggedDataLoader(object):
  def __init__(self, config, name, is_token_level):
    self._config = config
    self._task_name = name
    self._raw_data_path = os.path.join(config.raw_data_topdir, name)
    self._is_token_level = is_token_level
    self.label_mapping_path = os.path.join(
        config.preprocessed_data_topdir,
        (name if is_token_level else
         name + '_' + config.label_encoding) + '_label_mapping.pkl')

    if self.label_mapping:
      self._n_classes = len(set(self.label_mapping.values()))
    else:
      self._n_classes = None

  def get_dataset(self, split):
    if (split == 'train' and not self._config.for_preprocessing and
        tf.gfile.Exists(os.path.join(self._raw_data_path,
                                     'train_subset.txt'))):
      split = 'train_subset'
    return minibatching.Dataset(
        self._config, self._get_examples(split), self._task_name)

  def get_labeled_sentences(self, split):
    sentences = []
    path = os.path.join(self._raw_data_path, split + '.txt')
    if not tf.gfile.Exists(path):
      if self._config.for_preprocessing:
        return []
      else:
        raise ValueError('Unable to load data from', path)

    with tf.gfile.GFile(path, 'r') as f:
      sentence = []
      for line in f:
        line = line.strip().split()
        if not line:
          if sentence:
            words, tags = zip(*sentence)
            sentences.append((words, tags))
            sentence = []
          continue
        if line[0] == '-DOCSTART-':
          continue
        word, tag = line[0], line[-1]
        sentence.append((word, tag))
    return sentences

  @property
  def label_mapping(self):
    if not self._config.for_preprocessing:
      return utils.load_cpickle(self.label_mapping_path)

    tag_counts = collections.Counter()
    train_tags = set()
    for split in ['train', 'dev', 'test']:
      for words, tags in self.get_labeled_sentences(split):
        if not self._is_token_level:
          span_labels = tagging_utils.get_span_labels(tags)
          tags = tagging_utils.get_tags(
              span_labels, len(words), self._config.label_encoding)
        for tag in tags:
          if self._task_name == 'depparse':
            tag = tag.split('-')[1]
          tag_counts[tag] += 1
          if split == 'train':
            train_tags.add(tag)
    if self._task_name == 'ccg':
      # For CCG, there are tags in the test sets that aren't in the train
      # set. All tags not in the train set get mapped to a special label.
      # The model will never predict this label because it never sees it in
      # the training set.
      not_in_train_tags = []
      for tag, count in tag_counts.items():
        if tag not in train_tags:
          not_in_train_tags.append(tag)
      label_mapping = {
          label: i for i, label in enumerate(sorted(filter(
              lambda t: t not in not_in_train_tags, tag_counts.keys())))
      }
      n = len(label_mapping)
      for tag in not_in_train_tags:
        label_mapping[tag] = n
    else:
      labels = sorted(tag_counts.keys())
      if self._task_name == 'depparse':
        # Ensure 'root' always gets id 0.
        labels.remove('root')
        labels.insert(0, 'root')
      label_mapping = {label: i for i, label in enumerate(labels)}
    return label_mapping

  def _get_examples(self, split):
    word_vocab = embeddings.get_word_vocab(self._config)
    char_vocab = embeddings.get_char_vocab()
    examples = [
        TaggingExample(
            self._config, self._is_token_level, words, tags,
            word_vocab, char_vocab, self.label_mapping, self._task_name)
        for words, tags in self.get_labeled_sentences(split)]
    if self._config.train_set_percent < 100:
      utils.log('using reduced train set ({:}%)'.format(
          self._config.train_set_percent))
      random.shuffle(examples)
      examples = examples[:int(len(examples) *
                               self._config.train_set_percent / 100.0)]
    return examples


class TaggingExample(example.Example):
  def __init__(self, config, is_token_level, words, original_tags,
               word_vocab, char_vocab, label_mapping, task_name):
    super(TaggingExample, self).__init__(words, word_vocab, char_vocab)
    if is_token_level:
      labels = original_tags
    else:
      span_labels = tagging_utils.get_span_labels(original_tags)
      labels = tagging_utils.get_tags(
          span_labels, len(words), config.label_encoding)

    if task_name == 'depparse':
      # Pack each (head position, relation) tag like '2-nsubj' into a single
      # integer id; see the sketch after this class for the arithmetic.
      self.labels = []
      for l in labels:
        split = l.split('-')
        self.labels.append(
            len(label_mapping) * (0 if split[0] == '0' else 1 + int(split[0]))
            + label_mapping[split[1]])
    else:
      self.labels = [label_mapping[l] for l in labels]
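# Worked example of the depparse label packing above (the mapping values are
# hypothetical). A tag such as '2-nsubj' (head offset 2, relation 'nsubj')
# packs into one id, with offset '0' reserved for the first block so root
# attachments stay distinct:
#
#   label_mapping = {'root': 0, 'nsubj': 1, 'dobj': 2}
#   packed = len(label_mapping) * (1 + 2) + label_mapping['nsubj']   # == 10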
logging.info(\"Using do_lower_case=%s based on name of vocab_file=%s\",\r\n do_lower_case, vocab_file)\r\n return do_lower_case\r\n elif sp_model_file:\r\n do_lower_case = True # All public ALBERTs (as of Oct 2020) do it.\r\n logging.info(\"Defaulting to do_lower_case=%s for Sentencepiece tokenizer\",\r\n do_lower_case)\r\n return do_lower_case\r\n else:\r\n raise ValueError(\"Must set vocab_file or sp_model_file.\")\r\n\r\n\r\ndef _create_model(\r\n *,\r\n bert_config: Optional[configs.BertConfig] = None,\r\n encoder_config: Optional[encoders.EncoderConfig] = None,\r\n with_mlm: bool,\r\n) -> Tuple[tf.keras.Model, tf.keras.Model]:\r\n \"\"\"Creates the model to export and the model to restore the checkpoint.\r\n\r\n Args:\r\n bert_config: A legacy `BertConfig` to create a `BertEncoder` object.\r\n Exactly one of encoder_config and bert_config must be set.\r\n encoder_config: An `EncoderConfig` to create an encoder of the configured\r\n type (`BertEncoder` or other).\r\n with_mlm: A bool to control the second component of the result.\r\n If True, will create a `BertPretrainerV2` object; otherwise, will\r\n create a `BertEncoder` object.\r\n\r\n Returns:\r\n A Tuple of (1) a Keras model that will be exported, (2) a `BertPretrainerV2`\r\n object or `BertEncoder` object depending on the value of `with_mlm`\r\n argument, which contains the first model and will be used for restoring\r\n weights from the checkpoint.\r\n \"\"\"\r\n if (bert_config is not None) == (encoder_config is not None):\r\n raise ValueError(\"Exactly one of `bert_config` and `encoder_config` \"\r\n \"can be specified, but got %s and %s\" %\r\n (bert_config, encoder_config))\r\n\r\n if bert_config is not None:\r\n encoder = get_bert_encoder(bert_config)\r\n else:\r\n encoder = encoders.build_encoder(encoder_config)\r\n\r\n # Convert from list of named inputs to dict of inputs keyed by name.\r\n # Only the latter accepts a dict of inputs after restoring from SavedModel.\r\n encoder_inputs_dict = {x.name: x for x in encoder.inputs}\r\n encoder_output_dict = encoder(encoder_inputs_dict)\r\n # For interchangeability with other text representations,\r\n # add \"default\" as an alias for BERT's whole-input reptesentations.\r\n encoder_output_dict[\"default\"] = encoder_output_dict[\"pooled_output\"]\r\n core_model = tf.keras.Model(\r\n inputs=encoder_inputs_dict, outputs=encoder_output_dict)\r\n\r\n if with_mlm:\r\n if bert_config is not None:\r\n hidden_act = bert_config.hidden_act\r\n else:\r\n assert encoder_config is not None\r\n hidden_act = encoder_config.get().hidden_activation\r\n\r\n pretrainer = models.BertPretrainerV2(\r\n encoder_network=encoder,\r\n mlm_activation=tf_utils.get_activation(hidden_act))\r\n\r\n pretrainer_inputs_dict = {x.name: x for x in pretrainer.inputs}\r\n pretrainer_output_dict = pretrainer(pretrainer_inputs_dict)\r\n mlm_model = tf.keras.Model(\r\n inputs=pretrainer_inputs_dict, outputs=pretrainer_output_dict)\r\n # Set `_auto_track_sub_layers` to False, so that the additional weights\r\n # from `mlm` sub-object will not be included in the core model.\r\n # TODO(b/169210253): Use a public API when available.\r\n core_model._auto_track_sub_layers = False # pylint: disable=protected-access\r\n core_model.mlm = mlm_model\r\n return core_model, pretrainer\r\n else:\r\n return core_model, encoder\r\n\r\n\r\ndef export_model(export_path: Text,\r\n *,\r\n bert_config: Optional[configs.BertConfig] = None,\r\n encoder_config: Optional[encoders.EncoderConfig] = None,\r\n model_checkpoint_path: Text,\r\n 
with_mlm: bool,\r\n copy_pooler_dense_to_encoder: bool = False,\r\n vocab_file: Optional[Text] = None,\r\n sp_model_file: Optional[Text] = None,\r\n do_lower_case: Optional[bool] = None) -> None:\r\n \"\"\"Exports an Encoder as SavedModel after restoring pre-trained weights.\r\n\r\n The exported SavedModel implements a superset of the Encoder API for\r\n Text embeddings with Transformer Encoders described at\r\n https://www.tensorflow.org/hub/common_saved_model_apis/text.\r\n\r\n In particular, the exported SavedModel can be used in the following way:\r\n\r\n ```\r\n # Calls default interface (encoder only).\r\n\r\n encoder = hub.load(...)\r\n encoder_inputs = dict(\r\n input_word_ids=..., # Shape [batch, seq_length], dtype=int32\r\n input_mask=..., # Shape [batch, seq_length], dtype=int32\r\n input_type_ids=..., # Shape [batch, seq_length], dtype=int32\r\n )\r\n encoder_outputs = encoder(encoder_inputs)\r\n assert encoder_outputs.keys() == {\r\n \"pooled_output\", # Shape [batch_size, width], dtype=float32\r\n \"default\", # Alias for \"pooled_output\" (aligns with other models).\r\n \"sequence_output\" # Shape [batch_size, seq_length, width], dtype=float32\r\n \"encoder_outputs\", # List of Tensors with outputs of all transformer layers.\r\n }\r\n ```\r\n\r\n If `with_mlm` is True, the exported SavedModel can also be called in the\r\n following way:\r\n\r\n ```\r\n # Calls expanded interface that includes logits of the Masked Language Model.\r\n mlm_inputs = dict(\r\n input_word_ids=..., # Shape [batch, seq_length], dtype=int32\r\n input_mask=..., # Shape [batch, seq_length], dtype=int32\r\n input_type_ids=..., # Shape [batch, seq_length], dtype=int32\r\n masked_lm_positions=..., # Shape [batch, num_predictions], dtype=int32\r\n )\r\n mlm_outputs = encoder.mlm(mlm_inputs)\r\n assert mlm_outputs.keys() == {\r\n \"pooled_output\", # Shape [batch, width], dtype=float32\r\n \"sequence_output\", # Shape [batch, seq_length, width], dtype=float32\r\n \"encoder_outputs\", # List of Tensors with outputs of all transformer layers.\r\n \"mlm_logits\" # Shape [batch, num_predictions, vocab_size], dtype=float32\r\n }\r\n ```\r\n\r\n Args:\r\n export_path: The SavedModel output directory.\r\n bert_config: An optional `configs.BertConfig` object. 
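# A minimal sketch of driving _create_model directly (the config value is
# hypothetical; BertConfig defaults fill in the remaining fields). With
# with_mlm=True the exported model carries an `mlm` sub-model, while the
# returned pretrainer exists only to restore checkpoint weights:
#
#   bert_config = configs.BertConfig(vocab_size=30522)
#   core_model, pretrainer = _create_model(bert_config=bert_config,
#                                          with_mlm=True)
#   assert hasattr(core_model, "mlm")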
def export_model(export_path: Text,
                 *,
                 bert_config: Optional[configs.BertConfig] = None,
                 encoder_config: Optional[encoders.EncoderConfig] = None,
                 model_checkpoint_path: Text,
                 with_mlm: bool,
                 copy_pooler_dense_to_encoder: bool = False,
                 vocab_file: Optional[Text] = None,
                 sp_model_file: Optional[Text] = None,
                 do_lower_case: Optional[bool] = None) -> None:
  """Exports an Encoder as SavedModel after restoring pre-trained weights.

  The exported SavedModel implements a superset of the Encoder API for
  Text embeddings with Transformer Encoders described at
  https://www.tensorflow.org/hub/common_saved_model_apis/text.

  In particular, the exported SavedModel can be used in the following way:

  ```
  # Calls default interface (encoder only).

  encoder = hub.load(...)
  encoder_inputs = dict(
      input_word_ids=...,  # Shape [batch, seq_length], dtype=int32
      input_mask=...,      # Shape [batch, seq_length], dtype=int32
      input_type_ids=...,  # Shape [batch, seq_length], dtype=int32
  )
  encoder_outputs = encoder(encoder_inputs)
  assert encoder_outputs.keys() == {
      "pooled_output",    # Shape [batch_size, width], dtype=float32
      "default",          # Alias for "pooled_output" (aligns with other models).
      "sequence_output",  # Shape [batch_size, seq_length, width], dtype=float32
      "encoder_outputs",  # List of Tensors with outputs of all transformer layers.
  }
  ```

  If `with_mlm` is True, the exported SavedModel can also be called in the
  following way:

  ```
  # Calls expanded interface that includes logits of the Masked Language Model.
  mlm_inputs = dict(
      input_word_ids=...,       # Shape [batch, seq_length], dtype=int32
      input_mask=...,           # Shape [batch, seq_length], dtype=int32
      input_type_ids=...,       # Shape [batch, seq_length], dtype=int32
      masked_lm_positions=...,  # Shape [batch, num_predictions], dtype=int32
  )
  mlm_outputs = encoder.mlm(mlm_inputs)
  assert mlm_outputs.keys() == {
      "pooled_output",    # Shape [batch, width], dtype=float32
      "sequence_output",  # Shape [batch, seq_length, width], dtype=float32
      "encoder_outputs",  # List of Tensors with outputs of all transformer layers.
      "mlm_logits",       # Shape [batch, num_predictions, vocab_size], dtype=float32
  }
  ```

  Args:
    export_path: The SavedModel output directory.
    bert_config: An optional `configs.BertConfig` object. Note: exactly one
      of `bert_config` and `encoder_config` must be specified.
    encoder_config: An optional `encoders.EncoderConfig` object.
    model_checkpoint_path: The path to the checkpoint.
    with_mlm: Whether to export the additional mlm sub-object.
    copy_pooler_dense_to_encoder: Whether to copy the pooler's dense layer
      used in the next sentence prediction task to the encoder.
    vocab_file: The path to the wordpiece vocab file, or None.
    sp_model_file: The path to the sentencepiece model file, or None.
      Exactly one of vocab_file and sp_model_file must be set.
    do_lower_case: Whether to lower-case text before tokenization.
  """
  if with_mlm:
    core_model, pretrainer = _create_model(bert_config=bert_config,
                                           encoder_config=encoder_config,
                                           with_mlm=with_mlm)
    encoder = pretrainer.encoder_network
    # It supports both the new pretrainer checkpoint produced by TF-NLP and
    # the checkpoint converted from TF1 (original BERT, SmallBERTs).
    checkpoint_items = pretrainer.checkpoint_items
    checkpoint = tf.train.Checkpoint(**checkpoint_items)
  else:
    core_model, encoder = _create_model(bert_config=bert_config,
                                        encoder_config=encoder_config,
                                        with_mlm=with_mlm)
    checkpoint = tf.train.Checkpoint(
        model=encoder,  # Legacy checkpoints.
        encoder=encoder)
  checkpoint.restore(model_checkpoint_path).assert_existing_objects_matched()

  if copy_pooler_dense_to_encoder:
    logging.info("Copy pooler's dense layer to the encoder.")
    pooler_checkpoint = tf.train.Checkpoint(
        **{"next_sentence.pooler_dense": encoder.pooler_layer})
    pooler_checkpoint.restore(
        model_checkpoint_path).assert_existing_objects_matched()

  # Before SavedModels for preprocessing appeared in Oct 2020, the encoders
  # provided this information to let users do preprocessing themselves.
  # We keep doing that for now. It helps users to upgrade incrementally.
  # Moreover, it offers an escape hatch for advanced users who want the
  # full vocab, not the high-level operations from the preprocessing model.
  if vocab_file:
    core_model.vocab_file = tf.saved_model.Asset(vocab_file)
    if do_lower_case is None:
      raise ValueError("Must pass do_lower_case if passing vocab_file.")
    core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False)
  elif sp_model_file:
    # This was used by ALBERT, with implied values of do_lower_case=True
    # and strip_diacritics=True.
    core_model.sp_model_file = tf.saved_model.Asset(sp_model_file)
  else:
    raise ValueError("Must set vocab_file or sp_model_file")
  core_model.save(export_path, include_optimizer=False, save_format="tf")


class BertPackInputsSavedModelWrapper(tf.train.Checkpoint):
  """Wraps a BertPackInputs layer for export to SavedModel.

  The wrapper object is suitable for use with `tf.saved_model.save()` and
  `.load()`. The wrapper object is callable with inputs and outputs like the
  BertPackInputs layer, but differs from saving an unwrapped Keras object:

    - The inputs can be a list of 1 or 2 RaggedTensors of dtype int32 and
      ragged rank 1 or 2. (In Keras, saving to a tf.function in a SavedModel
      would fix the number of RaggedTensors and their ragged rank.)
    - The call accepts an optional keyword argument `seq_length=` to override
      the layer's .seq_length hyperparameter. (In Keras, a hyperparameter
      could not be changed after saving to a tf.function in a SavedModel.)
  """

  def __init__(self, bert_pack_inputs: layers.BertPackInputs):
    super().__init__()

    # Preserve the layer's configured seq_length as a default but make it
    # overridable. Having this dynamically determined default argument
    # requires self.__call__ to be defined in this indirect way.
    default_seq_length = bert_pack_inputs.seq_length

    @tf.function(autograph=False)
    def call(inputs, seq_length=default_seq_length):
      return layers.BertPackInputs.bert_pack_inputs(
          inputs, seq_length=seq_length,
          start_of_sequence_id=bert_pack_inputs.start_of_sequence_id,
          end_of_segment_id=bert_pack_inputs.end_of_segment_id,
          padding_id=bert_pack_inputs.padding_id)
    self.__call__ = call

    # Trace concrete functions for all input shapes the SavedModel must
    # accept: 1 or 2 segments, each with ragged rank 1 or 2.
    for ragged_rank in range(1, 3):
      for num_segments in range(1, 3):
        _ = self.__call__.get_concrete_function(
            [tf.RaggedTensorSpec([None] * (ragged_rank + 1), dtype=tf.int32)
             for _ in range(num_segments)],
            seq_length=tf.TensorSpec([], tf.int32))


def create_preprocessing(*,
                         vocab_file: Optional[str] = None,
                         sp_model_file: Optional[str] = None,
                         do_lower_case: bool,
                         tokenize_with_offsets: bool,
                         default_seq_length: int) -> tf.keras.Model:
  """Returns a preprocessing Model for given tokenization parameters.

  This function builds a Keras Model with attached subobjects suitable for
  saving to a SavedModel. The resulting SavedModel implements the
  Preprocessor API for Text embeddings with Transformer Encoders described at
  https://www.tensorflow.org/hub/common_saved_model_apis/text.

  Args:
    vocab_file: The path to the wordpiece vocab file, or None.
    sp_model_file: The path to the sentencepiece model file, or None.
      Exactly one of vocab_file and sp_model_file must be set. This
      determines the type of tokenizer that is used.
    do_lower_case: Whether to do lower case.
    tokenize_with_offsets: Whether to include the .tokenize_with_offsets
      subobject.
    default_seq_length: The sequence length of preprocessing results from
      the root callable. This is also the default sequence length for the
      bert_pack_inputs subobject.

  Returns:
    A tf.keras.Model object with several attached subobjects, suitable for
    saving as a preprocessing SavedModel.
  """
  # Select the tokenizer.
  if bool(vocab_file) == bool(sp_model_file):
    raise ValueError("Must set exactly one of vocab_file, sp_model_file")
  if vocab_file:
    tokenize = layers.BertTokenizer(
        vocab_file=vocab_file,
        lower_case=do_lower_case,
        tokenize_with_offsets=tokenize_with_offsets)
  else:
    tokenize = layers.SentencepieceTokenizer(
        model_file_path=sp_model_file,
        lower_case=do_lower_case,
        strip_diacritics=True,  # Strip diacritics to follow ALBERT model.
        tokenize_with_offsets=tokenize_with_offsets)

  # The root object of the preprocessing model can be called to do
  # one-shot preprocessing for users with single-sentence inputs.
  sentences = tf.keras.layers.Input(shape=(), dtype=tf.string,
                                    name="sentences")
  if tokenize_with_offsets:
    tokens, start_offsets, limit_offsets = tokenize(sentences)
  else:
    tokens = tokenize(sentences)
  pack = layers.BertPackInputs(
      seq_length=default_seq_length,
      special_tokens_dict=tokenize.get_special_tokens_dict())
  model_inputs = pack(tokens)
  preprocessing = tf.keras.Model(sentences, model_inputs)

  # Individual steps of preprocessing are made available as named subobjects
  # to enable more general preprocessing. For saving, they need to be Models
  # in their own right.
  preprocessing.tokenize = tf.keras.Model(sentences, tokens)
  # Provide an equivalent to tokenize.get_special_tokens_dict().
  preprocessing.tokenize.get_special_tokens_dict = tf.train.Checkpoint()
  preprocessing.tokenize.get_special_tokens_dict.__call__ = tf.function(
      lambda: tokenize.get_special_tokens_dict(),  # pylint: disable=unnecessary-lambda
      input_signature=[])
  if tokenize_with_offsets:
    preprocessing.tokenize_with_offsets = tf.keras.Model(
        sentences, [tokens, start_offsets, limit_offsets])
    preprocessing.tokenize_with_offsets.get_special_tokens_dict = (
        preprocessing.tokenize.get_special_tokens_dict)
  # Conceptually, this should be
  #   preprocessing.bert_pack_inputs = tf.keras.Model(tokens, model_inputs)
  # but technicalities require us to use a wrapper (see comments there).
  # In particular, seq_length can be overridden when calling this.
  preprocessing.bert_pack_inputs = BertPackInputsSavedModelWrapper(pack)

  return preprocessing
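# Usage sketch for the exported preprocessing SavedModel (the path and inputs
# are hypothetical; the API follows the wrapper and docstrings above):
#
#   preprocessor = tf.saved_model.load("/tmp/preprocessing_export")
#   tokens = preprocessor.tokenize(tf.constant(["hello world"]))
#   encoder_inputs = preprocessor.bert_pack_inputs(
#       [tokens], seq_length=tf.constant(256, tf.int32))
#
# The seq_length override works because bert_pack_inputs is exported through
# BertPackInputsSavedModelWrapper instead of a fixed Keras tf.function.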
def _move_to_tmpdir(file_path: Optional[Text], tmpdir: Text) -> Optional[Text]:
  """Returns a new path with the same basename and a hash of the original path."""
  if file_path is None:
    return None
  olddir, filename = os.path.split(file_path)
  hasher = hashlib.sha1()
  hasher.update(olddir.encode("utf-8"))
  # Hashing the source directory keeps same-named files from different
  # directories from colliding inside tmpdir.
  target_dir = os.path.join(tmpdir, hasher.hexdigest())
  target_file = os.path.join(target_dir, filename)
  tf.io.gfile.mkdir(target_dir)
  tf.io.gfile.copy(file_path, target_file)
  return target_file


def export_preprocessing(export_path: Text,
                         *,
                         vocab_file: Optional[Text] = None,
                         sp_model_file: Optional[Text] = None,
                         do_lower_case: bool,
                         tokenize_with_offsets: bool,
                         default_seq_length: int,
                         experimental_disable_assert: bool = False) -> None:
  """Exports preprocessing to a SavedModel for TF Hub."""
  with tempfile.TemporaryDirectory() as tmpdir:
    # TODO(b/175369555): Remove experimental_disable_assert and its use.
    with _maybe_disable_assert(experimental_disable_assert):
      preprocessing = create_preprocessing(
          vocab_file=_move_to_tmpdir(vocab_file, tmpdir),
          sp_model_file=_move_to_tmpdir(sp_model_file, tmpdir),
          do_lower_case=do_lower_case,
          tokenize_with_offsets=tokenize_with_offsets,
          default_seq_length=default_seq_length)
      preprocessing.save(export_path, include_optimizer=False,
                         save_format="tf")
    if experimental_disable_assert:
      _check_no_assert(export_path)
  # It helps the unit test to prevent stray copies of the vocab file.
  if tf.io.gfile.exists(tmpdir):
    raise IOError("Failed to clean up TemporaryDirectory")


# TODO(b/175369555): Remove all workarounds for this bug of TensorFlow 2.4
# when this bug is no longer a concern for publishing new models.
# TensorFlow 2.4 has a placement issue with Assert ops in tf.functions called
# from Dataset.map() on a TPU worker. They end up on the TPU coordinator,
# and invoking them from the TPU worker is either inefficient (when possible)
# or impossible (notably when using "headless" TPU workers on Cloud that do
# not have a channel to the coordinator). The bug has been fixed in time for
# TF 2.5. To work around this, the following code avoids Assert ops in the
# exported SavedModels. It monkey-patches calls to tf.Assert from inside
# TensorFlow and replaces them by a no-op while building the exported model.
# This is fragile, so _check_no_assert() validates the result. The resulting
# model should be fine to read on future versions of TF, even if this
# workaround at export time may break eventually. (Failing unit tests will
# tell.)


def _dont_assert(condition, data, summarize=None, name="Assert"):
  """The no-op version of tf.Assert installed by _maybe_disable_assert."""
  del condition, data, summarize  # Unused.
  if tf.executing_eagerly():
    return
  with tf.name_scope(name):
    return tf.no_op(name="dont_assert")


@contextlib.contextmanager
def _maybe_disable_assert(disable_assert):
  """Scoped monkey patch of control_flow_ops.Assert to a no-op."""
  if not disable_assert:
    yield
    return

  original_assert = control_flow_ops.Assert
  control_flow_ops.Assert = _dont_assert
  try:
    yield
  finally:
    # Restore the original op even if model building raised an exception.
    control_flow_ops.Assert = original_assert


def _check_no_assert(saved_model_path):
  """Raises AssertionError if the SavedModel contains Assert ops."""
  saved_model_filename = os.path.join(saved_model_path, "saved_model.pb")
  with tf.io.gfile.GFile(saved_model_filename, "rb") as f:
    saved_model = saved_model_pb2.SavedModel.FromString(f.read())

  assert_nodes = []
  graph_def = saved_model.meta_graphs[0].graph_def
  assert_nodes += ["node '{}' in global graph".format(n.name)
                   for n in graph_def.node if n.op == "Assert"]
  for fdef in graph_def.library.function:
    assert_nodes += [
        "node '{}' in function '{}'".format(n.name, fdef.signature.name)
        for n in fdef.node_def if n.op == "Assert"]
  if assert_nodes:
    raise AssertionError(
        "Internal tool error: "
        "failed to suppress {} Assert ops in SavedModel:\n{}".format(
            len(assert_nodes), "\n".join(assert_nodes[:10])))
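# Behavior sketch for the Assert workaround above (illustrative only):
#
#   with _maybe_disable_assert(True):
#     ...  # graph building here sees control_flow_ops.Assert == _dont_assert
#   # on exit the original tf.Assert is restored
#   _check_no_assert(export_path)  # raises if an Assert op slipped through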
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_rewriter_builder."""
import unittest
from unittest import mock  # pylint: disable=g-importing-member
import tensorflow.compat.v1 as tf
import tf_slim as slim

from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2
from object_detection.utils import tf_version


if tf_version.is_tf1():
  from tensorflow.contrib import quantize as contrib_quantize  # pylint: disable=g-import-not-at-top


@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class QuantizationBuilderTest(tf.test.TestCase):

  def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
    with mock.patch.object(
        contrib_quantize,
        'experimental_create_training_graph') as mock_quant_fn:
      with mock.patch.object(slim,
                             'summarize_collection') as mock_summarize_col:
        graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
        graph_rewriter_proto.quantization.delay = 10
        graph_rewriter_proto.quantization.weight_bits = 8
        graph_rewriter_proto.quantization.activation_bits = 8
        graph_rewrite_fn = graph_rewriter_builder.build(
            graph_rewriter_proto, is_training=True)
        graph_rewrite_fn()
        _, kwargs = mock_quant_fn.call_args
        self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
        self.assertEqual(kwargs['quant_delay'], 10)
        mock_summarize_col.assert_called_with('quant_vars')

  def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
    with mock.patch.object(contrib_quantize,
                           'experimental_create_eval_graph') as mock_quant_fn:
      with mock.patch.object(slim,
                             'summarize_collection') as mock_summarize_col:
        graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
        graph_rewriter_proto.quantization.delay = 10
        graph_rewrite_fn = graph_rewriter_builder.build(
            graph_rewriter_proto, is_training=False)
        graph_rewrite_fn()
        _, kwargs = mock_quant_fn.call_args
        self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
        mock_summarize_col.assert_called_with('quant_vars')


if __name__ == '__main__':
  tf.test.main()
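# Configuration sketch for the builder under test (values mirror the tests
# above; illustrative only):
#
#   graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
#   graph_rewriter_proto.quantization.delay = 10          # steps before QAT
#   graph_rewriter_proto.quantization.weight_bits = 8
#   graph_rewriter_proto.quantization.activation_bits = 8
#   graph_rewrite_fn = graph_rewriter_builder.build(
#       graph_rewriter_proto, is_training=True)
#   graph_rewrite_fn()   # rewrites the default graph in place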
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Dataset utilities for vision tasks using TFDS and tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import os
from typing import Any, List, Optional, Tuple, Mapping, Union

from absl import logging
from dataclasses import dataclass
import tensorflow as tf
import tensorflow_datasets as tfds

from official.modeling.hyperparams import base_config
from official.vision.image_classification import augment
from official.vision.image_classification import preprocessing

AUGMENTERS = {
    'autoaugment': augment.AutoAugment,
    'randaugment': augment.RandAugment,
}


@dataclass
class AugmentConfig(base_config.Config):
  """Configuration for image augmenters.

  Attributes:
    name: The name of the image augmentation to use. Possible options are
      None (default), 'autoaugment', or 'randaugment'.
    params: Any parameters used to initialize the augmenter.
  """
  name: Optional[str] = None
  params: Optional[Mapping[str, Any]] = None

  def build(self) -> Optional[augment.ImageAugment]:
    """Build the augmenter using this config; returns None if no name is set."""
    params = self.params or {}
    augmenter = AUGMENTERS.get(self.name, None)
    return augmenter(**params) if augmenter is not None else None


@dataclass
class DatasetConfig(base_config.Config):
  """The base configuration for building datasets.

  Attributes:
    name: The name of the Dataset. Usually should correspond to a TFDS
      dataset.
    data_dir: The path where the dataset files are stored, if available.
    filenames: Optional list of strings representing the TFRecord names.
    builder: The builder type used to load the dataset. Value should be one
      of 'tfds' (load using TFDS), 'records' (load from TFRecords), or
      'synthetic' (generate dummy synthetic data without reading from files).
    split: The split of the dataset. Usually 'train', 'validation', or
      'test'.
    image_size: The size of the image in the dataset. This assumes that
      `width` == `height`. Set to 'infer' to infer the image size from TFDS
      info. This requires `name` to be a registered dataset in TFDS.
    num_classes: The number of classes given by the dataset. Set to 'infer'
      to infer the number of classes from TFDS info. This requires `name` to
      be a registered dataset in TFDS.
    num_channels: The number of channels given by the dataset. Set to
      'infer' to infer the number of channels from TFDS info. This requires
      `name` to be a registered dataset in TFDS.
    num_examples: The number of examples given by the dataset. Set to
      'infer' to infer the number of examples from TFDS info. This requires
      `name` to be a registered dataset in TFDS.
    batch_size: The base batch size for the dataset.
    use_per_replica_batch_size: Whether to scale the batch size based on
      available resources. If set to `True`, the dataset builder will return
      batch_size multiplied by `num_devices`, the number of device replicas
      (e.g., the number of GPUs or TPU cores). This setting should be `True`
      if the strategy argument is passed to `build()` and `num_devices > 1`.
    num_devices: The number of replica devices to use. This should be set by
      `strategy.num_replicas_in_sync` when using a distribution strategy.
    dtype: The desired dtype of the dataset. This will be set during
      preprocessing.
    one_hot: Whether to apply one hot encoding. Set to `True` to be able to
      use label smoothing.
    augmenter: The augmenter config to use. No augmentation is used by
      default.
    download: Whether to download data using TFDS.
    shuffle_buffer_size: The buffer size used for shuffling training data.
    file_shuffle_buffer_size: The buffer size used for shuffling raw training
      files.
    skip_decoding: Whether to skip image decoding when loading from TFDS.
    cache: whether to cache dataset examples. Can be used to avoid re-reading
      from disk on the second epoch. Requires significant memory overhead.
    tf_data_service: The URI of a tf.data service to offload preprocessing
      onto during training. The URI should be in the format
      "protocol://address", e.g. "grpc://tf-data-service:5050".
    mean_subtract: whether or not to apply mean subtraction to the dataset.
    standardize: whether or not to apply standardization to the dataset.
  """
  name: Optional[str] = None
  data_dir: Optional[str] = None
  filenames: Optional[List[str]] = None
  builder: str = 'tfds'
  split: str = 'train'
  image_size: Union[int, str] = 'infer'
  num_classes: Union[int, str] = 'infer'
  num_channels: Union[int, str] = 'infer'
  num_examples: Union[int, str] = 'infer'
  batch_size: int = 128
  use_per_replica_batch_size: bool = True
  num_devices: int = 1
  dtype: str = 'float32'
  one_hot: bool = True
  augmenter: AugmentConfig = AugmentConfig()
  download: bool = False
  shuffle_buffer_size: int = 10000
  file_shuffle_buffer_size: int = 1024
  skip_decoding: bool = True
  cache: bool = False
  tf_data_service: Optional[str] = None
  mean_subtract: bool = False
  standardize: bool = False

  @property
  def has_data(self):
    """Whether this dataset has any data associated with it."""
    return self.name or self.data_dir or self.filenames


@dataclass
class ImageNetConfig(DatasetConfig):
  """The base ImageNet dataset config."""
  name: str = 'imagenet2012'
  # Note: for large datasets like ImageNet, using records is faster than tfds.
  builder: str = 'records'
  image_size: int = 224
  num_channels: int = 3
  num_examples: int = 1281167
  num_classes: int = 1000
  batch_size: int = 128


@dataclass
class Cifar10Config(DatasetConfig):
  """The base CIFAR-10 dataset config."""
  name: str = 'cifar10'
  image_size: int = 224
  batch_size: int = 128
  download: bool = True
  cache: bool = True
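# Usage sketch (values hypothetical): the configs are dataclass-style, so
# variants are created through constructor overrides or builder kwargs rather
# than new subclasses; see the DatasetBuilder class below.
#
#   eval_config = ImageNetConfig(split='validation', one_hot=False)
#   builder = DatasetBuilder(eval_config, batch_size=64)  # kwargs override
#   ds = builder.build()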
class DatasetBuilder:
  """An object for building datasets.

  Allows building various pipelines fetching examples, preprocessing, etc.
  Maintains additional state information calculated from the dataset, i.e.,
  training set split, batch size, and number of steps (batches).
  """

  def __init__(self, config: DatasetConfig, **overrides: Any):
    """Initialize the builder from the config."""
    self.config = config.replace(**overrides)
    self.builder_info = None

    if self.config.augmenter is not None:
      logging.info('Using augmentation: %s', self.config.augmenter.name)
      self.augmenter = self.config.augmenter.build()
    else:
      self.augmenter = None

  @property
  def is_training(self) -> bool:
    """Whether this is the training set."""
    return self.config.split == 'train'

  @property
  def batch_size(self) -> int:
    """The batch size, multiplied by the number of replicas (if configured)."""
    if self.config.use_per_replica_batch_size:
      return self.config.batch_size * self.config.num_devices
    else:
      return self.config.batch_size

  @property
  def global_batch_size(self):
    """The global batch size across all replicas."""
    return self.batch_size

  @property
  def local_batch_size(self):
    """The base unscaled batch size."""
    if self.config.use_per_replica_batch_size:
      return self.config.batch_size
    else:
      return self.config.batch_size // self.config.num_devices

  @property
  def num_steps(self) -> int:
    """The number of steps (batches) needed to exhaust this dataset."""
    # Always divide by the global batch size to get the correct # of steps.
    return self.num_examples // self.global_batch_size

  @property
  def dtype(self) -> tf.dtypes.DType:
    """Converts the config's dtype string to a tf dtype.

    Returns:
      The `tf.dtypes.DType` corresponding to the config's dtype string.

    Raises:
      ValueError if the config's dtype is not supported.
    """
    dtype_map = {
        'float32': tf.float32,
        'bfloat16': tf.bfloat16,
        'float16': tf.float16,
        'fp32': tf.float32,
        'bf16': tf.bfloat16,
    }
    try:
      return dtype_map[self.config.dtype]
    except KeyError:
      raise ValueError('Invalid DType provided. Supported types: {}'.format(
          dtype_map.keys()))

  @property
  def image_size(self) -> int:
    """The size of each image (can be inferred from the dataset)."""
    if self.config.image_size == 'infer':
      return self.info.features['image'].shape[0]
    else:
      return int(self.config.image_size)

  @property
  def num_channels(self) -> int:
    """The number of image channels (can be inferred from the dataset)."""
    if self.config.num_channels == 'infer':
      return self.info.features['image'].shape[-1]
    else:
      return int(self.config.num_channels)

  @property
  def num_examples(self) -> int:
    """The number of examples (can be inferred from the dataset)."""
    if self.config.num_examples == 'infer':
      return self.info.splits[self.config.split].num_examples
    else:
      return int(self.config.num_examples)

  @property
  def num_classes(self) -> int:
    """The number of classes (can be inferred from the dataset)."""
    if self.config.num_classes == 'infer':
      return self.info.features['label'].num_classes
    else:
      return int(self.config.num_classes)

  @property
  def info(self) -> tfds.core.DatasetInfo:
    """The TFDS dataset info, if available."""
    try:
      if self.builder_info is None:
        self.builder_info = tfds.builder(self.config.name).info
    except ConnectionError as e:
      logging.error('Failed to use TFDS to load info. Please set dataset '
                    'info (image_size, num_channels, num_examples, '
                    'num_classes) in the dataset config.')
      raise e
    return self.builder_info

  def build(
      self,
      strategy: Optional[tf.distribute.Strategy] = None) -> tf.data.Dataset:
    """Construct a dataset end-to-end and return it using an optional strategy.

    Args:
      strategy: a strategy that, if passed, will distribute the dataset
        according to that strategy. If passed and `num_devices > 1`,
        `use_per_replica_batch_size` must be set to `True`.

    Returns:
      A TensorFlow dataset outputting batched images and labels.
    """
    if strategy:
      if strategy.num_replicas_in_sync != self.config.num_devices:
        logging.warning(
            'Passed a strategy with %d devices, but expected '
            '%d devices.', strategy.num_replicas_in_sync,
            self.config.num_devices)
      dataset = strategy.distribute_datasets_from_function(self._build)
    else:
      dataset = self._build()

    return dataset

  def _build(
      self,
      input_context: Optional[tf.distribute.InputContext] = None
  ) -> tf.data.Dataset:
    """Construct a dataset end-to-end and return it.

    Args:
      input_context: An optional context provided by `tf.distribute` for
        cross-replica training.

    Returns:
      A TensorFlow dataset outputting batched images and labels.
    """
    builders = {
        'tfds': self.load_tfds,
        'records': self.load_records,
        'synthetic': self.load_synthetic,
    }

    builder = builders.get(self.config.builder, None)

    if builder is None:
      raise ValueError('Unknown builder type {}'.format(self.config.builder))

    self.input_context = input_context
    dataset = builder()
    dataset = self.pipeline(dataset)

    return dataset

  def load_tfds(self) -> tf.data.Dataset:
    """Return a dataset loading files from TFDS."""
    logging.info('Using TFDS to load data.')

    builder = tfds.builder(self.config.name, data_dir=self.config.data_dir)

    if self.config.download:
      builder.download_and_prepare()

    decoders = {}

    if self.config.skip_decoding:
      decoders['image'] = tfds.decode.SkipDecoding()

    read_config = tfds.ReadConfig(
        interleave_cycle_length=10,
        interleave_block_length=1,
        input_context=self.input_context)

    dataset = builder.as_dataset(
        split=self.config.split,
        as_supervised=True,
        shuffle_files=True,
        decoders=decoders,
        read_config=read_config)

    return dataset

  def load_records(self) -> tf.data.Dataset:
    """Return a dataset loading files with TFRecords."""
    logging.info('Using TFRecords to load data.')
    if self.config.filenames is None:
      if self.config.data_dir is None:
        raise ValueError('Dataset must specify a path for the data files.')

      file_pattern = os.path.join(self.config.data_dir,
                                  '{}*'.format(self.config.split))
      dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
    else:
      dataset = tf.data.Dataset.from_tensor_slices(self.config.filenames)

    return dataset
dataset.repeat()\r\n dataset = dataset.map(\r\n generate_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)\r\n return dataset\r\n\r\n def pipeline(self, dataset: tf.data.Dataset) -> tf.data.Dataset:\r\n \"\"\"Build a pipeline fetching, shuffling, and preprocessing the dataset.\r\n\r\n Args:\r\n dataset: A `tf.data.Dataset` that loads raw files.\r\n\r\n Returns:\r\n A TensorFlow dataset outputting batched images and labels.\r\n \"\"\"\r\n if (self.config.builder != 'tfds' and self.input_context and\r\n self.input_context.num_input_pipelines > 1):\r\n dataset = dataset.shard(self.input_context.num_input_pipelines,\r\n self.input_context.input_pipeline_id)\r\n logging.info(\r\n 'Sharding the dataset: input_pipeline_id=%d '\r\n 'num_input_pipelines=%d', self.input_context.num_input_pipelines,\r\n self.input_context.input_pipeline_id)\r\n\r\n if self.is_training and self.config.builder == 'records':\r\n # Shuffle the input files.\r\n dataset.shuffle(buffer_size=self.config.file_shuffle_buffer_size)\r\n\r\n if self.is_training and not self.config.cache:\r\n dataset = dataset.repeat()\r\n\r\n if self.config.builder == 'records':\r\n # Read the data from disk in parallel\r\n dataset = dataset.interleave(\r\n tf.data.TFRecordDataset,\r\n cycle_length=10,\r\n block_length=1,\r\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\r\n\r\n if self.config.cache:\r\n dataset = dataset.cache()\r\n\r\n if self.is_training:\r\n dataset = dataset.shuffle(self.config.shuffle_buffer_size)\r\n dataset = dataset.repeat()\r\n\r\n # Parse, pre-process, and batch the data in parallel\r\n if self.config.builder == 'records':\r\n preprocess = self.parse_record\r\n else:\r\n preprocess = self.preprocess\r\n dataset = dataset.map(\r\n preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)\r\n\r\n if self.input_context and self.config.num_devices > 1:\r\n if not self.config.use_per_replica_batch_size:\r\n raise ValueError(\r\n 'The builder does not support a global batch size with more than '\r\n 'one replica. Got {} replicas. 
Please set a '\r\n '`per_replica_batch_size` and enable '\r\n '`use_per_replica_batch_size=True`.'.format(\r\n self.config.num_devices))\r\n\r\n # The batch size of the dataset will be multiplied by the number of\r\n # replicas automatically when strategy.distribute_datasets_from_function\r\n # is called, so we use local batch size here.\r\n dataset = dataset.batch(\r\n self.local_batch_size, drop_remainder=self.is_training)\r\n else:\r\n dataset = dataset.batch(\r\n self.global_batch_size, drop_remainder=self.is_training)\r\n\r\n # Prefetch overlaps in-feed with training\r\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\r\n\r\n if self.config.tf_data_service:\r\n if not hasattr(tf.data.experimental, 'service'):\r\n raise ValueError('The tf_data_service flag requires Tensorflow version '\r\n '>= 2.3.0, but the version is {}'.format(\r\n tf.__version__))\r\n dataset = dataset.apply(\r\n tf.data.experimental.service.distribute(\r\n processing_mode='parallel_epochs',\r\n service=self.config.tf_data_service,\r\n job_name='resnet_train'))\r\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\r\n\r\n return dataset\r\n\r\n def parse_record(self, record: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\r\n \"\"\"Parse an ImageNet record from a serialized string Tensor.\"\"\"\r\n keys_to_features = {\r\n 'image/encoded': tf.io.FixedLenFeature((), tf.string, ''),\r\n 'image/format': tf.io.FixedLenFeature((), tf.string, 'jpeg'),\r\n 'image/class/label': tf.io.FixedLenFeature([], tf.int64, -1),\r\n 'image/class/text': tf.io.FixedLenFeature([], tf.string, ''),\r\n 'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32),\r\n 'image/object/class/label': tf.io.VarLenFeature(dtype=tf.int64),\r\n }\r\n\r\n parsed = tf.io.parse_single_example(record, keys_to_features)\r\n\r\n label = tf.reshape(parsed['image/class/label'], shape=[1])\r\n\r\n # Subtract one so that labels are in [0, 1000)\r\n label -= 1\r\n\r\n image_bytes = tf.reshape(parsed['image/encoded'], shape=[])\r\n image, label = self.preprocess(image_bytes, label)\r\n\r\n return image, label\r\n\r\n def preprocess(self, image: tf.Tensor,\r\n label: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\r\n \"\"\"Apply image preprocessing and augmentation to the image and label.\"\"\"\r\n if self.is_training:\r\n image = preprocessing.preprocess_for_train(\r\n image,\r\n image_size=self.image_size,\r\n mean_subtract=self.config.mean_subtract,\r\n standardize=self.config.standardize,\r\n dtype=self.dtype,\r\n augmenter=self.augmenter)\r\n else:\r\n image = preprocessing.preprocess_for_eval(\r\n image,\r\n image_size=self.image_size,\r\n num_channels=self.num_channels,\r\n mean_subtract=self.config.mean_subtract,\r\n standardize=self.config.standardize,\r\n dtype=self.dtype)\r\n\r\n label = tf.cast(label, tf.int32)\r\n if self.config.one_hot:\r\n label = tf.one_hot(label, self.num_classes)\r\n label = tf.reshape(label, [self.num_classes])\r\n\r\n return image, label\r\n\r\n @classmethod\r\n def from_params(cls, *args, **kwargs):\r\n \"\"\"Construct a dataset builder from a default config and any overrides.\"\"\"\r\n config = DatasetConfig.from_args(*args, **kwargs)\r\n return cls(config)\r\n", "# Copyright 2018 The TensorFlow Authors All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nr\"\"\"Script for training an RL agent using the UVF algorithm.\r\n\r\nTo run locally: See run_train.py\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport time\r\nimport tensorflow as tf\r\nslim = tf.contrib.slim\r\n\r\nimport gin.tf\r\n# pylint: disable=unused-import\r\nimport train_utils\r\nimport agent as agent_\r\nfrom agents import circular_buffer\r\nfrom utils import utils as uvf_utils\r\nfrom environments import create_maze_env\r\n# pylint: enable=unused-import\r\n\r\n\r\nflags = tf.app.flags\r\n\r\nFLAGS = flags.FLAGS\r\nflags.DEFINE_string('goal_sample_strategy', 'sample',\r\n                    'None, sample, FuN')\r\n\r\nLOAD_PATH = None\r\n\r\n\r\ndef collect_experience(tf_env, agent, meta_agent, state_preprocess,\r\n                       replay_buffer, meta_replay_buffer,\r\n                       action_fn, meta_action_fn,\r\n                       environment_steps, num_episodes, num_resets,\r\n                       episode_rewards, episode_meta_rewards,\r\n                       store_context,\r\n                       disable_agent_reset):\r\n  \"\"\"Collect experience in a tf_env into a replay_buffer using action_fn.\r\n\r\n  Args:\r\n    tf_env: A TFEnvironment.\r\n    agent: A UVF agent.\r\n    meta_agent: A Meta Agent.\r\n    state_preprocess: A state preprocessing module.\r\n    replay_buffer: A Replay buffer to collect experience in.\r\n    meta_replay_buffer: A Replay buffer to collect meta agent experience in.\r\n    action_fn: A function to produce actions given current state.\r\n    meta_action_fn: A function to produce meta actions given current state.\r\n    environment_steps: A variable to count the number of steps in the tf_env.\r\n    num_episodes: A variable to count the number of episodes.\r\n    num_resets: A variable to count the number of resets.\r\n    episode_rewards: A variable tracking rewards of recent episodes.\r\n    episode_meta_rewards: A variable tracking recent meta-agent rewards.\r\n    store_context: A boolean indicating whether to store the context in the\r\n      replay buffers.\r\n    disable_agent_reset: A boolean that prevents the agent from resetting.\r\n\r\n  Returns:\r\n    A collect_experience_op that executes an action and stores it into the\r\n    replay buffers.\r\n  \"\"\"\r\n  tf_env.start_collect()\r\n  state = tf_env.current_obs()\r\n  state_repr = state_preprocess(state)\r\n  action = action_fn(state, context=None)\r\n\r\n  with tf.control_dependencies([state]):\r\n    transition_type, reward, discount = tf_env.step(action)\r\n\r\n  def increment_step():\r\n    return environment_steps.assign_add(1)\r\n\r\n  def increment_episode():\r\n    return num_episodes.assign_add(1)\r\n\r\n  def increment_reset():\r\n    return num_resets.assign_add(1)\r\n\r\n  def update_episode_rewards(context_reward, meta_reward, reset):\r\n    new_episode_rewards = tf.concat(\r\n        [episode_rewards[:1] + context_reward, episode_rewards[1:]], 0)\r\n    new_episode_meta_rewards = tf.concat(\r\n        [episode_meta_rewards[:1] + meta_reward,\r\n         episode_meta_rewards[1:]], 0)\r\n    return tf.group(\r\n        episode_rewards.assign(\r\n            tf.cond(reset,\r\n                    lambda: tf.concat([[0.], episode_rewards[:-1]], 0),\r\n                    lambda: new_episode_rewards)),\r\n        episode_meta_rewards.assign(\r\n            tf.cond(reset,\r\n                    lambda: tf.concat([[0.], 
episode_meta_rewards[:-1]], 0),\r\n lambda: new_episode_meta_rewards)))\r\n\r\n def no_op_int():\r\n return tf.constant(0, dtype=tf.int64)\r\n\r\n step_cond = agent.step_cond_fn(state, action,\r\n transition_type,\r\n environment_steps, num_episodes)\r\n reset_episode_cond = agent.reset_episode_cond_fn(\r\n state, action,\r\n transition_type, environment_steps, num_episodes)\r\n reset_env_cond = agent.reset_env_cond_fn(state, action,\r\n transition_type,\r\n environment_steps, num_episodes)\r\n\r\n increment_step_op = tf.cond(step_cond, increment_step, no_op_int)\r\n increment_episode_op = tf.cond(reset_episode_cond, increment_episode,\r\n no_op_int)\r\n increment_reset_op = tf.cond(reset_env_cond, increment_reset, no_op_int)\r\n increment_op = tf.group(increment_step_op, increment_episode_op,\r\n increment_reset_op)\r\n\r\n with tf.control_dependencies([increment_op, reward, discount]):\r\n next_state = tf_env.current_obs()\r\n next_state_repr = state_preprocess(next_state)\r\n next_reset_episode_cond = tf.logical_or(\r\n agent.reset_episode_cond_fn(\r\n state, action,\r\n transition_type, environment_steps, num_episodes),\r\n tf.equal(discount, 0.0))\r\n\r\n if store_context:\r\n context = [tf.identity(var) + tf.zeros_like(var) for var in agent.context_vars]\r\n meta_context = [tf.identity(var) + tf.zeros_like(var) for var in meta_agent.context_vars]\r\n else:\r\n context = []\r\n meta_context = []\r\n with tf.control_dependencies([next_state] + context + meta_context):\r\n if disable_agent_reset:\r\n collect_experience_ops = [tf.no_op()] # don't reset agent\r\n else:\r\n collect_experience_ops = agent.cond_begin_episode_op(\r\n tf.logical_not(reset_episode_cond),\r\n [state, action, reward, next_state,\r\n state_repr, next_state_repr],\r\n mode='explore', meta_action_fn=meta_action_fn)\r\n context_reward, meta_reward = collect_experience_ops\r\n collect_experience_ops = list(collect_experience_ops)\r\n collect_experience_ops.append(\r\n update_episode_rewards(tf.reduce_sum(context_reward), meta_reward,\r\n reset_episode_cond))\r\n\r\n meta_action_every_n = agent.tf_context.meta_action_every_n\r\n with tf.control_dependencies(collect_experience_ops):\r\n transition = [state, action, reward, discount, next_state]\r\n\r\n meta_action = tf.to_float(\r\n tf.concat(context, -1)) # Meta agent action is low-level context\r\n\r\n meta_end = tf.logical_and( # End of meta-transition.\r\n tf.equal(agent.tf_context.t % meta_action_every_n, 1),\r\n agent.tf_context.t > 1)\r\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\r\n states_var = tf.get_variable('states_var',\r\n [meta_action_every_n, state.shape[-1]],\r\n state.dtype)\r\n actions_var = tf.get_variable('actions_var',\r\n [meta_action_every_n, action.shape[-1]],\r\n action.dtype)\r\n state_var = tf.get_variable('state_var', state.shape, state.dtype)\r\n reward_var = tf.get_variable('reward_var', reward.shape, reward.dtype)\r\n meta_action_var = tf.get_variable('meta_action_var',\r\n meta_action.shape, meta_action.dtype)\r\n meta_context_var = [\r\n tf.get_variable('meta_context_var%d' % idx,\r\n meta_context[idx].shape, meta_context[idx].dtype)\r\n for idx in range(len(meta_context))]\r\n\r\n actions_var_upd = tf.scatter_update(\r\n actions_var, (agent.tf_context.t - 2) % meta_action_every_n, action)\r\n with tf.control_dependencies([actions_var_upd]):\r\n actions = tf.identity(actions_var) + tf.zeros_like(actions_var)\r\n meta_reward = tf.identity(meta_reward) + tf.zeros_like(meta_reward)\r\n meta_reward = 
tf.reshape(meta_reward, reward.shape)\r\n\r\n reward = 0.1 * meta_reward\r\n meta_transition = [state_var, meta_action_var,\r\n reward_var + reward,\r\n discount * (1 - tf.to_float(next_reset_episode_cond)),\r\n next_state]\r\n meta_transition.extend([states_var, actions])\r\n if store_context: # store current and next context into replay\r\n transition += context + list(agent.context_vars)\r\n meta_transition += meta_context_var + list(meta_agent.context_vars)\r\n\r\n meta_step_cond = tf.squeeze(tf.logical_and(step_cond, tf.logical_or(next_reset_episode_cond, meta_end)))\r\n\r\n collect_experience_op = tf.group(\r\n replay_buffer.maybe_add(transition, step_cond),\r\n meta_replay_buffer.maybe_add(meta_transition, meta_step_cond),\r\n )\r\n\r\n with tf.control_dependencies([collect_experience_op]):\r\n collect_experience_op = tf.cond(reset_env_cond,\r\n tf_env.reset,\r\n tf_env.current_time_step)\r\n\r\n meta_period = tf.equal(agent.tf_context.t % meta_action_every_n, 1)\r\n states_var_upd = tf.scatter_update(\r\n states_var, (agent.tf_context.t - 1) % meta_action_every_n,\r\n next_state)\r\n state_var_upd = tf.assign(\r\n state_var,\r\n tf.cond(meta_period, lambda: next_state, lambda: state_var))\r\n reward_var_upd = tf.assign(\r\n reward_var,\r\n tf.cond(meta_period,\r\n lambda: tf.zeros_like(reward_var),\r\n lambda: reward_var + reward))\r\n meta_action = tf.to_float(tf.concat(agent.context_vars, -1))\r\n meta_action_var_upd = tf.assign(\r\n meta_action_var,\r\n tf.cond(meta_period, lambda: meta_action, lambda: meta_action_var))\r\n meta_context_var_upd = [\r\n tf.assign(\r\n meta_context_var[idx],\r\n tf.cond(meta_period,\r\n lambda: meta_agent.context_vars[idx],\r\n lambda: meta_context_var[idx]))\r\n for idx in range(len(meta_context))]\r\n\r\n return tf.group(\r\n collect_experience_op,\r\n states_var_upd,\r\n state_var_upd,\r\n reward_var_upd,\r\n meta_action_var_upd,\r\n *meta_context_var_upd)\r\n\r\n\r\ndef sample_best_meta_actions(state_reprs, next_state_reprs, prev_meta_actions,\r\n low_states, low_actions, low_state_reprs,\r\n inverse_dynamics, uvf_agent, k=10):\r\n \"\"\"Return meta-actions which approximately maximize low-level log-probs.\"\"\"\r\n sampled_actions = inverse_dynamics.sample(state_reprs, next_state_reprs, k, prev_meta_actions)\r\n sampled_actions = tf.stop_gradient(sampled_actions)\r\n sampled_log_probs = tf.reshape(uvf_agent.log_probs(\r\n tf.tile(low_states, [k, 1, 1]),\r\n tf.tile(low_actions, [k, 1, 1]),\r\n tf.tile(low_state_reprs, [k, 1, 1]),\r\n [tf.reshape(sampled_actions, [-1, sampled_actions.shape[-1]])]),\r\n [k, low_states.shape[0],\r\n low_states.shape[1], -1])\r\n fitness = tf.reduce_sum(sampled_log_probs, [2, 3])\r\n best_actions = tf.argmax(fitness, 0)\r\n actions = tf.gather_nd(\r\n sampled_actions,\r\n tf.stack([best_actions,\r\n tf.range(prev_meta_actions.shape[0], dtype=tf.int64)], -1))\r\n return actions\r\n\r\n\r\[email protected]\r\ndef train_uvf(train_dir,\r\n environment=None,\r\n num_bin_actions=3,\r\n agent_class=None,\r\n meta_agent_class=None,\r\n state_preprocess_class=None,\r\n inverse_dynamics_class=None,\r\n exp_action_wrapper=None,\r\n replay_buffer=None,\r\n meta_replay_buffer=None,\r\n replay_num_steps=1,\r\n meta_replay_num_steps=1,\r\n critic_optimizer=None,\r\n actor_optimizer=None,\r\n meta_critic_optimizer=None,\r\n meta_actor_optimizer=None,\r\n repr_optimizer=None,\r\n relabel_contexts=False,\r\n meta_relabel_contexts=False,\r\n batch_size=64,\r\n repeat_size=0,\r\n num_episodes_train=2000,\r\n initial_episodes=2,\r\n 
initial_steps=None,\r\n num_updates_per_observation=1,\r\n num_collect_per_update=1,\r\n num_collect_per_meta_update=1,\r\n gamma=1.0,\r\n meta_gamma=1.0,\r\n reward_scale_factor=1.0,\r\n target_update_period=1,\r\n should_stop_early=None,\r\n clip_gradient_norm=0.0,\r\n summarize_gradients=False,\r\n debug_summaries=False,\r\n log_every_n_steps=100,\r\n prefetch_queue_capacity=2,\r\n policy_save_dir='policy',\r\n save_policy_every_n_steps=1000,\r\n save_policy_interval_secs=0,\r\n replay_context_ratio=0.0,\r\n next_state_as_context_ratio=0.0,\r\n state_index=0,\r\n zero_timer_ratio=0.0,\r\n timer_index=-1,\r\n debug=False,\r\n max_policies_to_save=None,\r\n max_steps_per_episode=None,\r\n load_path=LOAD_PATH):\r\n \"\"\"Train an agent.\"\"\"\r\n tf_env = create_maze_env.TFPyEnvironment(environment)\r\n observation_spec = [tf_env.observation_spec()]\r\n action_spec = [tf_env.action_spec()]\r\n\r\n max_steps_per_episode = max_steps_per_episode or tf_env.pyenv.max_episode_steps\r\n\r\n assert max_steps_per_episode, 'max_steps_per_episode need to be set'\r\n\r\n if initial_steps is None:\r\n initial_steps = initial_episodes * max_steps_per_episode\r\n\r\n if agent_class.ACTION_TYPE == 'discrete':\r\n assert False\r\n else:\r\n assert agent_class.ACTION_TYPE == 'continuous'\r\n\r\n assert agent_class.ACTION_TYPE == meta_agent_class.ACTION_TYPE\r\n with tf.variable_scope('meta_agent'):\r\n meta_agent = meta_agent_class(\r\n observation_spec,\r\n action_spec,\r\n tf_env,\r\n debug_summaries=debug_summaries)\r\n meta_agent.set_replay(replay=meta_replay_buffer)\r\n\r\n with tf.variable_scope('uvf_agent'):\r\n uvf_agent = agent_class(\r\n observation_spec,\r\n action_spec,\r\n tf_env,\r\n debug_summaries=debug_summaries)\r\n uvf_agent.set_meta_agent(agent=meta_agent)\r\n uvf_agent.set_replay(replay=replay_buffer)\r\n\r\n with tf.variable_scope('state_preprocess'):\r\n state_preprocess = state_preprocess_class()\r\n\r\n with tf.variable_scope('inverse_dynamics'):\r\n inverse_dynamics = inverse_dynamics_class(\r\n meta_agent.sub_context_as_action_specs[0])\r\n\r\n # Create counter variables\r\n global_step = tf.contrib.framework.get_or_create_global_step()\r\n num_episodes = tf.Variable(0, dtype=tf.int64, name='num_episodes')\r\n num_resets = tf.Variable(0, dtype=tf.int64, name='num_resets')\r\n num_updates = tf.Variable(0, dtype=tf.int64, name='num_updates')\r\n num_meta_updates = tf.Variable(0, dtype=tf.int64, name='num_meta_updates')\r\n episode_rewards = tf.Variable([0.] * 100, name='episode_rewards')\r\n episode_meta_rewards = tf.Variable([0.] 
* 100, name='episode_meta_rewards')\r\n\r\n # Create counter variables summaries\r\n train_utils.create_counter_summaries([\r\n ('environment_steps', global_step),\r\n ('num_episodes', num_episodes),\r\n ('num_resets', num_resets),\r\n ('num_updates', num_updates),\r\n ('num_meta_updates', num_meta_updates),\r\n ('replay_buffer_adds', replay_buffer.get_num_adds()),\r\n ('meta_replay_buffer_adds', meta_replay_buffer.get_num_adds()),\r\n ])\r\n\r\n tf.summary.scalar('avg_episode_rewards',\r\n tf.reduce_mean(episode_rewards[1:]))\r\n tf.summary.scalar('avg_episode_meta_rewards',\r\n tf.reduce_mean(episode_meta_rewards[1:]))\r\n tf.summary.histogram('episode_rewards', episode_rewards[1:])\r\n tf.summary.histogram('episode_meta_rewards', episode_meta_rewards[1:])\r\n\r\n # Create init ops\r\n action_fn = uvf_agent.action\r\n action_fn = uvf_agent.add_noise_fn(action_fn, global_step=None)\r\n meta_action_fn = meta_agent.action\r\n meta_action_fn = meta_agent.add_noise_fn(meta_action_fn, global_step=None)\r\n meta_actions_fn = meta_agent.actions\r\n meta_actions_fn = meta_agent.add_noise_fn(meta_actions_fn, global_step=None)\r\n init_collect_experience_op = collect_experience(\r\n tf_env,\r\n uvf_agent,\r\n meta_agent,\r\n state_preprocess,\r\n replay_buffer,\r\n meta_replay_buffer,\r\n action_fn,\r\n meta_action_fn,\r\n environment_steps=global_step,\r\n num_episodes=num_episodes,\r\n num_resets=num_resets,\r\n episode_rewards=episode_rewards,\r\n episode_meta_rewards=episode_meta_rewards,\r\n store_context=True,\r\n disable_agent_reset=False,\r\n )\r\n\r\n # Create train ops\r\n collect_experience_op = collect_experience(\r\n tf_env,\r\n uvf_agent,\r\n meta_agent,\r\n state_preprocess,\r\n replay_buffer,\r\n meta_replay_buffer,\r\n action_fn,\r\n meta_action_fn,\r\n environment_steps=global_step,\r\n num_episodes=num_episodes,\r\n num_resets=num_resets,\r\n episode_rewards=episode_rewards,\r\n episode_meta_rewards=episode_meta_rewards,\r\n store_context=True,\r\n disable_agent_reset=False,\r\n )\r\n\r\n train_op_list = []\r\n repr_train_op = tf.constant(0.0)\r\n for mode in ['meta', 'nometa']:\r\n if mode == 'meta':\r\n agent = meta_agent\r\n buff = meta_replay_buffer\r\n critic_opt = meta_critic_optimizer\r\n actor_opt = meta_actor_optimizer\r\n relabel = meta_relabel_contexts\r\n num_steps = meta_replay_num_steps\r\n my_gamma = meta_gamma,\r\n n_updates = num_meta_updates\r\n else:\r\n agent = uvf_agent\r\n buff = replay_buffer\r\n critic_opt = critic_optimizer\r\n actor_opt = actor_optimizer\r\n relabel = relabel_contexts\r\n num_steps = replay_num_steps\r\n my_gamma = gamma\r\n n_updates = num_updates\r\n\r\n with tf.name_scope(mode):\r\n batch = buff.get_random_batch(batch_size, num_steps=num_steps)\r\n states, actions, rewards, discounts, next_states = batch[:5]\r\n with tf.name_scope('Reward'):\r\n tf.summary.scalar('average_step_reward', tf.reduce_mean(rewards))\r\n rewards *= reward_scale_factor\r\n batch_queue = slim.prefetch_queue.prefetch_queue(\r\n [states, actions, rewards, discounts, next_states] + batch[5:],\r\n capacity=prefetch_queue_capacity,\r\n name='batch_queue')\r\n\r\n batch_dequeue = batch_queue.dequeue()\r\n if repeat_size > 0:\r\n batch_dequeue = [\r\n tf.tile(batch, (repeat_size+1,) + (1,) * (batch.shape.ndims - 1))\r\n for batch in batch_dequeue\r\n ]\r\n batch_size *= (repeat_size + 1)\r\n states, actions, rewards, discounts, next_states = batch_dequeue[:5]\r\n if mode == 'meta':\r\n low_states = batch_dequeue[5]\r\n low_actions = batch_dequeue[6]\r\n 
low_state_reprs = state_preprocess(low_states)\r\n state_reprs = state_preprocess(states)\r\n next_state_reprs = state_preprocess(next_states)\r\n\r\n if mode == 'meta': # Re-label meta-action\r\n prev_actions = actions\r\n if FLAGS.goal_sample_strategy == 'None':\r\n pass\r\n elif FLAGS.goal_sample_strategy == 'FuN':\r\n actions = inverse_dynamics.sample(state_reprs, next_state_reprs, 1, prev_actions, sc=0.1)\r\n actions = tf.stop_gradient(actions)\r\n elif FLAGS.goal_sample_strategy == 'sample':\r\n actions = sample_best_meta_actions(state_reprs, next_state_reprs, prev_actions,\r\n low_states, low_actions, low_state_reprs,\r\n inverse_dynamics, uvf_agent, k=10)\r\n else:\r\n assert False\r\n\r\n if state_preprocess.trainable and mode == 'meta':\r\n # Representation learning is based on meta-transitions, but is trained\r\n # along with low-level policy updates.\r\n repr_loss, _, _ = state_preprocess.loss(states, next_states, low_actions, low_states)\r\n repr_train_op = slim.learning.create_train_op(\r\n repr_loss,\r\n repr_optimizer,\r\n global_step=None,\r\n update_ops=None,\r\n summarize_gradients=summarize_gradients,\r\n clip_gradient_norm=clip_gradient_norm,\r\n variables_to_train=state_preprocess.get_trainable_vars(),)\r\n\r\n # Get contexts for training\r\n contexts, next_contexts = agent.sample_contexts(\r\n mode='train', batch_size=batch_size,\r\n state=states, next_state=next_states,\r\n )\r\n if not relabel: # Re-label context (in the style of TDM or HER).\r\n contexts, next_contexts = (\r\n batch_dequeue[-2*len(contexts):-1*len(contexts)],\r\n batch_dequeue[-1*len(contexts):])\r\n\r\n merged_states = agent.merged_states(states, contexts)\r\n merged_next_states = agent.merged_states(next_states, next_contexts)\r\n if mode == 'nometa':\r\n context_rewards, context_discounts = agent.compute_rewards(\r\n 'train', state_reprs, actions, rewards, next_state_reprs, contexts)\r\n elif mode == 'meta': # Meta-agent uses sum of rewards, not context-specific rewards.\r\n _, context_discounts = agent.compute_rewards(\r\n 'train', states, actions, rewards, next_states, contexts)\r\n context_rewards = rewards\r\n\r\n if agent.gamma_index is not None:\r\n context_discounts *= tf.cast(\r\n tf.reshape(contexts[agent.gamma_index], (-1,)),\r\n dtype=context_discounts.dtype)\r\n else: context_discounts *= my_gamma\r\n\r\n critic_loss = agent.critic_loss(merged_states, actions,\r\n context_rewards, context_discounts,\r\n merged_next_states)\r\n\r\n critic_loss = tf.reduce_mean(critic_loss)\r\n\r\n actor_loss = agent.actor_loss(merged_states, actions,\r\n context_rewards, context_discounts,\r\n merged_next_states)\r\n actor_loss *= tf.to_float( # Only update actor every N steps.\r\n tf.equal(n_updates % target_update_period, 0))\r\n\r\n critic_train_op = slim.learning.create_train_op(\r\n critic_loss,\r\n critic_opt,\r\n global_step=n_updates,\r\n update_ops=None,\r\n summarize_gradients=summarize_gradients,\r\n clip_gradient_norm=clip_gradient_norm,\r\n variables_to_train=agent.get_trainable_critic_vars(),)\r\n critic_train_op = uvf_utils.tf_print(\r\n critic_train_op, [critic_train_op],\r\n message='critic_loss',\r\n print_freq=1000,\r\n name='critic_loss')\r\n train_op_list.append(critic_train_op)\r\n if actor_loss is not None:\r\n actor_train_op = slim.learning.create_train_op(\r\n actor_loss,\r\n actor_opt,\r\n global_step=None,\r\n update_ops=None,\r\n summarize_gradients=summarize_gradients,\r\n clip_gradient_norm=clip_gradient_norm,\r\n variables_to_train=agent.get_trainable_actor_vars(),)\r\n 
actor_train_op = uvf_utils.tf_print(\r\n actor_train_op, [actor_train_op],\r\n message='actor_loss',\r\n print_freq=1000,\r\n name='actor_loss')\r\n train_op_list.append(actor_train_op)\r\n\r\n assert len(train_op_list) == 4\r\n # Update targets should happen after the networks have been updated.\r\n with tf.control_dependencies(train_op_list[2:]):\r\n update_targets_op = uvf_utils.periodically(\r\n uvf_agent.update_targets, target_update_period, 'update_targets')\r\n if meta_agent is not None:\r\n with tf.control_dependencies(train_op_list[:2]):\r\n update_meta_targets_op = uvf_utils.periodically(\r\n meta_agent.update_targets, target_update_period, 'update_targets')\r\n\r\n assert_op = tf.Assert( # Hack to get training to stop.\r\n tf.less_equal(global_step, 200 + num_episodes_train * max_steps_per_episode),\r\n [global_step])\r\n with tf.control_dependencies([update_targets_op, assert_op]):\r\n train_op = tf.add_n(train_op_list[2:], name='post_update_targets')\r\n # Representation training steps on every low-level policy training step.\r\n train_op += repr_train_op\r\n with tf.control_dependencies([update_meta_targets_op, assert_op]):\r\n meta_train_op = tf.add_n(train_op_list[:2],\r\n name='post_update_meta_targets')\r\n\r\n if debug_summaries:\r\n train_.gen_debug_batch_summaries(batch)\r\n slim.summaries.add_histogram_summaries(\r\n uvf_agent.get_trainable_critic_vars(), 'critic_vars')\r\n slim.summaries.add_histogram_summaries(\r\n uvf_agent.get_trainable_actor_vars(), 'actor_vars')\r\n\r\n train_ops = train_utils.TrainOps(train_op, meta_train_op,\r\n collect_experience_op)\r\n\r\n policy_save_path = os.path.join(train_dir, policy_save_dir, 'model.ckpt')\r\n policy_vars = uvf_agent.get_actor_vars() + meta_agent.get_actor_vars() + [\r\n global_step, num_episodes, num_resets\r\n ] + list(uvf_agent.context_vars) + list(meta_agent.context_vars) + state_preprocess.get_trainable_vars()\r\n # add critic vars, since some test evaluation depends on them\r\n policy_vars += uvf_agent.get_trainable_critic_vars() + meta_agent.get_trainable_critic_vars()\r\n policy_saver = tf.train.Saver(\r\n policy_vars, max_to_keep=max_policies_to_save, sharded=False)\r\n\r\n lowlevel_vars = (uvf_agent.get_actor_vars() +\r\n uvf_agent.get_trainable_critic_vars() +\r\n state_preprocess.get_trainable_vars())\r\n lowlevel_saver = tf.train.Saver(lowlevel_vars)\r\n\r\n def policy_save_fn(sess):\r\n policy_saver.save(\r\n sess, policy_save_path, global_step=global_step, write_meta_graph=False)\r\n if save_policy_interval_secs > 0:\r\n tf.logging.info(\r\n 'Wait %d secs after save policy.' 
% save_policy_interval_secs)\r\n time.sleep(save_policy_interval_secs)\r\n\r\n train_step_fn = train_utils.TrainStep(\r\n max_number_of_steps=num_episodes_train * max_steps_per_episode + 100,\r\n num_updates_per_observation=num_updates_per_observation,\r\n num_collect_per_update=num_collect_per_update,\r\n num_collect_per_meta_update=num_collect_per_meta_update,\r\n log_every_n_steps=log_every_n_steps,\r\n policy_save_fn=policy_save_fn,\r\n save_policy_every_n_steps=save_policy_every_n_steps,\r\n should_stop_early=should_stop_early).train_step\r\n\r\n local_init_op = tf.local_variables_initializer()\r\n init_targets_op = tf.group(uvf_agent.update_targets(1.0),\r\n meta_agent.update_targets(1.0))\r\n\r\n def initialize_training_fn(sess):\r\n \"\"\"Initialize training function.\"\"\"\r\n sess.run(local_init_op)\r\n sess.run(init_targets_op)\r\n if load_path:\r\n tf.logging.info('Restoring low-level from %s' % load_path)\r\n lowlevel_saver.restore(sess, load_path)\r\n global_step_value = sess.run(global_step)\r\n assert global_step_value == 0, 'Global step should be zero.'\r\n collect_experience_call = sess.make_callable(\r\n init_collect_experience_op)\r\n\r\n for _ in range(initial_steps):\r\n collect_experience_call()\r\n\r\n train_saver = tf.train.Saver(max_to_keep=2, sharded=True)\r\n tf.logging.info('train dir: %s', train_dir)\r\n return slim.learning.train(\r\n train_ops,\r\n train_dir,\r\n train_step_fn=train_step_fn,\r\n save_interval_secs=FLAGS.save_interval_secs,\r\n saver=train_saver,\r\n log_every_n_steps=0,\r\n global_step=global_step,\r\n master=\"\",\r\n is_chief=(FLAGS.task == 0),\r\n save_summaries_secs=FLAGS.save_summaries_secs,\r\n init_fn=initialize_training_fn)\r\n", "# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Generates vocabulary and term frequency files for datasets.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom six import iteritems\r\n\r\nfrom collections import defaultdict\r\n\r\n# Dependency imports\r\n\r\nimport tensorflow as tf\r\n\r\nfrom data import data_utils\r\nfrom data import document_generators\r\n\r\nflags = tf.app.flags\r\nFLAGS = flags.FLAGS\r\n\r\n# Flags controlling input are in document_generators.py\r\n\r\nflags.DEFINE_string('output_dir', '',\r\n 'Path to save vocab.txt and vocab_freq.txt.')\r\n\r\nflags.DEFINE_boolean('use_unlabeled', True, 'Whether to use the '\r\n 'unlabeled sentiment dataset in the vocabulary.')\r\nflags.DEFINE_boolean('include_validation', False, 'Whether to include the '\r\n 'validation set in the vocabulary.')\r\nflags.DEFINE_integer('doc_count_threshold', 1, 'The minimum number of '\r\n 'documents a word or bigram should occur in to keep '\r\n 'it in the vocabulary.')\r\n\r\nMAX_VOCAB_SIZE = 100 * 1000\r\n\r\n\r\ndef fill_vocab_from_doc(doc, vocab_freqs, doc_counts):\r\n 
\"\"\"Fills vocabulary and doc counts with tokens from doc.\r\n\r\n Args:\r\n doc: Document to read tokens from.\r\n vocab_freqs: dict<token, frequency count>\r\n doc_counts: dict<token, document count>\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n doc_seen = set()\r\n\r\n for token in document_generators.tokens(doc):\r\n if doc.add_tokens or token in vocab_freqs:\r\n vocab_freqs[token] += 1\r\n if token not in doc_seen:\r\n doc_counts[token] += 1\r\n doc_seen.add(token)\r\n\r\n\r\ndef main(_):\r\n tf.logging.set_verbosity(tf.logging.INFO)\r\n vocab_freqs = defaultdict(int)\r\n doc_counts = defaultdict(int)\r\n\r\n # Fill vocabulary frequencies map and document counts map\r\n for doc in document_generators.documents(\r\n dataset='train',\r\n include_unlabeled=FLAGS.use_unlabeled,\r\n include_validation=FLAGS.include_validation):\r\n fill_vocab_from_doc(doc, vocab_freqs, doc_counts)\r\n\r\n # Filter out low-occurring terms\r\n vocab_freqs = dict((term, freq) for term, freq in iteritems(vocab_freqs)\r\n if doc_counts[term] > FLAGS.doc_count_threshold)\r\n\r\n # Sort by frequency\r\n ordered_vocab_freqs = data_utils.sort_vocab_by_frequency(vocab_freqs)\r\n\r\n # Limit vocab size\r\n ordered_vocab_freqs = ordered_vocab_freqs[:MAX_VOCAB_SIZE]\r\n\r\n # Add EOS token\r\n ordered_vocab_freqs.append((data_utils.EOS_TOKEN, 1))\r\n\r\n # Write\r\n tf.gfile.MakeDirs(FLAGS.output_dir)\r\n data_utils.write_vocab_and_frequency(ordered_vocab_freqs, FLAGS.output_dir)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.app.run()\r\n", "# Lint as: python2, python3\r\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Exports an SSD detection model to use with tf-lite.\r\n\r\nOutputs file:\r\n* A tflite compatible frozen graph - $output_directory/tflite_graph.pb\r\n\r\nThe exported graph has the following input and output nodes.\r\n\r\nInputs:\r\n'normalized_input_image_tensor': a float32 tensor of shape\r\n[1, height, width, 3] containing the normalized input image. Note that the\r\nheight and width must be compatible with the height and width configured in\r\nthe fixed_shape_image resizer options in the pipeline config proto.\r\n\r\nIn floating point Mobilenet model, 'normalized_image_tensor' has values\r\nbetween [-1,1). This typically means mapping each pixel (linearly)\r\nto a value between [-1, 1]. 
Input image\r\nvalues between 0 and 255 are scaled by (1/128.0) and then a value of\r\n-1 is added to them to ensure the range is [-1,1).\r\nIn quantized Mobilenet model, 'normalized_image_tensor' has values between [0,\r\n255].\r\nIn general, see the `preprocess` function defined in the feature extractor class\r\nin the object_detection/models directory.\r\n\r\nOutputs:\r\nIf add_postprocessing_op is true: frozen graph adds a\r\n TFLite_Detection_PostProcess custom op node has four outputs:\r\n detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box\r\n locations\r\n detection_classes: a float32 tensor of shape [1, num_boxes]\r\n with class indices\r\n detection_scores: a float32 tensor of shape [1, num_boxes]\r\n with class scores\r\n num_boxes: a float32 tensor of size 1 containing the number of detected boxes\r\nelse:\r\n the graph has two outputs:\r\n 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]\r\n containing the encoded box predictions.\r\n 'raw_outputs/class_predictions': a float32 tensor of shape\r\n [1, num_anchors, num_classes] containing the class scores for each anchor\r\n after applying score conversion.\r\n\r\nExample Usage:\r\n--------------\r\npython object_detection/export_tflite_ssd_graph.py \\\r\n --pipeline_config_path path/to/ssd_mobilenet.config \\\r\n --trained_checkpoint_prefix path/to/model.ckpt \\\r\n --output_directory path/to/exported_model_directory\r\n\r\nThe expected output would be in the directory\r\npath/to/exported_model_directory (which is created if it does not exist)\r\nwith contents:\r\n - tflite_graph.pbtxt\r\n - tflite_graph.pb\r\nConfig overrides (see the `config_override` flag) are text protobufs\r\n(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override\r\ncertain fields in the provided pipeline_config_path. 
These are useful for\r\nmaking small changes to the inference graph that differ from the training or\r\neval config.\r\n\r\nExample Usage (in which we change the NMS iou_threshold to be 0.5 and\r\nNMS score_threshold to be 0.0):\r\npython object_detection/export_tflite_ssd_graph.py \\\r\n --pipeline_config_path path/to/ssd_mobilenet.config \\\r\n --trained_checkpoint_prefix path/to/model.ckpt \\\r\n --output_directory path/to/exported_model_directory\r\n --config_override \" \\\r\n model{ \\\r\n ssd{ \\\r\n post_processing { \\\r\n batch_non_max_suppression { \\\r\n score_threshold: 0.0 \\\r\n iou_threshold: 0.5 \\\r\n } \\\r\n } \\\r\n } \\\r\n } \\\r\n \"\r\n\"\"\"\r\n\r\nimport tensorflow.compat.v1 as tf\r\nfrom google.protobuf import text_format\r\nfrom object_detection import export_tflite_ssd_graph_lib\r\nfrom object_detection.protos import pipeline_pb2\r\n\r\nflags = tf.app.flags\r\nflags.DEFINE_string('output_directory', None, 'Path to write outputs.')\r\nflags.DEFINE_string(\r\n 'pipeline_config_path', None,\r\n 'Path to a pipeline_pb2.TrainEvalPipelineConfig config '\r\n 'file.')\r\nflags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.')\r\nflags.DEFINE_integer('max_detections', 10,\r\n 'Maximum number of detections (boxes) to show.')\r\nflags.DEFINE_integer('max_classes_per_detection', 1,\r\n 'Maximum number of classes to output per detection box.')\r\nflags.DEFINE_integer(\r\n 'detections_per_class', 100,\r\n 'Number of anchors used per class in Regular Non-Max-Suppression.')\r\nflags.DEFINE_bool('add_postprocessing_op', True,\r\n 'Add TFLite custom op for postprocessing to the graph.')\r\nflags.DEFINE_bool(\r\n 'use_regular_nms', False,\r\n 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.')\r\nflags.DEFINE_string(\r\n 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig '\r\n 'text proto to override pipeline_config_path.')\r\n\r\nFLAGS = flags.FLAGS\r\n\r\n\r\ndef main(argv):\r\n del argv # Unused.\r\n flags.mark_flag_as_required('output_directory')\r\n flags.mark_flag_as_required('pipeline_config_path')\r\n flags.mark_flag_as_required('trained_checkpoint_prefix')\r\n\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n\r\n with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:\r\n text_format.Merge(f.read(), pipeline_config)\r\n text_format.Merge(FLAGS.config_override, pipeline_config)\r\n export_tflite_ssd_graph_lib.export_tflite_graph(\r\n pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory,\r\n FLAGS.add_postprocessing_op, FLAGS.max_detections,\r\n FLAGS.max_classes_per_detection, use_regular_nms=FLAGS.use_regular_nms)\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.app.run(main)\r\n", "# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"TFDS Classification decoder.\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom official.vision.beta.dataloaders import decoder\r\n\r\n\r\nclass Decoder(decoder.Decoder):\r\n \"\"\"A tf.Example decoder for classification task.\"\"\"\r\n\r\n def __init__(self):\r\n return\r\n\r\n def decode(self, serialized_example):\r\n sample_dict = {\r\n 'image/encoded':\r\n tf.io.encode_jpeg(serialized_example['image'], quality=100),\r\n 'image/class/label':\r\n serialized_example['label'],\r\n }\r\n return sample_dict\r\n" ]
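The `DatasetBuilder.pipeline` method in the first file above chains sharding, file shuffling, interleaved TFRecord reads, parallel parsing, batching, and prefetching. A minimal standalone sketch of the same tf.data recipe, assuming a directory of TFRecord files whose names start with `train` and a user-supplied `parse_fn`; both names are hypothetical placeholders, not part of the code above:

```python
import tensorflow as tf

def make_train_pipeline(data_dir, parse_fn, batch_size=128,
                        shuffle_buffer_size=10000):
    # List raw files without shuffling, then shuffle the file order.
    files = tf.data.Dataset.list_files(data_dir + '/train*', shuffle=False)
    files = files.shuffle(buffer_size=1024)
    # Read several TFRecord files in parallel, as pipeline() does.
    ds = files.interleave(
        tf.data.TFRecordDataset,
        cycle_length=10,
        block_length=1,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Shuffle examples and repeat indefinitely for training.
    ds = ds.shuffle(shuffle_buffer_size).repeat()
    # Parse and preprocess in parallel, then batch and prefetch.
    ds = ds.map(parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds.prefetch(tf.data.experimental.AUTOTUNE)
```

The drop_remainder=True choice mirrors the builder's training path, where partial batches are dropped so every step sees a full global batch.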
[ [ "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.train.CheckpointManager", "tensorflow.compat.v2.compat.v2.train.Checkpoint", "tensorflow.compat.v2.enable_v2_behavior", "numpy.ones", "tensorflow.compat.v2.compat.v2.train.CheckpointManager", "tensorflow.compat.v2.keras.initializers.Constant", "tensorflow.compat.v2.saved_model.load", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.identity", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.train.Checkpoint", "numpy.zeros" ], [ "tensorflow.python.platform.gfile.Exists", "tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.truncated_normal_initializer", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.squeeze", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.pad" ], [ "tensorflow.keras.Input", "tensorflow.test.main", "tensorflow.keras.backend.set_image_data_format" ], [ "numpy.random.seed", "tensorflow.gfile.Open", "numpy.arange", "numpy.eye", "numpy.random.shuffle", "numpy.concatenate", "tensorflow.logging.info", "numpy.array", "numpy.empty" ], [ "tensorflow.compat.v1.Variable", "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.fixed_size_partitioner", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.constant" ], [ "tensorflow.keras.Input", "tensorflow.test.main", "tensorflow.keras.backend.set_image_data_format" ], [ "tensorflow.range", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.python.distribute.combinations.combine", "tensorflow.ones", "tensorflow.test.main" ], [ "numpy.zeros" ], [ "tensorflow.keras.Input", "tensorflow.keras.layers.InputSpec", "tensorflow.test.main", "tensorflow.keras.backend.set_image_data_format" ], [ "numpy.concatenate", "numpy.copy", "numpy.square" ], [ "tensorflow.nn.relu6", "tensorflow.test.main", "numpy.float32" ], [ "tensorflow.compat.v1.shape", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.transpose", "tensorflow.compat.v1.image.resize_bilinear" ], [ "tensorflow.shape", "tensorflow.broadcast_to", "tensorflow.reshape", "tensorflow.keras.initializers.serialize", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.initializers.get" ], [ "tensorflow.io.gfile.isdir", "tensorflow.train.latest_checkpoint", "tensorflow.train.Checkpoint", "tensorflow.keras.regularizers.l2", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.keras.losses.Huber", "tensorflow.one_hot", "tensorflow.distribute.get_strategy", "tensorflow.keras.layers.InputSpec", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ], [ "tensorflow.keras.Input", "numpy.random.seed", "numpy.asarray", "tensorflow.test.main", "tensorflow.keras.Model", "numpy.random.randint" ], [ "tensorflow.constant", "tensorflow.test.main", "tensorflow.keras.Input" ], [ "numpy.unique", "numpy.reshape", "numpy.empty_like", "numpy.all", "numpy.array", "numpy.empty" ], [ "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.truncated_normal_initializer" ], [ "tensorflow.nn.xw_plus_b", "numpy.dot", "tensorflow.matmul", "numpy.sqrt", "tensorflow.zeros", "numpy.clip", "numpy.reshape", "numpy.asarray", "tensorflow.placeholder", "tensorflow.nn.tanh", "tensorflow.Session", "tensorflow.train.Saver", "numpy.array", "numpy.zeros", "numpy.random.RandomState", "tensorflow.random_normal" ], [ "tensorflow.compat.v1.random_uniform", 
"tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.truncated_normal_initializer", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.squeeze", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.name_scope", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.pad", "tensorflow.compat.v1.identity" ], [ "tensorflow.gfile.Exists", "tensorflow.gfile.GFile" ], [ "tensorflow.executing_eagerly", "tensorflow.Variable", "tensorflow.io.gfile.exists", "tensorflow.train.Checkpoint", "tensorflow.io.gfile.GFile", "tensorflow.saved_model.Asset", "tensorflow.keras.Model", "tensorflow.function", "tensorflow.name_scope", "tensorflow.no_op", "tensorflow.io.gfile.mkdir", "tensorflow.RaggedTensorSpec", "tensorflow.keras.initializers.TruncatedNormal", "tensorflow.TensorSpec", "tensorflow.io.gfile.copy", "tensorflow.keras.layers.Input" ], [ "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.test.main" ], [ "tensorflow.zeros", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "tensorflow.reshape", "tensorflow.io.parse_single_example", "tensorflow.io.VarLenFeature", "tensorflow.io.FixedLenFeature", "tensorflow.data.Dataset.list_files", "tensorflow.one_hot", "tensorflow.data.Dataset.range", "tensorflow.data.experimental.service.distribute" ], [ "tensorflow.scatter_update", "tensorflow.cond", "tensorflow.get_variable", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "tensorflow.equal", "tensorflow.group", "tensorflow.add_n", "tensorflow.Variable", "tensorflow.logical_or", "tensorflow.stop_gradient", "tensorflow.name_scope", "tensorflow.to_float", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.logical_not", "tensorflow.tile", "tensorflow.less_equal", "tensorflow.identity", "tensorflow.zeros_like", "tensorflow.logging.info", "tensorflow.no_op", "tensorflow.summary.histogram", "tensorflow.constant", "tensorflow.local_variables_initializer", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.contrib.framework.get_or_create_global_step", "tensorflow.get_variable_scope" ], [ "tensorflow.gfile.MakeDirs", "tensorflow.logging.set_verbosity", "tensorflow.app.run" ], [ "tensorflow.compat.v1.gfile.GFile", "tensorflow.compat.v1.app.run" ], [ "tensorflow.io.encode_jpeg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
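The two lists above are this record's `apis` and `possible_versions` fields: one sublist of called framework symbols per source file, and one version-compatibility dict per file. A rough sketch of how entries such as `tensorflow.compat.v1.gfile.GFile` could be recovered from a file's source with Python's `ast` module; the function name and the set of tracked packages are assumptions for illustration, not the pipeline that actually produced this dataset:

```python
import ast

def extract_api_calls(source, packages=('tensorflow', 'numpy')):
    """Collect dotted call names such as 'tensorflow.io.encode_jpeg'."""
    tree = ast.parse(source)
    # Map import aliases back to full module paths,
    # e.g. 'import tensorflow.compat.v1 as tf' -> {'tf': 'tensorflow.compat.v1'}.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for name in node.names:
                aliases[name.asname or name.name] = name.name
    calls = set()
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        # Unwind attribute chains like tf.io.encode_jpeg back to the root name.
        parts, func = [], node.func
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name):
            root = aliases.get(func.id, func.id)
            dotted = '.'.join([root] + parts[::-1])
            if dotted.split('.')[0] in packages:
                calls.add(dotted)
    return sorted(calls)
```

Run over the TFDS decoder file at the end of the code list, this would yield ['tensorflow.io.encode_jpeg'], matching the final sublist of the `apis` field.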
jerrywgz/Paddle
[ "85c4912755b783dd7554a9d6b9dae4a7e40371bc", "85c4912755b783dd7554a9d6b9dae4a7e40371bc" ]
[ "python/paddle/fluid/tests/test_lod_tensor.py", "python/paddle/v2/image.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle.fluid as fluid\nfrom paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor\nimport numpy as np\nimport unittest\n\n\nclass TestLoDTensor(unittest.TestCase):\n def test_pybind_recursive_seq_lens(self):\n tensor = fluid.LoDTensor()\n recursive_seq_lens = []\n tensor.set_recursive_sequence_lengths(recursive_seq_lens)\n recursive_seq_lens = [[], [1], [3]]\n self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,\n recursive_seq_lens)\n recursive_seq_lens = [[0], [2], [3]]\n self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,\n recursive_seq_lens)\n\n recursive_seq_lens = [[1, 2, 3]]\n tensor.set_recursive_sequence_lengths(recursive_seq_lens)\n self.assertEqual(tensor.recursive_sequence_lengths(),\n recursive_seq_lens)\n tensor.set(np.random.random([6, 1]), fluid.CPUPlace())\n self.assertTrue(tensor.has_valid_recursive_sequence_lengths())\n tensor.set(np.random.random([9, 1]), fluid.CPUPlace())\n self.assertFalse(tensor.has_valid_recursive_sequence_lengths())\n\n # Each level's sum should be equal to the number of items in the next level\n # Moreover, last level's sum should be equal to the tensor height\n recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 2]]\n tensor.set_recursive_sequence_lengths(recursive_seq_lens)\n self.assertEqual(tensor.recursive_sequence_lengths(),\n recursive_seq_lens)\n tensor.set(np.random.random([8, 1]), fluid.CPUPlace())\n self.assertFalse(tensor.has_valid_recursive_sequence_lengths())\n recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 1]]\n tensor.set_recursive_sequence_lengths(recursive_seq_lens)\n self.assertTrue(tensor.has_valid_recursive_sequence_lengths())\n tensor.set(np.random.random([9, 1]), fluid.CPUPlace())\n self.assertFalse(tensor.has_valid_recursive_sequence_lengths())\n\n def test_create_lod_tensor(self):\n # Create LoDTensor from a list\n data = [[1, 2, 3], [3, 4]]\n wrong_recursive_seq_lens = [[2, 2]]\n correct_recursive_seq_lens = [[3, 2]]\n self.assertRaises(AssertionError, create_lod_tensor, data,\n wrong_recursive_seq_lens, fluid.CPUPlace())\n tensor = create_lod_tensor(data, correct_recursive_seq_lens,\n fluid.CPUPlace())\n self.assertEqual(tensor.recursive_sequence_lengths(),\n correct_recursive_seq_lens)\n\n # Create LoDTensor from numpy array\n data = np.random.random([10, 1])\n recursive_seq_lens = [[2, 1], [3, 3, 4]]\n tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())\n self.assertEqual(tensor.recursive_sequence_lengths(),\n recursive_seq_lens)\n\n # Create LoDTensor from another LoDTensor, they are differnt instances\n new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]\n new_tensor = create_lod_tensor(tensor, new_recursive_seq_lens,\n fluid.CPUPlace())\n self.assertEqual(tensor.recursive_sequence_lengths(),\n recursive_seq_lens)\n self.assertEqual(new_tensor.recursive_sequence_lengths(),\n new_recursive_seq_lens)\n\n def 
test_create_random_int_lodtensor(self):\n        # The shape of a word, commonly used in speech and NLP problems, is [1]\n        shape = [1]\n        recursive_seq_lens = [[2, 3, 5]]\n        dict_size = 10000\n        low = 0\n        high = dict_size - 1\n        tensor = create_random_int_lodtensor(recursive_seq_lens, shape,\n                                             fluid.CPUPlace(), low, high)\n        self.assertEqual(tensor.recursive_sequence_lengths(),\n                         recursive_seq_lens)\n        self.assertEqual(tensor.shape(), [10, 1])\n\n\nif __name__ == '__main__':\n    unittest.main()\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis file contains some common interfaces for image preprocessing.\nMany users are confused about the image layout. We introduce\nthe image layout as follows.\n\n- CHW Layout\n\n  - The abbreviations: C=channel, H=Height, W=Width\n  - The default layout of an image opened by cv2 or PIL is HWC.\n    PaddlePaddle only supports the CHW layout, and CHW is simply\n    a transpose of HWC, so the input image must be transposed.\n\n- Color format: RGB or BGR\n\n  OpenCV uses the BGR color format. PIL uses the RGB color format. Both\n  formats can be used for training. Note that the format should\n  be kept consistent between the training and inference periods.\n\"\"\"\nimport numpy as np\ntry:\n    import cv2\nexcept ImportError:\n    cv2 = None\nimport os\nimport tarfile\nimport cPickle\n\n__all__ = [\n    \"load_image_bytes\", \"load_image\", \"resize_short\", \"to_chw\", \"center_crop\",\n    \"random_crop\", \"left_right_flip\", \"simple_transform\", \"load_and_transform\",\n    \"batch_images_from_tar\"\n]\n\n\ndef batch_images_from_tar(data_file,\n                          dataset_name,\n                          img2label,\n                          num_per_batch=1024):\n    \"\"\"\n    Read images from a tar file and batch them into batch files.\n\n    :param data_file: path of the image tar file\n    :type data_file: string\n    :param dataset_name: 'train', 'test' or 'valid'\n    :type dataset_name: string\n    :param img2label: a dict with the image file name as key\n                      and the image's label as value\n    :type img2label: dict\n    :param num_per_batch: image number per batch file\n    :type num_per_batch: int\n    :return: path of the list file containing paths of batch files\n    :rtype: string\n    \"\"\"\n    batch_dir = data_file + \"_batch\"\n    out_path = \"%s/%s\" % (batch_dir, dataset_name)\n    meta_file = \"%s/%s.txt\" % (batch_dir, dataset_name)\n\n    if os.path.exists(out_path):\n        return meta_file\n    else:\n        os.makedirs(out_path)\n\n    tf = tarfile.open(data_file)\n    mems = tf.getmembers()\n    data = []\n    labels = []\n    file_id = 0\n    for mem in mems:\n        if mem.name in img2label:\n            data.append(tf.extractfile(mem).read())\n            labels.append(img2label[mem.name])\n            if len(data) == num_per_batch:\n                output = {}\n                output['label'] = labels\n                output['data'] = data\n                cPickle.dump(\n                    output,\n                    open('%s/batch_%d' % (out_path, file_id), 'w'),\n                    protocol=cPickle.HIGHEST_PROTOCOL)\n                file_id += 1\n                data = []\n                labels = []\n    if len(data) > 0:\n        output = {}\n        output['label'] = labels\n        output['data'] = data\n        cPickle.dump(\n            output,\n            open('%s/batch_%d' % (out_path, file_id), 'w'),\n            
protocol=cPickle.HIGHEST_PROTOCOL)\n\n with open(meta_file, 'a') as meta:\n for file in os.listdir(out_path):\n meta.write(os.path.abspath(\"%s/%s\" % (out_path, file)) + \"\\n\")\n return meta_file\n\n\ndef load_image_bytes(bytes, is_color=True):\n \"\"\"\n Load an color or gray image from bytes array.\n\n Example usage:\n \n .. code-block:: python\n\n with open('cat.jpg') as f:\n im = load_image_bytes(f.read())\n\n :param bytes: the input image bytes array.\n :type bytes: str\n :param is_color: If set is_color True, it will load and\n return a color image. Otherwise, it will\n load and return a gray image.\n :type is_color: bool\n \"\"\"\n flag = 1 if is_color else 0\n file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8)\n img = cv2.imdecode(file_bytes, flag)\n return img\n\n\ndef load_image(file, is_color=True):\n \"\"\"\n Load an color or gray image from the file path.\n\n Example usage:\n \n .. code-block:: python\n\n im = load_image('cat.jpg')\n\n :param file: the input image path.\n :type file: string\n :param is_color: If set is_color True, it will load and\n return a color image. Otherwise, it will\n load and return a gray image.\n :type is_color: bool\n \"\"\"\n # cv2.IMAGE_COLOR for OpenCV3\n # cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version\n # cv2.IMAGE_GRAYSCALE for OpenCV3\n # cv2.CV_LOAD_IMAGE_GRAYSCALE for older OpenCV Version\n # Here, use constant 1 and 0\n # 1: COLOR, 0: GRAYSCALE\n flag = 1 if is_color else 0\n im = cv2.imread(file, flag)\n return im\n\n\ndef resize_short(im, size):\n \"\"\" \n Resize an image so that the length of shorter edge is size.\n\n Example usage:\n \n .. code-block:: python\n\n im = load_image('cat.jpg')\n im = resize_short(im, 256)\n \n :param im: the input image with HWC layout.\n :type im: ndarray\n :param size: the shorter edge size of image after resizing.\n :type size: int\n \"\"\"\n h, w = im.shape[:2]\n h_new, w_new = size, size\n if h > w:\n h_new = size * h / w\n else:\n w_new = size * w / h\n im = cv2.resize(im, (w_new, h_new), interpolation=cv2.INTER_CUBIC)\n return im\n\n\ndef to_chw(im, order=(2, 0, 1)):\n \"\"\"\n Transpose the input image order. The image layout is HWC format\n opened by cv2 or PIL. Transpose the input image to CHW layout\n according the order (2,0,1).\n\n Example usage:\n \n .. code-block:: python\n\n im = load_image('cat.jpg')\n im = resize_short(im, 256)\n im = to_chw(im)\n \n :param im: the input image with HWC layout.\n :type im: ndarray\n :param order: the transposed order.\n :type order: tuple|list \n \"\"\"\n assert len(im.shape) == len(order)\n im = im.transpose(order)\n return im\n\n\ndef center_crop(im, size, is_color=True):\n \"\"\"\n Crop the center of image with size.\n\n Example usage:\n \n .. code-block:: python\n\n im = center_crop(im, 224)\n \n :param im: the input image with HWC layout.\n :type im: ndarray\n :param size: the cropping size.\n :type size: int\n :param is_color: whether the image is color or not.\n :type is_color: bool\n \"\"\"\n h, w = im.shape[:2]\n h_start = (h - size) / 2\n w_start = (w - size) / 2\n h_end, w_end = h_start + size, w_start + size\n if is_color:\n im = im[h_start:h_end, w_start:w_end, :]\n else:\n im = im[h_start:h_end, w_start:w_end]\n return im\n\n\ndef random_crop(im, size, is_color=True):\n \"\"\"\n Randomly crop input image with size.\n\n Example usage:\n \n .. 
code-block:: python\n\n im = random_crop(im, 224)\n \n :param im: the input image with HWC layout.\n :type im: ndarray\n :param size: the cropping size.\n :type size: int\n :param is_color: whether the image is color or not.\n :type is_color: bool\n \"\"\"\n h, w = im.shape[:2]\n h_start = np.random.randint(0, h - size + 1)\n w_start = np.random.randint(0, w - size + 1)\n h_end, w_end = h_start + size, w_start + size\n if is_color:\n im = im[h_start:h_end, w_start:w_end, :]\n else:\n im = im[h_start:h_end, w_start:w_end]\n return im\n\n\ndef left_right_flip(im, is_color=True):\n \"\"\"\n Flip an image along the horizontal direction.\n Return the flipped image.\n\n Example usage:\n \n .. code-block:: python\n\n im = left_right_flip(im)\n \n :param im: input image with HWC layout or HW layout for gray image\n :type im: ndarray\n :param is_color: whether input image is color or not\n :type is_color: bool\n \"\"\"\n if len(im.shape) == 3 and is_color:\n return im[:, ::-1, :]\n else:\n return im[:, ::-1]\n\n\ndef simple_transform(im,\n resize_size,\n crop_size,\n is_train,\n is_color=True,\n mean=None):\n \"\"\"\n Simply data argumentation for training. These operations include\n resizing, croping and flipping.\n\n Example usage:\n \n .. code-block:: python\n\n im = simple_transform(im, 256, 224, True)\n\n :param im: The input image with HWC layout.\n :type im: ndarray\n :param resize_size: The shorter edge length of the resized image.\n :type resize_size: int\n :param crop_size: The cropping size.\n :type crop_size: int\n :param is_train: Whether it is training or not.\n :type is_train: bool\n :param is_color: whether the image is color or not.\n :type is_color: bool\n :param mean: the mean values, which can be element-wise mean values or \n mean values per channel.\n :type mean: numpy array | list\n \"\"\"\n im = resize_short(im, resize_size)\n if is_train:\n im = random_crop(im, crop_size, is_color=is_color)\n if np.random.randint(2) == 0:\n im = left_right_flip(im, is_color)\n else:\n im = center_crop(im, crop_size, is_color=is_color)\n if len(im.shape) == 3:\n im = to_chw(im)\n\n im = im.astype('float32')\n if mean is not None:\n mean = np.array(mean, dtype=np.float32)\n # mean value, may be one value per channel \n if mean.ndim == 1 and is_color:\n mean = mean[:, np.newaxis, np.newaxis]\n elif mean.ndim == 1:\n mean = mean\n else:\n # elementwise mean\n assert len(mean.shape) == len(im)\n im -= mean\n\n return im\n\n\ndef load_and_transform(filename,\n resize_size,\n crop_size,\n is_train,\n is_color=True,\n mean=None):\n \"\"\"\n Load image from the input file `filename` and transform image for\n data argumentation. Please refer to the `simple_transform` interface\n for the transform operations.\n\n Example usage:\n \n .. code-block:: python\n\n im = load_and_transform('cat.jpg', 256, 224, True)\n\n :param filename: The file name of input image.\n :type filename: string\n :param resize_size: The shorter edge length of the resized image.\n :type resize_size: int\n :param crop_size: The cropping size.\n :type crop_size: int\n :param is_train: Whether it is training or not.\n :type is_train: bool\n :param is_color: whether the image is color or not.\n :type is_color: bool\n :param mean: the mean values, which can be element-wise mean values or \n mean values per channel.\n :type mean: numpy array | list\n \"\"\"\n im = load_image(filename, is_color)\n im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean)\n return im\n" ]
[ [ "numpy.random.random" ], [ "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MattAlexMiracle/SmartPatch
[ "c485cb433d8e085d6eae10a335ee19f5e6c1a41c" ]
[ "Benchmark/research-seq2seq-HTR/models/encoder_vgg.py" ]
[ "from torch import nn\nfrom torch.autograd import Variable\nimport torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport numpy as np\n\n# from models.vgg_tro_channel1 import vgg16_bn\nfrom models.vgg_tro_channel3 import vgg16_bn, vgg19_bn\n\n# torch.cuda.set_device(1)\n\nDROP_OUT = False\nLSTM = False\nSUM_UP = True\nPRE_TRAIN_VGG = True\n\n\nclass Encoder(nn.Module):\n def __init__(self, hidden_size, height, width, bgru, step, flip):\n super(Encoder, self).__init__()\n self.hidden_size = hidden_size\n self.height = height\n self.width = width\n self.bi = bgru\n self.step = step\n self.flip = flip\n self.n_layers = 2\n self.dropout = 0.5\n\n # self.layer = vgg16_bn(PRE_TRAIN_VGG)\n self.layer = vgg19_bn(PRE_TRAIN_VGG)\n\n if DROP_OUT:\n self.layer_dropout = nn.Dropout2d(p=0.5)\n if self.step is not None:\n # self.output_proj = nn.Linear((((((self.height-2)//2)-2)//2-2-2-2)//2)*128*self.step, self.hidden_size)\n self.output_proj = nn.Linear(\n self.height // 16 * 512 * self.step, self.height // 16 * 512\n )\n\n if LSTM:\n RNN = nn.LSTM\n else:\n RNN = nn.GRU\n\n if self.bi: # 8: 3 MaxPool->2**3 128: last hidden_size of layer4\n self.rnn = RNN(\n self.height // 16 * 512,\n self.hidden_size,\n self.n_layers,\n dropout=self.dropout,\n bidirectional=True,\n )\n if SUM_UP:\n self.enc_out_merge = (\n lambda x: x[:, :, : x.shape[-1] // 2] + x[:, :, x.shape[-1] // 2 :]\n )\n self.enc_hidden_merge = lambda x: (x[0] + x[1]).unsqueeze(0)\n else:\n self.rnn = RNN(\n self.height // 16 * 512,\n self.hidden_size,\n self.n_layers,\n dropout=self.dropout,\n bidirectional=False,\n )\n\n # (32, 1, 80, 1400)\n def forward(self, in_data, in_data_len, hidden=None):\n batch_size = in_data.shape[0]\n out = self.layer(in_data) # torch.Size([32, 512, 4, 63])\n if DROP_OUT and self.training:\n out = self.layer_dropout(out)\n # out.register_hook(print)\n out = out.permute(3, 0, 2, 1) # (width, batch, height, channels)\n out.contiguous()\n # out = out.view(-1, batch_size, (((((self.height-2)//2)-2)//2-2-2-2)//2)*128) # (t, b, f) (173, 32, 1024)\n out = out.reshape(-1, batch_size, self.height // 16 * 512)\n if self.step is not None:\n time_step, batch_size, n_feature = out.shape[0], out.shape[1], out.shape[2]\n out_short = Variable(\n torch.zeros(time_step // self.step, batch_size, n_feature * self.step)\n ).cuda() # t//STEP, b, f*STEP\n for i in range(0, time_step // self.step):\n part_out = [out[j] for j in range(i * self.step, (i + 1) * self.step)]\n # reverse the image feature map\n out_short[i] = torch.cat(part_out, 1) # b, f*STEP\n\n out = self.output_proj(out_short) # t//STEP, b, hidden_size\n width = out.shape[0]\n src_len = in_data_len.numpy() * (width / self.width)\n src_len = src_len + 0.999 # in case of 0 length value from float to int\n src_len = src_len.astype(\"int\")\n out = pack_padded_sequence(out, src_len.tolist(), batch_first=False)\n output, hidden = self.rnn(out, hidden)\n # output: t, b, f*2 hidden: 2, b, f\n output, output_len = pad_packed_sequence(output, batch_first=False)\n if self.bi and SUM_UP:\n output = self.enc_out_merge(output)\n # hidden = self.enc_hidden_merge(hidden)\n # # output: t, b, f hidden: b, f\n odd_idx = [1, 3, 5, 7, 9, 11]\n hidden_idx = odd_idx[: self.n_layers]\n final_hidden = hidden[hidden_idx]\n # if self.flip:\n # hidden = output[-1]\n # #hidden = hidden.permute(1, 0, 2) # b, 2, f\n # #hidden = hidden.contiguous().view(batch_size, -1) # b, f*2\n # else:\n # hidden = output[0] # b, f*2\n return output, final_hidden # t, b, f*2 b, 
f*2\n\n # matrix: b, c, h, w lens: list size of batch_size\n def conv_mask(self, matrix, lens):\n lens = np.array(lens)\n width = matrix.shape[-1]\n lens2 = lens * (width / self.width)\n lens2 = lens2 + 0.999 # in case le == 0\n lens2 = lens2.astype(\"int\")\n matrix_new = matrix.permute(0, 3, 1, 2) # b, w, c, h\n matrix_out = Variable(torch.zeros(matrix_new.shape)).cuda()\n for i, le in enumerate(lens2):\n if self.flip:\n matrix_out[i, -le:] = matrix_new[i, -le:]\n else:\n matrix_out[i, :le] = matrix_new[i, :le]\n matrix_out = matrix_out.permute(0, 2, 3, 1) # b, c, h, w\n return matrix_out\n\n\nif __name__ == \"__main__\":\n print(vgg16_bn())\n" ]
[ [ "torch.nn.Dropout2d", "torch.cat", "torch.zeros", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cvemeki/Computational-motor-control
[ "cf02c77bff44ffdff63630c445b35b657a1d2b6c", "cf02c77bff44ffdff63630c445b35b657a1d2b6c" ]
[ "lab4_ZHANG_ZHENG_YANG/Python/Muscle.py", "lab4_ZHANG_ZHENG_YANG/Python/exercise1_copy.py" ]
[ "import numpy as np\n\n\nclass Muscle(object):\n \"\"\"This class implements the muscle model.\n The muscle model is based on the hill-type muscle model.\n \"\"\"\n # Default Muscle Parameters\n\n c = np.log(0.05) # pylint: disable=no-member\n N = 1.5\n K = 5.0\n tau_act = 0.01 # Time constant for the activation function\n F_per_m2 = 300000 # Force per m2 of muscle PCSA\n density = 1060\n\n def __init__(self, parameters):\n \"\"\"This function initializes the muscle model.\n A default muscle name is given as muscle\n\n Parameters\n ----------\n parameters : <MuscleParameters>\n Instance of MuscleParameters class\n\n Returns:\n -------\n Muscle : <Muscle>\n Returns an instance of class Muscle\n\n Attributes:\n ----------\n l_MTC : float\n Length of Muscle Tendon Complex\n l_slack : float\n Tendon slack length\n l_opt : float\n Optimal fiber length\n l_CE : float\n Length of contracticle element\n v_CE : float\n Velocity of contractile element\n deltaLength : float\n Change in Muscle Tendon length\n activeForce : float\n Active force generated by the muscle\n passiveForce : float\n Passive force generated by the muscle\n force : float\n Sum of Active and Passive forces\n tendonForce : float\n Force generated by the muscle tendon\n stim : float\n Muscle stimulation.\n\n Methods:\n --------\n step : func\n Integrates muscle state by time step dt\n\n Example:\n --------\n >>> from SystemParameters import MuscleParameters\n >>> import Muscle\n >>> muscle_parameters = MuscleParameters()\n >>> muscle1 = Muscle.Muscle(muscle_parameters)\n >>> muscle1.stim = 0.05\n >>> muscle1.deltaLength = 0.01\n >>> muscle1.step(dt)\n \"\"\"\n\n # Muscle specific parameters initialization\n self.l_slack = parameters.l_slack\n self.l_opt = parameters.l_opt\n self.v_max = parameters.v_max\n self.F_max = 1500 # parameters.f_max\n self.pennation = parameters.pennation\n self.E_ref = 0.04 # Reference strain\n self.w = 0.4\n\n # Muscle parameters initialization\n self.musclejoints = []\n self.l_SE = 0.0 # Muscle Series Element Length\n self.l_CE = 0.3 # Muscle Contracticle Element Length\n self.A = 0.01 # Muscle activation\n self.stim = 0.01 # base stimulation\n self.l_MTC = 0.0 # Muscle Tendon Unit (MTU) length\n self.deltaLength = 0.0\n \n # lse!= 0 because this will automatically dstribute the length according to lmtc !!!!!\n self.initializeMuscleLength() # executed everytime when the muscle is called!!!!!!!\n\n ######################### Attributes #########################\n @property\n def l_slack(self):\n \"\"\"Muscle Tendon Slack Length.\"\"\"\n return self.__l_slack\n\n @l_slack.setter\n def l_slack(self, value):\n \"\"\" Keyword Arguments:\n value -- \"Muscle Tendon Slack Length. 
\"\"\"\n self.__l_slack = value\n\n @property\n def l_MTC(self):\n \"\"\" Length of Muscle Tendon Complex.\"\"\"\n return self.__l_MTC\n\n @l_MTC.setter\n def l_MTC(self, value):\n \"\"\" Keyword Arguments:\n value -- Length of Muscle Tendon Complex \"\"\"\n self.__l_MTC = value\n\n @property\n def l_CE(self):\n \"\"\" Length of muscle contracticle element.\"\"\"\n return self.__l_CE\n\n @l_CE.setter\n def l_CE(self, value):\n \"\"\" Keyword Arguments:\n value -- Length of muscle contracticle element\"\"\"\n self.__l_CE = value\n\n @property\n def activeForce(self):\n \"\"\"This function returns the active force generated by the muscle.\"\"\"\n return self.computeMuscleActiveForce(self.l_CE, self.v_CE, self.A)\n\n @property\n def passiveForce(self):\n \"\"\"This function returns the passive force generated by the muscle.\"\"\"\n# print self._F_PE_star(self.l_CE) - self._F_BE(self.l_CE)\n return self._F_PE_star(self.l_CE) + self._F_BE(self.l_CE) # not PE - BE?\n\n @property\n def v_CE(self):\n \"\"\"Velocity of muscle contracticle element\"\"\"\n return self.__v_CE\n\n @v_CE.setter\n def v_CE(self, value):\n \"\"\"Velocity of muscle contracticle element.\"\"\"\n self.__v_CE = value\n\n @property\n def deltaLength(self):\n \"\"\"This function returns the change in length of the muscle\"\"\"\n return self._deltaLength\n\n @deltaLength.setter\n def deltaLength(self, value):\n \"\"\" Keyword Arguments:\n value -- Set the change in Muscle Tendon Complex length\"\"\"\n self._deltaLength = value\n\n @property\n def force(self):\n \"\"\"Function returns the sum of active and passive force\"\"\"\n return self.activeForce + self.passiveForce\n\n @property\n def tendonForce(self):\n \"\"\"This function returns the force generated by the muscle.\"\"\"\n return self.computeMuscleTendonForce(self.l_CE)\n\n ######################### METHODS #########################\n\n def computeMuscleActiveForce(self, l_CE, v_CE, a):\n \"\"\"This function computes the Active Muscle Force.\n The function requires\n l_CE : Contracticle element length\n v_CE : Contracticle element velocity\n a : muscle activation.\"\"\"\n return a * self._f_v_ce(v_CE) * self._f_l(l_CE) * self.F_max\n\n def computeMuscleTendonForce(self, l_CE):\n \"\"\"This function computes the muscle tendon force.\n The function requires contracticle element length\"\"\"\n return self._F_SE(self.computeTendonLength(l_CE))\n\n def computeTendonLength(self, l_CE):\n \"\"\"This function computes the muscle tendon length.\n The function requires contracticle element length\"\"\"\n return self.l_MTC - l_CE\n\n def computeMuscleTendonLength(self):\n \"\"\"This function computes the total muscle length of muscle tendon unit.\n The function requires the list of muscle joint objects\"\"\"\n # if(self.deltaLength > 0.5):\n # print('Delta Length : {}'.format(self.deltaLength))\n self.l_MTC = self.l_slack + self.l_opt + self.deltaLength\n for link in self.musclejoints:\n self.l_MTC += self.pennation * link.getDelta_Length()\n\n def updateActivation(self, dt):\n \"\"\"This function updates the activation function of the muscle.\n The function requires time step dt as the inputs\"\"\"\n self.stim = max(0.01, min(1.0, self.stim))\n self.dA = (self.stim - self.A) * dt / Muscle.tau_act\n self.A += self.dA\n\n def initializeMuscleLength(self):\n \"\"\"This function initializes the muscle lengths.\"\"\"\n self.computeMuscleTendonLength()\n\n if(self.l_MTC < (self.l_slack + self.l_opt)):\n self.l_CE = self.l_opt\n self.l_SE = self.l_MTC - self.l_CE\n else:\n if(self.l_opt * 
self.w + self.E_ref * self.l_slack != 0.0):\n self.l_SE = self.l_slack * ((self.l_opt * self.w + self.E_ref * (\n self.l_MTC - self.l_opt)) / (self.l_opt * self.w + self.E_ref * self.l_slack))\n else:\n self.l_SE = self.l_slack\n\n self.l_CE = self.l_MTC - self.l_SE\n\n def step(self, dt):\n \"\"\"This function integrates and steps the muscle model by\n time step dt.\"\"\"\n self.updateActivation(dt)\n self.computeMuscleTendonLength()\n self.v_CE = self._v_CE(\n self._f_v(\n self._F_SE(\n self.l_SE), self._F_BE(\n self.l_CE), self.A, self._f_l(\n self.l_CE), self._F_PE_star(\n self.l_CE)))\n \n # Vce = Vce( fv( Fse(lse),Fbe(lce),A,fl(lce),Fpestar(lce) ) )\n \n # Integration of the velocity to obtain the muscle length\n # Here a simple EULER integration is applied since the integration\n # time steps are small\n # Can be changed to a more complex integrator\n self.l_CE = self.l_CE - self.v_CE * dt if self.l_CE > 0.0 else 0.0\n self.l_SE = self.l_MTC - self.l_CE\n\n def applyForce(self):\n # Applies force to the Joint\n \"\"\"This function applies the force to the respective muscle joint.\n The function requires the list of muscle joint objects\"\"\"\n for link in self.musclejoints:\n link.addTorqueToJoint()\n\n def _F_SE(self, l_SE):\n \"\"\"This function computes the Force in the Series Element (SE).\n The function requires SE length l_SE as inputs.\"\"\"\n f_se = self.F_max * ((l_SE - self.l_slack) / (self.l_slack *\n self.E_ref))**2 if l_SE > self.l_slack else 0.0\n return f_se\n\n def _F_PE_star(self, l_CE):\n \"\"\" This function computes the Force in the Parallel Element (PE).\n Force prevents the muscle from over-exentsion\n The function requires contracticle length l_CE as inputs.\"\"\"\n return self.F_max * ((l_CE - self.l_opt) / (self.l_opt * self.w))**2 if l_CE > self.l_opt else 0.0\n\n def _F_BE(self, l_CE):\n \"\"\" This function computes the Force in the muscle belly.\n Force prevents the muscle from collapsing on itself.\n The function requires SE length l_SE as inputs.\"\"\"\n return self.F_max * ((l_CE - self.l_opt * (1.0 - self.w)) / (\n self.l_opt * self.w / 2.0))**2 if l_CE <= self.l_opt * (1.0 - self.w) else 0.0\n\n def _f_l(self, l_CE):\n \"\"\" This function computes the force from force-length relationship.\n The function requires SE length l_SE as inputs.\"\"\"\n val = abs((l_CE - self.l_opt) / (self.l_opt * self.w))\n exposant = Muscle.c * val * val * val\n return np.exp(exposant)\n\n def _f_v_ce(self, v_CE):\n \"\"\" This function computes the force from force-velocity relationship.\n The function requires contracticle velocity as inputs.\"\"\"\n if(v_CE >= 0):\n return (self.v_max - v_CE) / (self.v_max + Muscle.K * v_CE)\n else:\n return Muscle.N + (Muscle.N - 1) * (self.v_max +\n v_CE) / (7.56 * Muscle.K * v_CE - self.v_max)\n\n def _f_v(self, F_SE, F_BE, a, f_l, F_PE_star): # Fmax is cancelled in fv\n \"\"\" This function computes the force from force-velocity relationship.\n The function requires\n F_SE : Series element force\n F_BE : Muscle belly force\n a : muscle activation\n f_l : Force from force-length relationship\n F_PE_star : Parallel element force.\"\"\"\n if(self.F_max * a * f_l + F_PE_star == 0.0):\n f_v = 0.0\n else:\n f_v = (F_SE + F_BE) / ((self.F_max * a * f_l) + F_PE_star)\n\n f_v = 1.5 if f_v > 1.5 else f_v\n f_v = 0.0 if f_v < 0.0 else f_v\n# print f_v\n return f_v\n\n def _v_CE(self, f_v):\n \"\"\" This function computes the Contracticle element velocity.\n The function requires force from force-velocity relationship.\"\"\"\n return 
self.v_max * (1.0 - f_v) / (1.0 + f_v * Muscle.K) if f_v < 1.0 else self.v_max * \\\n (f_v - 1.0) / (7.56 * Muscle.K * (f_v - Muscle.N) + 1.0 - Muscle.N)\n\n", "\"\"\" Lab 4 \"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom biopack import integrate, DEFAULT, parse_args\nimport biolog\nfrom SystemParameters import PendulumParameters\nfrom lab4_pendulum import pendulum_system\n\nDEFAULT[\"label\"] = [r\"$\\theta$ [rad]\", r\"$d\\theta/dt$ [rad/s]\"]\n\n\ndef pendulum_integration(state, time, *args, **kwargs):\n \"\"\" Function for system integration \"\"\"\n# biolog.warning(\n# \"Pendulum equation with spring and damper must be implemented\") # l_S\n return pendulum_system(state[0], state[1], *args, **kwargs)[:, 0]\n\n\n\ndef exercise1a():\n \"\"\" Exercise 1 \"\"\"\n biolog.info(\"Executing Lab 4 : Exercise 1\");\n\n def period_(dt,state):\n zero = [0.0,0.0]\n j = 0\n for i in range(1,len(state)):\n if state[i]*state[i-1] <= 0:\n zero[j] = i\n j+=1\n if j == 2:\n break\n period = (zero[1] - zero[0])*dt\n return period\n \n def different_initial_conditions(position,parameters,time):\n for position_ in position:\n x0 = [position_, 0]\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n res.plot_state(\"State\") \n plt.title('State')\n res.plot_phase(\"Phase\")\n plt.title('Phase')\n plt.show()\n return None\n \n def influence_of_k(k,parameters,time,x0,k_test):\n maxposition = np.ones(len(k))\n maxvelocity = np.ones(len(k))\n period = np.ones(len(k))\n \n ''' the change of amplitude and the period in function of k '''\n \n for i in range(0,len(k)): \n parameters.k1 = k[i]\n parameters.k2 = k[i]\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n \n maxposition[i] = max(res.state[:,0])\n maxvelocity[i] = max(res.state[:,1])\n period[i] = period_(dt,res.state[:,1])\n \n plt.plot(k,maxposition)\n plt.plot(k,maxvelocity)\n plt.plot(k,period)\n plt.grid()\n plt.title('The change of the amplitude and the period in function of k')\n plt.legend(['amplitude of position(rad)','amplitude of velocity(rad/s)','period of movement(s)'])\n plt.show()\n \n ''' plot position and velocity seperately for 2 k'''\n for i in range(0,2): \n for k_ in k_test:\n parameters.k1 = k_\n parameters.k2 = k_\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n plt.plot(time,res.state[:,i])\n if i == 0:\n plt.title('Position-time')\n else: plt.title('Velocity-time')\n \n plt.legend(['k1 = %s'%(k_test[0]) , 'k2 = %s'%(k_test[1])])\n# plt.legend(['k = s'])\n plt.grid()\n plt.show()\n return None\n \n def influence_of_theta0(theta0,parameters,time,x0,theta0_test):\n \n maxposition = np.ones(len(theta0))\n minposition = np.ones(len(theta0))\n maxvelocity = np.ones(len(k))\n period = np.ones(len(k))\n \n ''' the change of amplitude, the range of motion, and the period in function of k '''\n for i in range(0,len(theta0)):\n parameters.s_theta_ref1 = theta0[i]\n parameters.s_theta_ref2 = theta0[i]\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n \n maxposition[i] = max(res.state[:,0])\n minposition[i] = min(res.state[:,0])\n maxvelocity[i] = max(res.state[:,1])\n period[i] = period_(dt,res.state[:,1])\n \n plt.plot(theta0,maxposition)\n plt.plot(theta0,minposition)\n plt.plot(theta0,maxvelocity)\n plt.plot(theta0,period)\n plt.title('The change of the amplitude, the range of motion, and the period in function of k')\n plt.legend(['amplitude of right position(rad)','amplitude of left position(rad)','amplitude of 
velocity(rad/s)','period of movement(s)'])\n plt.grid()\n plt.show()\n \n for i in range(0,2): \n for theta0_ in theta0_test:\n parameters.s_theta_ref1 = theta0_\n parameters.s_theta_ref2 = theta0_\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n plt.plot(time,res.state[:,i])\n \n if i == 0:\n plt.title('Position-time')\n else: plt.title('Velocity-time')\n \n plt.legend(['theta01 = %s'%(theta0_test[0]) , 'theta02 = %s'%(theta0_test[1])]) \n plt.grid()\n plt.show()\n \n return None\n \n def different_k1_k2(k1,k2,parameters,time,x0):\n parameters.k1 = k1\n parameters.k2 = k2\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n res.plot_state(\"State\")\n plt.title('State with k1 = %s, k2 = %s' %(k1,k2))\n plt.show()\n return None\n \n def different_theta01_theta02(theta01,theta02,parameters,time,x0):\n parameters.s_theta_ref1 = theta01\n parameters.s_theta_ref2 = theta02\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n res.plot_state(\"State\")\n plt.title('State with theta01 = %s, theta02 = %s' %(theta01,theta02))\n plt.show()\n return None\n \n \n # Simulation Parameters\n parameters = PendulumParameters()\n t_start = 0.0\n t_stop = 10.0\n dt = 0.05\n \n biolog.warning(\"Using large time step dt={}\".format(dt))\n time = np.arange(t_start, t_stop, dt)\n \n # no damping\n parameters.b1 = 0\n parameters.b2 = 0\n \n x0 = [np.pi/3, 0]\n \n \n position = [-np.pi/2,0,np.pi/4]\n different_initial_conditions(position,parameters,time)\n \n k = np.linspace(1,60,40)\n k_test = [20,50]\n influence_of_k(k,parameters,time,x0,k_test)\n \n theta0 = np.linspace(-np.pi/2,np.pi/2,40)\n theta0_test = [np.pi/6,np.pi/3]\n influence_of_theta0(theta0,parameters,time,x0,theta0_test)\n \n### different k1 and k2\n k1 = 10\n k2 = 30\n different_k1_k2(k1,k2,parameters,time,x0)\n \n### different theta01 and theta02\n theta01 = 1.5\n theta02 = 1\n different_theta01_theta02(theta01,theta02,parameters,time,x0)\n \ndef exercise1b():\n \"\"\" Exercise 1 \"\"\"\n biolog.info(\"Executing Lab 4 : Exercise 1\");\n parameters = PendulumParameters() \n### With damping\n t_start = 0.0\n t_stop = 10.0\n dt = 0.05\n \n biolog.warning(\"Using large time step dt={}\".format(dt))\n time = np.arange(t_start, t_stop, dt)\n \n parameters.b1 = 0.5\n parameters.b2 = 0.5\n x0 = [np.pi/3, 0]\n biolog.info(parameters.showParameters())\n res = integrate(pendulum_integration, x0, time, args=(parameters, ))\n\n res.plot_state(\"State\")\n \nif __name__ == '__main__':\n# exercise1a()\n exercise1b()\n\n" ]
[ [ "numpy.log", "numpy.exp" ], [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JacobHA/HandsFree
[ "a374bcb823370963a1c0e30cbb4866018d6d4169", "a374bcb823370963a1c0e30cbb4866018d6d4169" ]
[ ".history/handtracking_v007_20220601173944.py", ".history/handtracking_v007_20220601211918.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 2 00:20:41 2021\n\n@author: jacob\n\"\"\"\n\nimport cv2\nimport mediapipe as mp\nfrom vedo import *\nfrom time import sleep\nfrom google.protobuf.json_format import MessageToDict\n\nimport numpy as np\nfrom utils import *\n\nMEMORY_DEBUG = False\n\nTHUMB_TIP_INDEX = 4\nINDEX_TIP_INDEX = 8\nMIDDLE_TIP_INDEX = 12 \nMIDDLE_PALM_INDEX = 9\nMAX_TRACKING_TIME = 50\nSMOOTHING_INTERVAL = 10\nMIN_WAITING_FRAMES = 2\nRESET_WAITING_FRAMES = 20\nEPSILON_NOISE = 1E-3\nFINGER_TOUCHING_RADIUS = 0.07\nZOOM_THRESHOLD = 0 #5E-4\nROTATION_SENSITIVITY = 10\nPANNING_SENSITIVITY = 2\nPANNING_Z_SENSITIVITY = 1.5\nZOOM_SENSITIVITY = 0.1 # effectively how many loop iterations must be done (i.e. ms waited) to acheive zoom factor\nINITIAL_RESCALE = 0.00001\n\n\nSHOW_SELFIE = True\n\ndimensions = 3\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n# drawing_styles = mp.solutions.drawing_styles\n\nthumb_positions = MaxSizeList(MAX_TRACKING_TIME) \nindex_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_tip_vert_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_palm_vert_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_finger_open_list = MaxSizeList(MIN_WAITING_FRAMES)\nhand_status = MaxSizeList(MIN_WAITING_FRAMES)\nopen_status = MaxSizeList(RESET_WAITING_FRAMES)\n\nlast_two_positions = MaxSizeList(2 * 3) # NUM_FINGERS_NEEDED * NUM_DIMENSIONS\nlast_two_thumbs = MaxSizeList(2)\nlast_two_indexes = MaxSizeList(2)\n# When two hands must be tracked\nlast_two_indexes_L = MaxSizeList(2)\nlast_two_indexes_R = MaxSizeList(2)\n\nlast_two_middles = MaxSizeList(2)\n\nlast_two_thumb_index_vecs = MaxSizeList(2)\nlast_two_thumb_index_dists = MaxSizeList(2)\n\nz_unit_vec = np.array([0,0,1])\n\nSTL_name = r'C:\\Users\\jacob\\Downloads\\croc-nut20210722-6981-17x5gmo\\rayandsumer\\croc-nut\\cn1.stl'\nSTL_name = r'C:\\Users\\jacob\\OneDrive\\Desktop\\3D Print files\\cars\\mate fixed.stl'\n\n\nv = Mesh(STL_name)\n\navg_model_size = v.averageSize()\nv.scale(avg_model_size * INITIAL_RESCALE)\n# print(avg_model_size)\n# v.scale(0.001)\ncam = dict(pos=(1,0,0), focalPoint=(0,0,0), viewup=(0,0,1))\n\n\nimage = None\ndisplay_message = \"Firing up...\" \nstatus_message = Text2D(display_message, pos=\"top-center\", font=2, c='w', bg='b3', alpha=1)\n# axs = Axes(xrange=(-1,1), yrange=(-1,1), zrange=(-1,2), yzGrid=False)\nplt = show(v, status_message, axes=4, viewup='z', camera=cam, interactive=False)\n\nif MEMORY_DEBUG:\n tracemalloc.start()\n\ntry: \n cap = cv2.VideoCapture(0)\n\n with mp_hands.Hands(min_detection_confidence=0.85, min_tracking_confidence=0.85, max_num_hands=2) as hands:\n while cap.isOpened():\n\n pause_updates = False\n new_zoom, old_zoom = 1,1\n\n success, image = cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n continue\n\n # Flip the image horizontally for a later selfie-view display, and convert\n # the BGR image to RGB.\n image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n\n results = hands.process(image)\n \n # Draw the hand annotations on the image.\n image.flags.writeable = False # Performance improvement\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n image.flags.writeable = False\n display_message = \"\" # So as not to overcrowd with box behind\n\n multihand_results = results.multi_hand_landmarks\n\n\n if multihand_results:\n\n hand_present = (MessageToDict(results.multi_handedness[0])['classification'][0]['label'])\n NUM_HANDS_PRESENT = 
len(multihand_results)\n\n # keep track of (which) hands\n if NUM_HANDS_PRESENT == 1:\n hand_status.append(hand_present) \n \n # save on memory by only iterating thru what we care about\n for hand_landmarks in multihand_results: # [multihand_results[val] for val in landmark_index_nums]:\n # Draw landmarks \n mp_drawing.draw_landmarks(\n image, hand_landmarks, mp_hands.HAND_CONNECTIONS,)\n # drawing_styles.get_default_hand_landmark_style(),\n # drawing_styles.get_default_hand_connection_style())\n \n # Gather finger location data\n for tip_index, finger_positions_list in zip([THUMB_TIP_INDEX, INDEX_TIP_INDEX, MIDDLE_TIP_INDEX], \n [last_two_thumbs, last_two_indexes, last_two_middles]):\n \n # Need 3D to take cross product...\n finger_positions_list.append([\n hand_landmarks.landmark[tip_index].x,\n hand_landmarks.landmark[tip_index].y,\n hand_landmarks.landmark[tip_index].z,\n ])\n\n # Gather palm location data\n for tip_index, finger_positions_list in zip([ MIDDLE_TIP_INDEX, MIDDLE_PALM_INDEX],\n [ middle_tip_vert_positions, middle_palm_vert_positions]):\n \n finger_positions_list.append([\n hand_landmarks.landmark[tip_index].y,\n ]) # only care about y position for these landmarks\n\n middle_finger_open_list.append(middle_tip_vert_positions[-1] < middle_palm_vert_positions[-1])\n # This will tell us if the hand is open or closed ^\n\n # If sufficient data has been collected:\n display_message = \"Tracking hand\"\n\n if NUM_HANDS_PRESENT == 2:\n hand_status.append('Both') \n\n for hand_landmarks, chirality in zip(multihand_results,[last_two_indexes_L, last_two_indexes_R]): # [multihand_results[val] for val in landmark_index_nums]:\n # Draw landmarks \n mp_drawing.draw_landmarks(\n image, hand_landmarks, mp_hands.HAND_CONNECTIONS,)\n # drawing_styles.get_default_hand_landmark_style(),\n # drawing_styles.get_default_hand_connection_style())\n # Gather finger location data\n tip_index = INDEX_TIP_INDEX\n finger_poss = []\n \n \n # Need 3D to pan into/out of page..\n chirality.append([\n hand_landmarks.landmark[tip_index].x,\n hand_landmarks.landmark[tip_index].y,\n hand_landmarks.landmark[tip_index].z,\n ])\n\n\n if len(last_two_thumbs) >= MIN_WAITING_FRAMES:\n open_status.append(hand_open(middle_finger_open_list, MIN_WAITING_FRAMES))\n\n # generate/grab the last two smoothed points\n # for finger_positions_list, last_two in zip([thumb_positions, index_positions, middle_positions],\n # [last_two_thumbs, last_two_indexes, last_two_middles]):\n # for dim in range(dimensions): # x,y,z\n # last_two.append(\n # smooth(np.array(finger_positions_list).T.tolist()[dim], SMOOTHING_INTERVAL)[-2:])\n\n # Rather than smoothing, let's just average the last 2 points\n index_pos = np.array(last_two_indexes).mean(axis=0)\n thumb_pos = np.array(last_two_thumbs).mean(axis=0)\n middle_pos = np.array(last_two_middles).mean(axis=0)\n\n # Create AOR in xy plane based of thumb-index line and rotate based on distance bw fingers\n last_two_thumb_index_vecs = MaxSizeList(2)\n last_two_thumb_index_dists = MaxSizeList(2)\n\n norms = MaxSizeList(2)\n\n # First check that fingers are not closed: i.e. that we do not want any action\n # fingers_touching = within_volume_of([pointA, pointB, pointC], FINGER_TOUCHING_RADIUS)\n # if NUM_HANDS_PRESENT == 2:\n # pause_updates = True\n\n\n # find the vector between two points\n thumb_to_index = index_pos - thumb_pos\n thumb_to_middle = middle_pos - thumb_pos\n norms = np.cross(thumb_to_middle, thumb_to_index)\n \n # Optionally do masking here... 
it helps prevent the distance from being changed by z coord\n # thumb_to_index[-1] = 0 # set z coord to zero\n thumb_to_index *= -1 # offset coord axis weirdness (y goes down)\n last_two_thumb_index_vecs = np.array(last_two_indexes) - np.array(last_two_thumbs)\n last_two_thumb_index_dists = np.linalg.norm(last_two_thumb_index_vecs, axis=1)\n # ^^ Instead of this just do live-time averaging... result += thumb_to_index\n # result /= 2\n \n if hand_status == ['Both']*MIN_WAITING_FRAMES and open_status[-1]:\n\n display_message = \"Panning\"\n # Pan camera\n \n indexes = np.array(last_two_indexes_L) - np.array(last_two_indexes_R)\n change = indexes[1] - indexes[0]\n change = np.array([change[2], change[0], -PANNING_Z_SENSITIVITY * change[1]])\n # index_change[2] = 0 # set z-axis change to zero \n # First check that fingers are not closed: i.e. that we do not want any action\n v.shift(0.1*PANNING_SENSITIVITY * change)\n \n if hand_status == ['Right']*MIN_WAITING_FRAMES and open_status[-1]:\n\n # Change zoom multiplier based on fingers distance changing (open/close thumb and index)\n display_message = \"Zooming\"\n \n new_zoom *= ((1 + (last_two_thumb_index_dists[1] - last_two_thumb_index_dists[0]))) ** (1/ZOOM_SENSITIVITY) # outer plus sign bc pinch out means zoom in\n \n\n if hand_status == ['Left']*MIN_WAITING_FRAMES and open_status[-1]:\n\n # Calculate rotation matrix and extract angles\n\n display_message = \"Rotating\"\n\n normal_to_rotate = norms # np.cross(np.average(last_two_thumb_index_vecs,axis=0), z_unit_vec) # always crossing it into the screen..check sign later\n angle_to_rotate = (last_two_thumb_index_dists).mean()\n\n v.rotate(angle = angle_to_rotate*ROTATION_SENSITIVITY, axis = normal_to_rotate)#[::-1]) #bc of axis weirdness\n\n if open_status == [False]*RESET_WAITING_FRAMES:\n display_message = \"Resetting\"\n \n v = Mesh(STL_name)\n\n avg_model_size = v.averageSize()\n v.scale(avg_model_size * INITIAL_RESCALE)\n \n cam = dict(pos=(1,0,0), focalPoint=(0,0,0), viewup=(0,0,1))\n plt = show(v, status_message, axes=4, viewup='z', camera=cam, interactive=False)\n\n \n\n else: # i.e. no hands detected\n thumb_positions = MaxSizeList(MAX_TRACKING_TIME) \n index_positions = MaxSizeList(MAX_TRACKING_TIME)\n \n pause_updates = True\n \n # Show vtk file and camera's image\n if pause_updates:\n display_message = \"Updates paused\"\n\n # if SHOW_SELFIE:\n # cv2.imshow('MediaPipe', image)\n\n status_message.text(display_message)\n plt.show(v, status_message, zoom = new_zoom, camera = cam, interactive=False) # important line! 
\n del image # this cuts down on memory by a lot; ~1200KB -> ~200KB !\n \n if MEMORY_DEBUG:\n snapshot = tracemalloc.take_snapshot()\n display_top(snapshot) \n\nexcept Exception as e:\n # This enables the camera to be cleaned up if there are any errors\n print('Caught an exception: ' + str(e))\n cap.release()\n cv2.destroyAllWindows()\n pass\n\ncap.release()\ncv2.destroyAllWindows()\n\n# interactive().close() # Not sure what this does..\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 2 00:20:41 2021\n\n@author: jacob\n\"\"\"\n\nimport cv2\nimport mediapipe as mp\nfrom vedo import *\nfrom time import sleep\nfrom google.protobuf.json_format import MessageToDict\n\nimport numpy as np\nfrom utils import *\n\nMEMORY_DEBUG = False\n\nTHUMB_TIP_INDEX = 4\nINDEX_TIP_INDEX = 8\nMIDDLE_TIP_INDEX = 12 \nMIDDLE_PALM_INDEX = 9\nMAX_TRACKING_TIME = 50\nSMOOTHING_INTERVAL = 10\nMIN_WAITING_FRAMES = 2\nRESET_WAITING_FRAMES = 20\nEPSILON_NOISE = 1E-3\nFINGER_TOUCHING_RADIUS = 0.07\nZOOM_THRESHOLD = 0 #5E-4\nROTATION_SENSITIVITY = 10\nPANNING_SENSITIVITY = 2\nPANNING_Z_SENSITIVITY = 1.5\nZOOM_SENSITIVITY = 0.1 # effectively how many loop iterations must be done (i.e. ms waited) to acheive zoom factor\nINITIAL_RESCALE = 0.00001\n\n\nSHOW_SELFIE = False#True\n\ndimensions = 3\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n# drawing_styles = mp.solutions.drawing_styles\n\nthumb_positions = MaxSizeList(MAX_TRACKING_TIME) \nindex_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_tip_vert_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_palm_vert_positions = MaxSizeList(MAX_TRACKING_TIME)\nmiddle_finger_open_list = MaxSizeList(MIN_WAITING_FRAMES)\nhand_status = MaxSizeList(MIN_WAITING_FRAMES)\nopen_status = MaxSizeList(RESET_WAITING_FRAMES)\n\nlast_two_positions = MaxSizeList(2 * 3) # NUM_FINGERS_NEEDED * NUM_DIMENSIONS\nlast_two_thumbs = MaxSizeList(2)\nlast_two_indexes = MaxSizeList(2)\n# When two hands must be tracked\nlast_two_indexes_L = MaxSizeList(2)\nlast_two_indexes_R = MaxSizeList(2)\n\nlast_two_middles = MaxSizeList(2)\n\nlast_two_thumb_index_vecs = MaxSizeList(2)\nlast_two_thumb_index_dists = MaxSizeList(2)\n\nz_unit_vec = np.array([0,0,1])\n\nSTL_name = r'A.stl'\n\nv = Mesh(STL_name)\n\navg_model_size = v.averageSize()\nv.scale(avg_model_size * INITIAL_RESCALE)\n# print(avg_model_size)\n# v.scale(0.001)\ncam = dict(pos=(1,0,0), focalPoint=(0,0,0), viewup=(0,0,1))\n\n\nimage = None\ndisplay_message = \"Firing up...\" \nstatus_message = Text2D(display_message, pos=\"top-center\", font=2, c='w', bg='b3', alpha=1)\n# axs = Axes(xrange=(-1,1), yrange=(-1,1), zrange=(-1,2), yzGrid=False)\nplt = show(v, status_message, axes=4, viewup='z', camera=cam, interactive=False)\n\nif MEMORY_DEBUG:\n tracemalloc.start()\n\ntry: \n cap = cv2.VideoCapture(0)\n\n with mp_hands.Hands(min_detection_confidence=0.85, min_tracking_confidence=0.85, max_num_hands=2) as hands:\n while cap.isOpened():\n\n pause_updates = False\n new_zoom, old_zoom = 1,1\n\n success, image = cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n continue\n\n # Flip the image horizontally for a later selfie-view display, and convert\n # the BGR image to RGB.\n image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n\n results = hands.process(image)\n \n # Draw the hand annotations on the image.\n # image.flags.writeable = False # Performance improvement\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n # image.flags.writeable = 
False\n display_message = \"\" # So as not to overcrowd with box behind\n\n multihand_results = results.multi_hand_landmarks\n\n\n if multihand_results:\n\n hand_present = (MessageToDict(results.multi_handedness[0])['classification'][0]['label'])\n NUM_HANDS_PRESENT = len(multihand_results)\n\n # keep track of (which) hands\n if NUM_HANDS_PRESENT == 1:\n hand_status.append(hand_present) \n \n # save on memory by only iterating thru what we care about\n for hand_landmarks in multihand_results: # [multihand_results[val] for val in landmark_index_nums]:\n # Draw landmarks \n mp_drawing.draw_landmarks(\n image, hand_landmarks, mp_hands.HAND_CONNECTIONS,)\n # drawing_styles.get_default_hand_landmark_style(),\n # drawing_styles.get_default_hand_connection_style())\n \n # Gather finger location data\n for tip_index, finger_positions_list in zip([THUMB_TIP_INDEX, INDEX_TIP_INDEX, MIDDLE_TIP_INDEX], \n [last_two_thumbs, last_two_indexes, last_two_middles]):\n \n # Need 3D to take cross product...\n finger_positions_list.append([\n hand_landmarks.landmark[tip_index].x,\n hand_landmarks.landmark[tip_index].y,\n hand_landmarks.landmark[tip_index].z,\n ])\n\n # Gather palm location data\n for tip_index, finger_positions_list in zip([ MIDDLE_TIP_INDEX, MIDDLE_PALM_INDEX],\n [ middle_tip_vert_positions, middle_palm_vert_positions]):\n \n finger_positions_list.append([\n hand_landmarks.landmark[tip_index].y,\n ]) # only care about y position for these landmarks\n\n middle_finger_open_list.append(middle_tip_vert_positions[-1] < middle_palm_vert_positions[-1])\n # This will tell us if the hand is open or closed ^\n\n # If sufficient data has been collected:\n display_message = \"Tracking hand\"\n\n if NUM_HANDS_PRESENT == 2:\n hand_status.append('Both') \n\n for hand_landmarks, chirality in zip(multihand_results,[last_two_indexes_L, last_two_indexes_R]): # [multihand_results[val] for val in landmark_index_nums]:\n # Draw landmarks \n mp_drawing.draw_landmarks(\n image, hand_landmarks, mp_hands.HAND_CONNECTIONS,)\n # drawing_styles.get_default_hand_landmark_style(),\n # drawing_styles.get_default_hand_connection_style())\n # Gather finger location data\n tip_index = INDEX_TIP_INDEX\n finger_poss = []\n \n \n # Need 3D to pan into/out of page..\n chirality.append([\n hand_landmarks.landmark[tip_index].x,\n hand_landmarks.landmark[tip_index].y,\n hand_landmarks.landmark[tip_index].z,\n ])\n\n\n if len(last_two_thumbs) >= MIN_WAITING_FRAMES:\n open_status.append(hand_open(middle_finger_open_list, MIN_WAITING_FRAMES))\n\n # generate/grab the last two smoothed points\n # for finger_positions_list, last_two in zip([thumb_positions, index_positions, middle_positions],\n # [last_two_thumbs, last_two_indexes, last_two_middles]):\n # for dim in range(dimensions): # x,y,z\n # last_two.append(\n # smooth(np.array(finger_positions_list).T.tolist()[dim], SMOOTHING_INTERVAL)[-2:])\n\n # Rather than smoothing, let's just average the last 2 points\n index_pos = np.array(last_two_indexes).mean(axis=0)\n thumb_pos = np.array(last_two_thumbs).mean(axis=0)\n middle_pos = np.array(last_two_middles).mean(axis=0)\n\n # Create AOR in xy plane based of thumb-index line and rotate based on distance bw fingers\n last_two_thumb_index_vecs = MaxSizeList(2)\n last_two_thumb_index_dists = MaxSizeList(2)\n\n norms = MaxSizeList(2)\n\n # First check that fingers are not closed: i.e. 
that we do not want any action\n # fingers_touching = within_volume_of([pointA, pointB, pointC], FINGER_TOUCHING_RADIUS)\n # if NUM_HANDS_PRESENT == 2:\n # pause_updates = True\n\n\n # find the vector between two points\n thumb_to_index = index_pos - thumb_pos\n thumb_to_middle = middle_pos - thumb_pos\n norms = np.cross(thumb_to_middle, thumb_to_index)\n \n # Optionally do masking here... it helps prevent the distance from being changed by z coord\n # thumb_to_index[-1] = 0 # set z coord to zero\n thumb_to_index *= -1 # offset coord axis weirdness (y goes down)\n last_two_thumb_index_vecs = np.array(last_two_indexes) - np.array(last_two_thumbs)\n last_two_thumb_index_dists = np.linalg.norm(last_two_thumb_index_vecs, axis=1)\n # ^^ Instead of this just do live-time averaging... result += thumb_to_index\n # result /= 2\n \n if hand_status == ['Both']*MIN_WAITING_FRAMES and open_status[-1]:\n\n display_message = \"Panning\"\n # Pan camera\n \n indexes = np.array(last_two_indexes_L) - np.array(last_two_indexes_R)\n change = indexes[1] - indexes[0]\n change = np.array([change[2], change[0], -PANNING_Z_SENSITIVITY * change[1]])\n # index_change[2] = 0 # set z-axis change to zero \n # First check that fingers are not closed: i.e. that we do not want any action\n v.shift(0.1*PANNING_SENSITIVITY * change)\n \n if hand_status == ['Right']*MIN_WAITING_FRAMES and open_status[-1]:\n\n # Change zoom multiplier based on fingers distance changing (open/close thumb and index)\n display_message = \"Zooming\"\n \n new_zoom *= ((1 + (last_two_thumb_index_dists[1] - last_two_thumb_index_dists[0]))) ** (1/ZOOM_SENSITIVITY) # outer plus sign bc pinch out means zoom in\n \n\n if hand_status == ['Left']*MIN_WAITING_FRAMES and open_status[-1]:\n\n # Calculate rotation matrix and extract angles\n\n display_message = \"Rotating\"\n\n normal_to_rotate = norms # np.cross(np.average(last_two_thumb_index_vecs,axis=0), z_unit_vec) # always crossing it into the screen..check sign later\n angle_to_rotate = (last_two_thumb_index_dists).mean()\n\n v.rotate(angle = angle_to_rotate*ROTATION_SENSITIVITY, axis = normal_to_rotate)#[::-1]) #bc of axis weirdness\n\n if open_status == [False]*RESET_WAITING_FRAMES:\n display_message = \"Resetting\"\n \n v = Mesh(STL_name)\n\n avg_model_size = v.averageSize()\n v.scale(avg_model_size * INITIAL_RESCALE)\n \n cam = dict(pos=(1,0,0), focalPoint=(0,0,0), viewup=(0,0,1))\n plt = show(v, status_message, axes=4, viewup='z', camera=cam, interactive=False)\n\n \n\n else: # i.e. no hands detected\n thumb_positions = MaxSizeList(MAX_TRACKING_TIME) \n index_positions = MaxSizeList(MAX_TRACKING_TIME)\n \n pause_updates = True\n \n # Show vtk file and camera's image\n if pause_updates:\n display_message = \"Updates paused\"\n\n if SHOW_SELFIE:\n cv2.imshow('MediaPipe', image)\n cv2.waitKey(1)\n\n status_message.text(display_message)\n plt.show(v, status_message, zoom = new_zoom, camera = cam, interactive=False) # important line! \n del image # this cuts down on memory by a lot; ~1200KB -> ~200KB !\n \n if MEMORY_DEBUG:\n snapshot = tracemalloc.take_snapshot()\n display_top(snapshot) \n\nexcept Exception as e:\n # This enables the camera to be cleaned up if there are any errors\n print('Caught an exception: ' + str(e))\n cap.release()\n cv2.destroyAllWindows()\n pass\n\ncap.release()\ncv2.destroyAllWindows()\n\n# interactive().close() # Not sure what this does..\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.cross" ], [ "numpy.array", "numpy.linalg.norm", "numpy.cross" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
softbankrobotics-research/Generative_Continual_Learning
[ "66b121437c248993b41f154b5a2d6b7197278578" ]
[ "Generative_Models/Conditional_Model.py" ]
[ "from Generative_Models.Generative_Model import GenerativeModel\nimport torch\nfrom utils import *\n\n\nclass ConditionalModel(GenerativeModel):\n\n\n # if no task2generate are given we generate all labellize for all task\n # if task2generate and annotate == false we generate only for the actual task\n # if task2generate and annotate == true we generate only for all past tasks\n def sample(self, batch_size, task2generate=None, multi_annotation=False):\n '''\n :param batch_size:\n :param task2generate: give the index of class to generate (the name is a bit misleading)\n :param multi_annotation: indicate if we want just one classes or all classes <= task2generate\n :param expert: classifier that can give a label to samples\n :return: batch of sample from different classes and return a batch of images and label\n '''\n\n self.G.eval()\n\n if task2generate is not None:\n classes2generate=task2generate + 1\n else:\n classes2generate=self.num_classes\n\n z_ = self.random_tensor(batch_size, self.z_dim)\n if multi_annotation:\n # keep this please\n # y = torch.LongTensor(batch_size, 1).random_() % self.num_classes\n y = (torch.randperm(batch_size * 10) % classes2generate)[:batch_size]\n y_onehot = self.get_one_hot(y)\n else:\n y = (torch.ones(batch_size) * (classes2generate-1)).long()\n y_onehot = self.get_one_hot(y).cuda()\n\n output = self.G(variable(z_), y_onehot).data\n\n return output, y\n\n # For conditional Replay we generate tasks one by one\n def generate_batch4Task(self, nb_sample_train, task2generate, multi_annotation):\n return self.sample(batch_size=nb_sample_train, task2generate=task2generate, multi_annotation=False)\n\n\n\n def get_one_hot(self, y):\n y_onehot = torch.FloatTensor(y.shape[0], self.num_classes)\n y_onehot.zero_()\n y_onehot.scatter_(1, y[:, np.newaxis], 1.0)\n\n return y_onehot\n\n\n # This function generate a dataset for one class or for all class until ind_task included\n def generate_dataset(self, ind_task, nb_sample_per_task, one_task=True, Train=True, classe2generate=None):\n\n # to generate 10 classes classe2generate is 9 as classes 0 to 9\n if classe2generate is not None:\n assert classe2generate <= self.num_classes\n if self.task_type != \"disjoint\":\n assert classe2generate == self.num_classes\n else:\n classe2generate = ind_task+1\n\n train_loader_gen=None\n\n if Train:\n path = os.path.join(self.gen_dir, 'train_Task_' + str(ind_task) + '.pt')\n path_samples = os.path.join(self.sample_dir, 'samples_train_' + str(ind_task) + '.png')\n else:\n path = os.path.join(self.gen_dir, 'test_Task_' + str(ind_task) + '.pt')\n path_samples = os.path.join(self.sample_dir, 'samples_test_' + str(ind_task) + '.png')\n\n # if we have only on task to generate\n if one_task or classe2generate == 0: # generate only for the task ind_task\n\n train_loader_gen = self.generate_task(nb_sample_per_task, multi_annotation=False, classe2generate=classe2generate)\n\n else: # else case we generate for all previous task\n\n for i in range(classe2generate): # we take from all task, actual one included\n\n train_loader_ind = self.generate_task(nb_sample_per_task, multi_annotation=True, classe2generate=i)\n\n if i == 0:\n train_loader_gen = train_loader_ind\n else:\n train_loader_gen.concatenate(train_loader_ind)\n\n # we save the concatenation of all generated with the actual task for train and test\n train_loader_gen.save(path)\n train_loader_gen.visualize_sample(path_samples, self.sample_num, [self.size, self.size, self.input_size])\n\n # return the the train loader with all data\n 
return train_loader_gen # test_loader_gen # for instance we don't use the test set\n\n # this generation only works for Baseline, disjoint\n # we generate the dataset based on one generator by task to get normally the best generated dataset\n # can be used to generate train or test data\n def generate_best_dataset(self, ind_task, nb_sample_per_task, one_task=True, Train=True, classe2generate=None):\n\n\n # to generate 10 classes classe2generate is 9 as classes 0 to 9\n if classe2generate is not None:\n assert classe2generate <= self.num_classes\n if self.task_type != \"disjoint\":\n assert classe2generate == self.num_classes\n else:\n classe2generate = ind_task+1\n\n if Train:\n path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(ind_task) + '.pt')\n else:\n path = os.path.join(self.gen_dir, 'Best_test_Task_' + str(ind_task) + '.pt')\n\n # if we have only on task to generate\n if one_task or classe2generate == 0: # generate only for the task ind_task\n # we do not need automatic annotation since we have one generator by class\n previous_data_train = self.generate_task(nb_sample_per_task, multi_annotation=False, classe2generate=classe2generate)\n #previous_data_train = DataLoader(tasks_tr, self.args)\n\n else: # else we load the previous dataset and add the new data\n\n previous_path_train = os.path.join(self.gen_dir, 'Best_train_Task_' + str(ind_task - 1) + '.pt')\n\n previous_data_train = DataLoader(torch.load(previous_path_train), self.args)\n\n # we do not need automatic annotation since we have one generator by class\n train_loader_ind = self.generate_task(nb_sample_per_task, multi_annotation=False, classe2generate=i)\n\n previous_data_train.concatenate(train_loader_ind)\n\n # we save the concatenation of all generated with the actual task for train and test\n previous_data_train.save(path)\n\n # return nothing" ]
[ [ "torch.randperm", "torch.FloatTensor", "torch.ones", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DAMONYLY/YOLOv3
[ "fae4e9533de70d187e09b97ee8b74d06deb0f037" ]
[ "utils/parse_yolo_weights.py" ]
[ "\nfrom __future__ import division\nimport torch\nimport numpy as np\n\n\ndef parse_conv_block(m, weights, offset, initflag):\n \"\"\"\n Initialization of conv layers with batchnorm\n Args:\n m (Sequential): sequence of layers\n weights (numpy.ndarray): pretrained weights data\n offset (int): current position in the weights file\n initflag (bool): if True, the layers are not covered by the weights file. \\\n They are initialized using darknet-style initialization.\n Returns:\n offset (int): current position in the weights file\n weights (numpy.ndarray): pretrained weights data\n \"\"\"\n conv_model = m[0]\n bn_model = m[1]\n param_length = m[1].bias.numel() # numel()获取张量元素的个数\n\n # batchnorm\n for pname in ['bias', 'weight', 'running_mean', 'running_var']:\n layerparam = getattr(bn_model, pname)\n\n if initflag: # yolo initialization - scale to one, bias to zero\n if pname == 'weight':\n weights = np.append(weights, np.ones(param_length))\n else:\n weights = np.append(weights, np.zeros(param_length))\n\n param = torch.from_numpy(weights[offset:offset + param_length]).view_as(layerparam)\n layerparam.data.copy_(param)\n offset += param_length\n\n param_length = conv_model.weight.numel()\n\n # conv\n if initflag: # yolo initialization\n n, c, k, _ = conv_model.weight.shape\n scale = np.sqrt(2 / (k * k * c))\n weights = np.append(weights, scale * np.random.normal(size=param_length))\n\n param = torch.from_numpy(\n weights[offset:offset + param_length]).view_as(conv_model.weight)\n conv_model.weight.data.copy_(param)\n offset += param_length\n\n return offset, weights\n\ndef parse_yolo_block(m, weights, offset, initflag):\n \"\"\"\n YOLO Layer (one conv with bias) Initialization\n Args:\n m (Sequential): sequence of layers\n weights (numpy.ndarray): pretrained weights data\n offset (int): current position in the weights file\n initflag (bool): if True, the layers are not covered by the weights file. 
\\\n They are initialized using darknet-style initialization.\n Returns:\n offset (int): current position in the weights file\n weights (numpy.ndarray): pretrained weights data\n \"\"\"\n conv_model = m._modules['conv']\n param_length = conv_model.bias.numel()\n\n if initflag: # yolo initialization - bias to zero\n weights = np.append(weights, np.zeros(param_length))\n\n param = torch.from_numpy(\n weights[offset:offset + param_length]).view_as(conv_model.bias)\n conv_model.bias.data.copy_(param)\n offset += param_length\n\n param_length = conv_model.weight.numel()\n\n if initflag: # yolo initialization\n n, c, k, _ = conv_model.weight.shape\n scale = np.sqrt(2 / (k * k * c))\n weights = np.append(weights, scale * np.random.normal(size=param_length))\n \n param = torch.from_numpy(\n weights[offset:offset + param_length]).view_as(conv_model.weight)\n conv_model.weight.data.copy_(param)\n offset += param_length\n\n return offset, weights\n\ndef parse_yolo_weights(model, weights_path):\n \"\"\"\n Parse YOLO (darknet) pre-trained weights data onto the pytorch model\n Args:\n model : pytorch model object\n weights_path (str): path to the YOLO (darknet) pre-trained weights file\n \"\"\"\n fp = open(weights_path, \"rb\")\n\n # skip the header\n header = np.fromfile(fp, dtype=np.int32, count=5) # not used\n # read weights \n weights = np.fromfile(fp, dtype=np.float32)\n fp.close()\n\n offset = 0 \n initflag = False #whole yolo weights : False, darknet weights : True\n\n for m in model.module_list:\n\n if m._get_name() == 'Sequential':\n # normal conv block\n offset, weights = parse_conv_block(m, weights, offset, initflag)\n\n elif m._get_name() == 'resblock':\n # residual block\n for modu in m._modules['module_list']:\n for blk in modu:\n offset, weights = parse_conv_block(blk, weights, offset, initflag)\n\n elif m._get_name() == 'YOLOLayer':\n # YOLO Layer (one conv with bias) Initialization\n offset, weights = parse_yolo_block(m, weights, offset, initflag)\n\n initflag = (offset >= len(weights)) # the end of the weights file. turn the flag on\n" ]
[ [ "numpy.fromfile", "numpy.sqrt", "torch.from_numpy", "numpy.ones", "numpy.random.normal", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
erikmannerfelt/GeoUtils
[ "96a044f7cca73f936e5b245a5e99e0d2102d279f" ]
[ "tests/test_spatial_tools.py" ]
[ "\"\"\"\nFunctions to test the spatial tools.\n\"\"\"\nfrom __future__ import annotations\n\nimport warnings\nfrom typing import Callable\n\nimport numpy as np\nimport pytest\nimport rasterio as rio\n\nimport geoutils as gu\nfrom geoutils import datasets\nfrom geoutils.georaster import RasterType\n\n# def test_dem_subtraction():\n# \"\"\"Test that the DEM subtraction script gives reasonable numbers.\"\"\"\n# with warnings.catch_warnings():\n# warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n# diff = gu.georaster.spatial_tools.subtract_rasters(\n# examples.get_path(\"longyearbyen_ref_dem\"),\n# examples.get_path(\"longyearbyen_tba_dem\"))\n\n# assert np.nanmean(np.abs(diff.data)) < 100\n\n\nclass stack_merge_images:\n \"\"\"\n Test cases for stacking and merging images\n Split an image with some overlap, then stack/merge it, and validate bounds and shape.\n Param `cls` is used to set the type of the output, e.g. gu.Raster (default).\n \"\"\"\n\n def __init__(self, image: str, cls: Callable[[str], RasterType] = gu.Raster) -> None:\n img = cls(datasets.get_path(image))\n self.img = img\n\n # Find the easting midpoint of the img\n x_midpoint = np.mean([img.bounds.right, img.bounds.left])\n x_midpoint -= x_midpoint % img.res[0]\n\n # Cut the img into two imgs that slightly overlap each other.\n self.img1 = img.copy()\n self.img1.crop(\n rio.coords.BoundingBox(\n right=x_midpoint + img.res[0] * 3, left=img.bounds.left, top=img.bounds.top, bottom=img.bounds.bottom\n )\n )\n self.img2 = img.copy()\n self.img2.crop(\n rio.coords.BoundingBox(\n left=x_midpoint - img.res[0] * 3, right=img.bounds.right, top=img.bounds.top, bottom=img.bounds.bottom\n )\n )\n\n # To check that use_ref_bounds work - create a img that do not cover the whole extent\n self.img3 = img.copy()\n self.img3.crop(\n rio.coords.BoundingBox(\n left=x_midpoint - img.res[0] * 3,\n right=img.bounds.right - img.res[0] * 2,\n top=img.bounds.top,\n bottom=img.bounds.bottom,\n )\n )\n\n\[email protected]\ndef images_1d(): # type: ignore\n return stack_merge_images(\"landsat_B4\")\n\n\[email protected]\ndef sat_images(): # type: ignore\n return stack_merge_images(\"landsat_B4\", cls=gu.SatelliteImage)\n\n\[email protected]\ndef images_3d(): # type: ignore\n return stack_merge_images(\"landsat_RGB\")\n\n\[email protected](\n \"rasters\", [pytest.lazy_fixture(\"images_1d\"), pytest.lazy_fixture(\"sat_images\")]\n) # type: ignore\n# pytest.lazy_fixture('images_3d')]) ## Requires Raster.reproject() fix.\ndef test_stack_rasters(rasters) -> None: # type: ignore\n \"\"\"Test stack_rasters\"\"\"\n # Merge the two overlapping DEMs and check that output bounds and shape is correct\n stacked_img = gu.spatial_tools.stack_rasters([rasters.img1, rasters.img2])\n\n assert stacked_img.count == 2\n assert rasters.img.shape == stacked_img.shape\n assert type(stacked_img) == gu.Raster # Check output object is always Raster, whatever input was given\n\n merged_bounds = gu.spatial_tools.merge_bounding_boxes(\n [rasters.img1.bounds, rasters.img2.bounds], resolution=rasters.img1.res[0]\n )\n assert merged_bounds == stacked_img.bounds\n\n # Check that reference works with input Raster\n stacked_img = gu.spatial_tools.stack_rasters([rasters.img1, rasters.img2], reference=rasters.img)\n assert rasters.img.bounds == stacked_img.bounds\n\n # Others than int or gu.Raster should raise a ValueError\n try:\n stacked_img = gu.spatial_tools.stack_rasters([rasters.img1, rasters.img2], reference=\"a string\")\n except ValueError as exception:\n if 
\"reference should be\" not in str(exception):\n raise exception\n\n # Check that use_ref_bounds works - use a img that do not cover the whole extent\n\n # This case should not preserve original extent\n stacked_img = gu.spatial_tools.stack_rasters([rasters.img1, rasters.img3])\n assert stacked_img.bounds != rasters.img.bounds\n\n # This case should preserve original extent\n stacked_img2 = gu.spatial_tools.stack_rasters(\n [rasters.img1, rasters.img3], reference=rasters.img, use_ref_bounds=True\n )\n assert stacked_img2.bounds == rasters.img.bounds\n\n\[email protected](\"rasters\", [pytest.lazy_fixture(\"images_1d\")]) # type: ignore\n# pytest.lazy_fixture('images_3d')]) ##Requires Raster.reproject() fix.\ndef test_merge_rasters(rasters) -> None: # type: ignore\n \"\"\"Test merge_rasters\"\"\"\n # Merge the two overlapping DEMs and check that it closely resembles the initial DEM\n merged_img = gu.spatial_tools.merge_rasters([rasters.img1, rasters.img2])\n assert rasters.img.data.shape == merged_img.data.shape\n assert rasters.img.bounds == merged_img.bounds\n\n diff = rasters.img.data - merged_img.data\n\n assert np.abs(np.nanmean(diff)) < 0.3\n\n # Check that reference works\n merged_img2 = gu.spatial_tools.merge_rasters([rasters.img1, rasters.img2], reference=rasters.img)\n assert merged_img2 == merged_img\n\n\ndef test_subdivide_array() -> None:\n\n test_shape = (6, 4)\n test_count = 4\n subdivision_grid = gu.spatial_tools.subdivide_array(test_shape, test_count)\n\n assert subdivision_grid.shape == test_shape\n assert np.unique(subdivision_grid).size == test_count\n\n assert np.unique(gu.spatial_tools.subdivide_array((3, 3), 3)).size == 3\n\n with pytest.raises(ValueError, match=r\"Expected a 2D shape, got 1D shape.*\"):\n gu.spatial_tools.subdivide_array((5,), 2)\n\n with pytest.raises(ValueError, match=r\"Shape.*smaller than.*\"):\n gu.spatial_tools.subdivide_array((5, 2), 15)\n\n\[email protected](\"dtype\", [\"uint8\", \"uint16\", \"int32\", \"float32\", \"float16\"]) # type: ignore\[email protected](\n \"mask_and_viewable\",\n [\n (None, True), # An ndarray with no mask should support views\n (False, True), # A masked array with an empty mask should support views\n ([True, False, False, False], False), # A masked array with an occupied mask should not support views.\n ([False, False, False, False], True), # A masked array with an empty occupied mask should support views.\n ],\n) # type: ignore\[email protected](\n \"shape_and_check_passes\",\n [\n ((1, 2, 2), True), # A 3D array with a shape[0] == 1 is okay.\n ((2, 1, 2), False), # A 3D array with a shape[0] != 1 is not okay.\n ((2, 2), True), # A 2D array is okay.\n ((4,), True), # A 1D array is okay.\n ],\n) # type: ignore\ndef test_get_array_and_mask(\n dtype: str,\n mask_and_viewable: tuple[None | bool | list[bool], bool],\n shape_and_check_passes: tuple[tuple[int, ...], bool],\n) -> None:\n \"\"\"Validate that the function returns views when expected, and copies otherwise.\"\"\"\n warnings.simplefilter(\"error\")\n\n masked_values, view_should_be_possible = mask_and_viewable\n shape, check_should_pass = shape_and_check_passes\n\n # Create an array of the specified dtype\n array = np.ones(shape, dtype=dtype)\n if masked_values is not None:\n if masked_values is False:\n array = np.ma.masked_array(array)\n else:\n array = np.ma.masked_array(array, mask=np.reshape(masked_values, array.shape))\n\n # Validate that incorrect shapes raise the correct error.\n if not check_should_pass:\n with pytest.raises(ValueError, 
match=\"Invalid array shape given\"):\n gu.spatial_tools.get_array_and_mask(array, check_shape=True)\n\n # Stop the test here as the failure is now validated.\n return\n\n # Get a copy of the array and check its shape (it should always pass at this point)\n arr, _ = gu.spatial_tools.get_array_and_mask(array, copy=True, check_shape=True)\n\n # Validate that the array is a copy\n assert not np.shares_memory(arr, array)\n\n # If it was an integer dtype and it had a mask, validate that the array is now \"float32\"\n if np.issubdtype(dtype, np.integer) and np.any(masked_values or False):\n assert arr.dtype == \"float32\"\n\n # If there was no mask or the mask was empty, validate that arr and array are equivalent\n if not np.any(masked_values or False):\n assert np.sum(np.abs(array - arr)) == 0.0\n\n with warnings.catch_warnings(record=True) as caught_warnings:\n warnings.simplefilter(\"always\")\n\n # Try to create a view.\n arr_view, mask = gu.spatial_tools.get_array_and_mask(array, copy=False)\n\n # If it should be possible, validate that there were no warnings.\n if view_should_be_possible:\n assert len(caught_warnings) == 0, (caught_warnings[0].message, array)\n # Otherwise, validate that one warning was raised with the correct text.\n else:\n assert len(caught_warnings) == 1\n assert \"Copying is required\" in str(caught_warnings[0].message)\n\n # Validate that the view shares memory if it was possible, or otherwise that it is a copy.\n if view_should_be_possible:\n assert np.shares_memory(array, arr_view)\n else:\n assert not np.shares_memory(array, arr_view)\n\n\nclass TestSubsample:\n \"\"\"\n Different examples of 1D to 3D arrays with masked values for testing.\n \"\"\"\n\n # Case 1 - 1D array, 1 masked value\n array1D = np.ma.masked_array(np.arange(10), mask=np.zeros(10))\n array1D.mask[3] = True\n assert np.ndim(array1D) == 1\n assert np.count_nonzero(array1D.mask) > 0\n\n # Case 2 - 2D array, 1 masked value\n array2D = np.ma.masked_array(np.arange(9).reshape((3, 3)), mask=np.zeros((3, 3)))\n array2D.mask[0, 1] = True\n assert np.ndim(array2D) == 2\n assert np.count_nonzero(array2D.mask) > 0\n\n # Case 3 - 3D array, 1 masked value\n array3D = np.ma.masked_array(np.arange(9).reshape((1, 3, 3)), mask=np.zeros((1, 3, 3)))\n array3D = np.ma.vstack((array3D, array3D + 10))\n array3D.mask[0, 0, 1] = True\n assert np.ndim(array3D) == 3\n assert np.count_nonzero(array3D.mask) > 0\n\n @pytest.mark.parametrize(\"array\", [array1D, array2D, array3D]) # type: ignore\n def test_subsample(self, array: np.ndarray) -> None:\n \"\"\"\n Test gu.spatial_tools.subsample_raster.\n \"\"\"\n # Test that subsample > 1 works as expected, i.e. 
output 1D array, with no masked values, or selected size\n for npts in np.arange(2, np.size(array)):\n random_values = gu.spatial_tools.subsample_raster(array, subsample=npts)\n assert np.ndim(random_values) == 1\n assert np.size(random_values) == npts\n assert np.count_nonzero(random_values.mask) == 0\n\n # Test if subsample > number of valid values => return all\n random_values = gu.spatial_tools.subsample_raster(array, subsample=np.size(array) + 3)\n assert np.all(np.sort(random_values) == array[~array.mask])\n\n # Test if subsample = 1 => return all valid values\n random_values = gu.spatial_tools.subsample_raster(array, subsample=1)\n assert np.all(np.sort(random_values) == array[~array.mask])\n\n # Test if subsample < 1\n random_values = gu.spatial_tools.subsample_raster(array, subsample=0.5)\n assert np.size(random_values) == int(np.size(array) * 0.5)\n\n # Test with optional argument return_indices\n indices = gu.spatial_tools.subsample_raster(array, subsample=0.3, return_indices=True)\n assert np.ndim(indices) == 2\n assert len(indices) == np.ndim(array)\n assert np.ndim(array[indices]) == 1\n assert np.size(array[indices]) == int(np.size(array) * 0.3)\n" ]
[ [ "numpy.ma.vstack", "numpy.abs", "numpy.unique", "numpy.reshape", "numpy.shares_memory", "numpy.issubdtype", "numpy.arange", "numpy.ma.masked_array", "numpy.sort", "numpy.ones", "numpy.ndim", "numpy.size", "numpy.mean", "numpy.any", "numpy.count_nonzero", "numpy.nanmean", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sarahthiele/hush
[ "5a6a67cacd21615c9c02a6c3539c598ddf3da405" ]
[ "src/figures/PSD.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport astropy.units as u\n\n\ndef func(x, a, b, c, d, e):\n return a + b * x + c * x ** 2 + d * x ** 3 + e * x ** 4\n\nmodel = \"fiducial\"\ncolors = [\"#add0ed\", \"#2b5d87\", \"#4288c2\", \"#17334a\"]\nTobs = 4 * u.yr\n\npower_dat_F50 = pd.read_hdf(\n \"../data/results.hdf\", key=\"total_power_DWDs_{}_{}\".format(\"F50\", model)\n)\npopt_F50 = pd.read_hdf(\"../data/results.hdf\", key=\"conf_fit_DWDs_{}_{}\".format(\"F50\", model))\npopt_F50 = popt_F50.values.flatten()\n\npower_dat_FZ = pd.read_hdf(\n \"../data/results.hdf\", key=\"total_power_DWDs_{}_{}\".format(\"FZ\", model)\n)\npopt_FZ = pd.read_hdf(\"../data/results.hdf\", key=\"conf_fit_DWDs_{}_{}\".format(\"FZ\", model))\npopt_FZ = popt_FZ.values.flatten()\n\nconf_fit_FZ = (\n 10\n ** func(\n x=np.log10(np.linspace(1e-4, 1e-1, 100000)),\n a=popt_FZ[0],\n b=popt_FZ[1],\n c=popt_FZ[2],\n d=popt_FZ[3],\n e=popt_FZ[4],\n )\n * Tobs.to(u.s).value\n)\n\nconf_fit_F50 = (\n 10\n ** func(\n x=np.log10(np.linspace(1e-4, 1e-1, 100000)),\n a=popt_F50[0],\n b=popt_F50[1],\n c=popt_F50[2],\n d=popt_F50[3],\n e=popt_F50[4],\n )\n * Tobs.to(u.s).value\n)\n\n\nfig, (ax1) = plt.subplots(1, 1, figsize=(6, 4.2))\nplt.plot(\n power_dat_F50.f_gw[::10],\n power_dat_F50.strain_2[::10] * Tobs.to(u.s).value,\n c=colors[1],\n lw=1,\n alpha=1,\n rasterized=True,\n)\nplt.plot(\n power_dat_FZ.f_gw[::10],\n power_dat_FZ.strain_2[::10] * Tobs.to(u.s).value,\n c=colors[0],\n lw=1,\n alpha=0.8,\n rasterized=True,\n)\nplt.plot(\n np.linspace(1e-4, 1e-1, 100000),\n conf_fit_F50,\n c=colors[3],\n ls=\"--\",\n lw=2,\n label=r\"F50\",\n)\nplt.plot(\n np.linspace(1e-4, 1e-1, 100000),\n conf_fit_FZ,\n c=colors[2],\n ls=\"--\",\n lw=2,\n label=r\"FZ\",\n)\nplt.xscale(\"log\")\nplt.yscale(\"log\")\n\nax1.set_ylabel(r\"PSD [Hz$^{-1}$]\", size=15)\nax1.set_xlabel(r\"f$_{\\rm{GW}}$ [Hz]\", size=15)\nax1.tick_params(labelsize=12)\nax1.set_yticks([1e-38, 1e-37, 1e-36, 1e-35, 1e-34])\nplt.xlim(1e-4, 3e-2)\nplt.ylim(1e-38, 5e-34)\nplt.legend(prop={\"size\": 12}, ncol=2, frameon=False, loc=(0, 1))\nplt.tight_layout()\nplt.savefig(\"PSD.pdf\", dpi=100)\nax1.set_ylabel(r'PSD [Hz$^{-1}$]', size=15)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xscale" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KorA6/open_model_zoo
[ "2856f9d6f5b4d2cb19b0c98c127b6c758851c51b" ]
[ "tools/accuracy_checker/openvino/tools/accuracy_checker/adapters/retinanet.py" ]
[ "\"\"\"\nCopyright (c) 2018-2022 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom .adapter import Adapter\nfrom ..config import ListField, StringField, NumberField\nfrom ..representation import DetectionPrediction\nfrom ..postprocessor import NMS\n\n\nclass MultiOutRetinaNet(Adapter):\n __provider__ = 'retinanet_multihead'\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'boxes_outputs': ListField(description='boxes localization outputs'),\n 'class_outputs': ListField(description=\"outputs with classes probabilities\"),\n 'ratios': ListField(\n description='ratio for anchors generation', optional=True, default=[1.0, 2.0, 0.5], value_type=float\n ),\n 'pre_nms_top_k': NumberField(\n description='pre nms keep top k boxes', value_type=int, optional=True, default=1000\n ),\n 'post_nms_top_k': NumberField(\n description='post nms keep top k boxes', value_type=int, optional=True, default=100\n ),\n 'min_conf': NumberField(\n description='min score for detection filtering', value_type=float, optional=True, default=0.05\n ),\n 'nms_threshold': NumberField(\n description='overlap threshold for nms', value_type=float, optional=True, default=0.5\n )\n })\n return params\n\n def configure(self):\n self.ratios = self.get_value_from_config('ratios')\n self.scales = [4 * 2 ** (i / 3) for i in range(3)]\n\n self.boxes_outs = self.get_value_from_config('boxes_outputs')\n self.class_outs = self.get_value_from_config('class_outputs')\n self.anchors = {}\n assert len(self.boxes_outs) == len(self.class_outs), 'the number of boxes and classes heads should be equal'\n self.pre_nms_top_k = self.get_value_from_config('pre_nms_top_k')\n self.post_nms_top_k = self.get_value_from_config('post_nms_top_k')\n self.min_conf = self.get_value_from_config('min_conf')\n self.nms_threshold = self.get_value_from_config('nms_threshold')\n self.outputs_verified = False\n\n def select_output_blob(self, outputs):\n def generate_out_names(list_names, outputs):\n return [self.check_output_name(out, outputs) for out in list_names]\n self.boxes_outs = generate_out_names(self.boxes_outs, outputs)\n self.class_outs = generate_out_names(self.class_outs, outputs)\n self.outputs_verified = True\n\n def decode_boxes(self, raw_outputs, input_shape):\n def generate_anchors(stride, ratio_vals, scales_vals):\n scales = np.tile(np.array(scales_vals), (len(ratio_vals), 1))\n scales = np.transpose(scales, (0, 1)).reshape((-1, 1))\n ratios = ratio_vals * len(scales_vals)\n\n wh = np.tile(np.array([stride]), (len(ratios), 2))\n ws = np.sqrt(wh[:, 0] * wh[:, 1] / ratios)\n dwh = np.stack([ws, ws * ratios], axis=1)\n xy1 = 0.5 * (wh - dwh * scales)\n xy2 = 0.5 * (wh + dwh * scales)\n return np.concatenate([xy1, xy2], axis=1)\n\n cls_heads = [raw_outputs[cls_out] for cls_out in self.class_outs]\n box_heads = [raw_outputs[box_out] for box_out in self.boxes_outs]\n decoded = []\n for cls_head, box_head in zip(cls_heads, box_heads):\n # 
Generate level's anchors\n stride = input_shape[-1] // cls_head.shape[-1]\n if stride not in self.anchors:\n self.anchors[stride] = generate_anchors(stride, self.ratios, self.scales)\n\n # Decode and filter boxes\n decoded.append(\n self.decode(cls_head, box_head, stride, self.min_conf, self.pre_nms_top_k,\n anchors=self.anchors[stride])\n )\n\n # Perform non-maximum suppression\n decoded = [np.concatenate(tensors, 1) for tensors in zip(*decoded)]\n return self.nms(*decoded, nms=self.nms_threshold, ndetections=self.post_nms_top_k)\n\n @staticmethod\n def decode(all_cls_head, all_box_head, stride=1, threshold=0.05, top_n=1000, anchors=None):\n def delta2box(deltas, anchors, size, stride):\n 'Convert deltas from anchors to boxes'\n\n anchors_wh = anchors[:, 2:] - anchors[:, :2] + 1\n ctr = anchors[:, :2] + 0.5 * anchors_wh\n pred_ctr = deltas[:, :2] * anchors_wh + ctr\n pred_wh = np.exp(deltas[:, 2:]) * anchors_wh\n\n boxes = np.concatenate([\n pred_ctr - 0.5 * pred_wh,\n pred_ctr + 0.5 * pred_wh - 1,\n ], axis=1)\n\n M = np.array([*size, *size]) * stride - 1\n return np.clip(boxes, 0, M)\n\n num_boxes = 4\n num_anchors = anchors.shape[0] if anchors is not None else 1\n num_classes = all_cls_head.shape[1] // num_anchors\n height, width = all_cls_head.shape[-2:]\n\n batch_size = all_cls_head.shape[0]\n out_scores = np.zeros((batch_size, top_n))\n out_boxes = np.zeros((batch_size, top_n, num_boxes))\n out_classes = np.zeros((batch_size, top_n))\n\n # Per item in batch\n for batch in range(batch_size):\n cls_head = all_cls_head[batch, :, :, :].reshape(-1)\n box_head = all_box_head[batch, :, :, :].reshape(-1, num_boxes)\n\n # Keep scores over threshold\n keep = np.nonzero(cls_head >= threshold)[0]\n if np.size(keep) == 0:\n continue\n\n # Gather top elements\n scores = cls_head[keep]\n indices = np.argsort(scores)[::-1]\n indices = indices[:min(top_n, keep.size)]\n scores = scores[indices]\n indices = keep[indices]\n classes = (indices / width / height) % num_classes\n classes = classes.astype(int)\n\n # Infer kept bboxes\n x = indices % width\n y = (indices // width) % height\n a = indices // num_classes // height // width\n box_head = box_head.reshape(num_anchors, num_boxes, height, width)\n boxes = box_head[a, :, y, x]\n\n if anchors is not None:\n grid = np.stack([x, y, x, y], 1) * stride + anchors[a, :]\n boxes = delta2box(boxes, grid, [width, height], stride)\n\n out_scores[batch, :scores.shape[0]] = scores\n out_boxes[batch, :boxes.shape[0], :] = boxes\n out_classes[batch, :classes.shape[0]] = classes\n\n return out_scores, out_boxes, out_classes\n\n @staticmethod\n def nms(all_scores, all_boxes, all_classes, nms=0.5, ndetections=100):\n 'Non Maximum Suppression'\n batch_size = all_scores.shape[0]\n out_scores = np.zeros((batch_size, ndetections))\n out_boxes = np.zeros((batch_size, ndetections, 4))\n out_classes = np.zeros((batch_size, ndetections))\n\n # Per item in batch\n for batch in range(batch_size):\n # Discard null scores\n keep = (all_scores[batch, :].reshape(-1) > 0).nonzero()\n scores = all_scores[batch, keep].reshape(-1)\n boxes = all_boxes[batch, keep, :].reshape(-1, 4)\n classes = all_classes[batch, keep].reshape(-1)\n\n if scores.size == 0:\n continue\n\n # Sort boxes\n indices = np.argsort(scores)[::-1]\n boxes, classes, scores = boxes[indices], classes[indices], scores[indices]\n areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1).reshape(-1)\n keep = np.ones(len(scores))\n\n for i in range(ndetections):\n if i >= keep.nonzero()[0].size or i >= 
scores.size:\n i -= 1\n break\n\n # Find overlapping boxes with lower score\n xy1 = np.maximum(boxes[:, :2], boxes[i, :2])\n xy2 = np.minimum(boxes[:, 2:], boxes[i, 2:])\n inter = np.prod((xy2 - xy1 + 1).clip(0), 1)\n criterion = ((scores > scores[i]) |\n (inter / (areas + areas[i] - inter) <= nms) |\n (classes != classes[i]))\n criterion[i] = 1\n\n # Only keep relevant boxes\n scores = scores[criterion.nonzero()].reshape(-1)\n boxes = boxes[criterion.nonzero(), :].reshape(-1, 4)\n classes = classes[criterion.nonzero()].reshape(-1)\n areas = areas[criterion.nonzero()].reshape(-1)\n keep[(~criterion).nonzero()] = 0\n\n if i >= scores.size:\n i = scores.size - 2\n out_scores[batch, :i + 1] = scores[:i + 1]\n out_boxes[batch, :i + 1, :] = boxes[:i + 1, :]\n out_classes[batch, :i + 1] = classes[:i + 1]\n\n return out_scores, out_boxes, out_classes\n\n def process(self, raw, identifiers, frame_meta):\n raw_outputs = self._extract_predictions(raw, frame_meta)\n if not self.outputs_verified:\n self.select_output_blob(raw_outputs)\n input_shape_dict = frame_meta[0].get('input_shape', {'data': (1, 3, 480, 640)})\n input_shape = next(iter(input_shape_dict.values()))\n out_scores, out_boxes, out_classes = self.decode_boxes(raw_outputs, input_shape)\n result = []\n for identifier, boxes, scores, labels in zip(identifiers, out_boxes, out_scores, out_classes):\n non_empty = (scores > 0).nonzero()[0]\n result.append(DetectionPrediction(identifier, labels[non_empty], scores[non_empty], *boxes[non_empty].T))\n return result\n\n\nclass RetinaNetAdapter(Adapter):\n __provider__ = 'retinanet'\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'loc_out': StringField(description='boxes localization output'),\n 'class_out': StringField(description=\"output with classes probabilities\")\n })\n return params\n\n def configure(self):\n self.loc_out = self.get_value_from_config('loc_out')\n self.cls_out = self.get_value_from_config('class_out')\n self.pyramid_levels = [3, 4, 5, 6, 7]\n self.strides = [2 ** x for x in self.pyramid_levels]\n self.sizes = [2 ** (x + 2) for x in self.pyramid_levels]\n self.ratios = np.array([0.5, 1, 2])\n self.scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n self.std = np.array([0.1, 0.1, 0.2, 0.2])\n self.outputs_verified = False\n\n def select_output_blob(self, outputs):\n self.loc_out = self.check_output_name(self.loc_out, outputs)\n self.cls_out = self.check_output_name(self.cls_out, outputs)\n self.outputs_verified = True\n\n def process(self, raw, identifiers, frame_meta):\n raw_outputs = self._extract_predictions(raw, frame_meta)\n if not self.outputs_verified:\n self.select_output_blob(raw_outputs)\n results = []\n for identifier, loc_pred, cls_pred, meta in zip(\n identifiers, raw_outputs[self.loc_out], raw_outputs[self.cls_out], frame_meta\n ):\n _, _, h, w = next(iter(meta.get('input_shape', {'data': (1, 3, 800, 800)}).values()))\n anchors = self.create_anchors([w, h])\n transformed_anchors = self.regress_boxes(anchors, loc_pred)\n labels, scores = np.argmax(cls_pred, axis=1), np.max(cls_pred, axis=1)\n scores_mask = np.reshape(scores > 0.05, -1)\n transformed_anchors = transformed_anchors[scores_mask, :]\n x_mins, y_mins, x_maxs, y_maxs = transformed_anchors.T\n results.append(DetectionPrediction(\n identifier, labels[scores_mask], scores[scores_mask], x_mins / w, y_mins / h, x_maxs / w, y_maxs / h\n ))\n\n return results\n\n def create_anchors(self, input_shape):\n def _generate_anchors(base_size=16):\n \"\"\"\n 
Generate anchor (reference) windows by enumerating aspect ratios X\n scales w.r.t. a reference window.\n \"\"\"\n num_anchors = len(self.ratios) * len(self.scales)\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n # scale base_size\n anchors[:, 2:] = base_size * np.tile(self.scales, (2, len(self.ratios))).T\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(self.ratios, len(self.scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(self.ratios, len(self.scales))\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors\n\n def _shift(shape, stride, anchors):\n shift_x = (np.arange(0, shape[1]) + 0.5) * stride\n shift_y = (np.arange(0, shape[0]) + 0.5) * stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n\n shifts = np.vstack((\n shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel()\n )).transpose()\n a = anchors.shape[0]\n k = shifts.shape[0]\n all_anchors = (anchors.reshape((1, a, 4)) + shifts.reshape((1, k, 4)).transpose((1, 0, 2)))\n all_anchors = all_anchors.reshape((k * a, 4))\n\n return all_anchors\n\n image_shapes = [(np.array(input_shape) + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]\n # compute anchors over all pyramid levels\n all_anchors = np.zeros((0, 4)).astype(np.float32)\n for idx, _ in enumerate(self.pyramid_levels):\n anchors = _generate_anchors(base_size=self.sizes[idx])\n shifted_anchors = _shift(image_shapes[idx], self.strides[idx], anchors)\n all_anchors = np.append(all_anchors, shifted_anchors, axis=0)\n\n return all_anchors\n\n def regress_boxes(self, boxes, deltas):\n widths = boxes[:, 2] - boxes[:, 0]\n heights = boxes[:, 3] - boxes[:, 1]\n ctr_x = boxes[:, 0] + 0.5 * widths\n ctr_y = boxes[:, 1] + 0.5 * heights\n\n dx = deltas[:, 0] * self.std[0]\n dy = deltas[:, 1] * self.std[1]\n dw = deltas[:, 2] * self.std[2]\n dh = deltas[:, 3] * self.std[3]\n\n pred_ctr_x = ctr_x + dx * widths\n pred_ctr_y = ctr_y + dy * heights\n pred_w = np.exp(dw) * widths\n pred_h = np.exp(dh) * heights\n\n pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w\n pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h\n pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w\n pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h\n\n pred_boxes = np.stack([pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2], axis=1)\n\n return pred_boxes\n\n\nclass RetinaNetTF2(Adapter):\n __provider__ = 'retinanet_tf2'\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'boxes_outputs': ListField(description='boxes localization output', value_type=str),\n 'class_outputs': ListField(description=\"output with classes probabilities\"),\n 'min_level': NumberField(optional=True, value_type=int, default=3, description='min pyramid level'),\n 'max_level': NumberField(optional=True, value_type=int, default=7, description='max pyramid level'),\n 'aspect_ratios': ListField(\n value_type=float, optional=True, default=[1, 2, 0.5], description='aspect ratio levels'\n ),\n 'num_scales': NumberField(\n optional=True, default=3, value_type=int, min_value=1, description='number anchor scales'),\n 'anchor_size': NumberField(optional=True, default=4, description='anchor box size'),\n 'total_size': NumberField(\n optional=True, default=100, value_type=int, min_value=1, description='final number of boxes'\n ),\n 'pre_nms_top_k': NumberField(\n 
optional=True, value_type=int, min_value=1, default=5000,\n description='number of keep top by score boxes before nms'),\n 'score_threshold': NumberField(\n value_type=float, min_value=0, max_value=1, default=0.05, description='scores threshold'\n ),\n 'nms_threshold': NumberField(\n value_type=float, min_value=0, max_value=1, default=0.5, description='nms threshold'\n )\n\n })\n return params\n\n def configure(self):\n self.loc_out = self.get_value_from_config('boxes_outputs')\n self.cls_out = self.get_value_from_config('class_outputs')\n self.min_level = self.get_value_from_config('min_level')\n self.max_level = self.get_value_from_config('max_level')\n self.aspect_ratios = self.get_value_from_config('aspect_ratios')\n self.anchor_size = self.get_value_from_config('anchor_size')\n self.num_scales = self.get_value_from_config('num_scales')\n self.max_total_size = self.get_value_from_config('total_size')\n self.nms_iou_threshold = self.get_value_from_config('nms_threshold')\n self.score_threshold = self.get_value_from_config('score_threshold')\n self.pre_nms_num_boxes = self.get_value_from_config('pre_nms_top_k')\n self.outputs_verified = False\n\n def select_output_blob(self, outputs):\n def generate_out_names(list_names, outputs):\n return [self.check_output_name(out, outputs) for out in list_names]\n self.loc_out = generate_out_names(self.loc_out, outputs)\n self.cls_out = generate_out_names(self.cls_out, outputs)\n self.outputs_verified = True\n\n def _generate_anchor_boxes(self, image_size):\n boxes_all = []\n for level in range(self.min_level, self.max_level + 1):\n boxes_l = []\n for scale in range(self.num_scales):\n for aspect_ratio in self.aspect_ratios:\n stride = 2 ** level\n intermediate_scale = 2 ** (scale / float(self.num_scales))\n base_anchor_size = self.anchor_size * stride * intermediate_scale\n aspect_x = aspect_ratio ** 0.5\n aspect_y = aspect_ratio ** -0.5\n half_anchor_size_x = base_anchor_size * aspect_x / 2.0\n half_anchor_size_y = base_anchor_size * aspect_y / 2.0\n x = np.arange(stride / 2, image_size[1], stride)\n y = np.arange(stride / 2, image_size[0], stride)\n xv, yv = np.meshgrid(x, y)\n xv = np.reshape(xv, -1)\n yv = np.reshape(yv, -1)\n boxes = np.stack([\n yv - half_anchor_size_y, xv - half_anchor_size_x,\n yv + half_anchor_size_y, xv + half_anchor_size_x\n ], axis=1)\n boxes_l.append(boxes)\n boxes_l = np.stack(boxes_l, axis=1)\n boxes_l = np.reshape(boxes_l, [-1, 4])\n boxes_all.append(boxes_l)\n\n def unpack_labels(labels):\n unpacked_labels = OrderedDict()\n count = 0\n for level in range(self.min_level, self.max_level + 1):\n feat_size_y = int(image_size[0] / 2 ** level)\n feat_size_x = int(image_size[1] / 2 ** level)\n steps = feat_size_y * feat_size_x * self.num_scales * len(self.aspect_ratios)\n unpacked_labels[level] = np.reshape(labels[count:count + steps],\n [feat_size_y, feat_size_x, -1])\n count += steps\n return unpacked_labels\n\n return unpack_labels(np.concatenate(boxes_all, axis=0))\n\n def prepare_boxes_and_classes(self, raw, batch_id):\n boxes_outs, classes_outs = [], []\n for boxes_out, cls_out in zip(self.loc_out, self.cls_out):\n boxes_outs.append(np.transpose(raw[boxes_out][batch_id], (1, 2, 0)))\n classes_outs.append(np.transpose(raw[cls_out][batch_id], (1, 2, 0)))\n return boxes_outs, classes_outs\n\n def process(self, raw, identifiers, frame_meta):\n raw_outputs = self._extract_predictions(raw, frame_meta)\n if not self.outputs_verified:\n self.select_output_blob(raw_outputs)\n result = []\n for batch_id, (identifier, meta) in 
enumerate(zip(identifiers, frame_meta)):\n boxes_out, classes_out = self.prepare_boxes_and_classes(raw_outputs, batch_id)\n input_shape = [shape for shape in meta['input_shape'].values() if len(shape) == 4]\n input_shape = input_shape[0]\n image_size = input_shape[2:] if input_shape[1] == 3 else input_shape[1:3]\n boxes, scores, labels = self.process_single(boxes_out, classes_out, image_size)\n if np.size(boxes):\n x_mins, y_mins, x_maxs, y_maxs = boxes.T\n x_mins /= image_size[1]\n y_mins /= image_size[0]\n x_maxs /= image_size[1]\n y_maxs /= image_size[0]\n else:\n x_mins, y_mins, x_maxs, y_maxs = [], [], [], []\n result.append(\n DetectionPrediction(\n identifier, labels, scores,\n x_mins, y_mins, x_maxs, y_maxs\n ))\n return result\n\n def process_single(self, box_outputs, class_outputs, image_size):\n def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n boxes = []\n scores = []\n anchor_boxes = self._generate_anchor_boxes(image_size)\n for i in range(self.min_level, self.max_level + 1):\n box_outputs_i_shape = np.shape(box_outputs[i - self.min_level])\n num_anchors_per_locations = box_outputs_i_shape[-1] // 4\n num_classes = np.shape(class_outputs[i - self.min_level])[-1] // num_anchors_per_locations\n\n scores_i = sigmoid(np.reshape(class_outputs[i - self.min_level], [-1, num_classes]))\n scores_i = scores_i[:, 1:]\n\n anchor_boxes_i = np.reshape(anchor_boxes[i], [-1, 4])\n box_outputs_i = np.reshape(box_outputs[i - self.min_level], [-1, 4])\n boxes_i = self.decode_boxes(box_outputs_i, anchor_boxes_i)\n boxes_i[:, ::2] = np.clip(boxes_i[:, ::2], a_min=0, a_max=image_size[1] - 1)\n boxes_i[:, 1::2] = np.clip(boxes_i[:, 1::2], a_min=0, a_max=image_size[0] - 1)\n\n boxes.append(boxes_i)\n scores.append(scores_i)\n boxes = np.concatenate(boxes, axis=0)\n scores = np.concatenate(scores, axis=0)\n\n nmsed_boxes, nmsed_scores, nmsed_classes = self._generate_detections(\n np.expand_dims(boxes, axis=1), scores,\n self.max_total_size, self.nms_iou_threshold, self.score_threshold, self.pre_nms_num_boxes\n )\n\n return nmsed_boxes, nmsed_scores, nmsed_classes\n\n @staticmethod\n def decode_boxes(encoded_boxes, anchors):\n BBOX_XFORM_CLIP = np.log(1000. 
/ 16.)\n dy = encoded_boxes[..., 0:1]\n dx = encoded_boxes[..., 1:2]\n dh = encoded_boxes[..., 2:3]\n dw = encoded_boxes[..., 3:4]\n dh = np.minimum(dh, BBOX_XFORM_CLIP)\n dw = np.minimum(dw, BBOX_XFORM_CLIP)\n\n anchor_ymin = anchors[..., 0:1]\n anchor_xmin = anchors[..., 1:2]\n anchor_ymax = anchors[..., 2:3]\n anchor_xmax = anchors[..., 3:4]\n anchor_h = anchor_ymax - anchor_ymin + 1.0\n anchor_w = anchor_xmax - anchor_xmin + 1.0\n anchor_yc = anchor_ymin + 0.5 * anchor_h\n anchor_xc = anchor_xmin + 0.5 * anchor_w\n\n decoded_boxes_yc = dy * anchor_h + anchor_yc\n decoded_boxes_xc = dx * anchor_w + anchor_xc\n decoded_boxes_h = np.exp(dh) * anchor_h\n decoded_boxes_w = np.exp(dw) * anchor_w\n\n decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h\n decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w\n decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h - 1.0\n decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w - 1.0\n\n decoded_boxes = np.concatenate([\n decoded_boxes_xmin, decoded_boxes_ymin, decoded_boxes_xmax,\n decoded_boxes_ymax\n ], axis=-1)\n return decoded_boxes\n\n @staticmethod\n def _generate_detections(boxes,\n scores,\n max_total_size=100,\n nms_iou_threshold=0.5,\n score_threshold=0.05,\n pre_nms_num_boxes=5000):\n\n def _select_top_k_scores(scores_in, pre_nms_num_detections):\n num_anchors, num_class = scores_in.shape\n scores_trans = np.transpose(scores_in, [1, 0])\n scores_trans = np.reshape(scores_trans, [-1, num_anchors])\n\n indices_ = np.argsort(-1 * scores_trans)\n top_k_scores = -1 * np.sort(-1 * scores_trans)[:, :pre_nms_num_detections]\n top_k_indices = indices_[:, :pre_nms_num_detections]\n\n top_k_scores = np.reshape(top_k_scores,\n [num_class, pre_nms_num_detections])\n top_k_indices = np.reshape(top_k_indices,\n [num_class, pre_nms_num_detections])\n\n return np.transpose(top_k_scores,\n [1, 0]), np.transpose(top_k_indices, [1, 0])\n\n nmsed_boxes = []\n nmsed_classes = []\n nmsed_scores = []\n _, num_classes_for_box, _ = boxes.shape\n total_anchors, num_classes = scores.shape\n scores, indices = _select_top_k_scores(\n scores, min(total_anchors, pre_nms_num_boxes))\n for i in range(num_classes):\n boxes_i = boxes[:, min(num_classes_for_box - 1, i), :]\n scores_i = scores[:, i]\n boxes_i = boxes_i[indices[:, i], :]\n\n filtered_scores = scores_i > score_threshold\n boxes_i = boxes_i[filtered_scores]\n scores_i = scores_i[filtered_scores]\n if not np.size(scores_i):\n continue\n\n keep = NMS.nms(*boxes_i.T, scores_i, nms_iou_threshold)\n if len(keep) > max_total_size:\n keep = keep[:max_total_size]\n nms_boxes = boxes_i[keep]\n nms_scores = scores_i[keep]\n nmsed_classes_i = np.full(len(nms_scores), i+1)\n nmsed_boxes.append(nms_boxes)\n nmsed_scores.append(nms_scores)\n nmsed_classes.append(nmsed_classes_i)\n if np.size(nmsed_scores):\n nmsed_boxes = np.concatenate(nmsed_boxes, axis=0)\n nmsed_scores = np.concatenate(nmsed_scores, axis=0)\n nmsed_classes = np.concatenate(nmsed_classes, axis=0)\n sorted_order = np.argsort(nmsed_scores)[::-1]\n if sorted_order.size > max_total_size:\n sorted_order = sorted_order[:max_total_size]\n nmsed_scores = nmsed_scores[sorted_order]\n nmsed_boxes = nmsed_boxes[sorted_order, :]\n nmsed_classes = nmsed_classes[sorted_order]\n return nmsed_boxes, nmsed_scores, nmsed_classes\n" ]
[ [ "numpy.expand_dims", "numpy.minimum", "numpy.sqrt", "numpy.concatenate", "numpy.max", "numpy.exp", "numpy.clip", "numpy.reshape", "numpy.arange", "numpy.stack", "numpy.size", "numpy.argmax", "numpy.zeros", "numpy.log", "numpy.nonzero", "numpy.append", "numpy.transpose", "numpy.argsort", "numpy.meshgrid", "numpy.array", "numpy.maximum", "numpy.tile", "numpy.sort", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DanRyanIrish/ndcube
[ "f98f97ad9e65a8ddd79f047d76c596599cf94882" ]
[ "ndcube/tests/test_lookup_table_coord.py" ]
[ "import astropy.units as u\nimport gwcs.coordinate_frames as cf\nimport numpy as np\nimport pytest\nfrom astropy.coordinates import SkyCoord\nfrom astropy.time import Time\n\nfrom ndcube.extra_coords import LookupTableCoord\n\n\[email protected]\ndef lut_1d_distance():\n lookup_table = u.Quantity(np.arange(10) * u.km)\n return LookupTableCoord(lookup_table)\n\n\ndef test_repr_str(lut_1d_distance):\n assert str(lut_1d_distance.delayed_models) in str(lut_1d_distance)\n assert str(lut_1d_distance.frames) in str(lut_1d_distance)\n assert str(lut_1d_distance) in repr(lut_1d_distance)\n\n assert str(lut_1d_distance.delayed_models[0]) in repr(lut_1d_distance.delayed_models[0])\n\n\ndef test_exceptions(lut_1d_distance):\n with pytest.raises(TypeError):\n LookupTableCoord(u.Quantity([1, 2, 3], u.nm), [1, 2, 3])\n\n with pytest.raises(TypeError):\n lut_1d_distance & list()\n\n # Test two Time\n with pytest.raises(ValueError):\n LookupTableCoord(Time(\"2011-01-01\"), Time(\"2011-01-01\"))\n\n # Test two SkyCoord\n with pytest.raises(ValueError):\n LookupTableCoord(SkyCoord(10, 10, unit=u.deg), SkyCoord(10, 10, unit=u.deg))\n\n # Test not matching units\n with pytest.raises(u.UnitsError):\n LookupTableCoord(u.Quantity([1, 2, 3], u.nm), u.Quantity([1, 2, 3], u.s))\n\n\ndef test_1d_distance(lut_1d_distance):\n assert lut_1d_distance.model.n_inputs == 1\n assert lut_1d_distance.model.n_outputs == 1\n assert lut_1d_distance.model.lookup_table.shape == (10,)\n assert u.allclose(u.Quantity(range(10), u.pix), lut_1d_distance.model.points)\n\n assert u.allclose(lut_1d_distance.wcs.pixel_to_world(0), 0 * u.km)\n assert u.allclose(lut_1d_distance.wcs.pixel_to_world(9), 9 * u.km)\n assert lut_1d_distance.wcs.world_to_pixel(0 * u.km) == 0\n\n sub_ltc = lut_1d_distance[0:5]\n assert len(sub_ltc.delayed_models[0].lookup_table[0]) == 5\n\n\ndef test_3d_distance():\n lookup_table = (u.Quantity(np.arange(10) * u.km),\n u.Quantity(np.arange(10, 20) * u.km),\n u.Quantity(np.arange(20, 30) * u.km))\n\n ltc = LookupTableCoord(*lookup_table, mesh=True)\n assert ltc.model.n_inputs == 3\n assert ltc.model.n_outputs == 3\n\n assert ltc.wcs.world_n_dim == 3\n assert ltc.wcs.pixel_n_dim == 3\n\n assert u.allclose(ltc.wcs.pixel_to_world(0*u.pix, 0*u.pix, 0*u.pix),\n (0, 10, 20)*u.km)\n assert u.allclose(ltc.wcs.world_to_pixel(0*u.km, 10*u.km, 20*u.km), (0, 0, 0))\n\n sub_ltc = ltc[0:5, 0:6, 0:7]\n assert len(sub_ltc.delayed_models[0].lookup_table[0]) == 5\n assert len(sub_ltc.delayed_models[0].lookup_table[1]) == 6\n assert len(sub_ltc.delayed_models[0].lookup_table[2]) == 7\n\n\ndef test_2d_nout_1_no_mesh():\n lookup_table = np.arange(9).reshape(3, 3) * u.km, np.arange(9, 18).reshape(3, 3) * u.km\n\n ltc = LookupTableCoord(*lookup_table, mesh=False)\n assert ltc.wcs.world_n_dim == 2\n assert ltc.wcs.pixel_n_dim == 2\n\n assert ltc.model.n_inputs == 2\n assert ltc.model.n_outputs == 2\n\n assert u.allclose(ltc.wcs.pixel_to_world(0*u.pix, 0*u.pix),\n (0, 9)*u.km)\n\n # TODO: this model is not invertable\n # assert u.allclose(ltc.wcs.world_to_pixel(0*u.km, 9*u.km), (0, 0))\n\n sub_ltc = ltc[0:2, 0:2]\n assert sub_ltc.delayed_models[0].lookup_table[0].shape == (2, 2)\n assert sub_ltc.delayed_models[0].lookup_table[1].shape == (2, 2)\n\n\ndef test_1d_skycoord_no_mesh():\n sc = SkyCoord(range(10), range(10), unit=u.deg)\n ltc = LookupTableCoord(sc, mesh=False)\n assert ltc.model.n_inputs == 1\n assert ltc.model.n_outputs == 2\n\n sub_ltc = ltc[0:4]\n assert sub_ltc.delayed_models[0].lookup_table[0].shape == (4, )\n assert 
sub_ltc.delayed_models[0].lookup_table[1].shape == (4, )\n\n\ndef test_2d_skycoord_mesh():\n sc = SkyCoord(range(10), range(10), unit=u.deg)\n ltc = LookupTableCoord(sc, mesh=True)\n assert ltc.model.n_inputs == 2\n assert ltc.model.n_outputs == 2\n\n sub_ltc = ltc[0:4, 0:5]\n assert sub_ltc.delayed_models[0].lookup_table[0].shape == (4, )\n assert sub_ltc.delayed_models[0].lookup_table[1].shape == (5, )\n\n\[email protected]\ndef test_3d_skycoord_mesh():\n \"\"\"Known failure due to gwcs#120.\"\"\"\n sc = SkyCoord(range(10), range(10), range(10), unit=(u.deg, u.deg, u.AU))\n ltc = LookupTableCoord(sc, mesh=True)\n assert ltc.model.n_inputs == 3\n assert ltc.model.n_outputs == 3\n\n sub_ltc = ltc[0:4, 0:5, 0:6]\n assert sub_ltc.delayed_models[0].lookup_table[0].shape == (4, )\n assert sub_ltc.delayed_models[0].lookup_table[1].shape == (5, )\n assert sub_ltc.delayed_models[0].lookup_table[2].shape == (6, )\n\n\ndef test_2d_skycoord_no_mesh():\n data = np.arange(9).reshape(3, 3), np.arange(9, 18).reshape(3, 3)\n sc = SkyCoord(*data, unit=u.deg)\n ltc = LookupTableCoord(sc, mesh=False)\n assert ltc.model.n_inputs == 2\n assert ltc.model.n_outputs == 2\n\n sub_ltc = ltc[1:3, 1:2]\n assert sub_ltc.delayed_models[0].lookup_table[0].shape == (2, 1)\n assert sub_ltc.delayed_models[0].lookup_table[1].shape == (2, 1)\n\n\ndef test_1d_time():\n data = Time([\"2011-01-01T00:00:00\",\n \"2011-01-01T00:00:10\",\n \"2011-01-01T00:00:20\",\n \"2011-01-01T00:00:30\"], format=\"isot\")\n ltc = LookupTableCoord(data)\n assert ltc.model.n_inputs == 1\n assert ltc.model.n_outputs == 1\n assert u.allclose(ltc.model.lookup_table, u.Quantity((0, 10, 20, 30), u.s))\n\n assert ltc.wcs.pixel_to_world(0) == Time(\"2011-01-01T00:00:00\")\n assert ltc.wcs.world_to_pixel(Time(\"2011-01-01T00:00:00\")) == 0\n\n sub_ltc = ltc[1:3]\n assert sub_ltc.delayed_models[0].lookup_table.shape == (2,)\n\n\ndef test_join():\n time_ltc = LookupTableCoord(Time([\"2011-01-01T00:00:00\",\n \"2011-01-01T00:00:10\",\n \"2011-01-01T00:00:20\",\n \"2011-01-01T00:00:30\"], format=\"isot\"))\n\n wave_ltc = LookupTableCoord(range(10) * u.nm)\n\n ltc = time_ltc & wave_ltc\n\n assert ltc.model.n_inputs == 2\n assert ltc.model.n_outputs == 2\n\n assert isinstance(ltc.frame, cf.CompositeFrame)\n world = ltc.wcs.pixel_to_world(0, 0)\n assert world[0] == Time(\"2011-01-01T00:00:00\")\n assert u.allclose(world[1], 0 * u.nm)\n\n assert u.allclose(ltc.wcs.world_to_pixel(*world), (0, 0))\n\n sub_ltc = ltc[1:3, 1:3]\n assert len(sub_ltc.delayed_models) == 2\n assert sub_ltc.delayed_models[0].lookup_table.shape == (2,)\n assert sub_ltc.delayed_models[1].lookup_table[0].shape == (2,)\n\n sub_ltc = ltc[1:3, 2]\n assert len(sub_ltc.delayed_models) == 1\n assert sub_ltc.delayed_models[0].lookup_table.shape == (2,)\n\n\ndef test_join_3d():\n sc = SkyCoord(range(10), range(10), unit=u.deg)\n space_ltc = LookupTableCoord(sc, mesh=True)\n wave_ltc = LookupTableCoord(range(10) * u.nm)\n\n ltc = space_ltc & wave_ltc\n\n assert ltc.model.n_inputs == 3\n assert ltc.model.n_outputs == 3\n\n assert isinstance(ltc.frame, cf.CompositeFrame)\n world = ltc.wcs.pixel_to_world(0, 0, 0)\n assert isinstance(world[0], SkyCoord)\n assert u.allclose(world[1], 0 * u.nm)\n\n assert u.allclose(ltc.wcs.world_to_pixel(*world), (0, 0, 0))\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AyeshaSadiqa/thesis
[ "761eb0c37acd42707d52d4a6bfabe8ac566d8aa4" ]
[ "codes/models/archs/TDAN_arch.py" ]
[ "'''\nNetwork architecture for TDAN:\nTDAN: Temporally Deformable Alignment Network for Video Super-Resolution\n'''\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport models.archs.arch_util as arch_util\ntry:\n from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN\nexcept ImportError:\n raise ImportError('Failed to import DCNv2 module.')\n\n\nclass Align(nn.Module):\n\n def __init__(self, channel=1, nf=64, nb=5, groups=8):\n super(Align, self).__init__()\n\n self.initial_conv = nn.Conv2d(channel, nf, 3, padding=1, bias=True)\n self.residual_layers = arch_util.make_layer(arch_util.ResidualBlock_noBN, nb)\n\n self.bottle_neck = nn.Conv2d(nf * 2, nf, 3, padding=1, bias=True)\n\n self.offset_conv_1 = nn.Conv2d(nf, nf, 3, padding=1, bias=True)\n self.deform_conv_1 = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n self.offset_conv_2 = nn.Conv2d(nf, nf, 3, padding=1, bias=True)\n self.deform_conv_2 = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n self.offset_conv_3 = nn.Conv2d(nf, nf, 3, padding=1, bias=True)\n self.deform_conv_3 = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n\n self.offset_conv = nn.Conv2d(nf, nf, 3, padding=1, bias=True)\n self.deform_conv = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n self.reconstruction = nn.Conv2d(nf, channel, 3, padding=1, bias=True)\n\n def forward(self, x):\n B, N, C, W, H = x.size()\n\n # extract features\n y = x.view(-1, C, W, H)\n out = F.relu(self.initial_conv(y), inplace=True)\n out = self.residual_layers(out)\n out = out.view(B, N, -1, W, H)\n\n # reference frame\n ref_index = N // 2\n ref_frame = out[:, ref_index, :, :, :].clone().contiguous()\n # neighbor frames\n y = []\n for i in range(N):\n nei_frame = out[:, i, :, :, :].contiguous()\n fea = torch.cat([ref_frame, nei_frame], dim=1)\n fea = self.bottle_neck(fea)\n # feature transformation\n offset1 = self.offset_conv_1(fea)\n fea = self.deform_conv_1([fea, offset1])\n offset2 = self.offset_conv_2(fea)\n fea = self.deform_conv_2([fea, offset2])\n offset3 = self.offset_conv_3(fea)\n fea = self.deform_conv_3([nei_frame, offset3])\n offset = self.offset_conv(fea)\n aligned_fea = (self.deform_conv([fea, offset]))\n im = self.reconstruction(aligned_fea)\n y.append(im)\n y = torch.cat(y, dim=1)\n return y\n\n\nclass Trunk(nn.Module):\n\n def __init__(self, channel=1, nframes=5, scale=4, nf=64, nb=10):\n super(Trunk, self).__init__()\n self.feature_extractor = nn.Sequential(nn.Conv2d(nframes * channel, 64, 3, padding=1, bias=True),\n nn.ReLU(inplace=True))\n self.residual_layers = arch_util.make_layer(arch_util.ResidualBlock_noBN, nb)\n self.upsampler = nn.Sequential(arch_util.Upsampler(arch_util.default_conv, scale, 64, act=False),\n nn.Conv2d(64, 3, 3, padding=1, bias=False))\n\n def forward(self, x):\n '''\n :param x: (B, C*T, H, W)\n :return: (B, C, s*H, s*W)\n '''\n out = self.feature_extractor(x)\n out = self.residual_layers(out)\n out = self.upsampler(out)\n return out\n\n\nclass TDAN(nn.Module):\n '''Temporally Deformable Alignment Network'''\n def __init__(self, channel=1, nframes=5, scale=4, nf=64, nb_f=5, nb_b=10, groups=8):\n super(TDAN, self).__init__()\n\n self.align = Align(channel=channel, nf=nf, nb=nb_f, groups=groups)\n self.trunk = Trunk(channel=channel, nframes=nframes, scale=scale, nf=nf, 
nb=nb_b)\n\n def forward(self, x):\n '''\n :param x: (B, T, C, H, W)\n :return: (B, C, s*H, s*W)\n '''\n out = self.align(x)\n out = self.trunk(out)\n return out\n\n\nif __name__ == '__main__':\n B, N, C, W, H = 1, 7, 3, 64, 64\n model = TDAN(channel=C, nf=64, nframes=N, groups=8, scale=1).to(device=torch.device('cuda'))\n x = torch.randn(B, N, C, W, H).to(device=torch.device('cuda'))\n out = model(x)\n print(out.shape)\n" ]
[ [ "torch.cat", "torch.randn", "torch.nn.Conv2d", "torch.device", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
javierrodenas/clearml_javi
[ "b6326104fe6a6f522223c2ac3d87468990a9e6f2" ]
[ "get_confusion_matrix.py" ]
[ "import numpy as np\nimport os\nfrom numpy import loadtxt\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport pandas as pd\nimport glob\nfrom matplotlib import pyplot as plt\nfrom scipy.cluster.hierarchy import dendrogram\nfrom sklearn.cluster import AgglomerativeClustering\n\ndef buildData(preds, path):\n\n if preds.ndim == 3:\n preds = preds.reshape(preds.shape[0], preds.shape[-1])\n\n preds = np.argmax(preds, axis=1)\n\n\n image_class = dict()\n\n for num_class in range(1000):\n image_class[num_class] = []\n if num_class < 1000:\n for filename in glob.glob(path + str(num_class) + '/*.jpg'):\n image_class[num_class].append(filename)\n\n gts = []\n\n for num_class, filenames in image_class.items():\n for filename in filenames:\n gts.append(num_class)\n\n return preds, gts\n\n\n\ndef computeAndSaveConfusionMatrix(gts, preds, cm_matrix_path):\n\n\n cm = confusion_matrix(gts, preds, labels=range(1000))\n\n np.savetxt(os.path.join(cm_matrix_path, 'cm.csv'), cm, delimiter=',')\n\n with open(os.path.join(cm_matrix_path, 'classification_report.txt'), 'w') as f_obj:\n f_obj.write(classification_report(gts, preds))\n\n return cm\n\n\ndef getMaxDistance(df):\n\n for index_row, row in df.iterrows():\n for column in df.columns.values:\n value_row = df.loc[index_row][column]\n value_column = df.loc[column][index_row]\n if value_column > value_row:\n df[index_row][column] = value_column\n else:\n df[column][index_row] = value_row\n\n return df\n\n\n\ndef plotDendrogram(model, categories, **kwargs):\n # Create linkage matrix and then plot the dendrogram\n\n # create the counts of samples under each node\n counts = np.zeros(model.children_.shape[0])\n n_samples = len(model.labels_)\n for i, merge in enumerate(model.children_):\n current_count = 0\n for child_idx in merge:\n if child_idx < n_samples:\n current_count += 1 # leaf node\n else:\n current_count += counts[child_idx - n_samples]\n counts[i] = current_count\n\n linkage_matrix = np.column_stack([model.children_, model.distances_,\n counts]).astype(float)\n\n # Plot the corresponding dendrogram\n dendrogram(linkage_matrix, labels=categories, distance_sort='ascending', **kwargs)\n\n\ndef cleanDiagonal(df):\n\n for num_class in range(1000):\n df[num_class][num_class] = 1\n\n #df = df.replace(100, 1000)\n\n return df\n\n\n\n\ndef mainLoop():\n\n scores_path = r'/media/HDD_4TB/javi/FoodChallenge/ICCV/volo/output/train/20210721-202002-volo_d5-512/val.npz'\n cm_matrix_path = r'./'\n results = np.load(scores_path)\n files = results['file_array']\n preds = np.array(results['scores_array'])\n path_dataset = '/media/HDD_4TB/datasets/iccv/val/'\n path_to_save_dendrogram = '/media/HDD_4TB/javi/FoodChallenge/ICCV/volo/output/train/'\n\n preds, gts = buildData(preds, path_dataset)\n cm = computeAndSaveConfusionMatrix(gts, preds, cm_matrix_path)\n df = pd.DataFrame(cm)\n\n df = (df - df.min()) / (df.max() - df.min())\n df = 1 - df\n categories = [class_index for class_index in range(1000)]\n index = categories.copy()\n df.set_index = index\n df.columns = categories\n df = cleanDiagonal(df)\n #df = getMaxDistance(df)\n # Sklearn plot\n\n model = AgglomerativeClustering(distance_threshold=0, n_clusters=None, affinity='precomputed', linkage='single')\n model = model.fit(df.values)\n plt.title('Hierarchical Clustering Dendrogram')\n plt.figure(figsize=(50, 50))\n plotDendrogram(model, categories=categories)\n #hierarchy.dendrogram(Z, show_contracted=True, labels=df.index.tolist())\n plt.savefig(path_to_save_dendrogram + 
'ICCV_dendrogram.pdf', dpi=1200)\n\nif __name__ == \"__main__\":\n mainLoop()" ]
[ [ "matplotlib.pyplot.title", "sklearn.metrics.classification_report", "pandas.DataFrame", "matplotlib.pyplot.savefig", "numpy.argmax", "numpy.column_stack", "scipy.cluster.hierarchy.dendrogram", "numpy.load", "sklearn.cluster.AgglomerativeClustering", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
edumur/Qcodes
[ "ac262035c299a872995cecd210f0e84b0b85d751", "478af6e746f918a9e1dded7ec626484987f894d2" ]
[ "qcodes/instrument_drivers/rigol/DS4000.py", "qcodes/data/hdf5_format.py" ]
[ "import logging\nimport re\nimport time\nimport warnings\nfrom collections import namedtuple\nfrom typing import Any\n\nimport numpy as np\nfrom packaging import version\n\nfrom qcodes import VisaInstrument\nfrom qcodes import validators as vals\nfrom qcodes.instrument.channel import ChannelList, InstrumentChannel\nfrom qcodes.instrument.parameter import ArrayParameter, ParamRawDataType\nfrom qcodes.utils.validators import Bool, Ints\n\nlog = logging.getLogger(__name__)\n\nclass TraceNotReady(Exception):\n pass\n\n\nclass ScopeArray(ArrayParameter):\n def __init__(\n self,\n name: str,\n instrument: \"RigolDS4000Channel\",\n channel: int,\n raw: bool = False,\n ):\n super().__init__(\n name=name,\n shape=(1400,),\n label=\"Voltage\",\n unit=\"V\",\n setpoint_names=(\"Time\",),\n setpoint_labels=(\"Time\",),\n setpoint_units=(\"s\",),\n docstring=\"holds an array from scope\",\n instrument=instrument,\n )\n self.channel = channel\n self.raw = raw\n self.max_read_step = 50\n self.trace_ready = False\n\n def prepare_curvedata(self) -> None:\n \"\"\"\n Prepare the scope for returning curve data\n \"\"\"\n assert isinstance(self.instrument, RigolDS4000Channel)\n if self.raw:\n self.instrument.write(':STOP') # Stop acquisition\n self.instrument.write(':WAVeform:MODE RAW') # Set RAW mode\n else:\n self.instrument.write(':WAVeform:MODE NORM') # Set normal mode\n\n self.get_preamble()\n p = self.preamble\n\n # Generate time axis data\n xdata = np.linspace(p.xorigin, p.xorigin + p.xincrement * p.points, p.points)\n self.setpoints = (tuple(xdata),)\n self.shape = (p.points,)\n\n self.trace_ready = True\n\n def get_raw(self) -> ParamRawDataType:\n assert isinstance(self.instrument, RigolDS4000Channel)\n assert isinstance(self.root_instrument, DS4000)\n if not self.trace_ready:\n raise TraceNotReady('Please run prepare_curvedata to prepare '\n 'the scope for giving a trace.')\n else:\n self.trace_ready = False\n\n # Set the data type for waveforms to \"BYTE\"\n self.instrument.write(':WAVeform:FORMat BYTE')\n # Set read channel\n self.instrument.write(f':WAVeform:SOURce CHAN{self.channel}')\n\n data_bin = bytearray()\n if self.raw:\n log.info(\n 'Readout of raw waveform started, %g points', self.shape[0]\n )\n # Ask for the right number of points\n self.instrument.write(f':WAVeform:POINts {self.shape[0]}')\n # Resets the waveform data reading\n self.instrument.write(':WAVeform:RESet')\n # Starts the waveform data reading\n self.instrument.write(':WAVeform:BEGin')\n\n for i in range(self.max_read_step):\n status = self.instrument.ask(':WAVeform:STATus?').split(',')[0]\n\n # Ask and retrieve waveform data\n # It uses .read_raw() to get a byte\n # string since our data is binary\n self.instrument.write(':WAVeform:DATA?')\n data_chunk = self.root_instrument.visa_handle.read_raw()\n data_chuck = self._validate_strip_block(data_chunk)\n data_bin.extend(data_chuck)\n\n if status == 'IDLE':\n self.instrument.write(':WAVeform:END')\n break\n else:\n # Wait some time to have the buffer re-filled\n time.sleep(0.3)\n log.info(\n 'chucks read: %d, last chuck points: '\n '%g, total read size: %g',\n i, len(data_chuck), len(data_bin)\n )\n else:\n raise ValueError('Communication error')\n else:\n # Ask and retrieve waveform data\n # It uses .read_raw() to get a byte string since our data is binary\n log.info(\n 'Readout of display waveform started, %d points',\n self.shape[0]\n )\n self.instrument.write(':WAVeform:DATA?') # Query data\n data_chunk = self.root_instrument.visa_handle.read_raw()\n 
data_bin.extend(self._validate_strip_block(data_chunk))\n\n log.info('Readout ended, total read size: %g', len(data_bin))\n\n log.info('Data conversion')\n # Convert data to byte array\n data_raw = np.frombuffer(data_bin, dtype=np.uint8).astype(float)\n\n # Convert byte array to real data\n p = self.preamble\n data = (data_raw - p.yreference - p.yorigin) * p.yincrement\n log.info('Data conversion done')\n\n return data\n\n @staticmethod\n def _validate_strip_block(block: bytes) -> bytes:\n \"\"\"\n Given a block of raw data from the instrument, validate and\n then strip the header with\n size information. Raise ValueError if the sizes don't match.\n\n Args:\n block: The data block\n Returns:\n The stripped data\n \"\"\"\n # Validate header\n header = block[:11].decode('ascii')\n match = re.match(r'#9(\\d{9})', header)\n if match:\n size = int(match[1])\n block_nh = block[11:] # Strip header\n block_nh = block_nh.strip() # Strip \\n\n\n if size == len(block_nh):\n return block_nh\n\n raise ValueError('Malformed data')\n\n def get_preamble(self) -> None:\n assert isinstance(self.instrument, RigolDS4000Channel)\n preamble_nt = namedtuple('preamble_nt', [\"format\", \"mode\", \"points\", \"count\", \"xincrement\", \"xorigin\",\n \"xreference\", \"yincrement\", \"yorigin\", \"yreference\"])\n conv = lambda x: int(x) if x.isdigit() else float(x)\n\n preamble_raw = self.instrument.ask(':WAVeform:PREamble?')\n preamble_num = [conv(x) for x in preamble_raw.strip().split(',')]\n self.preamble = preamble_nt(*preamble_num)\n\n\nclass RigolDS4000Channel(InstrumentChannel):\n\n def __init__(self, parent: \"DS4000\",\n name: str,\n channel: int):\n super().__init__(parent, name)\n\n self.add_parameter(\"amplitude\",\n get_cmd=f\":MEASure:VAMP? chan{channel}\",\n get_parser = float\n )\n self.add_parameter(\"vertical_scale\",\n get_cmd=f\":CHANnel{channel}:SCALe?\",\n set_cmd=\":CHANnel{}:SCALe {}\".format(channel, \"{}\"),\n get_parser=float\n )\n\n # Return the waveform displayed on the screen\n self.add_parameter('curvedata',\n channel=channel,\n parameter_class=ScopeArray,\n raw=False\n )\n\n # Return the waveform in the internal memory\n self.add_parameter('curvedata_raw',\n channel=channel,\n parameter_class=ScopeArray,\n raw=True\n )\n\n\nclass DS4000(VisaInstrument):\n \"\"\"\n This is the QCoDeS driver for the Rigol DS4000 series oscilloscopes.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n address: str,\n timeout: float = 20,\n **kwargs: Any\n ):\n \"\"\"\n Initialises the DS4000.\n\n Args:\n name: Name of the instrument used by QCoDeS\n address: Instrument address as used by VISA\n timeout: visa timeout, in secs. long default (180)\n to accommodate large waveforms\n \"\"\"\n\n # Init VisaInstrument. 
device_clear MUST NOT be issued, otherwise communications hangs\n # due a bug in firmware\n super().__init__(name, address, device_clear=False, timeout=timeout, **kwargs)\n self.connect_message()\n\n self._check_firmware_version()\n\n # functions\n self.add_function('run',\n call_cmd=':RUN',\n docstring='Start acquisition')\n self.add_function('stop',\n call_cmd=':STOP',\n docstring='Stop acquisition')\n self.add_function('single',\n call_cmd=':SINGle',\n docstring='Single trace acquisition')\n self.add_function('force_trigger',\n call_cmd='TFORce',\n docstring='Force trigger event')\n self.add_function(\"auto_scale\",\n call_cmd=\":AUToscale\",\n docstring=\"Perform autoscale\")\n\n # general parameters\n self.add_parameter('trigger_type',\n label='Type of the trigger',\n get_cmd=':TRIGger:MODE?',\n set_cmd=':TRIGger:MODE {}',\n vals=vals.Enum('EDGE', 'PULS', 'RUNT', 'NEDG',\n 'SLOP', 'VID', 'PATT', 'RS232',\n 'IIC', 'SPI', 'CAN', 'FLEX', 'USB'))\n self.add_parameter('trigger_mode',\n label='Mode of the trigger',\n get_cmd=':TRIGger:SWEep?',\n set_cmd=':TRIGger:SWEep {}',\n vals=vals.Enum('AUTO', 'NORM', 'SING'))\n self.add_parameter(\"time_base\",\n label=\"Horizontal time base\",\n get_cmd=\":TIMebase:MAIN:SCALe?\",\n set_cmd=\":TIMebase:MAIN:SCALe {}\",\n get_parser=float,\n unit=\"s/div\")\n self.add_parameter(\"sample_point_count\",\n label=\"Number of the waveform points\",\n get_cmd=\":WAVeform:POINts?\",\n set_cmd=\":WAVeform:POINts {}\",\n get_parser=int,\n vals=Ints(min_value=1))\n self.add_parameter(\"enable_auto_scale\",\n label=\"Enable or disable autoscale\",\n get_cmd=\":SYSTem:AUToscale?\",\n set_cmd=\":SYSTem:AUToscale {}\",\n get_parser=bool,\n vals=Bool())\n\n channels = ChannelList(self, \"Channels\", RigolDS4000Channel, snapshotable=False)\n\n for channel_number in range(1, 5):\n channel = RigolDS4000Channel(self, f\"ch{channel_number}\", channel_number)\n channels.append(channel)\n\n self.add_submodule(\"channels\", channels.to_channel_tuple())\n\n def _check_firmware_version(self) -> None:\n #Require version 00.02.03\n\n idn = self.get_idn()\n verstr = idn[\"firmware\"]\n if verstr is None:\n raise RuntimeError(\"Could not determine firmware version of DS4000.\")\n ver = version.parse(verstr)\n if ver < version.parse(\"00.02.03\"):\n warnings.warn(\n \"Firmware version should be at least 00.02.03,\"\n \"data transfer may not work correctly\"\n )\n", "import json\nimport logging\nimport os\nfrom typing import TYPE_CHECKING\n\nimport h5py\nimport numpy as np\n\nimport qcodes as qc\n\nfrom .data_array import DataArray\nfrom .format import Formatter\n\nif TYPE_CHECKING:\n from .data_set import DataSet\n\nclass HDF5Format(Formatter):\n \"\"\"\n HDF5 formatter for saving qcodes datasets.\n\n Capable of storing (write) and recovering (read) qcodes datasets.\n\n \"\"\"\n\n _format_tag = 'hdf5'\n\n def close_file(self, data_set: 'DataSet'):\n \"\"\"\n Closes the hdf5 file open in the dataset.\n\n Args:\n data_set: DataSet object\n \"\"\"\n if hasattr(data_set, '_h5_base_group'):\n data_set._h5_base_group.close()\n # Removes reference to closed file\n del data_set._h5_base_group\n else:\n logging.warning(\n 'Cannot close file, data_set has no open hdf5 file')\n\n def _create_file(self, filepath):\n \"\"\"\n creates a hdf5 file (data_object) at a location specified by\n filepath\n \"\"\"\n folder, _filename = os.path.split(filepath)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n file = h5py.File(filepath, 'a')\n return file\n\n def _open_file(self, data_set, 
location=None):\n if location is None:\n location = data_set.location\n filepath = self._filepath_from_location(location,\n io_manager=data_set.io)\n data_set._h5_base_group = h5py.File(filepath, 'r+')\n\n def read(self, data_set: 'DataSet', location=None):\n \"\"\"\n Reads an hdf5 file specified by location into a data_set object.\n If no data_set is provided will create an empty data_set to read into.\n\n\n Args:\n data_set: the data to read into. Should already have\n attributes ``io`` (an io manager), ``location`` (string),\n and ``arrays`` (dict of ``{array_id: array}``, can be empty\n or can already have some or all of the arrays present, they\n expect to be overwritten)\n location (None or str): Location to write the data. If no location\n is provided will use the location specified in the dataset.\n \"\"\"\n def decode_bytes_if_needed(s):\n \"\"\"\n h5py 2 stores strings encoded as bytestrings\n h5py 3 fixes this and stores them as regular utf8 strings\n\n This is a simple wrapper to always convert to regular strings\n \"\"\"\n try:\n s = s.decode()\n except AttributeError:\n pass\n return s\n\n self._open_file(data_set, location)\n\n if '__format_tag' in data_set._h5_base_group.attrs:\n format_tag = data_set._h5_base_group.attrs['__format_tag']\n if format_tag != self._format_tag:\n raise Exception('format tag %s does not match tag %s of file %s' %\n (format_tag, self._format_tag, location))\n\n for i, array_id in enumerate(\n data_set._h5_base_group['Data Arrays'].keys()):\n # Decoding string is needed because of h5py/issues/379\n name = array_id # will be overwritten if not in file\n dat_arr = data_set._h5_base_group['Data Arrays'][array_id]\n\n # write ensures these attributes always exist\n name = decode_bytes_if_needed(dat_arr.attrs['name'])\n label = decode_bytes_if_needed(dat_arr.attrs['label'])\n\n # get unit from units if no unit field, for backward compatibility\n if 'unit' in dat_arr.attrs:\n unit = decode_bytes_if_needed(dat_arr.attrs['unit'])\n else:\n unit = decode_bytes_if_needed(dat_arr.attrs['units'])\n\n is_setpoint_str = decode_bytes_if_needed(dat_arr.attrs['is_setpoint'])\n is_setpoint = str_to_bool(is_setpoint_str)\n # if not is_setpoint:\n set_arrays = dat_arr.attrs['set_arrays']\n set_arrays = [decode_bytes_if_needed(s) for s in set_arrays]\n # else:\n # set_arrays = ()\n vals = dat_arr[:, 0]\n if 'shape' in dat_arr.attrs.keys():\n # extend with NaN if needed\n esize = np.prod(dat_arr.attrs['shape'])\n vals = np.append(vals, [np.nan] * (esize - vals.size))\n vals = vals.reshape(dat_arr.attrs['shape'])\n if array_id not in data_set.arrays.keys(): # create new array\n d_array = DataArray(\n name=name, array_id=array_id, label=label, parameter=None,\n unit=unit,\n is_setpoint=is_setpoint, set_arrays=(),\n preset_data=vals)\n data_set.add_array(d_array)\n else: # update existing array with extracted values\n d_array = data_set.arrays[array_id]\n d_array.name = name\n d_array.label = label\n d_array.unit = unit\n d_array.is_setpoint = is_setpoint\n d_array.ndarray = vals\n d_array.shape = dat_arr.attrs['shape']\n # needed because I cannot add set_arrays at this point\n data_set.arrays[array_id]._sa_array_ids = set_arrays\n\n # Add copy/ref of setarrays (not array id only)\n # Note, this is not pretty but a result of how the dataset works\n for array_id, d_array in data_set.arrays.items():\n for sa_id in d_array._sa_array_ids:\n d_array.set_arrays += (data_set.arrays[sa_id], )\n data_set = self.read_metadata(data_set)\n return data_set\n\n def 
_filepath_from_location(self, location, io_manager):\n filename = os.path.split(location)[-1]\n filepath = io_manager.to_path(location +\n f'/{filename}.hdf5')\n return filepath\n\n def _create_data_object(self, data_set, io_manager=None,\n location=None):\n # Create the file if it is not there yet\n if io_manager is None:\n io_manager = data_set.io\n if location is None:\n location = data_set.location\n filepath = self._filepath_from_location(location, io_manager)\n # note that this creates an hdf5 file in a folder with the same\n # name. This is useful for saving e.g. images in the same folder\n # I think this is a sane default (MAR).\n data_set._h5_base_group = self._create_file(filepath)\n data_set._h5_base_group.attrs[\"__qcodes_version\"] = qc.__version__\n data_set._h5_base_group.attrs[\"__format_tag\"] = self._format_tag\n\n return data_set._h5_base_group\n\n def write(self, data_set, io_manager=None, location=None,\n force_write=False, flush=True, write_metadata=True,\n only_complete=False):\n \"\"\"\n Writes a data_set to an hdf5 file.\n\n Args:\n data_set: qcodes data_set to write to hdf5 file\n io_manager: io_manger used for providing path\n location: location can be used to specify custom location\n force_write (bool): if True creates a new file to write to\n flush (bool) : whether to flush after writing, can be disabled\n for testing or performance reasons\n write_metadata (bool): If True write the dataset metadata to disk\n only_complete (bool): Not used by this formatter, but must be\n included in the call signature to avoid an \"unexpected\n keyword argument\" TypeError.\n\n N.B. It is recommended to close the file after writing, this can be\n done by calling ``HDF5Format.close_file(data_set)`` or\n ``data_set.finalize()`` if the data_set formatter is set to an\n hdf5 formatter. Note that this is not required if the dataset\n is created from a Loop as this includes a data_set.finalize()\n statement.\n\n The write function consists of two parts, writing DataArrays and\n writing metadata.\n\n - The main part of write consists of writing and resizing arrays,\n the resizing providing support for incremental writes.\n\n - write_metadata is called at the end of write and dumps a\n dictionary to an hdf5 file. 
If there already is metadata it will\n delete this and overwrite it with current metadata.\n\n \"\"\"\n if not hasattr(data_set, '_h5_base_group') or force_write:\n data_set._h5_base_group = self._create_data_object(\n data_set, io_manager, location)\n\n data_name = 'Data Arrays'\n\n if data_name not in data_set._h5_base_group.keys():\n arr_group = data_set._h5_base_group.create_group(data_name)\n else:\n arr_group = data_set._h5_base_group[data_name]\n\n for array_id in data_set.arrays.keys():\n if array_id not in arr_group.keys() or force_write:\n self._create_dataarray_dset(array=data_set.arrays[array_id],\n group=arr_group)\n dset = arr_group[array_id]\n # Resize the dataset and add the new values\n\n # dataset refers to the hdf5 dataset here\n datasetshape = dset.shape\n old_dlen = datasetshape[0]\n x = data_set.arrays[array_id]\n try:\n # get latest NaN element\n new_dlen = (~np.isnan(x)).flatten().nonzero()[0][-1] + 1\n except IndexError:\n new_dlen = old_dlen\n\n new_datasetshape = (new_dlen,\n datasetshape[1])\n dset.resize(new_datasetshape)\n new_data_shape = (new_dlen - old_dlen, datasetshape[1])\n dset[old_dlen:new_dlen] = x[old_dlen:new_dlen].reshape(\n new_data_shape)\n # allow resizing extracted data, here so it gets written for\n # incremental writes aswell\n dset.attrs['shape'] = x.shape\n if write_metadata:\n self.write_metadata(\n data_set, io_manager=io_manager, location=location)\n\n # flush ensures buffers are written to disk\n # (useful for ensuring openable by other files)\n if flush:\n data_set._h5_base_group.file.flush()\n\n def _create_dataarray_dset(self, array, group):\n \"\"\"\n input arguments\n array: Dataset data array\n group: group in the hdf5 file where the dset will be created\n\n creates a hdf5 datasaset that represents the data array.\n \"\"\"\n # Check for empty meta attributes, use array_id if name and/or label\n # is not specified\n if array.label is not None:\n label = array.label\n else:\n label = array.array_id\n\n if array.name is not None:\n name = array.name\n else:\n name = array.array_id\n\n # Create the hdf5 dataset\n dset = group.create_dataset(\n array.array_id, (0, 1),\n maxshape=(None, 1))\n dset.attrs['label'] = _encode_to_utf8(str(label))\n dset.attrs['name'] = _encode_to_utf8(str(name))\n dset.attrs['unit'] = _encode_to_utf8(str(array.unit or ''))\n dset.attrs['is_setpoint'] = _encode_to_utf8(str(array.is_setpoint))\n\n set_arrays = []\n # list will remain empty if array does not have set_array\n for i in range(len(array.set_arrays)):\n set_arrays += [_encode_to_utf8(\n str(array.set_arrays[i].array_id))]\n dset.attrs['set_arrays'] = set_arrays\n\n return dset\n\n def write_metadata(self, data_set, io_manager=None, location=None, read_first=True, **kwargs):\n \"\"\"\n Writes metadata of dataset to file using write_dict_to_hdf5 method\n\n Note that io and location are arguments that are only here because\n of backwards compatibility with the loop.\n This formatter uses io and location as specified for the main\n dataset.\n The read_first argument is ignored.\n \"\"\"\n if not hasattr(data_set, '_h5_base_group'):\n # added here because loop writes metadata before data itself\n data_set._h5_base_group = self._create_data_object(data_set)\n if 'metadata' in data_set._h5_base_group.keys():\n del data_set._h5_base_group['metadata']\n metadata_group = data_set._h5_base_group.create_group('metadata')\n self.write_dict_to_hdf5(data_set.metadata, metadata_group)\n\n # flush ensures buffers are written to disk\n # (useful for ensuring openable 
by other files)\n data_set._h5_base_group.file.flush()\n\n def _read_list_group(self, entry_point, list_type):\n d = {}\n self.read_dict_from_hdf5(data_dict=d,\n h5_group=entry_point[list_type])\n\n if list_type == 'tuple':\n item = tuple(d[k] for k in sorted(d.keys()))\n elif list_type == 'list':\n item = [d[k] for k in sorted(d.keys())]\n else:\n raise Exception('type %s not supported' % list_type)\n\n return item\n\n def _write_list_group(self, key, item, entry_point, list_type):\n entry_point.create_group(key)\n group_attrs = entry_point[key].attrs\n group_attrs['list_type'] = list_type\n\n if list_type == 'tuple' or list_type == 'list':\n item = {str(v[0]): v[1] for v in enumerate(item)}\n else:\n raise Exception('type %s not supported' % type(item))\n\n entry_point[key].create_group(list_type)\n self.write_dict_to_hdf5(\n data_dict=item,\n entry_point=entry_point[key][list_type])\n\n def write_dict_to_hdf5(self, data_dict, entry_point):\n \"\"\" Write a (nested) dictionary to HDF5\n\n Args:\n data_dict (dict): Dicionary to be written\n entry_point (object): Object to write to\n \"\"\"\n for key, item in data_dict.items():\n if isinstance(key, (float, int)):\n key = '__' + str(type(key)) + '__' + str(key)\n\n if isinstance(item, (str, bool, float, int)):\n entry_point.attrs[key] = item\n elif isinstance(item, np.ndarray):\n entry_point.create_dataset(key, data=item)\n elif isinstance(item, (np.int32, np.int64)):\n entry_point.attrs[key] = int(item)\n elif item is None:\n # as h5py does not support saving None as attribute\n # I create special string, note that this can create\n # unexpected behaviour if someone saves a string with this name\n entry_point.attrs[key] = 'NoneType:__None__'\n elif isinstance(item, dict):\n entry_point.create_group(key)\n self.write_dict_to_hdf5(data_dict=item,\n entry_point=entry_point[key])\n elif isinstance(item, tuple):\n self._write_list_group(key, item, entry_point, 'tuple')\n elif isinstance(item, list):\n if len(item) > 0:\n elt_type = type(item[0])\n if all(isinstance(x, elt_type) for x in item):\n if isinstance(item[0], (int, float,\n np.int32, np.int64)):\n\n entry_point.create_dataset(key,\n data=np.array(item))\n entry_point[key].attrs['list_type'] = 'array'\n elif isinstance(item[0], str):\n dt = h5py.special_dtype(vlen=str)\n data = np.array(item)\n data = data.reshape((-1, 1))\n ds = entry_point.create_dataset(\n key, (len(data), 1), dtype=dt)\n ds[:] = data\n elif isinstance(item[0], dict):\n entry_point.create_group(key)\n group_attrs = entry_point[key].attrs\n group_attrs['list_type'] = 'dict'\n base_list_key = 'list_idx_{}'\n group_attrs['base_list_key'] = base_list_key\n group_attrs['list_length'] = len(item)\n for i, list_item in enumerate(item):\n list_item_grp = entry_point[key].create_group(\n base_list_key.format(i))\n self.write_dict_to_hdf5(\n data_dict=list_item,\n entry_point=list_item_grp)\n else:\n logging.warning(\n 'List of type \"{}\" for \"{}\":\"{}\" not '\n 'supported, storing as string'.format(\n elt_type, key, item))\n entry_point.attrs[key] = str(item)\n else:\n self._write_list_group(key, item, entry_point, 'list')\n else:\n # as h5py does not support saving None as attribute\n entry_point.attrs[key] = 'NoneType:__emptylist__'\n\n else:\n logging.warning(\n 'Type \"{}\" for \"{}\":\"{}\" not supported, '\n 'storing as string'.format(type(item), key, item))\n entry_point.attrs[key] = str(item)\n\n def read_metadata(self, data_set: 'DataSet'):\n \"\"\"\n Reads in the metadata, this is also called at the end of a 
read\n statement so there should be no need to call this explicitly.\n\n Args:\n data_set: Dataset object to read the metadata into\n \"\"\"\n # checks if there is an open file in the dataset as load_data does\n # reading of metadata before reading the complete dataset\n if not hasattr(self, '_h5_base_group'):\n self._open_file(data_set)\n if 'metadata' in data_set._h5_base_group.keys():\n metadata_group = data_set._h5_base_group['metadata']\n self.read_dict_from_hdf5(data_set.metadata, metadata_group)\n return data_set\n\n def read_dict_from_hdf5(self, data_dict, h5_group):\n \"\"\" Read a dictionary from HDF5\n\n Args:\n data_dict (dict): Dataset to read from\n h5_group (object): HDF5 object to read from\n \"\"\"\n\n if 'list_type' not in h5_group.attrs:\n for key, item in h5_group.items():\n if isinstance(item, h5py.Group):\n data_dict[key] = {}\n data_dict[key] = self.read_dict_from_hdf5(data_dict[key],\n item)\n else: # item either a group or a dataset\n if 'list_type' not in item.attrs:\n data_dict[key] = item[...]\n else:\n data_dict[key] = list(item[...])\n for key, item in h5_group.attrs.items():\n if type(item) is str:\n # Extracts \"None\" as an exception as h5py does not support\n # storing None, nested if statement to avoid elementwise\n # comparison warning\n if item == 'NoneType:__None__':\n item = None\n elif item == 'NoneType:__emptylist__':\n item = []\n else:\n pass\n data_dict[key] = item\n elif h5_group.attrs['list_type'] == 'tuple':\n data_dict = self._read_list_group(h5_group, 'tuple')\n elif h5_group.attrs['list_type'] == 'list':\n data_dict = self._read_list_group(h5_group, 'list')\n elif h5_group.attrs['list_type'] == 'dict':\n # preallocate empty list\n list_to_be_filled = [None] * h5_group.attrs['list_length']\n base_list_key = h5_group.attrs['base_list_key']\n for i in range(h5_group.attrs['list_length']):\n list_to_be_filled[i] = {}\n self.read_dict_from_hdf5(\n data_dict=list_to_be_filled[i],\n h5_group=h5_group[base_list_key.format(i)])\n\n # THe error is here!, extract correctly but not adding to\n # data dict correctly\n data_dict = list_to_be_filled\n else:\n raise NotImplementedError('cannot read \"list_type\":\"{}\"'.format(\n h5_group.attrs['list_type']))\n return data_dict\n\n\ndef _encode_to_utf8(s):\n \"\"\"\n Required because h5py does not support python3 strings\n converts byte type to string\n \"\"\"\n return s.encode('utf-8')\n\n\ndef str_to_bool(s):\n if s == 'True':\n return True\n elif s == 'False':\n return False\n else:\n raise ValueError(f\"Cannot covert {s} to a bool\")\n\n\nfrom qcodes.utils.helpers import NumpyJSONEncoder, deep_update\n\n\nclass HDF5FormatMetadata(HDF5Format):\n\n _format_tag = 'hdf5-json'\n metadata_file = 'snapshot.json'\n\n def write_metadata(self, data_set: 'DataSet', io_manager=None, location=None, read_first=False, **kwargs):\n \"\"\"\n Write all metadata in this DataSet to storage.\n\n Args:\n data_set: the data we're storing\n\n io_manager (io_manager): the base location to write to\n\n location (str): the file location within io_manager\n\n read_first (Optional[bool]): read previously saved metadata before\n writing? The current metadata will still be the used if\n there are changes, but if the saved metadata has information\n not present in the current metadata, it will be retained.\n Default True.\n kwargs (dict): From the dicionary the key sort_keys is extracted (default value: False). If True, then the\n keys of the metadata will be stored sorted in the json file. 
Note: sorting is only possible if\n the keys of the metadata dictionary can be compared.\n\n \"\"\"\n sort_keys = kwargs.get('sort_keys', False)\n\n # this statement is here to make the linter happy\n if io_manager is None or location is None:\n raise Exception('please set io_manager and location arguments ')\n\n if read_first:\n # In case the saved file has more metadata than we have here,\n # read it in first. But any changes to the in-memory copy should\n # override the saved file data.\n memory_metadata = data_set.metadata\n data_set.metadata = {}\n self.read_metadata(data_set)\n deep_update(data_set.metadata, memory_metadata)\n\n fn = io_manager.join(location, self.metadata_file)\n with io_manager.open(fn, 'w', encoding='utf8') as snap_file:\n json.dump(data_set.metadata, snap_file, sort_keys=sort_keys,\n indent=4, ensure_ascii=False, cls=NumpyJSONEncoder)\n\n def read_metadata(self, data_set):\n io_manager = data_set.io\n location = data_set.location\n fn = io_manager.join(location, self.metadata_file)\n if io_manager.list(fn):\n with io_manager.open(fn, 'r') as snap_file:\n metadata = json.load(snap_file)\n data_set.metadata.update(metadata)\n" ]
[ [ "numpy.frombuffer", "numpy.linspace" ], [ "numpy.isnan", "numpy.append", "numpy.array", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OneraHub/WhatsOpt-Tutorial
[ "12d14e04defa1d44e43c47486801af4bf1284987", "12d14e04defa1d44e43c47486801af4bf1284987" ]
[ "examples/ssbj/mda/propulsion_base.py", "examples/mod_branin/run_screening.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n propulsion_base.py generated by WhatsOpt 1.10.4\n\"\"\"\n# DO NOT EDIT unless you know what you are doing\n# whatsopt_url: \n# analysis_id: 3\n\n\nimport numpy as np\nfrom numpy import nan\nfrom os import path\nfrom importlib import import_module\nfrom yaml import load, FullLoader\nfrom openmdao.api import ExplicitComponent\n\nclass PropulsionBase(ExplicitComponent):\n \"\"\" An OpenMDAO base component to encapsulate Propulsion discipline \"\"\"\n\n def __init__(self, **kwargs):\n super(PropulsionBase, self).__init__(**kwargs)\n self._impl = None\n dockconf = path.join(path.dirname(__file__), \".whatsopt_dock.yml\")\n if path.exists(dockconf):\n with open(dockconf) as dockfile:\n dock = load(dockfile, Loader=FullLoader)\n impl = dock.get(\"propulsion\")\n if impl:\n module = import_module(impl['module'])\n self._impl = getattr(module, impl['class'])()\n\n def setup(self):\n self.add_input('D', val=np.ones((1,)), desc='')\n self.add_input('x_pro', val=1.0, desc='')\n self.add_input('z', val=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], desc='')\n\n self.add_output('DT', val=np.ones((1,)), desc='')\n\n self.add_output('ESF', val=np.ones((1,)), desc='')\n\n self.add_output('SFC', val=np.ones((1,)), desc='')\n\n self.add_output('Temp', val=np.ones((1,)), desc='')\n\n self.add_output('WE', val=np.ones((1,)), desc='')\n\n\n self.declare_partials('*', '*')\n\n ", "# -*- coding: utf-8 -*-\n\"\"\"\n run_screening.py generated by WhatsOpt. \n\"\"\"\n# DO NOT EDIT unless you know what you are doing\n# analysis_id: 49\n\nimport sys\nimport numpy as np\n# import matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom openmdao.api import Problem, SqliteRecorder, CaseReader\nfrom whatsopt.salib_doe_driver import SalibDoeDriver\nfrom SALib.analyze import morris as ma\nfrom SALib.plotting import morris as mp\nfrom mod_branin import ModBranin \n\nfrom optparse import OptionParser\nparser = OptionParser()\nparser.add_option(\"-b\", \"--batch\",\n action=\"store_true\", dest=\"batch\", default=False,\n help=\"do not plot anything\")\n(options, args) = parser.parse_args()\n\npb = Problem(ModBranin())\npb.driver = SalibDoeDriver(n_trajs=10, n_levels=4, grid_step_size=1)\ncase_recorder_filename = 'mod_branin_screening.sqlite' \nrecorder = SqliteRecorder(case_recorder_filename)\npb.driver.add_recorder(recorder)\npb.model.add_recorder(recorder)\npb.model.nonlinear_solver.add_recorder(recorder)\n\npb.model.add_design_var('x1', lower=-5, upper=10)\npb.model.add_design_var('x2', lower=0, upper=15)\npb.model.add_objective('f')\npb.model.add_constraint('g', upper=0.)\npb.setup() \npb.run_driver() \n\nif options.batch:\n exit(0)\nreader = CaseReader(case_recorder_filename)\ncases = reader.system_cases.list_cases()\nn = len(cases)\ndata = {'inputs': {}, 'outputs': {} }\ndata['inputs']['x1'] = np.zeros((n,)+(1,))\ndata['inputs']['x2'] = np.zeros((n,)+(1,))\ndata['outputs']['f'] = np.zeros((n,)+(1,))\ndata['outputs']['g'] = np.zeros((n,)+(1,))\n\nfor i, case_id in enumerate(cases):\n case = reader.system_cases.get_case(case_id)\n data['inputs']['x1'][i,:] = case.inputs['x1']\n data['inputs']['x2'][i,:] = case.inputs['x2']\n data['outputs']['f'][i,:] = case.outputs['f']\n data['outputs']['g'][i,:] = case.outputs['g']\n\nsalib_pb = pb.driver.get_salib_problem()\ninputs = pb.driver.get_cases()\n\nprint('*** Output: f')\noutput = data['outputs']['f'].reshape((-1,))\nSi = ma.analyze(salib_pb, inputs, output, print_to_console=True)\nfig, (ax1, ax2) = plt.subplots(1,2)\nfig.suptitle('f 
'+'sensitivity')\nmp.horizontal_bar_plot(ax1, Si, {})\nmp.covariance_plot(ax2, Si, {})\n\nprint('*** Output: g')\noutput = data['outputs']['g'].reshape((-1,))\nSi = ma.analyze(salib_pb, inputs, output, print_to_console=True)\nfig, (ax1, ax2) = plt.subplots(1,2)\nfig.suptitle('g '+'sensitivity')\nmp.horizontal_bar_plot(ax1, Si, {})\nmp.covariance_plot(ax2, Si, {})\n\n\nplt.show()\n" ]
[ [ "numpy.ones" ], [ "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bendikbo/SSED
[ "fdd0e74d419687bc8cba65341d7248ca6ccd1a4e" ]
[ "train.py" ]
[ "import argparse\nimport logging\nimport torch\nimport pathlib\nimport numpy as np\nfrom classifier.config.defaults import cfg\nfrom classifier.data.build import make_data_loaders\nfrom classifier.logger import setup_logger\nfrom classifier.trainer import Trainer\nfrom classifier.models import build_model\nfrom classifier.utils import to_cuda\n\nnp.random.seed(0)\ntorch.manual_seed(0)\n\n\n\ndef start_train(cfg):\n logger = logging.getLogger('classification.trainer')\n model = build_model(cfg)\n model = to_cuda(model)\n dataloaders = make_data_loaders(cfg)\n trainer = Trainer(\n cfg,\n model=model,\n dataloaders=dataloaders\n )\n trainer.train()\n return trainer.model\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='Single Record MultiLine Detector Training With PyTorch')\n parser.add_argument(\n \"config_file\",\n default=\"\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\ndef main():\n args = get_parser().parse_args()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n output_dir = pathlib.Path(cfg.OUTPUT_DIR)\n output_dir.mkdir(exist_ok=True, parents=True)\n logger = setup_logger(\"Classifier\", output_dir)\n logger.info(args)\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, \"r\") as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n model = start_train(cfg)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.manual_seed", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akshatabhat/TVQA
[ "85c50b26eb8941781dc4bb93bce61201aff4643d" ]
[ "model/bidaf.py" ]
[ "__author__ = \"Jie Lei\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass BidafAttn(nn.Module):\n \"\"\"from the BiDAF paper https://arxiv.org/abs/1611.01603.\n Implemented by @easonnie and @jayleicn\n \"\"\"\n def __init__(self, channel_size, method=\"original\", get_h=False):\n super(BidafAttn, self).__init__()\n \"\"\"\n This method do biDaf from s2 to s1:\n The return value will have the same size as s1.\n :param channel_size: Hidden size of the input\n \"\"\"\n self.method = method\n self.get_h = get_h\n if method == \"original\":\n self.mlp = nn.Linear(channel_size * 3, 1, bias=False)\n\n def similarity(self, s1, l1, s2, l2):\n \"\"\"\n :param s1: [B, t1, D]\n :param l1: [B]\n :param s2: [B, t2, D]\n :param l2: [B]\n :return:\n \"\"\"\n if self.method == \"original\":\n t1 = s1.size(1)\n t2 = s2.size(1)\n repeat_s1 = s1.unsqueeze(2).repeat(1, 1, t2, 1) # [B, T1, T2, D]\n repeat_s2 = s2.unsqueeze(1).repeat(1, t1, 1, 1) # [B, T1, T2, D]\n packed_s1_s2 = torch.cat([repeat_s1, repeat_s2, repeat_s1 * repeat_s2], dim=3) # [B, T1, T2, D*3]\n s = self.mlp(packed_s1_s2).squeeze() # s is the similarity matrix from biDAF paper. [B, T1, T2]\n elif self.method == \"dot\":\n s = torch.bmm(s1, s2.transpose(1, 2))\n\n s_mask = s.data.new(*s.size()).fill_(1).byte() # [B, T1, T2]\n # Init similarity mask using lengths\n for i, (l_1, l_2) in enumerate(zip(l1, l2)):\n s_mask[i][:l_1, :l_2] = 0\n\n s_mask = Variable(s_mask)\n s.data.masked_fill_(s_mask.data.byte(), -float(\"inf\"))\n return s\n\n @classmethod\n def get_u_tile(cls, s, s2):\n \"\"\"\n attended vectors of s2 for each word in s1,\n signify which words in s2 are most relevant to words in s1\n \"\"\"\n a_weight = F.softmax(s, dim=2) # [B, t1, t2]\n a_weight.data.masked_fill_(a_weight.data != a_weight.data, 0) # remove nan from softmax on -inf\n u_tile = torch.bmm(a_weight, s2) # [B, t1, t2] * [B, t2, D] -> [B, t1, D]\n return u_tile\n\n @classmethod\n def get_h_tile(cls, s, s1):\n \"\"\"\n attended vectors of s1\n which words in s1 is most similar to each words in s2\n \"\"\"\n t1 = s1.size(1)\n b_weight = F.softmax(torch.max(s, dim=2)[0], dim=-1).unsqueeze(1) # [b, t2]\n h_tile = torch.bmm(b_weight, s1).repeat(1, t1, 1) # repeat to match s1 # [B, t1, D]\n return h_tile\n\n def forward(self, s1, l1, s2, l2):\n s = self.similarity(s1, l1, s2, l2)\n u_tile = self.get_u_tile(s, s2)\n # h_tile = self.get_h_tile(s, s1)\n h_tile = self.get_h_tile(s, s1) if self.get_h else None\n return u_tile, h_tile\n # return u_tile\n\n" ]
[ [ "torch.nn.functional.softmax", "torch.max", "torch.cat", "torch.nn.Linear", "torch.bmm", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JiaHsin/ftf-re-api
[ "02e7ed5d0628a1d5a9aec3b37f6bcba93bf3284e" ]
[ "reporting_engine/transform_layer/tests/test_calc_new_families.py" ]
[ "from django.test import TestCase\nfrom django.db import connections\nimport pandas\nfrom pandas.testing import assert_frame_equal, assert_series_equal\nfrom transform_layer.services.data_service import DataService\nimport transform_layer.calculations as calc\n\nimport json\nimport math\nimport unittest\nimport os\nimport pyreadr\n\n#How 'off' the value returned by a data def can be before it is considered wrong\n#.005 = .5% of expected\nREL_TOL = .01\n\nbase_scope = {\n \"startDate\":\"01/01/2020\",\n \"endDate\":\"12/31/2020\",\n \"scope_type\": \"hierarchy\",\n \"scope_field\":\"loc_id\",\n \"scope_field_value\":6,\n \"control_type_name\":\"Is Grocery Service\"\n}\n\nTEST_DATA_SERVICE = DataService(base_scope)\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nbase_families = pyreadr.read_r(os.path.join(__location__, './test_data/test_calc_new_families/base_families.rds'))[None]\nbase_members = pyreadr.read_r(os.path.join(__location__, './test_data/test_calc_new_families/base_members.rds'))[None]\nbase_services = pyreadr.read_r(os.path.join(__location__, './test_data/test_calc_new_families/base_services.rds'))[None]\n\n#substitue the call to TEST_DATA_SERVICE.get_data_for_definition with this\n#its the data that david used in his calculations\nBASE_DATA = [base_services, base_families, base_members]\n\nclass CalculationsTestCase(unittest.TestCase):\n #test for data def 32\n def test_get_new_families(self):\n expected = 6307\n data = BASE_DATA\n func = calc.data_calc_function_switcher[32]\n result = func(data)\n self.assertTrue(math.isclose(result, expected))\n\n #test for data def 33\n def test_get_new_members(self):\n expected = 20779\n data = BASE_DATA\n func = calc.data_calc_function_switcher[33]\n result = func(data)\n self.assertTrue(math.isclose(result, expected))\n\n #test for data def 34\n def test_get_new_members_to_old_families(self):\n expected = 19160\n data = BASE_DATA\n func = calc.data_calc_function_switcher[34]\n result = func(data)\n self.assertTrue(math.isclose(result, expected))\n\n #test for data def 35\n def test_get_services_to_new_families(self):\n expected = 22790\n data = BASE_DATA\n func = calc.data_calc_function_switcher[35]\n result = func(data)\n self.assertTrue(math.isclose(result, expected))\n\n #test for data def 36\n def test_get_families_first_service(self):\n expected = 6352\n data = BASE_DATA\n func = calc.data_calc_function_switcher[36]\n result = func(data)\n self.assertTrue(math.isclose(result, expected))\n\n #test for data def 37/38\n def test_get_new_families_freq_visits(self):\n expected = pandas.read_csv(\n os.path.join(__location__, './expected_results/results_new_fam_service_distribution.csv'),\n index_col = 'num_services'\n )\n #data = TEST_DATA_SERVICE.get_data_for_definition(38)\n data = BASE_DATA \n func = calc.data_calc_function_switcher[37]\n result = func(data)\n resultFrame = pandas.read_json(result)\n assert_frame_equal(resultFrame, expected, check_like = True)\n \n #test for data def 39\n def test_get_new_fam_household_composition(self):\n expected = {\n \"family_composition_type\": {\n \"0\":\"adults_and_children\",\n \"1\": \"adults_and_seniors\",\n \"2\": \"adults_only\",\n \"3\": \"adults_seniors_and_children\",\n \"4\": \"children_and_seniors\",\n \"5\": \"children_only\",\n \"6\": \"seniors_only\"\n },\n \"num_families\": {\n \"0\":2622,\n \"1\": 447,\n \"2\": 2467,\n \"3\": 297,\n \"4\": 36,\n \"5\": 16,\n \"6\": 422\n }\n }\n #data = TEST_DATA_SERVICE.get_data_for_definition(38)\n 
data = BASE_DATA \n        func = calc.data_calc_function_switcher[39]\n        result = func(data)\n        resultDict = json.loads(result)\n        self.maxDiff = None\n        self.assertDictEqual(resultDict, expected)\n\n    #test for data def 40\n    def test_get_new_fam_composition_key_insight(self):\n        expected = {\n            \"has_child_senior\":3840,\n            \"no_child_senior\":2467\n        }\n        #data = TEST_DATA_SERVICE.get_data_for_definition(38)\n        data = BASE_DATA \n        func = calc.data_calc_function_switcher[40]\n        result = func(data)\n        result = json.loads(result)\n        self.maxDiff = None\n        self.assertDictEqual(result, expected)\n\n    #test for data def 41\n    def test_get_new_fam_hh_size_dist_1_to_10(self):\n        expected = pandas.read_csv(\n            os.path.join(__location__, './expected_results/results_new_fam_hh_size_dist_1_to_10.csv'),\n            index_col = 'index'\n        )\n        data = BASE_DATA \n        func = calc.data_calc_function_switcher[41]\n        result = func(data)\n        resultFrame = pandas.read_json(result)\n        assert_frame_equal(resultFrame, expected, check_like = True)\n\n    #test for data def 42\n    def test_get_new_fam_hh_size_dist_classic(self):\n        expected = {\n            '1 - 3':3965,\n            '4 - 6':2040,\n            '7+':302\n        }\n        expected = pandas.Series(data = expected)\n\n        #data = TEST_DATA_SERVICE.get_data_for_definition(42)\n        data = BASE_DATA\n\n        func = calc.data_calc_function_switcher[42]\n        result = func(data)\n        resultDict = json.loads(result)\n        resultFrame = pandas.Series(data = resultDict)\n        assert_series_equal(resultFrame, expected)\n\n    #test for data def 43\n    def test_get_relationship_length_indv_mean(self):\n        expected = 809.5147\n        data = BASE_DATA\n\n        func = calc.data_calc_function_switcher[43]\n        result = func(data)\n        self.assertTrue(math.isclose(round(result,4), expected))\n\n    #test for data def 45\n    def test_get_relationship_length_indv_mean_45(self):\n        expected = 792.9765\n        #data = TEST_DATA_SERVICE.get_data_for_definition(45)\n        data = BASE_DATA\n\n        func = calc.data_calc_function_switcher[45]\n        result = func(data)\n        self.assertTrue(math.isclose(round(result,4), expected))\n\nif __name__ == '__main__':\n    unittest.main()" ]
[ [ "pandas.testing.assert_series_equal", "pandas.testing.assert_frame_equal", "pandas.Series", "pandas.read_json" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
sagieppel/Predicting-Material-properties-of-objects-and-liquids-inside-transparent-vessels-from-image
[ "54d9e2649dc8d24a55ad1d05d4e395b80c9c141c" ]
[ "TRAIN.py" ]
[ "# Train net Given an image of vessel and content and mask (region) of the vessel predict the material of the vessel content and the material of the vessel\n#...............................Imports..................................................................\nimport os\nimport numpy as np\nimport NetModel\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport Reader as DataReader\nimport Visuallization as vis\nimport cv2\n\n#################################Input paramaters#########################################################################################\n#.................................Main Input parametrs...........................................................................................\nDataFolder={} # Contain list of folders that will contain training data\nDataFolder[\"LiquidContent\"]=r\"Examples/Train//\"\n# TransProteusFolder[\"ObjectContent\"]=r\"TranProteus/Training/ObjectContent/\"\n# TransProteusFolder[\"ObjectContent2\"]=r\"TranProteus/Training/SingleObjectContent/\"\n# TransProteusFolder[\"LiquidContent\"]=r\"TranProteus/Training/LiquidContent/\"\n# TransProteusFolder[\"LiquidFlat\"]=r\"TranProteus/Training/FlatSurfaceLiquids/\n\nMinSize=280 # Min image dimension size (Height,width)\nMaxSize=1000# Max image dimension size (Height,width)\nMaxPixels=800*800*1.5# Max size of training batch in pixel, reduce to solve out of memory problems\nMaxBatchSize=6#MAx number images in a batch\nInputMaskType=\"VesselMask\" ##\"ContentMask\" # Type of input mask for the net (\"Vessel\"/\"Content\")\nTrained_model_path=\"\" # Path of trained model weights If you want to return to trained model, else should be =\"\"\nLearning_Rate=1e-5 # learning rate\nTrainedModelWeightDir=\"logs/\" # Folder where trained model weight and information will be stored\"\nWeight_Decay=1e-5# Weight for the weight decay loss function\nTrainLossTxtFile=TrainedModelWeightDir+\"TrainLoss.txt\" #Where train losses will be writen\nMAX_ITERATION = int(100000010) # Max number of training iteration\n##################################################################################################################################################\n# ------------------------------ Properties to predict------------------------------------------------\nOrderedPropertiesToPredict=[\"Transmission\",\"Base Color\",\"Metalic\",\"Transmission Roguhness\",\"Roughness\"] #List of property to predict in the right order Dictionary is orderless\nDimPropertiesToPredict={'Transmission':1,'Base Color':3,'Metalic':1,'Transmission Roguhness':1,'Roughness':1} # List of peoperties to predict and the vector size for each properry\nObjectsToPredict=[\"ContentMaterial\",\"VesselMaterial\"] # List of objects to predict properties of\n#******************************Create folder for statics file and weights*********************************************************************************************************************\n\nif not os.path.exists(TrainedModelWeightDir):\n os.mkdir(TrainedModelWeightDir)\n#=========================Load net weights from previous run (if training was interupted)====================================================================================================================\nInitStep=1\nif os.path.exists(TrainedModelWeightDir + \"/Defult.torch\"):\n Trained_model_path=TrainedModelWeightDir + \"/Defult.torch\"\nif os.path.exists(TrainedModelWeightDir+\"/Learning_Rate.npy\"):\n Learning_Rate=np.load(TrainedModelWeightDir+\"/Learning_Rate.npy\")\nif 
os.path.exists(TrainedModelWeightDir+\"/itr.npy\"): InitStep=int(np.load(TrainedModelWeightDir+\"/itr.npy\"))\n\n#---------------------Create and Initiate net and create optimizer------------------------------------------------------------------------------------\nNet=NetModel.Net(OrderedObjectsToPredict=ObjectsToPredict,OrderedPropertiesToPredict=OrderedPropertiesToPredict,DimPropertiesToPredict=DimPropertiesToPredict) # Create net and load pretrained\n\nif Trained_model_path!=\"\": # Optional initiate full net by loading a pretrained net\n Net.load_state_dict(torch.load(Trained_model_path))\nNet=Net.cuda()\n\n#------------------------------------Create optimizer-------------------------------------------------------------------------------------\noptimizer=torch.optim.Adam(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay) # Create adam optimizer\ntorch.save(Net.state_dict(), TrainedModelWeightDir + \"/\" + \"test\" + \".torch\")# Test saving the weight to see that all folders exists\n\n#----------------------------------------# Create Array of Readers for each input folder--------------------------------------------------------------------------------------------------------------\n\nReaders={} # Array of Readers for each input folder\nfor nm in DataFolder:\n Readers[nm]=DataReader.Reader(DataFolder[nm],DimPropertiesToPredict,MaxBatchSize,MinSize,MaxSize,MaxPixels,TrainingMode=True)\n#--------------------------- Create logs files for saving loss during training----------------------------------------------------------------------------------------------------------\n\nif not os.path.exists(TrainedModelWeightDir): os.makedirs(TrainedModelWeightDir) # Create folder for trained weight\nf = open(TrainLossTxtFile, \"w+\")# Training loss log file\nf.write(\"Iteration\\tloss\\t Learning Rate=\")\nf.close()\n#-------------------Create statics dictionary for keeping track of loss during training--------------------------------------------------------------------------------\nPrevAvgLoss=0\n\nAVGCatLoss={}\nfor nm in ObjectsToPredict:\n for ky in OrderedPropertiesToPredict:\n AVGCatLoss[nm+\"_\"+ky]=0\nAVGCatLoss[\"Total\"]=0\n##############################################################################################################################\n#..............Start Training loop: Main Training....................................................................\nprint(\"Start Training\")\nfor itr in range(InitStep,MAX_ITERATION): # Main training loop\n\n\n print(\"------------------------------\",itr,\"------------------------------------------------\")\n readertype=list(Readers)[np.random.randint(len(list(Readers)))] # Pick random reader (dataset)\n print(readertype)\n\n GTMaps, GTMaterials = Readers[readertype].LoadBatch() # Load training batch\n #################***************************************************************************************************\n # batchSize=GTMaps[\"VesselWithContentRGB\"].shape[0]\n # for i in range(batchSize):\n # for nm in GTMaps:\n #\n # print(nm, GTMaps[nm][i].max(),GTMaps[nm][i].min())\n # tmIm = GTMaps[nm][i].copy()\n # if GTMaps[nm][i].max()>255 or GTMaps[nm][i].min()<0 or np.ndim(GTMaps[nm][i])==2:\n # if tmIm.max()>tmIm.min():\n # tmIm[tmIm>1000]=0\n # tmIm = tmIm-tmIm.min()\n # tmIm = tmIm/tmIm.max()*255\n # print(nm,\"New\", tmIm.max(), tmIm.min())\n # if np.ndim(tmIm)==2: tmIm=cv2.cvtColor(tmIm, cv2.COLOR_GRAY2BGR)\n # vis.show(np.hstack([tmIm,GTMaps[\"VesselWithContentRGB\"][i].astype(np.uint8)]) ,nm+ \" Max=\" + 
str(GTMaps[nm][i].max()) + \" Min=\" + str(GTMaps[nm][i].min()))\n    # #############*************************Run net and get prediction**********************************************************************\n\n    Prd = Net.forward(Images=GTMaps[\"VesselWithContentRGB\"],ROIMask=GTMaps[InputMaskType]) # Run net inference and get prediction\n    Net.zero_grad()\n    print(\"Calculating loss \")\n    # #**************************************Calculate Loss *************************************************************************************************************************\n    TotalLoss=0 # Total loss for every object and property\n    LossCat={} # Loss by class and object\n\n    for nm in Prd: # Loss for every object\n        for ky in Prd[nm]: # Loss for every property\n            GTProperty=torch.autograd.Variable(torch.from_numpy(GTMaterials[nm][ky]).cuda(),requires_grad=False) # Convert GT property to pytorch\n            GTPropertyExist=torch.autograd.Variable(torch.from_numpy(GTMaterials[nm][ky+\"_Exist\"]).cuda(), requires_grad=False) # Flags that define whether the property exists for the case\n            # LossCat[nm]=(torch.pow(GTProperty-Prd[nm],2).mean(1)*GTPropertyExist).mean()\n            LossCat[nm+\"_\"+ky] = (torch.abs(GTProperty - Prd[nm][ky]).mean(1) * GTPropertyExist).mean() # L1 loss\n            # if ky=='Base Color': print(nm,\" Prop exist\", GTPropertyExist.mean())\n            TotalLoss+=LossCat[nm+\"_\"+ky]\n    LossCat[\"Total\"]=TotalLoss\n\n#---------------Total Loss and running average loss----------------------------------------------------------------------------------------------------------\n    print(\"Calculating Total Loss\")\n    fr = 1 / np.min([itr - InitStep + 1, 2000])\n\n    for nm in LossCat:\n        if not nm in AVGCatLoss: AVGCatLoss[nm]=0\n        if LossCat[nm]>0:\n            AVGCatLoss[nm]=(1 - fr) * AVGCatLoss[nm] + fr * LossCat[nm].data.cpu().numpy() # Running average loss\n\n#-----------------------Apply backpropagation---------------------------------------------------------------------------------------------------\n\n    TotalLoss.backward() # Backpropagate loss\n    optimizer.step() # Apply gradient descent change to weights\n\n############################################################################################################################\n\n    # Display, save and update the learning rate\n\n#########################################################################################################################\n# --------------Save trained model------------------------------------------------------------------------------------------------------------------------------------------\n    if itr % 300 == 0:# Temporarily save model weights\n        print(\"Saving Model to file in \"+TrainedModelWeightDir+\"/Defult.torch\")\n        torch.save(Net.state_dict(), TrainedModelWeightDir + \"/Defult.torch\")\n        torch.save(Net.state_dict(), TrainedModelWeightDir + \"/DefultBack.torch\")\n        print(\"model saved\")\n        np.save(TrainedModelWeightDir+\"/Learning_Rate.npy\",Learning_Rate)\n        np.save(TrainedModelWeightDir+\"/itr.npy\",itr)\n    if itr % 60000 == 0 and itr>0: # permanently save model weights\n        print(\"Saving Model to file in \"+TrainedModelWeightDir+\"/\"+ str(itr) + \".torch\")\n        torch.save(Net.state_dict(), TrainedModelWeightDir + \"/\" + str(itr) + \".torch\")\n        print(\"model saved\")\n\n#......................Write and display train loss statistics ..........................................................................\n\n    if itr % 10==0:\n        # Display train loss\n        txt=\"\\n\"+str(itr)+\"\\tLearning Rate \\t\"+str(Learning_Rate)\n        for nm in AVGCatLoss:\n            txt+=\"\\tAverage Cat Loss[\"+nm+\"] 
\"+str(AVGCatLoss[nm])+\" \"\n print(txt)\n #Write train loss to file\n with open(TrainLossTxtFile, \"a\") as f:\n f.write(txt)\n f.close()\n# #----------------Update learning rate -------------------------------------------------------------------------------\n if itr%10000==0:\n if \"TotalPrevious\" not in AVGCatLoss:\n AVGCatLoss[\"TotalPrevious\"]=AVGCatLoss[\"Total\"]\n elif AVGCatLoss[\"Total\"]*0.95<AVGCatLoss[\"TotalPrevious\"]: # If loss have decrease in less the 5% since last check, decrease learning rate\n Learning_Rate*=0.9\n if Learning_Rate<=4e-7: # If learning to small increase it back up\n Learning_Rate=5e-6\n print(\"Learning Rate=\"+str(Learning_Rate))\n print(\"======================================================================================================================\")\n optimizer = torch.optim.Adam(params=Net.parameters(), lr=Learning_Rate,weight_decay=Weight_Decay) # Update learning rate in optimizer\n torch.cuda.empty_cache() # Empty cuda memory to avoid memory leaks\n AVGCatLoss[\"TotalPrevious\"]=AVGCatLoss[\"Total\"]+0.0000000001 # Save current average loss for future referance\n\n\n\n" ]
[ [ "torch.abs", "numpy.min", "torch.load", "torch.cuda.empty_cache", "numpy.save", "torch.from_numpy", "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jamesliu/spark
[ "3fddc9468fa50e7683caa973fec6c52e1132268d" ]
[ "python/pyspark/mllib/linalg.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nMLlib utilities for linear algebra. For dense vectors, MLlib\nuses the NumPy C{array} type, so you can simply pass NumPy arrays\naround. For sparse vectors, users can construct a L{SparseVector}\nobject from MLlib or pass SciPy C{scipy.sparse} column vectors if\nSciPy is available in their environment.\n\"\"\"\n\nimport sys\nimport array\nimport copy_reg\n\nimport numpy as np\n\nfrom pyspark.sql import UserDefinedType, StructField, StructType, ArrayType, DoubleType, \\\n IntegerType, ByteType\n\n\n__all__ = ['Vector', 'DenseVector', 'SparseVector', 'Vectors', 'DenseMatrix', 'Matrices']\n\n\nif sys.version_info[:2] == (2, 7):\n # speed up pickling array in Python 2.7\n def fast_pickle_array(ar):\n return array.array, (ar.typecode, ar.tostring())\n copy_reg.pickle(array.array, fast_pickle_array)\n\n\n# Check whether we have SciPy. MLlib works without it too, but if we have it, some methods,\n# such as _dot and _serialize_double_vector, start to support scipy.sparse matrices.\n\ntry:\n import scipy.sparse\n _have_scipy = True\nexcept:\n # No SciPy in environment, but that's okay\n _have_scipy = False\n\n\ndef _convert_to_vector(l):\n if isinstance(l, Vector):\n return l\n elif type(l) in (array.array, np.array, np.ndarray, list, tuple):\n return DenseVector(l)\n elif _have_scipy and scipy.sparse.issparse(l):\n assert l.shape[1] == 1, \"Expected column vector\"\n csc = l.tocsc()\n return SparseVector(l.shape[0], csc.indices, csc.data)\n else:\n raise TypeError(\"Cannot convert type %s into Vector\" % type(l))\n\n\ndef _vector_size(v):\n \"\"\"\n Returns the size of the vector.\n\n >>> _vector_size([1., 2., 3.])\n 3\n >>> _vector_size((1., 2., 3.))\n 3\n >>> _vector_size(array.array('d', [1., 2., 3.]))\n 3\n >>> _vector_size(np.zeros(3))\n 3\n >>> _vector_size(np.zeros((3, 1)))\n 3\n >>> _vector_size(np.zeros((1, 3)))\n Traceback (most recent call last):\n ...\n ValueError: Cannot treat an ndarray of shape (1, 3) as a vector\n \"\"\"\n if isinstance(v, Vector):\n return len(v)\n elif type(v) in (array.array, list, tuple):\n return len(v)\n elif type(v) == np.ndarray:\n if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):\n return len(v)\n else:\n raise ValueError(\"Cannot treat an ndarray of shape %s as a vector\" % str(v.shape))\n elif _have_scipy and scipy.sparse.issparse(v):\n assert v.shape[1] == 1, \"Expected column vector\"\n return v.shape[0]\n else:\n raise TypeError(\"Cannot treat type %s as a vector\" % type(v))\n\n\ndef _format_float(f, digits=4):\n s = str(round(f, digits))\n if '.' 
in s:\n s = s[:s.index('.') + 1 + digits]\n return s\n\n\nclass VectorUDT(UserDefinedType):\n \"\"\"\n SQL user-defined type (UDT) for Vector.\n \"\"\"\n\n @classmethod\n def sqlType(cls):\n return StructType([\n StructField(\"type\", ByteType(), False),\n StructField(\"size\", IntegerType(), True),\n StructField(\"indices\", ArrayType(IntegerType(), False), True),\n StructField(\"values\", ArrayType(DoubleType(), False), True)])\n\n @classmethod\n def module(cls):\n return \"pyspark.mllib.linalg\"\n\n @classmethod\n def scalaUDT(cls):\n return \"org.apache.spark.mllib.linalg.VectorUDT\"\n\n def serialize(self, obj):\n if isinstance(obj, SparseVector):\n indices = [int(i) for i in obj.indices]\n values = [float(v) for v in obj.values]\n return (0, obj.size, indices, values)\n elif isinstance(obj, DenseVector):\n values = [float(v) for v in obj]\n return (1, None, None, values)\n else:\n raise ValueError(\"cannot serialize %r of type %r\" % (obj, type(obj)))\n\n def deserialize(self, datum):\n assert len(datum) == 4, \\\n \"VectorUDT.deserialize given row with length %d but requires 4\" % len(datum)\n tpe = datum[0]\n if tpe == 0:\n return SparseVector(datum[1], datum[2], datum[3])\n elif tpe == 1:\n return DenseVector(datum[3])\n else:\n raise ValueError(\"do not recognize type %r\" % tpe)\n\n\nclass Vector(object):\n\n __UDT__ = VectorUDT()\n\n \"\"\"\n Abstract class for DenseVector and SparseVector\n \"\"\"\n def toArray(self):\n \"\"\"\n Convert the vector into an numpy.ndarray\n :return: numpy.ndarray\n \"\"\"\n raise NotImplementedError\n\n\nclass DenseVector(Vector):\n \"\"\"\n A dense vector represented by a value array.\n \"\"\"\n def __init__(self, ar):\n if isinstance(ar, basestring):\n ar = np.frombuffer(ar, dtype=np.float64)\n elif not isinstance(ar, np.ndarray):\n ar = np.array(ar, dtype=np.float64)\n if ar.dtype != np.float64:\n ar.astype(np.float64)\n self.array = ar\n\n def __reduce__(self):\n return DenseVector, (self.array.tostring(),)\n\n def dot(self, other):\n \"\"\"\n Compute the dot product of two Vectors. 
We support\n (Numpy array, list, SparseVector, or SciPy sparse)\n and a target NumPy array that is either 1- or 2-dimensional.\n Equivalent to calling numpy.dot of the two vectors.\n\n >>> dense = DenseVector(array.array('d', [1., 2.]))\n >>> dense.dot(dense)\n 5.0\n >>> dense.dot(SparseVector(2, [0, 1], [2., 1.]))\n 4.0\n >>> dense.dot(range(1, 3))\n 5.0\n >>> dense.dot(np.array(range(1, 3)))\n 5.0\n >>> dense.dot([1.,])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))\n array([ 5., 11.])\n >>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F'))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n if type(other) == np.ndarray:\n if other.ndim > 1:\n assert len(self) == other.shape[0], \"dimension mismatch\"\n return np.dot(self.array, other)\n elif _have_scipy and scipy.sparse.issparse(other):\n assert len(self) == other.shape[0], \"dimension mismatch\"\n return other.transpose().dot(self.toArray())\n else:\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n if isinstance(other, SparseVector):\n return other.dot(self)\n elif isinstance(other, Vector):\n return np.dot(self.toArray(), other.toArray())\n else:\n return np.dot(self.toArray(), other)\n\n def squared_distance(self, other):\n \"\"\"\n Squared distance of two Vectors.\n\n >>> dense1 = DenseVector(array.array('d', [1., 2.]))\n >>> dense1.squared_distance(dense1)\n 0.0\n >>> dense2 = np.array([2., 1.])\n >>> dense1.squared_distance(dense2)\n 2.0\n >>> dense3 = [2., 1.]\n >>> dense1.squared_distance(dense3)\n 2.0\n >>> sparse1 = SparseVector(2, [0, 1], [2., 1.])\n >>> dense1.squared_distance(sparse1)\n 2.0\n >>> dense1.squared_distance([1.,])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n if isinstance(other, SparseVector):\n return other.squared_distance(self)\n elif _have_scipy and scipy.sparse.issparse(other):\n return _convert_to_vector(other).squared_distance(self)\n\n if isinstance(other, Vector):\n other = other.toArray()\n elif not isinstance(other, np.ndarray):\n other = np.array(other)\n diff = self.toArray() - other\n return np.dot(diff, diff)\n\n def toArray(self):\n return self.array\n\n def __getitem__(self, item):\n return self.array[item]\n\n def __len__(self):\n return len(self.array)\n\n def __str__(self):\n return \"[\" + \",\".join([str(v) for v in self.array]) + \"]\"\n\n def __repr__(self):\n return \"DenseVector([%s])\" % (', '.join(_format_float(i) for i in self.array))\n\n def __eq__(self, other):\n return isinstance(other, DenseVector) and np.array_equal(self.array, other.array)\n\n def __ne__(self, other):\n return not self == other\n\n def __getattr__(self, item):\n return getattr(self.array, item)\n\n\nclass SparseVector(Vector):\n \"\"\"\n A simple sparse vector class for passing data to MLlib. 
Users may\n alternatively pass SciPy's {scipy.sparse} data types.\n \"\"\"\n def __init__(self, size, *args):\n \"\"\"\n Create a sparse vector, using either a dictionary, a list of\n (index, value) pairs, or two separate arrays of indices and\n values (sorted by index).\n\n :param size: Size of the vector.\n :param args: Non-zero entries, as a dictionary, list of tupes,\n or two sorted lists containing indices and values.\n\n >>> print SparseVector(4, {1: 1.0, 3: 5.5})\n (4,[1,3],[1.0,5.5])\n >>> print SparseVector(4, [(1, 1.0), (3, 5.5)])\n (4,[1,3],[1.0,5.5])\n >>> print SparseVector(4, [1, 3], [1.0, 5.5])\n (4,[1,3],[1.0,5.5])\n \"\"\"\n self.size = int(size)\n assert 1 <= len(args) <= 2, \"must pass either 2 or 3 arguments\"\n if len(args) == 1:\n pairs = args[0]\n if type(pairs) == dict:\n pairs = pairs.items()\n pairs = sorted(pairs)\n self.indices = np.array([p[0] for p in pairs], dtype=np.int32)\n self.values = np.array([p[1] for p in pairs], dtype=np.float64)\n else:\n if isinstance(args[0], basestring):\n assert isinstance(args[1], str), \"values should be string too\"\n if args[0]:\n self.indices = np.frombuffer(args[0], np.int32)\n self.values = np.frombuffer(args[1], np.float64)\n else:\n # np.frombuffer() doesn't work well with empty string in older version\n self.indices = np.array([], dtype=np.int32)\n self.values = np.array([], dtype=np.float64)\n else:\n self.indices = np.array(args[0], dtype=np.int32)\n self.values = np.array(args[1], dtype=np.float64)\n assert len(self.indices) == len(self.values), \"index and value arrays not same length\"\n for i in xrange(len(self.indices) - 1):\n if self.indices[i] >= self.indices[i + 1]:\n raise TypeError(\"indices array must be sorted\")\n\n def __reduce__(self):\n return (SparseVector, (self.size, self.indices.tostring(), self.values.tostring()))\n\n def dot(self, other):\n \"\"\"\n Dot product with a SparseVector or 1- or 2-dimensional Numpy array.\n\n >>> a = SparseVector(4, [1, 3], [3.0, 4.0])\n >>> a.dot(a)\n 25.0\n >>> a.dot(array.array('d', [1., 2., 3., 4.]))\n 22.0\n >>> b = SparseVector(4, [2, 4], [1.0, 2.0])\n >>> a.dot(b)\n 0.0\n >>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))\n array([ 22., 22.])\n >>> a.dot([1., 2., 3.])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> a.dot(np.array([1., 2.]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> a.dot(DenseVector([1., 2.]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> a.dot(np.zeros((3, 2)))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n if type(other) == np.ndarray:\n if other.ndim == 2:\n results = [self.dot(other[:, i]) for i in xrange(other.shape[1])]\n return np.array(results)\n elif other.ndim > 2:\n raise ValueError(\"Cannot call dot with %d-dimensional array\" % other.ndim)\n\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n\n if type(other) in (np.ndarray, array.array, DenseVector):\n result = 0.0\n for i in xrange(len(self.indices)):\n result += self.values[i] * other[self.indices[i]]\n return result\n\n elif type(other) is SparseVector:\n result = 0.0\n i, j = 0, 0\n while i < len(self.indices) and j < len(other.indices):\n if self.indices[i] == other.indices[j]:\n result += self.values[i] * other.values[j]\n i += 1\n j += 1\n elif self.indices[i] < other.indices[j]:\n i += 1\n else:\n j += 1\n return result\n\n else:\n return self.dot(_convert_to_vector(other))\n\n def 
squared_distance(self, other):\n \"\"\"\n Squared distance from a SparseVector or 1-dimensional NumPy array.\n\n >>> a = SparseVector(4, [1, 3], [3.0, 4.0])\n >>> a.squared_distance(a)\n 0.0\n >>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))\n 11.0\n >>> a.squared_distance(np.array([1., 2., 3., 4.]))\n 11.0\n >>> b = SparseVector(4, [2, 4], [1.0, 2.0])\n >>> a.squared_distance(b)\n 30.0\n >>> b.squared_distance(a)\n 30.0\n >>> b.squared_distance([1., 2.])\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n >>> b.squared_distance(SparseVector(3, [1,], [1.0,]))\n Traceback (most recent call last):\n ...\n AssertionError: dimension mismatch\n \"\"\"\n assert len(self) == _vector_size(other), \"dimension mismatch\"\n if type(other) in (list, array.array, DenseVector, np.array, np.ndarray):\n if type(other) is np.array and other.ndim != 1:\n raise Exception(\"Cannot call squared_distance with %d-dimensional array\" %\n other.ndim)\n result = 0.0\n j = 0 # index into our own array\n for i in xrange(len(other)):\n if j < len(self.indices) and self.indices[j] == i:\n diff = self.values[j] - other[i]\n result += diff * diff\n j += 1\n else:\n result += other[i] * other[i]\n return result\n\n elif type(other) is SparseVector:\n result = 0.0\n i, j = 0, 0\n while i < len(self.indices) and j < len(other.indices):\n if self.indices[i] == other.indices[j]:\n diff = self.values[i] - other.values[j]\n result += diff * diff\n i += 1\n j += 1\n elif self.indices[i] < other.indices[j]:\n result += self.values[i] * self.values[i]\n i += 1\n else:\n result += other.values[j] * other.values[j]\n j += 1\n while i < len(self.indices):\n result += self.values[i] * self.values[i]\n i += 1\n while j < len(other.indices):\n result += other.values[j] * other.values[j]\n j += 1\n return result\n else:\n return self.squared_distance(_convert_to_vector(other))\n\n def toArray(self):\n \"\"\"\n Returns a copy of this SparseVector as a 1-dimensional NumPy array.\n \"\"\"\n arr = np.zeros((self.size,), dtype=np.float64)\n arr[self.indices] = self.values\n return arr\n\n def __len__(self):\n return self.size\n\n def __str__(self):\n inds = \"[\" + \",\".join([str(i) for i in self.indices]) + \"]\"\n vals = \"[\" + \",\".join([str(v) for v in self.values]) + \"]\"\n return \"(\" + \",\".join((str(self.size), inds, vals)) + \")\"\n\n def __repr__(self):\n inds = self.indices\n vals = self.values\n entries = \", \".join([\"{0}: {1}\".format(inds[i], _format_float(vals[i]))\n for i in xrange(len(inds))])\n return \"SparseVector({0}, {{{1}}})\".format(self.size, entries)\n\n def __eq__(self, other):\n \"\"\"\n Test SparseVectors for equality.\n\n >>> v1 = SparseVector(4, [(1, 1.0), (3, 5.5)])\n >>> v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])\n >>> v1 == v2\n True\n >>> v1 != v2\n False\n \"\"\"\n return (isinstance(other, self.__class__)\n and other.size == self.size\n and np.array_equal(other.indices, self.indices)\n and np.array_equal(other.values, self.values))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\nclass Vectors(object):\n\n \"\"\"\n Factory methods for working with vectors. Note that dense vectors\n are simply represented as NumPy array objects, so there is no need\n to covert them for use in MLlib. 
For sparse vectors, the factory\n methods in this class create an MLlib-compatible type, or users\n can pass in SciPy's C{scipy.sparse} column vectors.\n \"\"\"\n\n @staticmethod\n def sparse(size, *args):\n \"\"\"\n Create a sparse vector, using either a dictionary, a list of\n (index, value) pairs, or two separate arrays of indices and\n values (sorted by index).\n\n :param size: Size of the vector.\n :param args: Non-zero entries, as a dictionary, list of tupes,\n or two sorted lists containing indices and values.\n\n >>> print Vectors.sparse(4, {1: 1.0, 3: 5.5})\n (4,[1,3],[1.0,5.5])\n >>> print Vectors.sparse(4, [(1, 1.0), (3, 5.5)])\n (4,[1,3],[1.0,5.5])\n >>> print Vectors.sparse(4, [1, 3], [1.0, 5.5])\n (4,[1,3],[1.0,5.5])\n \"\"\"\n return SparseVector(size, *args)\n\n @staticmethod\n def dense(elements):\n \"\"\"\n Create a dense vector of 64-bit floats from a Python list. Always\n returns a NumPy array.\n\n >>> Vectors.dense([1, 2, 3])\n DenseVector([1.0, 2.0, 3.0])\n \"\"\"\n return DenseVector(elements)\n\n @staticmethod\n def stringify(vector):\n \"\"\"\n Converts a vector into a string, which can be recognized by\n Vectors.parse().\n\n >>> Vectors.stringify(Vectors.sparse(2, [1], [1.0]))\n '(2,[1],[1.0])'\n >>> Vectors.stringify(Vectors.dense([0.0, 1.0]))\n '[0.0,1.0]'\n \"\"\"\n return str(vector)\n\n\nclass Matrix(object):\n \"\"\"\n Represents a local matrix.\n \"\"\"\n\n def __init__(self, numRows, numCols):\n self.numRows = numRows\n self.numCols = numCols\n\n def toArray(self):\n \"\"\"\n Returns its elements in a NumPy ndarray.\n \"\"\"\n raise NotImplementedError\n\n\nclass DenseMatrix(Matrix):\n \"\"\"\n Column-major dense matrix.\n \"\"\"\n def __init__(self, numRows, numCols, values):\n Matrix.__init__(self, numRows, numCols)\n if isinstance(values, basestring):\n values = np.frombuffer(values, dtype=np.float64)\n elif not isinstance(values, np.ndarray):\n values = np.array(values, dtype=np.float64)\n assert len(values) == numRows * numCols\n if values.dtype != np.float64:\n values.astype(np.float64)\n self.values = values\n\n def __reduce__(self):\n return DenseMatrix, (self.numRows, self.numCols, self.values.tostring())\n\n def toArray(self):\n \"\"\"\n Return an numpy.ndarray\n\n >>> m = DenseMatrix(2, 2, range(4))\n >>> m.toArray()\n array([[ 0., 2.],\n [ 1., 3.]])\n \"\"\"\n return self.values.reshape((self.numRows, self.numCols), order='F')\n\n def __eq__(self, other):\n return (isinstance(other, DenseMatrix) and\n self.numRows == other.numRows and\n self.numCols == other.numCols and\n all(self.values == other.values))\n\n\nclass Matrices(object):\n @staticmethod\n def dense(numRows, numCols, values):\n \"\"\"\n Create a DenseMatrix\n \"\"\"\n return DenseMatrix(numRows, numCols, values)\n\n\ndef _test():\n import doctest\n (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)\n if failure_count:\n exit(-1)\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "numpy.dot", "numpy.array_equal", "numpy.frombuffer", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arahangua/gnn_prediction_sn
[ "3b3b8da07ee920c94f1a88fab87472860eec6322", "3b3b8da07ee920c94f1a88fab87472860eec6322" ]
[ "pred_models/gnn_torch_utils.py", "pred_models/gnn_torch_models.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 3 13:23:59 2021\n\n@author: th\n\"\"\"\n\n\nimport torch\nfrom torch.nn import ReLU, Linear, Softmax, SmoothL1Loss, Tanh, LeakyReLU\nfrom torch_geometric.nn import GCNConv, global_max_pool, global_mean_pool, SGConv, GNNExplainer, SAGEConv, GATConv, FastRGCNConv, GraphConv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport torch.nn.functional as F\nimport torch_optimizer as optim\n\nimport gnn_torch_models\n\nimport random\nfrom sklearn.preprocessing import StandardScaler as SS\n# torch.set_default_dtype(torch.float)\n\n\ndef standardscaler_transform(sc_feat_pure):\n scaler = SS()\n scaler.fit(sc_feat_pure)\n transformed=scaler.transform(sc_feat_pure)\n \n return transformed, scaler\n \ndef batch_split(nodes_cp, full_index, ii):\n test_x = nodes_cp[ii]\n train_idx=np.setxor1d(full_index, ii)\n train_x = nodes_cp[train_idx]\n if(len(train_x[0].shape)==1):\n train_concat = flatten_list_1d(train_x)\n else:\n train_concat = []\n for jj, x in enumerate(train_x):\n if(jj==0):\n train_concat = x\n else:\n train_concat= np.vstack((train_concat, x))\n \n return train_concat, test_x\n \ndef make_diag_batch_FC(FCs):\n \n count=0\n for FC in FCs:\n count+=FC.shape[0]\n \n #gen mat\n \n batch_FC = np.zeros((count,count))\n size_log = 0\n for FC in FCs:\n size = FC.shape[0]\n batch_FC[size_log:size_log+size, size_log:size_log+size]=FC\n size_log += size\n \n return batch_FC\n\ndef flatten_list_1d(act_ratio):\n ph = np.empty((1,0))\n ph = np.squeeze(ph)\n \n for entry in act_ratio:\n ph = np.concatenate((ph, entry))\n \n return ph\n\ndef batch_split_x(nodes_cp, full_index, ii, chip_ids):\n nodes_cp = np.array(nodes_cp)\n test_x = nodes_cp[ii]\n train_idx=np.setxor1d(full_index, chip_ids)\n train_x = nodes_cp[train_idx]\n if(len(train_x[0].shape)==1):\n train_concat = flatten_list_1d(train_x)\n else:\n train_concat = []\n for jj, x in enumerate(train_x):\n if(jj==0):\n train_concat = x\n else:\n train_concat= np.vstack((train_concat, x))\n \n return train_concat, test_x\n\n\n\ndef evaluate(out, labels):\n \"\"\"\n Calculates the accuracy between the prediction and the ground truth.\n :param out: predicted outputs of the explainer\n :param labels: ground truth of the data\n :returns: int accuracy\n \"\"\"\n acc = torch.mean(torch.square(out-labels))\n return acc \ndef evaluate_mae(out, labels):\n \"\"\"\n Calculates the accuracy between the prediction and the ground truth.\n :param out: predicted outputs of the explainer\n :param labels: ground truth of the data\n :returns: int accuracy\n \"\"\"\n acc = torch.mean(torch.abs(out-labels))\n return acc \ndef evaluate_acc(out, labels):\n \"\"\"\n Calculates the accuracy between the prediction and the ground truth.\n :param out: predicted outputs of the explainer\n :param labels: ground truth of the data\n :returns: int accuracy\n \"\"\"\n out_cl = torch.max(out,1)[1]\n lab_cl = torch.max(labels,1)[1]\n diff_sum = torch.sum(torch.abs(out_cl-lab_cl))\n \n acc = 1- (diff_sum/out.shape[0])\n return acc \n\n\n\n\n\ndef gen_gridparams(dropout_probs, learning_rates, weight_decays, hidden_dims):\n \n fit_param_list = []\n for prob in dropout_probs:\n for rate in learning_rates:\n for decay in weight_decays:\n for hd in hidden_dims:\n fit_params= dict()\n fit_params['dropout_prob']=prob\n fit_params['learning_rate']=rate\n fit_params['weight_decay']=decay\n fit_params['hidden_dims']=hd\n fit_param_list.append(fit_params)\n return fit_param_list\n \n \n\n\n\ndef 
run_gridsearch_batch_x(nodes, FCs, target_frs, epoch_n, iter_n, model_string, fit_param_list, device, chip_ids):\n \n fit_result=[]\n for entry in fit_param_list:\n fit_params= dict()\n fit_params['dropout_prob']=entry['dropout_prob']\n fit_params['learning_rate']=entry['learning_rate']\n fit_params['weight_decay']=entry['weight_decay']\n fit_params['hidden_dims']=entry['hidden_dims']\n \n fit_params['fit_result']=run_GNN_batch_x(nodes, FCs, target_frs, epoch_n, iter_n, model_string, fit_params, device, chip_ids, 1)\n fit_result.append(fit_params) \n \n return fit_result\n\n\n \ndef standard_scale(features,train_idx, validate_idx, test_idx):\n features_wip = np.copy(features)\n \n if(len(features_wip.shape)==1):\n X_train, X_scaler = standardscaler_transform(features_wip[train_idx].reshape(-1,1))\n X_validate = X_scaler.transform(features_wip[validate_idx].reshape(-1,1))\n X_test = X_scaler.transform(features_wip[test_idx].reshape(-1,1))\n features_wip[train_idx] = np.squeeze(X_train)\n features_wip[validate_idx] = np.squeeze(X_validate)\n features_wip[test_idx] = np.squeeze(X_test)\n else: \n X_train, X_scaler = standardscaler_transform(features_wip[train_idx, :])\n X_validate = X_scaler.transform(features_wip[validate_idx, :])\n X_test = X_scaler.transform(features_wip[test_idx, :])\n features_wip[train_idx, :] = X_train\n features_wip[validate_idx, :] = X_validate\n features_wip[test_idx, :] = X_test\n \n return features_wip\n \n \n\n\ndef make_rgcn_mat(train_FC, device):\n edge_idx = np.array(np.where(train_FC!=0))\n edge_idx = torch.tensor(edge_idx, device= device)\n edge_type = train_FC[np.where(train_FC!=0)]\n types = np.unique(edge_type)\n edge_class = np.squeeze(np.zeros((edge_type.shape[0],1)))\n for jj, typ in enumerate(types):\n idx = np.where(edge_type==typ)[0]\n edge_class[idx]=jj\n edge_weight = torch.tensor(edge_class, device=device).type(torch.LongTensor)\n \n return edge_idx, edge_weight\n \n\n\ndef match_network_param(sage_params_uniq, chip_ids):\n uniq_chip = np.unique(chip_ids)\n uniq_indices=[]\n for uniq_c in uniq_chip:\n indices = np.where(np.array(chip_ids)==uniq_c)[0]\n uniq_indices.append(indices[0])\n \n \n sage_params = dict()\n for k,v in sage_params_uniq.items():\n sage_params[k] = []\n \n # get the sequence straight\n \n seq = np.argsort(uniq_indices) \n for k,v in sage_params_uniq.items():\n for zz, idx in enumerate(seq):\n st_p=uniq_indices[idx]\n n_same = len(np.where(np.array(chip_ids)==np.array(chip_ids[st_p]))[0])\n for _ in range(n_same):\n sage_params[k].append(sage_params_uniq[k][zz])\n \n return sage_params \n\n\n\n\ndef run_GNN_batch_x(nodes, FCs, target_frs, n_epoch, iter_n, model_string, fit_params_list, device, chip_ids, gridsearch=0):\n # compute GCN assuming same nodes\n \n #seeds\n np.random.seed(42)\n random.seed(42)\n num_features= nodes[0].shape[1]\n \n #number of classes\n if(len(target_frs[0].shape)==1):\n num_classes=1\n else:\n num_classes = target_frs[0].shape[1]\n \n \n per_network=[]\n for ii in range(len(target_frs)):\n train_acc_vec=[]\n train_mae_vec=[]\n model_params_vec=[]\n test_acc_vec=[]\n test_mae_vec=[]\n\n \n validate_curves_list =[]\n train_curves_list=[]\n\n # prep x,y \n target_cp = np.copy(target_frs)\n full_index= np.arange(len(target_frs))\n #get target y first \n test_y = target_cp[ii]\n # make x \n nodes_cp = np.copy(nodes)\n # FC\n FC_cp = np.copy(FCs)\n \n #params \n if(gridsearch==0):\n fit_params = fit_params_list[ii]\n else:\n fit_params = fit_params_list\n \n for iter_ in range(iter_n):\n \n # targets\n 
test_y = target_cp[ii]\n # val_y = target_cp[val_idx]\n \n #get idx from same chips \n same_chip = np.where(np.array(chip_ids) == chip_ids[ii])[0]\n \n if(gridsearch==0):\n train_idx=np.setxor1d(full_index, same_chip) # got rid of it\n else:\n train_idx = np.setxor1d(full_index, ii)\n \n train_y = target_cp[train_idx]\n train_y = flatten_list_1d(train_y)\n \n # make x \n #features (input)\n if(gridsearch==0):\n train_x, test_x= batch_split_x(nodes_cp, full_index, ii, same_chip) #identical function to wp1_data_description, wp1_data class\n else:\n train_x, test_x= batch_split(nodes_cp, full_index, ii)\n \n #stack train and val for scaling \n \n #scale them\n scaled_x, train_scaler_x=standardscaler_transform(train_x)\n test_x = train_scaler_x.transform(test_x) \n train_x = train_scaler_x.transform(train_x)\n # val_x = train_scaler_x.transform(val_x)\n \n # scale y\n \n scaled_y, train_scaler_y=standardscaler_transform(train_y.reshape(-1,1))\n train_y = train_scaler_y.transform(train_y.reshape(-1,1))\n test_y = train_scaler_y.transform(test_y.reshape(-1,1))\n # val_y = train_scaler_y.transform(val_y.reshape(-1,1))\n \n # FCs\n train_FC= make_diag_batch_FC(FC_cp[train_idx])\n test_FC = FC_cp[ii]\n # put into cuda \n train_x = torch.tensor(train_x, device = device, dtype=float)\n train_y = torch.tensor(train_y, device = device, dtype=float)\n test_x = torch.tensor(test_x, device = device, dtype=float)\n test_y = torch.tensor(test_y, device = device, dtype=float)\n \n if(num_classes==1):\n train_y = torch.reshape(train_y, (train_y.shape[0], 1))\n test_y = torch.reshape(test_y, (test_y.shape[0], 1))\n \n edge_idx= dict()\n edge_weight =dict()\n edge_idx['train'] = np.array(np.where(train_FC>0))\n edge_idx['train'] = torch.tensor(edge_idx['train'], device = device)\n edge_weight['train'] = train_FC[np.where(train_FC>0)]\n edge_weight['train'] = torch.tensor(edge_weight['train'], device = device, dtype=float)\n \n #prep for testing \n \n edge_idx['test'] = np.array(np.where(test_FC>0))\n edge_idx['test'] = torch.tensor(edge_idx['test'], device = device)\n edge_weight['test'] = test_FC[np.where(test_FC>0)]\n edge_weight['test'] = torch.tensor(edge_weight['test'], device = device, dtype=float)\n \n \n \n model = gnn_torch_models.return_model(model_string, num_features, num_classes, fit_params['dropout_prob'], fit_params['hidden_dims'])\n model.to(device, dtype=float) \n \n if('rgcn' in model_string):\n edge_idx= dict()\n edge_weight =dict()\n edge_idx['train'], edge_weight['train'] = make_rgcn_mat(train_FC, device)\n edge_idx['test'], edge_weight['test'] = make_rgcn_mat(test_FC, device)\n edge_idx['train'] = torch.tensor(edge_idx['train'], device= device)\n edge_idx['test'] = torch.tensor(edge_idx['test'], device= device)\n edge_weight['train'] = torch.tensor(edge_weight['train'], device= device, dtype=float)\n edge_weight['test'] = torch.tensor(edge_weight['test'], device= device, dtype=float)\n \n # edge_idx['val'], edge_weight['val'] = make_rgcn_mat(val_FC, device)\n \n \n optimizer = torch.optim.Adam(model.parameters(), lr=fit_params['learning_rate'], weight_decay= fit_params['weight_decay'])\n \n if(model_string == 'gcn_class'):\n criterion = torch.nn.CrossEntropyLoss()\n else:\n criterion = torch.nn.MSELoss()\n \n train_acc_curve=[]\n validate_acc_curve=[]\n \n \n #epochs\n if(gridsearch==0):\n n_epoch = fit_params['bs_epoch']\n \n \n for epoch in range(n_epoch):\n \n model.train()\n optimizer.zero_grad()\n out = model.forward(train_x, edge_idx['train'], edge_weight['train']) # forward\n loss = 
criterion(out, train_y)\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0)\n optimizer.step()\n \n # eval flag\n model.eval()\n \n with torch.no_grad():\n out=dict()\n out['train'] = model(train_x, edge_idx['train'], edge_weight['train'])\n out['test'] = model(test_x, edge_idx['test'], edge_weight['test'])\n # out['test'] = model(test_x, edge_idx['test'], edge_weight['test'])\n \n # Evaluate train\n mse=dict()\n mae=dict()\n mse['train'] = evaluate(out['train'], train_y)\n mae['train'] = evaluate_mae(out['train'], train_y)\n \n mse['test'] = evaluate(out['test'], test_y)\n mae['test'] = evaluate_mae(out['test'], test_y)\n \n \n \n if(epoch% 50==0):\n print(f\"Epoch: {epoch}, train_acc: {mse['train']:.4f}, validate_acc : {mse['test']:.4f}, LR : {optimizer.param_groups[0]['lr']:.8f}\")\n train_acc_curve.append(mse['train'].cpu().numpy())\n validate_acc_curve.append(mse['test'].cpu().numpy())\n \n \n \n # for each iter\n train_acc_vec.append(mse['train'].cpu().numpy())\n train_mae_vec.append(mae['train'].cpu().numpy())\n \n validate_curves_list.append(np.array(validate_acc_curve))\n train_curves_list.append(np.array(train_acc_curve))\n \n model_dict=dict()\n \n for k,v in model.state_dict().items():\n model_dict[k] =v.cpu()\n \n if(gridsearch==0):\n model_params_vec.append(model_dict)\n \n \n # test\n with torch.no_grad():\n out['test'] = model(test_x, edge_idx['test'], edge_weight['test'])\n mse['test'] = evaluate(out['test'], test_y)\n mae['test'] = evaluate_mae(out['test'], test_y)\n print(f\"iteration: {iter_}, test_acc: {mse['test']:.4f}\")\n \n test_acc_vec.append(mse['test'].cpu().numpy())\n test_mae_vec.append(mae['test'].cpu().numpy())\n \n result = dict()\n result['mse_train']=np.array(train_acc_vec)\n result['mae_train']=np.array(train_mae_vec)\n \n result['mse_test']= np.array(test_acc_vec)\n result['mae_test'] = np.array(test_mae_vec)\n \n result['train_curve']=train_curves_list\n result['validate_curve']=validate_curves_list\n per_network.append(result)\n return per_network\n\n\n\n\n\ndef simple_forward_model(model, input_vec, adj_mat, cuda, gpu_id):\n \n x = torch.tensor(input_vec)\n # lab_out = torch.tensor(target_vec)\n # lab_out = torch.reshape(lab_out, (adj_mat.shape[0], 1))\n edge_idx = np.array(np.where(adj_mat>0))\n edge_idx = torch.tensor(edge_idx)\n edge_weight = adj_mat[np.where(adj_mat>0)]\n edge_weight = torch.tensor(edge_weight)\n \n \n \n if(cuda):\n # lab_out=lab_out.cuda(gpu_id)\n x = x.cuda(gpu_id)\n edge_idx = edge_idx.cuda(gpu_id)\n edge_weight=edge_weight.cuda(gpu_id)\n model = model.cuda(gpu_id)\n \n \n with torch.no_grad():\n out = model.forward(x, edge_idx, edge_weight)\n \n return out.cpu().detach().numpy()\n\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 3 13:23:59 2021\n\n@author: th\n\"\"\"\n\n\nimport torch\nfrom torch.nn import ReLU, Linear, Softmax, SmoothL1Loss, Tanh, LeakyReLU\nfrom torch_geometric.nn import GCNConv, global_max_pool, global_mean_pool, SGConv, SAGEConv, GATConv, FastRGCNConv, GraphConv\nimport torch.nn.functional as F\n\n\n\nclass RGCN(torch.nn.Module):\n def __init__(self, num_features, num_classes, num_edge_types, dropout_prob, hidden):\n super(RGCN, self).__init__()\n self.embedding_size = hidden * 3\n self.conv1 = FastRGCNConv(num_features, hidden, num_edge_types)\n self.relu1 = ReLU()\n self.conv2 = FastRGCNConv(hidden, hidden, num_edge_types)\n self.relu2 = ReLU()\n self.conv3 = FastRGCNConv(hidden, hidden, num_edge_types)\n self.relu3 = ReLU()\n self.lin = 
Linear(3*hidden, num_classes)\n self.dropout_prob = dropout_prob\n \n \n def forward(self, x, edge_index, edge_type):\n input_lin = self.embedding(x, edge_index, edge_type)\n final = self.lin(input_lin)\n return final\n def embedding(self, x, edge_index, edge_type):\n # if edge_weights is None:\n # edge_weights = torch.ones(edge_index.size(1))\n stack = []\n\n out1 = self.conv1(x, edge_index, edge_type)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n \n stack.append(out1)\n\n out2 = self.conv2(out1, edge_index, edge_type)\n out2= F.dropout(out2,training=self.training, p=self.dropout_prob)\n out2 = self.relu2(out2)\n stack.append(out2)\n\n out3 = self.conv3(out2, edge_index, edge_type)\n out3 = F.dropout(out3,training=self.training, p=self.dropout_prob)\n out3 = self.relu3(out3)\n stack.append(out3)\n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin \n \n \nclass sl_RGCN(torch.nn.Module):\n def __init__(self, num_features, num_classes, num_edge_types, dropout_prob, hidden):\n super(sl_RGCN, self).__init__()\n self.embedding_size = hidden * 1\n self.conv1 = FastRGCNConv(num_features, hidden, num_edge_types)\n self.relu1 = ReLU()\n self.lin = Linear(1*hidden, num_classes)\n self.dropout_prob = dropout_prob\n \n \n def forward(self, x, edge_index, edge_type):\n input_lin = self.embedding(x, edge_index, edge_type)\n final = self.lin(input_lin)\n return final\n def embedding(self, x, edge_index, edge_type):\n # if edge_weights is None:\n # edge_weights = torch.ones(edge_index.size(1))\n stack = []\n\n out1 = self.conv1(x, edge_index, edge_type)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n stack.append(out1)\n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n \nclass RGCN2(torch.nn.Module):\n def __init__(self, num_features, num_classes, num_edge_types, dropout_prob, hidden):\n super(RGCN2, self).__init__()\n self.embedding_size = hidden * 2\n self.conv1 = FastRGCNConv(num_features, hidden, num_edge_types)\n self.relu1 = ReLU()\n self.conv2 = FastRGCNConv(hidden, hidden, num_edge_types)\n self.relu2 = ReLU()\n self.lin = Linear(2*hidden, num_classes)\n self.dropout_prob = dropout_prob\n \n \n def forward(self, x, edge_index, edge_type):\n input_lin = self.embedding(x, edge_index, edge_type)\n final = self.lin(input_lin)\n return final\n def embedding(self, x, edge_index, edge_type):\n # if edge_weights is None:\n # edge_weights = torch.ones(edge_index.size(1))\n stack = []\n\n out1 = self.conv1(x, edge_index, edge_type)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n stack.append(out1)\n \n out2 = self.conv2(out1, edge_index, edge_type)\n out2= F.dropout(out2,training=self.training, p=self.dropout_prob)\n out2 = self.relu2(out2)\n stack.append(out2)\n \n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n \n \n \n\nclass Graphsage(torch.nn.Module):\n \"\"\"\n A graph clasification model for nodes decribed in https://arxiv.org/abs/1903.03894.\n This model consists of 3 stacked GCN layers followed by a linear layer.\n \"\"\"\n def __init__(self, num_features, num_classes, dropout_prob, aggre, hidden):\n super().__init__()\n self.embedding_size = hidden * 3\n self.conv1 = GraphConv(num_features, hidden, aggre)\n self.relu1 = ReLU()\n self.conv2 = GraphConv(hidden, hidden, aggre)\n self.relu2 = ReLU()\n self.conv3 = GraphConv(hidden, hidden, aggre)\n self.relu3 = ReLU()\n self.lin = Linear(3*hidden, num_classes)\n 
self.dropout_prob = dropout_prob\n \n def forward(self, x, edge_index, edge_weights):\n input_lin = self.embedding(x, edge_index, edge_weights)\n final = self.lin(input_lin)\n return final\n\n def embedding(self, x, edge_index, edge_weights):\n # if edge_weights is None:\n # edge_weights = torch.ones(edge_index.size(1))\n stack = []\n \n \n # out1 = (x - x.mean(0, keepdim=True)) / (x.std(0, keepdim=True) + 1e-8) # this is not used in PGExplainer\n out1 = self.conv1(x, edge_index,edge_weights)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n stack.append(out1)\n # out1 = self.tanh1(out1)\n \n out2 = self.conv2(out1, edge_index, edge_weights)\n out2 = F.dropout(out2, training=self.training, p=self.dropout_prob)\n out2 = self.relu2(out2)\n stack.append(out2)\n\n out3 = self.conv3(out2, edge_index, edge_weights)\n out3 = F.dropout(out3, training=self.training, p=self.dropout_prob)\n out3 = self.relu3(out3)\n stack.append(out3)\n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n \n\nclass Graphsage2(torch.nn.Module):\n \"\"\"\n A graph clasification model for nodes decribed in https://arxiv.org/abs/1903.03894.\n This model consists of 3 stacked GCN layers followed by a linear layer.\n \"\"\"\n def __init__(self, num_features, num_classes, dropout_prob, aggre, hidden):\n super().__init__()\n self.embedding_size = hidden * 2\n self.conv1 = GraphConv(num_features, hidden, aggre)\n self.relu1 = ReLU()\n self.conv2 = GraphConv(hidden, hidden, aggre)\n self.relu2 = ReLU()\n self.lin = Linear(2*hidden, num_classes)\n self.dropout_prob = dropout_prob\n \n def forward(self, x, edge_index, edge_weights):\n input_lin = self.embedding(x, edge_index, edge_weights)\n final = self.lin(input_lin)\n return final\n\n def embedding(self, x, edge_index, edge_weights):\n # if edge_weights is None:\n # edge_weights = torch.ones(edge_index.size(1))\n stack = []\n \n # out1 = (x - x.mean(0, keepdim=True)) / (x.std(0, keepdim=True) + 1e-8) # this is not used in PGExplainer\n out1 = self.conv1(x, edge_index,edge_weights)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n # out1 = self.tanh1(out1)\n stack.append(out1)\n \n out2 = self.conv2(out1, edge_index, edge_weights)\n out2 = F.dropout(out2, training=self.training, p=self.dropout_prob)\n out2 = self.relu2(out2)\n # out2 = self.tanh2(out2)\n stack.append(out2)\n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n\n\nclass Graphsage1(torch.nn.Module):\n \"\"\"\n A graph clasification model for nodes decribed in https://arxiv.org/abs/1903.03894.\n This model consists of 3 stacked GCN layers followed by a linear layer.\n \"\"\"\n def __init__(self, num_features, num_classes, dropout_prob, aggre, hidden):\n super().__init__()\n self.embedding_size = hidden * 1\n self.conv1 = GraphConv(num_features, hidden, aggre)\n self.relu1 = ReLU()\n self.lin = Linear(1*hidden, num_classes)\n self.dropout_prob = dropout_prob\n \n def forward(self, x, edge_index, edge_weights):\n input_lin = self.embedding(x, edge_index, edge_weights)\n final = self.lin(input_lin)\n return final\n\n def embedding(self, x, edge_index, edge_weights):\n # if edge_weights is None:\n # edge_weights = torch.ones(edge_index.size(1))\n stack = []\n \n # out1 = (x - x.mean(0, keepdim=True)) / (x.std(0, keepdim=True) + 1e-8) # this is not used in PGExplainer\n out1 = self.conv1(x, edge_index,edge_weights)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = 
self.relu1(out1)\n stack.append(out1)\n \n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n \n\nclass NodeGCN(torch.nn.Module):\n \"\"\"\n A graph clasification model for nodes decribed in https://arxiv.org/abs/1903.03894.\n This model consists of 3 stacked GCN layers followed by a linear layer.\n \"\"\"\n def __init__(self, num_features, num_classes, dropout_prob, hidden):\n super(NodeGCN, self).__init__()\n self.embedding_size = hidden * 3\n self.conv1 = GCNConv(num_features, hidden)\n self.relu1 = ReLU()\n self.conv2 = GCNConv(hidden, hidden)\n self.relu2 = ReLU()\n self.conv3 = GCNConv(hidden, hidden)\n self.relu3 = ReLU()\n self.lin = Linear(3*hidden, num_classes)\n self.dropout_prob = dropout_prob\n\n def forward(self, x, edge_index, edge_weights):\n input_lin = self.embedding(x, edge_index, edge_weights)\n final = self.lin(input_lin)\n return final\n\n def embedding(self, x, edge_index, edge_weights):\n if edge_weights is None:\n edge_weights = torch.ones(edge_index.size(1))\n stack = []\n\n out1 = self.conv1(x, edge_index, edge_weights)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n stack.append(out1)\n\n out2 = self.conv2(out1, edge_index, edge_weights)\n out2 = F.dropout(out2, training=self.training, p=self.dropout_prob)\n out2 = self.relu2(out2)\n stack.append(out2)\n\n out3 = self.conv3(out2, edge_index, edge_weights)\n out3 = F.dropout(out3, training=self.training, p=self.dropout_prob)\n out3 = self.relu3(out3)\n stack.append(out3)\n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n \n \nclass NodeGCN2(torch.nn.Module):\n \"\"\"\n A graph clasification model for nodes decribed in https://arxiv.org/abs/1903.03894.\n This model consists of 3 stacked GCN layers followed by a linear layer.\n \"\"\"\n def __init__(self, num_features, num_classes, dropout_prob, hidden):\n super(NodeGCN2, self).__init__()\n self.embedding_size =hidden* 2\n self.conv1 = GCNConv(num_features, hidden)\n self.relu1 = ReLU()\n self.conv2 = GCNConv(hidden, hidden)\n self.relu2 = ReLU()\n self.lin = Linear(2*hidden, num_classes)\n self.dropout_prob = dropout_prob\n\n def forward(self, x, edge_index, edge_weights):\n input_lin = self.embedding(x, edge_index, edge_weights)\n final = self.lin(input_lin)\n return final\n\n def embedding(self, x, edge_index, edge_weights):\n if edge_weights is None:\n edge_weights = torch.ones(edge_index.size(1))\n stack = []\n\n out1 = self.conv1(x, edge_index, edge_weights)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n stack.append(out1)\n\n out2 = self.conv2(out1, edge_index, edge_weights)\n out2 = F.dropout(out2, training=self.training, p=self.dropout_prob)\n out2 = self.relu2(out2)\n stack.append(out2)\n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n \n\n \n\nclass SlGCN(torch.nn.Module):\n \"\"\"\n A graph clasification model for nodes decribed in https://arxiv.org/abs/1903.03894.\n This model consists of 3 stacked GCN layers followed by a linear layer.\n \"\"\"\n def __init__(self, num_features, num_classes, dropout_prob, hidden):\n super(SlGCN, self).__init__()\n self.embedding_size = hidden\n self.conv1 = GCNConv(num_features, hidden)\n self.relu1 = ReLU()\n self.lin = Linear(hidden, num_classes)\n self.dropout_prob = dropout_prob\n\n def forward(self, x, edge_index, edge_weights):\n input_lin = self.embedding(x, edge_index, edge_weights)\n final = self.lin(input_lin)\n return final\n\n def embedding(self, x, edge_index, 
edge_weights):\n if edge_weights is None:\n edge_weights = torch.ones(edge_index.size(1))\n stack = []\n\n out1 = self.conv1(x, edge_index, edge_weights)\n out1 = F.dropout(out1, training=self.training, p=self.dropout_prob)\n out1 = self.relu1(out1)\n stack.append(out1)\n\n input_lin = torch.cat(stack, dim=1)\n\n return input_lin\n\n\n\n\n\ndef return_model(model_string, num_features, num_classes, dropout_prob, hidden):\n if(model_string=='gcn'):\n model= NodeGCN(num_features,1, dropout_prob, hidden)\n if(model_string == 'gcn2'):\n model = NodeGCN2(num_features, 1, dropout_prob, hidden)\n if(model_string=='sl_gcn'):\n model = SlGCN(num_features,1, dropout_prob, hidden)\n if(model_string=='sage_max'):\n model = Graphsage(num_features,1, dropout_prob, 'max', hidden)\n if(model_string=='sage_add'):\n model = Graphsage(num_features,1, dropout_prob, 'add', hidden)\n if(model_string=='sage_mean'):\n model = Graphsage(num_features,1, dropout_prob, 'mean', hidden)\n \n if(model_string=='sage2_max'):\n model = Graphsage2(num_features,1, dropout_prob, 'max', hidden)\n if(model_string=='sage2_add'):\n model = Graphsage2(num_features,1, dropout_prob, 'add', hidden)\n if(model_string=='sage2_mean'):\n model = Graphsage2(num_features,1, dropout_prob, 'mean', hidden)\n \n \n if(model_string=='sage1_max'):\n model = Graphsage1(num_features,1, dropout_prob, 'max', hidden)\n if(model_string=='sage1_add'):\n model = Graphsage1(num_features,1, dropout_prob, 'add', hidden)\n if(model_string=='sage1_mean'):\n model = Graphsage1(num_features,1, dropout_prob, 'mean', hidden)\n \n \n if(model_string=='rgcn'):\n model = RGCN(num_features, 1, 2, dropout_prob, hidden)\n if(model_string =='rgcn2'):\n model = RGCN2(num_features, 1, 2, dropout_prob, hidden)\n if(model_string == 'sl_rgcn'):\n model = sl_RGCN(num_features, 1, 2, dropout_prob, hidden)\n \n return model\n \n\n" ]
[ [ "torch.abs", "torch.max", "numpy.squeeze", "numpy.concatenate", "torch.no_grad", "numpy.where", "numpy.setxor1d", "torch.nn.CrossEntropyLoss", "numpy.unique", "torch.reshape", "torch.tensor", "numpy.copy", "torch.square", "numpy.zeros", "numpy.argsort", "numpy.array", "numpy.random.seed", "numpy.empty", "sklearn.preprocessing.StandardScaler", "torch.nn.MSELoss", "numpy.vstack" ], [ "torch.nn.Linear", "torch.nn.ReLU", "torch.cat", "torch.nn.functional.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jiannan-Liu/nCoVSegNet
[ "7543e68edff011a7f7b694c97cf0f185d441fd6b" ]
[ "module/backbone/Res2Net.py" ]
[ "import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport torch\nimport torch.nn.functional as F\n\n__all__ = ['Res2Net', 'res2net50_v1b', 'res2net101_v1b', 'res2net50_v1b_26w_4s']\n\nmodel_urls = {\n 'res2net50_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net50_v1b_26w_4s-3cf99910.pth',\n 'res2net101_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net101_v1b_26w_4s-0812c246.pth',\n}\n\n\nclass Bottle2neck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale=4, stype='normal'):\n \"\"\" Constructor\n Args:\n inplanes: input channel dimensionality\n planes: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n downsample: None when stride = 1\n baseWidth: basic width of conv3x3\n scale: number of scale.\n type: 'normal': normal set. 'stage': first block of a new stage.\n \"\"\"\n super(Bottle2neck, self).__init__()\n\n width = int(math.floor(planes * (baseWidth / 64.0)))\n self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width * scale)\n\n if scale == 1:\n self.nums = 1\n else:\n self.nums = scale - 1\n if stype == 'stage':\n self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)\n convs = []\n bns = []\n for i in range(self.nums):\n convs.append(nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, bias=False))\n bns.append(nn.BatchNorm2d(width))\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList(bns)\n\n self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stype = stype\n self.scale = scale\n self.width = width\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n spx = torch.split(out, self.width, 1)\n for i in range(self.nums):\n if i == 0 or self.stype == 'stage':\n sp = spx[i]\n else:\n sp = sp + spx[i]\n sp = self.convs[i](sp)\n sp = self.relu(self.bns[i](sp))\n if i == 0:\n out = sp\n else:\n out = torch.cat((out, sp), 1)\n if self.scale != 1 and self.stype == 'normal':\n out = torch.cat((out, spx[self.nums]), 1)\n elif self.scale != 1 and self.stype == 'stage':\n out = torch.cat((out, self.pool(spx[self.nums])), 1)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Res2Net(nn.Module):\n\n def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):\n self.inplanes = 64\n super(Res2Net, self).__init__()\n self.baseWidth = baseWidth\n self.scale = scale\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 32, 3, 2, 1, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, 3, 1, 1, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, 3, 1, 1, bias=False)\n )\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(512 * block.expansion, 
num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.AvgPool2d(kernel_size=stride, stride=stride,\n ceil_mode=True, count_include_pad=False),\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample=downsample,\n stype='stage', baseWidth=self.baseWidth, scale=self.scale))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, baseWidth=self.baseWidth, scale=self.scale))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef res2net50_v1b(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_v1b model_lung_infection.\n Res2Net-50 refers to the Res2Net-50_v1b_26w_4s.\n Args:\n pretrained (bool): If True, returns a model_lung_infection pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net50_v1b_26w_4s']))\n return model\n\n\ndef res2net101_v1b(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_v1b_26w_4s model_lung_infection.\n Args:\n pretrained (bool): If True, returns a model_lung_infection pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net101_v1b_26w_4s']))\n return model\n\n\ndef res2net50_v1b_26w_4s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_v1b_26w_4s model_lung_infection.\n Args:\n pretrained (bool): If True, returns a model_lung_infection pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model_state = torch.load('/home/ljn/code/Lnet/res/res2net50_v1b_26w_4s-3cf99910.pth')\n model.load_state_dict(model_state)\n # model_lung_infection.load_state_dict(model_zoo.load_url(model_urls['res2net50_v1b_26w_4s']))\n return model\n\n\ndef res2net101_v1b_26w_4s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_v1b_26w_4s model_lung_infection.\n Args:\n pretrained (bool): If True, returns a model_lung_infection pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net101_v1b_26w_4s']))\n return model\n\n\ndef res2net152_v1b_26w_4s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_v1b_26w_4s model_lung_infection.\n Args:\n pretrained (bool): If True, returns a model_lung_infection pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 8, 36, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net152_v1b_26w_4s']))\n return model\n\n\n# if 
__name__ == '__main__':\n# images = torch.rand(1, 3, 224, 224).cuda(0)\n# model = res2net50_v1b_26w_4s(pretrained=True)\n# model = model.cuda(0)\n# print(model(images).size())\n" ]
[ [ "torch.nn.Sequential", "torch.load", "torch.cat", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.split", "torch.nn.ReLU", "torch.utils.model_zoo.load_url", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
idoroditty/IML.HUJI
[ "fdc9c5166c33db679eca8ebd928ae487faadf39f" ]
[ "exercises/perform_model_selection.py" ]
[ "from __future__ import annotations\nimport numpy as np\nimport pandas as pd\nimport sklearn.datasets\nfrom sklearn import datasets\nfrom IMLearn.metrics import mean_square_error\nfrom IMLearn.utils import split_train_test\nfrom IMLearn.model_selection import cross_validate\nfrom IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\nfrom sklearn.linear_model import Lasso\n\nfrom utils import *\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n\ndef select_polynomial_degree(n_samples: int = 100, noise: float = 5):\n \"\"\"\n Simulate data from a polynomial model and use cross-validation to select the best fitting degree\n\n Parameters\n ----------\n n_samples: int, default=100\n Number of samples to generate\n\n noise: float, default = 5\n Noise level to simulate in responses\n \"\"\"\n # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise\n # and split into training- and testing portions\n mu = 0\n eps = np.random.normal(mu, noise, n_samples)\n f_x = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)\n X = np.linspace(-1.2, 2, n_samples)\n y_without_noise = f_x(X)\n y_with_noise = y_without_noise + eps\n train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.DataFrame(y_with_noise), 2 / 3)\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=X, y=y_without_noise, mode=\"markers\", name=\"Polynom\"))\n fig.add_trace(go.Scatter(x=train_X[0], y=train_y[0], mode=\"markers\", marker=dict(color=\"Red\",\n colorscale=[custom[0], custom[-1]]), name=\"Train Set\"))\n fig.add_trace(go.Scatter(x=test_X[0], y=test_y[0], mode=\"markers\", marker=dict(color=\"Green\",\n colorscale=[custom[0], custom[-1]]), name=\"Test Set\"))\n fig.update_layout(title=\"Training and Validation score as a function of polynomial degree.\"\n f\" Noise={noise}, Number of samples={n_samples}\",\n xaxis_title=\"Polynomial Degree\", yaxis_title=\"Score\")\n fig.show()\n\n # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10\n train_scores = []\n validation_scores = []\n temp_train_X = np.array(train_X).flatten()\n temp_train_y = np.array(train_y).flatten()\n for i in range(11):\n train_score, validation_score = cross_validate(PolynomialFitting(i), temp_train_X,\n temp_train_y, mean_square_error)\n train_scores.append(train_score)\n validation_scores.append(validation_score)\n fig2 = go.Figure()\n fig2.add_trace(go.Scatter(x=[i for i in range(11)], y=train_scores, name=\"Train Scores\"))\n fig2.add_trace(go.Scatter(x=[i for i in range(11)], y=validation_scores, name=\"Validation Scores\"))\n fig2.update_layout(title=\"Average Training and Validation error as a function of polynomial degree.\"\n f\" Noise={noise}, Number of samples={n_samples}\",\n xaxis_title=\"Polynomial Degree\", yaxis_title=\"Average error\")\n fig2.show()\n\n # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error\n k_star = np.argmin(validation_scores)\n polynomial_fitting = PolynomialFitting(k_star)\n polynomial_fitting.fit(np.array(train_X), np.array(train_y))\n pred = polynomial_fitting.predict(np.array(test_X))\n print(\"best polynomial degree: \", k_star)\n print(\"The test error: \", np.round(mean_square_error(np.array(test_y), pred), 2))\n\n\ndef select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):\n \"\"\"\n Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter\n values for Ridge 
and Lasso regressions\n\n Parameters\n ----------\n n_samples: int, default=50\n Number of samples to generate\n\n n_evaluations: int, default = 500\n Number of regularization parameter values to evaluate for each of the algorithms\n \"\"\"\n # Question 6 - Load diabetes dataset and split into training and testing portions\n X, y = datasets.load_diabetes(return_X_y=True)\n train_X, train_y, test_X, test_y = X[:n_samples], y[:n_samples], X[n_samples:], y[n_samples:]\n\n # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions\n lambdas = np.linspace(0.01, 2, n_evaluations)\n ridge_train_scores = []\n ridge_validation_scores = []\n lasso_train_scores = []\n lasso_validation_scores = []\n for lam in lambdas:\n train_score, validation_score = cross_validate(RidgeRegression(lam), train_X,\n train_y, mean_square_error)\n ridge_train_scores.append(train_score)\n ridge_validation_scores.append(validation_score)\n train_score, validation_score = cross_validate(Lasso(lam), train_X,\n train_y, mean_square_error)\n lasso_train_scores.append(train_score)\n lasso_validation_scores.append(validation_score)\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=lambdas, y=ridge_train_scores, marker=dict(color=\"Red\",\n colorscale=[custom[0], custom[-1]]), name=\"Ridge Train Set\"))\n fig.add_trace(go.Scatter(x=lambdas, y=ridge_validation_scores, marker=dict(color=\"Blue\",\n colorscale=[custom[0], custom[-1]]), name=\"Ridge Validation Set\"))\n fig.add_trace(go.Scatter(x=lambdas, y=lasso_train_scores, marker=dict(color=\"Purple\",\n colorscale=[custom[0], custom[-1]]), name=\"Lasso Train Set\"))\n fig.add_trace(go.Scatter(x=lambdas, y=lasso_validation_scores, marker=dict(color=\"Green\",\n colorscale=[custom[0], custom[-1]]), name=\"Lasso Validation Set\"))\n fig.update_layout(title=\"Average Training and Validation errors as a function of the \"\n \"regularization parameter lambda\", xaxis_title=\"Regularization Parameter Value\",\n yaxis_title=\"Average error\")\n fig.show()\n\n # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model\n ridge_min_ind = np.argmin(np.array(ridge_validation_scores))\n lasso_min_ind = np.argmin(np.array(lasso_validation_scores))\n lam_ridge_min = lambdas[ridge_min_ind]\n print(\"Ridge lambda: \", lam_ridge_min)\n lam_lasso_min = lambdas[lasso_min_ind]\n print(\"Lasso lambda: \", lam_lasso_min)\n ridge_estimator = RidgeRegression(lam_ridge_min)\n lasso_estimator = Lasso(lam_lasso_min)\n linear_regression_estimator = LinearRegression()\n ridge_estimator.fit(train_X, train_y)\n lasso_estimator.fit(train_X, train_y)\n linear_regression_estimator.fit(train_X, train_y)\n ridge_pred = ridge_estimator.predict(test_X)\n lasso_pred = lasso_estimator.predict(test_X)\n linear_regression_pred = linear_regression_estimator.predict(test_X)\n print(\"Ridge test error with best lambda: \", np.round(mean_square_error(np.array(test_y), ridge_pred), 2))\n print(\"Lasso test error with best lambda: \", np.round(mean_square_error(np.array(test_y), lasso_pred), 2))\n print(\"Linear Regression test error with best lambda: \", np.round(mean_square_error(np.array(test_y),\n linear_regression_pred), 2))\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n select_polynomial_degree()\n select_polynomial_degree(noise=0)\n select_polynomial_degree(n_samples=1500, noise=10)\n select_regularization_parameter()\n" ]
[ [ "numpy.linspace", "numpy.random.seed", "sklearn.datasets.load_diabetes", "pandas.DataFrame", "sklearn.linear_model.Lasso", "numpy.random.normal", "numpy.argmin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
oywtece/dstn
[ "5936811f81d419db82191a5939ee347f6d3359ed" ]
[ "dstn_self_att.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport datetime\nimport ctr_funcs as func\nimport config_dstn as cfg\nimport os\nimport shutil\n\n# config\nstr_txt = cfg.output_file_name\nbase_path = './tmp'\nmodel_saving_addr = base_path + '/dstn_s_' + str_txt + '/'\noutput_file_name = base_path + '/dstn_s_' + str_txt + '.txt'\nnum_csv_col = cfg.num_csv_col\ntrain_file_name = cfg.train_file_name\nval_file_name = cfg.val_file_name\ntest_file_name = cfg.test_file_name\nbatch_size = cfg.batch_size\nn_ft = cfg.n_ft\nk = cfg.k\neta = cfg.eta\nkp_prob = cfg.kp_prob\nn_epoch = cfg.n_epoch\nmax_num_lower_ct = cfg.max_num_lower_ct\nrecord_step_size = cfg.record_step_size\nlayer_dim = cfg.layer_dim\nopt_alg = cfg.opt_alg\nn_one_hot_slot = cfg.n_one_hot_slot\nn_mul_hot_slot = cfg.n_mul_hot_slot\nnum_aux_type = cfg.num_aux_type\nn_one_hot_slot_aux = cfg.n_one_hot_slot_aux\nn_mul_hot_slot_aux = cfg.n_mul_hot_slot_aux\nmax_len_per_slot_aux = cfg.max_len_per_slot_aux\nnum_aux_inst_in_data = cfg.num_aux_inst_in_data\nmax_num_aux_inst_used = cfg.max_num_aux_inst_used\nmax_len_per_slot = cfg.max_len_per_slot\natt_hidden_dim = cfg.att_hidden_dim\n\nlabel_col_idx = 0\nrecord_defaults = [[0]]*num_csv_col\nrecord_defaults[0] = [0.0]\ntotal_num_ft_col = num_csv_col - 1\n\n# create dir\nif not os.path.exists(base_path):\n os.mkdir(base_path)\n\n# remove dir\nif os.path.isdir(model_saving_addr):\n shutil.rmtree(model_saving_addr)\n\n###########################################################\n###########################################################\nprint('Loading data start!')\ntf.set_random_seed(123)\n\n# load training data\ntrain_ft, train_label = func.tf_input_pipeline(train_file_name, batch_size, n_epoch, label_col_idx, record_defaults)\n\n# load val data\nn_val_inst = func.count_lines(val_file_name[0])\nval_ft, val_label = func.tf_input_pipeline(val_file_name, n_val_inst, 1, label_col_idx, record_defaults)\nn_val_batch = n_val_inst//batch_size\n\n# load test data\ntest_ft, test_label = func.tf_input_pipeline_test(test_file_name, batch_size, 1, label_col_idx, record_defaults)\nprint('Loading data done!')\n\n########################################################################\ndef partition_input(x_input):\n # generate idx_list\n len_list = []\n len_list.append(n_one_hot_slot)\n len_list.append(n_mul_hot_slot*max_len_per_slot)\n \n for i in range(num_aux_type):\n len_list.append(n_one_hot_slot_aux[i]*num_aux_inst_in_data[i])\n len_list.append(n_mul_hot_slot_aux[i]*max_len_per_slot_aux[i]*num_aux_inst_in_data[i])\n \n len_list = np.array(len_list)\n idx_list = np.cumsum(len_list)\n\n # shape=[None, n_one_hot_slot]\n x_input_one_hot = x_input[:, 0:idx_list[0]]\n x_input_mul_hot = x_input[:, idx_list[0]:idx_list[1]]\n # shape=[None, n_mul_hot_slot, max_len_per_slot]\n x_input_mul_hot = tf.reshape(x_input_mul_hot, (-1, n_mul_hot_slot, max_len_per_slot))\n \n # aux\n x_input_one_hot_aux = {}\n x_input_mul_hot_aux = {}\n for i in range(num_aux_type):\n # take out\n temp_1 = x_input[:, idx_list[2*i+1]:idx_list[2*i+2]]\n # reshape\n temp_1 = tf.reshape(temp_1, (-1, num_aux_inst_in_data[i], n_one_hot_slot_aux[i]))\n # shape=[None, max_num_ctxt, n_one_hot_slot]\n x_input_one_hot_aux[i] = temp_1[:, 0:max_num_aux_inst_used[i], :]\n # take out\n temp_2 = x_input[:, idx_list[2*i+2]:idx_list[2*i+3]]\n temp_2 = tf.reshape(temp_2, (-1, num_aux_inst_in_data[i], n_mul_hot_slot_aux[i], \\\n max_len_per_slot_aux[i]))\n # shape=[None, max_num_ctxt, n_mul_hot_slot, max_len_per_slot]\n x_input_mul_hot_aux[i] = temp_2[:, 
0:max_num_aux_inst_used[i], :, :]\n \n return x_input_one_hot, x_input_mul_hot, x_input_one_hot_aux, x_input_mul_hot_aux\n\n# add mask\ndef get_masked_one_hot(x_input_one_hot):\n data_mask = tf.cast(tf.greater(x_input_one_hot, 0), tf.float32)\n data_mask = tf.expand_dims(data_mask, axis = 2)\n data_mask = tf.tile(data_mask, (1,1,k))\n # output: (?, n_one_hot_slot, k)\n data_embed_one_hot = tf.nn.embedding_lookup(emb_mat, x_input_one_hot)\n data_embed_one_hot_masked = tf.multiply(data_embed_one_hot, data_mask)\n return data_embed_one_hot_masked\n\ndef get_masked_mul_hot(x_input_mul_hot):\n data_mask = tf.cast(tf.greater(x_input_mul_hot, 0), tf.float32)\n data_mask = tf.expand_dims(data_mask, axis = 3)\n data_mask = tf.tile(data_mask, (1,1,1,k))\n # output: (?, n_mul_hot_slot, max_len_per_slot, k)\n data_embed_mul_hot = tf.nn.embedding_lookup(emb_mat, x_input_mul_hot)\n data_embed_mul_hot_masked = tf.multiply(data_embed_mul_hot, data_mask)\n return data_embed_mul_hot_masked\n\ndef get_masked_one_hot_aux(x_input_one_hot_ctxt):\n data_mask = tf.cast(tf.greater(x_input_one_hot_ctxt, 0), tf.float32)\n data_mask = tf.expand_dims(data_mask, axis = 3)\n data_mask = tf.tile(data_mask, (1,1,1,k))\n # output: (?, max_num_ctxt, n_one_hot_slot, k)\n data_embed_one_hot = tf.nn.embedding_lookup(emb_mat, x_input_one_hot_ctxt)\n data_embed_one_hot_masked = tf.multiply(data_embed_one_hot, data_mask)\n return data_embed_one_hot_masked\n\ndef get_masked_mul_hot_aux(x_input_mul_hot_ctxt):\n data_mask = tf.cast(tf.greater(x_input_mul_hot_ctxt, 0), tf.float32)\n data_mask = tf.expand_dims(data_mask, axis = 4)\n data_mask = tf.tile(data_mask, (1,1,1,1,k))\n # output: (?, n_mul_hot_slot, max_len_per_slot, k)\n data_embed_mul_hot = tf.nn.embedding_lookup(emb_mat, x_input_mul_hot_ctxt)\n data_embed_mul_hot_masked = tf.multiply(data_embed_mul_hot, data_mask)\n return data_embed_mul_hot_masked\n\ndef prepare_input_embed(x_input_one_hot, x_input_mul_hot):\n # output: (?, n_one_hot_slot, k)\n data_embed_one_hot = get_masked_one_hot(x_input_one_hot)\n # reshape (flatten)\n data_embed_one_hot = tf.reshape(data_embed_one_hot, [-1, n_one_hot_slot*k]) \n # output: (?, n_mul_hot_slot, max_len_per_slot, k)\n data_embed_mul_hot = get_masked_mul_hot(x_input_mul_hot)\n # pooling for mul hot fts\n # sum pooling; no keep dims -> none, n_mul_hot_slot, k\n # dim 2 -- max_len_per_slot, i.e., over mul fts in one slot\n data_embed_mul_hot_pooling = tf.reduce_sum(data_embed_mul_hot, 2)\n data_embed_mul_hot_pooling = tf.reshape(data_embed_mul_hot_pooling, [-1, n_mul_hot_slot*k])\n # concatenate (col-wise; keep num of rows unchanged)\n data_embed_ori = tf.concat([data_embed_one_hot, data_embed_mul_hot_pooling], 1)\n return data_embed_ori\n\n##################################\n# should keep max_num_ctxt dim\ndef prepare_input_embed_aux_interaction(x_input_one_hot_ctxt, x_input_mul_hot_ctxt, \\\n max_num_ctxt, cur_n_one_hot_slot, cur_n_mul_hot_slot):\n # output: (?, max_num_ctxt, n_one_hot_slot, k)\n data_embed_one_hot_ctxt = get_masked_one_hot_aux(x_input_one_hot_ctxt)\n # output: (?, max_num_ctxt, n_mul_hot_slot, max_len_per_slot, k)\n data_embed_mul_hot_ctxt = get_masked_mul_hot_aux(x_input_mul_hot_ctxt)\n # if max_num_ctxt = 1, then this dim will be automatically collapsed\n data_embed_mul_hot_pooling_ctxt = tf.reduce_sum(data_embed_mul_hot_ctxt, 3) \n data_embed_one_hot_ctxt = tf.reshape(data_embed_one_hot_ctxt, \\\n [-1, max_num_ctxt, cur_n_one_hot_slot*k])\n data_embed_mul_hot_pooling_ctxt = tf.reshape(data_embed_mul_hot_pooling_ctxt, 
\\\n [-1, max_num_ctxt, cur_n_mul_hot_slot*k])\n # output dim: none * max_num_ctxt * (n_one_hot_slot + n_mul_hot_slot)k\n data_embed_ctxt = tf.concat([data_embed_one_hot_ctxt, data_embed_mul_hot_pooling_ctxt], 2)\n return data_embed_ctxt\n\n########################################\ndef get_wgt_sum_embed_aux(data_embed_ctxt, W1_ctxt, b1_ctxt, W2_ctxt, b2_ctxt, \\\n max_num_ctxt, total_embed_dim_ctxt):\n # dim: (none*max_num_ctxt) * total_embed_dim\n data_reshape = tf.reshape(data_embed_ctxt, [-1, total_embed_dim_ctxt])\n # dim: (none*max_num_ctxt) * att_hidden_dim\n hidden = tf.matmul(data_reshape, W1_ctxt) + b1_ctxt\n hidden = tf.nn.relu(hidden)\n hidden = tf.nn.dropout(hidden, keep_prob)\n # dim: (none*max_num_ctxt)* 1\n wgt_ctxt = tf.matmul(hidden, W2_ctxt) + b2_ctxt\n # dim: none * max_num_ctxt * 1\n wgt_ctxt = tf.reshape(wgt_ctxt, [-1, max_num_ctxt, 1])\n # nlz over different max_num_ctxt\n nlz_wgt_ctxt = tf.nn.softmax(wgt_ctxt, dim=1)\n # dim: none * max_num_ctxt * total_embed_dim\n temp = nlz_wgt_ctxt * data_embed_ctxt\n # sum over dim max_num_ctxt\n # dim: none * total_embed_dim (same dim as data_embed_ori)\n output = tf.reduce_sum(temp, 1)\n return output\n\n###########################################################\n# input for DNN (embedding ids)\nx_input = tf.placeholder(tf.int32, shape=[None, total_num_ft_col])\n\nx_input_one_hot, x_input_mul_hot, x_input_one_hot_aux, x_input_mul_hot_aux \\\n = partition_input(x_input)\n\n# target vect\ny_target = tf.placeholder(tf.float32, shape=[None, 1])\n# dropout keep prob\nkeep_prob = tf.placeholder(tf.float32)\n\n# emb_mat dim add 1 -> for padding (idx = 0)\nwith tf.device('/cpu:0'):\n emb_mat = tf.Variable(tf.random_normal([n_ft + 1, k], stddev=0.01))\n\n# attention weight\nW1_list = {}; b1_list = {}; W2_list = {}; b2_list = {}\ntotal_embed_dim = {}\nfor i in range(num_aux_type):\n total_embed_dim[i] = k*(n_one_hot_slot_aux[i] + n_mul_hot_slot_aux[i])\n std_a = np.sqrt(2.0/(total_embed_dim[i]+att_hidden_dim))\n std_b = np.sqrt(2.0/att_hidden_dim)\n W1_list[i] = tf.Variable(tf.random_normal([total_embed_dim[i], att_hidden_dim], \\\n stddev=std_a))\n b1_list[i] = tf.Variable(tf.random_normal([att_hidden_dim], stddev=std_b))\n W2_list[i] = tf.Variable(tf.random_normal([att_hidden_dim, 1], stddev=std_b))\n b2_list[i] = tf.Variable(tf.random_normal([1], stddev=0.01))\n\n####### DNN part: ori ########\ndata_embed_ori = prepare_input_embed(x_input_one_hot, x_input_mul_hot)\n\n# ####### DNN part: ctxt, clk, non_clk ########\ndata_embed_aux = {}\nwgt_sum_embed_aux = {}\nfor i in range(num_aux_type):\n data_embed_aux[i] = prepare_input_embed_aux_interaction(x_input_one_hot_aux[i], \\\n x_input_mul_hot_aux[i], max_num_aux_inst_used[i], \\\n n_one_hot_slot_aux[i], n_mul_hot_slot_aux[i])\n wgt_sum_embed_aux[i] = get_wgt_sum_embed_aux(data_embed_aux[i], \\\n W1_list[i], b1_list[i], W2_list[i], b2_list[i], \\\n max_num_aux_inst_used[i], total_embed_dim[i])\n\n# ################################\n# big concatenation\ndata_embed = tf.concat([data_embed_ori, wgt_sum_embed_aux[0]], 1)\nfor i in range(1, len(data_embed_aux)):\n data_embed = tf.concat([data_embed, wgt_sum_embed_aux[i]], 1)\n\n################################\n# include output layer\nn_layer = len(layer_dim)\n \ncur_layer = data_embed\n \ndata_embed_shape = data_embed.get_shape().as_list()\nin_dim = data_embed_shape[1]\n# loop to create DNN struct\nfor i in range(0, n_layer):\n out_dim = layer_dim[i]\n weight = tf.Variable(tf.random_normal(shape=[in_dim, out_dim], 
stddev=np.sqrt(2.0/(in_dim+out_dim))))\n bias = tf.Variable(tf.constant(0.0, shape=[out_dim]))\n # output layer, linear activation\n if i == n_layer - 1:\n cur_layer = tf.matmul(cur_layer, weight) + bias\n else:\n cur_layer = tf.nn.relu(tf.matmul(cur_layer, weight) + bias)\n cur_layer = tf.nn.dropout(cur_layer, keep_prob)\n in_dim = layer_dim[i]\n \ny_hat = cur_layer\n \n# log loss\nloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_hat, labels=y_target))\npred_score = tf.sigmoid(y_hat)\n\nif opt_alg == 'Adam':\n optimizer = tf.train.AdamOptimizer(eta).minimize(loss)\nelse:\n # default\n optimizer = tf.train.AdagradOptimizer(eta).minimize(loss)\n\n########################################\n# Launch the graph.\nconfig = tf.ConfigProto(log_device_placement=False)\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.3\n\nwith tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess, coord)\n\n func.print_time()\n print('Load val data')\n\n # load val data\n val_ft_inst, val_label_inst = sess.run([val_ft, val_label])\n print('Done loading eval data')\n\n # Add ops to save and restore all the variables\n saver = tf.train.Saver() \n train_loss_list = []\n val_avg_auc_list = []\n epoch_list = []\n best_n_round = 0\n best_val_avg_auc = 0\n early_stop_flag = 0\n lower_ct = 0 \n\n func.print_time()\n print('Start train loop')\n \n epoch = -1\n try:\n while not coord.should_stop(): \n epoch += 1 \n train_ft_inst, train_label_inst = sess.run([train_ft, train_label])\n train_label_inst = np.transpose([train_label_inst]) \n \n sess.run(optimizer, feed_dict={x_input:train_ft_inst, \\\n y_target:train_label_inst, keep_prob:kp_prob})\n \n # record loss and accuracy every step_size generations\n if (epoch+1)%record_step_size == 0:\n epoch_list.append(epoch)\n train_loss_temp = sess.run(loss, feed_dict={ \\\n x_input:train_ft_inst, \\\n y_target:train_label_inst, keep_prob:1})\n train_loss_list.append(train_loss_temp)\n \n val_pred_score_all = []\n val_label_all = []\n \n for iii in range(n_val_batch):\n # get batch\n start_idx = iii*batch_size\n end_idx = (iii+1)*batch_size\n cur_val_ft = val_ft_inst[start_idx: end_idx]\n cur_val_label = val_label_inst[start_idx: end_idx]\n # pred score\n cur_val_pred_score = sess.run(pred_score, feed_dict={ \\\n x_input:cur_val_ft, keep_prob:1})\n val_pred_score_all.append(cur_val_pred_score.flatten())\n val_label_all.append(cur_val_label) \n \n # calculate auc\n val_pred_score_re = func.list_flatten(val_pred_score_all)\n val_label_re = func.list_flatten(val_label_all)\n val_auc_temp, _, _ = func.cal_auc(val_pred_score_re, val_label_re)\n # record all val results \n val_avg_auc_list.append(val_auc_temp)\n \n # record best and save models\n if val_auc_temp > best_val_avg_auc:\n best_val_avg_auc = val_auc_temp\n best_n_round = epoch\n # Save the variables to disk\n save_path = saver.save(sess, model_saving_addr)\n print(\"Model saved in file: %s\" % save_path)\n # count of consecutive lower\n if val_auc_temp < best_val_avg_auc:\n lower_ct += 1\n # once higher or equal, set to 0\n else:\n lower_ct = 0\n \n if lower_ct >= max_num_lower_ct:\n early_stop_flag = 1\n \n auc_and_loss = [epoch+1, train_loss_temp, val_auc_temp]\n auc_and_loss = [np.round(xx,4) for xx in auc_and_loss]\n func.print_time() \n print('Generation # {}. Train Loss: {:.4f}. 
Val Avg AUC: {:.4f}.'\\\n .format(*auc_and_loss))\n \n if early_stop_flag == 1:\n break\n \n except tf.errors.OutOfRangeError:\n print('Done training -- epoch limit reached')\n \n # after training\n saver.restore(sess, model_saving_addr)\n print(\"Model restored.\")\n \n # load test data\n test_pred_score_all = []\n test_label_all = []\n test_loss_all = []\n try:\n while True:\n test_ft_inst, test_label_inst = sess.run([test_ft, test_label])\n cur_test_pred_score = sess.run(pred_score, feed_dict={ \\\n x_input:test_ft_inst, keep_prob:1})\n test_pred_score_all.append(cur_test_pred_score.flatten())\n test_label_all.append(test_label_inst)\n \n cur_test_loss = sess.run(loss, feed_dict={ \\\n x_input:test_ft_inst, \\\n y_target: np.transpose([test_label_inst]), keep_prob:1})\n test_loss_all.append(cur_test_loss)\n\n except tf.errors.OutOfRangeError:\n print('Done loading testing data -- epoch limit reached') \n finally:\n coord.request_stop()\n \n coord.join(threads) \n \n # calculate auc\n test_pred_score_re = func.list_flatten(test_pred_score_all)\n test_label_re = func.list_flatten(test_label_all)\n test_auc, _, _ = func.cal_auc(test_pred_score_re, test_label_re)\n test_rmse = func.cal_rmse(test_pred_score_re, test_label_re)\n test_loss = np.mean(test_loss_all)\n \n # rounding\n test_auc = np.round(test_auc, 4)\n test_rmse = np.round(test_rmse, 4)\n test_loss = np.round(test_loss, 5)\n train_loss_list = [np.round(xx,4) for xx in train_loss_list]\n val_avg_auc_list = [np.round(xx,4) for xx in val_avg_auc_list]\n \n print('test_auc = ', test_auc)\n print('test_rmse =', test_rmse)\n print('test_loss =', test_loss)\n print('train_loss_list =', train_loss_list)\n print('val_avg_auc_list =', val_avg_auc_list)\n \n # write output to file\n with open(output_file_name, 'a') as f:\n now = datetime.datetime.now()\n time_str = now.strftime(cfg.time_style)\n f.write(time_str + '\\n')\n f.write('train_file_name = ' + train_file_name[0] + '\\n')\n f.write('learning_rate = ' + str(eta) + ', n_epoch = ' + str(n_epoch) \\\n + ', emb_size = ' + str(k) + '\\n')\n f.write('test_auc = ' + str(test_auc) + '\\n')\n f.write('test_rmse = ' + str(test_rmse) + '\\n')\n f.write('test_loss = ' + str(test_loss) + '\\n')\n f.write('train_loss_list =' + str(train_loss_list) + '\\n')\n f.write('val_avg_auc_list =' + str(val_avg_auc_list) + '\\n')\n f.write('-'*50 + '\\n')\n\n" ]
[ [ "tensorflow.device", "tensorflow.concat", "numpy.sqrt", "tensorflow.reduce_sum", "numpy.cumsum", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "numpy.round", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.greater", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.tile", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.train.AdagradOptimizer", "tensorflow.train.Coordinator", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.transpose", "tensorflow.set_random_seed", "numpy.array", "tensorflow.nn.embedding_lookup", "tensorflow.nn.relu", "tensorflow.multiply", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.local_variables_initializer", "tensorflow.train.start_queue_runners", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.expand_dims", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
MingchengZuo/fast-cma-es
[ "ada34f50b93d52493d768ad67addaf915f9e0d2f", "ada34f50b93d52493d768ad67addaf915f9e0d2f" ]
[ "fcmaes/pygmoretry.py", "examples/spring.py" ]
[ "# Copyright (c) Dietmar Wolz.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory.\n\nimport math\nimport os\nimport sys\nfrom numpy.random import Generator, MT19937, SeedSequence\nfrom scipy.optimize import OptimizeResult, Bounds\nimport multiprocessing as mp\nfrom multiprocessing import Process\nfrom fcmaes.retry import Store\nfrom fcmaes.optimizer import logger\n\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\n\ndef minimize(prob, \n algo,\n value_limit = math.inf,\n num_retries = 100*mp.cpu_count(),\n logger = None,\n workers = mp.cpu_count(),\n popsize = 1, \n ): \n \"\"\"Minimization of a scalar function of one or more variables using parallel retry.\n Similar to fcmaes.retry but works with pygmo / pagmo problems + algorithms.\n For problems with equality/inequality contraints or multiple objectives fcmaes.retry cannot \n be used since a fcmaes objective function is expected to return a single value. \n pygmo / pagmo support both contraints and multiple objectives. Alternatively\n you can use https://esa.github.io/pygmo2/archipelago.html but it is a bit tricky\n to configure it to use multiprocessing. As default it uses multithreading which means\n it scales less with the number of available processor cores. \n \n Parameters\n ----------\n prob : pygmo/pagmo problem, https://esa.github.io/pagmo2/docs/cpp/problem.html\n The objective function to be minimized.\n algo : pygmo/pagmo algorithm, https://esa.github.io/pagmo2/docs/cpp/algorithm.html\n The optimizer\n value_limit : float, optional\n Upper limit for optimized function values to be stored. \n num_retries : int, optional\n Number of optimization retries. \n logger : logger, optional\n logger for log output of the retry mechanism. If None, logging\n is switched off. Default is a logger which logs both to stdout and\n appends to a file ``optimizer.log``.\n workers : int, optional\n number of parallel processes used. Default is mp.cpu_count()\n popsize = int, optional\n population size \n \n Returns\n -------\n res : scipy.OptimizeResult\n The optimization result is represented as an ``OptimizeResult`` object.\n Important attributes are: ``x`` the solution array, \n ``fun`` the best function value, ``nfev`` the number of function evaluations,\n ``success`` a Boolean flag indicating if the optimizer exited successfully. 
\"\"\"\n\n lb, ub = prob.get_bounds()\n bounds = Bounds(lb, ub)\n store = Store(bounds, logger = logger)\n return retry(store, prob, algo, num_retries, value_limit, popsize, workers)\n \ndef retry(store, prob, algo, num_retries, value_limit = math.inf, popsize=1, workers=mp.cpu_count()):\n try:\n import pygmo as pg\n except ImportError as e:\n raise ImportError(\"Please install PYGMO (pip install pygmo) to use PAGMO optimizers\") from e\n sg = SeedSequence()\n rgs = [Generator(MT19937(s)) for s in sg.spawn(workers)]\n proc=[Process(target=_retry_loop,\n args=(pid, rgs, store, prob, algo, num_retries, value_limit, popsize, pg)) for pid in range(workers)]\n [p.start() for p in proc]\n [p.join() for p in proc]\n store.sort()\n store.dump()\n return OptimizeResult(x=store.get_x_best(), fun=store.get_y_best(), \n nfev=store.get_count_evals(), success=True)\n \ndef _retry_loop(pid, rgs, store, prob, algo, num_retries, value_limit, popsize, pg):\n\n #reinitialize logging config for windows - multi threading fix\n if 'win' in sys.platform and not store.logger is None:\n store.logger = logger()\n \n while store.get_runs_compare_incr(num_retries): \n try: \n seed = int(rgs[pid].uniform(0, 2**32 - 1))\n pop = pg.population(prob, popsize, seed=seed)\n pop = algo.evolve(pop)\n except Exception:\n pass # ignore \"Maximum number of iteration reached\" \n sol = pop.champion_x\n y = pop.champion_f\n evals = pop.problem.get_fevals()\n \n feasible = prob.feasibility_x(pop.champion_x)\n if feasible:\n store.add_result(y[0], sol, evals, value_limit)\n store.dump()\n", "# Copyright (c) Dietmar Wolz.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory.\n\n# This problem consists of minimizing the weight of a tension/compression spring subject to constraints on \n# shear stress, surge frequency and minimum deflection\n# The design variables are:\n\n# - x1: the mean coil diameter\n# - x2: the wire diameter\n# - x3: the number of active coils\n\n# which are subject to four inequality constraints.\n# fcmaes retry used with a penalty for constraint violations 'weight_penalty'\n# works as well as scipy minimize.\n\n# This example is taken from https://www.sciencedirect.com/science/article/abs/pii/S0096300306015098\n\nimport math\nimport time\nimport numpy as np\nfrom scipy.optimize import Bounds, minimize\nfrom fcmaes.optimizer import dtime, random_x, logger\nfrom fcmaes import retry, advretry\n\nbounds = Bounds([0.01, 0.01, 0.01], [20, 20, 20])\n\ndef feasible(x):\n x = np.array(x)\n return np.maximum(np.minimum(x, bounds.ub), bounds.lb)\n\ndef constraint_ineq(x):\n return [ x[1]**3 * x[2] / (71785 * x[0]**4) - 1,\n 1 - (4*x[1]**2 - x[0]*x[1]) / (12566*(x[1]*x[0]**3 - x[0]**4)) - 1/(5108*x[0]**2),\n 140.45*x[0]/(x[1]**2 * x[2]) - 1,\n 1 - (x[1] + x[0]) / 1.5]\n\ndef penalty(x):\n return - np.sum(np.minimum(constraint_ineq(x), 0))\n\ndef weight(x): \n return (x[2] + 2) * x[1]*x[0]**2\n\ndef weight_penalty(x): \n try:\n val = weight(x) \n return val + 100000*penalty(x)\n except ZeroDivisionError:\n return 1E99\n \ndef print_result(ret, best, t0, i):\n x = feasible(ret.x) # make sure result is feasible\n w = weight(x)\n val = weight_penalty(x) # add penalty for ineq constraint violation\n if val < best:\n pen = penalty(x) # check ineq constraint\n best = min(val, best)\n print(\"{0}: time = {1:.1f} best = {2:.8f} f(xmin) = {3:.8f} ineq = {4:.5f}\"\n .format(i+1, dtime(t0), best, w, pen))\n return best\n\ndef test_minimize_SLSQP(fun, num):\n ineq_cons = {'type': 
'ineq', 'fun' : constraint_ineq}\n\n best = math.inf\n t0 = time.perf_counter();\n for i in range(num):\n guess = random_x(bounds.lb, bounds.ub)\n ret = minimize(fun, x0 = guess, bounds = bounds,\n method='SLSQP', constraints=[ineq_cons])\n if ret.success:\n best = print_result(ret, best, t0, i)\n\nif __name__ == '__main__':\n \n # try as alternative \n # test_minimize_SLSQP(weight, 10000)\n \n t0 = time.perf_counter();\n ret = advretry.minimize(weight_penalty, bounds, logger = logger(), num_retries = 320)\n #ret = retry.minimize(weight_penalty, bounds, logger = logger(), num_retries=32)\n print_result(ret, 10000, t0, 0)\n" ]
[ [ "numpy.random.MT19937", "scipy.optimize.Bounds", "numpy.random.SeedSequence" ], [ "scipy.optimize.Bounds", "numpy.array", "numpy.minimum", "scipy.optimize.minimize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "1.5", "1.7", "1.2", "1.8" ], "tensorflow": [] } ]
aseyboldt/arviz
[ "1fb40ff442f5ba4b8d11ceeaef27e6c339eb1685" ]
[ "arviz/plots/backends/bokeh/forestplot.py" ]
[ "# pylint: disable=all\n\"\"\"Bokeh forestplot.\"\"\"\nfrom collections import defaultdict, OrderedDict\nfrom itertools import cycle, tee\n\nimport bokeh.plotting as bkp\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import Band, ColumnDataSource\nfrom bokeh.models.annotations import Title\nfrom bokeh.models.tickers import FixedTicker\n\nfrom . import backend_kwarg_defaults, backend_show\nfrom ...plot_utils import _scale_fig_size, xarray_var_iter, make_label, get_bins, _fast_kde\nfrom ....rcparams import rcParams\nfrom ....stats import hpd\nfrom ....stats.diagnostics import _ess, _rhat\nfrom ....stats.stats_utils import histogram\nfrom ....utils import conditional_jit\n\n\ndef pairwise(iterable):\n \"\"\"From itertools cookbook. [a, b, c, ...] -> (a, b), (b, c), ...\"\"\"\n first, second = tee(iterable)\n next(second, None)\n return zip(first, second)\n\n\ndef plot_forest(\n ax,\n datasets,\n var_names,\n model_names,\n combined,\n colors,\n figsize,\n width_ratios,\n linewidth,\n markersize,\n kind,\n ncols,\n credible_interval,\n quartiles,\n rope,\n ridgeplot_overlap,\n ridgeplot_alpha,\n ridgeplot_kind,\n textsize,\n ess,\n r_hat,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh forest plot.\"\"\"\n plot_handler = PlotHandler(\n datasets, var_names=var_names, model_names=model_names, combined=combined, colors=colors\n )\n\n if figsize is None:\n figsize = (min(12, sum(width_ratios) * 2), plot_handler.fig_height())\n\n (figsize, _, _, _, auto_linewidth, auto_markersize) = _scale_fig_size(figsize, textsize, 1.1, 1)\n\n if linewidth is None:\n linewidth = auto_linewidth\n\n if markersize is None:\n markersize = auto_markersize\n\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(\n (\"tools\", \"plot.bokeh.tools\"),\n (\"output_backend\", \"plot.bokeh.output_backend\"),\n (\"dpi\", \"plot.bokeh.figure.dpi\"),\n ),\n **backend_kwargs,\n }\n dpi = backend_kwargs.pop(\"dpi\")\n\n if ax is None:\n axes = []\n for i, width_r in zip(range(ncols), width_ratios):\n if i == 0:\n ax = bkp.figure(\n height=int(figsize[0]) * dpi,\n width=int(figsize[1] * (width_r / sum(width_ratios)) * dpi * 1.25),\n **backend_kwargs,\n )\n _y_range = ax.y_range\n else:\n ax = bkp.figure(\n height=figsize[0] * dpi,\n width=int(figsize[1] * (width_r / sum(width_ratios)) * dpi * 1.25),\n y_range=_y_range,\n **backend_kwargs,\n )\n axes.append(ax)\n else:\n axes = ax\n\n axes = np.atleast_2d(axes)\n\n if kind == \"forestplot\":\n plot_handler.forestplot(\n credible_interval, quartiles, linewidth, markersize, axes[0, 0], rope,\n )\n elif kind == \"ridgeplot\":\n plot_handler.ridgeplot(\n ridgeplot_overlap, linewidth, ridgeplot_alpha, ridgeplot_kind, axes[0, 0]\n )\n else:\n raise TypeError(\n \"Argument 'kind' must be one of 'forestplot' or \"\n \"'ridgeplot' (you provided {})\".format(kind)\n )\n\n idx = 1\n if ess:\n plot_handler.plot_neff(axes[0, idx], markersize)\n idx += 1\n\n if r_hat:\n plot_handler.plot_rhat(axes[0, idx], markersize)\n idx += 1\n\n for i, ax_ in enumerate(axes.ravel()):\n if kind == \"ridgeplot\":\n ax_.xgrid.grid_line_color = None\n ax_.ygrid.grid_line_color = None\n else:\n ax_.ygrid.grid_line_color = None\n\n if i != 0:\n ax_.yaxis.visible = False\n\n ax_.outline_line_color = None\n\n labels, ticks = plot_handler.labels_and_ticks()\n\n axes[0, 0].yaxis.ticker = FixedTicker(ticks=ticks)\n axes[0, 0].yaxis.major_label_overrides = dict(zip(map(str, ticks), map(str, labels)))\n\n 
all_plotters = list(plot_handler.plotters.values())\n y_max = plot_handler.y_max() - all_plotters[-1].group_offset\n if kind == \"ridgeplot\": # space at the top\n y_max += ridgeplot_overlap\n\n axes[0, 0].y_range._property_values[\n \"start\"\n ] = -all_plotters[ # pylint: disable=protected-access\n 0\n ].group_offset\n axes[0, 0].y_range._property_values[\"end\"] = y_max # pylint: disable=protected-access\n\n if backend_show(show):\n grid = gridplot(axes.tolist(), toolbar_location=\"above\")\n bkp.show(grid)\n\n return axes\n\n\nclass PlotHandler:\n \"\"\"Class to handle logic from ForestPlot.\"\"\"\n\n # pylint: disable=inconsistent-return-statements\n\n def __init__(self, datasets, var_names, model_names, combined, colors):\n self.data = datasets\n\n if model_names is None:\n if len(self.data) > 1:\n model_names = [\"Model {}\".format(idx) for idx, _ in enumerate(self.data)]\n else:\n model_names = [\"\"]\n elif len(model_names) != len(self.data):\n raise ValueError(\"The number of model names does not match the number of models\")\n\n self.model_names = list(reversed(model_names)) # y-values are upside down\n\n if var_names is None:\n if len(self.data) > 1:\n self.var_names = list(\n set().union(*[OrderedDict(datum.data_vars) for datum in self.data])\n )\n else:\n self.var_names = list(\n reversed(*[OrderedDict(datum.data_vars) for datum in self.data])\n )\n else:\n self.var_names = list(reversed(var_names)) # y-values are upside down\n\n self.combined = combined\n\n if colors == \"cycle\":\n colors = [\n prop\n for _, prop in zip(\n range(len(self.data)), cycle(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n )\n ]\n elif isinstance(colors, str):\n colors = [colors for _ in self.data]\n\n self.colors = list(reversed(colors)) # y-values are upside down\n\n self.plotters = self.make_plotters()\n\n def make_plotters(self):\n \"\"\"Initialize an object for each variable to be plotted.\"\"\"\n plotters, y = {}, 0\n for var_name in self.var_names:\n plotters[var_name] = VarHandler(\n var_name,\n self.data,\n y,\n model_names=self.model_names,\n combined=self.combined,\n colors=self.colors,\n )\n y = plotters[var_name].y_max()\n return plotters\n\n def labels_and_ticks(self):\n \"\"\"Collect labels and ticks from plotters.\"\"\"\n val = self.plotters.values()\n\n @conditional_jit(forceobj=True)\n def label_idxs():\n labels, idxs = [], []\n for plotter in val:\n sub_labels, sub_idxs, _, _ = plotter.labels_ticks_and_vals()\n labels.append(sub_labels)\n idxs.append(sub_idxs)\n return np.concatenate(labels), np.concatenate(idxs)\n\n return label_idxs()\n\n def display_multiple_ropes(self, rope, ax, y, linewidth, rope_var):\n \"\"\"Display ROPE when more than one interval is provided.\"\"\"\n vals = dict(rope[rope_var][0])[\"rope\"]\n ax.line(\n vals,\n (y + 0.05, y + 0.05),\n line_width=linewidth * 2,\n color=[\n color\n for _, color in zip(\n range(3), cycle(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n )\n ][2],\n line_alpha=0.7,\n )\n return ax\n\n def ridgeplot(self, mult, linewidth, alpha, ridgeplot_kind, ax):\n \"\"\"Draw ridgeplot for each plotter.\n\n Parameters\n ----------\n mult : float\n How much to multiply height by. Set this to greater than 1 to have some overlap.\n linewidth : float\n Width of line on border of ridges\n alpha : float\n Transparency of ridges\n kind : string\n By default (\"auto\") continuous variables are plotted using KDEs and discrete ones using\n histograms. 
To override this use \"hist\" to plot histograms and \"density\" for KDEs\n ax : Axes\n Axes to draw on\n \"\"\"\n if alpha is None:\n alpha = 1.0\n for plotter in list(self.plotters.values())[::-1]:\n for x, y_min, y_max, color in list(plotter.ridgeplot(mult, ridgeplot_kind))[::-1]:\n if alpha == 0:\n border = color\n facecolor = None\n else:\n border = \"black\"\n facecolor = color\n if x.dtype.kind == \"i\":\n ax.vbar(\n x=x,\n top=y_max - y_min,\n bottom=y_min,\n fill_alpha=alpha,\n fill_color=facecolor,\n )\n else:\n patch = ax.patch(\n np.concatenate([x, x[::-1]]),\n np.concatenate([y_min, y_max[::-1]]),\n fill_color=color,\n fill_alpha=alpha,\n line_dash=\"solid\",\n line_width=linewidth,\n line_color=border,\n )\n patch.level = \"overlay\"\n return ax\n\n def forestplot(self, credible_interval, quartiles, linewidth, markersize, ax, rope):\n \"\"\"Draw forestplot for each plotter.\n\n Parameters\n ----------\n credible_interval : float\n How wide each line should be\n quartiles : bool\n Whether to mark quartiles\n linewidth : float\n Width of forestplot line\n markersize : float\n Size of marker in center of forestplot line\n ax : Axes\n Axes to draw on\n \"\"\"\n if rope is None or isinstance(rope, dict):\n pass\n elif len(rope) == 2:\n cds = ColumnDataSource(\n {\n \"x\": rope,\n \"lower\": [-2 * self.y_max(), -2 * self.y_max()],\n \"upper\": [self.y_max() * 2, self.y_max() * 2],\n }\n )\n\n band = Band(\n base=\"x\",\n lower=\"lower\",\n upper=\"upper\",\n fill_color=[\n color\n for _, color in zip(\n range(4), cycle(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n )\n ][2],\n line_alpha=0.5,\n source=cds,\n )\n\n ax.renderers.append(band)\n else:\n raise ValueError(\n \"Argument `rope` must be None, a dictionary like\"\n '{\"var_name\": {\"rope\": (lo, hi)}}, or an '\n \"iterable of length 2\"\n )\n # Quantiles to be calculated\n endpoint = 100 * (1 - credible_interval) / 2\n if quartiles:\n qlist = [endpoint, 25, 50, 75, 100 - endpoint]\n else:\n qlist = [endpoint, 50, 100 - endpoint]\n\n for plotter in self.plotters.values():\n for y, rope_var, values, color in plotter.treeplot(qlist, credible_interval):\n if isinstance(rope, dict):\n self.display_multiple_ropes(rope, ax, y, linewidth, rope_var)\n\n mid = len(values) // 2\n param_iter = zip(\n np.linspace(2 * linewidth, linewidth, mid, endpoint=True)[-1::-1], range(mid)\n )\n for width, j in param_iter:\n ax.line(\n [values[j], values[-(j + 1)]], [y, y], line_width=width, line_color=color\n )\n ax.circle(\n x=values[mid], y=y, size=markersize * 0.75, fill_color=color,\n )\n _title = Title()\n _title.text = \"{:.1%} Credible Interval\".format(credible_interval)\n ax.title = _title\n\n return ax\n\n def plot_neff(self, ax, markersize):\n \"\"\"Draw effective n for each plotter.\"\"\"\n max_ess = 0\n for plotter in self.plotters.values():\n for y, ess, color in plotter.ess():\n if ess is not None:\n ax.circle(\n x=ess, y=y, fill_color=color, size=markersize, line_color=\"black\",\n )\n if ess > max_ess:\n max_ess = ess\n ax.x_range._property_values[\"start\"] = 0 # pylint: disable=protected-access\n ax.x_range._property_values[\"end\"] = 1.07 * max_ess # pylint: disable=protected-access\n\n _title = Title()\n _title.text = \"ess\"\n ax.title = _title\n\n ax.xaxis[0].ticker.desired_num_ticks = 3\n\n return ax\n\n def plot_rhat(self, ax, markersize):\n \"\"\"Draw r-hat for each plotter.\"\"\"\n for plotter in self.plotters.values():\n for y, r_hat, color in plotter.r_hat():\n if r_hat is not None:\n ax.circle(x=r_hat, y=y, 
fill_color=color, size=markersize, line_color=\"black\")\n ax.x_range._property_values[\"start\"] = 0.9 # pylint: disable=protected-access\n ax.x_range._property_values[\"end\"] = 2.1 # pylint: disable=protected-access\n\n _title = Title()\n _title.text = \"r_hat\"\n ax.title = _title\n\n ax.xaxis[0].ticker.desired_num_ticks = 3\n\n return ax\n\n def fig_height(self):\n \"\"\"Figure out the height of this plot.\"\"\"\n # hand-tuned\n return (\n 4\n + len(self.data) * len(self.var_names)\n - 1\n + 0.1 * sum(1 for j in self.plotters.values() for _ in j.iterator())\n )\n\n def y_max(self):\n \"\"\"Get maximum y value for the plot.\"\"\"\n return max(p.y_max() for p in self.plotters.values())\n\n\nclass VarHandler:\n \"\"\"Handle individual variable logic.\"\"\"\n\n def __init__(self, var_name, data, y_start, model_names, combined, colors):\n self.var_name = var_name\n self.data = data\n self.y_start = y_start\n self.model_names = model_names\n self.combined = combined\n self.colors = colors\n self.model_color = dict(zip(self.model_names, self.colors))\n max_chains = max(datum.chain.max().values for datum in data)\n self.chain_offset = len(data) * 0.45 / max(1, max_chains)\n self.var_offset = 1.5 * self.chain_offset\n self.group_offset = 2 * self.var_offset\n\n def iterator(self):\n \"\"\"Iterate over models and chains for each variable.\"\"\"\n if self.combined:\n grouped_data = [[(0, datum)] for datum in self.data]\n skip_dims = {\"chain\"}\n else:\n grouped_data = [datum.groupby(\"chain\") for datum in self.data]\n skip_dims = set()\n\n label_dict = OrderedDict()\n for name, grouped_datum in zip(self.model_names, grouped_data):\n for _, sub_data in grouped_datum:\n datum_iter = xarray_var_iter(\n sub_data,\n var_names=[self.var_name],\n skip_dims=skip_dims,\n reverse_selections=True,\n )\n for _, selection, values in datum_iter:\n label = make_label(self.var_name, selection, position=\"beside\")\n if label not in label_dict:\n label_dict[label] = OrderedDict()\n if name not in label_dict[label]:\n label_dict[label][name] = []\n label_dict[label][name].append(values)\n\n y = self.y_start\n for label, model_data in label_dict.items():\n for model_name, value_list in model_data.items():\n if model_name:\n row_label = \"{}: {}\".format(model_name, label)\n else:\n row_label = label\n for values in value_list:\n yield y, row_label, label, values, self.model_color[model_name]\n y += self.chain_offset\n y += self.var_offset\n y += self.group_offset\n\n def labels_ticks_and_vals(self):\n \"\"\"Get labels, ticks, values, and colors for the variable.\"\"\"\n y_ticks = defaultdict(list)\n for y, label, _, vals, color in self.iterator():\n y_ticks[label].append((y, vals, color))\n labels, ticks, vals, colors = [], [], [], []\n for label, data in y_ticks.items():\n labels.append(label)\n ticks.append(np.mean([j[0] for j in data]))\n vals.append(np.vstack([j[1] for j in data]))\n colors.append(data[0][2]) # the colors are all the same\n return labels, ticks, vals, colors\n\n def treeplot(self, qlist, credible_interval):\n \"\"\"Get data for each treeplot for the variable.\"\"\"\n for y, _, label, values, color in self.iterator():\n ntiles = np.percentile(values.flatten(), qlist)\n ntiles[0], ntiles[-1] = hpd(values.flatten(), credible_interval, multimodal=False)\n yield y, label, ntiles, color\n\n def ridgeplot(self, mult, ridgeplot_kind):\n \"\"\"Get data for each ridgeplot for the variable.\"\"\"\n xvals, yvals, pdfs, colors = [], [], [], []\n for y, *_, values, color in self.iterator():\n 
yvals.append(y)\n colors.append(color)\n values = values.flatten()\n values = values[np.isfinite(values)]\n\n if ridgeplot_kind == \"auto\":\n kind = \"hist\" if np.all(np.mod(values, 1) == 0) else \"density\"\n else:\n kind = ridgeplot_kind\n\n if kind == \"hist\":\n bins = get_bins(values)\n _, density, x = histogram(values, bins=bins)\n x = x[:-1]\n elif kind == \"density\":\n density, lower, upper = _fast_kde(values)\n x = np.linspace(lower, upper, len(density))\n\n xvals.append(x)\n pdfs.append(density)\n\n scaling = max(np.max(j) for j in pdfs)\n for y, x, pdf, color in zip(yvals, xvals, pdfs, colors):\n y = y * np.ones_like(x)\n yield x, y, mult * pdf / scaling + y, color\n\n def ess(self):\n \"\"\"Get effective n data for the variable.\"\"\"\n _, y_vals, values, colors = self.labels_ticks_and_vals()\n for y, value, color in zip(y_vals, values, colors):\n yield y, _ess(value), color\n\n def r_hat(self):\n \"\"\"Get rhat data for the variable.\"\"\"\n _, y_vals, values, colors = self.labels_ticks_and_vals()\n for y, value, color in zip(y_vals, values, colors):\n if value.ndim != 2 or value.shape[0] < 2:\n yield y, None, color\n else:\n yield y, _rhat(value), color\n\n def y_max(self):\n \"\"\"Get max y value for the variable.\"\"\"\n end_y = max(y for y, *_ in self.iterator())\n\n if self.combined:\n end_y += self.group_offset\n\n return end_y + 2 * self.group_offset\n" ]
[ [ "numpy.ones_like", "numpy.isfinite", "numpy.linspace", "numpy.concatenate", "numpy.atleast_2d", "numpy.max", "numpy.mean", "numpy.mod", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
annoing-morda/b_diploma
[ "7c04b1014bbf8ebf3deb4fe0f60fd845475b3635", "7c04b1014bbf8ebf3deb4fe0f60fd845475b3635" ]
[ "geometry.py", "quadr_programming.py" ]
[ "import numpy as np\nfrom numpy import linalg as la\n\nclass cylinder: # Класс, описывающий цилиндр\n def __init__(self, o, a, b, c):\n # o - центр основания, a, b - оси эллпса, c - центральная ось цилиндра\n self.o = o\n self.a = a\n self.b = b\n self.c = c\n\n def check(self): # Проверка корректности задания цилиндра\n if np.all(np.matmul(np.transpose(self.a), self.b) == 0):\n # a и b должны быть ортогональны\n matr = np.hstack((self.a, self.b, self.c))\n # a, b и c должны быть ЛНЗ системой\n if la.det(matr) != 0:\n return True\n return False\n\n def get_translation(self):\n # Возвращает вектор параллельного переноса (для смены базиса)\n return -self.o\n\n def get_matrix(self):\n # Возвращет матрицу перехода в базис (a, b, c)\n return la.inv(np.hstack((self.a, self.b, self.c)))\n\n\nclass parallelepiped: # Класс, описывающий параллелограмм\n def __init__(self, o, a, b, c):\n self.o = o\n self.a = a\n self.b = b\n self.c = c\n\n def check(self):\n # Проверка корректности задания цилиндра\n matr = np.hstack((self.a, self.b, self.c))\n # a, b и c должны быть ЛНЗ системой\n if la.det(matr) != 0:\n return True\n return False\n\n def transform(self, matr, delta): # Преобразование координат\n self.o = np.matmul(matr, self.o + delta) # delta - вектор переноса\n self.a = np.matmul(matr, self.a) # matr - матрица перехода\n self.b = np.matmul(matr, self.b)\n self.c = np.matmul(matr, self.c)\n\n def _get_X_4(self): # Возвращает матрицу X_4 (описана в тексте)\n matr = np.hstack((self.o, self.a, self.b, self.c))\n v = [matr[0, ]]\n return np.matmul(np.transpose(v), v)\n\n def _get_Y_4(self): # Возвращает матрицу Y_4 (описана в тексте)\n matr = np.hstack((self.o, self.a, self.b, self.c))\n v = [matr[1, ]]\n return np.matmul(np.transpose(v), v)\n\n def _get_2_projection(self, matr, coord_num): # Возвращает X_2 или Y_2\n rows = np.vstack((matr[1:coord_num], matr[coord_num + 1:]))\n m = np.hstack((rows[:, 1:coord_num], rows[:, coord_num + 1:]))\n return m\n\n def _get_b(self, matr, coord_num, val): # Возвращает b_X или b_Y\n rows = np.vstack((matr[0], matr[coord_num]))\n m = np.hstack((rows[:, 1:coord_num], rows[:, coord_num + 1:]))\n return 2 * np.matmul(np.array([1, val]), m)\n\n def _get_C(self, matr, coord_num, val): # Возвращает C_x или C_y\n rows = np.vstack((matr[0], matr[coord_num]))\n m = np.hstack((np.transpose([rows[:, 0]]),\n np.transpose([rows[:, coord_num]])))\n r = np.array([1, val])\n return np.matmul(np.matmul(r, m), np.transpose(m))[0]\n\n def _get_z_coefs(self, coord):\n # Возвращает коэффициенты из неравенства, вытекающего из ограничения по z\n matr = np.hstack((self.o, self.a, self.b, self.c))\n v = matr[2,:]\n return np.hstack((v[1:coord], v[coord + 1:]))\n\n def _get_z_const(self, coord, val):\n # Возвращает константы из неравенства, вытекающего из ограничения по z\n matr = np.hstack((self.o, self.a, self.b, self.c))\n v = matr[2,:]\n const = v[0] + v[coord] * val\n return np.array([[1 - const], [-const]])\n\n def _get_cyl_axis_cross_plane(self, coord, val):\n # Находим координаты (внутренние для параллелепипеда)\n # пересечения оси Oz с гранью\n matr = np.hstack((self.o, self.a, self.b, self.c))\n rows = matr[:2]\n left = np.hstack((self.a, self.b, self.c))\n left = left[:2]\n right = - self.o[:2]\n left_last = np.array([[0, 0, 0]])\n left_last[0, coord - 1] = 1\n left = np.vstack((left, left_last))\n right = np.vstack((right, np.array([[val]])))\n if la.det(left) == 0: # Если система не имеет решения,\n return 0 # проверку проводить не будем.\n r = la.solve(left, right)\n # 
Находим координаты (внутренние для параллелепипеда) пересечения оси и грани\n r = np.delete(r, coord - 1, 0)\n # Удаляем константную координату\n return r\n\n def get_tasks(self):\n # Постановка задач оптимизации на гранях\n # Названия - как в описании\n X_4 = self._get_X_4()\n Y_4 = self._get_Y_4()\n tasks = []\n for coord in (1, 2, 3):\n # Описание всех граней: каждая грань - пара из номера\n for val in (0., 1.):\n # внутренней координаты, обращенной в константу, и константы (0 или 1)\n M = (self._get_2_projection(X_4, coord) +\n self._get_2_projection(Y_4, coord))\n b = self._get_b(X_4, coord, val) + self._get_b(Y_4, coord, val)\n C = (1 - self._get_C(X_4, coord, val) -\n self._get_C(Y_4, coord, val))\n conds = np.vstack((np.eye(2), -np.eye(2)))\n cond_const = np.transpose(np.array([[1, 1, 0, 0]]))\n # Задаем ограничения по изменяющимся внутренним координатам\n conds = np.vstack((conds, self._get_z_coefs(coord), -\n self._get_z_coefs(coord)))\n cond_const = (\n np.vstack((cond_const, self._get_z_const(coord, val))))\n # Задаем ограничения по z\n r = self._get_cyl_axis_cross_plane(coord, val)\n # Находим координаты пересечения\n tasks.append((M, b, conds, cond_const, C, r))\n # Добавляем кортеж, задающий систему неравенств, наличие решения\n # равнозначно наличию пересечения грани и цилиндра\n\n return tasks\n\n def check_begin_inside(self):\n # Проверяем принадлежность точки (0, 0, 0) параллелограмму\n matr = np.hstack((self.a, self.b, self.c))\n r = la.solve(matr, -self.o)\n return np.all(r <= 1) and np.all(r >=0)\n", "import numpy as np\nimport geometry as geom\nfrom numpy import linalg as la\n\ndef calculate_form_value(M, b, vec):\n # Подсчет значения формы M(x,x) - b(x) на векторе vec\n res = np.dot(np.transpose(vec),\n np.dot(M, vec)) + np.dot(np.transpose(b), vec)\n try :\n return res[0, 0]\n except IndexError:\n return res\n\ndef solve_1dim(M, b, cond, cond_const, cond_eq, cond_eq_const):\n eq_index = 0\n free_index = 1\n M_diff = 2 * M\n b_diff = -b\n\n if cond_eq[0] == 0:\n eq_index = 1\n free_index = 0\n\n M_diff[eq_index] = cond_eq\n b[eq_index] = cond_eq_const\n force_vert_check = False\n if la.det(M_diff) == 0: # На прямой расстояние до оси может не меняться\n # В этом случае минимум - форма на какой-нибудь точке отрезка\n force_vert_check = True # Например, на вершине\n else:\n linear_global_min = la.solve(M_diff, b)\n # Ищем минимум на прямой, содержащей ребро\n conditions_check = np.matmul(cond, linear_global_min)\n inside = np.all(np.less_equal(conditions_check, cond_const))\n if inside: # Проверяем, что минимум лежит внутри ребра\n return (linear_global_min, calculate_form_value(M, b, linear_global_min))\n # Проверяем вершины\n minval = 100\n minvert = np.array([[-1000], [-1000]])\n for i in range(b.shape[0]):\n A_matr = np.vstack((cond_eq, M[i]))\n if la.det(A_matr) != 0:\n b_matr = np.array([[cond_eq_const], [b[i]]])\n vertex = la.solve(A_matr, b_matr)\n conditions_check = np.matmul(cond, vertex)\n # Проверяем, что отобранная точка действительно вершина\n inside = np.all(np.less_equal(conditions_check, cond_const))\n if inside:\n vert_val = calculate_form_value(M, b, vertex)\n if vert_val < minval:\n minval = vert_val\n minvert = vertex\n return(minvert, minval)\n\ndef solve_2dim(task):\n # Минимизируем функция M(x, x) - bx на ребрах многоугольника, заданной\n # условиями cond*x <= cond_const, сравниваем минимум с константой C\n # Проверяем, не попал ли центр эллипса внутрь многоугольника.\n (M, b, cond, cond_const, C, r) = task\n minval = 100\n minpoint = 
np.array([[-1000], [-1000]])\n for i in range(cond_const.shape[0]):\n (curpoint, curval) = solve_1dim(M, b,\n np.delete(cond, i, 0), np.delete(cond_const, i, 0),\n cond[i], cond_const[i, 0] ) # Ищем минимум на ребре\n if curval < minval:\n minval = curval\n minpoint = curpoint\n # Устанавливаем минимум на всех ребрах\n if minval <= C:\n # Если минимум на ребрах нас устроил, сообщаем о наличии пересечения\n return True\n\n if type(r) == type(0): # Если ось Oz параллельна плоскости многоугольнка,\n # все возможные проверки уже пройдены.\n return False\n conditions_check = np.matmul(cond, r)\n # Проверяем принадлежность центра внутренности многоугольника\n center_inside = np.all(np.less_equal(conditions_check, cond_const))\n return center_inside\n" ]
[ [ "numpy.hstack", "numpy.linalg.solve", "numpy.eye", "numpy.matmul", "numpy.linalg.det", "numpy.all", "numpy.delete", "numpy.transpose", "numpy.array", "numpy.vstack" ], [ "numpy.dot", "numpy.linalg.solve", "numpy.matmul", "numpy.linalg.det", "numpy.delete", "numpy.less_equal", "numpy.transpose", "numpy.array", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FRidh/scipy
[ "dabfb4586e0b656b5f6da8b301643b918259e61f", "dabfb4586e0b656b5f6da8b301643b918259e61f", "dabfb4586e0b656b5f6da8b301643b918259e61f", "dabfb4586e0b656b5f6da8b301643b918259e61f", "dabfb4586e0b656b5f6da8b301643b918259e61f", "dabfb4586e0b656b5f6da8b301643b918259e61f", "dabfb4586e0b656b5f6da8b301643b918259e61f", "f9e536b4a9d96322d7e971073602c8969dbd9369" ]
[ "scipy/io/matlab/tests/test_mio.py", "scipy/signal/setup.py", "scipy/sparse/linalg/isolve/lgmres.py", "scipy/optimize/minpack.py", "scipy/weave/tests/test_ast_tools.py", "scipy/linalg/blas.py", "scipy/sparse/csgraph/tests/test_graph_laplacian.py", "scipy/sparse/linalg/eigen/lobpcg/setup.py" ]
[ "#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n''' Nose test generators\n\nNeed function load / save / roundtrip tests\n\n'''\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nfrom os.path import join as pjoin, dirname\nfrom glob import glob\nfrom io import BytesIO\nfrom tempfile import mkdtemp\n\nfrom scipy._lib.six import u, text_type, string_types\n\nimport warnings\nimport shutil\nimport gzip\n\nfrom numpy.testing import (assert_array_equal, assert_array_almost_equal,\n assert_equal, assert_raises, run_module_suite,\n assert_)\n\nimport numpy as np\nfrom numpy import array\nimport scipy.sparse as SP\n\nimport scipy.io.matlab.byteordercodes as boc\nfrom scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError\nfrom scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat)\nfrom scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader,\n MatlabFunction, varmats_from_mat,\n to_writeable, EmptyStructMarker)\nfrom scipy.io.matlab import mio5_params as mio5p\n\ntest_data_path = pjoin(dirname(__file__), 'data')\n\n\ndef mlarr(*args, **kwargs):\n \"\"\"Convenience function to return matlab-compatible 2D array.\"\"\"\n arr = np.array(*args, **kwargs)\n arr.shape = matdims(arr)\n return arr\n\n# Define cases to test\ntheta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)\ncase_table4 = [\n {'name': 'double',\n 'classes': {'testdouble': 'double'},\n 'expected': {'testdouble': theta}\n }]\ncase_table4.append(\n {'name': 'string',\n 'classes': {'teststring': 'char'},\n 'expected': {'teststring':\n array([u('\"Do nine men interpret?\" \"Nine men,\" I nod.')])}\n })\ncase_table4.append(\n {'name': 'complex',\n 'classes': {'testcomplex': 'double'},\n 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}\n })\nA = np.zeros((3,5))\nA[0] = list(range(1,6))\nA[:,0] = list(range(1,4))\ncase_table4.append(\n {'name': 'matrix',\n 'classes': {'testmatrix': 'double'},\n 'expected': {'testmatrix': A},\n })\ncase_table4.append(\n {'name': 'sparse',\n 'classes': {'testsparse': 'sparse'},\n 'expected': {'testsparse': SP.coo_matrix(A)},\n })\nB = A.astype(complex)\nB[0,0] += 1j\ncase_table4.append(\n {'name': 'sparsecomplex',\n 'classes': {'testsparsecomplex': 'sparse'},\n 'expected': {'testsparsecomplex': SP.coo_matrix(B)},\n })\ncase_table4.append(\n {'name': 'multi',\n 'classes': {'theta': 'double', 'a': 'double'},\n 'expected': {'theta': theta, 'a': A},\n })\ncase_table4.append(\n {'name': 'minus',\n 'classes': {'testminus': 'double'},\n 'expected': {'testminus': mlarr(-1)},\n })\ncase_table4.append(\n {'name': 'onechar',\n 'classes': {'testonechar': 'char'},\n 'expected': {'testonechar': array([u('r')])},\n })\n# Cell arrays stored as object arrays\nCA = mlarr(( # tuple for object array creation\n [],\n mlarr([1]),\n mlarr([[1,2]]),\n mlarr([[1,2,3]])), dtype=object).reshape(1,-1)\nCA[0,0] = array(\n [u('This cell contains this string and 3 arrays of increasing length')])\ncase_table5 = [\n {'name': 'cell',\n 'classes': {'testcell': 'cell'},\n 'expected': {'testcell': CA}}]\nCAE = mlarr(( # tuple for object array creation\n mlarr(1),\n mlarr(2),\n mlarr([]),\n mlarr([]),\n mlarr(3)), dtype=object).reshape(1,-1)\nobjarr = np.empty((1,1),dtype=object)\nobjarr[0,0] = mlarr(1)\ncase_table5.append(\n {'name': 'scalarcell',\n 'classes': {'testscalarcell': 'cell'},\n 'expected': {'testscalarcell': objarr}\n })\ncase_table5.append(\n {'name': 'emptycell',\n 'classes': {'testemptycell': 'cell'},\n 'expected': {'testemptycell': 
CAE}})\ncase_table5.append(\n {'name': 'stringarray',\n 'classes': {'teststringarray': 'char'},\n 'expected': {'teststringarray': array(\n [u('one '), u('two '), u('three')])},\n })\ncase_table5.append(\n {'name': '3dmatrix',\n 'classes': {'test3dmatrix': 'double'},\n 'expected': {\n 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}\n })\nst_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)\ndtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]\nst1 = np.zeros((1,1), dtype)\nst1['stringfield'][0,0] = array([u('Rats live on no evil star.')])\nst1['doublefield'][0,0] = st_sub_arr\nst1['complexfield'][0,0] = st_sub_arr * (1 + 1j)\ncase_table5.append(\n {'name': 'struct',\n 'classes': {'teststruct': 'struct'},\n 'expected': {'teststruct': st1}\n })\nCN = np.zeros((1,2), dtype=object)\nCN[0,0] = mlarr(1)\nCN[0,1] = np.zeros((1,3), dtype=object)\nCN[0,1][0,0] = mlarr(2, dtype=np.uint8)\nCN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)\nCN[0,1][0,2] = np.zeros((1,2), dtype=object)\nCN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)\nCN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)\ncase_table5.append(\n {'name': 'cellnest',\n 'classes': {'testcellnest': 'cell'},\n 'expected': {'testcellnest': CN},\n })\nst2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])\nst2[0,0]['one'] = mlarr(1)\nst2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])\nst2[0,0]['two'][0,0]['three'] = array([u('number 3')])\ncase_table5.append(\n {'name': 'structnest',\n 'classes': {'teststructnest': 'struct'},\n 'expected': {'teststructnest': st2}\n })\na = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])\na[0,0]['one'] = mlarr(1)\na[0,0]['two'] = mlarr(2)\na[0,1]['one'] = array([u('number 1')])\na[0,1]['two'] = array([u('number 2')])\ncase_table5.append(\n {'name': 'structarr',\n 'classes': {'teststructarr': 'struct'},\n 'expected': {'teststructarr': a}\n })\nODT = np.dtype([(n, object) for n in\n ['expr', 'inputExpr', 'args',\n 'isEmpty', 'numArgs', 'version']])\nMO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')\nm0 = MO[0,0]\nm0['expr'] = array([u('x')])\nm0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')])\nm0['args'] = array([u('x')])\nm0['isEmpty'] = mlarr(0)\nm0['numArgs'] = mlarr(1)\nm0['version'] = mlarr(1)\ncase_table5.append(\n {'name': 'object',\n 'classes': {'testobject': 'object'},\n 'expected': {'testobject': MO}\n })\nfp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')\nu_str = fp_u_str.read().decode('utf-8')\nfp_u_str.close()\ncase_table5.append(\n {'name': 'unicode',\n 'classes': {'testunicode': 'char'},\n 'expected': {'testunicode': array([u_str])}\n })\ncase_table5.append(\n {'name': 'sparse',\n 'classes': {'testsparse': 'sparse'},\n 'expected': {'testsparse': SP.coo_matrix(A)},\n })\ncase_table5.append(\n {'name': 'sparsecomplex',\n 'classes': {'testsparsecomplex': 'sparse'},\n 'expected': {'testsparsecomplex': SP.coo_matrix(B)},\n })\ncase_table5.append(\n {'name': 'bool',\n 'classes': {'testbools': 'logical'},\n 'expected': {'testbools':\n array([[True], [False]])},\n })\n\ncase_table5_rt = case_table5[:]\n# Inline functions can't be concatenated in matlab, so RT only\ncase_table5_rt.append(\n {'name': 'objectarray',\n 'classes': {'testobjectarray': 'object'},\n 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})\n\n\ndef types_compatible(var1, var2):\n \"\"\"Check if types are same or compatible.\n\n 0-D numpy scalars are compatible with bare python scalars.\n \"\"\"\n type1 = type(var1)\n type2 = 
type(var2)\n if type1 is type2:\n return True\n if type1 is np.ndarray and var1.shape == ():\n return type(var1.item()) is type2\n if type2 is np.ndarray and var2.shape == ():\n return type(var2.item()) is type1\n return False\n\n\ndef _check_level(label, expected, actual):\n \"\"\" Check one level of a potentially nested array \"\"\"\n if SP.issparse(expected): # allow different types of sparse matrices\n assert_(SP.issparse(actual))\n assert_array_almost_equal(actual.todense(),\n expected.todense(),\n err_msg=label,\n decimal=5)\n return\n # Check types are as expected\n assert_(types_compatible(expected, actual),\n \"Expected type %s, got %s at %s\" %\n (type(expected), type(actual), label))\n # A field in a record array may not be an ndarray\n # A scalar from a record array will be type np.void\n if not isinstance(expected,\n (np.void, np.ndarray, MatlabObject)):\n assert_equal(expected, actual)\n return\n # This is an ndarray-like thing\n assert_(expected.shape == actual.shape,\n msg='Expected shape %s, got %s at %s' % (expected.shape,\n actual.shape,\n label))\n ex_dtype = expected.dtype\n if ex_dtype.hasobject: # array of objects\n if isinstance(expected, MatlabObject):\n assert_equal(expected.classname, actual.classname)\n for i, ev in enumerate(expected):\n level_label = \"%s, [%d], \" % (label, i)\n _check_level(level_label, ev, actual[i])\n return\n if ex_dtype.fields: # probably recarray\n for fn in ex_dtype.fields:\n level_label = \"%s, field %s, \" % (label, fn)\n _check_level(level_label,\n expected[fn], actual[fn])\n return\n if ex_dtype.type in (text_type, # string or bool\n np.unicode_,\n np.bool_):\n assert_equal(actual, expected, err_msg=label)\n return\n # Something numeric\n assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)\n\n\ndef _load_check_case(name, files, case):\n for file_name in files:\n matdict = loadmat(file_name, struct_as_record=True)\n label = \"test %s; file %s\" % (name, file_name)\n for k, expected in case.items():\n k_label = \"%s, variable %s\" % (label, k)\n assert_(k in matdict, \"Missing key at %s\" % k_label)\n _check_level(k_label, expected, matdict[k])\n\n\ndef _whos_check_case(name, files, case, classes):\n for file_name in files:\n label = \"test %s; file %s\" % (name, file_name)\n\n whos = whosmat(file_name)\n\n expected_whos = []\n for k, expected in case.items():\n expected_whos.append((k, expected.shape, classes[k]))\n\n whos.sort()\n expected_whos.sort()\n assert_equal(whos, expected_whos,\n \"%s: %r != %r\" % (label, whos, expected_whos)\n )\n\n\n# Round trip tests\ndef _rt_check_case(name, expected, format):\n mat_stream = BytesIO()\n savemat(mat_stream, expected, format=format)\n mat_stream.seek(0)\n _load_check_case(name, [mat_stream], expected)\n\n\n# generator for load tests\ndef test_load():\n for case in case_table4 + case_table5:\n name = case['name']\n expected = case['expected']\n filt = pjoin(test_data_path, 'test%s_*.mat' % name)\n files = glob(filt)\n assert_(len(files) > 0,\n \"No files for test %s using filter %s\" % (name, filt))\n yield _load_check_case, name, files, expected\n\n\n# generator for whos tests\ndef test_whos():\n for case in case_table4 + case_table5:\n name = case['name']\n expected = case['expected']\n classes = case['classes']\n filt = pjoin(test_data_path, 'test%s_*.mat' % name)\n files = glob(filt)\n assert_(len(files) > 0,\n \"No files for test %s using filter %s\" % (name, filt))\n yield _whos_check_case, name, files, expected, classes\n\n\n# generator for round trip tests\ndef 
test_round_trip():\n # compute the name list once, outside the loop, so the comprehension\n # variable cannot shadow the loop's `case` (a real bug under Python 2)\n case_table4_names = [c['name'] for c in case_table4]\n for case in case_table4 + case_table5_rt:\n name = case['name'] + '_round_trip'\n expected = case['expected']\n for format in (['4', '5'] if case['name'] in case_table4_names else ['5']):\n yield _rt_check_case, name, expected, format\n\n\ndef test_gzip_simple():\n xdense = np.zeros((20,20))\n xdense[2,3] = 2.3\n xdense[4,5] = 4.5\n x = SP.csc_matrix(xdense)\n\n name = 'gzip_test'\n expected = {'x':x}\n format = '4'\n\n tmpdir = mkdtemp()\n try:\n fname = pjoin(tmpdir,name)\n mat_stream = gzip.open(fname,mode='wb')\n savemat(mat_stream, expected, format=format)\n mat_stream.close()\n\n mat_stream = gzip.open(fname,mode='rb')\n actual = loadmat(mat_stream, struct_as_record=True)\n mat_stream.close()\n finally:\n shutil.rmtree(tmpdir)\n\n assert_array_almost_equal(actual['x'].todense(),\n expected['x'].todense(),\n err_msg=repr(actual))\n\n\ndef test_multiple_open():\n # Ticket #1039, on Windows: check that files are not left open\n tmpdir = mkdtemp()\n try:\n x = dict(x=np.zeros((2, 2)))\n\n fname = pjoin(tmpdir, \"a.mat\")\n\n # Check that file is not left open\n savemat(fname, x)\n os.unlink(fname)\n savemat(fname, x)\n loadmat(fname)\n os.unlink(fname)\n\n # Check that stream is left open\n f = open(fname, 'wb')\n savemat(f, x)\n f.seek(0)\n f.close()\n\n f = open(fname, 'rb')\n loadmat(f)\n f.seek(0)\n f.close()\n finally:\n shutil.rmtree(tmpdir)\n\n\ndef test_mat73():\n # Check any hdf5 files raise an error\n filenames = glob(\n pjoin(test_data_path, 'testhdf5*.mat'))\n assert_(len(filenames) > 0)\n for filename in filenames:\n fp = open(filename, 'rb')\n assert_raises(NotImplementedError,\n loadmat,\n fp,\n struct_as_record=True)\n fp.close()\n\n\ndef test_warnings():\n # This test is an echo of the previous behavior, which was to raise a\n # warning if the user triggered a search for mat files on the Python system\n # path. We can remove the test in the next version after upcoming (0.13)\n fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n # This should not generate a warning\n mres = loadmat(fname, struct_as_record=True)\n # Neither should this\n mres = loadmat(fname, struct_as_record=False)\n\n\n
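def _example_dict_to_struct():\n # Illustrative sketch, not part of the test suite: a minimal look at the\n # dict -> MATLAB struct round trip that the regression tests below\n # exercise. The helper name is ours; savemat/loadmat are the real API.\n stream = BytesIO()\n savemat(stream, {'st': {'a': 1, 'b': 2}})\n st = loadmat(stream)['st'] # comes back as a 1x1 struct array\n assert_equal(st.shape, (1, 1))\n assert_equal(set(st.dtype.names), set(('a', 'b')))\n\n\ndef test_regression_653():\n # Saving a dictionary with only invalid keys used to raise an error. 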
Now we\n # save this as an empty struct in matlab space.\n sio = BytesIO()\n savemat(sio, {'d':{1:2}}, format='5')\n back = loadmat(sio)['d']\n # Check we got an empty struct equivalent\n assert_equal(back.shape, (1,1))\n assert_equal(back.dtype, np.dtype(np.object))\n assert_(back[0,0] is None)\n\n\ndef test_structname_len():\n # Test limit for length of field names in structs\n lim = 31\n fldname = 'a' * lim\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n savemat(BytesIO(), {'longstruct': st1}, format='5')\n fldname = 'a' * (lim+1)\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n assert_raises(ValueError, savemat, BytesIO(),\n {'longstruct': st1}, format='5')\n\n\ndef test_4_and_long_field_names_incompatible():\n # Long field names option not supported in 4\n my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])\n assert_raises(ValueError, savemat, BytesIO(),\n {'my_struct':my_struct}, format='4', long_field_names=True)\n\n\ndef test_long_field_names():\n # Test limit for length of field names in structs\n lim = 63\n fldname = 'a' * lim\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)\n fldname = 'a' * (lim+1)\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n assert_raises(ValueError, savemat, BytesIO(),\n {'longstruct': st1}, format='5',long_field_names=True)\n\n\ndef test_long_field_names_in_struct():\n # Regression test - long_field_names was erased if you passed a struct\n # within a struct\n lim = 63\n fldname = 'a' * lim\n cell = np.ndarray((1,2),dtype=object)\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n cell[0,0] = st1\n cell[0,1] = st1\n savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)\n #\n # Check to make sure it fails with long field names off\n #\n assert_raises(ValueError, savemat, BytesIO(),\n {'longstruct': cell}, format='5', long_field_names=False)\n\n\ndef test_cell_with_one_thing_in_it():\n # Regression test - make a cell array that's 1 x 2 and put two\n # strings in it. It works. Make a cell array that's 1 x 1 and put\n # a string in it. 
It should work but, in the old days, it didn't.\n cells = np.ndarray((1,2),dtype=object)\n cells[0,0] = 'Hello'\n cells[0,1] = 'World'\n savemat(BytesIO(), {'x': cells}, format='5')\n\n cells = np.ndarray((1,1),dtype=object)\n cells[0,0] = 'Hello, world'\n savemat(BytesIO(), {'x': cells}, format='5')\n\n\ndef test_writer_properties():\n # Tests getting, setting of properties of matrix writer\n mfw = MatFile5Writer(BytesIO())\n yield assert_equal, mfw.global_vars, []\n mfw.global_vars = ['avar']\n yield assert_equal, mfw.global_vars, ['avar']\n yield assert_equal, mfw.unicode_strings, False\n mfw.unicode_strings = True\n yield assert_equal, mfw.unicode_strings, True\n yield assert_equal, mfw.long_field_names, False\n mfw.long_field_names = True\n yield assert_equal, mfw.long_field_names, True\n\n\ndef test_use_small_element():\n # Test whether we're using small data element or not\n sio = BytesIO()\n wtr = MatFile5Writer(sio)\n # First check size for no sde for name\n arr = np.zeros(10)\n wtr.put_variables({'aaaaa': arr})\n w_sz = len(sio.getvalue())\n # Check small name results in largish difference in size\n sio.truncate(0)\n sio.seek(0)\n wtr.put_variables({'aaaa': arr})\n yield assert_, w_sz - len(sio.getvalue()) > 4\n # Whereas increasing name size makes less difference\n sio.truncate(0)\n sio.seek(0)\n wtr.put_variables({'aaaaaa': arr})\n yield assert_, len(sio.getvalue()) - w_sz < 4\n\n\ndef test_save_dict():\n # Test that dict can be saved (as recarray), loaded as matstruct\n dict_types = ((dict, False),)\n try:\n from collections import OrderedDict\n except ImportError:\n pass\n else:\n dict_types += ((OrderedDict, True),)\n ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])\n ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])\n for dict_type, is_ordered in dict_types:\n # Initialize with tuples to keep order for OrderedDict\n d = dict_type([('a', 1), ('b', 2)])\n stream = BytesIO()\n savemat(stream, {'dict': d})\n stream.seek(0)\n vals = loadmat(stream)['dict']\n assert_equal(set(vals.dtype.names), set(['a', 'b']))\n if is_ordered: # Input was ordered, output in ab order\n assert_array_equal(vals, ab_exp)\n else: # Not ordered input, either order output\n if vals.dtype.names[0] == 'a':\n assert_array_equal(vals, ab_exp)\n else:\n assert_array_equal(vals, ba_exp)\n\n\ndef test_1d_shape():\n # New 5 behavior is 1D -> row vector\n arr = np.arange(5)\n for format in ('4', '5'):\n # Row is the default\n stream = BytesIO()\n savemat(stream, {'oned': arr}, format=format)\n vals = loadmat(stream)\n assert_equal(vals['oned'].shape, (1, 5))\n # can be explicitly 'column' for oned_as\n stream = BytesIO()\n savemat(stream, {'oned':arr},\n format=format,\n oned_as='column')\n vals = loadmat(stream)\n assert_equal(vals['oned'].shape, (5,1))\n # while explicit 'row' matches the default\n stream = BytesIO()\n savemat(stream, {'oned':arr},\n format=format,\n oned_as='row')\n vals = loadmat(stream)\n assert_equal(vals['oned'].shape, (1,5))\n\n\ndef test_compression():\n arr = np.zeros(100).reshape((5,20))\n arr[2,10] = 1\n stream = BytesIO()\n savemat(stream, {'arr':arr})\n raw_len = len(stream.getvalue())\n vals = loadmat(stream)\n yield assert_array_equal, vals['arr'], arr\n stream = BytesIO()\n savemat(stream, {'arr':arr}, do_compression=True)\n compressed_len = len(stream.getvalue())\n vals = loadmat(stream)\n yield assert_array_equal, vals['arr'], arr\n yield assert_, raw_len > compressed_len\n # Concatenate, test later\n arr2 = arr.copy()\n arr2[0,0] = 1\n stream = 
BytesIO()\n savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False)\n vals = loadmat(stream)\n yield assert_array_equal, vals['arr2'], arr2\n stream = BytesIO()\n savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)\n vals = loadmat(stream)\n yield assert_array_equal, vals['arr2'], arr2\n\n\ndef test_single_object():\n stream = BytesIO()\n savemat(stream, {'A':np.array(1, dtype=object)})\n\n\ndef test_skip_variable():\n # Test skipping over the first of two variables in a MAT file\n # using mat_reader_factory and put_variables to read them in.\n #\n # This is a regression test of a problem that's caused by\n # using the compressed file reader seek instead of the raw file\n # I/O seek when skipping over a compressed chunk.\n #\n # The problem arises when the chunk is large: this file has\n # a 256x256 array of random (uncompressible) doubles.\n #\n filename = pjoin(test_data_path,'test_skip_variable.mat')\n #\n # Prove that it loads with loadmat\n #\n d = loadmat(filename, struct_as_record=True)\n yield assert_, 'first' in d\n yield assert_, 'second' in d\n #\n # Make the factory\n #\n factory = mat_reader_factory(filename, struct_as_record=True)\n #\n # This is where the factory breaks with an error in MatMatrixGetter.to_next\n #\n d = factory.get_variables('second')\n yield assert_, 'second' in d\n factory.mat_stream.close()\n\n\ndef test_empty_struct():\n # ticket 885\n filename = pjoin(test_data_path,'test_empty_struct.mat')\n # before ticket fix, this would crash with ValueError, empty data\n # type\n d = loadmat(filename, struct_as_record=True)\n a = d['a']\n assert_equal(a.shape, (1,1))\n assert_equal(a.dtype, np.dtype(np.object))\n assert_(a[0,0] is None)\n stream = BytesIO()\n arr = np.array((), dtype='U')\n # before ticket fix, this used to give data type not understood\n savemat(stream, {'arr':arr})\n d = loadmat(stream)\n a2 = d['arr']\n assert_array_equal(a2, arr)\n\n\ndef test_save_empty_dict():\n # saving empty dict also gives empty struct\n stream = BytesIO()\n savemat(stream, {'arr': {}})\n d = loadmat(stream)\n a = d['arr']\n assert_equal(a.shape, (1,1))\n assert_equal(a.dtype, np.dtype(np.object))\n assert_(a[0,0] is None)\n\n\ndef assert_any_equal(output, alternatives):\n \"\"\" Assert `output` is equal to at least one element in `alternatives`\n \"\"\"\n one_equal = False\n for expected in alternatives:\n if np.all(output == expected):\n one_equal = True\n break\n assert_(one_equal)\n\n\ndef test_to_writeable():\n # Test to_writeable function\n res = to_writeable(np.array([1])) # pass through ndarrays\n assert_equal(res.shape, (1,))\n assert_array_equal(res, 1)\n # Dict fields can be written in any order\n expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])\n expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')])\n alternatives = (expected1, expected2)\n assert_any_equal(to_writeable({'a':1,'b':2}), alternatives)\n # Fields with underscores discarded\n assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives)\n # Not-string fields discarded\n assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives)\n # String fields that are not valid Python identifiers discarded\n assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives)\n # Object with field names is equivalent\n\n class klass(object):\n pass\n\n c = klass\n c.a = 1\n c.b = 2\n assert_any_equal(to_writeable(c), alternatives)\n # empty list and tuple go to empty array\n res = to_writeable([])\n assert_equal(res.shape, (0,))\n 
assert_equal(res.dtype.type, np.float64)\n res = to_writeable(())\n assert_equal(res.shape, (0,))\n assert_equal(res.dtype.type, np.float64)\n # None -> None\n assert_(to_writeable(None) is None)\n # Strings to string arrays\n assert_equal(to_writeable('a string').dtype.type, np.str_)\n # Scalars to numpy scalars\n res = to_writeable(1)\n assert_equal(res.shape, ())\n assert_equal(res.dtype.type, np.array(1).dtype.type)\n assert_array_equal(res, 1)\n # Empty dict returns EmptyStructMarker\n assert_(to_writeable({}) is EmptyStructMarker)\n # Object does not have (even empty) __dict__\n assert_(to_writeable(object()) is None)\n # Custom object does have empty __dict__, returns EmptyStructMarker\n\n class C(object):\n pass\n\n assert_(to_writeable(C()) is EmptyStructMarker)\n # dict keys with legal characters are convertible\n res = to_writeable({'a': 1})['a']\n assert_equal(res.shape, (1,))\n assert_equal(res.dtype.type, np.object_)\n # If only fields with illegal characters remain, falls back to EmptyStructMarker\n assert_(to_writeable({'1':1}) is EmptyStructMarker)\n assert_(to_writeable({'_a':1}) is EmptyStructMarker)\n # Unless there are valid fields, in which case structured array\n assert_equal(to_writeable({'1':1, 'f': 2}),\n np.array([(2,)], dtype=[('f', '|O8')]))\n\n\ndef test_recarray():\n # check roundtrip of structured array\n dt = [('f1', 'f8'),\n ('f2', 'S10')]\n arr = np.zeros((2,), dtype=dt)\n arr[0]['f1'] = 0.5\n arr[0]['f2'] = 'python'\n arr[1]['f1'] = 99\n arr[1]['f2'] = 'not perl'\n stream = BytesIO()\n savemat(stream, {'arr': arr})\n d = loadmat(stream, struct_as_record=False)\n a20 = d['arr'][0,0]\n yield assert_equal, a20.f1, 0.5\n yield assert_equal, a20.f2, 'python'\n d = loadmat(stream, struct_as_record=True)\n a20 = d['arr'][0,0]\n yield assert_equal, a20['f1'], 0.5\n yield assert_equal, a20['f2'], 'python'\n # structs always come back as object types\n yield assert_equal, a20.dtype, np.dtype([('f1', 'O'),\n ('f2', 'O')])\n a21 = d['arr'].flat[1]\n yield assert_equal, a21['f1'], 99\n yield assert_equal, a21['f2'], 'not perl'\n\n\ndef test_save_object():\n class C(object):\n pass\n c = C()\n c.field1 = 1\n c.field2 = 'a string'\n stream = BytesIO()\n savemat(stream, {'c': c})\n d = loadmat(stream, struct_as_record=False)\n c2 = d['c'][0,0]\n assert_equal(c2.field1, 1)\n assert_equal(c2.field2, 'a string')\n d = loadmat(stream, struct_as_record=True)\n c2 = d['c'][0,0]\n assert_equal(c2['field1'], 1)\n assert_equal(c2['field2'], 'a string')\n\n\n
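def _example_struct_access():\n # Illustrative sketch, not part of the test suite: the two ways a saved\n # dict/object comes back, depending on struct_as_record. Field values\n # load as 1x1 arrays, so the comparisons below rely on broadcasting.\n stream = BytesIO()\n savemat(stream, {'s': {'f': 1}})\n rec = loadmat(stream, struct_as_record=True)['s'][0,0]\n obj = loadmat(stream, struct_as_record=False)['s'][0,0]\n assert_equal(rec['f'], 1) # record-style indexing\n assert_equal(obj.f, 1) # attribute-style access\n\n\n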
def test_read_opts():\n # tests if read is seeing option sets, at initialization and after\n # initialization\n arr = np.arange(6).reshape(1,6)\n stream = BytesIO()\n savemat(stream, {'a': arr})\n rdr = MatFile5Reader(stream)\n back_dict = rdr.get_variables()\n rarr = back_dict['a']\n assert_array_equal(rarr, arr)\n rdr = MatFile5Reader(stream, squeeze_me=True)\n assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))\n rdr.squeeze_me = False\n assert_array_equal(rarr, arr)\n rdr = MatFile5Reader(stream, byte_order=boc.native_code)\n assert_array_equal(rdr.get_variables()['a'], arr)\n # inverted byte code leads to error on read because of swapped\n # header etc\n rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)\n assert_raises(Exception, rdr.get_variables)\n rdr.byte_order = boc.native_code\n assert_array_equal(rdr.get_variables()['a'], arr)\n arr = np.array(['a string'])\n stream.truncate(0)\n stream.seek(0)\n savemat(stream, {'a': arr})\n rdr = MatFile5Reader(stream)\n assert_array_equal(rdr.get_variables()['a'], arr)\n rdr = MatFile5Reader(stream, chars_as_strings=False)\n carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))\n assert_array_equal(rdr.get_variables()['a'], carr)\n rdr.chars_as_strings = True\n assert_array_equal(rdr.get_variables()['a'], arr)\n\n\ndef test_empty_string():\n # make sure reading empty string does not raise error\n estring_fname = pjoin(test_data_path, 'single_empty_string.mat')\n fp = open(estring_fname, 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_array_equal(d['a'], np.array([], dtype='U1'))\n # empty string round trip. Matlab cannot distinguish\n # between a string array that is empty, and a string array\n # containing a single empty string, because it stores strings as\n # arrays of char. There is no way of having an array of char that\n # is not empty, but contains an empty string.\n stream = BytesIO()\n savemat(stream, {'a': np.array([''])})\n rdr = MatFile5Reader(stream)\n d = rdr.get_variables()\n assert_array_equal(d['a'], np.array([], dtype='U1'))\n stream.truncate(0)\n stream.seek(0)\n savemat(stream, {'a': np.array([], dtype='U1')})\n rdr = MatFile5Reader(stream)\n d = rdr.get_variables()\n assert_array_equal(d['a'], np.array([], dtype='U1'))\n stream.close()\n\n\ndef test_corrupted_data():\n import zlib\n for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),\n (zlib.error, 'corrupted_zlib_checksum.mat')]:\n with open(pjoin(test_data_path, fname), 'rb') as fp:\n rdr = MatFile5Reader(fp)\n assert_raises(exc, rdr.get_variables)\n\n\ndef test_corrupted_data_check_can_be_disabled():\n with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:\n rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)\n rdr.get_variables()\n\n\ndef test_read_both_endian():\n # make sure big- and little- endian data is read correctly\n for fname in ('big_endian.mat', 'little_endian.mat'):\n fp = open(pjoin(test_data_path, fname), 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_array_equal(d['strings'],\n np.array([['hello'],\n ['world']], dtype=np.object))\n assert_array_equal(d['floats'],\n np.array([[2., 3.],\n [3., 4.]], dtype=np.float32))\n\n\ndef test_write_opposite_endian():\n # We don't support writing opposite endian .mat files, but we need to behave\n # correctly if the user supplies an other-endian numpy array to write out\n float_arr = np.array([[2., 3.],\n [3., 4.]])\n int_arr = np.arange(6).reshape((2, 3))\n uni_arr = np.array(['hello', 'world'], dtype='U')\n stream = BytesIO()\n savemat(stream, {'floats': float_arr.byteswap().newbyteorder(),\n 'ints': int_arr.byteswap().newbyteorder(),\n 'uni_arr': uni_arr.byteswap().newbyteorder()})\n rdr = MatFile5Reader(stream)\n d = rdr.get_variables()\n assert_array_equal(d['floats'], float_arr)\n assert_array_equal(d['ints'], int_arr)\n assert_array_equal(d['uni_arr'], uni_arr)\n stream.close()\n\n\ndef test_logical_array():\n # The roundtrip test doesn't verify that we load the data up with the\n # correct (bool) dtype\n with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:\n rdr = MatFile5Reader(fobj, mat_dtype=True)\n d = rdr.get_variables()\n x = np.array([[True], [False]], dtype=np.bool_)\n assert_array_equal(d['testbools'], x)\n assert_equal(d['testbools'].dtype, x.dtype)\n\n\n
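def _example_bool_default_load():\n # Illustrative sketch, not part of the test suite: without mat_dtype=True,\n # a saved bool array reads back as uint8, since logicals are stored as\n # uint8 with the logical flag set (see gh-4022, tested below).\n stream = BytesIO()\n savemat(stream, {'b': np.array([True, False])})\n out = loadmat(stream)['b']\n assert_equal(out.dtype.type, np.uint8)\n assert_array_equal(out, [[1, 0]])\n\n\n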
def test_logical_out_type():\n # Confirm that a bool array is written as uint8, with class uint8\n # See gh-4022\n stream = BytesIO()\n barr = np.array([False, True, False])\n savemat(stream, {'barray': barr})\n stream.seek(0)\n reader = MatFile5Reader(stream)\n reader.initialize_read()\n reader.read_file_header()\n hdr, _ = reader.read_var_header()\n assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)\n assert_equal(hdr.is_logical, True)\n var = reader.read_var_array(hdr, False)\n assert_equal(var.dtype.type, np.uint8)\n\n\ndef test_mat4_3d():\n # test behavior when writing 3D arrays to matlab 4 files\n stream = BytesIO()\n arr = np.arange(24).reshape((2,3,4))\n assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4')\n\n\ndef test_func_read():\n func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')\n fp = open(func_eg, 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_(isinstance(d['testfunc'], MatlabFunction))\n stream = BytesIO()\n wtr = MatFile5Writer(stream)\n assert_raises(MatWriteError, wtr.put_variables, d)\n\n\ndef test_mat_dtype():\n double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')\n fp = open(double_eg, 'rb')\n rdr = MatFile5Reader(fp, mat_dtype=False)\n d = rdr.get_variables()\n fp.close()\n yield assert_equal, d['testmatrix'].dtype.kind, 'u'\n\n fp = open(double_eg, 'rb')\n rdr = MatFile5Reader(fp, mat_dtype=True)\n d = rdr.get_variables()\n fp.close()\n yield assert_equal, d['testmatrix'].dtype.kind, 'f'\n\n\ndef test_sparse_in_struct():\n # reproduces bug found by DC where Cython code was insisting on\n # ndarray return type, but getting sparse matrix\n st = {'sparsefield': SP.coo_matrix(np.eye(4))}\n stream = BytesIO()\n savemat(stream, {'a':st})\n d = loadmat(stream, struct_as_record=True)\n yield assert_array_equal, d['a'][0,0]['sparsefield'].todense(), np.eye(4)\n\n\ndef test_mat_struct_squeeze():\n stream = BytesIO()\n in_d = {'st':{'one':1, 'two':2}}\n savemat(stream, in_d)\n # no error without squeeze\n out_d = loadmat(stream, struct_as_record=False)\n # previous error was with squeeze, with mat_struct\n out_d = loadmat(stream,\n struct_as_record=False,\n squeeze_me=True,\n )\n\n\ndef test_scalar_squeeze():\n stream = BytesIO()\n in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}\n savemat(stream, in_d)\n out_d = loadmat(stream, squeeze_me=True)\n assert_(isinstance(out_d['scalar'], float))\n assert_(isinstance(out_d['string'], string_types))\n assert_(isinstance(out_d['st'], np.ndarray))\n\n\ndef test_str_round():\n # from report by Angus McMorland on mailing list 3 May 2010\n stream = BytesIO()\n in_arr = np.array(['Hello', 'Foob'])\n out_arr = np.array(['Hello', 'Foob '])\n savemat(stream, dict(a=in_arr))\n res = loadmat(stream)\n # resulted in ['HloolFoa', 'elWrdobr']\n assert_array_equal(res['a'], out_arr)\n stream.truncate(0)\n stream.seek(0)\n # Make Fortran ordered version of string\n in_str = in_arr.tostring(order='F')\n in_from_str = np.ndarray(shape=in_arr.shape,\n dtype=in_arr.dtype,\n order='F',\n buffer=in_str)\n savemat(stream, dict(a=in_from_str))\n res = loadmat(stream)\n assert_array_equal(res['a'], out_arr)\n # unicode save did lead to buffer too small error\n stream.truncate(0)\n stream.seek(0)\n in_arr_u = in_arr.astype('U')\n out_arr_u = out_arr.astype('U')\n savemat(stream, {'a': in_arr_u})\n res = loadmat(stream)\n assert_array_equal(res['a'], out_arr_u)\n\n\ndef test_fieldnames():\n # Check that field names are as expected\n stream = BytesIO()\n savemat(stream, {'a': {'a':1, 'b':2}})\n res = loadmat(stream)\n field_names = res['a'].dtype.names\n assert_equal(set(field_names), set(('a', 'b')))\n\n\n
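def _example_variable_names():\n # Illustrative sketch, not part of the test suite: variable_names limits\n # which variables loadmat returns (the test below checks the same thing\n # against real data files); system keys such as __header__ still appear.\n stream = BytesIO()\n savemat(stream, {'a': np.arange(3), 'b': np.arange(4)})\n out = loadmat(stream, variable_names=['a'])\n assert_('a' in out)\n assert_('b' not in out)\n\n\n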
def test_loadmat_varnames():\n # Test that we can get just one variable from a mat file using loadmat\n mat5_sys_names = ['__globals__',\n '__header__',\n '__version__']\n for eg_file, sys_v_names in (\n (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin(\n test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):\n vars = loadmat(eg_file)\n assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))\n vars = loadmat(eg_file, variable_names='a')\n assert_equal(set(vars.keys()), set(['a'] + sys_v_names))\n vars = loadmat(eg_file, variable_names=['a'])\n assert_equal(set(vars.keys()), set(['a'] + sys_v_names))\n vars = loadmat(eg_file, variable_names=['theta'])\n assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))\n vars = loadmat(eg_file, variable_names=('theta',))\n assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))\n vnames = ['theta']\n vars = loadmat(eg_file, variable_names=vnames)\n # loadmat must not modify the variable_names list passed in\n assert_equal(vnames, ['theta'])\n\n\ndef test_round_types():\n # Check that saving, loading preserves dtype in most cases\n arr = np.arange(10)\n stream = BytesIO()\n for dts in ('f8','f4','i8','i4','i2','i1',\n 'u8','u4','u2','u1','c16','c8'):\n stream.truncate(0)\n stream.seek(0) # needed for BytesIO in python 3\n savemat(stream, {'arr': arr.astype(dts)})\n vars = loadmat(stream)\n assert_equal(np.dtype(dts), vars['arr'].dtype)\n\n\ndef test_varmats_from_mat():\n # Make a mat file with several variables, write it, read it back\n names_vars = (('arr', mlarr(np.arange(10))),\n ('mystr', mlarr('a string')),\n ('mynum', mlarr(10)))\n\n # Dict like thing to give variables in defined order\n class C(object):\n def items(self):\n return names_vars\n stream = BytesIO()\n savemat(stream, C())\n varmats = varmats_from_mat(stream)\n assert_equal(len(varmats), 3)\n for i in range(3):\n name, var_stream = varmats[i]\n exp_name, exp_res = names_vars[i]\n assert_equal(name, exp_name)\n res = loadmat(var_stream)\n assert_array_equal(res[name], exp_res)\n\n\ndef test_one_by_zero():\n # Test 1x0 chars get read correctly\n func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')\n fp = open(func_eg, 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_equal(d['var'].shape, (0,))\n\n\ndef test_load_mat4_le():\n # We were getting byte order wrong when reading little-endian float64 dense\n # matrices on big-endian platforms\n mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')\n vars = loadmat(mat4_fname)\n assert_array_equal(vars['a'], [[0.1, 1.2]])\n\n\ndef test_unicode_mat4():\n # Mat4 should save unicode as latin1\n bio = BytesIO()\n var = {'second_cat': u('Schrödinger')}\n savemat(bio, var, format='4')\n var_back = loadmat(bio)\n assert_equal(var_back['second_cat'], var['second_cat'])\n\n\ndef test_logical_sparse():\n # Test we can read logical sparse stored in mat file as bytes.\n # See https://github.com/scipy/scipy/issues/3539.\n # In some files saved by MATLAB, the sparse data elements (Real Part\n # Subelement in MATLAB speak) are stored with apparent type double\n # (miDOUBLE) but are in fact single bytes.\n filename = pjoin(test_data_path,'logical_sparse.mat')\n # Before fix, this would crash with:\n # ValueError: indices and data should have the same size\n d = loadmat(filename, struct_as_record=True)\n log_sp = d['sp_log_5_4']\n assert_(isinstance(log_sp, SP.csc_matrix))\n assert_equal(log_sp.dtype.type, np.bool_)\n assert_array_equal(log_sp.toarray(),\n [[True, True, True, False],\n [False, False, True, False],\n [False, False, True, False],\n [False, False, False, False],\n [False, False, False, False]])\n\n\n
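def _example_sparse_round_trip():\n # Illustrative sketch, not part of the test suite: scipy.sparse matrices\n # survive a savemat/loadmat round trip, coming back in CSC form.\n stream = BytesIO()\n savemat(stream, {'sp': SP.csc_matrix(np.eye(3))})\n out = loadmat(stream)['sp']\n assert_(SP.issparse(out))\n assert_array_equal(out.todense(), np.eye(3))\n\n\ndef test_empty_sparse():\n # Can we read 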
empty sparse matrices?\n sio = BytesIO()\n import scipy.sparse\n empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])\n savemat(sio, dict(x=empty_sparse))\n sio.seek(0)\n res = loadmat(sio)\n assert_array_equal(res['x'].shape, empty_sparse.shape)\n assert_array_equal(res['x'].todense(), 0)\n # Do empty sparse matrices get written with max nnz 1?\n # See https://github.com/scipy/scipy/issues/4208\n sio.seek(0)\n reader = MatFile5Reader(sio)\n reader.initialize_read()\n reader.read_file_header()\n hdr, _ = reader.read_var_header()\n assert_equal(hdr.nzmax, 1)\n\n\ndef test_empty_mat_error():\n # Test we get a specific warning for an empty mat file\n sio = BytesIO()\n assert_raises(MatReadError, loadmat, sio)\n\n\ndef test_miuint32_compromise():\n # Reader should accept miUINT32 for miINT32, but check signs\n # mat file with miUINT32 for miINT32, but OK values\n filename = pjoin(test_data_path,'miuint32_for_miint32.mat')\n res = loadmat(filename)\n assert_equal(res['an_array'], np.arange(10)[None, :])\n # mat file with miUINT32 for miINT32, with negative value\n filename = pjoin(test_data_path, 'bad_miuint32.mat')\n assert_raises(ValueError, loadmat, filename)\n\n\ndef test_miutf8_for_miint8_compromise():\n # Check reader accepts ascii as miUTF8 for array names\n filename = pjoin(test_data_path,'miutf8_array_name.mat')\n res = loadmat(filename)\n assert_equal(res['array_name'], [[1]])\n # mat file with non-ascii utf8 name raises error\n filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')\n assert_raises(ValueError, loadmat, filename)\n\n\ndef test_bad_utf8():\n # Check that reader reads bad UTF with 'replace' option\n filename = pjoin(test_data_path,'broken_utf8.mat')\n res = loadmat(filename)\n assert_equal(res['bad_string'],\n b'\\x80 am broken'.decode('utf8', 'replace'))\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\nfrom scipy._build_utils import numpy_nodepr_api\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n\n config = Configuration('signal', parent_package, top_path)\n\n config.add_data_dir('tests')\n\n config.add_extension('sigtools',\n sources=['sigtoolsmodule.c', 'firfilter.c',\n 'medianfilter.c', 'lfilter.c.src',\n 'correlate_nd.c.src'],\n depends=['sigtools.h'],\n include_dirs=['.'],\n **numpy_nodepr_api)\n\n config.add_extension('_spectral', sources=['_spectral.c'])\n config.add_extension('_max_len_seq', sources=['_max_len_seq.c'])\n\n spline_src = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c',\n 'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c']\n config.add_extension('spline', sources=spline_src, **numpy_nodepr_api)\n\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "# Copyright (C) 2009, Pauli Virtanen <[email protected]>\n# Distributed under the same license as Scipy.\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom scipy._lib.six import xrange\nfrom scipy.linalg import get_blas_funcs\nfrom .utils import make_system\n\n__all__ = ['lgmres']\n\n\ndef norm2(q):\n q = np.asarray(q)\n nrm2 = get_blas_funcs('nrm2', dtype=q.dtype)\n return nrm2(q)\n\n\ndef lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,\n inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True):\n \"\"\"\n Solve a matrix equation using the LGMRES algorithm.\n\n 
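A small illustrative example (a sketch using an arbitrary well-conditioned\n 2x2 system; lgmres returns the solution and an integer convergence flag):\n\n >>> import numpy as np\n >>> from scipy.sparse import csc_matrix\n >>> from scipy.sparse.linalg import lgmres\n >>> A = csc_matrix([[4., 1.], [1., 3.]])\n >>> b = np.array([1., 2.])\n >>> x, info = lgmres(A, b)\n >>> info # 0 means convergence to tolerance\n 0\n >>> np.allclose(A.dot(x), b, atol=1e-5)\n True\n\n 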
The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems\n in the convergence in restarted GMRES, and often converges in fewer\n iterations.\n\n Parameters\n ----------\n A : {sparse matrix, dense matrix, LinearOperator}\n The real or complex N-by-N matrix of the linear system.\n b : {array, matrix}\n Right hand side of the linear system. Has shape (N,) or (N,1).\n x0 : {array, matrix}\n Starting guess for the solution.\n tol : float, optional\n Tolerance to achieve. The algorithm terminates when either the relative\n or the absolute residual is below `tol`.\n maxiter : int, optional\n Maximum number of iterations. Iteration will stop after maxiter\n steps even if the specified tolerance has not been achieved.\n M : {sparse matrix, dense matrix, LinearOperator}, optional\n Preconditioner for A. The preconditioner should approximate the\n inverse of A. Effective preconditioning dramatically improves the\n rate of convergence, which implies that fewer iterations are needed\n to reach a given error tolerance.\n callback : function, optional\n User-supplied function to call after each iteration. It is called\n as callback(xk), where xk is the current solution vector.\n inner_m : int, optional\n Number of inner GMRES iterations per each outer iteration.\n outer_k : int, optional\n Number of vectors to carry between inner GMRES iterations.\n According to [1]_, good values are in the range of 1...3.\n However, note that if you want to use the additional vectors to\n accelerate solving multiple similar problems, larger values may\n be beneficial.\n outer_v : list of tuples, optional\n List containing tuples ``(v, Av)`` of vectors and corresponding\n matrix-vector products, used to augment the Krylov subspace, and\n carried between inner GMRES iterations. The element ``Av`` can\n be `None` if the matrix-vector product should be re-evaluated.\n This parameter is modified in-place by `lgmres`, and can be used\n to pass \"guess\" vectors in and out of the algorithm when solving\n similar problems.\n store_outer_Av : bool, optional\n Whether LGMRES should store also A*v in addition to vectors `v`\n in the `outer_v` list. Default is True.\n\n Returns\n -------\n x : array or matrix\n The converged solution.\n info : int\n Provides convergence information:\n\n - 0 : successful exit\n - >0 : convergence to tolerance not achieved, number of iterations\n - <0 : illegal input or breakdown\n\n Notes\n -----\n The LGMRES algorithm [1]_ [2]_ is designed to avoid the\n slowing of convergence in restarted GMRES, due to alternating\n residual vectors. Typically, it often outperforms GMRES(m) of\n comparable memory requirements by some measure, or at least is not\n much worse.\n\n Another advantage in this algorithm is that you can supply it with\n 'guess' vectors in the `outer_v` argument that augment the Krylov\n subspace. If the solution lies close to the span of these vectors,\n the algorithm converges faster. This can be useful if several very\n similar matrices need to be inverted one after another, such as in\n Newton-Krylov iteration where the Jacobian matrix often changes\n little in the nonlinear steps.\n\n References\n ----------\n .. [1] A.H. Baker and E.R. Jessup and T. Manteuffel,\n SIAM J. Matrix Anal. Appl. 26, 962 (2005).\n .. [2] A.H. 
Baker, PhD thesis, University of Colorado (2003).\n http://amath.colorado.edu/activities/thesis/allisonb/Thesis.ps\n\n \"\"\"\n from scipy.linalg.basic import lstsq\n A,M,x,b,postprocess = make_system(A,M,x0,b)\n\n if not np.isfinite(b).all():\n raise ValueError(\"RHS must contain only finite numbers\")\n\n matvec = A.matvec\n psolve = M.matvec\n\n if outer_v is None:\n outer_v = []\n\n axpy, dot, scal = None, None, None\n\n b_norm = norm2(b)\n if b_norm == 0:\n b_norm = 1\n\n for k_outer in xrange(maxiter):\n r_outer = matvec(x) - b\n\n # -- callback\n if callback is not None:\n callback(x)\n\n # -- determine input type routines\n if axpy is None:\n if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):\n x = x.astype(r_outer.dtype)\n axpy, dot, scal = get_blas_funcs(['axpy', 'dot', 'scal'],\n (x, r_outer))\n\n # -- check stopping condition\n r_norm = norm2(r_outer)\n if r_norm < tol * b_norm or r_norm < tol:\n break\n\n # -- inner LGMRES iteration\n vs0 = -psolve(r_outer)\n inner_res_0 = norm2(vs0)\n\n if inner_res_0 == 0:\n rnorm = norm2(r_outer)\n raise RuntimeError(\"Preconditioner returned a zero vector; \"\n \"|v| ~ %.1g, |M v| = 0\" % rnorm)\n\n vs0 = scal(1.0/inner_res_0, vs0)\n hs = []\n vs = [vs0]\n ws = []\n y = None\n\n for j in xrange(1, 1 + inner_m + len(outer_v)):\n # -- Arnoldi process:\n #\n # Build an orthonormal basis V and matrices W and H such that\n # A W = V H\n # Columns of W, V, and H are stored in `ws`, `vs` and `hs`.\n #\n # The first column of V is always the residual vector, `vs0`;\n # V has *one more column* than the other of the three matrices.\n #\n # The other columns in V are built by feeding in, one\n # by one, some vectors `z` and orthonormalizing them\n # against the basis so far. The trick here is to\n # feed in first some augmentation vectors, before\n # starting to construct the Krylov basis on `v0`.\n #\n # It was shown in [BJM]_ that a good choice (the LGMRES choice)\n # for these augmentation vectors are the `dx` vectors obtained\n # from a couple of the previous restart cycles.\n #\n # Note especially that while `vs0` is always the first\n # column in V, there is no reason why it should also be\n # the first column in W. (In fact, below `vs0` comes in\n # W only after the augmentation vectors.)\n #\n # The rest of the algorithm then goes as in GMRES, one\n # solves a minimization problem in the smaller subspace\n # spanned by W (range) and V (image).\n #\n # XXX: Below, I'm lazy and use `lstsq` to solve the\n # small least squares problem. Performance-wise, this\n # is in practice acceptable, but it could be nice to do\n # it on the fly with Givens etc.\n #\n\n # ++ evaluate\n v_new = None\n if j < len(outer_v) + 1:\n z, v_new = outer_v[j-1]\n elif j == len(outer_v) + 1:\n z = vs0\n else:\n z = vs[-1]\n\n if v_new is None:\n v_new = psolve(matvec(z))\n else:\n # Note: v_new is modified in-place below. 
Must make a\n # copy to ensure that the outer_v vectors are not\n # clobbered.\n v_new = v_new.copy()\n\n # ++ orthogonalize\n hcur = []\n for v in vs:\n alpha = dot(v, v_new)\n hcur.append(alpha)\n v_new = axpy(v, v_new, v.shape[0], -alpha) # v_new -= alpha*v\n hcur.append(norm2(v_new))\n\n if hcur[-1] == 0:\n # Exact solution found; bail out.\n # Zero basis vector (v_new) in the least-squares problem\n # does no harm, so we can just use the same code as usually;\n # it will give zero (inner) residual as a result.\n bailout = True\n else:\n bailout = False\n v_new = scal(1.0/hcur[-1], v_new)\n\n vs.append(v_new)\n hs.append(hcur)\n ws.append(z)\n\n # XXX: Ugly: should implement the GMRES iteration properly,\n # with Givens rotations and not using lstsq. Instead, we\n # spare some work by solving the LSQ problem only every 5\n # iterations.\n if not bailout and j % 5 != 1 and j < inner_m + len(outer_v) - 1:\n continue\n\n # -- GMRES optimization problem\n hess = np.zeros((j+1, j), x.dtype)\n e1 = np.zeros((j+1,), x.dtype)\n e1[0] = inner_res_0\n for q in xrange(j):\n hess[:(q+2),q] = hs[q]\n\n y, resids, rank, s = lstsq(hess, e1)\n inner_res = norm2(np.dot(hess, y) - e1)\n\n # -- check for termination\n if inner_res < tol * inner_res_0:\n break\n\n # -- GMRES terminated: eval solution\n dx = ws[0]*y[0]\n for w, yc in zip(ws[1:], y[1:]):\n dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc\n\n # -- Store LGMRES augmentation vectors\n nx = norm2(dx)\n if store_outer_Av:\n q = np.dot(hess, y)\n ax = vs[0]*q[0]\n for v, qc in zip(vs[1:], q[1:]):\n ax = axpy(v, ax, ax.shape[0], qc)\n outer_v.append((dx/nx, ax/nx))\n else:\n outer_v.append((dx/nx, None))\n\n # -- Retain only a finite number of augmentation vectors\n while len(outer_v) > outer_k:\n del outer_v[0]\n\n # -- Apply step\n x += dx\n else:\n # didn't converge ...\n return postprocess(x), maxiter\n\n return postprocess(x), 0\n", "from __future__ import division, print_function, absolute_import\n\nimport warnings\nfrom . 
import _minpack\n\nimport numpy as np\nfrom numpy import (atleast_1d, dot, take, triu, shape, eye,\n transpose, zeros, product, greater, array,\n all, where, isscalar, asarray, inf, abs,\n finfo, inexact, issubdtype, dtype)\nfrom .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning\n\nerror = _minpack.error\n\n__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']\n\n\ndef _check_func(checker, argname, thefunc, x0, args, numinputs,\n output_shape=None):\n res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))\n if (output_shape is not None) and (shape(res) != output_shape):\n if (output_shape[0] != 1):\n if len(output_shape) > 1:\n if output_shape[1] == 1:\n return shape(res)\n msg = \"%s: there is a mismatch between the input and output \" \\\n \"shape of the '%s' argument\" % (checker, argname)\n func_name = getattr(thefunc, '__name__', None)\n if func_name:\n msg += \" '%s'.\" % func_name\n else:\n msg += \".\"\n raise TypeError(msg)\n if issubdtype(res.dtype, inexact):\n dt = res.dtype\n else:\n dt = dtype(float)\n return shape(res), dt\n\n\ndef fsolve(func, x0, args=(), fprime=None, full_output=0,\n col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,\n epsfcn=None, factor=100, diag=None):\n \"\"\"\n Find the roots of a function.\n\n Return the roots of the (non-linear) equations defined by\n ``func(x) = 0`` given a starting estimate.\n\n Parameters\n ----------\n func : callable ``f(x, *args)``\n A function that takes at least one (possibly vector) argument.\n x0 : ndarray\n The starting estimate for the roots of ``func(x) = 0``.\n args : tuple, optional\n Any extra arguments to `func`.\n fprime : callable(x), optional\n A function to compute the Jacobian of `func` with derivatives\n across the rows. By default, the Jacobian will be estimated.\n full_output : bool, optional\n If True, return optional outputs.\n col_deriv : bool, optional\n Specify whether the Jacobian function computes derivatives down\n the columns (faster, because there is no transpose operation).\n xtol : float, optional\n The calculation will terminate if the relative error between two\n consecutive iterates is at most `xtol`.\n maxfev : int, optional\n The maximum number of calls to the function. If zero, then\n ``100*(N+1)`` is the maximum where N is the number of elements\n in `x0`.\n band : tuple, optional\n If set to a two-sequence containing the number of sub- and\n super-diagonals within the band of the Jacobi matrix, the\n Jacobi matrix is considered banded (only for ``fprime=None``).\n epsfcn : float, optional\n A suitable step length for the forward-difference\n approximation of the Jacobian (for ``fprime=None``). If\n `epsfcn` is less than the machine precision, it is assumed\n that the relative errors in the functions are of the order of\n the machine precision.\n factor : float, optional\n A parameter determining the initial step bound\n (``factor * || diag * x||``). 
Should be in the interval\n ``(0.1, 100)``.\n diag : sequence, optional\n N positive entries that serve as a scale factors for the\n variables.\n\n Returns\n -------\n x : ndarray\n The solution (or the result of the last iteration for\n an unsuccessful call).\n infodict : dict\n A dictionary of optional outputs with the keys:\n\n ``nfev``\n number of function calls\n ``njev``\n number of Jacobian calls\n ``fvec``\n function evaluated at the output\n ``fjac``\n the orthogonal matrix, q, produced by the QR\n factorization of the final approximate Jacobian\n matrix, stored column wise\n ``r``\n upper triangular matrix produced by QR factorization\n of the same matrix\n ``qtf``\n the vector ``(transpose(q) * fvec)``\n\n ier : int\n An integer flag. Set to 1 if a solution was found, otherwise refer\n to `mesg` for more information.\n mesg : str\n If no solution is found, `mesg` details the cause of failure.\n\n See Also\n --------\n root : Interface to root finding algorithms for multivariate\n functions. See the 'hybr' `method` in particular.\n\n Notes\n -----\n ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.\n\n \"\"\"\n options = {'col_deriv': col_deriv,\n 'xtol': xtol,\n 'maxfev': maxfev,\n 'band': band,\n 'eps': epsfcn,\n 'factor': factor,\n 'diag': diag,\n 'full_output': full_output}\n\n res = _root_hybr(func, x0, args, jac=fprime, **options)\n if full_output:\n x = res['x']\n info = dict((k, res.get(k))\n for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)\n info['fvec'] = res['fun']\n return x, info, res['status'], res['message']\n else:\n return res['x']\n\n\ndef _root_hybr(func, x0, args=(), jac=None,\n col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,\n factor=100, diag=None, full_output=0, **unknown_options):\n \"\"\"\n Find the roots of a multivariate function using MINPACK's hybrd and\n hybrj routines (modified Powell method).\n\n Options\n -------\n col_deriv : bool\n Specify whether the Jacobian function computes derivatives down\n the columns (faster, because there is no transpose operation).\n xtol : float\n The calculation will terminate if the relative error between two\n consecutive iterates is at most `xtol`.\n maxfev : int\n The maximum number of calls to the function. If zero, then\n ``100*(N+1)`` is the maximum where N is the number of elements\n in `x0`.\n band : tuple\n If set to a two-sequence containing the number of sub- and\n super-diagonals within the band of the Jacobi matrix, the\n Jacobi matrix is considered banded (only for ``fprime=None``).\n eps : float\n A suitable step length for the forward-difference\n approximation of the Jacobian (for ``fprime=None``). If\n `eps` is less than the machine precision, it is assumed\n that the relative errors in the functions are of the order of\n the machine precision.\n factor : float\n A parameter determining the initial step bound\n (``factor * || diag * x||``). 
Should be in the interval\n ``(0.1, 100)``.\n diag : sequence\n N positive entries that serve as a scale factors for the\n variables.\n\n \"\"\"\n _check_unknown_options(unknown_options)\n epsfcn = eps\n\n x0 = asarray(x0).flatten()\n n = len(x0)\n if not isinstance(args, tuple):\n args = (args,)\n shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))\n if epsfcn is None:\n epsfcn = finfo(dtype).eps\n Dfun = jac\n if Dfun is None:\n if band is None:\n ml, mu = -10, -10\n else:\n ml, mu = band[:2]\n if maxfev == 0:\n maxfev = 200 * (n + 1)\n retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,\n ml, mu, epsfcn, factor, diag)\n else:\n _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))\n if (maxfev == 0):\n maxfev = 100 * (n + 1)\n retval = _minpack._hybrj(func, Dfun, x0, args, 1,\n col_deriv, xtol, maxfev, factor, diag)\n\n x, status = retval[0], retval[-1]\n\n errors = {0: [\"Improper input parameters were entered.\", TypeError],\n 1: [\"The solution converged.\", None],\n 2: [\"The number of calls to function has \"\n \"reached maxfev = %d.\" % maxfev, ValueError],\n 3: [\"xtol=%f is too small, no further improvement \"\n \"in the approximate\\n solution \"\n \"is possible.\" % xtol, ValueError],\n 4: [\"The iteration is not making good progress, as measured \"\n \"by the \\n improvement from the last five \"\n \"Jacobian evaluations.\", ValueError],\n 5: [\"The iteration is not making good progress, \"\n \"as measured by the \\n improvement from the last \"\n \"ten iterations.\", ValueError],\n 'unknown': [\"An error occurred.\", TypeError]}\n\n if status != 1 and not full_output:\n if status in [2, 3, 4, 5]:\n msg = errors[status][0]\n warnings.warn(msg, RuntimeWarning)\n else:\n try:\n raise errors[status][1](errors[status][0])\n except KeyError:\n raise errors['unknown'][1](errors['unknown'][0])\n\n info = retval[1]\n info['fun'] = info.pop('fvec')\n sol = OptimizeResult(x=x, success=(status == 1), status=status)\n sol.update(info)\n try:\n sol['message'] = errors[status][0]\n except KeyError:\n info['message'] = errors['unknown'][0]\n\n return sol\n\n\ndef leastsq(func, x0, args=(), Dfun=None, full_output=0,\n col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,\n gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):\n \"\"\"\n Minimize the sum of squares of a set of equations.\n\n ::\n\n x = arg min(sum(func(y)**2,axis=0))\n y\n\n Parameters\n ----------\n func : callable\n should take at least one (possibly length N vector) argument and\n returns M floating point numbers. It must not return NaNs or\n fitting might fail.\n x0 : ndarray\n The starting estimate for the minimization.\n args : tuple, optional\n Any extra arguments to func are placed in this tuple.\n Dfun : callable, optional\n A function or method to compute the Jacobian of func with derivatives\n across the rows. If this is None, the Jacobian will be estimated.\n full_output : bool, optional\n non-zero to return all optional outputs.\n col_deriv : bool, optional\n non-zero to specify that the Jacobian function computes derivatives\n down the columns (faster, because there is no transpose operation).\n ftol : float, optional\n Relative error desired in the sum of squares.\n xtol : float, optional\n Relative error desired in the approximate solution.\n gtol : float, optional\n Orthogonality desired between the function vector and the columns of\n the Jacobian.\n maxfev : int, optional\n The maximum number of calls to the function. 
If `Dfun` is provided\n then the default `maxfev` is 100*(N+1) where N is the number of elements\n in x0, otherwise the default `maxfev` is 200*(N+1).\n epsfcn : float, optional\n A variable used in determining a suitable step length for the forward-\n difference approximation of the Jacobian (for Dfun=None).\n Normally the actual step length will be sqrt(epsfcn)*x.\n If epsfcn is less than the machine precision, it is assumed that the\n relative errors are of the order of the machine precision.\n factor : float, optional\n A parameter determining the initial step bound\n (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.\n diag : sequence, optional\n N positive entries that serve as scale factors for the variables.\n\n Returns\n -------\n x : ndarray\n The solution (or the result of the last iteration for an unsuccessful\n call).\n cov_x : ndarray\n Uses the fjac and ipvt optional outputs to construct an\n estimate of the jacobian around the solution. None if a\n singular matrix is encountered (indicates very flat curvature in\n some direction). This matrix must be multiplied by the\n residual variance to get the covariance of the\n parameter estimates -- see curve_fit.\n infodict : dict\n a dictionary of optional outputs with the keys:\n\n ``nfev``\n The number of function calls\n ``fvec``\n The function evaluated at the output\n ``fjac``\n A permutation of the R matrix of a QR\n factorization of the final approximate\n Jacobian matrix, stored column wise.\n Together with ipvt, the covariance of the\n estimate can be approximated.\n ``ipvt``\n An integer array of length N which defines\n a permutation matrix, p, such that\n fjac*p = q*r, where r is upper triangular\n with diagonal elements of nonincreasing\n magnitude. Column j of p is column ipvt(j)\n of the identity matrix.\n ``qtf``\n The vector (transpose(q) * fvec).\n\n mesg : str\n A string message giving information about the cause of failure.\n ier : int\n An integer flag. If it is equal to 1, 2, 3 or 4, the solution was\n found. Otherwise, the solution was not found. 
In either case, the\n optional output variable 'mesg' gives more information.\n\n Notes\n -----\n \"leastsq\" is a wrapper around MINPACK's lmdif and lmder algorithms.\n\n cov_x is a Jacobian approximation to the Hessian of the least squares\n objective function.\n This approximation assumes that the objective function is based on the\n difference between some observed target data (ydata) and a (non-linear)\n function of the parameters `f(xdata, params)` ::\n\n func(params) = ydata - f(xdata, params)\n\n so that the objective function is ::\n\n min sum((ydata - f(xdata, params))**2, axis=0)\n params\n\n \"\"\"\n x0 = asarray(x0).flatten()\n n = len(x0)\n if not isinstance(args, tuple):\n args = (args,)\n shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)\n m = shape[0]\n if n > m:\n raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))\n if epsfcn is None:\n epsfcn = finfo(dtype).eps\n if Dfun is None:\n if maxfev == 0:\n maxfev = 200*(n + 1)\n retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,\n gtol, maxfev, epsfcn, factor, diag)\n else:\n if col_deriv:\n _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))\n else:\n _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))\n if maxfev == 0:\n maxfev = 100 * (n + 1)\n retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,\n ftol, xtol, gtol, maxfev, factor, diag)\n\n errors = {0: [\"Improper input parameters.\", TypeError],\n 1: [\"Both actual and predicted relative reductions \"\n \"in the sum of squares\\n are at most %f\" % ftol, None],\n 2: [\"The relative error between two consecutive \"\n \"iterates is at most %f\" % xtol, None],\n 3: [\"Both actual and predicted relative reductions in \"\n \"the sum of squares\\n are at most %f and the \"\n \"relative error between two consecutive \"\n \"iterates is at \\n most %f\" % (ftol, xtol), None],\n 4: [\"The cosine of the angle between func(x) and any \"\n \"column of the\\n Jacobian is at most %f in \"\n \"absolute value\" % gtol, None],\n 5: [\"Number of calls to function has reached \"\n \"maxfev = %d.\" % maxfev, ValueError],\n 6: [\"ftol=%f is too small, no further reduction \"\n \"in the sum of squares\\n is possible.\"\"\" % ftol,\n ValueError],\n 7: [\"xtol=%f is too small, no further improvement in \"\n \"the approximate\\n solution is possible.\" % xtol,\n ValueError],\n 8: [\"gtol=%f is too small, func(x) is orthogonal to the \"\n \"columns of\\n the Jacobian to machine \"\n \"precision.\" % gtol, ValueError],\n 'unknown': [\"Unknown error.\", TypeError]}\n\n info = retval[-1] # The FORTRAN return value\n\n if info not in [1, 2, 3, 4] and not full_output:\n if info in [5, 6, 7, 8]:\n warnings.warn(errors[info][0], RuntimeWarning)\n else:\n try:\n raise errors[info][1](errors[info][0])\n except KeyError:\n raise errors['unknown'][1](errors['unknown'][0])\n\n mesg = errors[info][0]\n if full_output:\n cov_x = None\n if info in [1, 2, 3, 4]:\n from numpy.dual import inv\n from numpy.linalg import LinAlgError\n perm = take(eye(n), retval[1]['ipvt'] - 1, 0)\n r = triu(transpose(retval[1]['fjac'])[:n, :])\n R = dot(r, perm)\n try:\n cov_x = inv(dot(transpose(R), R))\n except (LinAlgError, ValueError):\n pass\n return (retval[0], cov_x) + retval[1:-1] + (mesg, info)\n else:\n return (retval[0], info)\n\n\ndef _general_function(params, xdata, ydata, function):\n return function(xdata, *params) - ydata\n\n\ndef _weighted_general_function(params, xdata, ydata, function, weights):\n return weights * (function(xdata, 
*params) - ydata)\n\n\ndef curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,\n check_finite=True, **kw):\n \"\"\"\n Use non-linear least squares to fit a function, f, to data.\n\n Assumes ``ydata = f(xdata, *params) + eps``\n\n Parameters\n ----------\n f : callable\n The model function, f(x, ...). It must take the independent\n variable as the first argument and the parameters to fit as\n separate remaining arguments.\n xdata : An M-length sequence or a (k,M)-shaped array\n for functions with k predictors.\n The independent variable where the data is measured.\n ydata : M-length sequence\n The dependent data --- nominally f(xdata, ...)\n p0 : None, scalar, or N-length sequence, optional\n Initial guess for the parameters. If None, then the initial\n values will all be 1 (if the number of parameters for the function\n can be determined using introspection, otherwise a ValueError\n is raised).\n sigma : None or M-length sequence, optional\n If not None, the uncertainties in the ydata array. These are used as\n weights in the least-squares problem\n i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``\n If None, the uncertainties are assumed to be 1.\n absolute_sigma : bool, optional\n If False, `sigma` denotes relative weights of the data points.\n The returned covariance matrix `pcov` is based on *estimated*\n errors in the data, and is not affected by the overall\n magnitude of the values in `sigma`. Only the relative\n magnitudes of the `sigma` values matter.\n\n If True, `sigma` describes one standard deviation errors of\n the input data points. The estimated covariance in `pcov` is\n based on these values.\n check_finite : bool, optional\n If True, check that the input arrays do not contain NaNs or infs,\n and raise a ValueError if they do. Setting this parameter to\n False may silently produce nonsensical results if the input arrays\n do contain NaNs.\n Default is True.\n\n Returns\n -------\n popt : array\n Optimal values for the parameters so that the sum of the squared error\n of ``f(xdata, *popt) - ydata`` is minimized\n pcov : 2d array\n The estimated covariance of popt. The diagonals provide the variance\n of the parameter estimate. To compute one standard deviation errors\n on the parameters use ``perr = np.sqrt(np.diag(pcov))``.\n\n How the `sigma` parameter affects the estimated covariance\n depends on `absolute_sigma` argument, as described above.\n\n Raises\n ------\n OptimizeWarning\n if covariance of the parameters can not be estimated.\n\n ValueError\n if ydata or xdata contain NaNs.\n\n See Also\n --------\n leastsq\n\n Notes\n -----\n The algorithm uses the Levenberg-Marquardt algorithm through `leastsq`.\n Additional keyword arguments are passed directly to that algorithm.\n\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.optimize import curve_fit\n >>> def func(x, a, b, c):\n ... 
return a * np.exp(-b * x) + c\n\n >>> xdata = np.linspace(0, 4, 50)\n >>> y = func(xdata, 2.5, 1.3, 0.5)\n >>> ydata = y + 0.2 * np.random.normal(size=len(xdata))\n\n >>> popt, pcov = curve_fit(func, xdata, ydata)\n\n \"\"\"\n if p0 is None:\n # determine number of parameters by inspecting the function\n import inspect\n args, varargs, varkw, defaults = inspect.getargspec(f)\n if len(args) < 2:\n msg = \"Unable to determine number of fit parameters.\"\n raise ValueError(msg)\n if 'self' in args:\n p0 = [1.0] * (len(args)-2)\n else:\n p0 = [1.0] * (len(args)-1)\n\n # Check input arguments\n if isscalar(p0):\n p0 = array([p0])\n\n # NaNs can not be handled\n if check_finite:\n ydata = np.asarray_chkfinite(ydata)\n else:\n ydata = np.asarray(ydata)\n if isinstance(xdata, (list, tuple, np.ndarray)):\n # `xdata` is passed straight to the user-defined `f`, so allow\n # non-array_like `xdata`.\n if check_finite:\n xdata = np.asarray_chkfinite(xdata)\n else:\n xdata = np.asarray(xdata)\n\n args = (xdata, ydata, f)\n if sigma is None:\n func = _general_function\n else:\n func = _weighted_general_function\n args += (1.0 / asarray(sigma),)\n\n # Remove full_output from kw, otherwise we're passing it in twice.\n return_full = kw.pop('full_output', False)\n res = leastsq(func, p0, args=args, full_output=1, **kw)\n (popt, pcov, infodict, errmsg, ier) = res\n\n if ier not in [1, 2, 3, 4]:\n msg = \"Optimal parameters not found: \" + errmsg\n raise RuntimeError(msg)\n\n warn_cov = False\n if pcov is None:\n # indeterminate covariance\n pcov = zeros((len(popt), len(popt)), dtype=float)\n pcov.fill(inf)\n warn_cov = True\n elif not absolute_sigma:\n if len(ydata) > len(p0):\n s_sq = (asarray(func(popt, *args))**2).sum() / (len(ydata) - len(p0))\n pcov = pcov * s_sq\n else:\n pcov.fill(inf)\n warn_cov = True\n\n if warn_cov:\n warnings.warn('Covariance of the parameters could not be estimated',\n category=OptimizeWarning)\n\n if return_full:\n return popt, pcov, infodict, errmsg, ier\n else:\n return popt, pcov\n\n\ndef check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):\n \"\"\"Perform a simple check on the gradient for correctness.\n\n \"\"\"\n\n x = atleast_1d(x0)\n n = len(x)\n x = x.reshape((n,))\n fvec = atleast_1d(fcn(x, *args))\n m = len(fvec)\n fvec = fvec.reshape((m,))\n ldfjac = m\n fjac = atleast_1d(Dfcn(x, *args))\n fjac = fjac.reshape((m, n))\n if col_deriv == 0:\n fjac = transpose(fjac)\n\n xp = zeros((n,), float)\n err = zeros((m,), float)\n fvecp = None\n _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)\n\n fvecp = atleast_1d(fcn(xp, *args))\n fvecp = fvecp.reshape((m,))\n _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)\n\n good = (product(greater(err, 0.5), axis=0))\n\n return (good, err)\n\n\ndef fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500):\n \"\"\"\n Find a fixed point of the function.\n\n Given a function of one or more variables and a starting point, find a\n fixed-point of the function: i.e. where ``func(x0) == x0``.\n\n Parameters\n ----------\n func : function\n Function to evaluate.\n x0 : array_like\n Fixed point of function.\n args : tuple, optional\n Extra arguments to `func`.\n xtol : float, optional\n Convergence tolerance, defaults to 1e-08.\n maxiter : int, optional\n Maximum number of iterations, defaults to 500.\n\n Notes\n -----\n Uses Steffensen's Method using Aitken's ``Del^2`` convergence acceleration.\n See Burden, Faires, \"Numerical Analysis\", 5th edition, pg. 
80\n\n Examples\n --------\n >>> from scipy import optimize\n >>> def func(x, c1, c2):\n ... return np.sqrt(c1/(x+c2))\n >>> c1 = np.array([10,12.])\n >>> c2 = np.array([3, 5.])\n >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))\n array([ 1.4920333 , 1.37228132])\n\n \"\"\"\n if not isscalar(x0):\n x0 = asarray(x0)\n p0 = x0\n for iter in range(maxiter):\n p1 = func(p0, *args)\n p2 = func(p1, *args)\n d = p2 - 2.0 * p1 + p0\n p = where(d == 0, p2, p0 - (p1 - p0)*(p1 - p0) / d)\n relerr = where(p0 == 0, p, (p-p0)/p0)\n if all(abs(relerr) < xtol):\n return p\n p0 = p\n else:\n p0 = x0\n for iter in range(maxiter):\n p1 = func(p0, *args)\n p2 = func(p1, *args)\n d = p2 - 2.0 * p1 + p0\n if d == 0.0:\n return p2\n else:\n p = p0 - (p1 - p0)*(p1 - p0) / d\n if p0 == 0:\n relerr = p\n else:\n relerr = (p - p0)/p0\n if abs(relerr) < xtol:\n return p\n p0 = p\n msg = \"Failed to converge after %d iterations, value is %s\" % (maxiter, p)\n raise RuntimeError(msg)\n", "from __future__ import absolute_import, print_function\n\nfrom numpy.testing import TestCase, assert_equal, run_module_suite\n\nfrom scipy.weave import ast_tools\n\n\nclass TestHarvestVariables(TestCase):\n \"\"\" Not much testing going on here, but at least it is a flame test.\"\"\"\n def generic_check(self,expr,desired):\n import parser\n ast_list = parser.suite(expr).tolist()\n actual = ast_tools.harvest_variables(ast_list)\n assert_equal(actual,desired,expr)\n\n def test_simple_expr(self):\n # Convert simple expr to blitz\n expr = \"a[:1:2] = b[:1+i+2:]\"\n desired = ['a','b','i']\n self.generic_check(expr,desired)\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "\"\"\"\nLow-level BLAS functions\n========================\n\nThis module contains low-level functions from the BLAS library.\n\n.. versionadded:: 0.12.0\n\n.. warning::\n\n These functions do little to no error checking.\n It is possible to cause crashes by mis-using them,\n so prefer using the higher-level routines in `scipy.linalg`.\n\nFinding functions\n=================\n\n.. autosummary::\n :toctree: generated/\n\n get_blas_funcs\n find_best_blas_type\n\nBLAS Level 1 functions\n======================\n\n.. autosummary::\n :toctree: generated/\n\n caxpy\n ccopy\n cdotc\n cdotu\n crotg\n cscal\n csrot\n csscal\n cswap\n dasum\n daxpy\n dcopy\n ddot\n dnrm2\n drot\n drotg\n drotm\n drotmg\n dscal\n dswap\n dzasum\n dznrm2\n icamax\n idamax\n isamax\n izamax\n sasum\n saxpy\n scasum\n scnrm2\n scopy\n sdot\n snrm2\n srot\n srotg\n srotm\n srotmg\n sscal\n sswap\n zaxpy\n zcopy\n zdotc\n zdotu\n zdrot\n zdscal\n zrotg\n zscal\n zswap\n\nBLAS Level 2 functions\n======================\n\n.. autosummary::\n :toctree: generated/\n\n cgemv\n cgerc\n cgeru\n chemv\n ctrmv\n csyr\n cher\n cher2\n dgemv\n dger\n dsymv\n dtrmv\n dsyr\n dsyr2\n sgemv\n sger\n ssymv\n strmv\n ssyr\n ssyr2\n zgemv\n zgerc\n zgeru\n zhemv\n ztrmv\n zsyr\n zher\n zher2\n\nBLAS Level 3 functions\n======================\n\n.. 
autosummary::\n :toctree: generated/\n\n cgemm\n chemm\n cherk\n cher2k\n csymm\n csyrk\n csyr2k\n dgemm\n dsymm\n dsyrk\n dsyr2k\n sgemm\n ssymm\n ssyrk\n ssyr2k\n zgemm\n zhemm\n zherk\n zher2k\n zsymm\n zsyrk\n zsyr2k\n\n\"\"\"\n#\n# Author: Pearu Peterson, March 2002\n# refactoring by Fabian Pedregosa, March 2010\n#\n\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = ['get_blas_funcs', 'find_best_blas_type']\n\nimport numpy as _np\n\nfrom scipy.linalg import _fblas\ntry:\n from scipy.linalg import _cblas\nexcept ImportError:\n _cblas = None\n\n# Expose all functions (only fblas --- cblas is an implementation detail)\nempty_module = None\nfrom scipy.linalg._fblas import *\ndel empty_module\n\n# 'd' will be default for 'i',..\n_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z', 'G':'z'}\n\n# some convenience alias for complex functions\n_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',\n 'cdot': 'cdotc', 'zdot': 'zdotc',\n 'cger': 'cgerc', 'zger': 'zgerc',\n 'sdotc': 'sdot', 'sdotu': 'sdot',\n 'ddotc': 'ddot', 'ddotu': 'ddot'}\n\n\ndef find_best_blas_type(arrays=(), dtype=None):\n \"\"\"Find best-matching BLAS/LAPACK type.\n\n Arrays are used to determine the optimal prefix of BLAS routines.\n\n Parameters\n ----------\n arrays : sequence of ndarrays, optional\n Arrays can be given to determine optimal prefix of BLAS\n routines. If not given, double-precision routines will be\n used, otherwise the most generic type in arrays will be used.\n dtype : str or dtype, optional\n Data-type specifier. Not used if `arrays` is non-empty.\n\n Returns\n -------\n prefix : str\n BLAS/LAPACK prefix character.\n dtype : dtype\n Inferred Numpy data type.\n prefer_fortran : bool\n Whether to prefer Fortran order routines over C order.\n\n \"\"\"\n dtype = _np.dtype(dtype)\n prefer_fortran = False\n\n if arrays:\n # use the most generic type in arrays\n dtypes = [ar.dtype for ar in arrays]\n dtype = _np.find_common_type(dtypes, ())\n try:\n index = dtypes.index(dtype)\n except ValueError:\n index = 0\n if arrays[index].flags['FORTRAN']:\n # prefer Fortran for leading array with column major order\n prefer_fortran = True\n\n prefix = _type_conv.get(dtype.char, 'd')\n\n return prefix, dtype, prefer_fortran\n\n\ndef _get_funcs(names, arrays, dtype,\n lib_name, fmodule, cmodule,\n fmodule_name, cmodule_name, alias):\n \"\"\"\n Return available BLAS/LAPACK functions.\n\n Used also in lapack.py. 
See get_blas_funcs for docstring.\n \"\"\"\n\n funcs = []\n unpack = False\n dtype = _np.dtype(dtype)\n module1 = (cmodule, cmodule_name)\n module2 = (fmodule, fmodule_name)\n\n if isinstance(names, str):\n names = (names,)\n unpack = True\n\n prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)\n\n if prefer_fortran:\n module1, module2 = module2, module1\n\n for i, name in enumerate(names):\n func_name = prefix + name\n func_name = alias.get(func_name, func_name)\n func = getattr(module1[0], func_name, None)\n module_name = module1[1]\n if func is None:\n func = getattr(module2[0], func_name, None)\n module_name = module2[1]\n if func is None:\n raise ValueError(\n '%s function %s could not be found' % (lib_name, func_name))\n func.module_name, func.typecode = module_name, prefix\n func.dtype = dtype\n func.prefix = prefix # Backward compatibility\n funcs.append(func)\n\n if unpack:\n return funcs[0]\n else:\n return funcs\n\n\ndef get_blas_funcs(names, arrays=(), dtype=None):\n \"\"\"Return available BLAS function objects from names.\n\n Arrays are used to determine the optimal prefix of BLAS routines.\n\n Parameters\n ----------\n names : str or sequence of str\n Name(s) of BLAS functions without type prefix.\n\n arrays : sequence of ndarrays, optional\n Arrays can be given to determine optimal prefix of BLAS\n routines. If not given, double-precision routines will be\n used, otherwise the most generic type in arrays will be used.\n\n dtype : str or dtype, optional\n Data-type specifier. Not used if `arrays` is non-empty.\n\n\n Returns\n -------\n funcs : list\n List containing the found function(s).\n\n\n Notes\n -----\n This routine automatically chooses between Fortran/C\n interfaces. Fortran code is used whenever possible for arrays with\n column major order. In all other cases, C code is preferred.\n\n In BLAS, the naming convention is that all functions start with a\n type prefix, which depends on the type of the principal\n matrix. 
These can be one of {'s', 'd', 'c', 'z'} for the numpy\n types {float32, float64, complex64, complex128} respectively.\n The code and the dtype are stored in attributes `typecode` and `dtype`\n of the returned functions.\n \"\"\"\n return _get_funcs(names, arrays, dtype,\n \"BLAS\", _fblas, _cblas, \"fblas\", \"cblas\",\n _blas_alias)\n", "# Author: Gael Varoquaux <[email protected]>\n# Jake Vanderplas <[email protected]>\n# License: BSD\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom numpy.testing import (run_module_suite, assert_allclose,\n assert_array_almost_equal, assert_raises)\nfrom scipy import sparse\n\nfrom scipy.sparse import csgraph\n\n\ndef _explicit_laplacian(x, normed=False):\n if sparse.issparse(x):\n x = x.todense()\n x = np.asarray(x)\n y = -1.0 * x\n for j in range(y.shape[0]):\n y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()\n if normed:\n d = np.diag(y).copy()\n d[d == 0] = 1.0\n y /= d[:,None]**.5\n y /= d[None,:]**.5\n return y\n\n\ndef _check_symmetric_graph_laplacian(mat, normed):\n if not hasattr(mat, 'shape'):\n mat = eval(mat, dict(np=np, sparse=sparse))\n\n if sparse.issparse(mat):\n sp_mat = mat\n mat = sp_mat.todense()\n else:\n sp_mat = sparse.csr_matrix(mat)\n\n laplacian = csgraph.laplacian(mat, normed=normed)\n n_nodes = mat.shape[0]\n if not normed:\n assert_array_almost_equal(laplacian.sum(axis=0), np.zeros(n_nodes))\n assert_array_almost_equal(laplacian.T, laplacian)\n assert_array_almost_equal(laplacian,\n csgraph.laplacian(sp_mat, normed=normed).todense())\n\n assert_array_almost_equal(laplacian,\n _explicit_laplacian(mat, normed=normed))\n\n\ndef test_laplacian_value_error():\n for t in int, float, complex:\n for m in ([1, 1],\n [[[1]]],\n [[1, 2, 3], [4, 5, 6]],\n [[1, 2], [3, 4], [5, 5]]):\n A = np.array(m, dtype=t)\n assert_raises(ValueError, csgraph.laplacian, A)\n\n\ndef test_symmetric_graph_laplacian():\n symmetric_mats = ('np.arange(10) * np.arange(10)[:, np.newaxis]',\n 'np.ones((7, 7))',\n 'np.eye(19)',\n 'sparse.diags([1, 1], [-1, 1], shape=(4,4))',\n 'sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense()',\n 'np.asarray(sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense())',\n 'np.vander(np.arange(4)) + np.vander(np.arange(4)).T')\n for mat_str in symmetric_mats:\n for normed in True, False:\n yield _check_symmetric_graph_laplacian, mat_str, normed\n\n\ndef _assert_allclose_sparse(a, b, **kwargs):\n # helper function that can deal with sparse matrices\n if sparse.issparse(a):\n a = a.toarray()\n if sparse.issparse(b):\n b = b.toarray()\n assert_allclose(a, b, **kwargs)\n\n\ndef _check_laplacian(A, desired_L, desired_d, normed, use_out_degree):\n for arr_type in np.array, sparse.csr_matrix, sparse.coo_matrix:\n for t in int, float, complex:\n adj = arr_type(A, dtype=t)\n L = csgraph.laplacian(adj, normed=normed, return_diag=False,\n use_out_degree=use_out_degree)\n _assert_allclose_sparse(L, desired_L, atol=1e-12)\n L, d = csgraph.laplacian(adj, normed=normed, return_diag=True,\n use_out_degree=use_out_degree)\n _assert_allclose_sparse(L, desired_L, atol=1e-12)\n _assert_allclose_sparse(d, desired_d, atol=1e-12)\n\n\ndef test_asymmetric_laplacian():\n # adjacency matrix\n A = [[0, 1, 0],\n [4, 2, 0],\n [0, 0, 0]]\n\n # Laplacian matrix using out-degree\n L = [[1, -1, 0],\n [-4, 4, 0],\n [0, 0, 0]]\n d = [1, 4, 0]\n _check_laplacian(A, L, d, normed=False, use_out_degree=True)\n\n # normalized Laplacian matrix using out-degree\n L = [[1, -0.5, 0],\n [-2, 1, 0],\n [0, 0, 0]]\n d = [1, 2, 1]\n 
_check_laplacian(A, L, d, normed=True, use_out_degree=True)\n\n # Laplacian matrix using in-degree\n L = [[4, -1, 0],\n [-4, 1, 0],\n [0, 0, 0]]\n d = [4, 1, 0]\n _check_laplacian(A, L, d, normed=False, use_out_degree=False)\n\n # normalized Laplacian matrix using in-degree\n L = [[1, -0.5, 0],\n [-2, 1, 0],\n [0, 0, 0]]\n d = [2, 1, 1]\n _check_laplacian(A, L, d, normed=True, use_out_degree=False)\n\n\nif __name__ == '__main__':\n run_module_suite()\n", "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n\n config = Configuration('lobpcg',parent_package,top_path)\n config.add_data_dir('tests')\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n" ]
[ [ "scipy.io.matlab.miobase.matdims", "numpy.sqrt", "scipy.io.matlab.mio.mat_reader_factory", "numpy.ndarray", "numpy.dtype", "numpy.all", "numpy.exp", "scipy.io.matlab.mio5.varmats_from_mat", "numpy.testing.assert_equal", "scipy.sparse.coo_matrix", "scipy.sparse.issparse", "scipy.io.matlab.mio.whosmat", "numpy.arange", "scipy.io.matlab.mio5.to_writeable", "numpy.eye", "scipy.io.matlab.mio.loadmat", "numpy.sin", "numpy.repeat", "scipy._lib.six.u", "numpy.zeros", "numpy.testing.assert_array_almost_equal", "scipy.sparse.csc_matrix", "scipy.io.matlab.mio5.MatFile5Writer", "scipy.io.matlab.mio5.MatFile5Reader", "numpy.testing.assert_raises", "numpy.testing.assert_", "numpy.array", "numpy.testing.run_module_suite", "numpy.cos", "numpy.testing.assert_array_equal", "numpy.empty", "scipy.io.matlab.mio.savemat" ], [ "numpy.distutils.misc_util.Configuration" ], [ "numpy.dot", "scipy.linalg.get_blas_funcs", "numpy.isfinite", "numpy.asarray", "scipy.linalg.basic.lstsq", "scipy._lib.six.xrange", "numpy.iscomplexobj", "numpy.zeros" ], [ "numpy.dot", "numpy.greater", "numpy.abs", "numpy.asarray", "numpy.issubdtype", "numpy.eye", "numpy.dtype", "numpy.finfo", "numpy.atleast_1d", "numpy.shape", "numpy.isscalar", "numpy.transpose", "numpy.array", "numpy.zeros", "numpy.where", "numpy.asarray_chkfinite" ], [ "numpy.testing.assert_equal", "numpy.testing.run_module_suite", "scipy.weave.ast_tools.harvest_variables" ], [ "numpy.find_common_type", "numpy.dtype" ], [ "numpy.diag", "numpy.testing.run_module_suite", "scipy.sparse.issparse", "numpy.asarray", "scipy.sparse.csgraph.laplacian", "scipy.sparse.csr_matrix", "numpy.testing.assert_raises", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ], [ "numpy.distutils.misc_util.Configuration" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.12" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
StefanHD13/Machine_Learning_Projects
[ "2c6f935ed689a07d6bdb63a91566d40bf94809b8" ]
[ "SMS_spam_collection/results.py" ]
[ "import pandas\ndata_feat = pandas.read_csv('features_results.csv')\ndata_class = pandas.read_csv('class_results.csv')\n\nprint('\\nSelect K best Comparison:\\n')\nprint(data_feat)\nprint('\\n\\nClassifiers Comparison:\\n')\nprint(data_class)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
WildMeOrg/wbia-deprecate-tpl-lightnet
[ "b910aaa88f31d0bb4ca220229852a7f58f4ca905", "b910aaa88f31d0bb4ca220229852a7f58f4ca905" ]
[ "lightnet/data/transform/_preprocess.py", "lightnet/network/loss/_regionloss.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Image and annotations preprocessing for lightnet networks\n# The image transformations work with both Pillow and OpenCV images\n# The annotation transformations work with brambox.annotations.Annotation objects\n# Copyright EAVISE\n#\n\nimport random\nimport collections\nimport logging\nimport torch\nimport numpy as np\nfrom PIL import Image, ImageOps\nimport brambox.boxes as bbb\nfrom .util import BaseTransform, BaseMultiTransform\n\nlog = logging.getLogger(__name__)\n\ntry:\n import cv2\nexcept ImportError:\n log.warn('OpenCV is not installed and cannot be used')\n cv2 = None\n\n__all__ = ['Letterbox', 'RandomCrop', 'RandomFlip', 'HSVShift', 'BramboxToTensor']\n\n\nclass Letterbox(BaseMultiTransform):\n \"\"\" Transform images and annotations to the right network dimensions.\n\n Args:\n dimension (tuple, optional): Default size for the letterboxing, expressed as a (width, height) tuple; Default **None**\n dataset (lightnet.data.Dataset, optional): Dataset that uses this transform; Default **None**\n\n Note:\n Create 1 Letterbox object and use it for both image and annotation transforms.\n This object will save data from the image transform and use that on the annotation transform.\n \"\"\"\n\n def __init__(self, dimension=None, dataset=None):\n super().__init__(dimension=dimension, dataset=dataset)\n if self.dimension is None and self.dataset is None:\n raise ValueError(\n 'This transform either requires a dimension or a dataset to infer the dimension'\n )\n\n self.pad = None\n self.scale = None\n self.fill_color = 127\n\n def __call__(self, data):\n if data is None:\n return None\n elif isinstance(data, collections.Sequence):\n return self._tf_anno(data)\n elif isinstance(data, Image.Image):\n return self._tf_pil(data)\n elif isinstance(data, np.ndarray):\n return self._tf_cv(data)\n else:\n log.error(\n f'Letterbox only works with <brambox annotation lists>, <PIL images> or <OpenCV images> [{type(data)}]'\n )\n return data\n\n def _tf_pil(self, img):\n \"\"\" Letterbox an image to fit in the network \"\"\"\n if self.dataset is not None:\n net_w, net_h = self.dataset.input_dim\n else:\n net_w, net_h = self.dimension\n im_w, im_h = img.size\n\n if im_w == net_w and im_h == net_h:\n self.scale = None\n self.pad = None\n return img\n\n # Rescaling\n if im_w / net_w >= im_h / net_h:\n self.scale = net_w / im_w\n else:\n self.scale = net_h / im_h\n if self.scale != 1:\n bands = img.split()\n bands = [\n b.resize((int(self.scale * im_w), int(self.scale * im_h))) for b in bands\n ]\n img = Image.merge(img.mode, bands)\n im_w, im_h = img.size\n\n if im_w == net_w and im_h == net_h:\n self.pad = None\n return img\n\n # Padding\n img_np = np.array(img)\n channels = img_np.shape[2] if len(img_np.shape) > 2 else 1\n pad_w = (net_w - im_w) / 2\n pad_h = (net_h - im_h) / 2\n self.pad = (int(pad_w), int(pad_h), int(pad_w + 0.5), int(pad_h + 0.5))\n img = ImageOps.expand(img, border=self.pad, fill=(self.fill_color,) * channels)\n return img\n\n def _tf_cv(self, img):\n \"\"\" Letterbox and image to fit in the network \"\"\"\n if self.dataset is not None:\n net_w, net_h = self.dataset.input_dim\n else:\n net_w, net_h = self.dimension\n im_h, im_w = img.shape[:2]\n\n if im_w == net_w and im_h == net_h:\n self.scale = None\n self.pad = None\n return img\n\n # Rescaling\n if im_w / net_w >= im_h / net_h:\n self.scale = net_w / im_w\n else:\n self.scale = net_h / im_h\n if self.scale != 1:\n img = cv2.resize(\n img, None, fx=self.scale, fy=self.scale, 
interpolation=cv2.INTER_CUBIC\n )\n im_h, im_w = img.shape[:2]\n\n if im_w == net_w and im_h == net_h:\n self.pad = None\n return img\n\n # Padding\n channels = img.shape[2] if len(img.shape) > 2 else 1\n pad_w = (net_w - im_w) / 2\n pad_h = (net_h - im_h) / 2\n self.pad = (int(pad_w), int(pad_h), int(pad_w + 0.5), int(pad_h + 0.5))\n img = cv2.copyMakeBorder(\n img,\n self.pad[1],\n self.pad[3],\n self.pad[0],\n self.pad[2],\n cv2.BORDER_CONSTANT,\n value=self.fill_color,\n )\n return img\n\n def _tf_anno(self, annos):\n \"\"\" Change coordinates of an annotation, according to the previous letterboxing \"\"\"\n for anno in annos:\n if self.scale is not None:\n anno.x_top_left *= self.scale\n anno.y_top_left *= self.scale\n anno.width *= self.scale\n anno.height *= self.scale\n if self.pad is not None:\n anno.x_top_left += self.pad[0]\n anno.y_top_left += self.pad[1]\n return annos\n\n\nclass RandomCrop(BaseMultiTransform):\n \"\"\" Take random crop from the image.\n\n Args:\n jitter (Number [0-1]): Indicates how much of the image we can crop\n crop_anno(Boolean, optional): Whether we crop the annotations inside the image crop; Default **False**\n intersection_threshold(number or list, optional): Argument passed on to :class:`brambox.boxes.util.modifiers.CropModifier`\n\n Note:\n Create 1 RandomCrop object and use it for both image and annotation transforms.\n This object will save data from the image transform and use that on the annotation transform.\n \"\"\"\n\n def __init__(\n self, jitter, crop_anno=False, intersection_threshold=0.001, fill_color=127\n ):\n super().__init__(jitter=jitter, crop_anno=crop_anno, fill_color=fill_color)\n self.crop_modifier = bbb.CropModifier(float('Inf'), intersection_threshold)\n\n def __call__(self, data):\n if data is None:\n return None\n elif isinstance(data, collections.Sequence):\n return self._tf_anno(data)\n elif isinstance(data, Image.Image):\n return self._tf_pil(data)\n elif isinstance(data, np.ndarray):\n return self._tf_cv(data)\n else:\n log.error(\n f'RandomCrop only works with <brambox annotation lists>, <PIL images> or <OpenCV images> [{type(data)}]'\n )\n return data\n\n def _tf_pil(self, img):\n \"\"\" Take random crop from image \"\"\"\n im_w, im_h = img.size\n crop = self._get_crop(im_w, im_h)\n crop_w = crop[2] - crop[0]\n crop_h = crop[3] - crop[1]\n img_np = np.array(img)\n channels = img_np.shape[2] if len(img_np.shape) > 2 else 1\n\n img = img.crop(\n (\n max(0, crop[0]),\n max(0, crop[1]),\n min(im_w, crop[2] - 1),\n min(im_h, crop[3] - 1),\n )\n )\n img_crop = Image.new(\n img.mode, (crop_w, crop_h), color=(self.fill_color,) * channels\n )\n img_crop.paste(img, (max(0, -crop[0]), max(0, -crop[1])))\n\n return img_crop\n\n def _tf_cv(self, img):\n \"\"\" Take random crop from image \"\"\"\n im_h, im_w = img.shape[:2]\n crop = self._get_crop(im_w, im_h)\n\n crop_w = crop[2] - crop[0]\n crop_h = crop[3] - crop[1]\n img_crop = (\n np.ones((crop_h, crop_w) + img.shape[2:], dtype=img.dtype) * self.fill_color\n )\n\n src_x1 = max(0, crop[0])\n src_x2 = min(crop[2], im_w)\n src_y1 = max(0, crop[1])\n src_y2 = min(crop[3], im_h)\n dst_x1 = max(0, -crop[0])\n dst_x2 = crop_w - max(0, crop[2] - im_w)\n dst_y1 = max(0, -crop[1])\n dst_y2 = crop_h - max(0, crop[3] - im_h)\n img_crop[dst_y1:dst_y2, dst_x1:dst_x2] = img[src_y1:src_y2, src_x1:src_x2]\n\n return img_crop\n\n def _get_crop(self, im_w, im_h):\n dw, dh = int(im_w * self.jitter), int(im_h * self.jitter)\n crop_left = random.randint(-dw, dw)\n crop_right = random.randint(-dw, dw)\n 
crop_top = random.randint(-dh, dh)\n crop_bottom = random.randint(-dh, dh)\n crop = (crop_left, crop_top, im_w - crop_right, im_h - crop_bottom)\n\n self.crop_modifier.area = crop\n return crop\n\n def _tf_anno(self, annos):\n \"\"\" Change coordinates of an annotation, according to the previous crop \"\"\"\n if self.crop_anno:\n bbb.modify(annos, [self.crop_modifier])\n else:\n crop = self.crop_modifier.area\n for i in range(len(annos) - 1, -1, -1):\n anno = annos[i]\n x1 = max(crop[0], anno.x_top_left)\n x2 = min(crop[2], anno.x_top_left + anno.width)\n y1 = max(crop[1], anno.y_top_left)\n y2 = min(crop[3], anno.y_top_left + anno.height)\n w = x2 - x1\n h = y2 - y1\n\n if self.crop_modifier.inter_area:\n ratio = (\n (w * h) / (anno.width * anno.height)\n ) < self.crop_modifier.inter_thresh\n else:\n ratio = (w / anno.width) < self.crop_modifier.inter_thresh[0] or (\n h / anno.height\n ) < self.crop_modifier.inter_thresh[1]\n if w <= 0 or h <= 0 or ratio:\n del annos[i]\n continue\n\n annos[i].x_top_left -= crop[0]\n annos[i].y_top_left -= crop[1]\n\n return annos\n\n\nclass RandomFlip(BaseMultiTransform):\n \"\"\" Randomly flip image.\n\n Args:\n threshold (Number [0-1]): Chance of flipping the image\n\n Note:\n Create 1 RandomFlip object and use it for both image and annotation transforms.\n This object will save data from the image transform and use that on the annotation transform.\n \"\"\"\n\n def __init__(self, threshold):\n self.threshold = threshold\n self.flip = False\n self.im_w = None\n\n def __call__(self, data):\n if data is None:\n return None\n elif isinstance(data, collections.Sequence):\n return [self._tf_anno(anno) for anno in data]\n elif isinstance(data, Image.Image):\n return self._tf_pil(data)\n elif isinstance(data, np.ndarray):\n return self._tf_cv(data)\n else:\n log.error(\n f'RandomFlip only works with <brambox annotation lists>, <PIL images> or <OpenCV images> [{type(data)}]'\n )\n return data\n\n def _tf_pil(self, img):\n \"\"\" Randomly flip image \"\"\"\n self._get_flip()\n self.im_w = img.size[0]\n if self.flip:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\n\n def _tf_cv(self, img):\n \"\"\" Randomly flip image \"\"\"\n self._get_flip()\n self.im_w = img.shape[1]\n if self.flip:\n img = cv2.flip(img, 1)\n return img\n\n def _get_flip(self):\n self.flip = random.random() < self.threshold\n\n def _tf_anno(self, anno):\n \"\"\" Change coordinates of an annotation, according to the previous flip \"\"\"\n if self.flip and self.im_w is not None:\n anno.x_top_left = self.im_w - anno.x_top_left - anno.width\n return anno\n\n\nclass HSVShift(BaseTransform):\n \"\"\" Perform random HSV shift on the RGB data.\n\n Args:\n hue (Number): Random number between -hue,hue is used to shift the hue\n saturation (Number): Random number between 1,saturation is used to shift the saturation; 50% chance to get 1/dSaturation in stead of dSaturation\n value (Number): Random number between 1,value is used to shift the value; 50% chance to get 1/dValue in stead of dValue\n\n Warning:\n If you use OpenCV as your image processing library, make sure the image is RGB before using this transform.\n By default OpenCV uses BGR, so you must use `cvtColor`_ function to transform it to RGB.\n\n .. 
_cvtColor: https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga397ae87e1288a81d2363b61574eb8cab\n \"\"\"\n\n def __init__(self, hue, saturation, value):\n super().__init__(hue=hue, saturation=saturation, value=value)\n\n @classmethod\n def apply(cls, data, hue, saturation, value):\n dh = random.uniform(-hue, hue)\n ds = random.uniform(1, saturation)\n if random.random() < 0.5:\n ds = 1 / ds\n dv = random.uniform(1, value)\n if random.random() < 0.5:\n dv = 1 / dv\n\n if data is None:\n return None\n elif isinstance(data, Image.Image):\n return cls._tf_pil(data, dh, ds, dv)\n elif isinstance(data, np.ndarray):\n return cls._tf_cv(data, dh, ds, dv)\n else:\n log.error(\n f'HSVShift only works with <PIL images> or <OpenCV images> [{type(data)}]'\n )\n return data\n\n @staticmethod\n def _tf_pil(img, dh, ds, dv):\n \"\"\" Random hsv shift \"\"\"\n img = img.convert('HSV')\n channels = list(img.split())\n\n def wrap_hue(x):\n x += int(dh * 255)\n if x > 255:\n x -= 255\n elif x < 0:\n x += 255\n return x\n\n channels[0] = channels[0].point(wrap_hue)\n channels[1] = channels[1].point(lambda i: min(255, max(0, int(i * ds))))\n channels[2] = channels[2].point(lambda i: min(255, max(0, int(i * dv))))\n\n img = Image.merge(img.mode, tuple(channels))\n img = img.convert('RGB')\n return img\n\n @staticmethod\n def _tf_cv(img, dh, ds, dv):\n \"\"\" Random hsv shift \"\"\"\n img = img.astype(np.float32) / 255.0\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\n def wrap_hue(x):\n x[x >= 360.0] -= 360.0\n x[x < 0.0] += 360.0\n return x\n\n img[:, :, 0] = wrap_hue(img[:, :, 0] + (360.0 * dh))\n img[:, :, 1] = np.clip(ds * img[:, :, 1], 0.0, 1.0)\n img[:, :, 2] = np.clip(dv * img[:, :, 2], 0.0, 1.0)\n\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n img = (img * 255).astype(np.uint8)\n return img\n\n\nclass BramboxToTensor(BaseTransform):\n \"\"\" Converts a list of brambox annotation objects to a tensor.\n\n Args:\n dimension (tuple, optional): Default size of the transformed images, expressed as a (width, height) tuple; Default **None**\n dataset (lightnet.data.Dataset, optional): Dataset that uses this transform; Default **None**\n max_anno (Number, optional): Maximum number of annotations in the list; Default **50**\n class_label_map (list, optional): class label map to convert class names to an index; Default **None**\n\n Return:\n torch.Tensor: tensor of dimension [max_anno, 5] containing [class_idx,center_x,center_y,width,height] for every detection\n\n Warning:\n If no class_label_map is given, this function will first try to convert the class_label to an integer. If that fails, it is simply given the number 0.\n \"\"\"\n\n def __init__(self, dimension=None, dataset=None, max_anno=50, class_label_map=None):\n super().__init__(\n dimension=dimension,\n dataset=dataset,\n max_anno=max_anno,\n class_label_map=class_label_map,\n )\n if self.dimension is None and self.dataset is None:\n raise ValueError(\n 'This transform either requires a dimension or a dataset to infer the dimension'\n )\n if self.class_label_map is None:\n log.warn(\n 'No class_label_map given. 
If the class_labels are not integers, they will be set to zero.'\n )\n\n def __call__(self, data):\n if self.dataset is not None:\n dim = self.dataset.input_dim\n else:\n dim = self.dimension\n return self.apply(data, dim, self.max_anno, self.class_label_map)\n\n @classmethod\n def apply(cls, data, dimension, max_anno=None, class_label_map=None):\n if not isinstance(data, collections.Sequence):\n raise TypeError(\n f'BramboxToTensor only works with <brambox annotation list> [{type(data)}]'\n )\n\n anno_np = np.array(\n [cls._tf_anno(anno, dimension, class_label_map) for anno in data],\n dtype=np.float32,\n )\n\n if max_anno is not None:\n anno_len = len(data)\n if anno_len > max_anno:\n raise ValueError(\n f'More annotations than maximum allowed [{anno_len}/{max_anno}]'\n )\n\n z_np = np.zeros((max_anno - anno_len, 5), dtype=np.float32)\n z_np[:, 0] = -1\n\n if anno_len > 0:\n return torch.from_numpy(np.concatenate((anno_np, z_np)))\n else:\n return torch.from_numpy(z_np)\n else:\n return torch.from_numpy(anno_np)\n\n @staticmethod\n def _tf_anno(anno, dimension, class_label_map):\n \"\"\" Transforms brambox annotation to list \"\"\"\n net_w, net_h = dimension\n\n if class_label_map is not None:\n cls = class_label_map.index(anno.class_label)\n else:\n try:\n cls = int(anno.class_label)\n except ValueError:\n cls = 0\n\n cx = (anno.x_top_left + (anno.width / 2)) / net_w\n cy = (anno.y_top_left + (anno.height / 2)) / net_h\n w = anno.width / net_w\n h = anno.height / net_h\n return [cls, cx, cy, w, h]\n", "# -*- coding: utf-8 -*-\n#\n# Darknet RegionLoss\n# Copyright EAVISE\n#\n\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n__all__ = ['RegionLoss']\n\n\nclass RegionLoss(nn.modules.loss._Loss):\n \"\"\" Computes region loss from darknet network output and target annotation.\n\n Args:\n num_classes (int): number of categories\n anchors (list): 2D list representing anchor boxes (see :class:`lightnet.network.Darknet`)\n coord_scale (float): weight of bounding box coordinates\n noobject_scale (float): weight of regions without target boxes\n object_scale (float): weight of regions with target boxes\n class_scale (float): weight of categorical predictions\n thresh (float): minimum iou between a predicted box and ground truth for them to be considered matching\n seen (torch.Tensor): How many images the network has already been trained on.\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n anchors,\n reduction=32,\n seen=0,\n coord_scale=1.0,\n noobject_scale=1.0,\n object_scale=5.0,\n class_scale=1.0,\n thresh=0.6,\n ):\n super().__init__()\n self.num_classes = num_classes\n self.num_anchors = len(anchors)\n self.anchor_step = len(anchors[0])\n self.anchors = torch.Tensor(anchors)\n self.reduction = reduction\n self.register_buffer('seen', torch.tensor(seen))\n\n self.coord_scale = coord_scale\n self.noobject_scale = noobject_scale\n self.object_scale = object_scale\n self.class_scale = class_scale\n self.thresh = thresh\n\n def extra_repr(self):\n repr_str = f'classes={self.num_classes}, reduction={self.reduction}, threshold={self.thresh}, seen={self.seen.item()}\\n'\n repr_str += f'coord_scale={self.coord_scale}, object_scale={self.object_scale}, noobject_scale={self.noobject_scale}, class_scale={self.class_scale}\\n'\n repr_str += f'anchors='\n for a in self.anchors:\n repr_str += f'[{a[0]:.5g}, {a[1]:.5g}] '\n return repr_str\n\n def forward(self, output, target, seen=None):\n \"\"\" Compute Region loss.\n\n Args:\n output (torch.autograd.Variable): 
Output from the network\n target (brambox.boxes.annotations.Annotation or torch.Tensor): Brambox annotations or tensor containing the annotation targets (see :class:`lightnet.data.BramboxToTensor`)\n seen (int, optional): How many images the network has already been trained on; Default **Add batch_size to previous seen value**\n\n Note:\n The example below only shows this function working with a target tensor. |br|\n This loss function also works with a list of brambox annotations as target and will work the same.\n The added benefit of using brambox annotations is that this function will then also look at the ``ignore`` flag of the annotations\n and ignore detections that match with it. This allows you to have annotations that will not influence the loss in any way,\n as opposed to having them removed and counting them as false detections.\n\n Example:\n >>> _ = torch.random.manual_seed(0)\n >>> network = ln.models.Yolo(num_classes=2, conf_thresh=4e-2)\n >>> region_loss = ln.network.loss.RegionLoss(network.num_classes, network.anchors)\n >>> Win, Hin = 96, 96\n >>> Wout, Hout = 1, 1\n >>> # true boxes for each item in the batch\n >>> # each box encodes class, x_center, y_center, width, and height\n >>> # coordinates are normalized in the range 0 to 1\n >>> # items in each batch are padded with dummy boxes with class_id=-1\n >>> target = torch.FloatTensor([\n ... # boxes for batch item 1\n ... [[0, 0.50, 0.50, 1.00, 1.00],\n ... [1, 0.32, 0.42, 0.22, 0.12]],\n ... # boxes for batch item 2 (it has no objects, note the pad!)\n ... [[-1, 0, 0, 0, 0],\n ... [-1, 0, 0, 0, 0]],\n ... ])\n >>> im_data = torch.autograd.Variable(torch.randn(len(target), 3, Hin, Win))\n >>> output = network._forward(im_data)\n >>> loss = float(region_loss(output, target))\n >>> print(f'loss = {loss:.2f}')\n loss = 22.04\n \"\"\"\n # Parameters\n nB = output.data.size(0)\n nA = self.num_anchors\n nC = self.num_classes\n nH = output.data.size(2)\n nW = output.data.size(3)\n nPixels = nH * nW\n device = output.device\n if seen is not None:\n self.seen = torch.tensor(seen)\n elif self.training:\n self.seen += nB\n\n # Get x,y,w,h,conf,cls\n output = output.view(nB, nA, -1, nPixels)\n coord = torch.zeros_like(output[:, :, :4])\n coord[:, :, :2] = output[:, :, :2].sigmoid() # tx,ty\n coord[:, :, 2:4] = output[:, :, 2:4] # tw,th\n conf = output[:, :, 4].sigmoid()\n if nC > 1:\n cls = (\n output[:, :, 5:]\n .contiguous()\n .view(nB * nA, nC, nPixels)\n .transpose(1, 2)\n .contiguous()\n .view(-1, nC)\n )\n\n # Create prediction boxes\n pred_boxes = torch.FloatTensor(nB * nA * nPixels, 4)\n lin_x = torch.linspace(0, nW - 1, nW).repeat(nH, 1).view(nPixels).to(device)\n lin_y = (\n torch.linspace(0, nH - 1, nH)\n .view(nH, 1)\n .repeat(1, nW)\n .view(nPixels)\n .to(device)\n )\n anchor_w = self.anchors[:, 0].contiguous().view(nA, 1).to(device)\n anchor_h = self.anchors[:, 1].contiguous().view(nA, 1).to(device)\n\n pred_boxes[:, 0] = (coord[:, :, 0].detach() + lin_x).view(-1)\n pred_boxes[:, 1] = (coord[:, :, 1].detach() + lin_y).view(-1)\n pred_boxes[:, 2] = (coord[:, :, 2].detach().exp() * anchor_w).view(-1)\n pred_boxes[:, 3] = (coord[:, :, 3].detach().exp() * anchor_h).view(-1)\n pred_boxes = pred_boxes.cpu()\n\n # Get target values\n coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls = self.build_targets(\n pred_boxes, target, nH, nW\n )\n coord_mask = coord_mask.expand_as(tcoord).to(device).sqrt()\n conf_mask = conf_mask.to(device).sqrt()\n tcoord = tcoord.to(device)\n tconf = tconf.to(device)\n if nC > 1:\n tcls = 
tcls[cls_mask].view(-1).long().to(device)\n cls_mask = cls_mask.view(-1, 1).repeat(1, nC).to(device)\n cls = cls[cls_mask].view(-1, nC)\n\n # Compute losses\n mse = nn.MSELoss(size_average=False)\n self.loss_coord = (\n self.coord_scale * mse(coord * coord_mask, tcoord * coord_mask) / nB\n )\n self.loss_conf = mse(conf * conf_mask, tconf * conf_mask) / nB\n if nC > 1:\n if tcls.numel() > 0:\n self.loss_cls = (\n self.class_scale\n * 2\n * nn.CrossEntropyLoss(size_average=False)(cls, tcls)\n / nB\n )\n else:\n self.loss_cls = torch.tensor(0.0).to(device)\n self.loss_tot = self.loss_coord + self.loss_conf + self.loss_cls\n else:\n self.loss_cls = None\n self.loss_tot = self.loss_coord + self.loss_conf\n\n return self.loss_tot\n\n def build_targets(self, pred_boxes, ground_truth, nH, nW):\n \"\"\" Compare prediction boxes and targets, convert targets to network output tensors \"\"\"\n if torch.is_tensor(ground_truth):\n return self.__build_targets_tensor(pred_boxes, ground_truth, nH, nW)\n else:\n return self.__build_targets_brambox(pred_boxes, ground_truth, nH, nW)\n\n def __build_targets_tensor(self, pred_boxes, ground_truth, nH, nW):\n \"\"\" Compare prediction boxes and ground truths, convert ground truths to network output tensors \"\"\"\n # Parameters\n nB = ground_truth.size(0)\n nT = ground_truth.size(1)\n nA = self.num_anchors\n nAnchors = nA * nH * nW\n nPixels = nH * nW\n\n # Tensors\n conf_mask = torch.ones(nB, nA, nPixels, requires_grad=False) * self.noobject_scale\n coord_mask = torch.zeros(nB, nA, 1, nPixels, requires_grad=False)\n cls_mask = torch.zeros(nB, nA, nPixels, requires_grad=False).byte()\n tcoord = torch.zeros(nB, nA, 4, nPixels, requires_grad=False)\n tconf = torch.zeros(nB, nA, nPixels, requires_grad=False)\n tcls = torch.zeros(nB, nA, nPixels, requires_grad=False)\n\n if self.seen < 12800:\n coord_mask.fill_(1)\n # coord_mask.fill_(.01 / self.coord_scale)\n\n if self.anchor_step == 4:\n tcoord[:, :, 0] = (\n self.anchors[:, 2]\n .contiguous()\n .view(1, nA, 1, 1)\n .repeat(nB, 1, 1, nPixels)\n )\n tcoord[:, :, 1] = (\n self.anchors[:, 3]\n .contiguous()\n .view(1, nA, 1, 1)\n .repeat(nB, 1, 1, nPixels)\n )\n else:\n tcoord[:, :, 0].fill_(0.5)\n tcoord[:, :, 1].fill_(0.5)\n\n for b in range(nB):\n gt = ground_truth[b][\n (ground_truth[b, :, 0] >= 0)[:, None].expand_as(ground_truth[b])\n ].view(-1, 5)\n if gt.numel() == 0: # No gt for this image\n continue\n\n # Build up tensors\n cur_pred_boxes = pred_boxes[b * nAnchors : (b + 1) * nAnchors]\n if self.anchor_step == 4:\n anchors = self.anchors.clone()\n anchors[:, :2] = 0\n else:\n anchors = torch.cat([torch.zeros_like(self.anchors), self.anchors], 1)\n\n gt = gt[:, 1:]\n gt[:, ::2] *= nW\n gt[:, 1::2] *= nH\n\n # Set confidence mask of matching detections to 0\n iou_gt_pred = bbox_ious(gt, cur_pred_boxes)\n mask = (iou_gt_pred > self.thresh).sum(0) >= 1\n conf_mask[b][mask.view_as(conf_mask[b])] = 0\n\n # Find best anchor for each gt\n gt_wh = gt.clone()\n gt_wh[:, :2] = 0\n iou_gt_anchors = bbox_ious(gt_wh, anchors)\n _, best_anchors = iou_gt_anchors.max(1)\n\n # Set masks and target values for each gt\n gt_size = gt.size(0)\n for i in range(gt_size):\n gi = min(nW - 1, max(0, int(gt[i, 0])))\n gj = min(nH - 1, max(0, int(gt[i, 1])))\n best_n = best_anchors[i]\n iou = iou_gt_pred[i][best_n * nPixels + gj * nW + gi]\n\n coord_mask[b][best_n][0][gj * nW + gi] = (\n 2 - (gt[i, 2] * gt[i, 3]) / nPixels\n )\n cls_mask[b][best_n][gj * nW + gi] = 1\n conf_mask[b][best_n][gj * nW + gi] = self.object_scale\n 
tcoord[b][best_n][0][gj * nW + gi] = gt[i, 0] - gi\n tcoord[b][best_n][1][gj * nW + gi] = gt[i, 1] - gj\n tcoord[b][best_n][2][gj * nW + gi] = math.log(\n gt[i, 2] / self.anchors[best_n, 0]\n )\n tcoord[b][best_n][3][gj * nW + gi] = math.log(\n gt[i, 3] / self.anchors[best_n, 1]\n )\n tconf[b][best_n][gj * nW + gi] = iou\n tcls[b][best_n][gj * nW + gi] = ground_truth[b, i, 0]\n\n return coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls\n\n def __build_targets_brambox(self, pred_boxes, ground_truth, nH, nW):\n \"\"\" Compare prediction boxes and ground truths, convert ground truths to network output tensors \"\"\"\n # Parameters\n nB = len(ground_truth)\n nA = self.num_anchors\n nAnchors = nA * nH * nW\n nPixels = nH * nW\n\n # Tensors\n conf_mask = torch.ones(nB, nA, nPixels, requires_grad=False) * self.noobject_scale\n coord_mask = torch.zeros(nB, nA, 1, nPixels, requires_grad=False)\n cls_mask = torch.zeros(nB, nA, nPixels, requires_grad=False).byte()\n tcoord = torch.zeros(nB, nA, 4, nPixels, requires_grad=False)\n tconf = torch.zeros(nB, nA, nPixels, requires_grad=False)\n tcls = torch.zeros(nB, nA, nPixels, requires_grad=False)\n\n if self.seen < 12800:\n coord_mask.fill_(1)\n # coord_mask.fill_(.01 / self.coord_scale)\n\n if self.anchor_step == 4:\n tcoord[:, :, 0] = (\n self.anchors[:, 2]\n .contiguous()\n .view(1, nA, 1, 1)\n .repeat(nB, 1, 1, nPixels)\n )\n tcoord[:, :, 1] = (\n self.anchors[:, 3]\n .contiguous()\n .view(1, nA, 1, 1)\n .repeat(nB, 1, 1, nPixels)\n )\n else:\n tcoord[:, :, 0].fill_(0.5)\n tcoord[:, :, 1].fill_(0.5)\n\n for b in range(nB):\n if len(ground_truth[b]) == 0: # No gt for this image\n continue\n\n # Build up tensors\n cur_pred_boxes = pred_boxes[b * nAnchors : (b + 1) * nAnchors]\n if self.anchor_step == 4:\n anchors = self.anchors.clone()\n anchors[:, :2] = 0\n else:\n anchors = torch.cat([torch.zeros_like(self.anchors), self.anchors], 1)\n gt = torch.zeros(len(ground_truth[b]), 4)\n for i, anno in enumerate(ground_truth[b]):\n gt[i, 0] = (anno.x_top_left + anno.width / 2) / self.reduction\n gt[i, 1] = (anno.y_top_left + anno.height / 2) / self.reduction\n gt[i, 2] = anno.width / self.reduction\n gt[i, 3] = anno.height / self.reduction\n\n # Set confidence mask of matching detections to 0\n iou_gt_pred = bbox_ious(gt, cur_pred_boxes)\n mask = (iou_gt_pred > self.thresh).sum(0) >= 1\n conf_mask[b][mask.view_as(conf_mask[b])] = 0\n\n # Find best anchor for each gt\n gt_wh = gt.clone()\n gt_wh[:, :2] = 0\n iou_gt_anchors = bbox_ious(gt_wh, anchors)\n _, best_anchors = iou_gt_anchors.max(1)\n\n # Set masks and target values for each gt\n for i, anno in enumerate(ground_truth[b]):\n gi = min(nW - 1, max(0, int(gt[i, 0])))\n gj = min(nH - 1, max(0, int(gt[i, 1])))\n best_n = best_anchors[i]\n iou = iou_gt_pred[i][best_n * nPixels + gj * nW + gi]\n\n if anno.ignore:\n conf_mask[b][best_n][gj * nW + gi] = 0\n coord_mask[b][best_n][0][\n gj * nW + gi\n ] = 0 # Explicitely set to zero for when seen < 12800\n else:\n coord_mask[b][best_n][0][gj * nW + gi] = (\n 2 - (gt[i, 2] * gt[i, 3]) / nPixels\n )\n cls_mask[b][best_n][gj * nW + gi] = 1\n conf_mask[b][best_n][gj * nW + gi] = self.object_scale\n tcoord[b][best_n][0][gj * nW + gi] = gt[i, 0] - gi\n tcoord[b][best_n][1][gj * nW + gi] = gt[i, 1] - gj\n tcoord[b][best_n][2][gj * nW + gi] = math.log(\n gt[i, 2] / self.anchors[best_n, 0]\n )\n tcoord[b][best_n][3][gj * nW + gi] = math.log(\n gt[i, 3] / self.anchors[best_n, 1]\n )\n tconf[b][best_n][gj * nW + gi] = iou\n tcls[b][best_n][gj * nW + gi] = 
anno.class_id\n\n return coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls\n\n\ndef bbox_ious(boxes1, boxes2):\n \"\"\" Compute IOU between all boxes from ``boxes1`` with all boxes from ``boxes2``.\n\n Args:\n boxes1 (torch.Tensor): List of bounding boxes\n boxes2 (torch.Tensor): List of bounding boxes\n\n Note:\n List format: [[xc, yc, w, h],...]\n \"\"\"\n b1_len = boxes1.size(0)\n b2_len = boxes2.size(0)\n\n b1x1, b1y1 = (boxes1[:, :2] - (boxes1[:, 2:4] / 2)).split(1, 1)\n b1x2, b1y2 = (boxes1[:, :2] + (boxes1[:, 2:4] / 2)).split(1, 1)\n b2x1, b2y1 = (boxes2[:, :2] - (boxes2[:, 2:4] / 2)).split(1, 1)\n b2x2, b2y2 = (boxes2[:, :2] + (boxes2[:, 2:4] / 2)).split(1, 1)\n\n dx = (b1x2.min(b2x2.t()) - b1x1.max(b2x1.t())).clamp(min=0)\n dy = (b1y2.min(b2y2.t()) - b1y1.max(b2y1.t())).clamp(min=0)\n intersections = dx * dy\n\n areas1 = (b1x2 - b1x1) * (b1y2 - b1y1)\n areas2 = (b2x2 - b2x1) * (b2y2 - b2y1)\n unions = (areas1 + areas2.t()) - intersections\n\n return intersections / unions\n" ]
[ [ "numpy.clip", "torch.from_numpy", "numpy.ones", "numpy.concatenate", "numpy.array", "numpy.zeros" ], [ "torch.nn.CrossEntropyLoss", "torch.linspace", "torch.ones", "torch.Tensor", "torch.zeros", "torch.zeros_like", "torch.is_tensor", "torch.tensor", "torch.FloatTensor", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DipakBagal/BMS_Molecular_Translation
[ "881b252a3c30e5b0afce2ce2c5da73d02755394d" ]
[ "codes/data.py" ]
[ "import albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nimport cv2\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import RandomSampler, SequentialSampler, WeightedRandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence\n\nimport numpy as np\nimport pandas as pd\n\nfrom utilities import smart_print\nfrom augmentations import get_augs\n\n\n####### DATASETS\n\nclass ImageData(Dataset):\n \n def __init__(self, \n df, \n tokenizer = None, \n channels = 3,\n crop = False, \n padding = False,\n morphology = False,\n meta = False,\n transform = None):\n super().__init__()\n self.df = df\n self.tokenizer = tokenizer\n self.file_paths = df['file_path'].values\n self.labels = df['InChI_text'].values\n self.transform = transform\n self.crop = crop\n self.channels = channels\n self.morphology = morphology\n self.meta = meta\n self.padding = padding\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, idx):\n \n # import\n file_path = self.file_paths[idx] \n image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE) \n if image is None:\n raise FileNotFoundError(file_path)\n \n # image meta data\n if self.meta:\n meta_area = (image.shape[0] * image.shape[1]) / 2500000\n meta_ratio = (image.shape[0] / image.shape[1]) / 30.0\n meta = torch.LongTensor([meta_area, meta_ratio])\n \n # morphological transforms\n if self.morphology:\n image = cv2.morphologyEx(image, cv2.MORPH_OPEN, np.ones((2, 2)))\n image = cv2.erode(image, np.ones((2, 2)))\n \n # smart crop\n if self.crop:\n image = smart_crop(image)\n \n # convert to RGB\n if self.channels == 3:\n image = cv2.merge([image, image, image]).astype(np.float32)\n elif self.channels == 1:\n image = image.astype(np.float32)\n \n # padding\n if self.padding:\n image = pad_image(image)\n \n # augmentations\n if self.transform:\n image = self.transform(image = image)['image']\n \n # output\n label = torch.LongTensor(self.tokenizer.text_to_sequence(self.labels[idx]))\n label_length = torch.LongTensor([len(label)])\n if self.meta:\n return image, meta, label, label_length\n else:\n return image, label, label_length\n \n \n \nclass ImageTestData(Dataset):\n \n def __init__(self, \n df, \n channels = 3,\n crop = False, \n padding = False,\n morphology = False,\n meta = False,\n transform = None):\n super().__init__()\n self.df = df\n self.file_paths = df['file_path'].values\n self.transform = transform\n self.crop = crop\n self.channels = channels\n self.padding = padding\n self.morphology = morphology\n self.meta = meta\n self.fix_transform = A.Compose([A.Transpose(p = 1), A.VerticalFlip(p = 1)])\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, idx):\n \n # import\n file_path = self.file_paths[idx]\n image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE) \n if image is None:\n raise FileNotFoundError(path)\n \n # image meta data\n if self.meta:\n meta_area = (image.shape[0] * image.shape[1]) / 2500000\n if image.shape[0] > image.shape[1]:\n meta_ratio = (image.shape[0] / image.shape[1]) / 30.0\n else:\n meta_ratio = (image.shape[1] / image.shape[0]) / 30.0\n meta = torch.LongTensor([meta_area, meta_ratio])\n \n # morphological transforms\n if self.morphology:\n image = cv2.morphologyEx(image, cv2.MORPH_OPEN, np.ones((2, 2)))\n image = cv2.erode(image, np.ones((2, 2)))\n \n # smart crop\n if self.crop:\n image = smart_crop(image)\n \n # convert to RGB\n if self.channels == 3:\n 
image = cv2.merge([image, image, image]).astype(np.float32)\n elif self.channels == 1:\n image = image.astype(np.float32)\n \n # fix rotation\n h, w = image.shape[0], image.shape[1]\n if h > w:\n image = self.fix_transform(image = image)['image']\n \n # padding\n if self.padding:\n image = pad_image(image)\n \n # augmentations\n if self.transform:\n image = self.transform(image = image)['image']\n \n # output \n if self.meta:\n return image, meta\n else:\n return image\n\n\n\n####### BATCH COLLATE HELPER FUNCTION\n\n''' \nBorrowed from https://www.kaggle.com/yasufuminakama/inchi-resnet-lstm-with-attention-starter\n'''\n\ndef bms_collate(batch, tokenizer):\n imgs, labels, label_lengths = [], [], []\n for data_point in batch:\n imgs.append(data_point[0])\n labels.append(data_point[1])\n label_lengths.append(data_point[2])\n labels = pad_sequence(labels, batch_first = True, padding_value = tokenizer.stoi['<pad>'])\n return torch.stack(imgs), labels, torch.stack(label_lengths).reshape(-1, 1)\n\n\n\n####### DATA PREP\n\ndef get_data(df, fold, CFG, epoch = None):\n \n # epoch number\n if epoch is None:\n epoch = 0\n\n # load splits\n df_train = df.loc[df.fold != fold].reset_index(drop = True)\n df_valid = df.loc[df.fold == fold].reset_index(drop = True)\n if CFG['valid_subset']:\n df_valid = df_valid.head(CFG['valid_subset'])\n smart_print('- no. images: train - {}, valid - {}'.format(len(df_train), len(df_valid)), CFG)\n \n # extra data\n if CFG['data_ext']:\n df_extra_epoch = df_extra.sample(n = CFG['data_ext'], random_state = CFG['seed'] + epoch).reset_index(drop = True)\n df_train = pd.concat([df_train, df_extra_epoch], axis = 0).reset_index(drop = True)\n smart_print('- appending extra data to train...', CFG)\n smart_print('- no. images: train - {}, valid - {}'.format(len(df_train), len(df_valid)), CFG)\n\n # subset for debug mode\n if CFG['debug']:\n df_train = df_train.sample(CFG['batch_size'] * 10, random_state = CFG['seed']).reset_index(drop = True)\n df_valid = df_valid.sample(CFG['batch_size'] * 10, random_state = CFG['seed']).reset_index(drop = True)\n smart_print('- subsetting data for debug mode...', CFG)\n smart_print('- no. 
images: train - {}, valid - {}'.format(len(df_train), len(df_valid)), CFG)\n \n # sort validation data for efficiency\n df_valid['InChI_length'] = df_valid['InChI'].str.len()\n df_valid = df_valid.sort_values(by = 'InChI_length', ascending = False).reset_index(drop = True)\n del df_valid['InChI_length']\n \n return df_train, df_valid\n\n\n\n\n####### DATA LOADERS\n\nfrom utilities import *\n\ndef get_loaders(df_train, df_valid, tokenizer, CFG, epoch = None):\n\n ##### EPOCH-BASED PARAMS\n\n image_size = CFG['image_size']\n p_aug = CFG['p_aug']\n\n\n ##### DATASETS\n \n # augmentations\n train_augs, valid_augs = get_augs(CFG, image_size, p_aug)\n\n # datasets\n train_dataset = ImageData(df = df_train, \n transform = train_augs,\n tokenizer = tokenizer, \n channels = CFG['num_channels'],\n crop = CFG['smart_crop'],\n morphology = CFG['morphology'],\n padding = CFG['padding'],\n meta = CFG['meta_data'])\n valid_dataset = ImageTestData(df = df_valid, \n transform = valid_augs,\n channels = CFG['num_channels'],\n crop = CFG['smart_crop'],\n morphology = CFG['morphology'],\n padding = CFG['padding'],\n meta = CFG['meta_data'])\n \n \n ##### DATA SAMPLERS\n \n # samplers\n train_sampler = RandomSampler(train_dataset)\n valid_sampler = SequentialSampler(valid_dataset)\n \n ##### DATA LOADERS\n \n # data loaders\n train_loader = DataLoader(dataset = train_dataset, \n batch_size = CFG['batch_size'], \n shuffle = True,\n num_workers = CFG['cpu_workers'],\n drop_last = True, \n collate_fn = lambda b: bms_collate(b, tokenizer),\n worker_init_fn = worker_init_fn,\n pin_memory = False)\n valid_loader = DataLoader(dataset = valid_dataset, \n batch_size = CFG['valid_batch_size'], \n shuffle = False,\n num_workers = CFG['cpu_workers'],\n drop_last = False,\n pin_memory = False)\n \n # feedback\n smart_print('- image size: {}x{}, p(augment): {}'.format(image_size, image_size, p_aug), CFG)\n if epoch is None:\n smart_print('-' * 55, CFG)\n \n return train_loader, valid_loader" ]
[ [ "torch.LongTensor", "pandas.concat", "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.DataLoader", "numpy.ones", "torch.stack", "torch.utils.data.sampler.SequentialSampler", "torch.utils.data.sampler.RandomSampler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
rlratzel/custrings
[ "49dabc02aa74649eff09a10d183df94e55037e54" ]
[ "python/tests/test_offsets.py" ]
[ "\nimport nvstrings\nimport numpy as np\nvalues = np.array([97, 112, 112, 108, 101], dtype=np.int8)\nprint(\"values\",values.tobytes())\noffsets = np.array([0,1,2,3,4,5], dtype=np.int32)\nprint(\"offsets\",offsets)\ns = nvstrings.from_offsets(values,offsets,5)\nprint(s)\n\nbitmask = np.array([29], dtype=np.int8)\nprint(\"bitmask\",bitmask.tobytes())\ns = nvstrings.from_offsets(values,offsets,5,bitmask,1)\nprint(s)\n\nprint(\"------------------\")\nvalues = np.array([97, 112, 112, 108, 101, 112, 101, 97, 114], dtype=np.int8)\nprint(\"values\",values.tobytes())\noffsets = np.array([0,5,5,9], dtype=np.int32)\nprint(\"offsets\",offsets)\ns = nvstrings.from_offsets(values,offsets,3)\nprint(s)\n\nbitmask = np.array([5], dtype=np.int8)\nprint(\"bitmask\",bitmask.tobytes())\ns = nvstrings.from_offsets(values,offsets,3,bitmask,1)\nprint(s)\n\nprint(\"values.ctypes.data\",hex(values.ctypes.data))\nprint(\"offsets.ctypes.data\",hex(offsets.ctypes.data))\nprint(\"bitmask.ctypes.data\",hex(bitmask.ctypes.data))\ns = nvstrings.from_offsets(values.ctypes.data,offsets.ctypes.data,3,bitmask.ctypes.data,1)\nprint(s)\n\nprint(\"------------------\")\ns = nvstrings.to_device(['a','p','p','l','e'])\nvalues = np.empty(s.size(), dtype=np.int8)\noffsets = np.empty(s.size()+1, dtype=np.int32)\nnulls = np.empty(int(s.size()/8)+1, dtype=np.int8)\ns.to_offsets(values,offsets,nulls)\nprint(\"values\",values.tobytes())\nprint(\"offsets\",offsets)\nprint(\"nulls\",nulls.tobytes())\n\nprint(\"------------------\")\nimport nvcategory\n\nvalues = np.array([97, 112, 112, 108, 101], dtype=np.int8)\nprint(\"values\",values.tobytes())\noffsets = np.array([0,1,2,3,4,5], dtype=np.int32)\nprint(\"offsets\",offsets)\nc = nvcategory.from_offsets(values,offsets,5)\nprint(c.keys(),c.values())\n\nbitmask = np.array([29], dtype=np.int8)\nprint(\"bitmask\",bitmask.tobytes())\nc = nvcategory.from_offsets(values,offsets,5,bitmask,1)\nprint(c.keys(),c.values())" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dohnlee/qufa2021
[ "5fb42caee09ec228358e49768e32c75e3c0094ce" ]
[ "outlier_detection.py" ]
[ "import os\nimport json\nimport random\nimport argparse\n\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.init as weight_init\nfrom torch.utils.data import Dataset, DataLoader\n\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n\nclass OutlierDataset(Dataset):\n \n def __init__(self, X):\n self.X = X.astype('float')\n\n def __getitem__(self, index):\n x = self.X[index, :]\n x = torch.tensor(x, dtype=torch.float32)\n return index, x\n\n def __len__(self):\n return len(self.X)\n\n\nclass Model(nn.Module):\n def __init__(self, input_size, dropout=0.5):\n super(Model, self).__init__()\n self.dropout = dropout\n if self.dropout > 0:\n self.dropout = nn.Dropout(dropout)\n \n self.encode_w1 = nn.Linear(input_size, 64)\n self.encode_w2 = nn.Linear(64, 32)\n self.decode_w1 = nn.Linear(32, 64)\n self.decode_w2 = nn.Linear(64, input_size)\n \n def encoder(self, x):\n x = self.encode_w1(x)\n x = torch.relu(x)\n x = self.encode_w2(x)\n x = torch.relu(x)\n if self.dropout:\n x = self.dropout(x)\n return x\n\n def decoder(self, x):\n x = self.decode_w1(x)\n x = torch.relu(x)\n x = self.decode_w2(x)\n return x\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\n\nclass Detector(object):\n\n def __init__(\n self,\n lr=3e-3,\n weight_decay=1e-5,\n batch_size=128,\n epochs=10\n ):\n self.lr = lr\n self.weight_decay = weight_decay\n self.batch_size = batch_size\n self.epochs = epochs\n self.threshold = 0.5\n \n def cal_recon_err(self, preds, targets):\n recon_err = F.mse_loss(preds, targets, reduction='none').mean(axis=-1)\n return recon_err\n\n def cal_loss(self, preds, targets):\n loss_mse = self.cal_recon_err(preds, targets)\n return loss_mse.mean()\n \n def run_batch(self, batch, train):\n idx, x = batch\n inputs = x.to(DEVICE)\n outputs = self.model(inputs)\n if train:\n self.optimizer.zero_grad()\n train_err = self.cal_recon_err(outputs, inputs)\n loss = train_err.mean()\n loss.backward()\n self.optimizer.step()\n else:\n loss = self.cal_loss(outputs, inputs)\n loss = loss.item()\n bsz = inputs.size(0)\n return loss * bsz, bsz, train_err.detach().cpu().tolist()\n\n def train(self, epoch=None):\n self.model.train()\n total_loss = 0\n total_cnt = 0\n train_errs = []\n\n for batch_idx, batch in enumerate(self.train_iter):\n \n loss, bsz, train_err = self.run_batch(batch, train=True)\n total_loss += loss\n total_cnt += bsz\n train_errs += train_err\n\n status = {'total_loss':total_loss/total_cnt}\n mean = np.mean(train_errs)\n std = np.std(train_errs)\n self.threshold = mean + 2*std\n\n return status\n\n def get_model(self, input_size):\n self.model = Model(input_size=input_size).to(DEVICE)\n self.optimizer = optim.Adam(self.model.parameters(),\n lr=self.lr,\n weight_decay=self.weight_decay)\n\n def fit(self, X):\n \n dataset = OutlierDataset(X)\n self.train_iter = DataLoader(dataset=dataset,\n batch_size=self.batch_size,\n shuffle=True)\n\n self.get_model(X.shape[1])\n\n wait = 0\n best_loss = 1e9\n iteration = tqdm(range(1, self.epochs + 1))\n \n for epoch in iteration:\n epoch_status = self.train(epoch)\n \n if best_loss > epoch_status['total_loss']:\n best_loss = epoch_status['total_loss']\n wait = 0\n 
else:\n wait += 1\n\n if wait > 3:\n break\n\n return self\n\n def extract(self, X):\n dataset = OutlierDataset(X)\n outlier_iter = DataLoader(dataset=dataset,\n batch_size=self.batch_size)\n \n outlier_idxs = []\n self.model.eval()\n with torch.no_grad():\n for batch in outlier_iter:\n idx, x = batch\n inputs = x.to(DEVICE)\n outputs = self.model(inputs)\n recon_err = self.cal_recon_err(outputs, inputs)\n\n outlier_idx = recon_err > self.threshold\n outlier_idx = idx[outlier_idx]\n\n outlier_idxs += outlier_idx.tolist()\n\n return outlier_idxs\n\n def fit_extract(self, X, **fit_params):\n\n return self.fit(X, **fit_params).extract(X)\n\n\nclass OutlierDetector(object):\n\n def __init__(self, input_fname, result_path):\n self.get_data(input_fname)\n self.input_fname = input_fname\n self.result_path = result_path\n\n def get_data(self, input_fname):\n data = pd.read_csv(input_fname)\n num_idx = data.dtypes[data.dtypes != 'object'].index\n num_vars = [data.columns.get_loc(idx) for idx in num_idx]\n cat_vars = list(set(range(data.shape[1])) - set(num_vars))\n\n self.data = data\n self.num_vars = num_vars\n self.cat_vars = cat_vars\n\n def write_json(self, outlier_idxs):\n obj = {\"result\": dict()}\n obj[\"result\"][\"num_outliers\"] = len(outlier_idxs)\n obj[\"result\"][\"outlier_indices\"] = outlier_idxs\n \n result_json_fname = os.path.join(self.result_path, \"result.json\")\n with open(result_json_fname, \"w\") as json_file:\n json.dump(obj, json_file)\n\n def run(self):\n \n if not os.path.isdir(self.result_path):\n os.makedirs(self.result_path)\n \n X_noise = self.data.iloc[:, self.num_vars]\n X_noise = StandardScaler().fit_transform(X_noise)\n\n detector = Detector()\n outlier_idxs = detector.fit_extract(X_noise)\n self.write_json(outlier_idxs)\n\n n = self.data.shape[0]\n idxs = list(range(n))\n clear_idxs = list(set(idxs) - set(outlier_idxs))\n result_csv_fname = os.path.join(self.result_path, 'result.csv')\n self.data.iloc[clear_idxs, :].to_csv(result_csv_fname, index=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_fname', type=str, default='bank.csv')\n parser.add_argument('--result_path', type=str, default='bank_outlier')\n args = parser.parse_args()\n\n detector = OutlierDetector(input_fname=args.input_fname, result_path=args.result_path)\n detector.run()\n" ]
[ [ "torch.nn.Dropout", "pandas.read_csv", "numpy.random.seed", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.tensor", "torch.nn.Linear", "torch.relu", "numpy.std", "numpy.mean", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.nn.functional.mse_loss", "sklearn.preprocessing.StandardScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
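The detector above learns an autoencoder on the numeric columns and flags rows whose reconstruction error exceeds mean + 2*std of the training-time errors. A minimal smoke test of the Detector class on fabricated data (the matrix shape, the +8.0 shift, and the epoch count are illustrative assumptions, not part of the original script):

import numpy as np
from sklearn.preprocessing import StandardScaler
from outlier_detection import set_seed, Detector

set_seed(0)                                # reproducibility helper defined above
rng = np.random.default_rng(0)
X = rng.normal(size=(500, 8))              # 500 fabricated inlier rows
X[:10] += 8.0                              # plant 10 obvious outliers
X = StandardScaler().fit_transform(X)

detector = Detector(epochs=20)
outlier_idxs = detector.fit_extract(X)     # indices with error > mean + 2*std
print(len(outlier_idxs), sorted(outlier_idxs)[:10])

On data like this, most of rows 0-9 should land above the threshold, since their reconstruction error dominates the error distribution of the inliers.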
hhoshino0421/SecurityMachineLearning
[ "bc627219ff3e8a1e79ccf09891803b50d2096503" ]
[ "ch4/SecurityML08/MachineLearningMain.py" ]
[ "from sklearn.preprocessing import StandardScaler\n\nfrom ReadFile import read_file\nfrom ReadModel import read_model\n\n\ndef check_malware(check_file_path, model_file_path):\n\n    check_file_obj = read_file(check_file_path)\n\n    # Note: the scaler is fit on the sample being checked; reusing the scaler\n    # fitted on the training data would be more appropriate.\n    scaler = StandardScaler()\n    sample_data_new = scaler.fit_transform(check_file_obj)\n\n    model_obj = read_model(model_file_path)\n\n    # Binarize the model's score at 0.5.\n    pred = (model_obj.predict(sample_data_new) > 0.5).astype(\"int32\")\n\n    # for debug\n    print(pred)\n\n    if pred.any():\n        print(\"Malware!\")\n    else:\n        print(\"benign file\")\n\n    print(\"end\")\n" ]
[ [ "sklearn.preprocessing.StandardScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
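For completeness, a minimal invocation sketch for check_malware, assuming read_file returns a 2-D feature matrix for the file under test and read_model returns a fitted classifier exposing predict (the ReadFile and ReadModel modules are referenced above but not shown; both paths below are placeholders):

from MachineLearningMain import check_malware

# Hypothetical paths; substitute the real feature file and trained model.
check_malware(check_file_path='sample_features.csv',
              model_file_path='malware_model.h5')
# Prints the raw 0/1 prediction array, then 'Malware!' or 'benign file'.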