repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---
mahkons/Lottery-ticket-hypothesis | [
"96ec399fdfc4138a37feecb24a63b3cdb8e50e1e"
] | [
"supervised/networks/VGG19.py"
] | [
"import torch\nimport torch.nn as nn\n\nclass VGG(nn.Module):\n #ANCHOR Change No. of Classes here.\n def __init__(self, features, num_classes=10, init_weights=True):\n super(VGG, self).__init__()\n self.features = features\n self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n def get_path(self):\n return VGG.__name__;\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers(cfg, batch_norm=False):\n layers = []\n #ANCHOR Change No. of Input channels here.\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfgs = {\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)\n return model\n\n\ndef vgg19(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 19-layer model (configuration \"E\")\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)\n\n\ndef vgg19_bn(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 19-layer model (configuration 'E') with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.flatten",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Guo-lab/Graph | [
"c4c5fbc8fb5d645c16da20351b9746019cf75aab"
] | [
"dgi_gat/layers/gat.py"
] | [
"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n\nclass GraphAttention(nn.Module):\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(GraphAttention, self).__init__()\n \n #//self.fc = nn.Linear(in_ft, out_ft, bias=False)\n self.in_features = in_features # 节点表示向量的输入特征维度\n self.out_features = out_features # 节点表示向量的输出特征维度\n self.dropout = dropout # dropout参数\n self.alpha = alpha # leakyrelu激活的参数\n self.concat = concat # 如果为true, 再进行elu激活\n \n # 定义可训练参数,即论文中的W和a\n self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))\n nn.init.xavier_uniform_(self.W.data, gain=1.414) # xavier初始化\n \n self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))\n nn.init.xavier_uniform_(self.a.data, gain=1.414) # xavier初始化\n \n # 定义leakyrelu激活函数\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n #//self.act = nn.PReLU() if act == 'prelu' else act\n\n\n\n '''\n def forward(self, seq, adj):\n Wh = torch.mm(torch.squeeze(seq, 0), self.W) # h.shape: (N, in_features), Wh.shape: (N, out_features)\n \n # 每一个节点和所有节点的attention值 \n Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])\n Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])\n e = Wh1 + Wh2.T\n \n e = self.leakyrelu(e)\n zero_vec = -9e15*torch.ones_like(e)\n adj = torch.tensor(adj)\n attention = torch.where(adj > 0, e, zero_vec)\n attention = F.softmax(attention, dim=1)\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.unsqueeze(torch.matmul(attention, Wh), 0)\n \n return F.elu(h_prime)\n '''\n\n ''' NO BUG JUST WASTE TOO MUCH MEM AND TIME\n '''\n \n # TODO LET US DEBUG\n def forward(self, h, adj):\n #//hh = torch.squeeze(h, 0) \n #//Wh = torch.mm(hh, self.W) \n #@ https://zhuanlan.zhihu.com/p/99927545\n #@ https://blog.csdn.net/weixin_43476533/article/details/107229242\n #@ https://zhuanlan.zhihu.com/p/374914494\n \n #//print(h.shape)\n #//print(adj.shape)\n Wh = torch.mm(torch.squeeze(h, 0), self.W)\n #//print(self.W.shape)\n #//print(Wh.shape)\n \n N = Wh.size()[0] # number of nodes 2708 \n \n #% Wh.repeat(1, N).view(N*N, -1) ==>> (N * N, out_features) \n #% Wh.repeat(N, 1) ==>> (N * N, out_features) \n \n a_input = torch.cat([Wh.repeat(1, N).view(N*N, -1), Wh.repeat(N, 1)], dim=1).view(N, -1, 2*self.out_features) \n e = torch.matmul(a_input, self.a).squeeze(2)\n \n e = self.leakyrelu(e)\n zero_vec = -9e15*torch.ones_like(e)\n adj = torch.tensor(adj)\n attention = torch.where(adj > 0, e, zero_vec)\n attention = F.softmax(attention, dim=1)\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.unsqueeze(torch.matmul(attention, Wh), 0)\n \n return F.elu(h_prime)\n \n\n\n\n\n\n# LET US DEBUG\n'''\n def forward(self, h, adj):\n # h: input_fea [N, in_features] in_features表示节点的输入特征向量元素个数\n # Wh: output_fea [N, out_features]\n #!\n #//Wh = torch.mm(h, self.W) \n \n #//print(\"h\", type(h)) # h <class 'torch.Tensor'>\n #//print(h.shape) # torch.Size([1, 2708, 1433]) \n hh = torch.squeeze(h, 0) \n #//print(\"hh\", type(hh)) # hh <class 'torch.Tensor'>\n #//print(hh.shape) # torch.Size([2708, 1433])\n #//print(\"W\", type(self.W)) # W <class 'torch.nn.parameter.Parameter'>\n #//print(self.W.shape) # torch.Size([1433, 512])\n Wh = torch.mm(hh, self.W) \n #//print(\"Wh\", type(Wh)) # Wh <class 'torch.Tensor'>\n #//print(Wh.shape) # torch.Size([2708, 512])\n \n # 实现论文中的特征拼接操作 Wh_i || Wh_j 得到 shape = (N, N, 2 * out_features) 新特征矩阵\n \n N = Wh.size()[0] # number of nodes \n #//print(\"OK1\") \n 
print(\"N\", N)\n #% Wh.repeat(1, N).view(N*N, -1) ==>> (N * N, out_features) \n #% Wh.repeat(N, 1) ==>> (N * N, out_features) \n a_input = torch.cat([Wh.repeat(1, N).view(N*N, -1), Wh.repeat(N, 1)], dim=1).view(N, -1, 2*self.out_features)\n #//print(\"OK2\") \n e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))\n #//print(\"OK3\") \n \n zero_vec = -9e15*torch.ones_like(e)\n #//print(\"OK4\") \n #//print(type(zero_vec))# <class 'torch.Tensor'>\n #//print(type(e)) # <class 'torch.Tensor'>\n #//print(type(adj)) # <class 'numpy.matrix'>\n adj = torch.tensor(adj)\n #//print(\"OK5\") \n #//print(type(adj))\n #!\n attention = torch.where(adj > 0, e, zero_vec)\n #//print(\"OK6\") \n attention = F.softmax(attention, dim=1)\n #//print(\"OK7\") \n attention = F.dropout(attention, self.dropout, training=self.training)\n #//print(\"OK8\") \n \n #!h_prime = torch.matmul(attention, Wh)\n h_prime = torch.unsqueeze(torch.matmul(attention, Wh), 0)\n \n #//print(\"OK9\") \n print(\"h_prime\", type(h_prime))\n #//print(\"OK10\") \n print(h_prime.shape)\n \n if self.concat:\n #//print(\"concat OKK\")\n return F.elu(h_prime)\n else:\n return h_prime\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Origin GCN layer Frame\n'''\nclass GCN(nn.Module):\n def __init__(self, in_ft, out_ft, act, bias=True):\n super(GCN, self).__init__()\n self.fc = nn.Linear(in_ft, out_ft, bias=False)\n self.act = nn.PReLU() if act == 'prelu' else act\n \n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(out_ft))\n self.bias.data.fill_(0.0)\n else:\n self.register_parameter('bias', None)\n\n for m in self.modules():\n self.weights_init(m)\n\n def weights_init(self, m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n # Shape of seq: (batch, nodes, features)\n def forward(self, seq, adj, sparse=False):\n seq_fts = self.fc(seq)\n if sparse:\n out = torch.unsqueeze(torch.spmm(adj, torch.squeeze(seq_fts, 0)), 0)\n else:\n out = torch.bmm(adj, seq_fts)\n if self.bias is not None:\n out += self.bias\n \n return self.act(out)\n'''"
] | [
[
"torch.nn.functional.softmax",
"torch.empty",
"torch.nn.functional.dropout",
"torch.tensor",
"torch.matmul",
"torch.nn.LeakyReLU",
"torch.where",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.elu",
"torch.ones_like",
"torch.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
radioactive73/PassGAN | [
"d11f9bb9eb9326be301bca14f47cbc5acd047495"
] | [
"train.py"
] | [
"import os, sys\nsys.path.append(os.getcwd())\n\nimport time\nimport pickle\nimport argparse\nimport numpy as np\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\n\nimport utils\nimport tflib as lib\nimport tflib.ops.linear\nimport tflib.ops.conv1d\nimport tflib.plot\nimport models\n\n'''\n\n$ python train.py -o \"pretrained\"\n\n'''\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--training-data', '-i',\n default='data/train.txt',\n dest='training_data',\n help='Path to training data file (one password per line) (default: data/train.txt)')\n\n parser.add_argument('--output-dir', '-o',\n required=True,\n dest='output_dir',\n help='Output directory. If directory doesn\\'t exist it will be created.')\n\n parser.add_argument('--save-every', '-s',\n type=int,\n default=5000,\n dest='save_every',\n help='Save model checkpoints after this many iterations (default: 5000)')\n\n parser.add_argument('--iters', '-n',\n type=int,\n default=200000,\n dest='iters',\n help='The number of training iterations (default: 200000)')\n\n parser.add_argument('--batch-size', '-b',\n type=int,\n default=64,\n dest='batch_size',\n help='Batch size (default: 64).')\n \n parser.add_argument('--seq-length', '-l',\n type=int,\n default=10,\n dest='seq_length',\n help='The maximum password length (default: 10)')\n \n parser.add_argument('--layer-dim', '-d',\n type=int,\n default=128,\n dest='layer_dim',\n help='The hidden layer dimensionality for the generator and discriminator (default: 128)')\n \n parser.add_argument('--critic-iters', '-c',\n type=int,\n default=10,\n dest='critic_iters',\n help='The number of discriminator weight updates per generator update (default: 10)')\n \n parser.add_argument('--lambda', '-p',\n type=int,\n default=10,\n dest='lamb',\n help='The gradient penalty lambda hyperparameter (default: 10)')\n \n return parser.parse_args()\n\nargs = parse_args()\n\nlines, charmap, inv_charmap = utils.load_dataset(\n path=args.training_data,\n max_length=args.seq_length)\n\n\n# Pickle to avoid encoding errors with json\nwith open(os.path.join(args.output_dir, 'charmap.pickle'), 'wb') as f:\n pickle.dump(charmap, f)\n\nwith open(os.path.join(args.output_dir, 'charmap_inv.pickle'), 'wb') as f:\n pickle.dump(inv_charmap, f)\n \nprint(\"Number of unique characters in dataset: {}\".format(len(charmap)))\n\nreal_inputs_discrete = tf.compat.v1.placeholder(tf.int32, shape=[args.batch_size, args.seq_length])\nreal_inputs = tf.one_hot(real_inputs_discrete, len(charmap))\n\nfake_inputs = models.Generator(args.batch_size, args.seq_length, args.layer_dim, len(charmap))\nfake_inputs_discrete = tf.argmax(input=fake_inputs, axis=fake_inputs.get_shape().ndims-1)\n\ndisc_real = models.Discriminator(real_inputs, args.seq_length, args.layer_dim, len(charmap))\ndisc_fake = models.Discriminator(fake_inputs, args.seq_length, args.layer_dim, len(charmap))\n\ndisc_cost = tf.reduce_mean(input_tensor=disc_fake) - tf.reduce_mean(input_tensor=disc_real)\ngen_cost = -tf.reduce_mean(input_tensor=disc_fake)\n\n# WGAN lipschitz-penalty\nalpha = tf.random.uniform(\n shape=[args.batch_size,1,1],\n minval=0.,\n maxval=1.\n)\n\ndifferences = fake_inputs - real_inputs\ninterpolates = real_inputs + (alpha*differences)\ngradients = tf.gradients(ys=models.Discriminator(interpolates, args.seq_length, args.layer_dim, len(charmap)), xs=[interpolates])[0]\nslopes = tf.sqrt(tf.reduce_sum(input_tensor=tf.square(gradients), axis=[1,2]))\ngradient_penalty = 
tf.reduce_mean(input_tensor=(slopes-1.)**2)\ndisc_cost += args.lamb * gradient_penalty\n\ngen_params = lib.params_with_name('Generator')\ndisc_params = lib.params_with_name('Discriminator')\n\ngen_train_op = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=gen_params)\ndisc_train_op = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=disc_params)\n\n# Dataset iterator\ndef inf_train_gen():\n while True:\n np.random.shuffle(lines)\n for i in range(0, len(lines)-args.batch_size+1, args.batch_size):\n yield np.array(\n [[charmap[c] for c in l] for l in lines[i:i+args.batch_size]],\n dtype='int32'\n )\n \n# During training we monitor JS divergence between the true & generated ngram\n# distributions for n=1,2,3,4. To get an idea of the optimal values, we\n# evaluate these statistics on a held-out set first.\ntrue_char_ngram_lms = [utils.NgramLanguageModel(i+1, lines[10*args.batch_size:], tokenize=False) for i in range(4)]\nvalidation_char_ngram_lms = [utils.NgramLanguageModel(i+1, lines[:10*args.batch_size], tokenize=False) for i in range(4)]\nfor i in range(4):\n print(\"validation set JSD for n={}: {}\".format(i+1, true_char_ngram_lms[i].js_with(validation_char_ngram_lms[i])))\ntrue_char_ngram_lms = [utils.NgramLanguageModel(i+1, lines, tokenize=False) for i in range(4)]\n\n\n# TensorFlow Session\nwith tf.compat.v1.Session() as session:\n\n # Time stamp\n localtime = time.asctime( time.localtime(time.time()) )\n print(\"Starting TensorFlow session...\")\n print(\"Local current time :\", localtime)\n \n # Start TensorFlow session...\n session.run(tf.compat.v1.global_variables_initializer())\n\n def generate_samples():\n samples = session.run(fake_inputs)\n samples = np.argmax(samples, axis=2)\n decoded_samples = []\n for i in range(len(samples)):\n decoded = []\n for j in range(len(samples[i])):\n decoded.append(inv_charmap[samples[i][j]])\n decoded_samples.append(tuple(decoded))\n return decoded_samples\n\n gen = inf_train_gen()\n\n for iteration in range(args.iters + 1):\n start_time = time.time()\n\n # Train generator\n if iteration > 0:\n _ = session.run(gen_train_op)\n\n # Train critic\n for i in range(args.critic_iters):\n _data = next(gen)\n _disc_cost, _ = session.run(\n [disc_cost, disc_train_op],\n feed_dict={real_inputs_discrete:_data}\n )\n\n lib.plot.output_dir = args.output_dir\n lib.plot.plot('time', time.time() - start_time)\n lib.plot.plot('train disc cost', _disc_cost)\n\n # Output to text file after every 100 samples\n if iteration % 100 == 0 and iteration > 0:\n\n samples = []\n for i in range(10):\n samples.extend(generate_samples())\n\n for i in range(4):\n lm = utils.NgramLanguageModel(i+1, samples, tokenize=False)\n lib.plot.plot('js{}'.format(i+1), lm.js_with(true_char_ngram_lms[i]))\n\n with open(os.path.join(args.output_dir, 'samples', 'samples_{}.txt').format(iteration), 'w') as f:\n for s in samples:\n s = \"\".join(s)\n f.write(s + \"\\n\")\n\n if iteration % args.save_every == 0 and iteration > 0:\n model_saver = tf.compat.v1.train.Saver()\n model_saver.save(session, os.path.join(args.output_dir, 'checkpoints', 'checkpoint_{}.ckpt').format(iteration))\n print(\"{} / {} ({}%)\".format(iteration, args.iters, iteration/args.iters*100.0 ))\n\n if iteration == args.iters:\n print(\"...Training done.\")\n \n if iteration % 100 == 0:\n lib.plot.flush()\n\n lib.plot.tick()\n \n# Time stamp\nlocaltime = time.asctime( time.localtime(time.time()) )\nprint(\"Ending 
TensorFlow session.\")\nprint(\"Local current time :\", localtime)\n"
] | [
[
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.reduce_mean",
"tensorflow.random.uniform",
"numpy.random.shuffle",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"numpy.argmax",
"tensorflow.square",
"tensorflow.compat.v1.disable_eager_execution",
"numpy.array",
"tensorflow.compat.v1.train.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FranckLejzerowicz/microbiome_analyzer | [
"7d48f69eac85fecc0016efba52ea23d846cdcaa2"
] | [
"microbiome_analyzer/_rarefy.py"
] | [
"# ----------------------------------------------------------------------------\n# Copyright (c) 2020, Franck Lejzerowicz.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import skew\nnp.set_printoptions(precision=2, suppress=True)\n\n\ndef get_dat_depths(\n dat: str,\n i_datasets_folder: str,\n depths_yml: dict,\n sam_sum: pd.Series) -> tuple:\n \"\"\"\n Parameters\n ----------\n dat : str\n Dataset name\n i_datasets_folder : str\n Path to the folder containing the data/metadata sub-folders\n depths_yml : dist\n Mapping Dataset nanme -> Depths at which to rarefy\n sam_sum : pd.Series\n Sum of reads per sample\n\n Returns\n -------\n skip : bool\n Whether to skip a rarefaction\n depths_tuple : tuple\n (boolean, Rarefaction depths)\n \"\"\"\n skip = False\n if not depths_yml:\n depths = get_default_raref_depth(i_datasets_folder, dat, sam_sum)\n depths_tuple = (0, depths)\n elif dat in depths_yml:\n skip, depths = get_depths(depths_yml[dat], dat, sam_sum)\n depths_tuple = (1, depths)\n else:\n skip = True\n depths_tuple = []\n return skip, depths_tuple\n\n\ndef get_default_raref_depth(\n i_datasets_folder: str,\n dat: str,\n sam_sum: pd.Series):\n \"\"\"\n Parameters\n ----------\n i_datasets_folder : str\n Path to the folder containing the data/metadata sub-folders\n dat : str\n Dataset name\n sam_sum : pd.Series\n Sum of reads per sample\n\n Returns\n -------\n depths : list\n Rarefaction depths\n \"\"\"\n raref_files = glob.glob('%s/qiime/rarefy/%s/tab_raref*.qza' % (\n i_datasets_folder, dat))\n if len(raref_files):\n depths = [\n x.split('_raref')[-1].split('.tsv')[0] for x in raref_files\n ]\n else:\n second_quantile = sam_sum.quantile(0.2)\n print_skew(dat, sam_sum)\n if second_quantile < 1000:\n depths = []\n print_not_rarefying(dat, sam_sum)\n else:\n nfigure = len(str(int(second_quantile)))\n second_quantile_to_round = second_quantile / (10 ** (nfigure - 2))\n second_quantile_rounded = round(second_quantile_to_round) * (\n 10 ** (nfigure - 2))\n depths = [str(int(second_quantile_rounded))]\n print('[%s] Proposed rarefaction depth: %s '\n '(second quantile)' % (dat, depths[0]))\n return depths\n\n\ndef get_depths(\n depths_yml: list,\n dat: str,\n sam_sum: pd.Series) -> tuple:\n \"\"\"\n\n Parameters\n ----------\n depths_yml : list\n Depths at which to rarefy\n dat : str\n Dataset name\n sam_sum : pd.Series\n Sum of reads per sample\n\n Returns\n -------\n skip : bool\n Whether to skip a rarefaction\n depths : list\n Depths at which to rarefy\n \"\"\"\n skip = False\n depths = []\n for depth in depths_yml:\n if depth == 'min' or sum(sam_sum >= int(depth)) > 10:\n depths.append(depth)\n if not depths:\n print('[%s] Min. 
proposed rarefaction depths would leave <10 samples: '\n '%s (not rarefaction)' % (dat, ', '.join(depths_yml)))\n skip = True\n elif len(depths) != len(depths_yml):\n print('[%s] Proposed rarefaction depths would leave <10 samples: %s ('\n 'not rarefied)' % (dat, ', '.join([x for x in depths_yml\n if x not in depths])))\n return skip, depths\n\n\ndef print_skew(\n dat: str,\n tsv_sam_sum: pd.Series) -> None:\n \"\"\"\n Parameters\n ----------\n dat : str\n Dataset name\n tsv_sam_sum : pd.Series\n Sum of reads per sample\n \"\"\"\n count, division = np.histogram(tsv_sam_sum)\n skw = skew(count)\n if abs(skw) > 1:\n print()\n print(' ==> Consider rarefying <==')\n print('[%s] Reads-per-sample distribution [skewness=%s] (>1!)' % (\n dat, round(abs(float(skw)), 3)))\n division_std = np.interp(\n count, (min(count), max(count)), (0, 20))\n print('\\treadsbin\\tsamples\\thistogram')\n for ddx, div in enumerate(division_std):\n if div > 1:\n print('\\t%s\\t%s\\t%s' % (\n format(division[ddx], '6.3E'), count[ddx], '-' * int(div)))\n elif div == 0:\n print('\\t%s\\t%s\\t%s' % (\n format(division[ddx], '6.3E'), count[ddx], ''))\n else:\n print('\\t%s\\t%s\\t%s' % (\n format(division[ddx], '6.3E'), count[ddx], '-'))\n\n\ndef print_not_rarefying(\n dat: str,\n sam_sum: pd.Series) -> None:\n \"\"\"\n Parameters\n ----------\n dat: str\n Dataset name\n sam_sum : pd.Series\n Sum of reads per sample\n \"\"\"\n print('[%s] Second quantile of the reads-per-sample '\n 'distribution is <1000' % dat)\n print('- The sequencing might have failed! Analyze with caution')\n print('- reads-per-sample distribution described:')\n for x, y in sam_sum.describe().to_dict().items():\n print('\\t%s: %s' % (x, round(y, 3)))\n print('!!! NOT RAREFYING %s !!!' % dat)\n\n\ndef get_digit_depth(\n depth_: str,\n tsv_sums: pd.Series) -> int:\n \"\"\"Get the rarefaction depth integer\n\n Parameters\n ----------\n depth_ : str\n Rarefaction depth\n tsv_sums : pd.Series\n Sum of reads per sample\n\n Returns\n -------\n depth : int\n Rarefaction depth\n \"\"\"\n if depth_.isdigit():\n depth = int(depth_)\n else:\n depth = int(np.floor(min(tsv_sums)))\n return depth\n"
] | [
[
"numpy.set_printoptions",
"numpy.histogram",
"scipy.stats.skew"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
bzhaocaltech/alpha-zero-ramsey-numbers | [
"dfd10b577c1bc26c4f445bcc2fafa8c1bcf9cb6c"
] | [
"MCTS.py"
] | [
"import math\nimport numpy as np\n\nEPS = 1e-8\n\nclass MCTS():\n \"\"\"\n This class handles the MCTS tree.\n \"\"\"\n\n def __init__(self, game, nnet, args):\n self.game = game\n self.nnet = nnet\n self.args = args\n self.Qsa = {} # stores Q values for s,a (as defined in the paper)\n self.Nsa = {} # stores #times edge s,a was visited\n self.Ns = {} # stores #times board s was visited\n self.Ps = {} # stores initial policy (returned by neural net)\n\n self.Es = {} # stores game.getGameEnded ended for board s\n self.Vs = {} # stores game.getValidMoves for board s\n\n def getActionProb(self, canonicalBoard, temp=1):\n \"\"\"\n This function performs numMCTSSims simulations of MCTS starting from\n canonicalBoard.\n Returns:\n probs: a policy vector where the probability of the ith action is\n proportional to Nsa[(s,a)]**(1./temp)\n \"\"\"\n for i in range(self.args.numMCTSSims):\n self.search(canonicalBoard)\n\n s = self.game.stringRepresentation(canonicalBoard)\n counts = [self.Nsa[(s, a)] if (s, a) in self.Nsa else 0 for a in range(self.game.getActionSize())]\n\n if temp == 0:\n bestA = np.argmax(counts)\n probs = [0] * len(counts)\n probs[bestA] = 1\n return probs\n\n counts = [x ** (1. / temp) for x in counts]\n probs = [x / float(sum(counts)) for x in counts]\n return probs\n\n def search(self, canonicalBoard):\n \"\"\"\n This function performs one iteration of MCTS. It is recursively called\n till a leaf node is found. The action chosen at each node is one that\n has the maximum upper confidence bound as in the paper.\n Once a leaf node is found, the neural network is called to return an\n initial policy P and a value v for the state. This value is propogated\n up the search path. In case the leaf node is a terminal state, the\n outcome is propogated up the search path. The values of Ns, Nsa, Qsa are\n updated.\n NOTE: the return values are the negative of the value of the current\n state. This is done since v is in [-1,1] and if v is the value of a\n state for the current player, then its value is -v for the other player.\n Returns:\n v: the negative of the value of the current canonicalBoard\n \"\"\"\n\n s = self.game.stringRepresentation(canonicalBoard)\n\n if s not in self.Es:\n self.Es[s] = self.game.getGameEnded(canonicalBoard, 1)\n if self.Es[s] != 0:\n # terminal node\n return -self.Es[s]\n\n if s not in self.Ps:\n # leaf node\n self.Ps[s], v = self.nnet.predict(canonicalBoard)\n valids = self.game.getValidMoves(canonicalBoard, 1)\n self.Ps[s] = self.Ps[s] * valids # masking invalid moves\n sum_Ps_s = np.sum(self.Ps[s])\n if sum_Ps_s > 0:\n self.Ps[s] /= sum_Ps_s # renormalize\n else:\n # if all valid moves were masked make all valid moves equally probable\n\n # NB! 
All valid moves may be masked if either your NNet architecture is insufficient or you've get overfitting or something else.\n # If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process.\n print(\"All valid moves were masked, do workaround.\")\n self.Ps[s] = self.Ps[s] + valids\n self.Ps[s] /= np.sum(self.Ps[s])\n\n self.Vs[s] = valids\n self.Ns[s] = 0\n return -v\n\n valids = self.Vs[s]\n cur_best = -float('inf')\n best_act = -1\n\n # pick the action with the highest upper confidence bound\n for a in range(self.game.getActionSize()):\n if valids[a]:\n if (s, a) in self.Qsa:\n u = self.Qsa[(s, a)] + self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s]) / (\n 1 + self.Nsa[(s, a)])\n else:\n u = self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s] + EPS) # Q = 0 ?\n\n if u > cur_best:\n cur_best = u\n best_act = a\n\n a = best_act\n next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)\n next_s = self.game.getCanonicalForm(next_s, next_player)\n\n v = self.search(next_s)\n\n if (s, a) in self.Qsa:\n self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)\n self.Nsa[(s, a)] += 1\n\n else:\n self.Qsa[(s, a)] = v\n self.Nsa[(s, a)] = 1\n\n self.Ns[s] += 1\n return -v\n"
] | [
[
"numpy.argmax",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
9meo/Thai-Clickbait | [
"b4d98b2b58545fa4b031aad993fb9150493f729b"
] | [
"src/w2v.py"
] | [
"from __future__ import print_function\nfrom gensim.models import word2vec\nfrom os.path import join, exists, split\nimport os\nimport numpy as np\n\n\ndef train_word2vec(sentence_matrix, vocabulary_inv,\n num_features=300, min_word_count=1, context=10 , seg='seg_lextoplus'):\n \"\"\"\n Trains, saves, loads Word2Vec model\n Returns initial weights for embedding layer.\n \n inputs:\n sentence_matrix # int matrix: num_sentences x max_sentence_len\n vocabulary_inv # list of words\n num_features # Word vector dimensionality \n min_word_count # Minimum word count \n context # Context window size \n \"\"\"\n model_dir = 'models'\n model_name = \"{:d}features_{:d}minwords_{:d}context_{}_\".format(num_features, min_word_count, context,seg)\n model_name = join(model_dir, model_name)\n if exists(model_name):\n embedding_model = word2vec.Word2Vec.load(model_name)\n print('Load existing Word2Vec model \\'%s\\'' % split(model_name)[-1])\n else:\n # Set values for various parameters\n num_workers = 2 # Number of threads to run in parallel\n downsampling = 1e-3 # Downsample setting for frequent words\n\n # Initialize and train the model\n print('Training Word2Vec model...')\n sentences = [[vocabulary_inv[w] for w in s] for s in sentence_matrix]\n embedding_model = word2vec.Word2Vec(sentences, workers=num_workers,\n size=num_features, min_count=min_word_count,\n window=context, sample=downsampling)\n\n # If we don't plan to train the model any further, calling \n # init_sims will make the model much more memory-efficient.\n embedding_model.init_sims(replace=True)\n\n # Saving the model for later use. You can load it later using Word2Vec.load()\n if not exists(model_dir):\n os.mkdir(model_dir)\n print('Saving Word2Vec model \\'%s\\'' % split(model_name)[-1])\n embedding_model.save(model_name)\n\n # add unknown words\n embedding_weights = [np.array([embedding_model[w] if w in embedding_model\n else np.random.uniform(-0.25, 0.25, embedding_model.vector_size)\n for w in vocabulary_inv])]\n return embedding_weights\n\n\nif __name__ == '__main__':\n import data_helpers\n\n print(\"Loading data...\")\n x, _, _, vocabulary_inv = data_helpers.load_data()\n w = train_word2vec(x, vocabulary_inv)\n"
] | [
[
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HedgehogCode/stardist | [
"087dad36be7dbf50ae53f915df69974e00efb207"
] | [
"stardist/model.py"
] | [
"from __future__ import print_function, unicode_literals, absolute_import, division\nfrom six.moves import range, zip, map, reduce, filter\nfrom six import string_types\n\nimport numpy as np\nimport argparse\nimport warnings\nimport datetime\n\nimport keras.backend as K\nfrom keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, TensorBoard\nfrom keras.layers import Input, Conv2D\nfrom keras.models import Model\nfrom keras.utils import Sequence\nfrom keras.optimizers import Adam\n\nfrom csbdeep.internals.blocks import unet_block\nfrom csbdeep.utils import _raise, Path, load_json, save_json, backend_channels_last\nfrom csbdeep.data import Resizer, NoResizer, PadAndCropResizer\n\nfrom .utils import star_dist, edt_prob\nfrom skimage.segmentation import clear_border\n\n\n\nif not backend_channels_last():\n raise NotImplementedError(\n \"Keras is configured to use the '%s' image data format, which is currently not supported. \"\n \"Please change it to use 'channels_last' instead: \"\n \"https://keras.io/getting-started/faq/#where-is-the-keras-configuration-file-stored\" % K.image_data_format()\n )\n\n\n\ndef masked_loss(mask, penalty):\n def _loss(d_true, d_pred):\n return K.mean(mask * penalty(d_pred - d_true), axis=-1)\n return _loss\n\ndef masked_loss_mae(mask):\n return masked_loss(mask, K.abs)\n\ndef masked_loss_mse(mask):\n return masked_loss(mask, K.square)\n\n\n\nclass StarDistData(Sequence):\n\n def __init__(self, X, Y, batch_size, n_rays, patch_size=(256,256), b=32, shape_completion=False, same_patches=False):\n \"\"\"\n Parameters\n ----------\n same_patches : bool\n Set to true for validation data to always get the same patch for each image\n \"\"\"\n\n # TODO: simple augmentations (rotation & flips)\n self.X, self.Y = X, Y\n self.batch_size = batch_size\n self.n_rays = n_rays\n self.patch_size = patch_size\n self.perm = np.random.permutation(len(self.X))\n self.shape_completion = bool(shape_completion)\n self.same_patches = bool(same_patches)\n\n if self.shape_completion and b > 0:\n self.b = slice(b,-b),slice(b,-b)\n else:\n self.b = slice(None),slice(None)\n\n def __len__(self):\n return int(np.ceil(len(self.X) / float(self.batch_size)))\n\n def on_epoch_end(self):\n self.perm = np.random.permutation(len(self.X))\n\n def _random_patches(self, shapes, idx):\n def _single_patch(shape,i):\n all(s>=p for s,p in zip(shape, self.patch_size)) or _raise(ValueError('patch size > image size'))\n rng = np.random.RandomState(i) if self.same_patches else np.random\n start = (rng.randint(0,1+s-p) for s,p in zip(shape, self.patch_size))\n return tuple(slice(st,st+p) for st,p in zip(start, self.patch_size))\n return tuple(_single_patch(s,i) for s,i in zip(shapes,idx))\n\n def __getitem__(self, i):\n idx = slice(i*self.batch_size,(i+1)*self.batch_size)\n idx = list(self.perm[idx])\n patches = self._random_patches([self.X[k].shape for k in idx], idx)\n X = [self.X[k][sl][self.b] for k,sl in zip(idx,patches)]\n Y = [self.Y[k][sl] for k,sl in zip(idx,patches)]\n\n prob = np.stack([edt_prob(lbl[self.b]) for lbl in Y])\n\n if self.shape_completion:\n Y_cleared = [clear_border(lbl) for lbl in Y]\n dist = np.stack([star_dist(lbl,self.n_rays)[self.b+(slice(None),)] for lbl in Y_cleared])\n dist_mask = np.stack([edt_prob(lbl[self.b]) for lbl in Y_cleared])\n else:\n dist = np.stack([star_dist(lbl,self.n_rays) for lbl in Y])\n dist_mask = prob\n\n X = np.expand_dims(np.stack(X),-1)\n prob = np.expand_dims(prob,-1)\n dist_mask = np.expand_dims(dist_mask,-1)\n\n return [X,dist_mask], 
[prob,dist]\n\n\n\nclass Config(argparse.Namespace):\n \"\"\"Configuration for a :class:`StarDist` model.\n\n Parameters\n ----------\n n_rays : int\n Number of radial directions for the star-convex polygon.\n Recommended to use a power of 2 (default: 32).\n n_channel_in : int\n Number of channels of given input image (default: 1).\n kwargs : dict\n Overwrite (or add) configuration attributes (see below).\n\n\n Attributes\n ----------\n unet_n_depth : int\n Number of U-Net resolution levels (down/up-sampling layers).\n unet_kernel_size : (int,int)\n Convolution kernel size for all (U-Net) convolution layers.\n unet_n_filter_base : int\n Number of convolution kernels (feature channels) for first U-Net layer.\n Doubled after each down-sampling layer.\n net_conv_after_unet : int\n Number of extra convolution layers after U-Net (0 to disable).\n train_shape_completion : bool\n Train model to predict complete shapes for partially visible objects at image boundary.\n train_completion_crop : int\n If 'train_shape_completion' is set to True, specify number of pixels to crop at boundary of training patches.\n Should be chosen based on (largest) object sizes.\n train_patch_size : (int,int)\n Size of patches to be cropped from provided training images.\n train_dist_loss : str\n Training loss for star-convex polygon distances ('mse' or 'mae').\n train_epochs : int\n Number of training epochs.\n train_steps_per_epoch : int\n Number of parameter update steps per epoch.\n train_learning_rate : float\n Learning rate for training.\n train_batch_size : int\n Batch size for training.\n train_tensorboard : bool\n Enable TensorBoard for monitoring training progress.\n train_checkpoint : str\n Name of checkpoint file for model weights (only best are saved); set to ``None`` to disable.\n train_reduce_lr : dict\n Parameter :class:`dict` of ReduceLROnPlateau_ callback; set to ``None`` to disable.\n\n .. _ReduceLROnPlateau: https://keras.io/callbacks/#reducelronplateau\n \"\"\"\n\n def __init__(self, n_rays=32, n_channel_in=1, **kwargs):\n \"\"\"See class docstring.\"\"\"\n\n # directly set by parameters\n self.n_rays = n_rays\n self.n_channel_in = int(n_channel_in)\n\n # default config (can be overwritten by kwargs below)\n self.unet_n_depth = 3\n self.unet_kernel_size = (3,3)\n self.unet_n_filter_base = 32\n self.net_conv_after_unet = 128\n if backend_channels_last():\n self.net_input_shape = (None, None, self.n_channel_in)\n else:\n self.net_input_shape = (self.n_channel_in, None, None)\n\n self.train_shape_completion = False\n self.train_completion_crop = 32\n self.train_patch_size = (256,256)\n\n self.train_dist_loss = 'mae'\n self.train_epochs = 100\n self.train_steps_per_epoch = 400\n self.train_learning_rate = 0.0003\n self.train_batch_size = 4\n self.train_tensorboard = True\n self.train_checkpoint = 'weights_best.h5'\n self.train_reduce_lr = {'factor': 0.5, 'patience': 10}\n\n for k in kwargs:\n setattr(self, k, kwargs[k])\n\n\n def is_valid(self, return_invalid=False):\n # TODO: check if configuration is valid\n return True\n\n\n\nclass StarDist(object):\n \"\"\"StarDist model.\n\n Parameters\n ----------\n config : :class:`Config` or None\n Will be saved to disk as JSON (``config.json``).\n If set to ``None``, will be loaded from disk (must exist).\n name : str or None\n Model name. 
Uses a timestamp if set to ``None`` (default).\n basedir : str\n Directory that contains (or will contain) a folder with the given model name.\n\n Raises\n ------\n FileNotFoundError\n If ``config=None`` and config cannot be loaded from disk.\n ValueError\n Illegal arguments, including invalid configuration.\n\n Attributes\n ----------\n config : :class:`Config`\n Configuration, as provided during instantiation.\n keras_model : `Keras model <https://keras.io/getting-started/functional-api-guide/>`_\n Keras neural network model.\n name : str\n Model name.\n logdir : :class:`pathlib.Path`\n Path to model folder (which stores configuration, weights, etc.)\n \"\"\"\n\n def __init__(self, config=Config(), name=None, basedir='.'):\n \"\"\"See class docstring.\"\"\"\n\n config is None or isinstance(config,Config) or _raise(ValueError('Invalid configuration: %s' % str(config)))\n # if config is not None and not config.is_valid():\n # invalid_attr = config.is_valid(True)[1]\n # raise ValueError('Invalid configuration attributes: ' + ', '.join(invalid_attr))\n\n name is None or isinstance(name,string_types) or _raise(ValueError())\n isinstance(basedir,(string_types,Path)) or _raise(ValueError())\n self.config = config\n self.basedir = Path(basedir)\n self.name = name\n self._set_logdir()\n self._model_prepared = False\n self.keras_model = self._build()\n if config is None:\n self._find_and_load_weights()\n\n\n def _set_logdir(self):\n if self.name is None:\n self.name = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S.%f\")\n self.logdir = self.basedir / self.name\n\n config_file = self.logdir / 'config.json'\n if self.config is None:\n if config_file.exists():\n config_dict = load_json(str(config_file))\n self.config = Config(**config_dict)\n if not self.config.is_valid():\n invalid_attr = self.config.is_valid(True)[1]\n raise ValueError('Invalid attributes in loaded config: ' + ', '.join(invalid_attr))\n else:\n raise FileNotFoundError(\"config file doesn't exist: %s\" % str(config_file.resolve()))\n else:\n if self.logdir.exists():\n warnings.warn('output path for model already exists, files may be overwritten: %s' % str(self.logdir.resolve()))\n self.logdir.mkdir(parents=True, exist_ok=True)\n save_json(vars(self.config), str(config_file))\n\n\n def _find_and_load_weights(self,prefer='best'):\n from itertools import chain\n # get all weight files and sort by modification time descending (newest first)\n weights_ext = ('*.h5','*.hdf5')\n weights_files = chain(*(self.logdir.glob(ext) for ext in weights_ext))\n weights_files = reversed(sorted(weights_files, key=lambda f: f.stat().st_mtime))\n weights_files = list(weights_files)\n if len(weights_files) == 0:\n warnings.warn(\"Couldn't find any network weights (%s) to load.\" % ', '.join(weights_ext))\n return\n weights_preferred = list(filter(lambda f: prefer in f.name, weights_files))\n weights_chosen = weights_preferred[0] if len(weights_preferred)>0 else weights_files[0]\n print(\"Loading network weights from '%s'.\" % weights_chosen.name)\n self.load_weights(weights_chosen.name)\n\n\n def _build(self):\n input_img = Input(self.config.net_input_shape,name='input')\n input_mask = Input(self.config.net_input_shape,name='dist_mask')\n\n unet_kwargs = {k[5:]:v for (k,v) in vars(self.config).items() if k.startswith('unet_')}\n unet = unet_block(**unet_kwargs)(input_img)\n if self.config.net_conv_after_unet > 0:\n unet = Conv2D(self.config.net_conv_after_unet,self.config.unet_kernel_size,\n 
name='features',padding='same',activation='relu')(unet)\n\n output_prob = Conv2D(1, (1,1),name='prob',padding='same',activation='sigmoid')(unet)\n output_dist = Conv2D(self.config.n_rays,(1,1),name='dist',padding='same',activation='linear')(unet)\n return Model([input_img,input_mask],[output_prob,output_dist])\n\n\n def load_weights(self, name='weights_best.h5'):\n \"\"\"Load neural network weights from model folder.\n\n Parameters\n ----------\n name : str\n Name of HDF5 weight file (as saved during or after training).\n \"\"\"\n self.keras_model.load_weights(str(self.logdir/name))\n\n\n def prepare_for_training(self, optimizer=None):\n \"\"\"Prepare for neural network training.\n\n Compiles the model and creates\n `Keras Callbacks <https://keras.io/callbacks/>`_ to be used for training.\n\n Note that this method will be implicitly called once by :func:`train`\n (with default arguments) if not done so explicitly beforehand.\n\n Parameters\n ----------\n optimizer : obj or None\n Instance of a `Keras Optimizer <https://keras.io/optimizers/>`_ to be used for training.\n If ``None`` (default), uses ``Adam`` with the learning rate specified in ``config``.\n\n \"\"\"\n if optimizer is None:\n optimizer = Adam(lr=self.config.train_learning_rate)\n\n dist_loss = {'mse': masked_loss_mse, 'mae': masked_loss_mae}[self.config.train_dist_loss]\n input_mask = self.keras_model.inputs[1] # second input layer is mask for dist loss\n self.keras_model.compile(optimizer, loss=['binary_crossentropy',dist_loss(input_mask)])\n\n self.callbacks = []\n if self.config.train_checkpoint is not None:\n self.callbacks.append(ModelCheckpoint(str(self.logdir / self.config.train_checkpoint), save_best_only=True, save_weights_only=True))\n\n if self.config.train_tensorboard:\n self.callbacks.append(TensorBoard(log_dir=str(self.logdir), write_graph=False))\n\n if self.config.train_reduce_lr is not None:\n rlrop_params = self.config.train_reduce_lr\n if 'verbose' not in rlrop_params:\n rlrop_params['verbose'] = True\n self.callbacks.append(ReduceLROnPlateau(**rlrop_params))\n\n self._model_prepared = True\n\n\n def train(self, X, Y, validation_data, epochs=None, steps_per_epoch=None):\n \"\"\"Train the neural network with the given data.\n\n Parameters\n ----------\n X : :class:`numpy.ndarray`\n Array of input images.\n Y : :class:`numpy.ndarray`\n Array of label masks.\n validation_data : tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`)\n Tuple of X,Y validation arrays.\n epochs : int\n Optional argument to use instead of the value from ``config``.\n steps_per_epoch : int\n Optional argument to use instead of the value from ``config``.\n\n Returns\n -------\n ``History`` object\n See `Keras training history <https://keras.io/models/model/#fit>`_.\n\n \"\"\"\n\n validation_data is not None or _raise(ValueError())\n ((isinstance(validation_data,(list,tuple)) and len(validation_data)==2)\n or _raise(ValueError('validation_data must be a pair of numpy arrays')))\n\n patch_size = self.config.train_patch_size\n b = self.config.train_completion_crop if self.config.train_shape_completion else 0\n div_by = 2**self.config.unet_n_depth\n if any((p-2*b)%div_by!=0 for p in patch_size):\n if self.config.train_shape_completion:\n raise ValueError(\"every value of 'train_patch_size' - 2*'train_completion_crop' must be divisible by 2**'unet_n_depth'\")\n else:\n raise ValueError(\"every value of 'train_patch_size' must be divisible by 2**'unet_n_depth'\")\n\n if epochs is None:\n epochs = self.config.train_epochs\n if steps_per_epoch 
is None:\n steps_per_epoch = self.config.train_steps_per_epoch\n\n if not self._model_prepared:\n self.prepare_for_training()\n\n data_kwargs = {\n 'n_rays': self.config.n_rays,\n 'batch_size': self.config.train_batch_size,\n 'patch_size': self.config.train_patch_size,\n 'shape_completion': self.config.train_shape_completion,\n 'b': self.config.train_completion_crop,\n }\n\n X_val, Y_val = validation_data\n data_train = StarDistData(X, Y, same_patches=False, **data_kwargs)\n data_val = StarDistData(X_val, Y_val, same_patches=True, **data_kwargs)\n\n history = self.keras_model.fit_generator(generator=data_train, validation_data=data_val,\n epochs=epochs, steps_per_epoch=steps_per_epoch,\n callbacks=self.callbacks, verbose=1)\n\n self.keras_model.save_weights(str(self.logdir / 'weights_last.h5'))\n\n if self.config.train_checkpoint is not None:\n self.load_weights(self.config.train_checkpoint)\n\n return history\n\n\n def predict(self, img, resizer=PadAndCropResizer(), **predict_kwargs):\n \"\"\"Predict.\n\n Parameters\n ----------\n img : :class:`numpy.ndarray`\n Input image\n resizer : :class:`csbdeep.data.Resizer` or None\n If necessary, input image is resized to enable neural network prediction and result is (possibly)\n resized to yield original image size.\n\n Returns\n -------\n (:class:`numpy.ndarray`,:class:`numpy.ndarray`)\n Returns the tuple (`prob`, `dist`) of per-pixel object probabilities and star-convex polygon distances.\n\n \"\"\"\n if resizer is None:\n resizer = NoResizer()\n isinstance(resizer,Resizer) or _raise(ValueError())\n\n img.ndim in (2,3) or _raise(ValueError())\n\n channel = img.ndim if backend_channels_last() else 0\n if img.ndim == 2:\n x = np.expand_dims(img,channel)\n self.config.n_channel_in == x.shape[channel] or _raise(ValueError())\n\n # resize: make divisible by power of 2 to allow downsampling steps in unet\n div_n = 2 ** self.config.unet_n_depth\n x = resizer.before(x,div_n,exclude=channel)\n\n if backend_channels_last():\n sh = x.shape[:-1] + (1,)\n else:\n sh = (1,) + x.shape[1:]\n dummy = np.empty((1,)+sh,np.float32)\n\n prob, dist = self.keras_model.predict([np.expand_dims(x,0),dummy],**predict_kwargs)\n prob, dist = prob[0], dist[0]\n\n prob = resizer.after(prob,exclude=channel)\n dist = resizer.after(dist,exclude=channel)\n\n prob = np.take(prob,0,channel)\n dist = np.moveaxis(dist,channel,-1)\n\n return prob, dist\n"
] | [
[
"numpy.expand_dims",
"numpy.take",
"numpy.stack",
"numpy.moveaxis",
"numpy.random.RandomState",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Snegovikufa/chaco | [
"89366735e20cded2bad90db8817a46de56d137b0"
] | [
"chaco/tests/arraydatasource_test_case.py"
] | [
"\"\"\"\nTest of basic dataseries behavior.\n\"\"\"\n\nimport unittest\n\nfrom numpy import arange, array, allclose, empty, isnan, nan\nimport numpy as np\n\nfrom chaco.api import ArrayDataSource, PointDataSource\n\n\nclass ArrayDataTestCase(unittest.TestCase):\n def test_basic_set_get(self):\n myarray = arange(10)\n sd = ArrayDataSource(myarray)\n self.assertTrue(allclose(myarray, sd._data))\n self.assert_(sd.value_dimension == \"scalar\")\n return\n\n def test_bounds(self):\n # ascending\n myarray = arange(10)\n sd = ArrayDataSource(myarray, sort_order=\"ascending\")\n bounds = sd.get_bounds()\n self.assert_(bounds == (0,9))\n\n # descending\n myarray = arange(10)[::-1]\n sd = ArrayDataSource(myarray, sort_order=\"descending\")\n bounds = sd.get_bounds()\n self.assert_(bounds == (0,9))\n\n # no order\n myarray = array([12,3,0,9,2,18,3])\n sd = ArrayDataSource(myarray, sort_order=\"none\")\n bounds = sd.get_bounds()\n self.assert_(bounds == (0,18))\n return\n\n def test_data_size(self):\n # We know that ScalarData always returns the exact length of its data\n myarray = arange(913)\n sd = ArrayDataSource(myarray)\n self.assert_(len(myarray) == sd.get_size())\n return\n\n def test_bounds_all_nans(self):\n myarray = empty(10)\n myarray[:] = nan\n sd = ArrayDataSource(myarray)\n bounds = sd.get_bounds()\n self.assertTrue(isnan(bounds[0]))\n self.assertTrue(isnan(bounds[1]))\n\n def test_bounds_non_numeric(self):\n myarray = np.array([u'abc', u'foo', u'bar', u'def'], dtype=unicode)\n sd = ArrayDataSource(myarray)\n bounds = sd.get_bounds()\n self.assertEqual(bounds, (u'abc', u'def'))\n\n\nclass PointDataTestCase(unittest.TestCase):\n # Since PointData is mostly the same as ScalarData, the key things to\n # test are functionality that use _compute_bounds() and reverse_map().\n def create_array(self):\n return array(zip(range(10), range(0, 100, 10)))\n\n def test_basic_set_get(self):\n myarray = self.create_array()\n pd = PointDataSource(myarray)\n self.assertTrue(allclose(myarray,pd._data))\n self.assert_(pd.value_dimension == \"point\")\n return\n\n def test_bounds(self):\n myarray = self.create_array()\n pd = PointDataSource(myarray)\n self.assertEqual(pd.get_bounds(),((0,0), (9,90)))\n return\n\nif __name__ == '__main__':\n import nose\n nose.run()\n"
] | [
[
"numpy.allclose",
"numpy.isnan",
"numpy.arange",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
uysalserkan/OpenCV-Samples | [
"ab01dc128951626aa50c571b77d419ad9dfbfd3e"
] | [
"Color_Filtering.py"
] | [
"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n _, frame = cap.read()\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n lower_red = np.array([10, 10, 50])\n upper_red = np.array([255, 255, 180])\n\n mask = cv2.inRange(hsv, lower_red, upper_red)\n res = cv2.bitwise_and(frame, frame, mask=mask)\n\n kernel = np.ones((5, 5), np.uint8)\n erosion = cv2.erode(mask, kernel, iterations=1)\n dilation = cv2.dilate(mask, kernel, iterations=1)\n\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\n # kernel = np.ones((15, 15), np.float32)/225\n # smoothed = cv2.filter2D(res, -1, kernel)\n\n # blur = cv2.GaussianBlur(res, (15, 15), 0)\n # bilateral = cv2.bilateralFilter(res, 15, 75, 75)\n # median = cv2.medianBlur(res, 15)\n\n cv2.imshow('frame', frame)\n cv2.imshow('res', res)\n cv2.imshow('dilation', dilation)\n cv2.imshow('erosion', erosion)\n cv2.imshow('opening', opening)\n cv2.imshow('closing', closing)\n # cv2.imshow('mask', mask)\n # cv2.imshow('smooth', smoothed)\n # cv2.imshow('blur', blur)\n # cv2.imshow('medianblur', median)\n # cv2.imshow('bila', bilateral)\n\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncv2.destroyAllWindows()\ncap.release()\n"
] | [
[
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mnsaxena/np-shape-lab | [
"87e0b54ba147a499d0be692b8841dda887568b63"
] | [
"pythonscripts/asphericity.py"
] | [
"from numpy import linalg as LA\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nindices = []\nxPos = []\nyPos = []\nzPos = []\ncharge = []\n\n# loop through data as a csv, store data in lists\nwith open('disc.txt', mode='r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n fields = next(csv_reader)\n i=0\n for line in csv_reader:\n indices.append(int(line[0]))\n xPos.append(float(line[2]))\n yPos.append(float(line[3]))\n zPos.append(float(line[4]))\n charge.append(float(line[5]))\n i=i+1\n \n# get number of vertices for future use\nNv = i\nprint(\"Number of vertices: \" + str(Nv))\n\n# calculate center of mass\nCOM = [0,0,0]\nCOM[0] = sum(xPos) / len(xPos)\nCOM[1] = sum(yPos) / len(yPos)\nCOM[2] = sum(zPos) / len(zPos)\nprint(\"Center of Mass:\" + str(COM))\n\n# set up matrix S\nS = [[0 for x in range(3)] for y in range(3)]\ndiag1, diag2, diag3, xy, xz, yz = 0, 0, 0, 0, 0, 0\n# populate diagonals and off-diagonals\nfor x in range(0, Nv):\n diag1 = diag1 + (xPos[x]-COM[0])**2\n diag2 = diag2 + (yPos[x]-COM[1])**2\n diag3 = diag3 + (zPos[x]-COM[2])**2\n\n xy = xy + (xPos[x]-COM[0])*(yPos[x]-COM[1])\n xz = xz + (xPos[x]-COM[0])*(zPos[x]-COM[2])\n yz = yz + (yPos[x]-COM[1])*(zPos[x]-COM[2])\n \nS[0][0] = diag1 / Nv\nS[0][1] = xy / Nv\nS[0][2] = xz / Nv\nS[1][0] = xy / Nv\nS[1][1] = diag2 / Nv\nS[1][2] = yz / Nv\nS[2][0] = xz / Nv\nS[2][1] = yz / Nv\nS[2][2] = diag3 / Nv\n\n# calculate eigenvalues\nS = np.array(S)\nw, v = LA.eig(S)\nprint(\"Eigenvalues of S: \" + str(w))\n\n# calculate squared radius of gyration\nRg2 = (w[0])**2 + (w[1])**2 + (w[2])**2\n\n# calculate normalized asphericitty (equation 2 of 2020 paper)\nasphericity = ((1.5*((max(w))**2)) - (0.5*(Rg2))) / Rg2\n\nprint(\"Asphericity: \" + str(asphericity))\n\n# graph deviations\n\n# calculate magnitude of distances\ndistance = []\nfor i in range(0, Nv):\n distance.append((xPos[i]**2)+(yPos[i]**2)+(zPos[i]**2))\n\nindices = np.arange(0,Nv)\navgRadius = []\nsphere = np.arange(0,Nv)\nfor item in sphere:\n sphere[item]=1\n\naverage1 = sum(distance)/Nv\n\nfor item in range(0,Nv):\n avgRadius.append(average1)\n\n\nplt.plot(indices, distance, 'o', label='Vertices Distance')\nplt.plot(indices, avgRadius, label='Average Distance')\nplt.plot(indices,sphere,label=\"Original Distance\")\nplt.xlabel(\"Vertex\")\nplt.ylabel(\"Distance from Origin\")\nplt.legend()\nplt.title(\"Disc Dataset\")\nplt.show()\n\n#TODO make subplots to compare data\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.linalg.eig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zolboo1024/numpy | [
"ce57efe4e9f741b45b45ccfb462668e9caa1c9c3"
] | [
"numpy/typing/tests/data/fail/arithmetic.py"
] | [
"from typing import List, Any\nimport numpy as np\n\nb_ = np.bool_()\ndt = np.datetime64(0, \"D\")\ntd = np.timedelta64(0, \"D\")\n\nAR_b: np.ndarray[Any, np.dtype[np.bool_]]\nAR_u: np.ndarray[Any, np.dtype[np.uint32]]\nAR_i: np.ndarray[Any, np.dtype[np.int64]]\nAR_f: np.ndarray[Any, np.dtype[np.float64]]\nAR_c: np.ndarray[Any, np.dtype[np.complex128]]\nAR_m: np.ndarray[Any, np.dtype[np.timedelta64]]\nAR_M: np.ndarray[Any, np.dtype[np.datetime64]]\n\nANY: Any\n\nAR_LIKE_b: List[bool]\nAR_LIKE_u: List[np.uint32]\nAR_LIKE_i: List[int]\nAR_LIKE_f: List[float]\nAR_LIKE_c: List[complex]\nAR_LIKE_m: List[np.timedelta64]\nAR_LIKE_M: List[np.datetime64]\n\n# Array subtraction\n\n# NOTE: mypys `NoReturn` errors are, unfortunately, not that great\n_1 = AR_b - AR_LIKE_b # E: Need type annotation\n_2 = AR_LIKE_b - AR_b # E: Need type annotation\n\nAR_f - AR_LIKE_m # E: Unsupported operand types\nAR_f - AR_LIKE_M # E: Unsupported operand types\nAR_c - AR_LIKE_m # E: Unsupported operand types\nAR_c - AR_LIKE_M # E: Unsupported operand types\n\nAR_m - AR_LIKE_f # E: Unsupported operand types\nAR_M - AR_LIKE_f # E: Unsupported operand types\nAR_m - AR_LIKE_c # E: Unsupported operand types\nAR_M - AR_LIKE_c # E: Unsupported operand types\n\nAR_m - AR_LIKE_M # E: Unsupported operand types\nAR_LIKE_m - AR_M # E: Unsupported operand types\n\n# array floor division\n\nAR_M // AR_LIKE_b # E: Unsupported operand types\nAR_M // AR_LIKE_u # E: Unsupported operand types\nAR_M // AR_LIKE_i # E: Unsupported operand types\nAR_M // AR_LIKE_f # E: Unsupported operand types\nAR_M // AR_LIKE_c # E: Unsupported operand types\nAR_M // AR_LIKE_m # E: Unsupported operand types\nAR_M // AR_LIKE_M # E: Unsupported operand types\n\nAR_b // AR_LIKE_M # E: Unsupported operand types\nAR_u // AR_LIKE_M # E: Unsupported operand types\nAR_i // AR_LIKE_M # E: Unsupported operand types\nAR_f // AR_LIKE_M # E: Unsupported operand types\nAR_c // AR_LIKE_M # E: Unsupported operand types\nAR_m // AR_LIKE_M # E: Unsupported operand types\nAR_M // AR_LIKE_M # E: Unsupported operand types\n\n_3 = AR_m // AR_LIKE_b # E: Need type annotation\nAR_m // AR_LIKE_c # E: Unsupported operand types\n\nAR_b // AR_LIKE_m # E: Unsupported operand types\nAR_u // AR_LIKE_m # E: Unsupported operand types\nAR_i // AR_LIKE_m # E: Unsupported operand types\nAR_f // AR_LIKE_m # E: Unsupported operand types\nAR_c // AR_LIKE_m # E: Unsupported operand types\n\n# Scalars\n\nb_ - b_ # E: No overload variant\n\ndt + dt # E: Unsupported operand types\ntd - dt # E: Unsupported operand types\ntd % 1 # E: Unsupported operand types\ntd / dt # E: No overload\ntd % dt # E: Unsupported operand types\n\n-b_ # E: Unsupported operand type\n+b_ # E: Unsupported operand type\n"
] | [
[
"numpy.timedelta64",
"numpy.bool_",
"numpy.datetime64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Laeyoung/EasyOCR | [
"f41a5d951bd6fce8cfcdaa67a956c639c013eb18"
] | [
"easyocr/recognition.py"
] | [
"from PIL import Image\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom .model import Model\nfrom .utils import CTCLabelConverter\nimport math\n\ndef contrast_grey(img):\n high = np.percentile(img, 90)\n low = np.percentile(img, 10)\n return (high-low)/np.maximum(10, high+low), high, low\n\ndef adjust_contrast_grey(img, target = 0.4):\n contrast, high, low = contrast_grey(img)\n if contrast < target:\n img = img.astype(int)\n ratio = 200./np.maximum(10, high-low)\n img = (img - low + 25)*ratio\n img = np.maximum(np.full(img.shape, 0) ,np.minimum(np.full(img.shape, 255), img)).astype(np.uint8)\n return img\n\nclass NormalizePAD(object):\n\n def __init__(self, max_size, PAD_type='right'):\n self.toTensor = transforms.ToTensor()\n self.max_size = max_size\n self.max_width_half = math.floor(max_size[2] / 2)\n self.PAD_type = PAD_type\n\n def __call__(self, img):\n img = self.toTensor(img)\n img.sub_(0.5).div_(0.5)\n c, h, w = img.size()\n Pad_img = torch.FloatTensor(*self.max_size).fill_(0)\n Pad_img[:, :, :w] = img # right pad\n if self.max_size[2] != w: # add border Pad\n Pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)\n\n return Pad_img\n\nclass ListDataset(torch.utils.data.Dataset):\n\n def __init__(self, image_list):\n self.image_list = image_list\n self.nSamples = len(image_list)\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n img = self.image_list[index]\n\n return Image.fromarray(img, 'L')\n\nclass AlignCollate(object):\n\n def __init__(self, imgH=32, imgW=100, keep_ratio_with_pad=False, adjust_contrast = 0.):\n self.imgH = imgH\n self.imgW = imgW\n self.keep_ratio_with_pad = keep_ratio_with_pad\n self.adjust_contrast = adjust_contrast\n\n def __call__(self, batch):\n batch = filter(lambda x: x is not None, batch)\n images = batch\n\n resized_max_w = self.imgW\n input_channel = 1\n transform = NormalizePAD((input_channel, self.imgH, resized_max_w))\n\n resized_images = []\n for image in images:\n w, h = image.size\n #### augmentation here - change contrast\n if self.adjust_contrast > 0:\n image = np.array(image.convert(\"L\"))\n image = adjust_contrast_grey(image, target = self.adjust_contrast)\n image = Image.fromarray(image, 'L')\n\n ratio = w / float(h)\n if math.ceil(self.imgH * ratio) > self.imgW:\n resized_w = self.imgW\n else:\n resized_w = math.ceil(self.imgH * ratio)\n\n resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)\n resized_images.append(transform(resized_image))\n\n image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)\n return image_tensors\n\ndef recognizer_predict(model, converter, test_loader, batch_max_length,\\\n ignore_idx, char_group_idx, decoder = 'greedy', beamWidth= 5, device = 'cpu'):\n model.eval()\n result = []\n with torch.no_grad():\n for image_tensors in test_loader:\n batch_size = image_tensors.size(0)\n image = image_tensors.to(device)\n # For max length prediction\n length_for_pred = torch.IntTensor([batch_max_length] * batch_size).to(device)\n text_for_pred = torch.LongTensor(batch_size, batch_max_length + 1).fill_(0).to(device)\n\n preds = model(image, text_for_pred)\n\n # Select max probabilty (greedy decoding) then decode index to character\n preds_size = torch.IntTensor([preds.size(1)] * batch_size)\n\n ######## filter ignore_char, rebalance\n preds_prob = F.softmax(preds, 
dim=2)\n preds_prob = preds_prob.cpu().detach().numpy()\n preds_prob[:,:,ignore_idx] = 0.\n pred_norm = preds_prob.sum(axis=2)\n preds_prob = preds_prob/np.expand_dims(pred_norm, axis=-1)\n preds_prob = torch.from_numpy(preds_prob).float().to(device)\n\n if decoder == 'greedy':\n # Select max probability (greedy decoding) then decode index to character\n _, preds_index = preds_prob.max(2)\n preds_index = preds_index.view(-1)\n preds_str = converter.decode_greedy(preds_index.data, preds_size.data)\n elif decoder == 'beamsearch':\n k = preds_prob.cpu().detach().numpy()\n preds_str = converter.decode_beamsearch(k, beamWidth=beamWidth)\n elif decoder == 'wordbeamsearch':\n k = preds_prob.cpu().detach().numpy()\n preds_str = converter.decode_wordbeamsearch(k, beamWidth=beamWidth)\n\n preds_max_prob, _ = preds_prob.max(dim=2)\n\n for pred, pred_max_prob in zip(preds_str, preds_max_prob):\n confidence_score = pred_max_prob.cumprod(dim=0)[-1]\n result.append([pred, confidence_score.item()])\n\n return result\n\ndef get_recognizer(input_channel, output_channel, hidden_size, character,\\\n separator_list, dict_list, model_path, device = 'cpu'):\n\n converter = CTCLabelConverter(character, separator_list, dict_list)\n num_class = len(converter.character)\n model = Model(input_channel, output_channel, hidden_size, num_class)\n\n if device == 'cpu':\n state_dict = torch.load(model_path, map_location=device)\n new_state_dict = OrderedDict()\n for key, value in state_dict.items():\n new_key = key[7:] # strip the 'module.' prefix left by DataParallel checkpoints\n new_state_dict[new_key] = value\n model.load_state_dict(new_state_dict)\n else:\n model = torch.nn.DataParallel(model).to(device)\n model.load_state_dict(torch.load(model_path, map_location=device))\n\n return model, converter\n\ndef get_text(character, imgH, imgW, recognizer, converter, image_list,\\\n ignore_char = '', decoder = 'greedy', beamWidth = 5, batch_size=1, contrast_ths=0.1,\\\n adjust_contrast=0.5, filter_ths = 0.003, workers = 1, device = 'cpu'):\n batch_max_length = int(imgW/10)\n\n char_group_idx = {}\n ignore_idx = []\n for char in ignore_char:\n try: ignore_idx.append(character.index(char)+1)\n except: pass\n\n coord = [item[0] for item in image_list]\n img_list = [item[1] for item in image_list]\n AlignCollate_normal = AlignCollate(imgH=imgH, imgW=imgW, keep_ratio_with_pad=True)\n test_data = ListDataset(img_list)\n test_loader = torch.utils.data.DataLoader(\n test_data, batch_size=batch_size, shuffle=False,\n num_workers=int(workers), collate_fn=AlignCollate_normal, pin_memory=True)\n\n # predict first round\n result1 = recognizer_predict(recognizer, converter, test_loader, batch_max_length,\\\n ignore_idx, char_group_idx, decoder, beamWidth, device = device)\n\n # predict second round\n low_confident_idx = [i for i, item in enumerate(result1) if (item[1] < contrast_ths)]\n if len(low_confident_idx) > 0:\n img_list2 = [img_list[i] for i in low_confident_idx]\n AlignCollate_contrast = AlignCollate(imgH=imgH, imgW=imgW, keep_ratio_with_pad=True, adjust_contrast=adjust_contrast)\n test_data = ListDataset(img_list2)\n test_loader = torch.utils.data.DataLoader(\n test_data, batch_size=batch_size, shuffle=False,\n num_workers=int(workers), collate_fn=AlignCollate_contrast, pin_memory=True)\n result2 = recognizer_predict(recognizer, converter, test_loader, batch_max_length,\\\n ignore_idx, char_group_idx, decoder, beamWidth, device = device)\n\n result = []\n for i, zipped in enumerate(zip(coord, result1)):\n box, pred1 = zipped\n if i in low_confident_idx:\n
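# keep the contrast-adjusted second-pass prediction only when it scores higher than the first pass\n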
pred2 = result2[low_confident_idx.index(i)]\n if pred1[1] > pred2[1]:\n result.append( (box, pred1[0], pred1[1]) )\n else:\n result.append( (box, pred2[0], pred2[1]) )\n else:\n result.append( (box, pred1[0], pred1[1]) )\n\n #confidence_score = pred_max_prob.cumprod(dim=0)[-1]\n #if confidence_score.item() > filter_ths:\n # print(pred, confidence_score.item())\n #else:\n # print('not sure', pred, confidence_score.item())\n\n return result\n"
] | [
[
"torch.nn.functional.softmax",
"numpy.expand_dims",
"numpy.maximum",
"torch.LongTensor",
"torch.load",
"torch.from_numpy",
"numpy.percentile",
"numpy.full",
"torch.no_grad",
"torch.FloatTensor",
"torch.IntTensor",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
johnflux/deep-learning-tictactoe | [
"da4dbdf5453c0ac2ed470098736f50dce6a4574b"
] | [
"play.py"
] | [
"#!/usr/bin/env python3\nimport numpy as np\nimport copy\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Flatten, Dense, Dropout\n\n\n# Call this like:\n\nmodel = None\ncallbacks = []\n\ndef makeModel():\n global model, callbacks\n if model != None:\n return\n inputs = keras.layers.Input(shape=(2,3,3))\n\n output = Flatten()(inputs)\n output = Dense(100, activation='relu')(output)\n output = Dense(50, activation='relu')(output)\n output = Dense(20, activation='relu')(output)\n output = Dense(1, activation='relu', use_bias=False)(output)\n print(output)\n\n model = Model(inputs=inputs, outputs=output)\n\n tbCallBack = keras.callbacks.TensorBoard(\n log_dir='./log', histogram_freq=1, write_graph=True, write_images=True,\n embeddings_freq=1, embeddings_layer_names=None, embeddings_metadata=None)\n checkpointCallback = keras.callbacks.ModelCheckpoint(\n 'model_running.h5', monitor='val_loss', verbose=0,\n save_best_only=True, save_weights_only=False, mode='auto', period=1)\n reduce_lr = keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss', factor=0.2,\n patience=5, min_lr=0.0001)\n callbacks = [tbCallBack, checkpointCallback, reduce_lr]\n\n model.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=0.001))\n from keras.models import load_model\n #model = load_weights('model_running.h5')\n\nboardgames = []\nwhowon = []\n\ndef train():\n global model, boardgames, whowon\n makeModel()\n #print(\"Boardgames is:\", np.array(boardgames).shape, \"whowon:\", np.array(whowon).shape)\n model.fit(np.array(boardgames), np.array(whowon), epochs=10, validation_split=0.2, shuffle=True,\n verbose=0, callbacks=callbacks)\n\n# board[0,:,:] is for computer player. 0 if there's no piece and 1 if there is\n# board[1,:,:] is for other player. 0 if there's no piece and 1 if there is\ncurrent_game_boards = []\n\ndef find_next_best_move(board, player):\n global model\n makeModel()\n best_prob_to_win = -1\n if player == 1:\n best_prob_to_win = 2\n best_x = 0\n best_y = 0\n for x in range(3):\n for y in range(3):\n if not board[0, x, y] and not board[1, x, y]:\n # Nobody has played in this position.\n # Let's play and see how good the board looks for us\n board[0, x, y] = 1\n prob_to_win = model.predict(np.array([board]), batch_size=1, verbose=0)[0]\n board[0, x, y] = 0\n if ((player == 0 and prob_to_win > best_prob_to_win) or\n (player == 1 and prob_to_win < best_prob_to_win)):\n best_x = x\n best_y = y\n best_prob_to_win = prob_to_win\n #print(\"Best move is\", best_x, best_y, \"with probability to win: \", prob_to_win)\n return best_x, best_y\n\ndef remember_game_board(board):\n global current_game_boards\n current_game_boards.append(board)\n\n# whowon_ should be 1 if computer, 0 if person, 0.5 if tie\ndef notify_new_game(whowon_):\n global boardgames, whowon, current_game_boards\n boardgames += current_game_boards\n whowon += (np.ones(len(current_game_boards)) * whowon_).tolist()\n current_game_boards = []\n train()\n\ndef get_valid_moves(board):\n valid_moves = []\n for x in range(3):\n for y in range(3):\n if not board[0, x, y] and not board[1, x, y]:\n valid_moves.append((x,y))\n return valid_moves\n\ndef get_random_move(board):\n valid_moves = get_valid_moves(board)\n return valid_moves[np.random.randint(len(valid_moves))]\n\ndef has_won(board, player):\n p = player\n if ((board[p,0,0] and board[p,1,1] and board[p,2,2]) or\n (board[p,2,0] and board[p,1,1] and board[p,0,2])):\n return True\n for x in range(3):\n if ((board[p,x,0] and board[p,x,1] and board[p,x,2]) or\n (board[p,0,x] 
def has_won(board, player):\n p = player\n if ((board[p,0,0] and board[p,1,1] and board[p,2,2]) or\n (board[p,2,0] and board[p,1,1] and board[p,0,2])):\n return True\n for x in range(3):\n if ((board[p,x,0] and board[p,x,1] and board[p,x,2]) or\n (board[p,0,x] and board[p,1,x] and board[p,2,x])):\n return True\n return False\n\ndef is_board_full(board):\n for x in range(3):\n for y in range(3):\n if not board[0, x, y] and not board[1, x, y]:\n return False\n return True\n\ndef playGame(board):\n # a full board with no winner counts as a tie\n if is_board_full(board):\n notify_new_game(0.5)\n\ndef playAgainstSelfRandomly():\n while True:\n player_who_won, board = playAgainstSelfRandomly_()\n notify_new_game(player_who_won)\n printBoard(board)\n print(\"Score:\", player_who_won)\n print()\n\n\ndef printBoard(board):\n for x in range(3):\n for y in range(3):\n if board[0,x,y]:\n print('X', end='')\n elif board[1,x,y]:\n print('O', end='')\n else:\n print('.', end='')\n print()\n\n# Return 0.5 if tie, 1 if computer player won, 0 if we lost\ndef playAgainstSelfRandomly_():\n board = np.zeros((2, 3, 3))\n # board[0,:,:] is for computer player. 0 if there's no piece and 1 if there is\n # board[1,:,:] is for other player. 0 if there's no piece and 1 if there is\n player = 0\n while True:\n if has_won(board, 0):\n return 1, board\n if has_won(board, 1):\n return 0, board\n if is_board_full(board):\n return 0.5, board\n if np.random.randint(5) == 0:\n x, y = get_random_move(board)\n else:\n x, y = find_next_best_move(board, player)\n board[player, x, y] = 1\n remember_game_board(board)\n if player == 0:\n player = 1\n else:\n player = 0\n #printBoard(board)\n #print()\n\nif __name__ == \"__main__\":\n print(\"Hello!\")\n playAgainstSelfRandomly()\n\n\n\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cdrakesmith/CGATPipelines | [
"3c94ae4f9d87d51108255dc405c4b95af7c8b694"
] | [
"obsolete/pipeline_metagenomecommunities.py"
] | [
"\"\"\"\n=====================================================\nCommunity analysis of metgenomic shotgun sequencing\n=====================================================\n\n\nPipeline_metagenomecommunities.py takes as input a set of fastq\nfiles from a shotgun sequencing experiment of environmental samples\nand assesses community structure and function.\n\nOverview\n========\n\nThe pipeline assumes the data derive from multiple tissues/conditions\n(:term:`experiment`) with one or more biological and/or technical\nreplicates (:term:`replicate`). A :term:`replicate` within each\n:term:`experiment` is a :term:`track`.\n\nCommunity profiling\n--------------------\n\nThe pipeline uses various tools for assessing the abundance of taxa\nin an environmental sample. To assess relative abundance of taxa in\na community, we use metaphlan as it is easy to use and because it performs\nalignments against a reduced set of clade-specific marker genes\nit is relativelyfast. Metaphlan is likely to perform better where the\nsamples are derived fromhuman (e.g gut) as the database is significantly\noverrepresented for human derived taxa.\n\nWhere metaphlan attempts to estimate taxa relative abundances it does not\nattemtp to assign every read to a taxa. An alternative method is to use\nkraken. Kraken utilises a megablast-like approach in order to search for\nexact sequence matches between reads and sequences in the kraken database\n(taxonomy). Like metaphlan, kraken assumes that there is little sequence\ndivergence between the sequenced samples and the data in the database. In\ncases where sequences are derived from environmental samples that have not been\nsequenced before this will result in very few sequences being assigned to\na taxa.\n\nA third approach is to use a sensitive alignment algorithm.\nAt the time of writing this, a new approach to perform\nsensitive blastx-like alignments was developed by the Huson lab.\nIt is called Diamond and is 16000 times faster than blast -\na requirement for large datasets. The pipeline utilises diamond\nin cases where it is expected that there is high divergence between\nsequences derived from the sample and the NCBI nr database.\nFollowing alignment with diamond, the pipeline will attempt to assign\neach read to a taxa using the lcammaper tool (lowest common ancestor (LCA))\nalso developed by the Huson lab.\n\n\nFunctional profiling\n---------------------\n\nWhether DNA-seq or RNA-seq is used, functional profiling can be performed.\nThe functional profiling techniques used in the pipeline rely on a set of\nnon-redundant gene sequences (amino acids). Diamond is used to sensitively\nalign reads to the non-redundant database e.g. MetaRef or IGC.\n\n\nDifferential abundance estimations\n-----------------------------------\n\nTo detect differences in abundance of taxa or genes, we utilise the\nmetagenomeSeq R package from bioconductor. This package utilises a\nzero-inflated gaussian micture model to compensate for undersampling of\ntaxa/genes bewteen samples - which may cause overestimation of differences\ndue to differences in library size.\n\nsee http://www.nature.com/nmeth/journal/v10/n12/full/nmeth.2658.html.\n\n\nUsage\n=====\n\nSee :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general\ninformation how to use CGAT pipelines.\n\nConfiguration\n-------------\n\nThe pipeline requires a configured :file:`pipeline.ini` file.\n\nThe sphinxreport report requires a :file:`conf.py` and\n:file:`sphinxreport.ini` file (see :ref:`PipelineDocumenation`). 
\nInput\n-----\n\nReads\n+++++\n\nReads are imported by placing files or linking to files in the\n:term:`working directory`.\n\nThe default file format assumes the following convention:\n\n <sample>-<condition>-<replicate>.<suffix>\n\n``sample`` and ``condition`` make up an :term:`experiment`, while\n``replicate`` denotes the :term:`replicate` within an\n:term:`experiment`. The ``suffix`` determines the file type. The\nfollowing suffixes/file types are possible:\n\nfastq.gz\n Single-end reads in fastq format.\n\nfastq.1.gz, fastq.2.gz\n Paired-end reads in fastq format. The two fastq files must be\n sorted by read-pair.\n\n.. note::\n\n Quality scores need to be of the same scale for all input files.\n Thus it might be difficult to mix different formats.\n\n\nRequirements\n------------\n\nOn top of the default CGAT setup, the pipeline requires the following\nsoftware to be in the path:\n\n+--------------------+-------------------+------------+\n|*Program* |*Version* |*Purpose* |\n+--------------------+-------------------+------------+\n|diamond | | Sensitive |\n| | | alignment |\n| | | algorithm |\n+--------------------+-------------------+------------+\n|lcamapper | |Community |\n| | |profiling + |\n| | |functional |\n| | |profiling |\n+--------------------+-------------------+------------+\n|metaphlan | |taxonomic |\n| | | relative |\n| | | abundance |\n| | | estimator |\n+--------------------+-------------------+------------+\n|kraken | |megablast |\n| | |taxonomic |\n| | |assignment |\n| | |of reads |\n+--------------------+-------------------+------------+\n|metagenomeSeq | |differential|\n| | |abundance |\n| | |tool |\n+--------------------+-------------------+------------+\n\n\nPipeline output\n===============\n\nTODO::\n\nAdditional outputs are stored in the database file :file:`csvdb`.\n
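\nThe main working directories created by the tasks below (a summary of\nthe mkdir() calls in the code rather than an exhaustive manifest) are::\n\n metaphlan.dir/ per-sample read maps and relative abundances\n kraken.dir/ kraken classifications and counts\n diamond.dir/ diamond alignments and LCA assignments\n counts.dir/ per-level taxon count tables\n genes.dir/ gene-level counts from diamond\n diversity.dir/ rarefaction, richness and diversity estimates\n diff.dir/, deseq2.dir/ differential abundance results\n alignment_stats.dir/ alignment and assignment summaries\n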
\nGlossary\n========\n\n.. glossary::\n\nCode\n====\n\n\"\"\"\n\n# load modules\nfrom ruffus import *\n\nimport CGAT.Experiment as E\n\nimport sys\nimport os\nimport re\nimport glob\n\nimport sqlite3\nimport CGAT.IOTools as IOTools\nimport CGATPipelines.PipelineMapping as PipelineMapping\n# import CGATPipelines.PipelineMetagenomeAssembly as PipelineMetagenomeAssembly\nimport CGATPipelines.PipelineMetagenomeCommunities \\\n as PipelineMetagenomeCommunities\nimport CGAT.Metaphlan as Metaphlan\nimport pandas\n# import CGATPipelines.PipelineTracks as PipelineTracks\n\n###################################################\n###################################################\n###################################################\n# Pipeline configuration\n###################################################\n\n# load options from the config file\nimport CGATPipelines.Pipeline as P\n\nP.getParameters([\"pipeline.ini\",\n \"%s/pipeline.ini\" % os.path.splitext(__file__)[0], ])\n\n\nPARAMS = P.PARAMS\n\n###################################################################\n###################################################################\n# Helper functions mapping tracks to conditions, etc\n###################################################################\n\n\n# collect fastq.gz tracks\n# TRACKS = PipelineTracks.Tracks(PipelineTracks.Sample3).loadFromDirectory(\n# glob.glob(\"*.fastq.gz\"), \"(\\S+).fastq.gz\") +\\\n# PipelineTracks.Tracks(PipelineTracks.Sample3).loadFromDirectory(\n# glob.glob(\"*.fastq.1.gz\"), \"(\\S+).fastq.1.gz\")\n\n# ALL = PipelineTracks.Sample3()\n# EXPERIMENTS =\n# PipelineTracks.Aggregate(TRACKS, labels=(\"condition\", \"tissue\"))\n# CONDITIONS =\n# PipelineTracks.Aggregate(TRACKS, labels=(\"condition\", ))\n# TISSUES = PipelineTracks.Aggregate(TRACKS, labels=(\"tissue\", ))\n\n###################################################################\n# sequence files as input\n###################################################################\nSEQUENCEFILES = (\"*.fastq.gz\", \"*.fastq.1.gz\", \"*.fasta.gz\")\nSEQUENCEFILES_REGEX = regex(\n r\"(\\S+).(fastq.gz|fastq.1.gz|fasta.gz)\")\n\n###################################################################\n# connecting to database\n###################################################################\n\n\ndef connect():\n '''connect to database.\n\n This method also attaches to helper databases.\n '''\n dbh = sqlite3.connect(PARAMS[\"database\"])\n return dbh\n\n###################################################################\n###################################################################\n###################################################################\n# load number of reads\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(SEQUENCEFILES,\n SEQUENCEFILES_REGEX,\n r\"\\1.nreads\")\ndef countReads(infile, outfile):\n '''count number of reads in input files.'''\n to_cluster = True\n m = PipelineMapping.Counter()\n statement = m.build((infile,), outfile)\n P.run()\n\n\n@merge(countReads, \"reads_summary.load\")\ndef loadReadCounts(infiles, outfile):\n '''load read counts into database.'''\n to_cluster = False\n outf = P.getTempFile()\n outf.write(\"track\\ttotal_reads\\n\")\n for infile in infiles:\n track = P.snip(infile, \".nreads\")\n lines = IOTools.openFile(infile).readlines()\n nreads = 
int(lines[0][:-1].split(\"\\t\")[1])\n outf.write(\"%s\\t%i\\n\" % (track, nreads))\n outf.close()\n inname = outf.name\n\n tablename = P.toTable(outfile)\n statement = '''cgat csv2db -t %(tablename)s --log=%(outfile)s.log\n < %(inname)s > %(outfile)s'''\n P.run()\n os.unlink(outf.name)\n\n# read count target\n\n\n@follows(loadReadCounts)\ndef count_reads():\n pass\n\n###################################################################\n###################################################################\n###################################################################\n# Preprocessing reads for community analysis\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"fasta.dir\"))\n@transform(SEQUENCEFILES, SEQUENCEFILES_REGEX, r\"fasta.dir/\\1.fa.gz\")\ndef preprocessReads(infile, outfile):\n '''\n create merged fasta file for use with metaphlan\n '''\n # check for second read in the pair\n if infile.endswith(\".fastq.gz\"):\n E.info(\"converting fastq file to fasta file\")\n statement = '''fastq-to-fasta.py %(infile)s 2> %(outfile)s.log\n | gzip > %(outfile)s'''\n P.run()\n\n elif infile.endswith(\".1.gz\"):\n read2 = P.snip(infile, \".1.gz\") + \".2.gz\"\n assert os.path.exists(read2), \"file does not exist %s\" % read2\n\n log = infile.replace(\"fastq.\", \"\")\n statement = '''cgat fastqs2fasta\n -a %(infile)s\n -b %(read2)s\n --log=%(log)s.log\n | gzip > %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n# Estimate taxonomic relative abundances using metaphlan\n###################################################################\n###################################################################\n###################################################################\n\n\n@active_if(\"metaphlan\" in PARAMS.get(\"classifiers\"))\n@follows(mkdir(\"metaphlan.dir\"))\n@transform(SEQUENCEFILES,\n SEQUENCEFILES_REGEX,\n r\"metaphlan.dir/\\1.bt2out.txt\")\ndef mapReadsWithMetaphlan(infile, outfile):\n '''\n map reads first with metaphlan against the marker\n database - will reduce running time for subsequent\n steps to assess abundances etc\n NOTE: IF PAIRED END, FILES WILL RUN USING FIRST READ IN PAIR\n '''\n db = PARAMS.get(\"metaphlan_db\")\n nproc = PARAMS.get(\"metaphlan_nproc\")\n options = PARAMS.get(\"metaphlan_bowtie2_options\")\n assert os.path.exists(\n PARAMS[\"metaphlan_db\"] + \".1.bt2\"), \\\n \"\"\"missing file %s: Are you sure you have\n the correct database for bowtie2?\"\"\" \\\n % PARAMS[\"metaphlan_db\"] + \".1.bt2\"\n statement = '''zcat %(infile)s | metaphlan.py %(infile)s\n --input_type multifastq\n --mpa_pkl %(metaphlan_pkl)s\n --bowtie2db %(db)s\n --nproc %(nproc)s\n --bt2_ps %(options)s\n --no_map\n --bowtie2out %(outfile)s\n &> %(outfile)s.log'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(mapReadsWithMetaphlan,\n regex(\"(\\S+)/(\\S+).bt2out.txt\"),\n r\"metaphlan.dir/\\2.readmap\")\ndef buildMetaphlanReadmap(infile, outfile):\n '''\n metaphlan is a program used in metagenomics. 
It assigns\n reads to clades based on specific genetic markers via\n blastn searching\n '''\n statement = '''metaphlan.py -t reads_map\n --input_type bowtie2out %(infile)s\n | cgat metaphlan2table\n -t read_map\n --log=%(outfile)s.log\n > %(outfile)s; checkpoint\n ; sed -i 's/order/_order/g' %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(buildMetaphlanReadmap, suffix(\".readmap\"), \".readmap.load\")\ndef loadMetaphlanReadmaps(infile, outfile):\n '''\n load the metaphlan read maps\n '''\n P.load(infile, outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@merge(loadMetaphlanReadmaps, \"metaphlan.dir/taxonomic.counts\")\ndef countMetaphlanTaxonomicGroups(infiles, outfile):\n '''\n count the total number of species that\n were found by metaphlan\n '''\n outf = open(outfile, \"w\")\n outf.write(\"track\\ttaxon_level\\tcount\\n\")\n taxons = [\"_order\", \"class\", \"family\",\n \"genus\", \"kingdom\", \"phylum\", \"species\"]\n dbh = connect()\n cc = dbh.cursor()\n for infile in infiles:\n table = P.toTable(infile)\n track = P.snip(table, \"_readmap\")\n for taxon in taxons:\n count = cc.execute(\n \"\"\"SELECT COUNT(DISTINCT %s) FROM %s\"\"\"\n % (taxon, table)).fetchone()[0]\n outf.write(\"\\t\".join([track, taxon, str(count)]) + \"\\n\")\n outf.close()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(mapReadsWithMetaphlan,\n regex(\"(\\S+)/(\\S+).bt2out.txt\"),\n r\"metaphlan.dir/\\2.relab\")\ndef buildMetaphlanRelativeAbundance(infile, outfile):\n '''\n metaphlan is a program used in metagenomics. 
It assigns\n reads to clades based on specific genetic markers via\n blastn searching\n '''\n statement = '''metaphlan.py -t rel_ab --input_type bowtie2out %(infile)s\n | cgat metaphlan2table -t rel_ab\n --log=%(outfile)s.log\n > %(outfile)s; checkpoint\n ; sed -i 's/order/_order/g' %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(buildMetaphlanRelativeAbundance, suffix(\".relab\"), \".relab.load\")\ndef loadMetaphlanRelativeAbundances(infile, outfile):\n '''\n load the metaphlan relative abundances\n '''\n P.load(infile, outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@merge(loadMetaphlanRelativeAbundances, \"metaphlan.dir/taxonomic.abundances\")\ndef buildMetaphlanTaxonomicAbundances(infiles, outfile):\n '''\n build a file that combines taxonomic abundances\n from each sample\n '''\n dbh = connect()\n cc = dbh.cursor()\n outf = open(outfile, \"w\")\n outf.write(\"track\\ttaxon_level\\ttaxon\\tabundance\\tidx\\n\")\n for infile in infiles:\n table = P.toTable(infile)\n track = P.snip(table, \"_relab\")\n for data in cc.execute(\n \"\"\"SELECT taxon_level,\n taxon,\n rel_abundance FROM %s\"\"\" % table).fetchall():\n idx = track.split(\"_\")[1]\n outf.write(\n \"\\t\".join([track, data[0], data[1], str(data[2]), idx]) + \"\\n\")\n outf.close()\n\n#########################################\n# metaphlan target\n#########################################\n\n\n@active_if(\"metaphlan\" in PARAMS.get(\"classifiers\"))\n@follows(loadMetaphlanRelativeAbundances,\n buildMetaphlanTaxonomicAbundances,\n countMetaphlanTaxonomicGroups,\n loadMetaphlanReadmaps)\ndef Metaphlan():\n pass\n\n###################################################################\n###################################################################\n###################################################################\n# Classify reads using Kraken and count taxonomic assignments\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"kraken.dir\"))\n@transform(SEQUENCEFILES,\n SEQUENCEFILES_REGEX,\n r\"kraken.dir/\\1.classified.tsv.gz\")\ndef classifyReadsWithKraken(infile, outfile):\n '''\n classify reads using kraken\n '''\n job_memory = \"30G\"\n kraken_db = PARAMS.get(\"kraken_db\")\n temp = P.getTempFilename(\".\")\n statement = '''kraken --db %(kraken_db)s\n --fastq-input\n --gzip-compressed\n %(infile)s > %(temp)s;\n cat %(temp)s\n | gzip > %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(classifyReadsWithKraken,\n suffix(\".classified.tsv.gz\"),\n \".counts.tsv.gz\")\ndef buildKrakenCounts(infile, outfile):\n '''\n build kraken counts table\n note that the output is produced using\n metaphlan2table but these are COUNTS and\n not relative abundance estimates. 
Therefore\n the file is passed through sed on the way out.\n '''\n kraken_db = PARAMS.get(\"kraken_db\")\n temp = P.getTempFilename(\".\")\n statement = '''kraken-mpa-report\n --db %(kraken_db)s\n <(zcat %(infile)s)\n > %(temp)s;\n cat %(temp)s\n | cgat metaphlan2table\n -t rel_ab\n --log=%(outfile)s.log\n | sed 's/rel_abundance/count/g'\n | gzip > %(outfile)s\n ; rm -rf %(temp)s'''\n\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(buildKrakenCounts, suffix(\".tsv.gz\"), \".kraken.load\")\ndef loadKrakenCounts(infile, outfile):\n '''\n load kraken report\n '''\n P.load(infile, outfile, \"--add-index=taxon\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"counts.dir\"))\n@split(loadKrakenCounts, \"counts.dir/*.kraken.counts.tsv.gz\")\ndef buildKrakenLevelCounts(infiles, outfiles):\n '''\n split counts by taxonomic levels\n '''\n for infile in infiles:\n tablename = P.toTable(os.path.basename(infile))\n track = P.snip(os.path.basename(infile), \".counts.kraken.load\")\n levels = [\n \"phylum\",\n \"class\",\n \"order\",\n \"family\",\n \"genus\",\n \"species\"]\n\n dbh = connect()\n cc = dbh.cursor()\n\n for level in levels:\n outname = \"counts.dir/\" + \\\n track + \\\n \".%s.kraken.counts.tsv.gz\" % level\n outf = IOTools.openFile(outname, \"w\")\n outf.write(\"taxa\\tcount\\n\")\n for data in cc.execute(\"\"\"SELECT taxon,\n count FROM %s\n WHERE taxon_level == '%s'\n \"\"\" % (tablename, level)).fetchall():\n outf.write(\"\\t\".join(map(str, data)) + \"\\n\")\n outf.close()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(buildKrakenLevelCounts, suffix(\".tsv.gz\"), \".load\")\ndef loadKrakenLevelCounts(infile, outfile):\n '''\n load kraken counts\n '''\n P.load(infile, outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@split(buildKrakenLevelCounts, \"counts.dir/*kraken.aggregated.counts.tsv.gz\")\ndef mergeKrakenCountsAcrossSamples(infiles, outfiles):\n '''\n merge counts into a single table across samples - input\n into metagenomeSeq\n '''\n levels = [\n \"phylum\",\n \"class\",\n \"order\",\n \"family\",\n \"genus\",\n \"species\"]\n for level in levels:\n prefixes = glob.glob(\n \"counts.dir/*.%(level)s.kraken.counts.tsv.gz\" % locals())\n prefixes = \",\".join(\n [P.snip(\n os.path.basename(x),\n \".%(level)s.kraken.counts.tsv.gz\" % locals()\n ) for x in prefixes])\n\n outname = os.path.join(\n \"counts.dir\", level + \".kraken.aggregated.counts.tsv.gz\")\n\n statement = '''cgat combine_tables\n --missing=0\n --columns=1\n --take=count\n --glob=counts.dir/*.%(level)s.kraken.counts.tsv.gz\n --prefixes=%(prefixes)s\n --log=%(outname)s.log\n | gzip > %(outname)s'''\n 
P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@active_if(\"kraken\" in PARAMS.get(\"classifiers\"))\n@follows(mergeKrakenCountsAcrossSamples)\ndef Kraken():\n pass\n\n###################################################################\n###################################################################\n###################################################################\n# Assign reads to taxa using Lowest Common Ancestor (LCA).\n# Initial alignment is done with Diamond\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"diamond.dir\"))\n@transform(preprocessReads,\n regex(\"(\\S+)/(\\S+).fa.gz\"),\n r\"diamond.dir/\\2.diamond.tsv.gz\")\ndef runDiamondOnRawSequences(infile, outfile):\n '''\n diamond is an ultra-fast equivalent to blastx. It takes\n fasta files as input.\n At present it will run one sequence from paired files\n '''\n temp = P.getTempFilename(\".\")\n\n job_threads = PARAMS[\"diamond_threads\"]\n job_memory = PARAMS[\"diamond_memory\"]\n\n db = PARAMS[\"diamond_db\"]\n diamond_options = PARAMS[\"diamond_options\"]\n\n statement = '''zcat %(infile)s > %(temp)s.fastq;\n checkpoint;\n diamond blastx\n --db %(db)s\n --query %(temp)s.fastq\n --daa %(temp)s.daa\n --threads %(job_threads)s\n --log\n %(diamond_options)s\n &> %(outfile)s.log;\n diamond view -a %(temp)s.daa | gzip > %(outfile)s;\n checkpoint;\n rm -rf %(temp)s %(temp)s.fastq %(temp)s.daa\n '''\n P.run()\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(runDiamondOnRawSequences, suffix(\".tsv.gz\"), \".lca.gz\")\ndef runLCA(infile, outfile):\n '''\n run the lowest common ancestor algorithm\n on the blast output to assign reads to\n taxa - from mtools. 
Runs with defaults at\n the moment.\n '''\n job_memory = \"25G\"\n\n # filtering options\n filter_list = P.asList(PARAMS.get(\"lca_filter\"))\n if filter_list:\n filter_stmt = \" | grep -v \" + \" | grep -v \".join(filter_list)\n else:\n filter_stmt = \"\"\n\n track = P.snip(outfile, \".lca.gz\")\n gi2taxid = PARAMS.get(\"megan_gi2taxid\")\n outf_tax = P.snip(outfile, \".gz\")\n options = PARAMS.get(\"lca_options\")\n statement = '''lcamapper.sh\n -i %(infile)s\n -f Detect\n %(options)s\n -gt %(gi2taxid)s\n -o %(outf_tax)s > %(outfile)s.log\n ; cat %(outf_tax)s\n %(filter_stmt)s\n | gzip > %(outfile)s\n ; checkpoint\n ; rm -rf %(outf_tax)s'''\n P.run()\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"taxa_map.dir\"))\n@transform(runLCA, regex(\"(\\S+)/(\\S+).lca.gz\"), r\"taxa_map.dir/\\2.map.gz\")\ndef buildTaxaMap(infile, outfile):\n '''\n build a map from kingdom through species for\n each lca file - allows to map clades in downstream\n analysis\n '''\n statement = '''zcat %(infile)s\n | cgat lca2table\n --output-map\n --log=%(outfile)s.log\n | gzip > %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@merge(buildTaxaMap, \"taxa_map.dir/aggregated_taxa.map.gz\")\ndef aggregateTaxaMaps(infiles, outfile):\n '''\n build a union of taxa mappings from kingdom through species\n '''\n found = []\n outf = IOTools.openFile(outfile, \"w\")\n outf.write(\"kingdom\\tphylum\\tclass\\torder\\tfamily\\tgenus\\tspecies\\n\")\n for infile in infiles:\n inf = IOTools.openFile(infile)\n # skip header\n header = inf.readline()\n for line in inf.readlines():\n if line in found:\n continue\n else:\n found.append(line)\n outf.write(line)\n outf.close()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(runLCA, suffix(\".lca.gz\"), \".classified.gz\")\ndef buildLCA(infile, outfile):\n '''\n tabulate LCA output into nice format. 
Per read assignment\n '''\n statement = '''zcat %(infile)s\n | cgat lca2table\n --summarise=individual\n --log=%(outfile)s.log\n | sed -e 's/order/_order/g'\n | gzip > %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(runLCA, suffix(\".lca.gz\"), \".level.count\")\ndef countLcaPerLevelTaxa(infile, outfile):\n '''\n count the number of taxa as found using LCA algorithm\n '''\n job_memory = \"20G\"\n statement = '''zcat %(infile)s |\n cgat lca2table\n --summarise=level-counts\n --log=%(outfile)s.log\n > %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(countLcaPerLevelTaxa, suffix(\".count\"), \".count.load\")\ndef loadCountLcaPerLevelTaxa(infile, outfile):\n '''\n load taxa level counts\n '''\n P.load(infile, outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(runLCA, suffix(\".lca.gz\"), \".counts.tsv.gz\")\ndef buildLcaCounts(infile, outfile):\n '''\n count the number of taxa as found using LCA algorithm\n '''\n job_memory = \"20G\"\n statement = '''zcat %(infile)s |\n cgat lca2table\n --summarise=taxa-counts\n --log=%(outfile)s.log\n | gzip > %(outfile)s'''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(buildLcaCounts,\n suffix(\".diamond.counts.tsv.gz\"),\n \".counts.diamond.load\")\ndef loadLcaCounts(infile, outfile):\n '''\n load taxa level counts\n '''\n P.load(infile, outfile, \"--add-index=taxa\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"counts.dir\"))\n@split(loadLcaCounts, \"counts.dir/*.diamond.counts.tsv.gz\")\ndef buildLcaLevelCounts(infiles, outfiles):\n '''\n split counts by taxonomic levels\n '''\n for infile in infiles:\n tablename = P.toTable(os.path.basename(infile))\n track = P.snip(os.path.basename(infile), \".counts.diamond.load\")\n levels = [\n \"phylum\",\n \"class\",\n \"order\",\n \"family\",\n \"genus\",\n \"species\"]\n\n dbh = connect()\n cc = dbh.cursor()\n\n for level in levels:\n outname = \"counts.dir/\" + \\\n track + \".%s.diamond.counts.tsv.gz\" % level\n outf = IOTools.openFile(outname, \"w\")\n outf.write(\"taxa\\tcount\\n\")\n for data in cc.execute(\n \"\"\"SELECT taxa,\n count\n FROM %s\n WHERE\n level == '%s'\"\"\" % (tablename, level)).fetchall():\n outf.write(\"\\t\".join(map(str, data)) + \"\\n\")\n outf.close()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(buildLcaLevelCounts, suffix(\".tsv.gz\"), \".load\")\ndef loadLcaLevelCounts(infile, outfile):\n '''\n load LCA per taxonomic level counts\n '''\n P.load(infile, 
outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@split(buildLcaLevelCounts, \"counts.dir/*.diamond.aggregated.counts.tsv.gz\")\ndef mergeLcaCountsAcrossSamples(infiles, outfiles):\n '''\n merge counts into a single table across samples\n '''\n levels = [\n \"phylum\",\n \"class\",\n \"order\",\n \"family\",\n \"genus\",\n \"species\"]\n for level in levels:\n prefixes = glob.glob(\n \"counts.dir/*.%(level)s.diamond.counts.tsv.gz\" % locals())\n prefixes = \",\".join(\n [P.snip(os.path.basename(x),\n \".%(level)s.diamond.counts.tsv.gz\"\n % locals()) for x in prefixes])\n\n outname = os.path.join(\n \"counts.dir\", level + \".diamond.aggregated.counts.tsv.gz\")\n\n statement = '''cgat combine_tables\n --missing=0\n --columns=1\n --take=count\n --glob=counts.dir/*.%(level)s.diamond.counts.tsv.gz\n --prefixes=%(prefixes)s\n --log=%(outname)s.log\n | gzip > %(outname)s'''\n P.run()\n\n###############################################\n###############################################\n###############################################\n\n\nCOUNT_DATA = []\nclassifiers = {\"kraken\": mergeKrakenCountsAcrossSamples,\n \"lca\": mergeLcaCountsAcrossSamples}\nfor classifier in P.asList(PARAMS.get(\"classifiers\")):\n COUNT_DATA.append(classifiers[classifier])\n\n###############################################\n###############################################\n###############################################\n\n\n@jobs_limit(1, \"R\")\n@transform(COUNT_DATA,\n suffix(\".counts.tsv.gz\"),\n \".counts.rarefied.tsv\")\ndef rarefyTaxa(infile, outfile):\n '''\n rarefy to minimum counts\n '''\n PipelineMetagenomeCommunities.rarefy(infile,\n outfile,\n PARAMS.get(\"rarefy_sample\"))\n\n###############################################\n###############################################\n###############################################\n\n\nrarefy = {0: (COUNT_DATA, \".counts.tsv.gz\"),\n 1: (rarefyTaxa, \".counts.rarefied.tsv\")}\nRAREFY_FUNC = rarefy[PARAMS.get(\"rarefy_rarefy_taxa\")][0]\nRAREFY_SUFFIX = rarefy[PARAMS.get(\"rarefy_rarefy_taxa\")][1]\n\n\n@jobs_limit(1, \"R\")\n@transform(RAREFY_FUNC,\n suffix(RAREFY_SUFFIX),\n \".proportion.tsv\")\ndef buildLcaProportionsAcrossSamples(infile, outfile):\n '''\n build the proportion of reads mapped to\n each taxoomic level per sample\n '''\n PipelineMetagenomeCommunities.buildLcaProportionsAcrossSamples(\n infile,\n outfile,\n dtype=\"taxa\")\n\n###############################################\n###############################################\n###############################################\n\n\n@jobs_limit(1, \"R\")\n@transform(buildLcaProportionsAcrossSamples,\n suffix(\".tsv\"),\n \".cumproportion.pdf\")\ndef plotLcaProportionDistributions(infile, outfile):\n '''\n plot the cumulative proportions of taxa\n '''\n PipelineMetagenomeCommunities.plotProportionDistributions(infile,\n outfile)\n\n###############################################\n###############################################\n###############################################\n\n\n@jobs_limit(1, \"R\")\n@transform(buildLcaProportionsAcrossSamples,\n suffix(\".tsv\"),\n \".stackedbar.pdf\")\ndef barchartLcaProportions(infile, outfile):\n '''\n barchart description of percent reads\n mapping to each taxon\n '''\n if PARAMS[\"heatmaps_order\"]:\n order = PARAMS.get(\"heatmaps_order\")\n else:\n order = 
open(infile).readline()[:-1].split(\"\\t\")[:-1]\n order.sort()\n order = \",\".join(order)\n PipelineMetagenomeCommunities.barchartProportions(infile,\n outfile,\n order,\n dtype=\"taxa\")\n\n###############################################\n###############################################\n###############################################\n\n\n@follows(plotLcaProportionDistributions,\n barchartLcaProportions)\ndef proportions():\n pass\n\n###############################################\n###############################################\n###############################################\n\n\n@transform(mergeLcaCountsAcrossSamples, suffix(\".tsv.gz\"), \".load\")\ndef loadAggregatedCounts(infile, outfile):\n '''\n load aggregated counts\n '''\n P.load(infile, outfile, \"--add-index=taxa\")\n\n############################\n# LCA target\n############################\n\n\n@follows(mergeLcaCountsAcrossSamples,\n aggregateTaxaMaps)\ndef Lca():\n pass\n\n###################################################################\n###################################################################\n###################################################################\n# Diversity analysis using the R package Vegan\n###################################################################\n###################################################################\n###################################################################\n\n\nCOUNT_DATA = []\nclassifiers = {\"kraken\": mergeKrakenCountsAcrossSamples,\n \"lca\": mergeLcaCountsAcrossSamples}\nfor classifier in P.asList(PARAMS.get(\"classifiers\")):\n COUNT_DATA.append(classifiers[classifier])\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@follows(mkdir(\"diversity.dir\"))\n@transform(COUNT_DATA,\n regex(\"(\\S+)/(\\S+).counts.tsv.gz\"),\n r\"diversity.dir/\\2.rarefaction.pdf\")\ndef runRarefactionAnalysis(infile, outfile):\n '''\n run rarefaction analysis - sample to minimum sample count\n and calculate taxonomic richness\n '''\n f, to, step = PARAMS.get(\"rarefaction_from\"), \\\n PARAMS.get(\"rarefaction_to\"), \\\n PARAMS.get(\"rarefaction_step\")\n rdir = PARAMS.get(\"rscriptsdir\")\n PipelineMetagenomeCommunities.rarefactionCurve(infile,\n outfile,\n rdir,\n f=f,\n step=step)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform(COUNT_DATA,\n regex(\"(\\S+)/(\\S+).counts.tsv.gz\"),\n r\"diversity.dir/\\2.richness.sig\")\ndef testRichness(infile, outfile):\n '''\n test significance of richness using kruskal wallis test\n '''\n rdir = PARAMS.get(\"rscriptsdir\")\n sample = PARAMS.get(\"richness_sample\")\n PipelineMetagenomeCommunities.testRichness(infile,\n outfile,\n rdir,\n sample)\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@follows(mkdir(\"diversity.dir\"))\n@transform(COUNT_DATA,\n regex(\"(\\S+)/(\\S+).counts.tsv.gz\"),\n r\"diversity.dir/\\2.diversity.tsv\")\ndef buildDiversity(infile, outfile):\n '''\n build flat file with diversity calculation\n '''\n rdir = 
PARAMS.get(\"rscriptsdir\")\n ind = PARAMS.get(\"diversity_index\")\n PipelineMetagenomeCommunities.buildDiversity(infile,\n outfile,\n rdir,\n ind=ind)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@follows(mkdir(\"diversity.dir\"))\n@transform(COUNT_DATA,\n regex(\"(\\S+)/(\\S+).counts.tsv.gz\"),\n r\"diversity.dir/\\2.diversity.pdf\")\ndef barplotDiversity(infile, outfile):\n '''\n barplot diversity between conditions\n '''\n rdir = PARAMS.get(\"rscriptsdir\")\n ind = PARAMS.get(\"diversity_index\")\n PipelineMetagenomeCommunities.barplotDiversity(infile,\n outfile,\n rdir,\n ind=ind)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@follows(mkdir(\"diversity.dir\"))\n@transform(COUNT_DATA,\n regex(\"(\\S+)/(\\S+).counts.tsv.gz\"),\n r\"diversity.dir/\\2.diversity.sig\")\ndef testDiversity(infile, outfile):\n '''\n significance testing on community-wide diversity\n estimate\n '''\n rdir = PARAMS.get(\"rscriptsdir\")\n ind = PARAMS.get(\"diversity_index\")\n PipelineMetagenomeCommunities.testDiversity(infile,\n outfile,\n rdir,\n ind=ind)\n\n\n@follows(testDiversity,\n testRichness,\n runRarefactionAnalysis,\n barplotDiversity,\n buildDiversity)\ndef diversity():\n pass\n\n###################################################################\n###################################################################\n###################################################################\n# Functional profiling - use diamond to align to non-redundant\n# set of proteins\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"genes.dir\"))\n@transform(preprocessReads,\n regex(\"(\\S+)/(\\S+).fa.gz\"),\n r\"genes.dir/\\2.diamond.genes.tsv.gz\")\ndef runDiamondOnGenes(infile, outfile):\n '''\n diamond is an ultra fast equivalent to blastx. 
It takes\n fastq files as input\n At present it will run one sequence from paired files\n '''\n temp = P.getTempFilename(\".\")\n\n job_threads = PARAMS[\"diamond_threads\"]\n job_memory = PARAMS[\"diamond_memory\"]\n\n db = PARAMS[\"genes_db\"]\n diamond_options = PARAMS[\"genes_diamond_options\"]\n\n statement = '''zcat %(infile)s > %(temp)s.fastq;\n checkpoint;\n diamond blastx\n --db %(db)s\n --query %(temp)s.fastq\n --threads %(job_threads)s\n --daa %(temp)s.daa\n --log\n %(diamond_options)s\n &> %(outfile)s.log;\n diamond view -a %(temp)s.daa | gzip > %(outfile)s;\n checkpoint;\n rm -rf %(temp)s %(temp)s.fastq %(temp)s.daa\n '''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(runDiamondOnGenes, suffix(\".tsv.gz\"), \".counts.tsv.gz\")\ndef buildDiamondGeneCounts(infile, outfile):\n '''\n build gene level counts\n '''\n job_memory = PARAMS.get(\"genes_memory\")\n options = PARAMS.get(\"genes_count_options\")\n statement = '''zcat %(infile)s |\n cgat diamond2counts\n %(options)s\n --log=%(outfile)s.log\n | gzip > %(outfile)s'''\n\n P.run()\n\n\n###################################################################\n###################################################################\n###################################################################\n\n@transform(buildDiamondGeneCounts,\n suffix(\".tsv.gz\"),\n \".load\")\ndef loadDiamondGeneCounts(infile, outfile):\n '''\n load gene counts\n '''\n P.load(infile, outfile, \"--add-index=taxa\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@merge(buildDiamondGeneCounts, \"genes.dir/gene_counts.tsv.gz\")\ndef mergeDiamondGeneCounts(infiles, outfile):\n '''\n merge counts across sample datasets\n '''\n # USE THE SAME GLOB AS IN THE COMBINING TABLES SCRIPT\n # maintain correct order\n prefixes = [\n P.snip(os.path.basename(x), \".genes.counts.tsv.gz\")\n for x in glob.glob(\"genes.dir/*.genes.counts.tsv.gz\")]\n prefixes = \",\".join(prefixes)\n\n statement = '''cgat combine_tables\n --missing=0\n --columns=1\n --take=count\n --glob=genes.dir/*.genes.counts.tsv.gz\n --prefixes=%(prefixes)s\n --log=%(outfile)s.log\n | gzip > %(outfile)s'''\n P.run()\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(mergeDiamondGeneCounts, suffix(\".tsv.gz\"), \".annotated.tsv.gz\")\ndef annotatePathways(infile, outfile):\n '''\n annotate NOGs with there respecetive pathways (functional categories)\n '''\n PipelineMetagenomeCommunities.annotate(infile,\n outfile,\n PARAMS.get(\"pathways_geneset\"))\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform(annotatePathways,\n suffix(\".annotated.tsv.gz\"),\n \".proportion.pathways.tsv\")\ndef buildPathwayProportionsAcrossSamples(infile, outfile):\n '''\n build the proportion of reads mapped to\n each taxoomic level per sample\n '''\n PipelineMetagenomeCommunities.buildLcaProportionsAcrossSamples(\n 
infile,\n outfile,\n dtype=\"pathway\")\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform(mergeDiamondGeneCounts,\n suffix(\"_counts.tsv.gz\"),\n \".proportion.tsv\")\ndef buildGeneProportionsAcrossSamples(infile, outfile):\n '''\n build the proportion of reads mapped to\n each taxoomic level per sample\n '''\n PipelineMetagenomeCommunities.buildLcaProportionsAcrossSamples(\n infile,\n outfile,\n dtype=\"gene\")\n\n###############################################\n###############################################\n###############################################\n\n\n@jobs_limit(1, \"R\")\n@transform([buildGeneProportionsAcrossSamples,\n buildPathwayProportionsAcrossSamples],\n suffix(\".tsv\"),\n \".cumproportion.pdf\")\ndef plotGeneProportionDistributions(infile, outfile):\n '''\n plot the cumulative proportions of taxa\n '''\n PipelineMetagenomeCommunities.plotProportionDistributions(infile,\n outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform([buildGeneProportionsAcrossSamples,\n buildPathwayProportionsAcrossSamples],\n suffix(\".tsv\"),\n \".stackedbar.pdf\")\ndef barchartGeneProportions(infile, outfile):\n '''\n heatmap description of percent reads\n mapping to each taxon\n '''\n if PARAMS[\"heatmaps_order_genes\"]:\n order = PARAMS.get(\"heatmaps_order_genes\")\n else:\n order = open(infile).readline()[:-1].split(\"\\t\")[:-1]\n order.sort()\n order = \",\".join(order)\n PipelineMetagenomeCommunities.barchartProportions(infile,\n outfile,\n order,\n dtype=\"pathways\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n#################\n# genes target\n#################\n\n\n@follows(mergeDiamondGeneCounts,\n loadDiamondGeneCounts,\n barchartGeneProportions)\ndef Genes():\n pass\n\n###################################################################\n###################################################################\n###################################################################\n# Count alignments in for each of the methods\n###################################################################\n###################################################################\n###################################################################\n\n\nCOUNT_TARGETS = []\nclassifiers = {\"kraken\": loadKrakenLevelCounts,\n \"lca\": loadLcaLevelCounts}\nfor classifier in P.asList(PARAMS.get(\"classifiers\")):\n COUNT_TARGETS.append(classifiers[classifier])\n\n###################################################################\n\n\n@follows(mkdir(\"alignment_stats.dir\"))\n@transform(COUNT_TARGETS + [loadDiamondGeneCounts],\n regex(\"(\\S+)/(\\S+).counts.load\"),\n add_inputs(loadReadCounts),\n r\"alignment_stats.dir/\\2.stats\")\ndef countAlignments(infiles, outfile):\n '''\n count queries that have been aligned and have\n and assignment\n '''\n infile = infiles[0]\n summary_table = P.toTable(infiles[1])\n\n # assume that files are named without any other R[0-9]\n track = os.path.basename(re.match(\n \"(.*-R[0-9]*).(.*.counts.load)\", 
infile).groups()[0])\n table = P.toTable(infile)\n\n # connect to database\n dbh = connect()\n cc = dbh.cursor()\n\n alignments = cc.execute(\"\"\"SELECT SUM(count)\n FROM %(table)s\"\"\" % locals()).fetchone()[0]\n nreads = cc.execute(\"\"\"SELECT total_reads\n FROM %(summary_table)s\n WHERE track == '%(track)s'\n \"\"\" % locals()).fetchone()[0]\n\n outf = open(outfile, \"w\")\n outf.write(\"total_reads\\taligned_reads\\tpaligned_reads\\n\")\n outf.write(\"\\t\".join(\n map(str,\n [nreads, alignments, (float(alignments) / nreads) * 100])) + \"\\n\")\n outf.close()\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@merge(countAlignments, \"alignment_stats.dir/alignment_stats.tsv\")\ndef aggregateAlignmentStats(infiles, outfile):\n '''\n one table for alignment stats\n '''\n samples = [\n os.path.basename(x).split(\".\")[0] for x in infiles if \"genes\" not in x]\n levels = [\n os.path.basename(x).split(\".\")[1] for x in infiles if \"genes\" not in x]\n tools = [\n os.path.basename(x).split(\".\")[2] for x in infiles if \"genes\" not in x]\n outf = IOTools.openFile(outfile, \"w\")\n outf.write(\"sample\\tlevel\\ttool\\tnreads\\tnassigned\\tpassigned\\n\")\n for s, l, t in zip(samples, levels, tools):\n inf = IOTools.openFile(\n \"alignment_stats.dir/\" + s + \".\" + l + \".\" + t + \".\" + \"stats\")\n inf.readline()\n result = inf.readline()\n outf.write(\"\\t\".join([s, l, t]) + \"\\t\" + result)\n outf.close()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(aggregateAlignmentStats, suffix(\".tsv\"), \".pdf\")\ndef plotAlignmentStats(infile, outfile):\n '''\n barplot alignment stats\n '''\n PipelineMetagenomeCommunities.barplotAlignmentStats(infile,\n outfile,\n take=\"passigned\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(count_reads)\n@transform(aggregateAlignmentStats, suffix(\".stats\"), \".stats.load\")\ndef loadAlignmentStats(infile, outfile):\n '''\n load alignment counts\n '''\n P.load(infile, outfile)\n\n\n@follows(loadAlignmentStats, plotAlignmentStats)\ndef Alignment_stats():\n pass\n\n###################################################################\n###################################################################\n###################################################################\n# Differential abundance analysis of taxa and genes. 
We use\n# metagenomeSeq and DESeq2 here to assess differential abundance\n###################################################################\n###################################################################\n###################################################################\n\n\nCLASSIFIER_TARGETS = []\nclassifiers = {\"kraken\": mergeKrakenCountsAcrossSamples,\n \"lca\": mergeLcaCountsAcrossSamples}\nfor classifier in P.asList(PARAMS.get(\"classifiers\")):\n CLASSIFIER_TARGETS.append(classifiers[classifier])\n\n###################################################################\n\n\n@follows(mkdir(\"diff.dir\"))\n@transform(CLASSIFIER_TARGETS + [mergeDiamondGeneCounts],\n regex(\"(\\S+)/(\\S+).tsv.gz\"),\n r\"diff.dir/\\2.diff.tsv\")\ndef runMetagenomeSeq(infile, outfile):\n '''\n run metagenomeSeq - a tool for calculating significance\n based on gene counts\n '''\n rscriptsdir = PARAMS.get(\"rscriptsdir\")\n rscript = PARAMS.get(\"metagenomeseq_rscript\")\n prefix = P.snip(infile.replace(\"counts.dir\", \"diff.dir\"), \".tsv.gz\")\n\n if infile.find(\"gene\") != -1:\n prefix = P.snip(infile.replace(\"genes.dir\", \"diff.dir\"), \".tsv.gz\")\n k = PARAMS.get(\"metagenomeseq_genes_k\")\n a = PARAMS.get(\"metagenomeseq_genes_a\")\n\n if PARAMS.get(\"metagenomeseq_genes_restrict\"):\n restrict_file = PARAMS.get(\"metagenomeseq_genes_restrict_file\")\n temp = P.getTempFile(\".\")\n genes = set([x[:-1] for x in open(restrict_file).readlines()])\n inf = IOTools.openFile(infile)\n header = inf.readline()\n temp.write(header)\n for line in IOTools.openFile(infile).readlines():\n data = line[:-1].split(\"\\t\")\n if data[0] in genes:\n temp.write(line)\n temp.close()\n infile = temp.name\n else:\n k = PARAMS.get(\"metagenomeseq_taxa_k\")\n a = PARAMS.get(\"metagenomeseq_taxa_a\")\n\n # hack just to look at genus\n if PARAMS.get(\"metagenomeseq_taxa_restrict\") and \"genus\" in infile:\n restrict_file = PARAMS.get(\"metagenomeseq_taxa_restrict_file\")\n temp = P.getTempFile(\".\")\n taxa = set([x[:-1] for x in open(restrict_file).readlines()])\n inf = IOTools.openFile(infile)\n header = inf.readline()\n temp.write(header)\n for line in IOTools.openFile(infile).readlines():\n data = line[:-1].split(\"\\t\")\n if data[0] in taxa:\n temp.write(line)\n temp.close()\n infile = temp.name\n\n statement = '''%(rscript)s %(rscriptsdir)s/run_metagenomeseq.R\n -c %(infile)s\n -p %(prefix)s\n --k %(k)i\n --a %(a)f > %(outfile)s.log'''\n\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"deseq2.dir\"))\n@transform(CLASSIFIER_TARGETS + [mergeDiamondGeneCounts],\n regex(\"(\\S+)/(\\S+).tsv.gz\"),\n r\"deseq2.dir/\\2.diff.tsv\")\ndef runDESeq2(infile, outfile):\n '''\n run DESeq2 - a tool for calculating significance\n based on gene counts\n '''\n # build design as a temporary file\n design = P.getTempFile(\".\")\n design.write(\"track\\tgroup\\tinclude\\tpair\\n\")\n samples = IOTools.openFile(infile).readline()[:-1].split(\"\\t\")\n samples = samples[1:]\n conditions = [x.split(\"-\")[1] for x in samples]\n for i in range(len(samples)):\n design.write(\"%s\\t%s\\t1\\t0\\n\" % (samples[i], conditions[i]))\n design.close()\n d = design.name\n\n # run DESeq2\n outpattern = P.snip(outfile, \".diff.tsv\") + \"_\"\n fdr = PARAMS.get(\"deseq2_fdr\")\n min_rowcounts = PARAMS.get(\"deseq2_filter_min_counts_per_row\")\n 
min_samplecounts = PARAMS.get(\"deseq2_filter_min_counts_per_sample\")\n    percentile_rowsums = PARAMS.get(\"deseq2_filter_percentile_rowsums\")\n    statement = '''cgat runExpression\n                   --method=deseq2\n                   --outfile=%(outfile)s\n                   --output-filename-pattern=%(outpattern)s\n                   --fdr=%(fdr)s\n                   --tags-tsv-file=%(infile)s\n                   --design-tsv-file=%(d)s\n                   --filter-min-counts-per-row=%(min_rowcounts)s\n                   --filter-min-counts-per-sample=%(min_samplecounts)s\n                   --filter-percentile-rowsums=%(percentile_rowsums)s\n                   --log=%(outfile)s.log'''\n\n    P.run()\n\n    os.unlink(design.name)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@subdivide([runMetagenomeSeq, runDESeq2],\n           regex(\"(\\S+)/(\\S+).diff.tsv*\"),\n           add_inputs(aggregateTaxaMaps),\n           r\"\\1/*.\\2.diff.tsv\")\ndef splitResultsByKingdom(infiles, outfiles):\n    '''\n    split results by kingdom for downstream\n    analysis\n    TODO: Tidy this up and put into module with\n    P.submit\n    '''\n    result, mapfile = infiles\n\n    # metagenomeseq normalised values are in .norm.matrix\n    # and deseq2 normalised values are in _rlog.tsv.gz\n    if os.path.dirname(infiles[0]) == \"deseq2.dir\":\n        matrix = P.snip(infiles[0], \".diff.tsv\") + \"_rlog.tsv.gz\"\n    else:\n        matrix = P.snip(infiles[0], \".diff.tsv\") + \".norm.matrix\"\n\n    hierarchy = PipelineMetagenomeCommunities.readHierarchy(mapfile)\n\n    # need to do it for both the results\n    # table and the normalised matrix file\n    for inf in [result, matrix]:\n        header = IOTools.openFile(inf).readline()\n        for kingdom, taxa in hierarchy.items():\n            if kingdom == \"NA\":\n                kingdom = \"other\"\n            # specify new outfile name\n            outf = os.path.join(\n                os.path.dirname(result), kingdom + \".\")\n            outf = outf + os.path.basename(result)\n            if inf.endswith(\".matrix\"):\n                suffix = \".norm.matrix\"\n                # last column in the matrix file\n                taxon_ind = -1\n            elif inf.endswith(\"_rlog.tsv.gz\"):\n                suffix = \"_rlog.tsv.gz\"\n                taxon_ind = 0\n            else:\n                suffix = None\n                taxon_ind = 8\n            if suffix:\n                outf = outf.replace(\".diff.tsv\", suffix)\n            outf = IOTools.openFile(outf, \"w\")\n            outf.write(header)\n            for line in IOTools.openFile(inf).readlines():\n                data = line.strip(\"\\n\").split(\"\\t\")\n                taxon = data[taxon_ind].replace('\"', '')\n                if taxon in taxa:\n                    outf.write(line)\n                else:\n                    continue\n            outf.close()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform([runMetagenomeSeq, runDESeq2, splitResultsByKingdom],\n           suffix(\".tsv\"), \".load\")\ndef loadDifferentialAbundance(infile, outfile):\n    '''\n    load differentially abundant features\n    '''\n    P.load(infile, outfile, \"--allow-empty-file\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"annotations.dir\"))\n@transform(PARAMS.get(\"genes_annotation\"),\n           regex(\"(\\S+)/(\\S+).txt\"),\n           r\"annotations.dir/\\2.load\")\ndef loadGeneAnnotations(infile, outfile):\n    '''\n    load annotations file\n    '''\n    P.load(infile, outfile, 
\"--header=COG,description\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(mkdir(\"pathways.dir\"))\n@transform(loadDifferentialAbundance,\n regex(\"genes.dir/(\\S+).diff.load\"),\n r\"pathways.dir/foreground.tsv\")\ndef buildForegroundGeneset(infile, outfile):\n '''\n build foreground data set for pathways analysis\n '''\n table = P.toTable(infile)\n dbh = connect()\n cc = dbh.cursor()\n result = {}\n groups = set()\n for group in cc.execute(\n \"\"\"SELECT group1, group2\n FROM %(table)s\"\"\" % locals()).fetchall():\n groups.add(group)\n\n for group in groups:\n result[group[0] + \"_vs_\" + group[1]] = {}\n\n p_type = PARAMS.get(\"metagenomeseq_taxa_threshold_option\")\n logfc = PARAMS.get(\"metagenomeseq_taxa_fc_threshold\")\n p = PARAMS.get(\"metagenomeseq_taxa_p_threshold\")\n if p_type == \"p\":\n p_type = \"P_Value\"\n elif p_type == \"padj\":\n p_type = \"adj_P_Val\"\n\n for group in groups:\n group1, group2 = group[0], group[1]\n for data in cc.execute(\n \"\"\"SELECT taxa, %(p_type)s, logFC FROM %(table)s\n WHERE\n group1 == '%(group1)s'\n AND\n group2 == '%(group2)s'\"\"\" % locals()).fetchall():\n gene_id, pval, logFC = data\n if pval < p and abs(logFC) > logfc:\n result[group[0] + \"_vs_\" + group[1]][gene_id] = 1\n else:\n result[group[0] + \"_vs_\" + group[1]][gene_id] = 0\n\n df = pandas.DataFrame(result)\n df.to_csv(outfile, sep=\"\\t\", index_label=\"gene_id\")\n\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@transform(loadDifferentialAbundance,\n regex(\"genes.dir/(\\S+).diff.load\"),\n r\"pathways.dir/background.tsv\")\ndef buildBackgroundGeneset(infile, outfile):\n '''\n build background data set for pathways analysis\n '''\n table = P.toTable(infile)\n dbh = connect()\n cc = dbh.cursor()\n\n outf = open(outfile, \"w\")\n outf.write(\"gene_id\\n\")\n for data in cc.execute(\n \"\"\"SELECT DISTINCT taxa\n FROM %(table)s\"\"\" % locals()).fetchall():\n outf.write(data[0] + \"\\n\")\n outf.close()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@split([buildForegroundGeneset,\n buildBackgroundGeneset,\n PARAMS.get(\"pathways_geneset\")],\n \"pathways.dir/*.overall\")\ndef runPathwaysAnalysis(infiles, outfiles):\n '''\n run pathways analysis\n '''\n genes, background, gene2pathway = infiles\n statement = '''cgat runGO \\\n --background=%(background)s\n --genes=%(genes)s \\\n --filename-input=%(gene2pathway)s \\\n -q BH \\\n --fdr \\\n --output-filename-pattern=\\\n pathways.dir/%%(set)s.%%(go)s.%%(section)s\" \\\n > pathways.dir/pathways.log \\\n '''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform([runMetagenomeSeq, runDESeq2, splitResultsByKingdom],\n suffix(\".diff.tsv\"),\n \".pca.tsv\")\ndef runPCA(infile, outfile):\n '''\n run principle components analysis\n '''\n # metagenomeseq normalised values are in .norm.matrix\n # and deseq2 normalised values are in 
_rlog.tsv.gz\n    if os.path.dirname(infile) == \"deseq2.dir\":\n        inf = P.snip(infile, \".diff.tsv\") + \"_rlog.tsv.gz\"\n        rownames = 1\n    else:\n        inf = P.snip(infile, \".diff.tsv\") + \".norm.matrix\"\n        rownames = len(open(inf).readline().strip(\"\\n\").split(\"\\t\"))\n    if len(IOTools.openFile(inf).readlines()) <= 2:\n        E.warn(\"Empty matrix %s: Check this is correct\" % inf)\n        P.touch(outfile)\n        P.touch(outfile.replace(\".tsv\", \".ve.tsv\"))\n    else:\n        PipelineMetagenomeCommunities.runPCA(inf, outfile, rownames=rownames)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform(runPCA, suffix(\".tsv\"), \".pdf\")\ndef plotPCA(infile, outfile):\n    '''\n    plot principal components\n    '''\n    # the variance explained is also output to a separate file\n    scores, ve = infile, P.snip(infile, \".tsv\") + \".ve.tsv\"\n\n    if os.path.getsize(scores) == 0:\n        E.warn(\"Empty matrix %s: Check this is correct\" % scores)\n        P.touch(outfile)\n    else:\n        PipelineMetagenomeCommunities.plotPCA(scores,\n                                              ve,\n                                              outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform([runMetagenomeSeq, runDESeq2], suffix(\".diff.tsv\"), \".mds.pdf\")\ndef runMDS(infile, outfile):\n    '''\n    run MDS analysis\n    '''\n    # metagenomeseq normalised values are in .norm.matrix\n    # and deseq2 normalised values are in _rlog.tsv.gz\n    if os.path.dirname(infile) == \"deseq2.dir\":\n        inf = P.snip(infile, \".diff.tsv\") + \"_rlog.tsv.gz\"\n    else:\n        inf = P.snip(infile, \".diff.tsv\") + \".norm.matrix\"\n    PipelineMetagenomeCommunities.plotMDS(inf, outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform([runMetagenomeSeq, runDESeq2, splitResultsByKingdom],\n           suffix(\".diff.tsv\"),\n           \".mds.sig\")\ndef runPermanova(infile, outfile):\n    '''\n    run PERMANOVA on Euclidean distances\n    '''\n    # metagenomeseq normalised values are in .norm.matrix\n    # and deseq2 normalised values are in _rlog.tsv.gz\n    if os.path.dirname(infile) == \"deseq2.dir\":\n        inf = P.snip(infile, \".diff.tsv\") + \"_rlog.tsv.gz\"\n        rownames = 1\n    else:\n        inf = P.snip(infile, \".diff.tsv\") + \".norm.matrix\"\n        rownames = len(open(inf).readline().strip(\"\\n\").split(\"\\t\"))\n\n    # only run if the file is not empty\n    if len(IOTools.openFile(inf).readlines()) == 1:\n        E.warn(\"Empty matrix %s: Check this is correct\" % inf)\n        P.touch(outfile)\n    else:\n        PipelineMetagenomeCommunities.testDistSignificance(inf,\n                                                           outfile,\n                                                           rownames=rownames,\n                                                           method=\"adonis\")\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(plotPCA,\n         runPermanova)\ndef PCA():\n    pass\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform(mergeLcaCountsAcrossSamples,\n           suffix(\".tsv.gz\"), 
\".barplot.png\")\ndef barplotAbundances(infile, outfile):\n '''\n barplots abundances\n '''\n # the infile is a separate file output by\n # run_metagenomeseq = normalised counts\n PipelineMetagenomeCommunities.barplotAbundance(infile, outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform(runMetagenomeSeq, suffix(\".tsv\"), \".ma.png\")\ndef MAPlot(infile, outfile):\n '''\n ma plot the results\n '''\n if infile.find(\"gene\") != -1:\n threshold_option = PARAMS.get(\"metagenomeseq_genes_threshold_option\")\n p = PARAMS.get(\"metagenomeseq_genes_p_threshold\")\n fc = PARAMS.get(\"metagenomeseq_genes_fc_threshold\")\n\n else:\n threshold_option = PARAMS.get(\"metagenomeseq_taxa_threshold_option\")\n p = PARAMS.get(\"metagenomeseq_taxa_p_threshold\")\n fc = PARAMS.get(\"metagenomeseq_taxa_fc_threshold\")\n\n # MAPlot for each group pair\n\n PipelineMetagenomeCommunities.MAPlot(infile,\n threshold_option,\n p,\n fc,\n outfile)\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@jobs_limit(1, \"R\")\n@transform([runMetagenomeSeq, splitResultsByKingdom],\n suffix(\".tsv\"),\n \".heatmap.pdf\")\ndef plotDiffHeatmap(infile, outfile):\n '''\n plot differentially expressed genes on a heatmap\n '''\n norm_file = P.snip(infile, \".diff.tsv\") + \".norm.matrix\"\n\n if infile.find(\"gene\") != -1:\n threshold_option = PARAMS.get(\"metagenomeseq_genes_threshold_option\")\n p = PARAMS.get(\"metagenomeseq_genes_p_threshold\")\n fc = PARAMS.get(\"metagenomeseq_genes_fc_threshold\")\n\n else:\n threshold_option = PARAMS.get(\"metagenomeseq_taxa_threshold_option\")\n p = PARAMS.get(\"metagenomeseq_taxa_p_threshold\")\n fc = PARAMS.get(\"metagenomeseq_taxa_fc_threshold\")\n\n PipelineMetagenomeCommunities.plotHeatmap(infile,\n norm_file,\n threshold_option,\n p,\n fc,\n outfile)\n\n################################\n# differential abundance target\n################################\n\n\n@follows(PCA, loadDifferentialAbundance)\ndef Differential_abundance():\n pass\n\n\n###################################################################\n###################################################################\n###################################################################\n\n# @transform(mergeDiamondGeneCounts, suffix(\".tsv.gz\"), \".diff.tsv\")\n# def runMetagenomeSeqOnGenes(infile, outfile):\n# '''\n# run metagenomeSeq - a tool for calculating significance\n# based on gene counts\n# '''\n# rscriptsdir = PARAMS.get(\"rscriptsdir\")\n# rscript = PARAMS.get(\"metagenomeseq_rscript\")\n# prefix = P.snip(infile, \".tsv.gz\")\n\n# statement = '''%(rscript)s %(rscriptsdir)s/run_metagenomeseq.R\n# -c %(infile)s\n# -p %(prefix)s\n# --k %(metagenomeseq_genes_k)i\n# --a %(metagenomeseq_genes_a)i > %(outfile)s.log'''\n\n# P.run()\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n\n# @transform(runMetagenomeSeqOnGenes, suffix(\".tsv\"), \".annotated.tsv\")\n# def annotateDifferentialAbundance(infile, outfile):\n# '''\n# annotate differential abundance table with gene names etc\n# '''\n# annotation = 
PARAMS.get(\"genes_annotation\")\n# PipelineMetagenomeCommunities.annotate(infile,\n# annotation,\n# outfile)\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n\n# @transform(annotateDifferentialAbundance, suffix(\".tsv\"), \".load\")\n# def loadDifferentialAbundance(infile, outfile):\n# '''\n# load the results of metagenomeSeq analysis\n# '''\n# P.load(infile, outfile, \"--add-index=taxa\")\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n\n# @transform(runMetagenomeSeqOnGenes, suffix(\".diff.tsv\"), \".mds.pdf\")\n# def runMDSOnGenes(infile, outfile):\n# '''\n# run MDS analysis on genes\n# '''\n# # the infile is a separate file output by\n# # run_metagenomeseq = normalised counts\n# inf = P.snip(infile, \".diff.tsv\") + \".norm.matrix\"\n# PipelineMetagenomeCommunities.plotMDS(inf, outfile)\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n\n# @transform(runMetagenomeSeqOnGenes, suffix(\".tsv\"), \".heatmap.png\")\n# def plotGenesDiffHeatmap(infile, outfile):\n# '''\n# plot differentially expressed genes on a heatmap\n# '''\n# norm_file = P.snip(infile, \".diff.tsv\") + \".norm.matrix\"\n\n# PipelineMetagenomeCommunities.plotHeatmap(infile,\n# norm_file,\n# PARAMS.get(\"metagenomeseq_p_threshold\"),\n# PARAMS.get(\"metagenomeseq_fc_threshold\"),\n# outfile)\n\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n\n# @transform(runMetagenomeSeqOnGenes, suffix(\".tsv\"), \".load\")\n# def loadMetagenomeSeqOnGenes(infile, outfile):\n# '''\n# laod differential abundance results\n# '''\n# P.load(infile, outfile, \"--add-index=taxa\")\n\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n# ## Counting KEGG associations\n# ###################################################################\n# ###################################################################\n# ###################################################################\n# @follows(mkdir(\"kegg.dir\"))\n# @transform(PARAMS.get(\"kegg_tre\"),\n# regex(\"(\\S+)/(\\S+).tre\"),\n# add_inputs(PARAMS.get(\"kegg_map\")),\n# r\"kegg.dir/\\2.tsv\")\n# def buildKeggTable(infiles, outfile):\n# '''\n# build kegg table mapping KO identifiers to pathways. 
This is\n#     based on the file that was downloaded with mtools (D.Huson)\n#     '''\n#     keggtre, keggmap = infiles\n#     statement = '''cgat keggtre2table\n#                    --kegg-tre=%(keggtre)s\n#                    --map=%(keggmap)s\n#                    --log=%(outfile)s.log\n#                    > %(outfile)s'''\n#     P.run()\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n# @transform(buildKeggTable, suffix(\".tsv\"), \".load\")\n# def loadKeggTable(infile, outfile):\n#     '''\n#     load KEGG table\n#     '''\n#     P.load(infile, outfile)\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n# @follows(mkdir(\"kegg.dir\"))\n# @transform(runLCA,\n#            regex(\"(\\S+)/(\\S+).lca.gz\"),\n#            add_inputs(buildKeggTable),\n#            r\"kegg.dir/\\2.kegg.counts\")\n# def countKeggAssociations(infiles, outfile):\n#     '''\n#     count the number of reads associated with Kegg pathways\n#     '''\n#     job_options = \"-l mem_free=25G\"\n#     infile = infiles[0].replace(\".lca\", \".kegg\")\n#     kegg_table = infiles[1]\n#     level = PARAMS.get(\"kegg_level\")\n#     statement = '''zcat %(infile)s |\n#     cgat lcakegg2counts\n#     --kegg-table=%(kegg_table)s\n#     --level=%(level)s\n#     --method=proportion\n#     --log=%(outfile)s.log\n#     > %(outfile)s'''\n#     P.run()\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n# @transform(countKeggAssociations, suffix(\".counts\"), \".counts.load\")\n# def loadCountKeggAssociations(infile, outfile):\n#     '''\n#     load counts of KO associations\n#     '''\n#     P.load(infile, outfile, \"--header=pathway,p_annotated_reads\")\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n# @follows(mkdir(\"kegg.dir\"))\n# @transform(runLCA,\n#            regex(\"(\\S+)/(\\S+).lca.gz\"),\n#            add_inputs(buildKeggTable),\n#            r\"kegg.dir/\\2.kegg.ko.counts\")\n# def countKeggGenes(infiles, outfile):\n#     '''\n#     count the number of reads associated with KO identifiers\n#     '''\n#     job_options = \"-l mem_free=25G\"\n#     infile = infiles[0].replace(\".lca\", \".kegg\")\n#     kegg_table = infiles[1]\n#     level = PARAMS.get(\"kegg_level\")\n#     statement = '''zcat %(infile)s |\n#     cgat lcakegg2counts\n#     --kegg-table=%(kegg_table)s\n#     --level=D\n#     --method=proportion\n#     --log=%(outfile)s.log\n#     > %(outfile)s'''\n#     P.run()\n\n# ###################################################################\n# ###################################################################\n# ###################################################################\n# @transform(countKeggGenes, suffix(\".ko.counts\"), \".ko.counts.load\")\n# def loadCountKeggGenes(infile, outfile):\n#     '''\n#     load counts of KO associations\n#     '''\n#     P.load(infile, outfile, \"--header=KO,p_annotated_reads\")\n\n# #########################################\n# # kegg target\n# #########################################\n# @follows(loadCountKeggAssociations)\n# def kegg():\n#     pass\n\n#########################################\n# full target\n#########################################\n\n@follows(Alignment_stats,\n         Differential_abundance,\n         diversity,\n         plotPCA,\n         proportions,\n         
Genes)\ndef full():\n pass\n\n####################\n# report building\n####################\n\n\n@follows(mkdir(\"report\"))\ndef build_report():\n '''build report from scratch.'''\n E.info(\"starting documentation build process from scratch\")\n P.run_report(clean=True)\n\n\n@follows(mkdir(\"report\"))\ndef update_report():\n '''update report.'''\n E.info(\"updating documentation\")\n P.run_report(clean=False)\n\n#########################################\n#########################################\n#########################################\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n P.main(argv)\n\n\nif __name__ == \"__main__\":\n sys.exit(P.main(sys.argv))\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
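The runDESeq2 step in the pipeline record above derives its experimental design purely from the count-table header: each sample name is split on "-" and the second field is taken as the condition. A minimal standalone sketch of that convention, assuming a gzipped count table whose first column holds the feature ids (the file name and sample names below are illustrative, not from the pipeline):

    import gzip

    def build_design(counts_path):
        # Read only the header of the gzipped count table; column 1 is the
        # feature id, the remaining columns are sample names.
        with gzip.open(counts_path, "rt") as f:
            samples = f.readline().rstrip("\n").split("\t")[1:]
        rows = ["track\tgroup\tinclude\tpair"]
        for s in samples:
            # condition = second '-'-separated field, e.g. "stool-HC-R1" -> "HC"
            rows.append("%s\t%s\t1\t0" % (s, s.split("-")[1]))
        return "\n".join(rows) + "\n"

    if __name__ == "__main__":
        print(build_design("genes.dir/gene_counts.tsv.gz"))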
Heechul90/Deep_Learning | [
"6e3b172dd36198c83f19528bd3687faf487a0af9"
] | [
"Study/DeepLearning01.py"
] | [
"# 딥러닝을 구동하는 데 필요한 케라스 함수를 불러옴\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# 필요한 라이브러리를 불러옴\nimport numpy as np\nimport tensorflow as tf\n\n# 실행할 때마다 같은 결과를 출력하기 위해 설정하는 부분\nseed = 0\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\n# 준비된 수술 환자 데이터를 불러옴\ndata_set = np.loadtxt('dataset1/ThoraricSurgery.csv',\n delimiter = ',')\ndata_set\ndata_set.shape\n\n\n# 환자의 기록과 수술 결과를 X와 Y로 구분\nX = data_set[:, 0:17]\nY = data_set[:, 17]\n\n\n# 딥러닝 구조를 결정(모델을 설정하고 실행하는 부분)\n# 딥러닝은 퍼셉트론 위에 숨겨진 퍼셉트론 층을 차곡차곡 추가하는 형태\n# 층들을 케라스에서 Sequential() 함수를 통해서 구현\n# model.add를 통해서 라인(층)을 추가\n# Dence 함수를 통해서 구체적으로 구조를 정함\n\nmodel = Sequential() # Sequential() 함수를 model로 선언\nmodel.add(Dense(30, input_dim = 17, activation = 'relu')) # model.add로 층을 추가, Dense 함수로 30개의 노드생성\nmodel.add(Dense(1, activation = 'sigmoid')) # model.add로 층을 추가 , Dense 함수로 1개 노드 생성\n\n\n# 딥러닝 컴파일\nmodel.compile(loss = 'binary_crossentropy',\n optimizer = 'adam',\n metrics = ['accuracy'])\n\n# 모델 실행\n# epochs: 학습 프로세스가 모든 샘플에 대해 한 번 실행되는 것을 1epoch\n# batch_size: 샘플을 한번에 몇 개씩 처리할지를 정하는 부분\nmodel.fit(X, Y, epochs = 500, batch_size = 10)\n\n"
] | [
[
"tensorflow.set_random_seed",
"numpy.loadtxt",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
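The Keras record above trains a 17-input, single-sigmoid-output network but stops after model.fit. A self-contained sketch of the same architecture evaluated after training; the synthetic arrays stand in for ThoraricSurgery.csv and are illustrative only:

    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    # Synthetic stand-in for the 17-feature surgery data (illustrative only).
    rng = np.random.RandomState(0)
    X = rng.rand(100, 17)
    Y = (X.sum(axis=1) > 8.5).astype("float32")

    model = Sequential()
    model.add(Dense(30, input_dim=17, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.fit(X, Y, epochs=5, batch_size=10, verbose=0)

    # evaluate returns [loss, accuracy] for the metrics compiled above.
    loss, acc = model.evaluate(X, Y, verbose=0)
    print("loss=%.4f accuracy=%.4f" % (loss, acc))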
nile649/POLY-GAN | [
"c91a0322fed909f8e96a1144ee33e0808e276c45"
] | [
"utils/utils.py"
] | [
"import torch\nimport numpy as np\nfrom torch.autograd import Variable\nimport random\n\n# Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network\n# ReplayBuffer was first introduced in the above mentioned paper, It's effect mathematically has been supported in \n# latest ICLR paper ProbGAN. Replay buffer uses previous data as prior for the Discriminator which it has seen already.\n# Page 5 of the paper, just over Theory section.\n# Hence we propose to maintain a subset of discriminators by subsampling the whole sequence of discriminators.\n\nclass ReplayBuffer():\n def __init__(self, max_size=50):\n assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'\n self.max_size = max_size\n self.data = []\n\n def push_and_pop(self, data):\n to_return = []\n for element in data.data:\n element = torch.unsqueeze(element, 0)\n if len(self.data) < self.max_size:\n self.data.append(element)\n to_return.append(element)\n else:\n if random.uniform(0, 1) > 0.5:\n i = random.randint(0, self.max_size - 1)\n to_return.append(self.data[i].clone())\n self.data[i] = element\n else:\n to_return.append(element)\n return Variable(torch.cat(to_return))\n\n# LambdaLR is use for Learning rate scheduling (Not used in main code).\nclass LambdaLR():\n def __init__(self, n_epochs, offset, decay_start_epoch):\n assert ((n_epochs - decay_start_epoch) > 0), \"Decay must start before the training session ends!\"\n self.n_epochs = n_epochs\n self.offset = offset\n self.decay_start_epoch = decay_start_epoch\n\n def step(self, epoch):\n return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch) / (self.n_epochs - self.decay_start_epoch)\n\n# Initialize kernel weights to uniform. We are not using BatchNorm in final code.\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1:\n torch.nn.init.normal(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n torch.nn.init.normal(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant(m.bias.data, 0.0)\n\n"
] | [
[
"torch.nn.init.constant",
"torch.cat",
"torch.unsqueeze",
"torch.nn.init.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
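In the record above, push_and_pop returns a batch in which each fake image is, with probability 0.5, swapped for one the buffer stored earlier, which is what stabilises the discriminator. A minimal usage sketch, assuming the ReplayBuffer class from the utils/utils.py file above is importable; the tensor shapes and loop length are illustrative:

    import torch
    from torch.autograd import Variable

    fake_buffer = ReplayBuffer(max_size=50)  # class defined in utils/utils.py above

    for step in range(3):
        # Stand-in for generator output: a batch of 4 RGB images.
        fake = Variable(torch.randn(4, 3, 64, 64))
        # Mix the current fakes with earlier ones before the discriminator sees them.
        fake_mixed = fake_buffer.push_and_pop(fake)
        # loss_D_fake = criterion(discriminator(fake_mixed.detach()), zeros)  # training step elided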
mashfiq10/Burgers1D | [
"9e8aafc612075cff9af4fe5cd4530b0eb2697b90"
] | [
"burgers1d.py"
] | [
"#!/usr/bin/python\n\n#################################################\n# 1D Burgers equation solver\n# Sk. Mashfiqur Rahman\n# Oklahoma State University\n# CWID: A20102717\n#################################################\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# domain\nx0 = 0.\nxL = 1.\n\nnx = 200\ndx = (xL - x0)/float(nx)\n\nx = np.empty(nx+1)\nu = np.empty(nx+1)\n\nfor i in range(nx+1):\n x[i] = x0 + float(i)*dx\n\nTmax = 0.3\nnt = 2000\nnf = 20\nkf = nt/nf\ndt = Tmax/float(nt)\nt = 0.\n\nfor i in range(nx+1):\n u[i] = np.sin(2.*np.pi*x[i])\nu[0] = 0.\nu[nx] = 0.\nplot = [u]\n\n\ndef w3(a, b, c):\n eps = 1.e-6\n q1 = -0.5*a + 1.5*b\n q2 = 0.5*b + 0.5*c\n\n s1 = (b-a)**2\n s2 = (c-b)**2\n\n a1 = (1./3.)/(eps+s1)**2\n a2 = (2./3.)/(eps+s2)**2\n f = (a1*q1 + a2*q2)/(a1 + a2)\n\n return f\n\n\ndef rhs_weno(nx,dx,_u):\n q = np.empty(nx+3)\n r = np.empty(nx-1)\n for i in range(0, nx+1):\n q[i+1] = _u[i]\n q[0] = 2.*q[1] - q[2]\n q[nx+2] = 2.*q[nx+1] - q[nx]\n\n for i in range(1, nx):\n if _u[i] >= 0.:\n v1 = (q[i] - q[i-1])/dx\n v2 = (q[i+1] - q[i])/dx\n v3 = (q[i+2] - q[i+1])/dx\n\n g = w3(v1, v2, v3)\n r[i-1] = -_u[i]*g\n\n else:\n v1 = (q[i+3] - q[i+2])/dx\n v2 = (q[i+2] - q[i+1])/dx\n v3 = (q[i+1] - q[i])/dx\n\n g = w3(v1, v2, v3)\n r[i-1] = -_u[i]*g\n\n return r\n\n\ndef weno(nx, dx, dt, _u):\n v = np.empty(nx+1)\n v[0] = _u[0]\n v[nx] = _u[nx]\n\n r = rhs_weno(nx, dx, _u)\n for i in range(1,nx):\n v[i] = _u[i] + dt*r[i-1]\n\n r = rhs_weno(nx, dx, v)\n for i in range(1,nx):\n v[i] = 0.75*_u[i] + 0.25*v[i] + 0.25*dt*r[i-1]\n\n r = rhs_weno(nx, dx, v)\n for i in range(1,nx):\n v[i] = 1./3.*_u[i] + 2./3.*v[i] + 2./3.*dt*r[i-1]\n\n return v\n\n\n# main function\nfor k in range(1, nt+1):\n\n u = weno(nx, dx, dt, u)\n t = t + dt\n\n if (k % kf) == 0:\n plot.append(u)\n # plt.figure()\n # plt.plot(x, u, label='t=final time')\n # plt.show()\n\nplt.figure()\nfor i in range(nf+1):\n plt.plot(x, plot[i], linewidth=1.5, label=r't = '+str(i))\nplt.ylabel('u')\nplt.xlabel('x')\nplt.legend(fontsize=10)\nplt.tick_params(axis='both', labelsize=10)\nplt.savefig('burgers1d.png', dpi = 1000)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.sin",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tick_params",
"numpy.empty",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
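On smooth, equally spaced data the two smoothness indicators in w3 above match, so its nonlinear weights collapse to the linear third-order weights 1/3 and 2/3 and the reconstruction recovers full third-order accuracy. A quick self-contained check of that property; the stencil values 1, 2, 3 are illustrative:

    # Standalone copy of the WENO-3 weighting from the record above.
    def w3(a, b, c):
        eps = 1.e-6
        q1 = -0.5*a + 1.5*b          # candidate from the upwind stencil
        q2 = 0.5*b + 0.5*c           # candidate from the downwind stencil
        s1 = (b - a)**2              # smoothness indicators
        s2 = (c - b)**2
        a1 = (1./3.)/(eps + s1)**2   # nonlinear weights (linear weights 1/3, 2/3)
        a2 = (2./3.)/(eps + s2)**2
        return (a1*q1 + a2*q2)/(a1 + a2)

    # Smooth, equally spaced samples: both smoothness indicators are equal,
    # so the blend equals the full third-order combination (q1 + 2*q2)/3.
    print(w3(1.0, 2.0, 3.0))                                      # 2.5
    print(abs(w3(1.0, 2.0, 3.0) - ((-0.5 + 3.0) + 2*(1.0 + 1.5))/3.) < 1e-12)  # True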
phivision/surreal | [
"61f56db4c840013497eef4c3954d1112c1c1acec"
] | [
"datageneration/main_part.py"
] | [
"import sys\nimport os\nimport random\nimport math\nimport bpy\nimport numpy as np\nfrom os import getenv\nfrom os import remove\nfrom os.path import join, dirname, realpath, exists\nfrom mathutils import Matrix, Vector, Quaternion, Euler\nfrom glob import glob\nfrom random import choice\nfrom pickle import load\nfrom bpy_extras.object_utils import world_to_camera_view as world2cam\n\nsys.path.insert(0, \".\")\n\n\ndef mkdir_safe(directory):\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass\n\n\ndef setState0():\n for ob in bpy.data.objects.values():\n ob.select = False\n bpy.context.scene.objects.active = None\n\n\nsorted_parts = ['hips', 'leftUpLeg', 'rightUpLeg', 'spine', 'leftLeg', 'rightLeg',\n 'spine1', 'leftFoot', 'rightFoot', 'spine2', 'leftToeBase', 'rightToeBase',\n 'neck', 'leftShoulder', 'rightShoulder', 'head', 'leftArm', 'rightArm',\n 'leftForeArm', 'rightForeArm', 'leftHand', 'rightHand', 'leftHandIndex1', 'rightHandIndex1']\n# order\npart_match = {'root': 'root', 'bone_00': 'Pelvis', 'bone_01': 'L_Hip', 'bone_02': 'R_Hip',\n 'bone_03': 'Spine1', 'bone_04': 'L_Knee', 'bone_05': 'R_Knee', 'bone_06': 'Spine2',\n 'bone_07': 'L_Ankle', 'bone_08': 'R_Ankle', 'bone_09': 'Spine3', 'bone_10': 'L_Foot',\n 'bone_11': 'R_Foot', 'bone_12': 'Neck', 'bone_13': 'L_Collar', 'bone_14': 'R_Collar',\n 'bone_15': 'Head', 'bone_16': 'L_Shoulder', 'bone_17': 'R_Shoulder', 'bone_18': 'L_Elbow',\n 'bone_19': 'R_Elbow', 'bone_20': 'L_Wrist', 'bone_21': 'R_Wrist', 'bone_22': 'L_Hand',\n 'bone_23': 'R_Hand'}\n\npart2num = {part: (ipart + 1) for ipart, part in enumerate(sorted_parts)}\n\n\n# create one material per part as defined in a pickle with the segmentation\n# this is useful to render the segmentation in a material pass\ndef create_segmentation(ob, params):\n materials = {}\n vgroups = {}\n with open('pkl/segm_per_v_overlap.pkl', 'rb') as f:\n vsegm = load(f)\n bpy.ops.object.material_slot_remove()\n parts = sorted(vsegm.keys())\n for part in parts:\n vs = vsegm[part]\n vgroups[part] = ob.vertex_groups.new(part)\n vgroups[part].add(vs, 1.0, 'ADD')\n bpy.ops.object.vertex_group_set_active(group=part)\n materials[part] = bpy.data.materials['Material'].copy()\n materials[part].pass_index = part2num[part]\n bpy.ops.object.material_slot_add()\n ob.material_slots[-1].material = materials[part]\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='DESELECT')\n bpy.ops.object.vertex_group_select()\n bpy.ops.object.material_slot_assign()\n bpy.ops.object.mode_set(mode='OBJECT')\n return (materials)\n\n\n# create the different passes that we render\ndef create_composite_nodes(tree, params, img=None, idx=0):\n res_paths = {k: join(params['tmp_path'], '%05d_%s' % (idx, k)) for k in params['output_types'] if\n params['output_types'][k]}\n\n # clear default nodes\n for n in tree.nodes:\n tree.nodes.remove(n)\n\n # create node for foreground image\n layers = tree.nodes.new('CompositorNodeRLayers')\n layers.location = -300, 400\n\n # create node for background image\n bg_im = tree.nodes.new('CompositorNodeImage')\n bg_im.location = -300, 30\n if img is not None:\n bg_im.image = img\n\n if (params['output_types']['vblur']):\n # create node for computing vector blur (approximate motion blur)\n vblur = tree.nodes.new('CompositorNodeVecBlur')\n vblur.factor = params['vblur_factor']\n vblur.location = 240, 400\n\n # create node for saving output of vector blurred image\n vblur_out = tree.nodes.new('CompositorNodeOutputFile')\n vblur_out.format.file_format = 'PNG'\n 
vblur_out.base_path = res_paths['vblur']\n vblur_out.location = 460, 460\n\n # create node for mixing foreground and background images\n mix = tree.nodes.new('CompositorNodeMixRGB')\n mix.location = 40, 30\n mix.use_alpha = True\n\n # create node for the final output\n composite_out = tree.nodes.new('CompositorNodeComposite')\n composite_out.location = 240, 30\n\n # create node for saving depth\n if (params['output_types']['depth']):\n depth_out = tree.nodes.new('CompositorNodeOutputFile')\n depth_out.location = 40, 700\n depth_out.format.file_format = 'OPEN_EXR'\n depth_out.base_path = res_paths['depth']\n\n # create node for saving normals\n if (params['output_types']['normal']):\n normal_out = tree.nodes.new('CompositorNodeOutputFile')\n normal_out.location = 40, 600\n normal_out.format.file_format = 'OPEN_EXR'\n normal_out.base_path = res_paths['normal']\n\n # create node for saving foreground image\n if (params['output_types']['fg']):\n fg_out = tree.nodes.new('CompositorNodeOutputFile')\n fg_out.location = 170, 600\n fg_out.format.file_format = 'PNG'\n fg_out.base_path = res_paths['fg']\n\n # create node for saving ground truth flow\n if (params['output_types']['gtflow']):\n gtflow_out = tree.nodes.new('CompositorNodeOutputFile')\n gtflow_out.location = 40, 500\n gtflow_out.format.file_format = 'OPEN_EXR'\n gtflow_out.base_path = res_paths['gtflow']\n\n # create node for saving segmentation\n if (params['output_types']['segm']):\n segm_out = tree.nodes.new('CompositorNodeOutputFile')\n segm_out.location = 40, 400\n segm_out.format.file_format = 'OPEN_EXR'\n segm_out.base_path = res_paths['segm']\n\n # merge fg and bg images\n tree.links.new(bg_im.outputs[0], mix.inputs[1])\n tree.links.new(layers.outputs['Image'], mix.inputs[2])\n\n if (params['output_types']['vblur']):\n tree.links.new(mix.outputs[0], vblur.inputs[0]) # apply vector blur on the bg+fg image,\n tree.links.new(layers.outputs['Z'], vblur.inputs[1]) # using depth,\n tree.links.new(layers.outputs['Speed'], vblur.inputs[2]) # and flow.\n tree.links.new(vblur.outputs[0], vblur_out.inputs[0]) # save vblurred output\n\n tree.links.new(mix.outputs[0], composite_out.inputs[0]) # bg+fg image\n if (params['output_types']['fg']):\n tree.links.new(layers.outputs['Image'], fg_out.inputs[0]) # save fg\n if (params['output_types']['depth']):\n tree.links.new(layers.outputs['Z'], depth_out.inputs[0]) # save depth\n if (params['output_types']['normal']):\n tree.links.new(layers.outputs['Normal'], normal_out.inputs[0]) # save normal\n if (params['output_types']['gtflow']):\n tree.links.new(layers.outputs['Speed'], gtflow_out.inputs[0]) # save ground truth flow\n if (params['output_types']['segm']):\n tree.links.new(layers.outputs['IndexMA'], segm_out.inputs[0]) # save segmentation\n\n return (res_paths)\n\n\n# creation of the spherical harmonics material, using an OSL script\ndef create_sh_material(tree, sh_path, img=None):\n # clear default nodes\n for n in tree.nodes:\n tree.nodes.remove(n)\n\n uv = tree.nodes.new('ShaderNodeTexCoord')\n uv.location = -800, 400\n\n uv_xform = tree.nodes.new('ShaderNodeVectorMath')\n uv_xform.location = -600, 400\n uv_xform.inputs[1].default_value = (0, 0, 1)\n uv_xform.operation = 'AVERAGE'\n\n uv_im = tree.nodes.new('ShaderNodeTexImage')\n uv_im.location = -400, 400\n if img is not None:\n uv_im.image = img\n\n rgb = tree.nodes.new('ShaderNodeRGB')\n rgb.location = -400, 200\n\n script = tree.nodes.new('ShaderNodeScript')\n script.location = -230, 400\n script.mode = 'EXTERNAL'\n script.filepath = 
sh_path # 'spher_harm/sh.osl' #using the same file from multiple jobs causes white texture\n script.update()\n\n # the emission node makes it independent of the scene lighting\n emission = tree.nodes.new('ShaderNodeEmission')\n emission.location = -60, 400\n\n mat_out = tree.nodes.new('ShaderNodeOutputMaterial')\n mat_out.location = 110, 400\n\n tree.links.new(uv.outputs[2], uv_im.inputs[0])\n tree.links.new(uv_im.outputs[0], script.inputs[0])\n tree.links.new(script.outputs[0], emission.inputs[0])\n tree.links.new(emission.outputs[0], mat_out.inputs[0])\n\n\n# computes rotation matrix through Rodrigues formula as in cv2.Rodrigues\ndef Rodrigues(rotvec):\n theta = np.linalg.norm(rotvec)\n r = (rotvec / theta).reshape(3, 1) if theta > 0. else rotvec\n cost = np.cos(theta)\n mat = np.asarray([[0, -r[2], r[1]],\n [r[2], 0, -r[0]],\n [-r[1], r[0], 0]])\n return (cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * mat)\n\n\ndef init_scene(scene, params, gender='female'):\n # load fbx model\n bpy.ops.import_scene.fbx(\n filepath=join(params['smpl_data_folder'], 'basicModel_%s_lbs_10_207_0_v1.0.2.fbx' % gender[0]),\n axis_forward='Y', axis_up='Z', global_scale=100)\n obname = '%s_avg' % gender[0]\n ob = bpy.data.objects[obname]\n ob.data.use_auto_smooth = False # autosmooth creates artifacts\n\n # assign the existing spherical harmonics material\n ob.active_material = bpy.data.materials['Material']\n\n # delete the default cube (which held the material)\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects['Cube'].select = True\n bpy.ops.object.delete(use_global=False)\n\n # set camera properties and initial position\n bpy.ops.object.select_all(action='DESELECT')\n cam_ob = bpy.data.objects['Camera']\n scn = bpy.context.scene\n scn.objects.active = cam_ob\n\n cam_ob.matrix_world = Matrix(((0., 0., 1, params['camera_distance']),\n (0., -1, 0., -1.0),\n (-1., 0., 0., 0.),\n (0.0, 0.0, 0.0, 1.0)))\n cam_ob.data.angle = math.radians(40)\n cam_ob.data.lens = 60\n cam_ob.data.clip_start = 0.1\n cam_ob.data.sensor_width = 32\n\n # setup an empty object in the center which will be the parent of the Camera\n # this allows to easily rotate an object around the origin\n scn.cycles.film_transparent = True\n scn.render.layers[\"RenderLayer\"].use_pass_vector = True\n scn.render.layers[\"RenderLayer\"].use_pass_normal = True\n scene.render.layers['RenderLayer'].use_pass_emit = True\n scene.render.layers['RenderLayer'].use_pass_emit = True\n scene.render.layers['RenderLayer'].use_pass_material_index = True\n\n # set render size\n scn.render.resolution_x = params['resy']\n scn.render.resolution_y = params['resx']\n scn.render.resolution_percentage = 100\n scn.render.image_settings.file_format = 'PNG'\n\n # clear existing animation data\n ob.data.shape_keys.animation_data_clear()\n arm_ob = bpy.data.objects['Armature']\n arm_ob.animation_data_clear()\n\n return (ob, obname, arm_ob, cam_ob)\n\n\n# transformation between pose and blendshapes\ndef rodrigues2bshapes(pose):\n rod_rots = np.asarray(pose).reshape(24, 3)\n mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots]\n bshapes = np.concatenate([(mat_rot - np.eye(3)).ravel()\n for mat_rot in mat_rots[1:]])\n return (mat_rots, bshapes)\n\n\n# apply trans pose and shape to character\ndef apply_trans_pose_shape(trans, pose, shape, ob, arm_ob, obname, scene, cam_ob, frame=None):\n # transform pose into rotation matrices (for pose) and pose blendshapes\n mrots, bsh = rodrigues2bshapes(pose)\n\n # set the location of the first bone to the 
translation parameter\n arm_ob.pose.bones[obname + '_Pelvis'].location = trans\n if frame is not None:\n arm_ob.pose.bones[obname + '_root'].keyframe_insert('location', frame=frame)\n # set the pose of each bone to the quaternion specified by pose\n for ibone, mrot in enumerate(mrots):\n bone = arm_ob.pose.bones[obname + '_' + part_match['bone_%02d' % ibone]]\n bone.rotation_quaternion = Matrix(mrot).to_quaternion()\n if frame is not None:\n bone.keyframe_insert('rotation_quaternion', frame=frame)\n bone.keyframe_insert('location', frame=frame)\n\n # apply pose blendshapes\n for ibshape, bshape in enumerate(bsh):\n ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].value = bshape\n if frame is not None:\n ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].keyframe_insert('value', index=-1, frame=frame)\n\n # apply shape blendshapes\n for ibshape, shape_elem in enumerate(shape):\n ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].value = shape_elem\n if frame is not None:\n ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].keyframe_insert('value', index=-1, frame=frame)\n\n\ndef get_bone_locs(obname, arm_ob, scene, cam_ob):\n n_bones = 24\n render_scale = scene.render.resolution_percentage / 100\n render_size = (int(scene.render.resolution_x * render_scale),\n int(scene.render.resolution_y * render_scale))\n bone_locations_2d = np.empty((n_bones, 2))\n bone_locations_3d = np.empty((n_bones, 3), dtype='float32')\n\n # obtain the coordinates of each bone head in image space\n for ibone in range(n_bones):\n bone = arm_ob.pose.bones[obname + '_' + part_match['bone_%02d' % ibone]]\n co_2d = world2cam(scene, cam_ob, arm_ob.matrix_world * bone.head)\n co_3d = arm_ob.matrix_world * bone.head\n bone_locations_3d[ibone] = (co_3d.x,\n co_3d.y,\n co_3d.z)\n bone_locations_2d[ibone] = (round(co_2d.x * render_size[0]),\n round(co_2d.y * render_size[1]))\n return (bone_locations_2d, bone_locations_3d)\n\n\n# reset the joint positions of the character according to its new shape\ndef reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene, cam_ob, reg_ivs, joint_reg):\n # since the regression is sparse, only the relevant vertex\n # elements (joint_reg) and their indices (reg_ivs) are loaded\n reg_vs = np.empty((len(reg_ivs), 3)) # empty array to hold vertices to regress from\n # zero the pose and trans to obtain joint positions in zero pose\n apply_trans_pose_shape(orig_trans, np.zeros(72), shape, ob, arm_ob, obname, scene, cam_ob)\n\n # obtain a mesh after applying modifiers\n bpy.ops.wm.memory_statistics()\n # me holds the vertices after applying the shape blendshapes\n me = ob.to_mesh(scene, True, 'PREVIEW')\n\n # fill the regressor vertices matrix\n for iiv, iv in enumerate(reg_ivs):\n reg_vs[iiv] = me.vertices[iv].co\n bpy.data.meshes.remove(me)\n\n # regress joint positions in rest pose\n joint_xyz = joint_reg.dot(reg_vs)\n # adapt joint positions in rest pose\n arm_ob.hide = False\n bpy.ops.object.mode_set(mode='EDIT')\n arm_ob.hide = True\n for ibone in range(24):\n bb = arm_ob.data.edit_bones[obname + '_' + part_match['bone_%02d' % ibone]]\n bboffset = bb.tail - bb.head\n bb.head = joint_xyz[ibone]\n bb.tail = bb.head + bboffset\n bpy.ops.object.mode_set(mode='OBJECT')\n return (shape)\n\n\n# load poses and shapes\ndef load_body_data(smpl_data, ob, obname, gender='female', idx=0):\n # load MoSHed data from CMU Mocap (only the given idx is loaded)\n\n # create a dictionary with key the sequence name and values the pose and trans\n cmu_keys = []\n for seq in smpl_data.files:\n if 
seq.startswith('pose_'):\n cmu_keys.append(seq.replace('pose_', ''))\n\n name = sorted(cmu_keys)[idx % len(cmu_keys)]\n\n cmu_parms = {}\n for seq in smpl_data.files:\n if seq == ('pose_' + name):\n cmu_parms[seq.replace('pose_', '')] = {'poses': smpl_data[seq],\n 'trans': smpl_data[seq.replace('pose_', 'trans_')]}\n\n # compute the number of shape blendshapes in the model\n n_sh_bshapes = len([k for k in ob.data.shape_keys.key_blocks.keys()\n if k.startswith('Shape')])\n\n # load all SMPL shapes\n fshapes = smpl_data['%sshapes' % gender][:, :n_sh_bshapes]\n\n return (cmu_parms, fshapes, name)\n\n#slightly different version of the above function pulled from main_part2\n#Not needed as the same parameters are returned.\n# def load_body_data2(smpl_data, idx=0):\n# cmu_keys = []\n# for seq in smpl_data.files:\n# if seq.startswith('pose_'):\n# cmu_keys.append(seq.replace('pose_', ''))\n#\n# name = sorted(cmu_keys)[idx % len(cmu_keys)]\n#\n# cmu_parms = {}\n# for seq in smpl_data.files:\n# if seq == ('pose_' + name):\n# cmu_parms[seq.replace('pose_', '')] = {'poses': smpl_data[seq],\n# 'trans': smpl_data[seq.replace('pose_', 'trans_')]}\n# return (cmu_parms, name)\n\nimport time\n\nstart_time = None\n\n\ndef log_message(message):\n elapsed_time = time.time() - start_time\n print(\"[%.2f s] %s\" % (elapsed_time, message))\n\n\ndef main():\n # time logging\n global start_time\n start_time = time.time()\n\n import argparse\n\n # parse commandline arguments\n log_message(sys.argv)\n parser = argparse.ArgumentParser(description='Generate synth dataset images.')\n parser.add_argument('--idx', type=int,\n help='idx of the requested sequence')\n parser.add_argument('--ishape', type=int,\n help='requested cut, according to the stride')\n parser.add_argument('--stride', type=int,\n help='stride amount, default 50')\n\n args = parser.parse_args(sys.argv[sys.argv.index(\"--\") + 1:])\n\n idx = args.idx\n ishape = args.ishape\n stride = args.stride\n\n log_message(\"input idx: %d\" % idx)\n log_message(\"input ishape: %d\" % ishape)\n log_message(\"input stride: %d\" % stride)\n\n if idx == None:\n exit(1)\n if ishape == None:\n exit(1)\n if stride == None:\n log_message(\"WARNING: stride not specified, using default value 50\")\n stride = 50\n\n # import idx info (name, split)\n idx_info = load(open(\"pkl/idx_info.pickle\", 'rb'))\n\n # get runpass\n (runpass, idx) = divmod(idx, len(idx_info))\n\n log_message(\"runpass: %d\" % runpass)\n log_message(\"output idx: %d\" % idx)\n idx_info = idx_info[idx]\n log_message(\"sequence: %s\" % idx_info['name'])\n log_message(\"nb_frames: %f\" % idx_info['nb_frames'])\n log_message(\"use_split: %s\" % idx_info['use_split'])\n\n # import configuration\n log_message(\"Importing configuration\")\n import config\n params = config.load_file('config', 'SYNTH_DATA')\n\n smpl_data_folder = params['smpl_data_folder']\n smpl_data_filename = params['smpl_data_filename']\n bg_path = params['bg_path']\n resy = params['resy']\n resx = params['resx']\n clothing_option = params['clothing_option'] # grey, nongrey or all\n tmp_path = params['tmp_path']\n output_path = params['output_path']\n output_types = params['output_types']\n stepsize = params['stepsize']\n clipsize = params['clipsize']\n openexr_py2_path = params['openexr_py2_path']\n\n # compute number of cuts\n nb_ishape = max(1, int(np.ceil((idx_info['nb_frames'] - (clipsize - stride)) / stride)))\n log_message(\"Max ishape: %d\" % (nb_ishape - 1))\n\n if ishape == None:\n exit(1)\n\n assert (ishape < nb_ishape)\n\n # name is 
set given idx\n name = idx_info['name']\n output_path = join(output_path, 'run%d' % runpass, name.replace(\" \", \"\"))\n params['output_path'] = output_path\n tmp_path = join(tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(\" \", \"\"), (ishape + 1)))\n params['tmp_path'] = tmp_path\n\n # check if already computed\n # + clean up existing tmp folders if any\n if exists(tmp_path) and tmp_path != \"\" and tmp_path != \"/\":\n os.system('rm -rf %s' % tmp_path)\n rgb_vid_filename = \"%s_c%04d.mp4\" % (join(output_path, name.replace(' ', '')), (ishape + 1))\n # if os.path.isfile(rgb_vid_filename):\n # log_message(\"ALREADY COMPUTED - existing: %s\" % rgb_vid_filename)\n # return 0\n\n # create tmp directory\n if not exists(tmp_path):\n mkdir_safe(tmp_path)\n\n # >> don't use random generator before this point <<\n\n # initialize RNG with seeds from sequence id\n import hashlib\n s = \"synth_data:%d:%d:%d\" % (idx, runpass, ishape)\n seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)\n log_message(\"GENERATED SEED %d from string '%s'\" % (seed_number, s))\n random.seed(seed_number)\n np.random.seed(seed_number)\n\n if (output_types['vblur']):\n vblur_factor = np.random.normal(0.5, 0.5)\n params['vblur_factor'] = vblur_factor\n\n log_message(\"Setup Blender\")\n\n # create copy-spher.harm. directory if not exists\n sh_dir = join(tmp_path, 'spher_harm')\n if not exists(sh_dir):\n mkdir_safe(sh_dir)\n sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx))\n os.system('cp spher_harm/sh.osl %s' % sh_dst)\n\n genders = {0: 'female', 1: 'male'}\n # pick random gender\n gender = choice(genders)\n\n scene = bpy.data.scenes['Scene']\n scene.render.engine = 'CYCLES'\n bpy.data.materials['Material'].use_nodes = True\n scene.cycles.shading_system = True\n scene.use_nodes = True\n\n log_message(\"Listing background images\")\n bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split'])\n nh_txt_paths = []\n with open(bg_names) as f:\n for line in f:\n nh_txt_paths.append(join(bg_path, line))\n\n # grab clothing names\n log_message(\"clothing: %s\" % clothing_option)\n with open(join(smpl_data_folder, 'textures', '%s_%s.txt' % (gender, idx_info['use_split']))) as f:\n txt_paths = f.read().splitlines()\n\n # if using only one source of clothing\n if clothing_option == 'nongrey':\n txt_paths = [k for k in txt_paths if 'nongrey' in k]\n elif clothing_option == 'grey':\n txt_paths = [k for k in txt_paths if 'nongrey' not in k]\n\n # random clothing texture\n cloth_img_name = choice(txt_paths)\n cloth_img_name = join(smpl_data_folder, cloth_img_name)\n cloth_img = bpy.data.images.load(cloth_img_name)\n\n # random background\n bg_img_name = choice(nh_txt_paths)[:-1]\n bg_img = bpy.data.images.load(bg_img_name)\n\n log_message(\"Loading parts segmentation\")\n beta_stds = np.load(join(smpl_data_folder, ('%s_beta_stds.npy' % gender)))\n\n log_message(\"Building materials tree\")\n mat_tree = bpy.data.materials['Material'].node_tree\n create_sh_material(mat_tree, sh_dst, cloth_img)\n res_paths = create_composite_nodes(scene.node_tree, params, img=bg_img, idx=idx)\n\n log_message(\"Loading smpl data\")\n smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))\n\n log_message(\"Initializing scene\")\n camera_distance = np.random.normal(8.0, 1)\n params['camera_distance'] = camera_distance\n ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)\n\n setState0()\n ob.select = True\n bpy.context.scene.objects.active = ob\n segmented_materials = True # True: 0-24, False: 
expected to have 0-1 bg/fg\n\n log_message(\"Creating materials segmentation\")\n # create material segmentation\n if segmented_materials:\n materials = create_segmentation(ob, params)\n prob_dressed = {'leftLeg': .5, 'leftArm': .9, 'leftHandIndex1': .01,\n 'rightShoulder': .8, 'rightHand': .01, 'neck': .01,\n 'rightToeBase': .9, 'leftShoulder': .8, 'leftToeBase': .9,\n 'rightForeArm': .5, 'leftHand': .01, 'spine': .9,\n 'leftFoot': .9, 'leftUpLeg': .9, 'rightUpLeg': .9,\n 'rightFoot': .9, 'head': .01, 'leftForeArm': .5,\n 'rightArm': .5, 'spine1': .9, 'hips': .9,\n 'rightHandIndex1': .01, 'spine2': .9, 'rightLeg': .5}\n else:\n materials = {'FullBody': bpy.data.materials['Material']}\n prob_dressed = {'FullBody': .6}\n\n orig_pelvis_loc = (arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname + '_Pelvis'].head.copy()) - Vector(\n (-1., 1., 1.))\n orig_cam_loc = cam_ob.location.copy()\n\n # unblocking both the pose and the blendshape limits\n for k in ob.data.shape_keys.key_blocks.keys():\n bpy.data.shape_keys[\"Key\"].key_blocks[k].slider_min = -10\n bpy.data.shape_keys[\"Key\"].key_blocks[k].slider_max = 10\n\n log_message(\"Loading body data\")\n cmu_parms, fshapes, name = load_body_data(smpl_data, ob, obname, idx=idx, gender=gender)\n\n log_message(\"Loaded body data for %s\" % name)\n\n nb_fshapes = len(fshapes)\n if idx_info['use_split'] == 'train':\n fshapes = fshapes[:int(nb_fshapes * 0.8)]\n elif idx_info['use_split'] == 'test':\n fshapes = fshapes[int(nb_fshapes * 0.8):]\n\n # pick random real body shape\n shape = choice(fshapes) # +random_shape(.5) can add noise\n # shape = random_shape(3.) # random body shape\n\n # example shapes\n # shape = np.zeros(10) #average\n # shape = np.array([ 2.25176191, -3.7883464 , 0.46747496, 3.89178988, 2.20098416, 0.26102114, -3.07428093, 0.55708514, -3.94442258, -2.88552087]) #fat\n # shape = np.array([-2.26781107, 0.88158132, -0.93788176, -0.23480508, 1.17088298, 1.55550789, 0.44383225, 0.37688275, -0.27983086, 1.77102953]) #thin\n # shape = np.array([ 0.00404852, 0.8084637 , 0.32332591, -1.33163664, 1.05008727, 1.60955275, 0.22372946, -0.10738459, 0.89456312, -1.22231216]) #short\n # shape = np.array([ 3.63453289, 1.20836171, 3.15674431, -0.78646793, -1.93847355, -0.32129994, -0.97771656, 0.94531640, 0.52825811, -0.99324327]) #tall\n\n ndofs = 10\n\n scene.objects.active = arm_ob\n orig_trans = np.asarray(arm_ob.pose.bones[obname + '_Pelvis'].location).copy()\n\n # create output directory\n if not exists(output_path):\n mkdir_safe(output_path)\n\n # spherical harmonics material needs a script to be loaded and compiled\n scs = []\n for mname, material in materials.items():\n scs.append(material.node_tree.nodes['Script'])\n scs[-1].filepath = sh_dst\n scs[-1].update()\n\n rgb_dirname = name.replace(\" \", \"\") + '_c%04d.mp4' % (ishape + 1)\n rgb_path = join(tmp_path, rgb_dirname)\n\n data = cmu_parms[name]\n\n fbegin = ishape * stepsize * stride\n fend = min(ishape * stepsize * stride + stepsize * clipsize, len(data['poses']))\n\n log_message(\"Computing how many frames to allocate\")\n N = len(data['poses'][fbegin:fend:stepsize])\n log_message(\"Allocating %d frames in mat file\" % N)\n\n # force recomputation of joint angles unless shape is all zeros\n curr_shape = np.zeros_like(shape)\n nframes = len(data['poses'][::stepsize])\n\n matfile_info = join(output_path, name.replace(\" \", \"\") + \"_c%04d_info.mat\" % (ishape + 1))\n log_message('Working on %s' % matfile_info)\n\n # allocate\n dict_info = {}\n dict_info['bg'] = np.zeros((N,), 
dtype=np.object) # background image path\n dict_info['camLoc'] = np.empty(3) # (1, 3)\n dict_info['clipNo'] = ishape + 1\n dict_info['cloth'] = np.zeros((N,), dtype=np.object) # clothing texture image path\n dict_info['gender'] = np.empty(N, dtype='uint8') # 0 for male, 1 for female\n dict_info['joints2D'] = np.empty((2, 24, N), dtype='float32') # 2D joint positions in pixel space\n dict_info['joints3D'] = np.empty((3, 24, N), dtype='float32') # 3D joint positions in world coordinates\n dict_info['light'] = np.empty((9, N), dtype='float32')\n dict_info['pose'] = np.empty((data['poses'][0].size, N), dtype='float32') # joint angles from SMPL (CMU)\n dict_info['sequence'] = name.replace(\" \", \"\") + \"_c%04d\" % (ishape + 1)\n dict_info['shape'] = np.empty((ndofs, N), dtype='float32')\n dict_info['zrot'] = np.empty(N, dtype='float32')\n dict_info['camDist'] = camera_distance\n dict_info['stride'] = stride\n\n if name.replace(\" \", \"\").startswith('h36m'):\n dict_info['source'] = 'h36m'\n else:\n dict_info['source'] = 'cmu'\n\n if (output_types['vblur']):\n dict_info['vblur_factor'] = np.empty(N, dtype='float32')\n\n # for each clipsize'th frame in the sequence\n get_real_frame = lambda ifr: ifr\n random_zrot = 0\n reset_loc = False\n batch_it = 0\n curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene,\n cam_ob, smpl_data['regression_verts'], smpl_data['joint_regressor'])\n random_zrot = 2 * np.pi * np.random.rand()\n\n arm_ob.animation_data_clear()\n cam_ob.animation_data_clear()\n\n # create a keyframe animation with pose, translation, blendshapes and camera motion\n # LOOP TO CREATE 3D ANIMATION\n for seq_frame, (pose, trans) in enumerate(\n zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):\n iframe = seq_frame\n scene.frame_set(get_real_frame(seq_frame))\n\n # apply the translation, pose and shape to the character\n apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname, scene, cam_ob, get_real_frame(seq_frame))\n dict_info['shape'][:, iframe] = shape[:ndofs]\n dict_info['pose'][:, iframe] = pose\n dict_info['gender'][iframe] = list(genders)[list(genders.values()).index(gender)]\n if (output_types['vblur']):\n dict_info['vblur_factor'][iframe] = vblur_factor\n\n arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion(Euler((0, 0, random_zrot), 'XYZ'))\n arm_ob.pose.bones[obname + '_root'].keyframe_insert('rotation_quaternion', frame=get_real_frame(seq_frame))\n dict_info['zrot'][iframe] = random_zrot\n\n scene.update()\n\n # Bodies centered only in each minibatch of clipsize frames\n if seq_frame == 0 or reset_loc:\n reset_loc = False\n new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname + '_Pelvis'].head.copy()\n cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() - orig_pelvis_loc.copy())\n cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))\n dict_info['camLoc'] = np.array(cam_ob.location)\n\n scene.node_tree.nodes['Image'].image = bg_img\n\n for part, material in materials.items():\n material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (0, 0)\n\n # random light\n sh_coeffs = .7 * (2 * np.random.rand(9) - 1)\n sh_coeffs[\n 0] = .5 + .9 * np.random.rand() # Ambient light (first coeff) needs a minimum is ambient. 
Rest is uniformly distributed, higher means brighter.\n    sh_coeffs[1] = -.7 * np.random.rand()\n\n    for ish, coeff in enumerate(sh_coeffs):\n        for sc in scs:\n            sc.inputs[ish + 1].default_value = coeff\n\n    # iterate over the keyframes and render\n    # LOOP TO RENDER\n    for seq_frame, (pose, trans) in enumerate(\n            zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):\n        scene.frame_set(get_real_frame(seq_frame))\n        iframe = seq_frame\n\n        dict_info['bg'][iframe] = bg_img_name\n        dict_info['cloth'][iframe] = cloth_img_name\n        dict_info['light'][:, iframe] = sh_coeffs\n\n        scene.render.use_antialiasing = False\n        scene.render.filepath = join(rgb_path, 'Image%04d.png' % get_real_frame(seq_frame))\n\n        log_message(\"Rendering frame %d\" % seq_frame)\n\n        # disable render output\n        logfile = '/dev/null'\n        open(logfile, 'a').close()\n        old = os.dup(1)\n        sys.stdout.flush()\n        os.close(1)\n        os.open(logfile, os.O_WRONLY)\n\n        # Render\n        bpy.ops.render.render(write_still=True)\n\n        # disable output redirection\n        os.close(1)\n        os.dup(old)\n        os.close(old)\n
\n        # NOTE:\n        # ideally, pixels should be readable from a viewer node, but I get only zeros\n        # --> https://ammous88.wordpress.com/2015/01/16/blender-access-render-results-pixels-directly-from-python-2/\n        # len(np.asarray(bpy.data.images['Render Result'].pixels) is 0\n        # Therefore we write them to temporary files and read with OpenEXR library (available for python2) in main_part2.py\n        # Alternatively, if you don't want to use OpenEXR library, the following commented code does loading with Blender functions, but it can cause memory leak.\n        # If you want to use it, copy necessary lines from main_part2.py such as definitions of dict_normal, matfile_normal...\n\n        # for k, folder in res_paths.items():\n        #     if not k == 'vblur' and not k == 'fg':\n        #         path = join(folder, 'Image%04d.exr' % get_real_frame(seq_frame))\n        #         render_img = bpy.data.images.load(path)\n        #         # render_img.pixels size is width * height * 4 (rgba)\n        #         arr = np.array(render_img.pixels[:]).reshape(resx, resy, 4)[::-1, :, :]  # images are vertically flipped\n        #         if k == 'normal':  # 3 channels, original order\n        #             mat = arr[:, :, :3]\n        #             dict_normal['normal_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)\n        #         elif k == 'gtflow':\n        #             mat = arr[:, :, 1:3]\n        #             dict_gtflow['gtflow_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)\n        #         elif k == 'depth':\n        #             mat = arr[:, :, 0]\n        #             dict_depth['depth_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)\n        #         elif k == 'segm':\n        #             mat = arr[:, :, 0]\n        #             dict_segm['segm_%d' % (iframe + 1)] = mat.astype(np.uint8, copy=False)\n        #\n        #         # remove the image to release memory, object handles, etc.\n        #         render_img.user_clear()\n        #         bpy.data.images.remove(render_img)\n
\n        # bone locations should be saved after rendering so that the bones are updated\n        bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene, cam_ob)\n        dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D)\n        dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D)\n\n        reset_loc = (bone_locs_2D.max(axis=-1) > 256).any() or (bone_locs_2D.min(axis=0) < 0).any()\n        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion((1, 0, 0, 0))\n\n    # save a .blend file for debugging:\n    # bpy.ops.wm.save_as_mainfile(filepath=join(tmp_path, 'pre.blend'))\n\n    # save RGB data with ffmpeg (if you don't have h264 codec, you can replace with another one and control the quality with something like -q:v 3)\n    cmd_ffmpeg = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 ''%s_c%04d.mp4''' % (\n        join(rgb_path, 'Image%04d.png'), join(output_path, name.replace(' ', '')), (ishape + 1))\n    log_message(\"Generating RGB video (%s)\" % cmd_ffmpeg)\n    os.system(cmd_ffmpeg)\n\n    if (output_types['vblur']):\n        cmd_ffmpeg_vblur = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" ''%s_c%04d.mp4''' % (\n            join(res_paths['vblur'], 'Image%04d.png'), join(output_path, name.replace(' ', '') + '_vblur'), (ishape + 1))\n        log_message(\"Generating vblur video (%s)\" % cmd_ffmpeg_vblur)\n        os.system(cmd_ffmpeg_vblur)\n\n    if (output_types['fg']):\n        cmd_ffmpeg_fg = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 ''%s_c%04d.mp4''' % (\n            join(res_paths['fg'], 'Image%04d.png'), join(output_path, name.replace(' ', '') + '_fg'), (ishape + 1))\n        log_message(\"Generating fg video (%s)\" % cmd_ffmpeg_fg)\n        os.system(cmd_ffmpeg_fg)\n\n    cmd_tar = 'tar -czvf %s/%s.tar.gz -C %s %s' % (output_path, rgb_dirname, tmp_path, rgb_dirname)\n    log_message(\"Tarballing the images (%s)\" % cmd_tar)\n    os.system(cmd_tar)\n\n    # save annotation excluding png/exr data to _info.mat file\n    import scipy.io\n    scipy.io.savemat(matfile_info, dict_info, do_compression=True)\n\n    log_message(\"start part 2\")\n\n    # if 'openexr_py2_path' in locals() or 'openexr_py2_path' in globals():\n    #     for exr_path in openexr_py2_path.split(':'):\n    #         sys.path.insert(1, exr_path)\n\n    # to install OpenEXR:\n    # export ARCHFLAGS = \"-arch x86_64\"\n    # CPPFLAGS = \"-std=c++11\"\n\n    import OpenEXR\n    import array\n    import Imath\n
\n    # .mat files\n    matfile_normal = join(output_path, name.replace(\" \", \"\") + \"_c%04d_normal.mat\" % (ishape + 1))\n    matfile_gtflow = join(output_path, name.replace(\" \", \"\") + \"_c%04d_gtflow.mat\" % (ishape + 1))\n    matfile_depth = join(output_path, name.replace(\" \", \"\") + \"_c%04d_depth.mat\" % (ishape + 1))\n    matfile_segm = join(output_path, name.replace(\" \", \"\") + \"_c%04d_segm.mat\" % (ishape + 1))\n    dict_normal = {}\n    dict_gtflow = {}\n    dict_depth = {}\n    dict_segm = {}\n    get_real_frame = lambda ifr: ifr\n    FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)\n\n    # LOOP OVER FRAMES\n    for seq_frame, (pose, trans) in enumerate(\n            zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):\n        iframe = seq_frame\n\n        log_message(\"Processing frame %d\" % iframe)\n\n        for k, folder in res_paths.items():\n            if not k == 'vblur' and not k == 'fg':\n                path = join(folder, 'Image%04d.exr' % get_real_frame(seq_frame))\n                exr_file = OpenEXR.InputFile(path)\n                if k == 'normal':\n                    mat = np.transpose(np.reshape(\n                        [array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in (\"R\", \"G\", \"B\")],\n                        (3, resx, resy)), (1, 2, 0))\n                    dict_normal['normal_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)  # +1 for the 1-indexing\n                elif k == 'gtflow':\n                    mat = np.transpose(\n                        np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in (\"R\", \"G\")],\n                                   (2, resx, resy)), (1, 2, 0))\n                    dict_gtflow['gtflow_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)\n                elif k == 'depth':\n                    mat = np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in (\"R\")],\n                                     (resx, resy))\n                    dict_depth['depth_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)\n                elif k == 'segm':\n                    mat = np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in (\"R\")],\n                                     (resx, resy))\n                    dict_segm['segm_%d' % (iframe + 1)] = mat.astype(np.uint8, copy=False)\n                # remove(path)\n\n    scipy.io.savemat(matfile_normal, dict_normal, do_compression=True)\n    scipy.io.savemat(matfile_gtflow, dict_gtflow, do_compression=True)\n    scipy.io.savemat(matfile_depth, dict_depth, do_compression=True)\n    scipy.io.savemat(matfile_segm, dict_segm, do_compression=True)\n\n    # cleaning up tmp\n    if tmp_path != \"\" and tmp_path != \"/\":\n        log_message(\"Cleaning up tmp\")\n        os.system('rm -rf %s' % tmp_path)\n\n    log_message(\"Completed batch\")\n\n\nif __name__ == '__main__':\n    main()\n"
] | [
[
"numpy.random.seed",
"numpy.asarray",
"numpy.eye",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.ceil",
"numpy.random.normal",
"numpy.zeros_like",
"numpy.random.rand",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LoicDagnas/tensorflow-onnx | [
"6691850e79047d05d85017573170fd8240393b57"
] | [
"tf2onnx/optimizer/transpose_optimizer.py"
] | [
"# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Transpose Optimizer.\"\"\"\n\nfrom collections import defaultdict\n\nimport numpy as np\nimport onnx\nfrom tf2onnx.constants import NCHW_TO_NHWC, NHWC_TO_NCHW, NCDHW_TO_NDHWC, NDHWC_TO_NCDHW, TARGET_CHANNELS_LAST\nfrom .. import utils\nfrom .optimizer_base import GraphOptimizerBase\n\n\n# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,abstract-method\n# FIXME:\n# pylint: disable=unused-variable\n\ndef is_transpose(transpose_node):\n perm_attr = transpose_node.get_attr('perm')\n return transpose_node.type == \"Transpose\" and perm_attr\n\n\ndef is_tranpose_of_type(node, perm):\n perm_attr = node.get_attr('perm')\n return node.type == \"Transpose\" and perm_attr and perm_attr.ints == perm\n\n\ndef is_useless_transpose(transpose_node):\n perm_attr = transpose_node.get_attr('perm')\n return transpose_node.type == \"Transpose\" and perm_attr and perm_attr.ints == list(range(len(perm_attr.ints)))\n\n\ndef get_transpose_rank(trans):\n return len(trans.get_attr('perm').ints)\n\n\ndef invert_perm(perm):\n inv = [0] * len(perm)\n for i, p in enumerate(perm):\n inv[p] = i\n return inv\n\n\nclass TransposeOptimizer(GraphOptimizerBase):\n \"\"\"Transpose Optimizer.\"\"\"\n\n def __init__(self):\n super(TransposeOptimizer, self).__init__()\n\n self._handler_map = {}\n self._force_stop = {}\n\n self._initialize_handlers()\n self._g = None\n self._output_names = None\n\n @property\n def nodes(self):\n return self._g.get_nodes()\n\n def pre_optimize_action(self):\n # make Reshape into a const, which then can be fused into Conv's weight for mobilenet_v1_75_192\n self._output_names = [self._g.get_node_by_output(out).name for out in self._g.outputs]\n ops = self.nodes\n constable_reshape_ops = [n for n in ops\n if (n.type == \"Reshape\"\n and n.inputs[0].is_const()\n and n.inputs[1].is_const())]\n for reshape_op in constable_reshape_ops:\n target_t = reshape_op.inputs[0].get_tensor_value(as_list=False)\n target_shape = reshape_op.inputs[1].get_tensor_value(as_list=True)\n for i, dim in enumerate(target_shape):\n if dim == 0:\n # In ORT a dim of 0 means the shape stays the same.\n target_shape[i] = target_t.shape[i]\n new_data = np.reshape(target_t, target_shape)\n const_name = reshape_op.output[0]\n self._g.remove_node(reshape_op.name)\n self._g.make_const(const_name, new_data)\n\n # point all children nodes inputs to the new node\n for output_name in reshape_op.output:\n for child in ops:\n for i, name in enumerate(child.input):\n if name == output_name:\n child.input[i] = const_name\n\n self._g.topological_sort(self._g.get_nodes())\n\n def post_optimize_action(self):\n def _calculate_new_shape(graph, op):\n input_shape = graph.get_shape(op.input[0])\n tagged_shape = [d if d == 1 else \"var\" + str(i) for i, d in enumerate(input_shape)]\n trim_shape = [d for d in tagged_shape if d != 1]\n\n perm = op.get_attr_value(\"perm\")\n perm_shape = [tagged_shape[p] for p in perm]\n trim_perm_shape = [d for d in perm_shape if d != 1]\n\n if trim_perm_shape != trim_shape:\n return None\n\n if input_shape.count(-1) <= 1:\n new_shape = [input_shape[p] for p in perm]\n return graph.make_const(utils.make_name(\"new_shape\"), np.array(new_shape, dtype=np.int64)).output[0]\n\n # reshape requires tha output shape can only contain one -1, if not some extra op needed.\n input_shape = graph.make_node(\"Shape\", [op.input[0]]).output[0]\n indice = graph.make_const(utils.make_name(\"indice\"), np.array(perm, np.int64)).output[0]\n\n return 
graph.make_node(\"Gather\", [input_shape, indice]).output[0]\n\n nodes = self.nodes\n # if channel==1 or height==width==1, replace transpose with reshape\n # replacing trans with reshape is because transpose will copy data even if this transpose doesn't nothing\n need_sort = False\n for op in nodes:\n if op.type == \"Transpose\" and \"perm\" in op.attr:\n input_shape = self._g.get_shape(op.input[0])\n if not input_shape:\n continue\n new_shape = _calculate_new_shape(self._g, op)\n if new_shape is not None:\n # replace transpose with reshape\n shapes = op.output_shapes\n dtypes = op.output_dtypes\n self._g.remove_node(op.name)\n self._g.make_node(\"Reshape\", [op.input[0], new_shape], name=op.name, outputs=op.output,\n shapes=shapes, dtypes=dtypes)\n need_sort = True\n if need_sort:\n self._g.topological_sort(self._g.get_nodes())\n\n def merge_duplicated_transposes(self):\n # strategy used in previous procedure is to move transpose nodes down if possible,\n # and it means that when a node has n outputs then n transpose will be generated,\n # so we should merge them back to one if they can't be eliminated in previous procedure.\n graph = self._g\n input_transposes_map = defaultdict(list)\n for node in graph.get_nodes():\n if node.type == \"Transpose\" and node.get_attr(\"perm\"):\n key = (node.input[0], str(node.get_attr(\"perm\").ints))\n input_transposes_map[key].append(node)\n\n for transposes in input_transposes_map.values():\n # merge transpose nodes into one: make nodes use the output of the first transpose node\n transpose_out = transposes[0].output[0]\n for node in transposes[1:]:\n old_transpose_out = node.output[0]\n graph.replace_all_inputs(old_transpose_out, transpose_out) # ops=graph.get_nodes()\n\n # dangling transpose nodes can be deleted\n graph.delete_unused_nodes(graph.outputs)\n\n def _optimize(self, graph):\n return self._apply_optimization(graph, self._optimize_at_current_graph_level)\n\n def _optimize_at_current_graph_level(self, graph):\n self._g = graph\n self.pre_optimize_action()\n no_action = False\n iteration_cnt = 0\n while not no_action:\n no_action = True\n nodes = self.nodes\n self._force_stop = {}\n for n in nodes:\n if is_transpose(n):\n if self._handle_nhwc_tranpose(n):\n no_action = False\n self.graph_been_opt = True\n iteration_cnt += 1\n # need break, because handler may change nodes set, making the n stale object\n # referencing already deleted elements\n break\n\n # Make sure node wasn't already deleted in _handle_nhwc_tranpose\n if graph.get_node_by_name(n.name) is not None and is_useless_transpose(n):\n no_action = False\n iteration_cnt += 1\n self._remove_useless_tranpose(n)\n break\n # for debugging purpose\n if \"stop\" in self._force_stop and self._force_stop[\"stop\"] == 1:\n break\n\n self.logger.debug(\"finish after \" + str(iteration_cnt) + \" iteration(s)\")\n\n self.merge_duplicated_transposes()\n self.post_optimize_action()\n return self._g\n\n def _initialize_handlers(self):\n self._handler_map = {\n \"Abs\": self._simple_through_handler,\n \"Add\": self._add_handler,\n \"ArgMax\": self._arg_min_max_handler,\n \"ArgMin\": self._arg_min_max_handler,\n \"Cast\": self._simple_through_handler,\n \"Clip\": self._simple_through_handler,\n \"Concat\": self._concat_handler,\n \"Elu\": self._simple_through_handler,\n \"Exp\": self._simple_through_handler,\n \"Identity\": self._identity_handler,\n \"LeakyRelu\": self._simple_through_handler,\n \"Log\": self._simple_through_handler,\n \"Max\": self._maxmin_handler,\n \"Min\": self._maxmin_handler,\n 
\"Mul\": self._mul_handler,\n \"Neg\": self._simple_through_handler,\n \"Pad\": self._pad_handler,\n \"PRelu\": self._prelu_handler,\n \"Reciprocal\": self._simple_through_handler,\n \"ReduceLogSum\": self._reduce_handler,\n \"ReduceLogSumExp\": self._reduce_handler,\n \"ReduceMax\": self._reduce_handler,\n \"ReduceMean\": self._reduce_handler,\n \"ReduceMin\": self._reduce_handler,\n \"ReduceProd\": self._reduce_handler,\n \"ReduceSum\": self._reducesum_handler,\n \"ReduceSumSquare\": self._reduce_handler,\n \"Relu\": self._simple_through_handler,\n \"Shape\": self._shape_handler,\n \"Sigmoid\": self._simple_through_handler,\n \"Sum\": self._sum_handler,\n \"Slice\": self._slice_handler,\n \"Split\": self._split_handler,\n \"Softplus\": self._simple_through_handler,\n \"Sqrt\": self._simple_through_handler,\n \"Squeeze\": self._squeeze_handler,\n \"Sub\": self._sub_handler,\n \"Unsqueeze\": self._unsqueeze_handler,\n \"Tanh\": self._simple_through_handler,\n \"Tile\": self._tile_handler,\n \"Transpose\": self._transpose_handler,\n \"DequantizeLinear\": self._quantize_handler,\n \"QuantizeLinear\": self._quantize_handler,\n }\n\n def _handle_node_having_branches(self, trans, node):\n if not self._should_push_transpose(trans, node) or len(node.output) != 1:\n return False\n # create transpose pairs if some input are not.\n if not self._create_transpose_pairs_before_node(trans, node):\n return False\n # make sure node's all input transpose all have only 1 consumer node,\n # otherwise, it would impact their other output nodes\n if self._nodes_has_single_consumer_node(node.inputs):\n self._create_transpose_pairs_after_node(trans, node)\n input_transposes = set(node.inputs)\n for n in input_transposes:\n n_input = n.input[0]\n utils.make_sure(len(n.output) == 1, \"only expect single output\")\n self._g.replace_all_inputs(n.output[0], n_input) # ops=self._g.get_nodes()\n self._g.remove_node(n.name)\n\n utils.make_sure(len(node.output) == 1, \"only expect single output\")\n # currently we assume node only has 1 output, for cases where it is more than 1 for example Split\n # we need consider the fact that Split's multiple output will not always has data in NCHW/NHWC,\n # it might be a different shape.\n output_transposes = self._g.find_output_consumers(node.output[0])\n for n in output_transposes:\n n_input = n.input[0]\n utils.make_sure(len(n.output) == 1, \"only expect single output\")\n self._g.replace_all_inputs(n.output[0], n_input) # ops=self._g.get_nodes()\n self._g.remove_node(n.name)\n\n shape = self._g.get_shape(node.output[0])\n if shape:\n # only nhwc transpose can reach here\n perm_inv = invert_perm(trans.get_attr_value(\"perm\"))\n new_shape = [shape[i] for i in perm_inv]\n self._g.set_shape(node.output[0], new_shape)\n return True\n\n self.logger.debug(\"input transpose does not have single consumer, skipping...\")\n return False\n\n # get the input index of transpose op in node's inputs.\n def _get_input_index_for_trans(self, node, trans):\n input_index = 0\n for i in node.input:\n if i == trans.output[0]:\n break\n input_index += 1\n return input_index\n\n # the assumption is: both node and trans have only 1 output\n def _switch_transpose_and_node(self, node, trans, update_shape=True):\n if not self._nodes_has_single_consumer_node([trans]):\n return False\n\n input_index = self._get_input_index_for_trans(node, trans)\n\n self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes()\n self._g.replace_input(node, node.input[input_index], trans.input[0], 
input_index)\n self._g.replace_input(trans, trans.input[0], node.output[0], 0)\n\n # need to transpose node shape in backward direction as well after switch\n # otherwise, reshape added in post_optimize_action may not work correctly\n shape = self._g.get_shape(node.output[0])\n if update_shape and shape:\n # only nhwc transpose can reach here\n perm_inv = invert_perm(trans.get_attr_value(\"perm\"))\n new_shape = [shape[i] for i in perm_inv]\n self._g.set_shape(node.output[0], new_shape)\n self._g.set_shape(trans.output[0], shape)\n return True\n\n # if return value is True, then it means Transpose is handled as designed\n # otherwise, it means that we skip handling since it is not in our support set\n def _handle_nhwc_tranpose(self, trans):\n if trans.output[0] in self._g.outputs:\n self.logger.debug(\"%s connects to graph outputs, skip\", trans.output[0])\n return False\n out_nodes = self._g.find_output_consumers(trans.output[0])\n if len(out_nodes) == 1:\n p = out_nodes[0]\n if p.name in self._output_names:\n self.logger.debug(\"cannot move transpose down since it met output node %s\", p.name)\n return False\n\n if p.type in self._handler_map:\n op_handler = self._handler_map[p.type]\n return op_handler(trans, p)\n return False\n if out_nodes and trans.get_attr_value(\"perm\") in [NCHW_TO_NHWC, NCDHW_TO_NDHWC]:\n # Move transpose into branches to let Transposes can be \"handled\" in each branch.\n # This will add more transpose ops, so only do this if further optimization is likely (check perm).\n for n in out_nodes:\n branch_trans = n.graph.make_node(\"Transpose\", [trans.input[0]], attr=trans.get_onnx_attrs())\n n.graph.replace_input(n, trans.output[0], branch_trans.output[0])\n self._g.remove_node(trans.name)\n return False\n\n def _remove_useless_tranpose(self, trans):\n self._g.replace_all_inputs(trans.output[0], trans.input[0]) # ops=self._g.get_nodes()\n self._g.remove_node(trans.name)\n\n def _nodes_has_single_consumer_node(self, nodes):\n for n in nodes:\n for output in n.output:\n cnt = len(set(self._g.find_output_consumers(output)))\n if cnt != 1:\n return False\n return True\n\n def _cost_to_transpose(self, node, inp_id):\n if node.type in [\"Const\", \"Transpose\"]:\n # Transposes can be combined/folded, so there is no additional cost\n return 0\n prod = 1\n shape = self._g.get_shape(inp_id)\n if shape is None:\n return 500\n for d in shape:\n if d == -1:\n # Assume unknown dims are approx. 
20\n prod *= 20\n else:\n prod *= d\n return prod\n\n def _should_push_transpose(self, trans, node):\n perm = trans.get_attr_value(\"perm\")\n optimization_gains = 0\n removed_nchws = 0\n perm_to_push_down = [NCHW_TO_NHWC, NCDHW_TO_NDHWC]\n perm_to_push_up = [NHWC_TO_NCHW, NDHWC_TO_NCDHW]\n if self._g.is_target(TARGET_CHANNELS_LAST):\n perm_to_push_down, perm_to_push_up = perm_to_push_up, perm_to_push_down\n\n for n, inp_id in zip(node.inputs, node.input):\n if is_tranpose_of_type(n, perm):\n optimization_gains += self._cost_to_transpose(n.inputs[0], n.input[0])\n if perm in perm_to_push_down:\n removed_nchws += 1\n else:\n optimization_gains -= self._cost_to_transpose(n, inp_id)\n if perm in perm_to_push_up:\n removed_nchws -= 1\n if removed_nchws != 0:\n # Always push nchw transposes if possible\n return removed_nchws > 0\n return optimization_gains > 0\n\n def _get_non_nchw_transpose_output_nodes(self, trans, node):\n # we just support node having 1 output, we need consider cases where node has more than 1 outputs\n assert len(node.output) == 1\n perm = trans.get_attr_value(\"perm\")\n perm_inv = invert_perm(perm)\n non_nchw_tranpose_nodes = []\n consumers = self._g.find_output_consumers(node.output[0])\n for o in consumers:\n if not is_tranpose_of_type(o, perm_inv) and o not in non_nchw_tranpose_nodes:\n non_nchw_tranpose_nodes.append(o)\n return non_nchw_tranpose_nodes\n\n def _create_transpose_pairs_after_node(self, trans, node):\n assert len(node.output) == 1 # just support node who has 1 output\n non_nchw_trans_consumers = self._get_non_nchw_transpose_output_nodes(trans, node)\n # add Transpose(0, 3, 1, 2) and Transpose(0, 2, 3, 1) before each non_nchw_trans_consumers\n for consumer in non_nchw_trans_consumers:\n perm = trans.get_attr_value(\"perm\")\n perm_inv = invert_perm(perm)\n nchw_node = self._g.make_node(\"Transpose\", [node.output[0]], attr={\"perm\": perm_inv})\n nhwc_node = self._g.make_node(\"Transpose\", [nchw_node.output[0]], attr={\"perm\": perm})\n self._g.replace_input(consumer, node.output[0], nhwc_node.output[0])\n\n def _create_transpose_pairs_before_node(self, trans, node):\n perm = trans.get_attr_value(\"perm\")\n perm_inv = invert_perm(perm)\n trans_rank = len(perm)\n def shape_after_expand(ori_shape):\n # according to broadcasting rule to expand shape to 4D while not tile the tensor here\n # still count on the broadcasting op to tile the tensor\n if ori_shape.count(-1) >= 2:\n self.logger.warning(\"%s shape can contain one -1 at most, otherwise reshape op can't work\", node.name)\n return None\n ori_rank = len(ori_shape)\n new_shape = [1] * (trans_rank - ori_rank) + ori_shape\n return new_shape\n\n non_nhwc_trans_inputs = []\n for input_id, n in zip(node.input, node.inputs):\n if not is_tranpose_of_type(n, perm):\n # check in case node has two inputs coming from a same node output.\n if [input_id, n] not in non_nhwc_trans_inputs:\n non_nhwc_trans_inputs.append([input_id, n])\n\n # add Transpose NHWC_TO_NCHW and Transpose NCHW_TO_NHWC before each non_nhwc_trans_consumers\n shape_unknow = [input_id for input_id, _ in non_nhwc_trans_inputs if self._g.get_shape(input_id) is None]\n if shape_unknow:\n if self._g.opset <= 9:\n msg = \"%s 's shape is unknown, ConstantOfShape will be used which exists in version 9 or higher\" \\\n \"while graph's opset version is %s\" % (shape_unknow, self._g.opset)\n self.logger.warning(msg)\n return False\n\n for input_id, n in non_nhwc_trans_inputs:\n shape = self._g.get_shape(input_id)\n # if rank of n is not transpose rank, 
then we need to insert a reshape op before inserting a transpose\n # for example shape of n is [x, y], then output shape of reshape will be [1, 1, x, y] or [1, 1, 1, x, y]\n if shape is None:\n const_4 = self._g.make_const(utils.make_name(\"const_4\"), np.array([trans_rank], np.int64)).output[0]\n tensor_1 = onnx.helper.make_tensor(\"value\", onnx.TensorProto.INT64, [1], [1])\n shape_node = self._g.make_node(\"Shape\", [input_id]).output[0]\n rank_node = self._g.make_node(\"Shape\", [shape_node]).output[0]\n expand_rank = self._g.make_node(\"Sub\", [const_4, rank_node]).output[0]\n array_fill_1 = self._g.make_node(\"ConstantOfShape\", [expand_rank], attr={\"value\": tensor_1}).output[0]\n new_shape = self._g.make_node(\"Concat\", [array_fill_1, shape_node], attr={\"axis\": 0}).output[0]\n reshape = self._g.make_node(\"Reshape\", [input_id, new_shape]).output[0]\n input_of_new_trans = reshape\n elif len(shape) == trans_rank:\n input_of_new_trans = input_id\n else:\n shape = shape_after_expand(shape)\n if shape is None:\n return False\n const = self._g.make_const(utils.make_name(\"reshape_shape\"), np.array(shape, np.int64)).output[0]\n reshape = self._g.make_node(\"Reshape\", [input_id, const]).output[0]\n input_of_new_trans = reshape\n\n nchw_node = self._g.make_node(\"Transpose\", [input_of_new_trans], attr={\"perm\": perm_inv})\n nhwc_node = self._g.make_node(\"Transpose\", [nchw_node.output[0]], attr={\"perm\": perm})\n self._g.replace_input(node, input_id, nhwc_node.output[0])\n return True\n\n def _add_handler(self, trans, node):\n if node.inputs[1].is_const():\n t_p = trans.inputs[0]\n if t_p.type in (\"Conv\", \"ConvTranspose\") and len(t_p.input) == 2:\n # if Conv or ConvTranspose's bias input is not set, then we set, otherwise, we don't set\n # todo: maybe we can add already set bias with the input??? 
try later\n\n if not self._nodes_has_single_consumer_node([t_p]):\n self.logger.debug(\"Conv does not have single consumer, can not merge Conv and Add\")\n return self._handle_node_having_branches(trans, node)\n\n if not self._nodes_has_single_consumer_node([trans]):\n self.logger.debug(\"input transpose does not have single consumer, skipping...\")\n return False\n\n target_node = node.inputs[1]\n numpy_val = target_node.get_tensor_value(as_list=False)\n # Optional 1D bias to be added to the convolution, has size of M\n if len(numpy_val.shape) - numpy_val.shape.count(1) > 1:\n self.logger.debug(\"Bias is not 1D, can not merge Conv and Add\")\n return self._handle_node_having_branches(trans, node)\n\n bias_size = max(numpy_val.shape)\n size_m = t_p.inputs[1].output_shapes[0][0]\n if bias_size != size_m:\n self.logger.debug(\"Bias size is not M, can not merge Conv and Add\")\n return self._handle_node_having_branches(trans, node)\n\n target_val = numpy_val.reshape(bias_size)\n target_node.set_tensor_value(target_val)\n\n conv_inputs = [t_p.input[0], t_p.input[1], node.input[1]]\n conv_node = self._g.make_node(t_p.type, conv_inputs, attr=t_p.get_onnx_attrs())\n self._g.replace_input(trans, trans.input[0], utils.port_name(conv_node.name), 0)\n self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes()\n self._g.remove_node(t_p.name)\n self._g.remove_node(node.name)\n return True\n return self._handle_node_having_branches(trans, node)\n\n def _transpose_handler(self, trans, node):\n perm = trans.get_attr_value(\"perm\")\n perm_inv = invert_perm(perm)\n if is_tranpose_of_type(node, perm_inv):\n for g in {self._g, node.graph}:\n g.replace_all_inputs(node.output[0], trans.input[0]) # ops=g.get_nodes()\n\n shape = node.graph.get_shape(node.output[0])\n dtype = node.graph.get_dtype(node.output[0])\n if node.output[0] in node.graph.outputs:\n node.graph.make_node(\"Identity\", [trans.input[0]],\n outputs=node.output, shapes=[shape], dtypes=[dtype])\n self._g.remove_node(trans.name)\n node.graph.remove_node(node.name)\n return True\n return False\n\n def _maxmin_handler(self, trans, node):\n return self._handle_node_having_branches(trans, node)\n\n def _mul_handler(self, trans, node):\n multiplier_input_id = None\n multiplier_input_node = None\n multiplier_input_idx = None\n for idx, (input_id, input_node) in enumerate(zip(node.input, node.inputs)):\n if input_id != trans.output[0]:\n multiplier_input_id = input_id\n multiplier_input_node = input_node\n multiplier_input_idx = idx\n\n # node's inputs may come from one same node. 
if so the multiplier_input_node may be none\n if multiplier_input_node is None:\n if not self._nodes_has_single_consumer_node([trans]):\n return False\n self._g.replace_all_inputs(node.output[0], trans.output[0])\n self._g.replace_input(node, node.input[0], trans.input[0], 0)\n self._g.replace_input(node, node.input[1], trans.input[0], 1)\n self._g.replace_input(trans, trans.input[0], node.output[0], 0)\n return True\n\n # convert mul(trans(x), trans(y)) -> trans(mul(x, y))\n if is_tranpose_of_type(multiplier_input_node, trans.get_attr_value(\"perm\")):\n if not self._nodes_has_single_consumer_node([multiplier_input_node]):\n return False\n input_index = self._get_input_index_for_trans(node, multiplier_input_node)\n if not self._switch_transpose_and_node(node, trans):\n return False\n\n self._g.replace_input(node, node.input[input_index], multiplier_input_node.input[0], input_index)\n self._g.remove_node(multiplier_input_node.name)\n return True\n\n # handle const multipliers\n if not multiplier_input_node.is_const():\n return False\n multiplier = multiplier_input_node.get_tensor_value(as_list=False)\n\n # todo: apply this block if we have model case multiplier_input_id==0, and verify that.\n if multiplier_input_id == node.input[1]:\n t_p = trans.inputs[0]\n trans_rank = get_transpose_rank(trans)\n # make sure conv don't have bias set\n can_opt = t_p.type == \"Conv\" and t_p.inputs[1].is_const() and len(t_p.input) == 2 and trans_rank == 4\n can_opt = can_opt and self._nodes_has_single_consumer_node([t_p])\n if can_opt:\n conv = t_p\n numpy_val = conv.inputs[1].get_tensor_value(as_list=False)\n transposed_val = np.transpose(numpy_val, (2, 3, 1, 0))\n mul_val = multiplier\n result = np.multiply(transposed_val, mul_val)\n conv.inputs[1].set_tensor_value(np.transpose(result, (3, 2, 0, 1)))\n\n self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes()\n self._g.remove_node(node.name)\n return True\n\n # if the shape is (), we just move transpose after the mul\n if not multiplier.shape:\n return self._switch_transpose_and_node(node, trans)\n\n # if multiplier is 1-D\n if len(multiplier.shape) == 1 and multiplier.shape[0] == 1:\n # shape is (1)\n return self._switch_transpose_and_node(node, trans)\n\n # if multiplier has shape (N,) or (1, N) or (1, 1, N) ....\n if np.prod(multiplier.shape) == multiplier.shape[-1]:\n if not self._nodes_has_single_consumer_node([multiplier_input_node]):\n new_inp = self._g.copy_const(multiplier_input_node)\n self._g.replace_input(node, multiplier_input_id, new_inp.output[0], multiplier_input_idx)\n multiplier_input_node = new_inp\n perm = list(trans.get_attr('perm').ints)\n new_shape = np.ones(len(perm), dtype=np.int32)\n new_shape[perm[-1]] = multiplier.shape[-1]\n multiplier_input_node.set_tensor_value(multiplier.reshape(new_shape))\n return self._switch_transpose_and_node(node, trans)\n\n return False\n\n def _sum_handler(self, trans, node):\n inputs = node.inputs\n trans_shape = self._g.get_shape(trans.output[0])\n perm = list(trans.get_attr('perm').ints)\n untrans_idx = [perm.index(i) for i in range(len(perm))]\n\n # check if sum(trans(x1), trans(x2), const(x3), ...) 
can be switched\n for n in inputs:\n if n.type not in [\"Transpose\", \"Const\"]:\n return False\n if not self._nodes_has_single_consumer_node([n]):\n return False\n if n.is_const():\n # if graph is valid, op shapes should be valid\n # const is special case, in case of broadcasting\n # ensure rank matches\n n_shape = self._g.get_shape(n.output[0])\n if len(n_shape) != len(trans_shape):\n return False\n else:\n if list(n.get_attr('perm').ints) != perm:\n return False\n\n # switch to trans(sum(x1, x2, x3, ...))\n self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes()\n new_input = [n.output[0] if n.is_const() else n.input[0] for n in inputs]\n self._g.replace_inputs(node, new_input)\n self._g.replace_input(trans, trans.input[0], node.output[0], 0)\n\n # adjust shape if present\n shape = self._g.get_shape(node.output[0])\n if shape:\n self._g.set_shape(node.output[0], [shape[i] for i in untrans_idx])\n\n # update constants, remove dangling transposes\n for n in inputs:\n if n.is_const():\n val = n.get_tensor_value(as_list=False)\n new_val = np.transpose(val, untrans_idx)\n n.set_tensor_value(new_val)\n elif n.name != trans.name:\n self._g.remove_node(n.name)\n return True\n\n def _identity_handler(self, trans, node):\n if node.output[0] in node.graph.outputs:\n return False\n for g in {self._g, node.graph}:\n g.replace_all_inputs(node.output[0], trans.output[0]) # ops=g.get_nodes()\n node.graph.remove_node(node.name)\n return True\n\n def _concat_handler(self, trans, node):\n if self._handle_node_having_branches(trans, node):\n perm = trans.get_attr_value(\"perm\")\n axis = node.get_attr_value(\"axis\", 0)\n new_axis = perm[axis]\n node.set_attr(\"axis\", new_axis)\n return True\n return False\n\n def _split_handler(self, trans, node):\n # Todo: need handle cases where Slit node has more than 1 outputs.\n if self._handle_node_having_branches(trans, node):\n node.set_attr(\"axis\", 1)\n return True\n return False\n\n def _unsqueeze_handler(self, trans, node):\n trans_rank = get_transpose_rank(trans)\n perm = trans.get_attr_value(\"perm\")\n axes = None\n if node.get_attr(\"axes\"):\n axes = node.get_attr(\"axes\").ints\n if len(node.input) > 1 and node.inputs[1].is_const():\n axes = node.inputs[1].get_tensor_value(as_list=True)\n if axes is None:\n return False\n\n new_rank = trans_rank + len(axes)\n axes = sorted([a % new_rank for a in axes])\n # We have a choice of where to put the new axes for unsqueeze after we push the transpose. 
We will try to keep\n # them next to the axis they will be next to after transpose ex: a1bc -> ac1b not 1abc -> ac1b\n partner_axes = [a - i for i, a in enumerate(axes)]\n pre_perm_axes = [perm[a] if a < len(perm) else len(perm) for a in partner_axes]\n pre_perm_sorted = sorted((a, i) for i, a in enumerate(pre_perm_axes))\n new_axes = [a + pre_perm_sorted.index((a, i)) for i, a in enumerate(pre_perm_axes)]\n\n shift_map = []\n for i in range(new_rank):\n if i not in new_axes:\n shift_map.append(i)\n\n new_perm = []\n perm_i = 0\n axes_i = 0\n for i in range(new_rank):\n if i in axes:\n new_perm.append(new_axes[axes_i])\n axes_i += 1\n else:\n new_perm.append(shift_map[perm[perm_i]])\n perm_i += 1\n\n if not self._switch_transpose_and_node(node, trans, update_shape=False):\n return False\n\n new_axes_sorted = sorted(new_axes)\n trans.set_attr(\"perm\", new_perm)\n new_perm_inv = invert_perm(new_perm)\n if self._g.opset <= 12:\n node.set_attr(\"axes\", new_axes_sorted)\n else:\n new_axes_np = np.array(new_axes_sorted, dtype=np.int64)\n new_axes_const = self._g.make_const(utils.make_name(node.inputs[1].name), new_axes_np)\n self._g.replace_inputs(node, [node.input[0], new_axes_const.output[0]])\n\n shape = self._g.get_shape(node.output[0])\n if shape is not None:\n self._g.set_shape(trans.output[0], shape)\n mid_shape = [shape[p] for p in new_perm_inv]\n self._g.set_shape(node.output[0], mid_shape)\n\n return True\n\n def _squeeze_handler(self, trans, node):\n trans_rank = get_transpose_rank(trans)\n def _calculate_new_attr(ori_perm, ori_squeeze_axes):\n ori_squeeze_axes = [i if i >= 0 else i + trans_rank for i in ori_squeeze_axes]\n new_squeeze_axes = sorted([ori_perm[i] for i in ori_squeeze_axes])\n # calculate output shape after trans and squeeze\n n = len(ori_perm)\n input_shape = list(range(n))\n shape_after_trans = [input_shape[i] for i in ori_perm]\n output_shape = [shape_after_trans[i] for i in range(n) if i not in ori_squeeze_axes]\n # calculate new_perm\n # after switch, the output shape should be same, using this condtion we can figure the new perm\n shape_after_squeeze = [input_shape[i] for i in range(n) if i not in new_squeeze_axes]\n new_perm = [shape_after_squeeze.index(i) for i in output_shape]\n\n return new_perm, new_squeeze_axes\n\n if not self._nodes_has_single_consumer_node([trans]):\n return False\n\n axes = None\n # in opset 13, axes is an input not attr\n if node.get_attr(\"axes\"):\n axes = node.get_attr(\"axes\").ints\n if len(node.input) > 1 and node.inputs[1].is_const():\n axes = node.inputs[1].get_tensor_value(as_list=True)\n\n if axes is not None:\n # switch tran and squeeze\n # 1 switch\n self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes()\n self._g.replace_input(node, node.input[0], trans.input[0], 0)\n self._g.replace_input(trans, trans.input[0], node.output[0], 0)\n # 2 correct attr of nodes\n squeeze_axes = sorted(axes)\n trans_perm = list(trans.get_attr(\"perm\").ints)\n new_perm, new_squeeze_axes = _calculate_new_attr(ori_perm=trans_perm, ori_squeeze_axes=squeeze_axes)\n trans.set_attr(\"perm\", new_perm)\n if self._g.opset <= 12:\n node.set_attr(\"axes\", new_squeeze_axes)\n else:\n new_axes_np = np.array(new_squeeze_axes, dtype=np.int64)\n new_axes_const = self._g.make_const(utils.make_name(node.inputs[1].name), new_axes_np)\n self._g.replace_inputs(node, [node.input[0], new_axes_const.output[0]])\n # 3 set shape\n squeeze_shape = self._g.get_shape(node.output[0])\n self._g.set_shape(trans.output[0], squeeze_shape)\n 
input_shape = self._g.get_shape(node.input[0])\n if input_shape is not None:\n new_squeeze_output_shape = [input_shape[i] for i in range(trans_rank) if i not in new_squeeze_axes]\n else:\n new_squeeze_output_shape = [-1] * (trans_rank - len(new_squeeze_axes))\n self._g.set_shape(node.output[0], new_squeeze_output_shape)\n return True\n return False\n\n def _sub_handler(self, trans, node):\n return self._handle_node_having_branches(trans, node)\n\n def _pad_handler(self, trans, node):\n trans_rank = get_transpose_rank(trans)\n perm_inv = invert_perm(trans.get_attr_value(\"perm\"))\n # [N-start, H-start, W-start, C-start, N-end, H-end, W-end, C-end]\n def permute_pads(pads):\n return [pads[i] for i in perm_inv] + [pads[i + trans_rank] for i in perm_inv]\n\n if self._g.opset < 11:\n pads = node.get_attr('pads').ints # [x1_begin, x2_begin...x1_end, x2_end,...]\n new_pads = np.array(permute_pads(pads), np.int64)\n node.set_attr(\"pads\", new_pads)\n return self._switch_transpose_and_node(node, trans)\n\n input1 = node.inputs[1]\n if input1.is_const():\n if not self._nodes_has_single_consumer_node([input1]):\n input1 = self._g.copy_const(input1)\n self._g.replace_input(node, node.input[1], input1.output[0], 1)\n pads = input1.get_tensor_value()\n new_pads = np.array(permute_pads(pads), np.int64)\n input1.set_tensor_value(new_pads)\n return self._switch_transpose_and_node(node, trans)\n # when the second input is not a constant, let's shuffle it with Split followed by Concat\n # there are examples of models, where this non-constant input\n # gets constant folded anyway by a framework.\n split = self._g.make_node(\"Split\", inputs=[node.input[1]], attr={}, output_count=trans_rank * 2)\n pads = split.output\n new_pads = self._g.make_node(\"Concat\", permute_pads(pads), {'axis': 0})\n self._g.replace_input(node, node.input[1], new_pads.output[0], 1)\n return self._switch_transpose_and_node(node, trans)\n\n def _prelu_handler(self, trans, node):\n return self._handle_node_having_branches(trans, node)\n\n def _arg_min_max_handler(self, trans, node):\n axis = node.get_attr_value(\"axis\", 0)\n node.set_attr(\"axes\", [axis])\n result = self._reduce_handler(trans, node)\n new_axis = node.get_attr_value(\"axes\")[0]\n node.set_attr(\"axis\", new_axis)\n del node.attr[\"axes\"]\n return result\n\n def _reduce_handler(self, trans, node):\n keepdims = node.get_attr_value(\"keepdims\", 1)\n trans_rank = get_transpose_rank(trans)\n axes = node.get_attr_value(\"axes\", list(range(trans_rank)))\n perm = trans.get_attr(\"perm\").ints\n axes = [a + trans_rank if a < 0 else a for a in axes]\n new_axes = [perm[a] for a in axes]\n update_shape = keepdims == 1\n shape = self._g.get_shape(node.output[0])\n if not self._switch_transpose_and_node(node, trans, update_shape):\n return False\n node.set_attr(\"axes\", new_axes)\n if keepdims == 0:\n remaining_axes = []\n j = 0\n for i in range(trans_rank):\n if i in new_axes:\n remaining_axes.append(None)\n else:\n remaining_axes.append(j)\n j += 1\n new_perm = [remaining_axes[p] for p in perm if remaining_axes[p] is not None]\n if shape:\n new_shape = [shape[new_perm.index(i)] for i in range(len(new_perm))]\n self._g.set_shape(node.output[0], new_shape)\n trans.set_attr(\"perm\", new_perm)\n return True\n\n def _tile_handler(self, trans, node):\n if not node.inputs[1].is_const():\n return False\n if not self._switch_transpose_and_node(node, trans):\n return False\n repeats = node.inputs[1].get_tensor_value()\n perm_inv = invert_perm(trans.get_attr_value(\"perm\"))\n 
repeats_val = [repeats[p] for p in perm_inv]\n new_repeats = np.array(repeats_val, dtype=np.int64)\n if not self._nodes_has_single_consumer_node([node.inputs[1]]):\n new_inp = self._g.copy_const(node.inputs[1])\n self._g.replace_input(node, node.input[1], new_inp.output[0], 1)\n node.inputs[1].set_tensor_value(new_repeats)\n return True\n\n def _reducesum_handler(self, trans, node):\n keepdims = node.get_attr(\"keepdims\")\n if self._g.opset <= 12:\n return self._reduce_handler(trans, node)\n if keepdims and keepdims.i == 0:\n return False\n if node.inputs[1].is_const():\n axes = node.inputs[1].get_tensor_value()\n perm = trans.get_attr('perm').ints\n axes = [perm[axes[i]] for i in range(len(axes))]\n new_axes = np.array(axes, dtype=np.int64)\n if self._nodes_has_single_consumer_node([node.inputs[1]]):\n node.inputs[1].set_tensor_value(new_axes)\n else:\n new_axes_const = self._g.make_const(\n utils.make_name(node.inputs[1].name), new_axes\n )\n self._g.replace_input(node, node.input[1], new_axes_const.output[0], 1)\n return self._switch_transpose_and_node(node, trans)\n return False\n\n def _slice_handler(self, trans, node):\n axes = None\n if self._g.opset < 10:\n axes_values = node.get_attr(\"axes\")\n if not axes_values:\n return False\n axes = axes_values.ints\n perm = trans.get_attr_value(\"perm\")\n new_axes = [perm[axes[i]] for i in range(len(axes))]\n node.set_attr(\"axes\", new_axes)\n return self._switch_transpose_and_node(node, trans)\n # in opset 10, axes is input instead of an attribute.\n if len(node.inputs) >= 4 and node.inputs[3].is_const():\n axes = node.inputs[3].get_tensor_value(as_list=False)\n dtype = axes.dtype\n axes = axes.tolist()\n perm = trans.get_attr_value(\"perm\")\n axes = [perm[axes[i]] for i in range(len(axes))]\n # axes node might be shared\n new_axes = np.array(axes, dtype=dtype)\n if self._nodes_has_single_consumer_node([node.inputs[3]]):\n node.inputs[3].set_tensor_value(new_axes)\n else:\n new_axes_const = self._g.make_const(\n utils.make_name(node.inputs[3].name), new_axes\n )\n self._g.replace_input(node, node.input[3], new_axes_const.output[0], 3)\n return self._switch_transpose_and_node(node, trans)\n return False\n\n def _quantize_handler(self, trans, node):\n # Used for QuantizeLinear and DequantizeLinear\n if node.type == \"DequantizeLinear\":\n # Only push through if we will be able to push through consumers too.\n cons = self._g.find_output_consumers(node.output[0])\n # If there is a false positive in the handler map, the q_dq and transpose optimizers might fight.\n # Give up after 3 iterations. 
The q_dq optimizer should win so the dq hugs the op.\n if not all(n.type in self._handler_map for n in cons) or self.opt_iteration >= 3:\n return False\n if not self._switch_transpose_and_node(node, trans):\n return False\n if 'axis' in node.attr:\n perm = trans.get_attr_value(\"perm\")\n axis = node.get_attr_value(\"axis\")\n new_axis = perm[axis]\n node.set_attr(\"axis\", new_axis)\n return True\n\n def _simple_through_handler(self, trans, node):\n return self._switch_transpose_and_node(node, trans)\n\n def _shape_handler(self, trans, node):\n # input > trans > shape can be changed into input > shape > gather\n if not self._nodes_has_single_consumer_node([trans]):\n return False\n\n output_shape = self._g.get_shape(node.output[0])\n output_dtype = self._g.get_dtype(node.output[0])\n self._g.remove_node(trans.name)\n self._g.remove_node(node.name)\n shape_node = self._g.make_node(\"Shape\", [trans.input[0]])\n const_node = self._g.make_const(utils.make_name(\"Const\"), np.array(trans.get_attr(\"perm\").ints))\n gather_node = self._g.make_node(\"Gather\", [shape_node.output[0], const_node.output[0]], outputs=node.output)\n self._g.set_shape(gather_node.output[0], output_shape)\n self._g.set_dtype(gather_node.output[0], output_dtype)\n return True\n"
] | [
[
"numpy.multiply",
"numpy.reshape",
"numpy.prod",
"numpy.transpose",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DIYer22/maskrcnn-benchmark | [
"c297c690adc06e6ee9ce45df9f1406a72c0eeec8"
] | [
"demo/predictor.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport cv2\nimport torch\nfrom torchvision import transforms as T\n\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\nfrom maskrcnn_benchmark import layers as L\nfrom maskrcnn_benchmark.utils import cv2_util\n\ncategories = ['__background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] + ['中文']*200\n\nclass COCODemo(object):\n # COCO categories for pretty print\n CATEGORIES = categories\n\n def __init__(\n self,\n cfg,\n confidence_threshold=0.7,\n show_mask_heatmaps=False,\n masks_per_dim=2,\n min_image_size=224,\n ):\n self.cfg = cfg.clone()\n self.model = build_detection_model(cfg)\n self.model.eval()\n self.device = torch.device(cfg.MODEL.DEVICE)\n self.model.to(self.device)\n self.min_image_size = min_image_size\n\n save_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n\n self.transforms = self.build_transform()\n\n mask_threshold = -1 if show_mask_heatmaps else 0.5\n self.masker = Masker(threshold=mask_threshold, padding=1)\n\n # used to make colors for each class\n self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n\n self.cpu_device = torch.device(\"cpu\")\n self.confidence_threshold = confidence_threshold\n self.show_mask_heatmaps = show_mask_heatmaps\n self.masks_per_dim = masks_per_dim\n\n def build_transform(self):\n \"\"\"\n Creates a basic transformation that was used to train the models\n \"\"\"\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform\n\n def run_on_opencv_image(self, image):\n \"\"\"\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. 
Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n predictions = self.compute_prediction(image)\n top_predictions = self.select_top_predictions(predictions)\n result = image.copy()\n if self.show_mask_heatmaps:\n return self.create_mask_montage(result, top_predictions)\n result = self.overlay_boxes(result, top_predictions)\n if self.cfg.MODEL.MASK_ON:\n result = self.overlay_mask(result, top_predictions)\n if self.cfg.MODEL.KEYPOINT_ON:\n result = self.overlay_keypoints(result, top_predictions)\n result = self.overlay_class_names(result, top_predictions)\n\n return result\n \n def getBboxList(self, image):\n \"\"\"\n \"\"\"\n image = image[...,[2,1,0]]\n predictions = self.compute_prediction(image)\n top_predictions = self.select_top_predictions(predictions)\n return top_predictions\n\n def compute_prediction(self, original_image):\n \"\"\"\n Arguments:\n original_image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n # apply pre-processing to image\n image = self.transforms(original_image)\n # convert to an ImageList, padded so that it is divisible by\n # cfg.DATALOADER.SIZE_DIVISIBILITY\n image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)\n image_list = image_list.to(self.device)\n # compute predictions\n with torch.no_grad():\n predictions = self.model(image_list)\n predictions = [o.to(self.cpu_device) for o in predictions]\n\n # always single image is passed at a time\n prediction = predictions[0]\n\n # reshape prediction (a BoxList) into the original image size\n height, width = original_image.shape[:-1]\n prediction = prediction.resize((width, height))\n\n if prediction.has_field(\"mask\"):\n # if we have masks, paste the masks in the right position\n # in the image, as defined by the bounding boxes\n masks = prediction.get_field(\"mask\")\n # always single image is passed at a time\n masks = self.masker([masks], [prediction])[0]\n prediction.add_field(\"mask\", masks)\n return prediction\n\n def select_top_predictions(self, predictions):\n \"\"\"\n Select only predictions which have a `score` > self.confidence_threshold,\n and returns the predictions in descending order of score\n\n Arguments:\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores`.\n\n Returns:\n prediction (BoxList): the detected objects. 
Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]\n\n def compute_colors_for_labels(self, labels):\n \"\"\"\n Simple function that adds fixed colors depending on the class\n \"\"\"\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors\n\n def overlay_boxes(self, image, predictions):\n \"\"\"\n Adds the predicted boxes on top of the image\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `labels`.\n \"\"\"\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image\n\n def overlay_mask(self, image, predictions):\n \"\"\"\n Adds the instances contours for each predicted object.\n Each label has a different color.\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask` and `labels`.\n \"\"\"\n masks = predictions.get_field(\"mask\").numpy()\n labels = predictions.get_field(\"labels\")\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for mask, color in zip(masks, colors):\n thresh = mask[0, :, :, None]\n contours, hierarchy = cv2_util.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n image = cv2.drawContours(image, contours, -1, color, 3)\n\n composite = image\n\n return composite\n\n def overlay_keypoints(self, image, predictions):\n keypoints = predictions.get_field(\"keypoints\")\n kps = keypoints.keypoints\n scores = keypoints.get_field(\"logits\")\n kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()\n for region in kps:\n image = vis_keypoints(image, region.transpose((1, 0)))\n return image\n\n def create_mask_montage(self, image, predictions):\n \"\"\"\n Create a montage showing the probability heatmaps for each one one of the\n detected objects\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask`.\n \"\"\"\n masks = predictions.get_field(\"mask\")\n masks_per_dim = self.masks_per_dim\n masks = L.interpolate(\n masks.float(), scale_factor=1 / masks_per_dim\n ).byte()\n height, width = masks.shape[-2:]\n max_masks = masks_per_dim ** 2\n masks = masks[:max_masks]\n # handle case where we have less detections than max_masks\n if len(masks) < max_masks:\n masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)\n masks_padded[: len(masks)] = masks\n masks = masks_padded\n masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)\n result = torch.zeros(\n (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8\n )\n for y in range(masks_per_dim):\n start_y = y * height\n end_y = (y + 1) * height\n for x in range(masks_per_dim):\n start_x = x * width\n 
end_x = (x + 1) * width\n result[start_y:end_y, start_x:end_x] = masks[y, x]\n return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)\n\n def overlay_class_names(self, image, predictions):\n \"\"\"\n Adds detected class names and scores in the positions defined by the\n top-left corner of the predicted bounding box\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores` and `labels`.\n \"\"\"\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels = [self.CATEGORIES[i] for i in labels]\n boxes = predictions.bbox\n\n template = \"{}: {:.2f}\"\n for box, score, label in zip(boxes, scores, labels):\n x, y = box[:2]\n s = template.format(label, score)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n )\n\n return image\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom maskrcnn_benchmark.structures.keypoint import PersonKeypoints\n\ndef vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n \"\"\"Visualizes keypoints (adapted from vis_one_image).\n kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n \"\"\"\n dataset_keypoints = PersonKeypoints.NAMES\n kp_lines = PersonKeypoints.CONNECTIONS\n\n # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n\n # Perform the drawing on a copy of the image, to allow for blending.\n kp_mask = np.copy(img)\n\n # Draw mid shoulder / mid hip first for better visualization.\n mid_shoulder = (\n kps[:2, dataset_keypoints.index('right_shoulder')] +\n kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n sc_mid_shoulder = np.minimum(\n kps[2, dataset_keypoints.index('right_shoulder')],\n kps[2, dataset_keypoints.index('left_shoulder')])\n mid_hip = (\n kps[:2, dataset_keypoints.index('right_hip')] +\n kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n sc_mid_hip = np.minimum(\n kps[2, dataset_keypoints.index('right_hip')],\n kps[2, dataset_keypoints.index('left_hip')])\n nose_idx = dataset_keypoints.index('nose')\n if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n\n # Draw the keypoints.\n for l in range(len(kp_lines)):\n i1 = kp_lines[l][0]\n i2 = kp_lines[l][1]\n p1 = kps[0, i1], kps[1, i1]\n p2 = kps[0, i2], kps[1, i2]\n if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n cv2.line(\n kp_mask, p1, p2,\n color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n if kps[2, i1] > kp_thresh:\n cv2.circle(\n kp_mask, p1,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n if kps[2, i2] > kp_thresh:\n cv2.circle(\n kp_mask, p2,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n\n # Blend the keypoints.\n return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)\n"
] | [
[
"torch.zeros",
"torch.cat",
"matplotlib.pyplot.get_cmap",
"torch.tensor",
"numpy.copy",
"torch.no_grad",
"torch.nonzero",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
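The `select_top_predictions` step in the record above is self-contained enough to demonstrate with plain tensors. A minimal sketch, assuming only torch (the `BoxList` container and its `get_field` API are maskrcnn_benchmark internals, so ordinary tensors stand in for it here):

import torch

def select_top(scores, boxes, confidence_threshold=0.7):
    # Drop detections below the confidence threshold.
    keep = torch.nonzero(scores > confidence_threshold).squeeze(1)
    scores, boxes = scores[keep], boxes[keep]
    # Reorder the survivors by descending score, as the BoxList version does.
    _, idx = scores.sort(0, descending=True)
    return scores[idx], boxes[idx]

scores = torch.tensor([0.9, 0.3, 0.8])
boxes = torch.tensor([[0, 0, 10, 10], [5, 5, 8, 8], [2, 2, 6, 6]])
top_scores, top_boxes = select_top(scores, boxes)  # keeps 0.9, then 0.8

Indexing a BoxList with `keep` filters every attached field at once, which is why the real method re-reads `scores` after `predictions = predictions[keep]`.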
lxb1989/AugmentedAutoencoder | [
"954f60432009b9873b8e60544aee6a567dfbf67d"
] | [
"auto_pose/ae/ae_train.py"
] | [
" # -*- coding: utf-8 -*-\nimport os\nimport configparser\nimport argparse\nimport numpy as np\nimport signal\nimport shutil\nimport cv2\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport progressbar\nimport tensorflow as tf\n\nfrom auto_pose.ae import ae_factory as factory\nfrom auto_pose.ae import utils as u\n\n\ndef main():\n workspace_path = os.environ.get('AE_WORKSPACE_PATH')\n\n if workspace_path is None:\n print('Please define a workspace path:\\n')\n print('export AE_WORKSPACE_PATH=/path/to/workspace\\n')\n exit(-1)\n\n gentle_stop = np.array((1,), dtype=np.bool)\n gentle_stop[0] = False\n def on_ctrl_c(signal, frame):\n gentle_stop[0] = True\n signal.signal(signal.SIGINT, on_ctrl_c)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"experiment_name\")\n parser.add_argument(\"-d\", action='store_true', default=False)\n parser.add_argument(\"-gen\", action='store_true', default=False)\n arguments = parser.parse_args()\n\n full_name = arguments.experiment_name.split('/')\n\n experiment_name = full_name.pop()\n experiment_group = full_name.pop() if len(full_name) > 0 else ''\n\n debug_mode = arguments.d\n generate_data = arguments.gen\n\n cfg_file_path = u.get_config_file_path(workspace_path, experiment_name, experiment_group)\n log_dir = u.get_log_dir(workspace_path, experiment_name, experiment_group)\n checkpoint_file = u.get_checkpoint_basefilename(log_dir)\n ckpt_dir = u.get_checkpoint_dir(log_dir)\n train_fig_dir = u.get_train_fig_dir(log_dir)\n dataset_path = u.get_dataset_path(workspace_path)\n\n if not os.path.exists(cfg_file_path):\n print('Could not find config file:\\n')\n print('{}\\n'.format(cfg_file_path))\n exit(-1)\n\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n if not os.path.exists(train_fig_dir):\n os.makedirs(train_fig_dir)\n if not os.path.exists(dataset_path):\n os.makedirs(dataset_path)\n\n args = configparser.ConfigParser()\n args.read(cfg_file_path)\n\n shutil.copy2(cfg_file_path, log_dir)\n\n with tf.variable_scope(experiment_name):\n dataset = factory.build_dataset(dataset_path, args)\n queue = factory.build_queue(dataset, args)\n encoder = factory.build_encoder(queue.x, args, is_training=True)\n decoder = factory.build_decoder(queue.y, encoder, args, is_training=True)\n ae = factory.build_ae(encoder, decoder, args)\n codebook = factory.build_codebook(encoder, dataset, args)\n train_op = factory.build_train_op(ae, args)\n saver = tf.train.Saver(save_relative_paths=True)\n\n num_iter = args.getint('Training', 'NUM_ITER') if not debug_mode else 100000\n save_interval = args.getint('Training', 'SAVE_INTERVAL')\n model_type = args.get('Dataset', 'MODEL')\n\n if model_type=='dsprites':\n dataset.get_sprite_training_images(args)\n else:\n dataset.get_training_images(dataset_path, args)\n dataset.load_bg_images(dataset_path)\n\n if generate_data:\n print('finished generating synthetic training data for ' + experiment_name)\n print('exiting...')\n exit()\n\n widgets = ['Training: ', progressbar.Percentage(),\n ' ', progressbar.Bar(),\n ' ', progressbar.Counter(), ' / %s' % num_iter,\n ' ', progressbar.ETA(), ' ']\n bar = progressbar.ProgressBar(maxval=num_iter,widgets=widgets)\n\n\n gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction = 0.7)\n config = tf.ConfigProto(gpu_options=gpu_options)\n\n with tf.Session(config=config) as sess:\n\n chkpt = tf.train.get_checkpoint_state(ckpt_dir)\n if chkpt and chkpt.model_checkpoint_path:\n saver.restore(sess, chkpt.model_checkpoint_path)\n else:\n 
sess.run(tf.global_variables_initializer())\n\n merged_loss_summary = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(ckpt_dir, sess.graph)\n\n\n if not debug_mode:\n print('Training with %s model' % args.get('Dataset','MODEL'), os.path.basename(args.get('Paths','MODEL_PATH')))\n bar.start()\n\n queue.start(sess)\n for i in range(ae.global_step.eval(), num_iter):\n if not debug_mode:\n sess.run(train_op)\n if i % 10 == 0:\n loss = sess.run(merged_loss_summary)\n summary_writer.add_summary(loss, i)\n\n bar.update(i)\n if (i+1) % save_interval == 0:\n saver.save(sess, checkpoint_file, global_step=ae.global_step)\n\n this_x, this_y = sess.run([queue.x, queue.y])\n reconstr_train = sess.run(decoder.x,feed_dict={queue.x:this_x})\n train_imgs = np.hstack(( u.tiles(this_x, 4, 4), u.tiles(reconstr_train, 4,4),u.tiles(this_y, 4, 4)))\n cv2.imwrite(os.path.join(train_fig_dir,'training_images_%s.png' % i), train_imgs*255)\n else:\n\n this_x, this_y = sess.run([queue.x, queue.y])\n reconstr_train = sess.run(decoder.x,feed_dict={queue.x:this_x})\n cv2.imshow('sample batch', np.hstack(( u.tiles(this_x, 3, 3), u.tiles(reconstr_train, 3,3),u.tiles(this_y, 3, 3))) )\n k = cv2.waitKey(0)\n if k == 27:\n break\n\n if gentle_stop[0]:\n break\n\n queue.stop(sess)\n if not debug_mode:\n bar.finish()\n if not gentle_stop[0] and not debug_mode:\n print('To create the embedding run:\\n')\n print('ae_embed {}\\n'.format(full_name))\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.summary.FileWriter",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.GPUOptions",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.train.Saver",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
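One detail of ae_train.py worth isolating is the `gentle_stop` flag: a one-element array is mutated from a SIGINT handler so the training loop can finish its current step and exit cleanly instead of dying mid-`sess.run`. A minimal sketch of the same pattern, with a dummy loop standing in for the TensorFlow session (`dtype=bool` replaces the `np.bool` alias, which newer NumPy releases removed):

import signal
import numpy as np

gentle_stop = np.array((1,), dtype=bool)  # mutable flag the handler can flip
gentle_stop[0] = False

def on_ctrl_c(sig, frame):
    # Request a clean stop rather than raising KeyboardInterrupt mid-step.
    gentle_stop[0] = True

signal.signal(signal.SIGINT, on_ctrl_c)

for i in range(10**9):
    # one training step would run here
    if gentle_stop[0]:
        print('stopping gracefully after step %d' % i)
        break

A bare local boolean would not work here: the handler closes over the array object and mutates `gentle_stop[0]`, which the loop observes, whereas rebinding a plain variable inside the handler would be invisible outside it.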
richardwu/gdaxtrader | [
"1f9dab48f08bd0fd4e3e562c41ae4bc9d1218504"
] | [
"gdaxtrader/visualize.py"
] | [
"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport plotly\nimport pandas as pd\nimport time\nimport os\nimport copy\n\nimport common\nimport timeseries as ts\nimport ratesutil\n\n_plot_dir = 'plots'\n\n_increase_col = '#419871'\n_decrease_col = '#fc433e'\n\n_mov_avg_width = 1\n\n_bbands_col = '#ccc'\n_bbands_width = 1\n\ndef hist_rates(rates, product=None, savetofile=True, movavg_windows=[], bbands_window=14, bbands_std=2):\n \"\"\"\n Takes in a list of historic rates with the following schema per element\n [unix time, low, high, open, close, volume]\n which is the same format returned by mktdata.get_rates.\n\n Params:\n rates (iteraable): each element is one tick.\n product (str): name of the product used for labelling and title.\n savetofile (bool): saves plot to plots/hist_rate_<TIME>.png if True.\n movavg_windows\n (list of int): plot moving averages with the specified windows.\n bbands_window (int): window size used to compute Bollinger Bands.\n bbands_std (int): number of standard deviations used to compute\n lower and upper Bollinger Bands.\n\n Returns the dict object that can be plotted directly with\n plotly.(offline).(i)plot.\n \"\"\"\n fname_prefix = 'hist_rate'\n\n # Sort by timestamp\n ratesdf = ratesutil.to_df(rates)\n\n # Main candlestick plot.\n data = [dict(\n type= 'candlestick',\n x = ratesdf.datetime,\n open = ratesdf.open,\n high = ratesdf.high,\n low = ratesdf.low,\n close = ratesdf.close,\n yaxis = 'y2',\n name = product,\n increasing = dict(line=dict(color=_increase_col)),\n decreasing = dict(line=dict(color=_decrease_col)),\n )]\n\n # Layout.\n layout = dict(\n title = product,\n # Volume axis\n yaxis = dict(domain = [0, 0.2], showticklabels=False),\n # Price axis\n yaxis2 = dict(domain = [0.2, 0.8], title = 'Price'),\n )\n\n # Initialize figure dict.\n fig = dict(data=data, layout=layout)\n\n # Moving averages as line plots.\n for window in movavg_windows:\n mv_close = ts.mov_avg(ratesdf.close, window=window)\n mv_datetime = ts.truncate_start(ratesdf.datetime, window)\n\n assert(len(mv_close) == len(mv_datetime))\n\n fig['data'].append(dict(\n x = mv_datetime,\n y = mv_close,\n type = 'scatter',\n mode = 'lines',\n name = 'Moving Average (' + str(window) + ')',\n yaxis = 'y2',\n line = dict(width=_mov_avg_width),\n ))\n\n # Bollingers Bands\n if bbands_window is not None and bbands_std is not None:\n bbands_label = 'Bollinger Bands (' + str(bbands_window) + ', n_std=' + str(bbands_std) + ')'\n\n _, bb_lower, bb_upper = ts.bollingers(\n ratesdf.close,\n window=bbands_window,\n n_std=bbands_std,\n )\n\n bbands_dt = ts.truncate_start(ratesdf.datetime, bbands_window)\n\n lower_dict = dict(\n x = bbands_dt,\n y = bb_lower,\n type ='scatter',\n yaxis='y2',\n line = dict(width=_bbands_width),\n marker=dict(color=_bbands_col),\n hoverinfo='none',\n legendgroup=bbands_label,\n name=bbands_label,\n )\n\n # Only need a shallow copy since we are only changing the top-level\n # references of y and showlegend.\n upper_dict = copy.copy(lower_dict)\n upper_dict['y'] = bb_upper\n upper_dict['showlegend'] = False\n\n fig['data'].append(lower_dict)\n fig['data'].append(upper_dict)\n\n # Volume bars.\n\n # Generate colors based on close price delta.\n vol_cols = []\n for i, close in enumerate(ratesdf.close):\n if i == 0 or close <= ratesdf.close[i-1]:\n vol_cols.append(_decrease_col)\n continue\n vol_cols.append(_increase_col)\n\n fig['data'].append(dict(\n x = ratesdf.datetime,\n y = ratesdf.vol,\n type = 'bar',\n marker = dict(color=vol_cols),\n showlegend = False,\n 
yaxis = 'y',\n name = 'Volume'))\n\n if savetofile:\n saveplot(fname_prefix, plotlydata=fig)\n\n return fig\n\ndef saveplot(fname_prefix, plotlydata=None):\n if not os.path.exists(_plot_dir):\n os.makedirs(_plot_dir)\n\n cur_time_str = common.fmttime(time.gmtime())\n fname = os.path.join(_plot_dir, fname_prefix + '_' + cur_time_str)\n\n # Plotly candlestick graph\n if plotlydata is not None:\n plotly.offline.plot(plotlydata, filename=fname)\n return\n\n # Matplotlib graph\n plt.savefig(fname + '.png')\n\n\n"
] | [
[
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
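`ts.bollingers` and `ts.mov_avg` are project-internal helpers, but the quantities they feed into the plot are standard. A sketch of equivalent Bollinger Band math with pandas alone, assuming a simple moving average and a band of plus/minus n_std rolling standard deviations (the same `(mid, lower, upper)` triple the plot code consumes; the internals of `ts.bollingers` are not shown in the record, so this is an assumption):

import pandas as pd

def bollinger_bands(close, window=14, n_std=2):
    # Rolling mean and standard deviation over the lookback window.
    mid = close.rolling(window).mean()
    std = close.rolling(window).std()
    lower, upper = mid - n_std * std, mid + n_std * std
    # Trim the warm-up rows with no full window, like ts.truncate_start.
    return mid[window - 1:], lower[window - 1:], upper[window - 1:]

close = pd.Series([10.0, 11.0, 12.0, 11.5, 12.5, 13.0, 12.0, 11.0])
mid, lower, upper = bollinger_bands(close, window=4)

The trimming mirrors why the plot code calls `ts.truncate_start(ratesdf.datetime, bbands_window)`: band values only exist once a full window of closes is available.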
DS-Wen/SSPredict | [
"663f693405b066d4b93751c8374d9f5412c501ee"
] | [
"build/lib/sspredict/make_prediction/models.py"
] | [
"import scipy \nimport numpy as np\nimport pandas as pd\nfrom scipy import stats \nimport copy\nimport scipy.optimize as optimize\nimport scipy.integrate as integrate\n\nclass ss_edge_model:\n# calculate solid solution strengthening contribution for FCC/BCC CCAs\n# pseudo-ternary compositions\n# Edge dislocation models\n# FCC model: Varvenne-Leyson-Ghazisaeidi-Curtin 2016: http://dx.doi.org/10.1016/j.actamat.2016.09.046\n# BCC model: Maresca-Curtin 2019: https://doi.org/10.1016/j.actamat.2019.10.015\n def __init__(self,\n dislocation_properties,\n exp_conditions,\n comp_elements,comp_pst,\n elements_data,\n structure):\n \n # dislocation properties, alpha, f1, and f2\n self.alpha = float(dislocation_properties[0])\n self.f_tau = float(dislocation_properties[1])\n self.f_dEb = float(dislocation_properties[2])\n \n # experiment conditions, T, strain rate\n self.T = float(exp_conditions[0])\n self.ep = float(exp_conditions[1])\n self.ep0 = 10**4 #reference strain rate (/s)\n \n # some constants \n self.boltzmann_J = 1.38064852*10**(-23) #J/K\n self.J2eV=6.2415093433*10**18 # covert J to eV\n \n # elemental data\n self.elements_order = comp_elements.columns.tolist()\n self.compositions = comp_elements #pandas df\n self.elements_data = copy.deepcopy(elements_data) #json\n self.comp_pst = comp_pst\n # convert unit for properties\n # Vn: Å^3 to m^3\n # b: Å to m\n # a: Å to m\n # E: GPa to Pa\n # G: GPa to Pa\n \n for element_i in self.elements_order:\n self.elements_data[element_i]['Vn'] = elements_data[element_i]['Vn']*10**(-30)\n self.elements_data[element_i]['b'] = elements_data[element_i]['b']*10**(-10)\n self.elements_data[element_i]['a'] = elements_data[element_i]['a']*10**(-10)\n self.elements_data[element_i]['E'] = elements_data[element_i]['E']*10**(9)\n self.elements_data[element_i]['G'] = elements_data[element_i]['G']*10**(9)\n\n \n \n self.structure = structure\n\n def FCC_V_L_G_C_2016_analytical(self):\n # FCC model: Varvenne-Leyson-Ghazisaeidi-Curtin 2016: http://dx.doi.org/10.1016/j.actamat.2016.09.046\n \n self.prefac_ty0 = 0.051\n self.Taylor_fac = 3.06\n self.prefac_dEb = 0.274\n # averaged properties\n cn_Vn = []\n cn_nu = []\n cn_G = []\n cn_b = []\n cn_E = []\n for element_i in self.elements_order:\n cn_Vn.append(self.compositions[element_i]/100*self.elements_data[element_i]['Vn'])\n cn_nu.append(self.compositions[element_i]/100*self.elements_data[element_i]['nu'])\n cn_G.append(self.compositions[element_i]/100*self.elements_data[element_i]['G'])\n cn_b.append(self.compositions[element_i]/100*self.elements_data[element_i]['b'])\n cn_E.append(self.compositions[element_i]/100*self.elements_data[element_i]['E'])\n\n self.aver_E = sum(cn_E);\n self.aver_V = sum(cn_Vn);\n self.aver_G = sum(cn_G)\n self.aver_Nu = sum(cn_nu)\n self.aver_b = sum(cn_b)\n \n i = 0;cn_Delta_Vn2=[]\n for element_i in self.elements_order:\n cn_Delta_Vn2.append(self.compositions[element_i]/100*(self.elements_data[element_i]['Vn']-self.aver_V)**2)\n \n self.sum_cndVn_b6 = sum(cn_Delta_Vn2)/self.aver_b**6;\n q_nu = ((1 + self.aver_Nu)/(1 - self.aver_Nu))\n \n self.dEb = self.prefac_dEb * self.f_dEb * self.alpha**(1/3) * self.aver_G * self.aver_b**3 * q_nu**(2/3) * self.sum_cndVn_b6**(1/3)\n self.Ty0 = self.prefac_ty0 * self.f_tau * self.alpha**(-1/3) * self.aver_G * q_nu**(4/3) * self.sum_cndVn_b6**(2/3)\n self.Ty0_pc = self.Taylor_fac * self.Ty0\n delta_ss_low_T = self.Ty0 * (1 - ((self.boltzmann_J*self.T)/(self.dEb) * np.log(self.ep0/self.ep))**(2/3) )\n delta_ss_high_T = self.Ty0 * np.exp(-1/0.57 * 
self.boltzmann_J*self.T/self.dEb * np.log(self.ep0/self.ep) )\n Ty_threshold = self.Ty0/2\n \n self.delta_ss = self.Taylor_fac*np.array([delta_ss_low_T[i] if delta_ss_low_T[i]>=Ty_threshold[i] else delta_ss_high_T[i] for i in range(len(Ty_threshold))])\n \n \n def BCC_M_C_2020_analytical(self):\n # BCC model: Maresca-Curtin-2019: https://doi.org/10.1016/j.actamat.2019.10.015\n \n self.prefac_ty0 = 0.051\n self.Taylor_fac = 3.06\n self.prefac_dEb = 0.274\n # averaged properties\n cn_Vn = []\n cn_nu = []\n cn_G = []\n cn_b = []\n cn_E = []\n for element_i in self.elements_order:\n cn_Vn.append(self.compositions[element_i]/100*self.elements_data[element_i]['Vn'])\n cn_nu.append(self.compositions[element_i]/100*self.elements_data[element_i]['nu'])\n cn_G.append(self.compositions[element_i]/100*self.elements_data[element_i]['G'])\n cn_b.append(self.compositions[element_i]/100*self.elements_data[element_i]['b'])\n cn_E.append(self.compositions[element_i]/100*self.elements_data[element_i]['E'])\n\n self.aver_E = sum(cn_E);\n self.aver_V = sum(cn_Vn);\n self.aver_G = sum(cn_G)\n self.aver_Nu = sum(cn_nu)\n self.aver_b = sum(cn_b)\n \n i = 0;cn_Delta_Vn2=[]\n for element_i in self.elements_order:\n cn_Delta_Vn2.append(self.compositions[element_i]/100*(self.elements_data[element_i]['Vn']-self.aver_V)**2)\n \n self.sum_cndVn_b6 = sum(cn_Delta_Vn2)/self.aver_b**6;\n q_nu = ((1 + self.aver_Nu)/(1 - self.aver_Nu))\n \n self.dEb = self.prefac_dEb * self.f_dEb * self.alpha**(1/3) * self.aver_G * self.aver_b**3 * q_nu**(2/3) * self.sum_cndVn_b6**(1/3)\n self.Ty0 = self.prefac_ty0 * self.f_tau * self.alpha**(-1/3) * self.aver_G * q_nu**(4/3) * self.sum_cndVn_b6**(2/3)\n self.Ty0_pc = self.Taylor_fac * self.Ty0\n delta_ss_low_T = self.Ty0 * (1 - ((self.boltzmann_J*self.T)/(self.dEb) * np.log(self.ep0/self.ep))**(2/3) )\n delta_ss_high_T = self.Ty0 * np.exp(-1/0.57 * self.boltzmann_J*self.T/self.dEb * np.log(self.ep0/self.ep) )\n Ty_threshold = self.Ty0/2\n \n self.delta_ss = self.Taylor_fac*np.array([delta_ss_low_T[i] if delta_ss_low_T[i]>=Ty_threshold[i] else delta_ss_high_T[i] for i in range(len(Ty_threshold))])\n \n def calculate(self):\n if self.structure == 'fcc':\n self.FCC_V_L_G_C_2016_analytical()\n elif self.structure == 'bcc':\n self.BCC_M_C_2020_analytical()\n \n def writedata(self):\n self.calc_data = copy.deepcopy(self.comp_pst)\n self.calc_data['V_ave'] = [round(i,2) for i in (np.array(self.aver_V*10**30))]\n self.calc_data['b_ave'] = np.round(self.aver_b*10**10,4)\n self.calc_data['E_ave'] = np.round(self.aver_E/10**9,2)\n self.calc_data['G_ave'] = np.round(self.aver_G/10**9,2)\n self.calc_data['nu_ave'] = np.round(self.aver_Nu,3)\n self.calc_data['T'] = np.ones(len(self.calc_data)) * self.T\n self.calc_data['sum_cnVn^2_b6'] = np.round(self.sum_cndVn_b6,8) \n self.calc_data['Ty0'] = np.round(self.Ty0/10**6,2) \n self.calc_data['Delta_Eb'] = np.round(self.dEb*self.J2eV,4) \n self.calc_data['Delta_sigma_ss'] = np.round(self.delta_ss/10**6,2) \n\n\n\nclass ss_edge_model_w_uncertainty:\n# calculate solid solution strengthening contribution for FCC/BCC CCAs\n# different from ss_edge_model, \n# consider the uncertainties in the elemental data input, lattice constants and elastic constants\n# Edge dislocation models\n# FCC model: Varvenne-Leyson-Ghazisaeidi-Curtin 2016: http://dx.doi.org/10.1016/j.actamat.2016.09.046\n# BCC model: Maresca-Curtin 2019: https://doi.org/10.1016/j.actamat.2019.10.015\n def __init__(self,\n ss_edge_model,\n dislocation_properties,\n exp_conditions,\n comp_elements,\n 
comp_pst,\n elements_data,\n uncertainty_levels,\n structure):\n \n\n self.dislocation_properties = dislocation_properties\n self.exp_conditions = exp_conditions\n self.T = float(self.exp_conditions[0])\n self.comp_elements = comp_elements\n self.elements_order = comp_elements.columns.tolist()\n self.comp_pst = comp_pst\n self.elements_data_save = copy.deepcopy(elements_data)\n self.structure = structure \n self.J2eV=6.2415093433*10**18 # covert J to eV\n\n\n # uncertainty_levels controls the distribution of random variables of inputs\n # uncertainty_levels[0] for lattice constant a\n # uncertainty_levels[1] for elastic constants E, G, nu\n # use normal distribution\n # so uncertainty level is converted to standard deviation \n # then the uncertainty will propagate to the predicted quantities. \n # predicted quantity uncertainty will be appended to the predicted data. \n \n self.uncertainty_levels = uncertainty_levels \n \n def gen_rv(self):\n '''for element_i in self.elements_order:\n self.elements_data[element_i]['a'] = stats.norm.rvs( elements_data[element_i]['a'],scale=self.uncertainty_levels[0],elements_data[element_i]['a']*self.uncertainty_levels[0])\n self.elements_data[element_i]['b'] = stats.norm.rvs( elements_data[element_i]['b'],scale=self.uncertainty_levels[0],elements_data[element_i]['b']*self.uncertainty_levels[0])\n self.elements_data[element_i]['Vn'] = elements_data[element_i]['a']**3/4\n self.elements_data[element_i]['E'] = stats.norm.rvs( elements_data[element_i]['E'],scale=self.uncertainty_levels[0],elements_data[element_i]['E']*self.uncertainty_levels[1])\n self.elements_data[element_i]['G'] = stats.norm.rvs( elements_data[element_i]['G'],scale=self.uncertainty_levels[0],elements_data[element_i]['G']*self.uncertainty_levels[1])\n self.elements_data[element_i]['nu'] = stats.norm.rvs( elements_data[element_i]['nu'],scale=self.uncertainty_levels[0],elements_data[element_i]['nu']*self.uncertainty_levels[1])\n '''\n new_elements_data = copy.deepcopy(self.elements_data_save)\n for element_i in self.elements_order:\n new_elements_data[element_i]['a'] = stats.norm.rvs( self.elements_data_save[element_i]['a'],scale=self.elements_data_save[element_i]['a']*self.uncertainty_levels[0])\n new_elements_data[element_i]['b'] = stats.norm.rvs( self.elements_data_save[element_i]['b'],scale=self.elements_data_save[element_i]['b']*self.uncertainty_levels[0])\n new_elements_data[element_i]['Vn'] = new_elements_data[element_i]['a']**3/4\n new_elements_data[element_i]['E'] = stats.norm.rvs( self.elements_data_save[element_i]['E'],scale=self.elements_data_save[element_i]['E']*self.uncertainty_levels[1])\n new_elements_data[element_i]['G'] = stats.norm.rvs( self.elements_data_save[element_i]['G'],scale=self.elements_data_save[element_i]['G']*self.uncertainty_levels[1])\n new_elements_data[element_i]['nu'] = stats.norm.rvs( self.elements_data_save[element_i]['nu'],scale=self.elements_data_save[element_i]['nu']*self.uncertainty_levels[1])\n return new_elements_data\n\n def calculate(self):\n self.aver_V_list = []\n self.aver_b_list = []\n self.aver_E_list = []\n self.aver_G_list = []\n self.aver_Nu_list = []\n self.sum_cndVn_b6_list = []\n self.Ty0_list = []\n self.dEb_list = []\n self.delta_ss_list = []\n i=0\n while i <=1000:\n \n self.elements_data = self.gen_rv()\n self.model = ss_edge_model(self.dislocation_properties,\n self.exp_conditions,\n self.comp_elements,self.comp_pst,\n self.elements_data,\n self.structure\n )\n #calculate data\n \n if self.structure == 'fcc':\n 
self.model.FCC_V_L_G_C_2016_analytical()\n elif self.structure == 'bcc':\n self.model.BCC_M_C_2020_analytical()\n \n self.aver_V_list.append(self.model.aver_V)\n self.aver_b_list.append(self.model.aver_b)\n self.aver_E_list.append(self.model.aver_E)\n self.aver_G_list.append(self.model.aver_G)\n self.aver_Nu_list.append(self.model.aver_Nu)\n self.sum_cndVn_b6_list.append(self.model.sum_cndVn_b6)\n self.Ty0_list.append(self.model.Ty0)\n self.dEb_list.append(self.model.dEb)\n self.delta_ss_list.append(self.model.delta_ss)\n i+=1\n \n \n self.aver_V = np.mean( np.array([ aver_V for aver_V in self.aver_V_list ]), axis=0 )\n self.aver_b = np.mean( np.array([ aver_b for aver_b in self.aver_b_list ]), axis=0 )\n self.aver_E = np.mean( np.array([ aver_E for aver_E in self.aver_E_list ]), axis=0 )\n self.aver_G = np.mean( np.array([ aver_G for aver_G in self.aver_G_list ]), axis=0 )\n self.aver_Nu = np.mean( np.array([ aver_Nu for aver_Nu in self.aver_Nu_list ]), axis=0 )\n self.aver_sum_cndVn_b6 = np.mean( np.array([ sum_cndVn_b6 for sum_cndVn_b6 in self.sum_cndVn_b6_list ]), axis=0 )\n self.aver_Ty0 = np.mean( np.array([ Ty0 for Ty0 in self.Ty0_list ]), axis=0 )\n self.aver_dEb = np.mean( np.array([ dEb for dEb in self.dEb_list ]), axis=0 )\n self.aver_delta_ss = np.mean( np.array([delta_ss for delta_ss in self.delta_ss_list ]), axis=0 )\n # evaluate uncertainty standard deviation\n self.std_V = np.std( np.array([ aver_V for aver_V in self.aver_V_list ]), axis=0 )\n self.std_b = np.std( np.array([ aver_b for aver_b in self.aver_b_list ]), axis=0 )\n self.std_E = np.std( np.array([ aver_E for aver_E in self.aver_E_list ]), axis=0 )\n self.std_G = np.std( np.array([ aver_G for aver_G in self.aver_G_list ]), axis=0 )\n self.std_Nu = np.std( np.array([ aver_Nu for aver_Nu in self.aver_Nu_list ]), axis=0 )\n self.std_sum_cndVn_b6 = np.std( np.array([ sum_cndVn_b6 for sum_cndVn_b6 in self.sum_cndVn_b6_list ]), axis=0 )\n self.std_Ty0 = np.std( np.array([ Ty0 for Ty0 in self.Ty0_list ]), axis=0 )\n self.std_dEb = np.std( np.array([ dEb for dEb in self.dEb_list ]), axis=0 )\n self.std_delta_ss = np.std( np.array([ delta_ss for delta_ss in self.delta_ss_list ]), axis=0 )\n\n\n \n def writedata(self):\n self.calc_data = copy.deepcopy(self.comp_pst)\n self.calc_data['V_ave'] = [round(i,2) for i in (np.array(self.aver_V*10**30))]\n self.calc_data['b_ave'] = np.round(self.aver_b*10**10,4)\n self.calc_data['E_ave'] = np.round(self.aver_E/10**9,2)\n self.calc_data['G_ave'] = np.round(self.aver_G/10**9,2)\n self.calc_data['nu_ave'] = np.round(self.aver_Nu,4)\n self.calc_data['T'] = np.ones(len(self.calc_data)) * self.T\n self.calc_data['sum_cnVn^2_b6'] = np.round(self.aver_sum_cndVn_b6,8)\n self.calc_data['Ty0'] = np.round(self.aver_Ty0/10**6,2)\n self.calc_data['Delta_Eb'] = np.round(self.aver_dEb*self.J2eV,4)\n self.calc_data['Delta_sigma_ss'] = np.round(self.aver_delta_ss/10**6,2)\n\n \n self.calc_data['std_V_ave'] = [round(i,2) for i in np.array(self.std_V*10**30)]\n self.calc_data['std_b_ave'] = np.round(self.std_b*10**10,4)\n self.calc_data['std_E_ave'] = np.round(self.std_E/10**9,2)\n self.calc_data['std_G_ave'] = np.round(self.std_G/10**9,2)\n self.calc_data['std_nu_ave'] = np.round(self.std_Nu,3)\n self.calc_data['std_sum_cnVn^2_b6'] = np.round(self.std_sum_cndVn_b6,8)\n self.calc_data['std_Ty0'] = np.round(self.std_Ty0/10**6,2)\n self.calc_data['std_Delta_Eb'] = np.round(self.std_dEb*self.J2eV,4)\n self.calc_data['std_Delta_sigma_ss'] = np.round(self.std_delta_ss/10**6,2)\n\n\nclass 
ss_edge_model_T_w_uncertainty:\n# calculate solid solution strengthening contribution for FCC/BCC CCAs\n# slightly different from ss_edge_model_T, \n# consider the uncertainties in the elemental data input, lattice constants and elastic constants\n# Edge dislocation models\n# FCC model: Varvenne-Leyson-Ghazisaeidi-Curtin 2016: http://dx.doi.org/10.1016/j.actamat.2016.09.046\n# BCC model: Maresca-Curtin 2020: \n\n def __init__(self,\n ss_edge_model_T,\n dislocation_properties,\n exp_conditions,\n comp_elements,\n elements_data,\n uncertainty_levels,\n structure):\n \n\n self.dislocation_properties = dislocation_properties\n self.exp_conditions = exp_conditions\n self.comp_elements = comp_elements\n self.elements_order = comp_elements.columns.tolist()\n self.elements_data_save = copy.deepcopy(elements_data)\n self.structure = structure \n self.J2eV=6.2415093433*10**18 # covert J to eV\n\n\n # uncertainty_levels controls the distribution of random variables of inputs\n # uncertainty_levels[0] for lattice constant a\n # uncertainty_levels[1] for elastic constants E, G, nu\n # use normal distribution\n # so uncertainty level is converted to standard deviation \n # then the uncertainty will propagate to the predicted quantities. \n # predicted quantity uncertainty will be appended to the predicted data. \n \n self.uncertainty_levels = uncertainty_levels \n \n def gen_rv(self):\n '''for element_i in self.elements_order:\n self.elements_data[element_i]['a'] = stats.norm.rvs( elements_data[element_i]['a'],scale=self.uncertainty_levels[0],elements_data[element_i]['a']*self.uncertainty_levels[0])\n self.elements_data[element_i]['b'] = stats.norm.rvs( elements_data[element_i]['b'],scale=self.uncertainty_levels[0],elements_data[element_i]['b']*self.uncertainty_levels[0])\n self.elements_data[element_i]['Vn'] = elements_data[element_i]['a']**3/4\n self.elements_data[element_i]['E'] = stats.norm.rvs( elements_data[element_i]['E'],scale=self.uncertainty_levels[0],elements_data[element_i]['E']*self.uncertainty_levels[1])\n self.elements_data[element_i]['G'] = stats.norm.rvs( elements_data[element_i]['G'],scale=self.uncertainty_levels[0],elements_data[element_i]['G']*self.uncertainty_levels[1])\n self.elements_data[element_i]['nu'] = stats.norm.rvs( elements_data[element_i]['nu'],scale=self.uncertainty_levels[0],elements_data[element_i]['nu']*self.uncertainty_levels[1])\n '''\n new_elements_data = copy.deepcopy(self.elements_data_save)\n for element_i in self.elements_order:\n new_elements_data[element_i]['a'] = stats.norm.rvs( self.elements_data_save[element_i]['a'],scale=self.elements_data_save[element_i]['a']*self.uncertainty_levels[0])\n new_elements_data[element_i]['b'] = stats.norm.rvs( self.elements_data_save[element_i]['b'],scale=self.elements_data_save[element_i]['b']*self.uncertainty_levels[0])\n new_elements_data[element_i]['Vn'] = new_elements_data[element_i]['a']**3/4\n new_elements_data[element_i]['E'] = stats.norm.rvs( self.elements_data_save[element_i]['E'],scale=self.elements_data_save[element_i]['E']*self.uncertainty_levels[1])\n new_elements_data[element_i]['G'] = stats.norm.rvs( self.elements_data_save[element_i]['G'],scale=self.elements_data_save[element_i]['G']*self.uncertainty_levels[1])\n new_elements_data[element_i]['nu'] = stats.norm.rvs( self.elements_data_save[element_i]['nu'],scale=self.elements_data_save[element_i]['nu']*self.uncertainty_levels[1])\n return new_elements_data\n\n def calculate(self):\n self.aver_V_list = []\n self.aver_b_list = []\n self.aver_E_list = []\n 
self.aver_G_list = []\n self.aver_Nu_list = []\n self.sum_cndVn_b6_list = []\n self.Ty0_list = []\n self.dEb_list = []\n self.delta_ss_list = []\n i=0\n while i <=1000:\n \n self.elements_data = self.gen_rv()\n self.model = ss_edge_model_T(self.dislocation_properties,\n self.exp_conditions,\n self.comp_elements,\n self.elements_data,\n self.structure\n )\n \n if self.structure == 'fcc':\n self.model.FCC_V_L_G_C_2016_analytical()\n elif self.structure == 'bcc':\n self.model.BCC_M_C_2020_analytical()\n \n self.aver_V_list.append(self.model.aver_V)\n self.aver_b_list.append(self.model.aver_b)\n self.aver_E_list.append(self.model.aver_E)\n self.aver_G_list.append(self.model.aver_G)\n self.aver_Nu_list.append(self.model.aver_Nu)\n self.sum_cndVn_b6_list.append(self.model.sum_cndVn_b6)\n self.Ty0_list.append(self.model.Ty0)\n self.dEb_list.append(self.model.dEb)\n self.delta_ss_list.append(self.model.delta_ss)\n i+=1\n \n \n self.aver_V = np.mean( np.array([ aver_V for aver_V in self.aver_V_list ]), axis=0 )\n self.aver_b = np.mean( np.array([ aver_b for aver_b in self.aver_b_list ]), axis=0 )\n self.aver_E = np.mean( np.array([ aver_E for aver_E in self.aver_E_list ]), axis=0 )\n self.aver_G = np.mean( np.array([ aver_G for aver_G in self.aver_G_list ]), axis=0 )\n self.aver_Nu = np.mean( np.array([ aver_Nu for aver_Nu in self.aver_Nu_list ]), axis=0 )\n self.aver_sum_cndVn_b6 = np.mean( np.array([ sum_cndVn_b6 for sum_cndVn_b6 in self.sum_cndVn_b6_list ]), axis=0 )\n self.aver_Ty0 = np.mean( np.array([ Ty0 for Ty0 in self.Ty0_list ]), axis=0 )\n self.aver_dEb = np.mean( np.array([ dEb for dEb in self.dEb_list ]), axis=0 )\n self.aver_delta_ss = np.mean( np.array([delta_ss for delta_ss in self.delta_ss_list ]), axis=0 )\n \n self.std_V = np.std( np.array([ aver_V for aver_V in self.aver_V_list ]), axis=0 )\n self.std_b = np.std( np.array([ aver_b for aver_b in self.aver_b_list ]), axis=0 )\n self.std_E = np.std( np.array([ aver_E for aver_E in self.aver_E_list ]), axis=0 )\n self.std_G = np.std( np.array([ aver_G for aver_G in self.aver_G_list ]), axis=0 )\n self.std_Nu = np.std( np.array([ aver_Nu for aver_Nu in self.aver_Nu_list ]), axis=0 )\n self.std_sum_cndVn_b6 = np.std( np.array([ sum_cndVn_b6 for sum_cndVn_b6 in self.sum_cndVn_b6_list ]), axis=0 )\n self.std_Ty0 = np.std( np.array([ Ty0 for Ty0 in self.Ty0_list ]), axis=0 )\n self.std_dEb = np.std( np.array([ dEb for dEb in self.dEb_list ]), axis=0 )\n self.std_delta_ss = np.std( np.array([ delta_ss for delta_ss in self.delta_ss_list ]), axis=0 )\n\n\n \n def writedata(self):\n self.calc_data = copy.deepcopy(self.comp_elements)\n self.calc_data['T'] = self.exp_conditions[0]\n self.calc_data['V_ave'] = [round(i,2) for i in (np.array(self.aver_V*10**30))]\n self.calc_data['b_ave'] = np.round(self.aver_b*10**10,4)\n self.calc_data['E_ave'] = np.round(self.aver_E/10**9,2)\n self.calc_data['G_ave'] = np.round(self.aver_G/10**9,2)\n self.calc_data['nu_ave'] = np.round(self.aver_Nu,4)\n self.calc_data['sum_cnVn^2_b6'] = np.round(self.aver_sum_cndVn_b6,8)\n self.calc_data['Ty0'] = np.round(self.aver_Ty0/10**6,2)\n self.calc_data['Delta_Eb'] = np.round(self.aver_dEb*self.J2eV,4)\n self.calc_data['Delta_sigma_ss'] = np.round(self.aver_delta_ss/10**6,2)\n\n \n self.calc_data['std_V_ave'] = np.round(self.std_V*10**30,2)\n self.calc_data['std_b_ave'] = np.round(self.std_b*10**10,4)\n self.calc_data['std_E_ave'] = np.round(self.std_E/10**9,2)\n self.calc_data['std_G_ave'] = np.round(self.std_G/10**9,2)\n self.calc_data['std_nu_ave'] = 
np.round(self.std_Nu,3)\n self.calc_data['std_sum_cnVn^2_b6'] = np.round(self.std_sum_cndVn_b6,8)\n self.calc_data['std_Ty0'] = np.round(self.std_Ty0/10**6,2)\n self.calc_data['std_Delta_Eb'] = np.round(self.std_dEb*self.J2eV,4)\n self.calc_data['std_Delta_sigma_ss'] = np.round(self.std_delta_ss/10**6,2)\n\n\nclass ss_edge_model_T:\n# calculate solid solution strengthening contribution for FCC/BCC CCAs\n# Edge dislocation models\n# FCC model: Varvenne-Leyson-Ghazisaeidi-Curtin 2016: http://dx.doi.org/10.1016/j.actamat.2016.09.046\n# BCC model: Maresca-Curtin 2020: https://doi.org/10.1016/j.actamat.2019.10.015\n# for simeple calculations \n def __init__(self,\n dislocation_properties,\n exp_conditions,\n comp_elements,\n elements_data,\n structure):\n \n # dislocation properties, alpha, f1, and f2\n self.alpha = float(dislocation_properties[0])\n self.f_tau = float(dislocation_properties[1])\n self.f_dEb = float(dislocation_properties[2])\n \n # experiment conditions, T, strain rate\n self.T = np.array(exp_conditions[0])\n self.ep = float(exp_conditions[1])\n self.ep0 = 10**4 #reference strain rate (/s)\n \n # some constants \n self.boltzmann_J = 1.38064852*10**(-23) #J/K\n self.J2eV=6.2415093433*10**18 # covert J to eV\n \n # elemental data\n self.elements_order = comp_elements.columns.tolist()\n self.compositions = comp_elements #pandas df\n self.elements_data = copy.deepcopy(elements_data) #json\n \n # convert unit for properties\n # Vn: Å^3 to m^3\n # b: Å to m\n # a: Å to m\n # E: GPa to Pa\n # G: GPa to Pa\n \n for element_i in self.elements_order:\n self.elements_data[element_i]['Vn'] = elements_data[element_i]['Vn']*10**(-30)\n self.elements_data[element_i]['b'] = elements_data[element_i]['b']*10**(-10)\n self.elements_data[element_i]['a'] = elements_data[element_i]['a']*10**(-10)\n self.elements_data[element_i]['E'] = elements_data[element_i]['E']*10**(9)\n self.elements_data[element_i]['G'] = elements_data[element_i]['G']*10**(9)\n\n \n \n self.structure = structure\n\n def FCC_V_L_G_C_2016_analytical(self):\n # FCC model: Varvenne-Leyson-Ghazisaeidi-Curtin 2016: http://dx.doi.org/10.1016/j.actamat.2016.09.046\n \n self.prefac_ty0 = 0.051\n self.Taylor_fac = 3.06\n self.prefac_dEb = 0.274\n # averaged properties\n cn_Vn = []\n cn_nu = []\n cn_G = []\n cn_b = []\n cn_E = []\n for element_i in self.elements_order:\n cn_Vn.append(self.compositions[element_i]/100*self.elements_data[element_i]['Vn'])\n cn_nu.append(self.compositions[element_i]/100*self.elements_data[element_i]['nu'])\n cn_G.append(self.compositions[element_i]/100*self.elements_data[element_i]['G'])\n cn_b.append(self.compositions[element_i]/100*self.elements_data[element_i]['b'])\n cn_E.append(self.compositions[element_i]/100*self.elements_data[element_i]['E'])\n\n self.aver_E = sum(cn_E);\n self.aver_V = sum(cn_Vn);\n self.aver_G = sum(cn_G)\n self.aver_Nu = sum(cn_nu)\n self.aver_b = sum(cn_b)\n \n i = 0;cn_Delta_Vn2=[]\n for element_i in self.elements_order:\n cn_Delta_Vn2.append(self.compositions[element_i]/100*(self.elements_data[element_i]['Vn']-self.aver_V)**2)\n \n self.sum_cndVn_b6 = sum(cn_Delta_Vn2)/self.aver_b**6;\n q_nu = ((1 + self.aver_Nu)/(1 - self.aver_Nu))\n \n self.dEb = self.prefac_dEb * self.f_dEb * self.alpha**(1/3) * self.aver_G * self.aver_b**3 * q_nu**(2/3) * self.sum_cndVn_b6**(1/3)\n self.Ty0 = self.prefac_ty0 * self.f_tau * self.alpha**(-1/3) * self.aver_G * q_nu**(4/3) * self.sum_cndVn_b6**(2/3)\n self.Ty0_pc = self.Taylor_fac * self.Ty0\n delta_ss_low_T = self.Ty0 * (1 - 
((self.boltzmann_J*self.T)/(self.dEb) * np.log(self.ep0/self.ep))**(2/3) )\n delta_ss_high_T = self.Ty0 * np.exp(-1/0.57 * self.boltzmann_J*self.T/self.dEb * np.log(self.ep0/self.ep) )\n self.delta_ss_low_T = delta_ss_low_T\n self.delta_ss_high_T = delta_ss_high_T\n Ty_threshold = self.Ty0/2\n \n self.delta_ss = self.Taylor_fac*np.array([delta_ss_low_T[i] if delta_ss_low_T[i]>=Ty_threshold[i] else delta_ss_high_T[i] for i in range(len(Ty_threshold))])\n \n \n def BCC_M_C_2020_analytical(self):\n # BCC model: Maresca-Curtin-2019: https://doi.org/10.1016/j.actamat.2019.10.015\n \n self.prefac_ty0 = 0.051\n self.Taylor_fac = 3.06\n self.prefac_dEb = 0.274\n # averaged properties\n cn_Vn = []\n cn_nu = []\n cn_G = []\n cn_b = []\n cn_E = []\n for element_i in self.elements_order:\n cn_Vn.append(self.compositions[element_i]/100*self.elements_data[element_i]['Vn'])\n cn_nu.append(self.compositions[element_i]/100*self.elements_data[element_i]['nu'])\n cn_G.append(self.compositions[element_i]/100*self.elements_data[element_i]['G'])\n cn_b.append(self.compositions[element_i]/100*self.elements_data[element_i]['b'])\n cn_E.append(self.compositions[element_i]/100*self.elements_data[element_i]['E'])\n\n self.aver_E = sum(cn_E);\n self.aver_V = sum(cn_Vn);\n self.aver_G = sum(cn_G)\n self.aver_Nu = sum(cn_nu)\n self.aver_b = sum(cn_b)\n \n i = 0;cn_Delta_Vn2=[]\n for element_i in self.elements_order:\n cn_Delta_Vn2.append(self.compositions[element_i]/100*(self.elements_data[element_i]['Vn']-self.aver_V)**2)\n \n self.sum_cndVn_b6 = sum(cn_Delta_Vn2)/self.aver_b**6;\n q_nu = ((1 + self.aver_Nu)/(1 - self.aver_Nu))\n \n self.dEb = self.prefac_dEb * self.f_dEb * self.alpha**(1/3) * self.aver_G * self.aver_b**3 * q_nu**(2/3) * self.sum_cndVn_b6**(1/3)\n self.Ty0 = self.prefac_ty0 * self.f_tau * self.alpha**(-1/3) * self.aver_G * q_nu**(4/3) * self.sum_cndVn_b6**(2/3)\n self.Ty0_pc = self.Taylor_fac * self.Ty0\n delta_ss_low_T = self.Ty0 * (1 - ((self.boltzmann_J*self.T)/(self.dEb) * np.log(self.ep0/self.ep))**(2/3) )\n delta_ss_high_T = self.Ty0 * np.exp(-1/0.57 * self.boltzmann_J*self.T/self.dEb * np.log(self.ep0/self.ep) )\n Ty_threshold = self.Ty0/2\n self.delta_ss_low_T = delta_ss_low_T\n self.delta_ss_high_T = delta_ss_high_T\n self.delta_ss = self.Taylor_fac*np.array([delta_ss_low_T[i] if delta_ss_low_T[i]>=Ty_threshold[i] else delta_ss_high_T[i] for i in range(len(Ty_threshold))])\n \n def calculate(self):\n if self.structure == 'fcc':\n self.FCC_V_L_G_C_2016_analytical()\n elif self.structure == 'bcc':\n self.BCC_M_C_2020_analytical()\n \n def writedata(self):\n self.calc_data = copy.deepcopy(self.compositions)\n self.calc_data['T'] = self.T\n self.calc_data['V_ave'] = self.aver_V*10**30\n self.calc_data['b_ave'] = np.round(self.aver_b*10**10,4)\n self.calc_data['E_ave'] = self.aver_E/10**9\n self.calc_data['G_ave'] = self.aver_G/10**9\n self.calc_data['nu_ave'] = self.aver_Nu\n self.calc_data['sum_cnVn^2_b6'] = np.round(self.sum_cndVn_b6,8)\n self.calc_data['Ty0'] = np.round(self.Ty0/10**6,2)\n self.calc_data['Delta_Eb'] = np.round(self.dEb*self.J2eV,4)\n self.calc_data['Delta_sigma_ss'] = np.round(self.delta_ss/10**6,2)\n\n\nclass ss_model_M_C_screw_pseudo_ternary:\n # BCC screw dislocation model: Maresca-Curtin 2019: https://doi.org/10.1016/j.actamat.2019.10.007\n # BCC_screw_Maresca-Curtin-2019\n # for pseudo-ternary prediction\n\n def __init__(self,\n inputdata,\n compositions,comp_pst\n ):\n\n # adjustable scalers\n self.kink_width = inputdata.adjustable_scalers['kink_width'] \n 
self.Delta_V_p_scaler = inputdata.adjustable_scalers['Delta_V_p_scaler'] \n self.Delta_E_p_scaler = inputdata.adjustable_scalers['Delta_E_p_scaler'] \n self.comp_pst = comp_pst\n # some constants\n self.boltzmann_J = 1.38064852*10**(-23) #J/K\n self.boltzmann_eV = 8.617333262145e-5 #eV\n self.J2eV = 6.2415093433*10**18 # covert J to eV \n self.eV2J = 1/self.J2eV\n \n # properties\n self.elements_order = compositions.columns.tolist()\n self.compositions = copy.deepcopy(compositions)\n self.element_data = copy.deepcopy(inputdata.element_data)\n cn_a = []\n cn_E_k = []\n cn_E_v = []\n cn_E_si = []\n cn_Delta_E_p = []\n cn_Delta_V_p = []\n \n for element_i in self.elements_order:\n cn_a.append(self.compositions[element_i]/100*self.element_data[element_i]['a'])\n cn_E_k.append(self.compositions[element_i]/100*self.element_data[element_i]['E_k'])\n cn_E_v.append(self.compositions[element_i]/100*self.element_data[element_i]['E_v'])\n cn_E_si.append(self.compositions[element_i]/100*self.element_data[element_i]['E_si'])\n cn_Delta_E_p.append(self.compositions[element_i]/100*self.element_data[element_i]['Delta_E_p']**2)\n cn_Delta_V_p.append(self.compositions[element_i]/100*self.element_data[element_i]['Delta_V_p'])\n\n self.a = sum(cn_a) * 10**(-10) \n self.a_p = self.a*np.sqrt(2/3) # Peierls spacing\n self.b = self.a*np.sqrt(3)/2 # burgers vector\n self.E_k = sum(cn_E_k) * self.eV2J \n self.E_v = sum(cn_E_v) * self.eV2J \n self.E_si = sum(cn_E_si) * self.eV2J \n self.Delta_E_p = np.sqrt(sum(cn_Delta_E_p)) * self.Delta_E_p_scaler * self.eV2J \n self.Delta_V_p = sum(cn_Delta_V_p) * self.Delta_E_p_scaler * self.eV2J /self.b\n \n # exp conditions\n self.T = float(inputdata.conditions['temperature'])\n self.strain_r = inputdata.conditions['strain_r'] # strain rate\n self.strain_r_0 = 10**4 # reference strain rate 10^4 /s\n \n\n self.Delta_H = self.boltzmann_J * self.T * np.log(self.strain_r_0/self.strain_r) #activation enthalpy\n self.w_k = self.kink_width * self.b # kink width \n self.xi_c = (1.083*self.E_k/self.Delta_E_p)**2*self.b # characteristic length of dislocation segment \n self.xi_si = self.xi_c * 15\n self.xi_v = self.xi_c * 7.5 \n \n def M_C_screw_model(self):\n \n # cross-kink\n # self-interstitial\n self.tau_xk_0_si = np.pi * self.E_si / (self.a_p * self.b * self.xi_si )\n self.tau_xk_si = self.tau_xk_0_si * (1-(self.Delta_H/self.E_si)**(2/3))\n # vacancy\n self.tau_xk_0_v = np.pi * self.E_v / (self.a_p * self.b * self.xi_v )\n self.tau_xk_v = self.tau_xk_0_v * (1-(self.Delta_H/self.E_v)**(2/3))\n # select the larger value from si or vacancy strengthening\n self.tau_xk_T = np.maximum(self.tau_xk_si,self.tau_xk_v)\n \n \n # kink glide\n self.tau_b = 1.08 * self.E_k / (self.a_p * self.b * self.xi_c)\n self.tau_k_0 = 6.3 * self.Delta_E_p / (self.a_p * self.b**2 * np.sqrt(self.w_k/self.b)) + self.tau_b\n self.Delta_E_k_0 = 1.37 * np.sqrt(self.w_k/self.b) * self.Delta_E_p \n self.tau_k_low_T = self.tau_b + \\\n (self.tau_k_0 - self.tau_b) / \\\n (np.exp(0.89*self.Delta_H/self.Delta_E_k_0 + \\\n 0.5*(self.Delta_H/self.Delta_E_k_0)**(1/4) + 0.6)-1)\n \n self.tau_k_high_T = self.tau_b - \\\n (self.tau_k_0 - self.tau_b) * self.w_k / (5.75 * self.xi_c) * \\\n (self.Delta_H/self.Delta_E_k_0 - np.log(5.75*self.xi_c/self.w_k+1))\n \n self.tau_k_T = np.array([self.tau_k_low_T[i] if (self.tau_k_low_T[i]-self.tau_b[i])/(self.tau_k_0[i] - self.tau_b[i])>= \n (1/(5.75 * self.xi_c[i]/self.w_k[i] + 1)) else \n self.tau_k_high_T[i] for i in range(len(self.a))])\n \n # Peierls\n self.Delta_E_b_p = 
(10*self.Delta_V_p*self.xi_c + 0.7 * self.E_k)**3/\\\n (20*self.Delta_V_p*self.xi_c + 0.7 * self.E_k)**2\n self.tau_p_0 = np.pi*self.Delta_V_p/(self.b*self.a_p ) + \\\n 0.44 * self.E_k / (self.b*self.a_p * self.xi_c) * \\\n ( 1 - 5* self.Delta_V_p*self.xi_c/(20*self.Delta_V_p*self.xi_c+0.7*self.E_k))\n \n self.tau_p_T = self.tau_p_0 * (1-(self.Delta_H/self.Delta_E_b_p)**(2/3))\n \n # min of Peierls and kink glide\n self.min_tau_k_tau_p_T = np.minimum(self.tau_p_T,self.tau_k_T)\n \n # total strength\n self.tau_tot_T = np.maximum(self.min_tau_k_tau_p_T,np.zeros(len(self.a))) + self.tau_xk_T\n \n def calculate(self):\n self.M_C_screw_model()\n \n def writedata(self):\n self.calc_data = copy.deepcopy(self.comp_pst)\n self.calc_data['a'] = np.round(self.a*10**10,4)\n self.calc_data['b'] = np.round(self.b*10**10,4)\n self.calc_data['a_p'] = np.round(self.a_p*10**10,4)\n self.calc_data['T'] = [self.T for i in range(len(self.calc_data))]\n self.calc_data['tau_y'] = np.round(self.tau_tot_T/10**6,2)\n self.calc_data['tau_k'] = np.round(self.tau_k_T/10**6,2)\n self.calc_data['tau_xk'] = np.round(self.tau_xk_T/10**6,2)\n self.calc_data['tau_p'] = np.round(self.tau_p_T/10**6,2)\n self.calc_data['E_k'] = np.round(self.E_k*self.J2eV,4)\n self.calc_data['E_v'] = np.round(self.E_v*self.J2eV,4)\n self.calc_data['E_si'] = np.round(self.E_si*self.J2eV,4)\n self.calc_data['Delta_E_p'] = np.round(self.Delta_E_p*(self.J2eV),4)\n\n\nclass ss_model_M_C_screw:\n # BCC screw dislocation model: Maresca-Curtin 2019: https://doi.org/10.1016/j.actamat.2019.10.007\n\n\n def __init__(self,\n inputdata\n ):\n\n # adjustable scalers\n self.kink_width = inputdata.adjustable_scalers['kink_width'] \n self.Delta_V_p_scaler = inputdata.adjustable_scalers['Delta_V_p_scaler'] \n self.Delta_E_p_scaler = inputdata.adjustable_scalers['Delta_E_p_scaler'] \n \n # some constants\n self.boltzmann_J = 1.38064852*10**(-23) #J/K\n self.boltzmann_eV = 8.617333262145e-5 #eV\n self.J2eV = 6.2415093433*10**18 # covert J to eV \n self.eV2J = 1/self.J2eV\n \n # properties\n self.a = inputdata.properties['a'] * 10**(-10) #m # lattice constant\n self.a_p = self.a*np.sqrt(2/3) # Peierls spacing\n self.b = self.a*np.sqrt(3)/2\n \n self.E_k = inputdata.properties['E_k'] * self.eV2J # J # kink formation energy\n self.Delta_E_p = self.Delta_E_p_scaler * inputdata.properties['Delta_E_p'] * self.eV2J # J # screw-solute interaction\n self.Delta_V_p = self.Delta_V_p_scaler * inputdata.properties['Delta_V_p'] * self.eV2J /self.b# J/b # Peierls barrier\n \n self.E_si = inputdata.properties['E_si'] * self.eV2J #J # formation energy of self-interstitial\n self.E_v = inputdata.properties['E_v'] * self.eV2J #J # formation energy of vacancy \n \n \n # exp conditions\n self.T = np.arange(inputdata.conditions['temperature']['min'],\n inputdata.conditions['temperature']['max']+inputdata.conditions['temperature']['inc'],\n inputdata.conditions['temperature']['inc'])\n self.strain_r = inputdata.conditions['strain_r'] # strain rate\n self.strain_r_0 = 10**4 # reference strain rate 10^4 /s\n \n\n self.Delta_H = self.boltzmann_J * self.T * np.log(self.strain_r_0/self.strain_r) #activation enthalpy\n self.w_k = self.kink_width * self.b # kink width \n self.xi_c = (1.083*self.E_k/self.Delta_E_p)**2*self.b # characteristic length of dislocation segment \n self.xi_si = self.xi_c * 15\n self.xi_v = self.xi_c * 7.5 \n def M_C_screw_model(self):\n \n # cross-kink\n # self-interstitial\n self.tau_xk_0_si = np.pi * self.E_si / (self.a_p * self.b * self.xi_si )\n self.tau_xk_si = 
self.tau_xk_0_si * (1-(self.Delta_H/self.E_si)**(2/3))\n # vacancy\n self.tau_xk_0_v = np.pi * self.E_v / (self.a_p * self.b * self.xi_v )\n self.tau_xk_v = self.tau_xk_0_v * (1-(self.Delta_H/self.E_v)**(2/3))\n # select the larger value from si or vacancy strengthening\n self.tau_xk_T = np.array([self.tau_xk_si[i] if self.tau_xk_si[i]>=self.tau_xk_v[i] else \n self.tau_xk_v[i] for i in range(len(self.T)) ])\n \n \n # kink glide\n self.tau_b = 1.08 * self.E_k / (self.a_p * self.b * self.xi_c)\n self.tau_k_0 = 6.3 * self.Delta_E_p / (self.a_p * self.b**2 * np.sqrt(self.w_k/self.b)) + self.tau_b\n self.Delta_E_k_0 = 1.37 * np.sqrt(self.w_k/self.b) * self.Delta_E_p \n self.tau_k_low_T = self.tau_b + \\\n (self.tau_k_0 - self.tau_b) / \\\n (np.exp(0.89*self.Delta_H/self.Delta_E_k_0 + \\\n 0.5*(self.Delta_H/self.Delta_E_k_0)**(1/4) + 0.6)-1)\n \n self.tau_k_high_T = self.tau_b - \\\n (self.tau_k_0 - self.tau_b) * self.w_k / (5.75 * self.xi_c) * \\\n (self.Delta_H/self.Delta_E_k_0 - np.log(5.75*self.xi_c/self.w_k+1))\n \n self.tau_k_T = np.array([self.tau_k_low_T[i] if (self.tau_k_low_T[i]-self.tau_b)/(self.tau_k_0 - self.tau_b)>= \n (1/(5.75 * self.xi_c/self.w_k + 1)) else \n self.tau_k_high_T[i] for i in range(len(self.T))])\n \n # Peierls\n self.Delta_E_b_p = (10*self.Delta_V_p*self.xi_c + 0.7 * self.E_k)**3/\\\n (20*self.Delta_V_p*self.xi_c + 0.7 * self.E_k)**2\n self.tau_p_0 = np.pi*self.Delta_V_p/(self.b*self.a_p ) + \\\n 0.44 * self.E_k / (self.b*self.a_p * self.xi_c) * \\\n ( 1 - 5* self.Delta_V_p*self.xi_c/(20*self.Delta_V_p*self.xi_c+0.7*self.E_k))\n \n self.tau_p_T = self.tau_p_0 * (1-(self.Delta_H/self.Delta_E_b_p)**(2/3))\n \n # min of Peierls and kink glide\n self.min_tau_k_tau_p_T = np.minimum(self.tau_p_T,self.tau_k_T)\n \n # total strength\n self.tau_tot_T = np.maximum(self.min_tau_k_tau_p_T,np.zeros(len(self.T))) + self.tau_xk_T\n def calculate(self):\n self.M_C_screw_model()\n def writedata(self):\n self.calc_data = pd.DataFrame(data={})\n self.calc_data['T'] = self.T\n self.calc_data['tau_y'] = np.round(self.tau_tot_T/1e6,2)\n \n\nclass Suzuki_model_RWASM_T:\n \n def __init__(self,\n element_data,\n experiment_conditions,\n adjustable_scalers):\n \n # \n self.element_data = element_data\n # conditions\n self.strain_r = experiment_conditions['strain_r']\n self.T_range = np.arange(experiment_conditions['temperature']['min'],\n experiment_conditions['temperature']['max']+experiment_conditions['temperature']['inc'],\n experiment_conditions['temperature']['inc'])\n # constants\n self.boltzmann_J = 1.380649e-23\n self.boltzmann_eV = 8.617333262145e-5\n self.J2eV = self.boltzmann_eV/self.boltzmann_J\n self.eV2J = 1/self.J2eV\n self.Debye = 5 * 10**(12) # Debye frequency /s\n \n #adjustables\n self.rho = adjustable_scalers['dislocation_density']\n self.tau_i_exponent = adjustable_scalers['tau_i_exponent']\n self.trial_kappa_range = np.arange(adjustable_scalers['trial_kappa']['min'],\n adjustable_scalers['trial_kappa']['max']+adjustable_scalers['trial_kappa']['inc'],\n adjustable_scalers['trial_kappa']['inc'])\n self.trial_tau_k = adjustable_scalers['trial_tau_k'] * 1e6\n self.kink_width = adjustable_scalers['kink_width']\n\n \n \n def L(self,kappa_i):\n f = lambda x: np.exp(-x**2/2)/np.sqrt(2*np.pi)\n y = integrate.quad(f,kappa_i,np.inf)\n return self.b/(3*y[0]*self.c) \n \n def tau_y_optimize(self,x):\n self.tau_j = lambda kappa_i: (self.E_int + self.E_vac)/(4*self.b*self.L(kappa_i))\n \n self.Delta_V = lambda x: 3 * x[1]**2 * self.E_w**2 * self.c / (2*x[0]**2*self.a_p*self.b**2) + 
\\\n x[0]**2 * self.a_p**3 * self.b**4 * self.lambda_k**2 / (6*x[1]**2 * self.E_w**2 * self.c)\n self.S = lambda x: 18 * x[1]**2 * self.E_w**2 * self.c *self.kT /(self.a_p**3 * self.b**4 * self.lambda_k**2) * \\\n np.log( (5*np.pi*self.kT)**2 * self.Debye * self.a_p * self.b /((self.G*self.b*self.Delta_V(x))**2 * self.strain_r) )\n self.R = lambda kappa_i: 27 * kappa_i**4 * self.E_w**4 * self.c**2 / (self.a_p**4 * self.b**6 * self.lambda_k**2)\n # x[0] = tau_k\n # x[1] = kappa_i\n #self.tau_k_opt_func = lambda x: x[0]**4 + x[0]*self.S(x) - self.R(x[1]) \n self.tau_y_funcs = lambda x: (self.tau_j(x[1]) + x[0], x[0]**4 + x[0]*self.S(x) - self.R(x[1]))\n self.res = optimize.root(self.tau_y_funcs, x)\n self.tau_k_value = self.res.x[0]\n self.tau_y_value = (self.res.x[0]) + self.tau_j(self.res.x[1])\n self.tau_j_value = self.tau_j(self.res.x[1])\n self.L_value = self.L(self.res.x[1])\n \n \n def phenomelogical_model_tau_y(self): \n # tau_y = ( sum( tau_y_i**(1/q) ) )**q\n self.tau_y_tot = sum(self.tau_y_i**(1/self.tau_i_exponent))**self.tau_i_exponent\n \n def calculate(self):\n tau_y_tot_T = []\n tau_y_i_T_list = []\n tau_k_i_T_list = []\n tau_j_i_T_list = []\n self.elements_kappa_i_convergence_record = pd.DataFrame(data={})\n for element_symbol in self.element_data:\n self.elements_kappa_i_convergence_record[element_symbol] = {}\n for T in self.T_range:\n self.T = T\n self.kT = self.boltzmann_J * self.T\n # record tau_y for every element\n tau_y_i = []\n tau_k_i = []\n tau_j_i = []\n for element_symbol in self.element_data:\n element_i = self.element_data[element_symbol]\n #print(element_i)\n # calculate the yield strength contribution for every element\n # according to concentration\n # setup properties for every element\n self.E_f_v = element_i['E_f_v'] * self.eV2J #J\n self.E_f_si = element_i['E_f_si'] * self.eV2J # J\n self.a_0 = element_i['a']*1e-10#element_i['a_0'] * 10**(-10) # unit: m\n self.E_w = element_i['E_w'] * self.eV2J#element_i['E_w'] * self.eV2J # J\n self.c = element_i['c']\n self.G = element_i['G'] * 10**9 # Pa\n self.nu = element_i['nu']\n self.b = self.a_0 * np.sqrt(3) / 2\n self.a_p = self.a_0 * np.sqrt(2/3)\n #self.E_vac = 0.6 * self.eV2J / 10**(-10) # test NbTiZr\n #self.E_int = 0.9 * self.eV2J / 10**(-10) # test NbTiZr\n self.E_vac = 0.707 * self.E_f_v /self.b + self.G * self.b**2 / (np.pi*(1-self.nu)) * np.log(1.5)\n self.E_int = 0.707 * self.E_f_si /self.b + self.G * self.b**2 / (np.pi*(1-self.nu)) * np.log(1.5)\n self.lambda_k = self.b * self.kink_width\n \n # record the optimization results for post-processing\n tau_k_list = []\n tau_j_list = []\n tau_y_list = []\n optimized_kappa_list = []\n \n # start to optimize tau_k for every trial kappa\n for trial_kappa_i in (self.trial_kappa_range):\n \n x_trial = [self.trial_tau_k, trial_kappa_i]\n self.tau_y_optimize(x_trial)\n tau_k_list.append(self.tau_k_value/1e6)\n tau_j_list.append(self.tau_j_value/1e6)\n tau_y_list.append(self.tau_y_value/1e6)\n optimized_kappa_list.append((self.res.x[1]))\n \n # optimize tau_y over kappa, this finds the true tau_y for each element\n optimized_kappa_sort, tau_y_sort, tau_j_sort, tau_k_sort = zip(*sorted(zip(optimized_kappa_list, tau_y_list, tau_j_list, tau_k_list)))\n '''\n # polyfit tau_y over kappa_i, then find minimum of the polyfit\n # this is because the kappa_list and tau_y_list are discrete points and maybe noisy, \n # need a smooth curve to find min\n polyfit = np.polyfit(optimized_kappa_sort, tau_y_sort,9)\n npfit = np.poly1d(polyfit)\n guess_kappa = 
(self.trial_kappa_range[0]+self.trial_kappa_range[1])/2\n optimized_kappa = optimize.fmin_slsqp(npfit,guess_kappa,\n bounds=([(self.trial_kappa_range[0],self.trial_kappa_range[-1])]))\n if self.T == 300:\n plt.plot(optimized_kappa_sort, tau_y_sort)\n print('optimized_kappa:',optimized_kappa)\n plt.ylim(0,500)\n plt.plot(self.trial_kappa_range,npfit(self.trial_kappa_range))\n # record tau_y for every element\n tau_y_i.append(npfit(optimized_kappa[0]))''' # doesn't work very well, need a better way. np.polyfit gets weird shape\n index = tau_y_sort.index(min(tau_y_sort))\n tau_y_i.append(min(tau_y_sort)) # just live with that...\n \n tau_k_i.append((tau_k_sort[index]))\n tau_j_i.append((tau_j_sort[index]))\n # record for convergence check\n self.elements_kappa_i_convergence_record[element_symbol]['kappa_'+str(self.T)] = None # strange thing here, only by setting None it records the first row of data\n self.elements_kappa_i_convergence_record[element_symbol]['tau_y_'+str(self.T)] = None\n self.elements_kappa_i_convergence_record[element_symbol]['kappa_'+str(self.T)] = optimized_kappa_sort\n self.elements_kappa_i_convergence_record[element_symbol]['tau_y_'+str(self.T)] = tau_y_sort\n # tau_k_i, tau_j_i dont add up to tau_y_tot\n tau_y_i_T_list.append(tau_y_i)\n tau_k_i_T_list.append(tau_k_i)\n tau_j_i_T_list.append(tau_j_i)\n\n self.tau_y_i = np.array(tau_y_i)\n self.phenomelogical_model_tau_y()\n tau_y_tot_T.append(self.tau_y_tot)\n self.tau_y_tot_T = np.array(tau_y_tot_T)\n self.tau_y_i_T_list = np.array(tau_y_i_T_list).transpose()\n self.tau_k_i_T_list = np.array(tau_k_i_T_list).transpose()\n self.tau_j_i_T_list = np.array(tau_j_i_T_list).transpose()\n def writedata(self):\n self.calc_data = pd.DataFrame(data=\n {\n \"T\": self.T_range,\n \"tau_y\": np.round(self.tau_y_tot_T,2)\n })\n for i, element_symbol in zip(range(len(self.element_data)),self.element_data):\n self.calc_data[\"tau_y_\"+str(element_symbol)] = np.round(self.tau_y_i_T_list[i],2)\n self.calc_data[\"tau_k_\"+str(element_symbol)] = np.round(self.tau_k_i_T_list[i],2)\n self.calc_data[\"tau_j_\"+str(element_symbol)] = np.round(self.tau_j_i_T_list[i],2)\n\n\n\nclass Suzuki_model_RWASM_ternary:\n \n def __init__(self,\n element_data,\n comp_elements,comp_pst,\n experiment_conditions,\n adjustable_scalers):\n \n # \n self.element_composition = comp_elements\n self.element_data = element_data\n self.comp_pst = comp_pst\n # conditions\n self.strain_r = experiment_conditions['strain_r']\n self.T = experiment_conditions['temperature']\n \n # constants\n self.boltzmann_J = 1.380649e-23\n self.boltzmann_eV = 8.617333262145e-5\n self.J2eV = self.boltzmann_eV/self.boltzmann_J\n self.eV2J = 1/self.J2eV\n self.Debye = 5 * 10**(12) # Debye frequency /s\n self.kT = self.boltzmann_J * self.T\n #adjustables\n self.rho = adjustable_scalers['dislocation_density']\n self.tau_i_exponent = adjustable_scalers['tau_i_exponent']\n self.trial_kappa_range = np.arange(adjustable_scalers['trial_kappa']['min'],\n adjustable_scalers['trial_kappa']['max']+adjustable_scalers['trial_kappa']['inc'],\n adjustable_scalers['trial_kappa']['inc'])\n self.trial_tau_k = adjustable_scalers['trial_tau_k'] * 1e6\n self.kink_width = adjustable_scalers['kink_width']\n\n \n \n def L(self,kappa_i):\n f = lambda x: np.exp(-x**2/2)/np.sqrt(2*np.pi)\n y = integrate.quad(f,kappa_i,np.inf)\n return self.b/(3*y[0]*self.c) \n \n def tau_y_optimize(self,x):\n self.tau_j = lambda kappa_i: (self.E_int + self.E_vac)/(4*self.b*self.L(kappa_i))\n \n self.Delta_V = lambda x: 3 * x[1]**2 * 
self.E_w**2 * self.c / (2*x[0]**2*self.a_p*self.b**2) + \\\n x[0]**2 * self.a_p**3 * self.b**4 * self.lambda_k**2 / (6*x[1]**2 * self.E_w**2 * self.c)\n self.S = lambda x: 18 * x[1]**2 * self.E_w**2 * self.c *self.kT /(self.a_p**3 * self.b**4 * self.lambda_k**2) * \\\n np.log( (5*np.pi*self.kT)**2 * self.Debye * self.a_p * self.b /((self.G*self.b*self.Delta_V(x))**2 * self.strain_r) )\n self.R = lambda kappa_i: 27 * kappa_i**4 * self.E_w**4 * self.c**2 / (self.a_p**4 * self.b**6 * self.lambda_k**2)\n # x[0] = tau_k\n # x[1] = kappa_i\n #self.tau_k_opt_func = lambda x: x[0]**4 + x[0]*self.S(x) - self.R(x[1]) \n self.tau_y_funcs = lambda x: (self.tau_j(x[1]) + x[0], x[0]**4 + x[0]*self.S(x) - self.R(x[1]))\n self.res = optimize.root(self.tau_y_funcs, x)\n self.tau_k_value = self.res.x[0]\n self.tau_y_value = (self.res.x[0]) + self.tau_j(self.res.x[1])\n self.tau_j_value = self.tau_j(self.res.x[1])\n self.L_value = self.L(self.res.x[1])\n \n \n def phenomelogical_model_tau_y(self): \n # tau_y = ( sum( tau_y_i**(1/q) ) )**q\n self.tau_y_tot = [sum(tau_y_i**(1/self.tau_i_exponent))**self.tau_i_exponent for tau_y_i in self.tau_y_i_pst]\n \n def calculate(self):\n self.tau_y_i_pst = [] # record all compositions shape(len(composition),len(element))\n tau_y_i_pst = []\n for i in range(len(self.element_composition)):\n \n # record tau_y for every element at composition i\n tau_y_i = []\n for element_symbol in self.element_composition.columns:\n element_i = self.element_data[element_symbol]\n #print(element_i)\n # calculate the yield strength contribution for every element\n # according to concentration\n # setup properties for every element\n self.c = self.element_composition[element_symbol][i]/100\n if self.c == 0:\n tau_y_i.append(0)\n continue\n self.E_f_v = element_i['E_f_v'] * self.eV2J #J\n self.E_f_si = element_i['E_f_si'] * self.eV2J # J\n self.a_0 = element_i['a']*1e-10#element_i['a_0'] * 10**(-10) # unit: m\n self.E_w = element_i['E_w'] * self.eV2J#element_i['E_w'] * self.eV2J # J\n \n self.G = element_i['G'] * 10**9 # Pa\n self.nu = element_i['nu']\n self.b = self.a_0 * np.sqrt(3) / 2\n self.a_p = self.a_0 * np.sqrt(2/3)\n #self.E_vac = 0.6 * self.eV2J / 10**(-10) # test NbTiZr\n #self.E_int = 0.9 * self.eV2J / 10**(-10) # test NbTiZr\n self.E_vac = 0.707 * self.E_f_v /self.b + self.G * self.b**2 / (np.pi*(1-self.nu)) * np.log(1.5)\n self.E_int = 0.707 * self.E_f_si /self.b + self.G * self.b**2 / (np.pi*(1-self.nu)) * np.log(1.5)\n self.lambda_k = self.b * self.kink_width\n \n # record the optimization results for post-processing\n tau_k_list = []\n tau_j_list = []\n tau_y_list = []\n optimized_kappa_list = []\n \n # start to optimize tau_k for every trial kappa\n for trial_kappa_i in (self.trial_kappa_range):\n \n x_trial = [self.trial_tau_k, trial_kappa_i]\n self.tau_y_optimize(x_trial)\n tau_k_list.append(self.tau_k_value/1e6)\n tau_j_list.append(self.tau_j_value/1e6)\n tau_y_list.append(self.tau_y_value/1e6)\n optimized_kappa_list.append((self.res.x[1]))\n \n # optimize tau_y over kappa, this finds the true tau_y for each element\n optimized_kappa_sort, tau_y_sort, tau_j_sort, tau_k_sort = zip(*sorted(zip(optimized_kappa_list, tau_y_list, tau_j_list, tau_k_list)))\n\n '''\n # polyfit tau_y over kappa_i, then find minimum of the polyfit\n # this is because the kappa_list and tau_y_list are discrete points and maybe noisy, \n # need a smooth curve to find min\n polyfit = np.polyfit(optimized_kappa_sort, tau_y_sort,9)\n npfit = np.poly1d(polyfit)\n guess_kappa = 
(self.trial_kappa_range[0]+self.trial_kappa_range[1])/2\n optimized_kappa = optimize.fmin_slsqp(npfit,guess_kappa,\n bounds=([(self.trial_kappa_range[0],self.trial_kappa_range[-1])]))\n if self.T == 300:\n plt.plot(optimized_kappa_sort, tau_y_sort)\n print('optimized_kappa:',optimized_kappa)\n plt.ylim(0,500)\n plt.plot(self.trial_kappa_range,npfit(self.trial_kappa_range))\n # record tau_y for every element\n tau_y_i.append(npfit(optimized_kappa[0]))''' # doesn't work very well, need a better way. np.polyfit gets weird shape\n # tau_y_i contains tau_y values for every element at composition i\n tau_y_i.append(min(tau_y_sort)) # just live with that...\n \n self.tau_y_i = np.array(tau_y_i)\n \n tau_y_i_pst.append(self.tau_y_i)\n self.tau_y_i_pst = np.array(tau_y_i_pst)\n self.phenomelogical_model_tau_y()\n \n def writedata(self):\n self.calc_data = copy.deepcopy(self.comp_pst)\n for idx in range(len(self.element_composition.columns)):\n self.calc_data['tau_y_'+self.element_composition.columns[idx]] = np.round(self.tau_y_i_pst.transpose()[idx],2)\n self.calc_data['T'] = np.ones(len(self.calc_data)) * self.T\n self.calc_data['tau_y'] = np.round(self.tau_y_tot,2)\n\n "
] | [
[
"numpy.log",
"numpy.maximum",
"numpy.minimum",
"numpy.sqrt",
"numpy.arange",
"scipy.optimize.root",
"pandas.DataFrame",
"numpy.round",
"numpy.exp",
"scipy.stats.norm.rvs",
"scipy.integrate.quad",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
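The `phenomelogical_model_tau_y` method in the record above reduces to the superposition rule tau_y = ( Σ_i tau_y_i^(1/q) )^q with q = `tau_i_exponent`. A minimal NumPy sketch of just that rule; the input values below are hypothetical, not taken from the dataset:

```python
import numpy as np

def mix_tau_y(tau_y_i, q):
    """Combine per-element strength contributions: (sum_i tau_i**(1/q))**q."""
    tau_y_i = np.asarray(tau_y_i, dtype=float)
    return np.sum(tau_y_i ** (1.0 / q)) ** q

# Hypothetical per-element contributions in MPa:
print(mix_tau_y([120.0, 80.0, 45.0], q=1.8))
```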
mindspore-ai/akg | [
"c9e922219c5a2153f3d83ffe9d68707ff90368a0"
] | [
"tests/common/test_run/gpu/csr_div_run.py"
] | [
"import numpy as np\nimport scipy.sparse\n\nimport akg\nfrom akg import tvm\nfrom akg import composite\nfrom akg.utils import CUDA\nfrom tests.common.base import get_rtol_atol\nfrom tests.common.gen_random import random_gaussian\nfrom tests.common.tensorio import compare_tensor\nfrom akg.utils import kernel_exec as utils\nfrom akg.utils.result_analysis import target_profiling\nfrom akg.utils.format_transform import to_tvm_nd_array\n\ndef csr_div(dense, sparse_data, col_idx, row_idx, shape, target=CUDA):\n assert target == CUDA, \"only supports GPU\"\n return composite.csr_div((row_idx, col_idx, sparse_data, dense), {\"dense_shape\": shape})\n\ndef gen_data(shape1, shape2, dtype1, dtype2):\n dense = random_gaussian(shape1).astype(dtype1)\n sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1)\n expect = sparse_data.multiply(np.divide(1, np.broadcast_to(dense, shape2)))\n return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data\n \ndef csr_div_run(shape1, shape2, dtype1, dtype2, poly_sch=True, attrs=None):\n if not attrs:\n attrs = {\"target\": \"cuda\"}\n # gen data\n op_attrs = [shape2]\n dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2)\n output_shape = expect.shape\n attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0]\n attrs[\"is_csr\"] = True\n\n mod = utils.op_build_test(csr_div, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], \n [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch,\n attrs=attrs, kernel_name=\"csr_div\")\n\n if len(expect.shape) == 0:\n output_shape = (1, )\n output = np.zeros(output_shape, expect.dtype)\n output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect)\n atol, rtol = get_rtol_atol(\"csr_div\", dtype1)\n res = compare_tensor(output, expect, rtol=rtol, atol=atol)\n print(\"Test {}\".format(\"Pass\" if res else \"Failed\"))\n target_name = attrs[\"target\"].split()[0]\n if not res:\n mod_source = mod\n if target_name != \"llvm\":\n mod_source = mod.imported_modules[0]\n print(\"Error {}:========================\".format(target_name))\n print(mod_source.get_source())\n raise AssertionError(\"Test fail\")\n if attrs[\"profiling\"]:\n args_list = to_tvm_nd_array(\n [dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0))\n target_profiling(mod, *args_list, target=target_name, repeat_time=attrs[\"repeat_times\"])\n return (dense, sparse_data, col_idx, row_idx), output, expect, res"
] | [
[
"numpy.zeros",
"numpy.broadcast_to"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
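`gen_data` in the record above forms the reference result by multiplying a CSR matrix with the elementwise reciprocal of a broadcast dense array. A small self-contained sketch of that construction; the shapes and seed are placeholders:

```python
import numpy as np
import scipy.sparse

rng = np.random.default_rng(0)
dense = rng.standard_normal((1, 8)).astype(np.float32)   # broadcasts over rows
sparse = scipy.sparse.rand(4, 8, density=0.3, format='csr', dtype=np.float32)
# Elementwise division of the sparse matrix by the broadcast dense array:
expect = sparse.multiply(np.divide(1, np.broadcast_to(dense, (4, 8))))
# Downstream, only the stored nonzeros (expect.data) are compared.
print(sparse.data.shape, type(expect))
```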
Mirofil/AutoDL-Projects | [
"e7ee9fe27e5c5561a4b9fd1c1ee185677ef30893"
] | [
"lib/models/cell_searchs/nb101/optimizers/darts/train_search_no_higher.py"
] | [
"# python ./lib/models/cell_searchs/nb101/optimizers/darts/train_search_no_higher.py --seed=90 --mode=reptile --inner_steps=4 --inner_steps_same_batch=True\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.utils\nimport torchvision.datasets as dset\n\nfrom pathlib import Path\nlib_dir = (Path(__file__).parent / '..' / '..').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom genotypes import count_ops\nfrom utils import genotype_width, genotype_depth\n\n\nlib_dir = (Path(__file__).parent / '..' / '..' / '..' / '..'/ '..' / '..' /'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\n\n\nfrom nasbench_analysis import eval_darts_one_shot_model_in_nasbench as naseval\nfrom nasbench_analysis.search_spaces.search_space_1 import SearchSpace1\nfrom nasbench_analysis.search_spaces.search_space_2 import SearchSpace2\nfrom nasbench_analysis.search_spaces.search_space_3 import SearchSpace3\nfrom optimizers.darts import utils\nfrom optimizers.darts.architect import Architect\nfrom optimizers.darts.model_search import Network\n\nfrom optimizers.sotl_utils import wandb_auth\nimport wandb\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom datasets import get_datasets, get_nas_search_loaders\n\nfrom nasbench import api\nfrom copy import deepcopy\nfrom nasbench_analysis.utils import NasbenchWrapper\nfrom sotl_utils import approx_hessian, exact_hessian, Analyzer\n\nparser = argparse.ArgumentParser(\"cifar\")\nparser.add_argument('--data', type=str, default='../data', help='location of the darts corpus')\nparser.add_argument('--batch_size', type=int, default=96, help='batch size')\nparser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')\nparser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, help='momentum')\nparser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')\nparser.add_argument('--report_freq', type=float, default=50, help='report frequency')\nparser.add_argument('--gpu', type=int, default=0, help='gpu device id')\nparser.add_argument('--epochs', type=int, default=50, help='num of training epochs')\nparser.add_argument('--init_channels', type=int, default=16, help='num of init channels')\nparser.add_argument('--layers', type=int, default=9, help='total number of layers')\nparser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')\nparser.add_argument('--cutout', action='store_true', default=False, help='use cutout')\nparser.add_argument('--cutout_length', type=int, default=16, help='cutout length')\nparser.add_argument('--cutout_prob', type=float, default=1.0, help='cutout probability')\nparser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')\nparser.add_argument('--save', type=str, default='EXP', help='experiment name')\nparser.add_argument('--seed', type=int, default=2, help='random_ws seed')\nparser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')\nparser.add_argument('--train_portion', type=float, default=1, help='portion of training darts')\nparser.add_argument('--unrolled',type=lambda x: False if x in [\"False\", \"false\", \"\", \"None\", False, None] else True, default=False, help='use 
one-step unrolled validation loss')\nparser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\nparser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\nparser.add_argument('--output_weights', type=bool, default=True, help='Whether to use weights on the output nodes')\nparser.add_argument('--search_space', choices=['1', '2', '3'], default='1')\nparser.add_argument('--debug', action='store_true', default=False, help='run only for some batches')\nparser.add_argument('--warm_start', type=int, default=0,\n help='Warm start one-shot model before starting architecture updates.')\nparser.add_argument('--steps_per_epoch', type=float, default=None, help='weight decay for arch encoding')\nparser.add_argument('--inner_steps', type=int, default=100, help='Steps for inner loop of bilevel')\nparser.add_argument('--bilevel_train_steps', type=int, default=None, help='Steps for inner loop of bilevel')\n\nparser.add_argument('--higher_method' , type=str, choices=['val', 'sotl', \"val_multiple\", \"sotl_v2\"], default='sotl', help='Whether to take meta gradients with respect to SoTL or val set (which might be the same as training set if they were merged)')\nparser.add_argument('--merge_train_val', type=lambda x: False if x in [\"False\", \"false\", \"\", \"None\", False, None] else True, default=False, help='portion of training data')\nparser.add_argument('--perturb_alpha', type=str, default=None, help='portion of training data')\nparser.add_argument('--epsilon_alpha', type=float, default=0.3, help='max epsilon for alpha')\nparser.add_argument('--higher_loop' , type=str, choices=['bilevel', 'joint'], default=\"bilevel\", help='Whether to make a copy of network for the Higher rollout or not. If we do not copy, it will be as in joint training')\n\nparser.add_argument('--hessian', type=lambda x: False if x in [\"False\", \"false\", \"\", \"None\", False, None] else True, default=True,\n help='Warm start one-shot model before starting architecture updates.')\nparser.add_argument('--dataset', type=str, default=\"cifar10\",\n help='Warm start one-shot model before starting architecture updates.')\n\nparser.add_argument('--total_samples', type=int, default=None, help='Number of total samples in dataset. 
Useful for limiting Cifar5m')\nparser.add_argument('--data_path' , type=str,default=\"$TORCH_HOME/cifar.python\", help='Path to dataset')\nparser.add_argument('--mmap', type=str, default=\"r\", help='Whether to mmap cifar5m')\n\nparser.add_argument('--mode' , type=str, default=\"higher\", choices=[\"higher\", \"reptile\"], help='Number of steps to do in the inner loop of bilevel meta-learning')\nparser.add_argument('--inner_steps_same_batch' , type=lambda x: False if x in [\"False\", \"false\", \"\", \"None\", False, None] else True, default=False, help='Number of steps to do in the inner loop of bilevel meta-learning')\nparser.add_argument('--train_acc_threshold' , type=float, default=None, help='Number of steps to do in the inner loop of bilevel meta-learning')\nparser.add_argument('--train_loss_threshold' , type=float, default=None, help='Number of steps to do in the inner loop of bilevel meta-learning')\n\nargs = parser.parse_args()\n\nargs.save = 'experiments/darts/search_space_{}/search-no_higher-{}-{}-{}'.format(args.search_space, args.save,\n args.seed,\n args.search_space)\n\ntry:\n utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))\nexcept Exception as e:\n print(f\"Couldnt create exp dir due to {e}\")\n\n# Dump the config of the run\nwith open(os.path.join(args.save, 'config.json'), 'w') as fp:\n json.dump(args.__dict__, fp)\n\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\nfh = logging.FileHandler(os.path.join(args.save, 'log.txt'))\nfh.setFormatter(logging.Formatter(log_format))\nlogging.getLogger().addHandler(fh)\nlogger = logging.getLogger()\n\nCIFAR_CLASSES = 10\n\ndef get_torch_home():\n if \"TORCH_HOME\" in os.environ:\n return os.environ[\"TORCH_HOME\"]\n elif os.path.exists('/storage/.torch/'):\n return os.path.join('/storage/', \".torch\")\n\n elif \"HOME\" in os.environ:\n return os.path.join(os.environ[\"HOME\"], \".torch\")\n else:\n raise ValueError(\n \"Did not find HOME in os.environ. 
\"\n \"Please at least setup the path of HOME or TORCH_HOME \"\n \"in the environment.\"\n )\n\ndef main():\n # Select the search space to search in\n if args.search_space == '1':\n search_space = SearchSpace1()\n elif args.search_space == '2':\n search_space = SearchSpace2()\n elif args.search_space == '3':\n search_space = SearchSpace3()\n else:\n raise ValueError('Unknown search space')\n\n if not torch.cuda.is_available():\n logging.info('no gpu device available')\n sys.exit(1)\n\n np.random.seed(args.seed)\n torch.cuda.set_device(args.gpu)\n cudnn.benchmark = True\n torch.manual_seed(args.seed)\n cudnn.enabled = True\n torch.cuda.manual_seed(args.seed)\n logging.info('gpu device = %d' % args.gpu)\n logging.info(\"args = %s\", args)\n logger = logging.getLogger()\n\n wandb_auth()\n run = wandb.init(project=\"NAS\", group=f\"Search_Cell_nb101\", reinit=True)\n wandb.config.update(args)\n \n criterion = nn.CrossEntropyLoss()\n criterion = criterion.cuda()\n model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion, output_weights=args.output_weights,\n steps=search_space.num_intermediate_nodes, search_space=search_space)\n model = model.cuda()\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n\n optimizer = torch.optim.SGD(\n model.weights_parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n train_transform, valid_transform = utils._data_transforms_cifar10(args)\n \n if args.dataset == \"cifar10\" or args.dataset == \"cifar100\":\n if args.dataset == \"cifar10\":\n train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)\n elif args.dataset == \"cifar100\":\n train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)\n\n num_train = len(train_data)\n indices = list(range(num_train))\n split = int(np.floor(args.train_portion * num_train))\n\n train_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),\n pin_memory=True)\n\n valid_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),\n pin_memory=True)\n \n elif args.dataset == \"cifar5m\":\n train_data, valid_data, xshape, class_num = get_datasets(args.dataset, args.data_path, -1, mmap=args.mmap, total_samples=args.total_samples)\n _, train_queue, valid_queue = get_nas_search_loaders(train_data, valid_data, args.dataset, 'configs/nas-benchmark/', \n (args.batch_size, args.batch_size), workers=0, \n epochs=args.epochs, determinism=\"all\", \n merge_train_val = False, merge_train_val_and_use_test = False, \n extra_split = True, valid_ratio=1, use_only_train=True, xargs=args)\n train_queue.sampler.auto_counter = True\n valid_queue.sampler.auto_counter = True\n \n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, float(args.epochs), eta_min=args.learning_rate_min)\n if args.merge_train_val:\n valid_queue = train_queue\n architect = Architect(model, args)\n \n # if os.path.exists(Path(args.save) / \"checkpoint.pt\"):\n # checkpoint = torch.load(Path(args.save) / \"checkpoint.pt\")\n # optimizer.load_state_dict(checkpoint[\"w_optimizer\"])\n # architect.optimizer.load_state_dict(checkpoint[\"a_optimizer\"])\n # model.load_state_dict(checkpoint[\"model\"])\n # scheduler.load_state_dict(checkpoint[\"w_scheduler\"])\n # start_epoch = checkpoint[\"epoch\"]\n # 
all_logs = checkpoint[\"all_logs\"]\n\n # else:\n # print(f\"Path at {Path(args.save) / 'checkpoint.pt'} does not exist\")\n # start_epoch=0\n # all_logs=[]\n all_logs = []\n try:\n nasbench = NasbenchWrapper(os.path.join(get_torch_home() ,'nasbench_only108.tfrecord'))\n\n except:\n nasbench = NasbenchWrapper(os.path.join(get_torch_home() ,'nasbench_full.tfrecord'))\n\n for epoch in tqdm(range(args.epochs), desc = \"Iterating over epochs\"):\n scheduler.step()\n lr = scheduler.get_lr()[0]\n # increase the cutout probability linearly throughout search\n train_transform.transforms[-1].cutout_prob = args.cutout_prob * epoch / (args.epochs - 1)\n logging.info('epoch %d lr %e cutout_prob %e', epoch, lr,\n train_transform.transforms[-1].cutout_prob)\n\n # Save the one shot model architecture weights for later analysis\n arch_filename = os.path.join(args.save, 'one_shot_architecture_{}.obj'.format(epoch))\n with open(arch_filename, 'wb') as filehandler:\n numpy_tensor_list = []\n for tensor in model.arch_parameters():\n numpy_tensor_list.append(tensor.detach().cpu().numpy())\n pickle.dump(numpy_tensor_list, filehandler)\n\n # Save the entire one-shot-model\n filepath = os.path.join(args.save, 'one_shot_model_{}.obj'.format(epoch))\n torch.save(model.state_dict(), filepath)\n\n logging.info(f'architecture : {numpy_tensor_list}')\n \n if args.perturb_alpha:\n epsilon_alpha = 0.03 + (args.epsilon_alpha - 0.03) * epoch / args.epochs\n logging.info('epoch %d epsilon_alpha %e', epoch, epsilon_alpha)\n else:\n epsilon_alpha = None\n \n # training\n if args.mode == \"higher\":\n train_acc, train_obj, ev = train(train_queue=train_queue, valid_queue=valid_queue, network=model, architect=architect, \n criterion=criterion, w_optimizer=optimizer, a_optimizer=architect.optimizer,\n logger=logger, inner_steps=args.inner_steps, epoch=epoch, steps_per_epoch=args.steps_per_epoch, epsilon_alpha=epsilon_alpha,\n perturb_alpha=utils.Random_alpha, args=args, warm_start=args.warm_start)\n elif args.mode == \"reptile\":\n train_acc, train_obj = train_reptile(train_queue=train_queue, valid_queue=valid_queue, network=model, architect=architect, \n criterion=criterion, w_optimizer=optimizer, a_optimizer=architect.optimizer,\n logger=logger, inner_steps=args.inner_steps, epoch=epoch, steps_per_epoch=args.steps_per_epoch, epsilon_alpha=epsilon_alpha,\n perturb_alpha=utils.Random_alpha, args=args)\n \n if (args.train_acc_threshold is not None and train_acc > args.train_acc_threshold) or (args.train_loss_threshold is not None and train_obj < args.train_loss_threshold):\n logging.info(f\"Switching from SoTL to ValLoss optimization at epoch={epoch}\")\n valid_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),\n pin_memory=True)\n args.inner_steps = 1\n args.higher_method = \"val\"\n args.merge_train_val = False\n if args.perturb_alpha:\n args.higher_loop = \"bilevel\" # Wouldnt do anything otherwise\n \n\n logging.info('train_acc %f', train_acc)\n\n # validation\n valid_acc, valid_obj = infer(valid_queue, model, criterion)\n logging.info('valid_acc %f', valid_acc)\n \n genotype_perf, _, _, _ = naseval.eval_one_shot_model(config=args.__dict__,\n model=arch_filename, nasbench=nasbench)\n print(f\"Genotype performance: {genotype_perf}\" )\n if False and args.hessian and torch.cuda.get_device_properties(0).total_memory < 9147483648:\n eigenvalues = approx_hessian(network=model, val_loader=valid_queue, criterion=criterion, 
xloader=valid_queue, args=args)\n # eigenvalues = exact_hessian(network=model, val_loader=valid_queue, criterion=criterion, xloader=valid_queue, epoch=epoch, logger=logger, args=args)\n elif args.hessian and torch.cuda.get_device_properties(0).total_memory > 15147483648:\n # eigenvalues = exact_hessian(network=model, val_loader=valid_queue, criterion=criterion, xloader=valid_queue, epoch=epoch, logger=logger, args=args)\n # _data_loader = deepcopy(train_queue)\n # input, target = next(iter(_data_loader))\n # input_search, target_search = next(iter(_data_loader))\n # analyser = Analyzer(args, model)\n\n # from torch.autograd import Variable\n # input = Variable(input, requires_grad=False).cuda()\n # target = Variable(target, requires_grad=False).cuda()\n\n # # get gradient information\n # #param_grads = [p.grad for p in model.parameters() if p.grad is not None]\n # #param_grads = torch.cat([x.view(-1) for x in param_grads])\n # #param_grads = param_grads.cpu().data.numpy()\n # #grad_norm = np.linalg.norm(param_grads)\n\n # #gradient_vector = torch.cat([x.view(-1) for x in gradient_vector]) \n # #grad_norm = LA.norm(gradient_vector.cpu())\n # #logging.info('\\nCurrent grad norm based on Train Dataset: %.4f',\n # # grad_norm)\n\n # H = analyser.compute_Hw(input, target, input_search, target_search,\n # lr, a_optimizer, False)\n # g = analyser.compute_dw(input, target, input_search, target_search,\n # lr, a_optimizer, False)\n # g = torch.cat([x.view(-1) for x in g])\n\n # del _data_loader\n\n # state = {'epoch': epoch,\n # 'H': H.cpu().data.numpy().tolist(),\n # 'g': g.cpu().data.numpy().tolist(),\n # #'g_train': float(grad_norm),\n # #'eig_train': eigenvalue,\n # }\n\n\n\n # # early stopping\n # from numpy import linalg as LA\n # print(f\"Hessian: {H}\")\n # ev = max(LA.eigvals(H.cpu().data.numpy()))\n eigenvalues = ev\n else:\n eigenvalues = None\n adj_matrix, ops_list = naseval.extract_arch(config=args.__dict__,\n model=arch_filename, nasbench=nasbench)\n \n ops_count = count_ops(ops_list)\n width = genotype_width(adj_matrix)\n depth = genotype_depth(adj_matrix)\n ops_count = count_ops(ops_list)\n print(f\"Adj matrix: {adj_matrix}, ops_list: {ops_list}, width: {width}, depth: {depth}, ops_count: {ops_count}\")\n wandb_log = {\"train_acc\":train_acc, \"train_loss\":train_obj, \"val_acc\": valid_acc, \"valid_loss\":valid_obj,\n \"depth\":depth, \"width\":width, \"ops_count\":ops_count,\n \"search.final.cifar10\": genotype_perf, \"epoch\":epoch, \"eigval\":eigenvalues}\n all_logs.append(wandb_log)\n wandb.log(wandb_log)\n \n utils.save_checkpoint2({\"model\":model.state_dict(), \"w_optimizer\":optimizer.state_dict(), \n \"a_optimizer\":architect.optimizer.state_dict(), \"w_scheduler\":scheduler.state_dict(), \"epoch\": epoch, \n \"all_logs\":all_logs}, \n Path(args.save) / \"checkpoint.pt\")\n # utils.save(model, os.path.join(args.save, 'weights.pt'))\n\n logging.info('STARTING EVALUATION')\n test, valid, runtime, params = naseval.eval_one_shot_model(config=args.__dict__,\n model=arch_filename, nasbench=nasbench)\n index = 0\n logging.info('TEST ERROR: %.3f | VALID ERROR: %.3f | RUNTIME: %f | PARAMS: %d'\n % (test,\n valid,\n runtime,\n params)\n )\n wandb.log({\"test_error\":test, \"valid_error\": valid, \"runtime\":runtime, \"params\":params})\n for log in tqdm(all_logs, desc = \"Logging search logs\"):\n wandb.log(log)\n\n\ndef train(train_queue, valid_queue, network, architect, criterion, w_optimizer, a_optimizer, logger=None, inner_steps=100, epoch=0, \n steps_per_epoch=None, 
perturb_alpha=None, epsilon_alpha=None, args=None, warm_start=None):\n \n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n\n train_iter = iter(train_queue)\n valid_iter = iter(valid_queue)\n search_loader_iter = zip(train_iter, valid_iter)\n for data_step, ((base_inputs, base_targets), (arch_inputs, arch_targets)) in tqdm(enumerate(search_loader_iter), total = round(len(train_queue)/inner_steps)):\n if steps_per_epoch is not None and data_step > steps_per_epoch:\n break\n network.train()\n n = base_inputs.size(0)\n\n base_inputs = base_inputs.cuda()\n base_targets = base_targets.cuda(non_blocking=True)\n\n # get a random minibatch from the search queue with replacement\n input_search, target_search = next(iter(valid_queue))\n input_search = input_search.cuda()\n target_search = target_search.cuda(non_blocking=True)\n \n all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets = format_input_data(base_inputs, base_targets, arch_inputs, arch_targets, \n search_loader_iter, inner_steps=inner_steps, epoch=epoch, args=args)\n\n network.zero_grad()\n architect.optimizer.zero_grad()\n\n model_init = deepcopy(network.state_dict())\n w_optim_init = deepcopy(w_optimizer.state_dict())\n arch_grads = [torch.zeros_like(p) for p in network.arch_parameters()]\n for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in tqdm(enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)), \n desc = \"Unrolling bilevel loop\", total=len(all_base_inputs)/(inner_steps if not args.inner_steps_same_batch else 1), disable=True):\n if data_step < 2 and inner_step < 2:\n print(f\"Base targets in the inner loop at inner_step={inner_step}, step={data_step}: {base_targets[0:10]}\")\n # if args.perturb_alpha:\n # # print('before softmax', model.arch_parameters())\n # network.softmax_arch_parameters()\n \n # # perturb on alpha\n # # print('after softmax', model.arch_parameters())\n # perturb_alpha(network, base_inputs, base_targets, epsilon_alpha)\n # w_optimizer.zero_grad()\n # architect.optimizer.zero_grad()\n # # print('afetr perturb', model.arch_parameters())\n logits = network(base_inputs)\n base_loss = criterion(logits, base_targets)\n base_loss.backward()\n # if data_step == 0 and inner_step == 0:\n # print(f\"BEFORE: {network.arch_parameters()}\")\n w_optimizer.step()\n # if data_step == 0 and inner_step == 0:\n # print(f\"AFTER: {network.arch_parameters()}\")\n w_optimizer.zero_grad()\n # if args.perturb_alpha:\n # network.restore_arch_parameters()\n if args.higher_method in [\"val_multiple\", \"val\"]:\n # if data_step < 2 and epoch < 1:\n # print(f\"Arch grads during unrolling from last step: {arch_grads}\")\n logits = network(arch_inputs)\n arch_loss = criterion(logits, arch_targets)\n arch_loss.backward()\n with torch.no_grad():\n\n for g1, g2 in zip(arch_grads, network.arch_parameters()):\n g1.add_(g2)\n \n network.zero_grad()\n a_optimizer.zero_grad()\n w_optimizer.zero_grad()\n # if data_step < 2 and epoch < 1:\n # print(f\"Arch grads during unrolling: {arch_grads}\")\n \n if args.higher_loop == \"joint\":\n prec1, prec5 = utils.accuracy(logits, base_targets, topk=(1, 5))\n objs.update(base_loss.item(), n)\n top1.update(prec1.data, n)\n top5.update(prec5.data, n)\n \n if args.higher_method in [\"val_multiple\", \"val\"] and data_step < 3:\n print(f\"Arch grads after unrolling: {arch_grads}\")\n with torch.no_grad():\n for g, p in zip(arch_grads, network.arch_parameters()):\n p.grad = g\n \n \n if warm_start is None 
or (warm_start is not None and epoch >= warm_start):\n\n a_optimizer.step()\n a_optimizer.zero_grad()\n \n w_optimizer.zero_grad()\n architect.optimizer.zero_grad()\n \n # Restore original model state before unrolling and put in the new arch parameters\n if args.higher_loop == \"bilevel\":\n new_arch = deepcopy(network._arch_parameters)\n network.load_state_dict(model_init)\n w_optimizer.load_state_dict(w_optim_init)\n for p1, p2 in zip(network._arch_parameters, new_arch):\n p1.data = p2.data\n for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):\n if args.higher_method == \"sotl_v2\":\n base_inputs, base_targets = arch_inputs, arch_targets ## Use train set for the unrolling to compute hypergradients, then forget the training and train weights only using a separate set\n if data_step in [0, 1] and inner_step < 3 and epoch % 5 == 0:\n logger.info(f\"Doing weight training for real in at inner_step={inner_step}, step={data_step}: {base_targets[0:10]}\")\n if args.bilevel_train_steps is not None and inner_step >= args.bilevel_train_steps :\n break\n if args.perturb_alpha:\n # print('before softmax', model.arch_parameters())\n network.softmax_arch_parameters()\n # perturb on alpha\n # print('after softmax', model.arch_parameters())\n perturb_alpha(network, base_inputs, base_targets, epsilon_alpha)\n w_optimizer.zero_grad()\n architect.optimizer.zero_grad()\n # print('afetr perturb', model.arch_parameters())\n logits = network(base_inputs)\n base_loss = criterion(logits, base_targets)\n network.zero_grad()\n base_loss.backward()\n w_optimizer.step()\n w_optimizer.zero_grad()\n \n if args.perturb_alpha:\n network.restore_arch_parameters()\n # print('after restore', model.arch_parameters())\n \n n = base_inputs.size(0)\n\n prec1, prec5 = utils.accuracy(logits, base_targets, topk=(1, 5))\n\n objs.update(base_loss.item(), n)\n top1.update(prec1.data, n)\n top5.update(prec5.data, n)\n\n if data_step % args.report_freq == 0 :\n logging.info('train %03d %e %f %f', data_step, objs.avg, top1.avg, top5.avg)\n if 'debug' in args.save:\n break\n logging.info('train final %e %f %f', objs.avg, top1.avg, top5.avg)\n \n if torch.cuda.get_device_properties(0).total_memory > 15147483648:\n _data_loader = deepcopy(train_queue)\n input, target = next(iter(_data_loader))\n input_search, target_search = next(iter(_data_loader))\n input_search, target_search = input_search.cuda(), target_search.cuda()\n analyser = Analyzer(args, network)\n\n from torch.autograd import Variable\n input = Variable(input, requires_grad=False).cuda()\n target = Variable(target, requires_grad=False).cuda()\n\n # get gradient information\n #param_grads = [p.grad for p in model.parameters() if p.grad is not None]\n #param_grads = torch.cat([x.view(-1) for x in param_grads])\n #param_grads = param_grads.cpu().data.numpy()\n #grad_norm = np.linalg.norm(param_grads)\n\n #gradient_vector = torch.cat([x.view(-1) for x in gradient_vector]) \n #grad_norm = LA.norm(gradient_vector.cpu())\n #logging.info('\\nCurrent grad norm based on Train Dataset: %.4f',\n # grad_norm)\n\n H = analyser.compute_Hw(input, target, input_search, target_search,\n 0, a_optimizer, False)\n g = analyser.compute_dw(input, target, input_search, target_search,\n 0, a_optimizer, False)\n g = torch.cat([x.view(-1) for x in g])\n\n del _data_loader\n\n state = {'epoch': epoch,\n 'H': H.cpu().data.numpy().tolist(),\n 'g': g.cpu().data.numpy().tolist(),\n #'g_train': 
float(grad_norm),\n #'eig_train': eigenvalue,\n }\n\n\n\n # early stopping\n from numpy import linalg as LA\n print(f\"Hessian: {H}\")\n ev = max(LA.eigvals(H.cpu().data.numpy()))\n else:\n ev = 0\n\n return top1.avg, objs.avg, ev\n\n\ndef train_reptile(train_queue, valid_queue, network, architect, criterion, w_optimizer, a_optimizer, logger=None, inner_steps=100, epoch=0, \n steps_per_epoch=None, perturb_alpha=None, epsilon_alpha=None, args=None):\n \n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n\n train_iter = iter(train_queue)\n valid_iter = iter(valid_queue)\n search_loader_iter = zip(train_iter, valid_iter)\n for data_step, ((base_inputs, base_targets), (arch_inputs, arch_targets)) in tqdm(enumerate(search_loader_iter), \n total = round(len(train_queue)/(inner_steps if not args.inner_steps_same_batch else 1))):\n if steps_per_epoch is not None and data_step > steps_per_epoch:\n break\n network.train()\n n = base_inputs.size(0)\n\n base_inputs = base_inputs.cuda()\n base_targets = base_targets.cuda(non_blocking=True)\n\n # get a random minibatch from the search queue with replacement\n input_search, target_search = next(iter(valid_queue))\n input_search = input_search.cuda()\n target_search = target_search.cuda(non_blocking=True)\n \n all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets = format_input_data(base_inputs, base_targets, arch_inputs, arch_targets, \n search_loader_iter, inner_steps=inner_steps, epoch=epoch, args=args)\n\n network.zero_grad()\n\n model_init = deepcopy(network.state_dict())\n w_optim_init = deepcopy(w_optimizer.state_dict())\n arch_grads = [torch.zeros_like(p) for p in network.arch_parameters()]\n for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in tqdm(enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)), desc = \"Unrolling bilevel loop\", total=inner_steps, disable=True):\n if data_step < 2 and inner_step < 2:\n print(f\"Base targets in the inner loop at inner_step={inner_step}, step={data_step}: {base_targets[0:10]}\")\n \n logits = network(base_inputs)\n base_loss = criterion(logits, base_targets)\n base_loss.backward()\n\n w_optimizer.step()\n a_optimizer.step()\n\n w_optimizer.zero_grad()\n a_optimizer.zero_grad()\n \n \n a_optimizer.zero_grad()\n \n w_optimizer.zero_grad()\n architect.optimizer.zero_grad()\n \n # Restore original model state before unrolling and put in the new arch parameters\n \n # new_arch = deepcopy(network._arch_parameters)\n # network.load_state_dict(model_init)\n # for p1, p2 in zip(network._arch_parameters, new_arch):\n # p1.data = p2.data\n \n \n # for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):\n # if args.higher_method == \"sotl_v2\":\n # base_inputs, base_targets = arch_inputs, arch_targets ## Use train set for the unrolling to compute hypergradients, then forget the training and train weights only using a separate set\n # if data_step in [0, 1] and inner_step < 3 and epoch % 5 == 0:\n # logger.info(f\"Doing weight training for real in at inner_step={inner_step}, step={data_step}: {base_targets[0:10]}\")\n # if args.bilevel_train_steps is not None and inner_step >= args.bilevel_train_steps :\n # break\n # if args.perturb_alpha:\n # # print('before softmax', model.arch_parameters())\n # network.softmax_arch_parameters()\n \n # # perturb on alpha\n # # print('after softmax', model.arch_parameters())\n # 
perturb_alpha(network, base_inputs, base_targets, epsilon_alpha)\n # w_optimizer.zero_grad()\n # architect.optimizer.zero_grad()\n # # print('afetr perturb', model.arch_parameters())\n # logits = network(base_inputs)\n # base_loss = criterion(logits, base_targets)\n # network.zero_grad()\n # base_loss.backward()\n # w_optimizer.step()\n # w_optimizer.zero_grad()\n \n # if args.perturb_alpha:\n # network.restore_arch_parameters()\n # # print('after restore', model.arch_parameters())\n \n # n = base_inputs.size(0)\n\n # prec1, prec5 = utils.accuracy(logits, base_targets, topk=(1, 5))\n\n # objs.update(base_loss.item(), n)\n # top1.update(prec1.data, n)\n # top5.update(prec5.data, n)\n\n # if data_step % args.report_freq == 0:\n # logging.info('train %03d %e %f %f', data_step, objs.avg, top1.avg, top5.avg)\n # if 'debug' in args.save:\n # break\n\n return top1.avg, objs.avg\n\n\ndef infer(valid_queue, model, criterion):\n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n model.eval()\n\n with torch.no_grad():\n for step, (input, target) in enumerate(valid_queue):\n if step > 101:\n break\n input = input.cuda()\n target = target.cuda(non_blocking=True)\n\n logits = model(input)\n loss = criterion(logits, target)\n\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = input.size(0)\n objs.update(loss.item(), n)\n top1.update(prec1.item(), n)\n top5.update(prec5.item(), n)\n\n if step % args.report_freq == 0:\n logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)\n if args.debug:\n break\n\n return top1.avg, objs.avg\n\ndef format_input_data(base_inputs, base_targets, arch_inputs, arch_targets, search_loader_iter, inner_steps, args, epoch=1000, loader_type=\"train-val\"):\n base_inputs, base_targets = base_inputs.cuda(non_blocking=True), base_targets.cuda(non_blocking=True)\n arch_inputs, arch_targets = arch_inputs.cuda(non_blocking=True), arch_targets.cuda(non_blocking=True)\n if args.higher_method == \"sotl\":\n arch_inputs, arch_targets = None, None\n all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets = [base_inputs], [base_targets], [arch_inputs], [arch_targets]\n for extra_step in range(inner_steps-1):\n if args.inner_steps_same_batch and epoch >= args.warm_start:\n all_base_inputs.append(base_inputs)\n all_base_targets.append(base_targets)\n all_arch_inputs.append(arch_inputs)\n all_arch_targets.append(arch_targets)\n continue # If using the same batch, we should not try to query the search_loader_iter for more samples\n try:\n if loader_type == \"train-val\" or loader_type == \"train-train\":\n (extra_base_inputs, extra_base_targets), (extra_arch_inputs, extra_arch_targets)= next(search_loader_iter)\n else:\n extra_base_inputs, extra_base_targets = next(search_loader_iter)\n extra_arch_inputs, extra_arch_targets = None, None\n except Exception as e:\n continue\n\n extra_base_inputs, extra_base_targets = extra_base_inputs.cuda(non_blocking=True), extra_base_targets.cuda(non_blocking=True)\n if extra_arch_inputs is not None and extra_arch_targets is not None:\n extra_arch_inputs, extra_arch_targets = extra_arch_inputs.cuda(non_blocking=True), extra_arch_targets.cuda(non_blocking=True)\n \n all_base_inputs.append(extra_base_inputs)\n all_base_targets.append(extra_base_targets)\n all_arch_inputs.append(extra_arch_inputs)\n all_arch_targets.append(extra_arch_targets)\n\n return all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.cuda.get_device_properties",
"torch.nn.CrossEntropyLoss",
"torch.cuda.set_device",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.zeros_like",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.floor",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
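The `train` function in the record above follows a bilevel pattern: snapshot the weights, unroll several weight updates while accumulating architecture gradients, apply one architecture step, then restore the snapshot so only the architecture parameters keep the update. A toy sketch of that control flow with scalar stand-ins (`w`, `alpha`, and the quadratic loss are illustrative, not the repo's `Network`/`Architect`):

```python
import torch
from copy import deepcopy

w = torch.nn.Parameter(torch.zeros(3))      # stand-in for network weights
alpha = torch.nn.Parameter(torch.ones(2))   # stand-in for arch parameters
w_opt = torch.optim.SGD([w], lr=0.1)
a_opt = torch.optim.SGD([alpha], lr=0.01)

w_init = deepcopy(w.data)                   # snapshot taken before the rollout
arch_grad = torch.zeros_like(alpha)
for _ in range(4):                          # inner weight-training steps
    loss = ((w - alpha.sum()) ** 2).sum()
    loss.backward()
    w_opt.step()
    w_opt.zero_grad()
    arch_grad += alpha.grad                 # accumulate the arch gradient
    alpha.grad = None

alpha.grad = arch_grad
a_opt.step()                                # single architecture update
a_opt.zero_grad()
w.data.copy_(w_init)                        # bilevel: discard rolled-out weights
```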
ougx/modflow6 | [
"70a056d977bfeb7b077eddab084f0836c9d1eff7"
] | [
"autotest/test_gwf_csub_sub01.py"
] | [
"import os\nimport numpy as np\n\ntry:\n import pymake\nexcept:\n msg = \"Error. Pymake package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install https://github.com/modflowpy/pymake/zipball/master\"\n raise Exception(msg)\n\ntry:\n import flopy\nexcept:\n msg = \"Error. FloPy package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install flopy\"\n raise Exception(msg)\n\nfrom framework import testing_framework, running_on_CI\nfrom simulation import Simulation\n\npaktest = \"csub\"\nbudtol = 1e-2\n\nex = [\"csub_sub01a\", \"csub_sub01b\"]\nexdirs = []\nfor s in ex:\n exdirs.append(os.path.join(\"temp\", s))\nddir = \"data\"\n\ncompression_indices = [None, True]\n\nndcell = [19] * len(ex)\n\n# run all examples on Travis\n# continuous_integration = [True for idx in range(len(exdirs))]\n# the delay bed problems only run on the development version of MODFLOW-2005\n# set travis to True when version 1.13.0 is released\ncontinuous_integration = [True for idx in range(len(exdirs))]\n\n# set replace_exe to None to use default executable\nreplace_exe = None\n\n# static model data\n# spatial discretization\nnlay, nrow, ncol = 1, 1, 3\nshape3d = (nlay, nrow, ncol)\nsize3d = nlay * nrow * ncol\ndelr, delc = 1.0, 1.0\ntop = 0.0\nbotm = [-100.0]\n\n# temporal discretization\nnper = 1\nperlen = [1000.0 for i in range(nper)]\nnstp = [100 for i in range(nper)]\ntsmult = [1.05 for i in range(nper)]\nsteady = [False for i in range(nper)]\n\nstrt = 0.0\nstrt6 = 1.0\nhnoflo = 1e30\nhdry = -1e30\nhk = 1e6\nlaytyp = [0]\nS = 1e-4\nsy = 0.0\n\nnouter, ninner = 1000, 300\nhclose, rclose, relax = 1e-6, 1e-6, 0.97\n\ntdis_rc = []\nfor idx in range(nper):\n tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))\n\nib = 1\n\nc = []\nc6 = []\nfor j in range(0, ncol, 2):\n c.append([0, 0, j, strt, strt])\n c6.append([(0, 0, j), strt])\ncd = {0: c}\ncd6 = {0: c6}\n\n# sub data\nndb = 1\nnndb = 0\ncc = 100.0\ncr = 1.0\nvoid = 0.82\ntheta = void / (1.0 + void)\nkv = 0.025\nsgm = 0.0\nsgs = 0.0\nini_stress = 1.0\nthick = [1.0]\nsfe = cr * thick[0]\nsfv = cc * thick[0]\nlnd = [0]\nldnd = [0]\ndp = [[kv, cr, cc]]\nss = S / (100.0 - thick[0])\n\nds15 = [0, 0, 0, 2052, 0, 0, 0, 0, 0, 0, 0, 0]\nds16 = [0, 0, 0, 100, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n\n\ndef build_model(idx, ws):\n name = ex[idx]\n\n sim = flopy.mf6.MFSimulation(\n sim_name=name, version=\"mf6\", exe_name=\"mf6\", sim_ws=ws\n )\n # create tdis package\n tdis = flopy.mf6.ModflowTdis(\n sim, time_units=\"DAYS\", nper=nper, perioddata=tdis_rc\n )\n\n # create iterative model solution\n ims = flopy.mf6.ModflowIms(\n sim,\n print_option=\"SUMMARY\",\n outer_dvclose=hclose,\n outer_maximum=nouter,\n under_relaxation=\"NONE\",\n inner_maximum=ninner,\n inner_dvclose=hclose,\n rcloserecord=rclose,\n linear_acceleration=\"CG\",\n scaling_method=\"NONE\",\n reordering_method=\"NONE\",\n relaxation_factor=relax,\n )\n\n # create gwf model\n gwf = flopy.mf6.ModflowGwf(sim, modelname=name)\n\n dis = flopy.mf6.ModflowGwfdis(\n gwf,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n filename=\"{}.dis\".format(name),\n )\n\n # initial conditions\n ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename=\"{}.ic\".format(name))\n\n # node property flow\n npf = flopy.mf6.ModflowGwfnpf(\n gwf, save_flows=False, icelltype=laytyp, k=hk, k33=hk\n )\n # storage\n sto = flopy.mf6.ModflowGwfsto(\n gwf,\n save_flows=False,\n iconvert=laytyp,\n 
ss=0.0,\n sy=sy,\n storagecoefficient=True,\n transient={0: True},\n )\n\n # chd files\n chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(\n gwf, maxbound=len(c6), stress_period_data=cd6, save_flows=False\n )\n\n # csub files\n ci = compression_indices[idx]\n if ci is None:\n sub6 = [\n [\n 0,\n (0, 0, 1),\n \"delay\",\n ini_stress,\n thick[0],\n 1.0,\n cc,\n cr,\n theta,\n kv,\n ini_stress,\n ]\n ]\n else:\n sub6 = [\n [\n 0,\n (0, 0, 1),\n \"delay\",\n ini_stress,\n thick[0],\n 1.0,\n 230.258658761733000,\n 2.302586587617330,\n theta,\n kv,\n ini_stress,\n ]\n ]\n\n opth = \"{}.csub.obs\".format(name)\n cnvgpth = \"{}.csub.cnvg.csv\".format(name)\n csub = flopy.mf6.ModflowGwfcsub(\n gwf,\n head_based=True,\n compression_indices=ci,\n print_input=True,\n save_flows=True,\n package_convergence_filerecord=cnvgpth,\n effective_stress_lag=True,\n ndelaycells=ndcell[idx],\n ninterbeds=1,\n beta=0.0,\n cg_ske_cr=ss,\n packagedata=sub6,\n )\n orecarray = {}\n orecarray[\"csub_obs.csv\"] = [\n (\"tcomp\", \"compaction-cell\", (0, 0, 1)),\n (\"sk\", \"sk\", (0, 0, 1)),\n ]\n csub_obs_package = csub.obs.initialize(\n filename=opth, digits=10, print_input=True, continuous=orecarray\n )\n\n # output control\n oc = flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=\"{}.cbc\".format(name),\n head_filerecord=\"{}.hds\".format(name),\n headprintrecord=[(\"COLUMNS\", 10, \"WIDTH\", 15, \"DIGITS\", 6, \"GENERAL\")],\n saverecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n printrecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n )\n\n return sim\n\n\ndef get_model(idx, dir):\n\n # build MODFLOW 6 files\n ws = dir\n sim = build_model(idx, ws)\n\n # build MODFLOW-2005 files\n ws = os.path.join(dir, \"mf6-regression\")\n mc = build_model(idx, ws)\n\n return sim, mc\n\n\ndef eval_sub(sim):\n print(\"evaluating subsidence...\")\n\n # MODFLOW 6 total compaction results\n fpth = os.path.join(sim.simpath, \"csub_obs.csv\")\n try:\n tc = np.genfromtxt(fpth, names=True, delimiter=\",\")\n except:\n assert False, 'could not load data from \"{}\"'.format(fpth)\n\n # comparison total compaction results\n fpth = os.path.join(sim.simpath, \"mf6-regression\", \"csub_obs.csv\")\n try:\n tc0 = np.genfromtxt(fpth, names=True, delimiter=\",\")\n except:\n assert False, 'could not load data from \"{}\"'.format(fpth)\n\n # calculate maximum absolute error\n diff = tc[\"TCOMP\"] - tc0[\"TCOMP\"]\n diffmax = np.abs(diff).max()\n dtol = 1e-6\n msg = \"maximum absolute total-compaction difference ({}) \".format(diffmax)\n\n # write summary\n fpth = os.path.join(\n sim.simpath, \"{}.comp.cmp.out\".format(os.path.basename(sim.name))\n )\n f = open(fpth, \"w\")\n line = \"{:>15s}\".format(\"TOTIM\")\n line += \" {:>15s}\".format(\"CSUB\")\n line += \" {:>15s}\".format(\"MF\")\n line += \" {:>15s}\".format(\"DIFF\")\n f.write(line + \"\\n\")\n for i in range(diff.shape[0]):\n line = \"{:15g}\".format(tc0[\"time\"][i])\n line += \" {:15g}\".format(tc[\"TCOMP\"][i])\n line += \" {:15g}\".format(tc0[\"TCOMP\"][i])\n line += \" {:15g}\".format(diff[i])\n f.write(line + \"\\n\")\n f.close()\n\n if diffmax > dtol:\n sim.success = False\n msg += \"exceeds {}\".format(dtol)\n assert diffmax < dtol, msg\n else:\n sim.success = True\n print(\" \" + msg)\n\n # compare budgets\n cbc_compare(sim)\n\n return\n\n\n# compare cbc and lst budgets\ndef cbc_compare(sim):\n # open cbc file\n fpth = os.path.join(\n sim.simpath, \"{}.cbc\".format(os.path.basename(sim.name))\n )\n cobj = flopy.utils.CellBudgetFile(fpth, precision=\"double\")\n\n # build 
list of cbc data to retrieve\n avail = cobj.get_unique_record_names()\n cbc_bud = []\n bud_lst = []\n for t in avail:\n if isinstance(t, bytes):\n t = t.decode()\n t = t.strip()\n if paktest in t.lower():\n cbc_bud.append(t)\n bud_lst.append(\"{}_IN\".format(t))\n bud_lst.append(\"{}_OUT\".format(t))\n\n # get results from listing file\n fpth = os.path.join(\n sim.simpath, \"{}.lst\".format(os.path.basename(sim.name))\n )\n budl = flopy.utils.Mf6ListBudget(fpth)\n names = list(bud_lst)\n d0 = budl.get_budget(names=names)[0]\n dtype = d0.dtype\n nbud = d0.shape[0]\n d = np.recarray(nbud, dtype=dtype)\n for key in bud_lst:\n d[key] = 0.0\n\n # get data from cbc dile\n kk = cobj.get_kstpkper()\n times = cobj.get_times()\n for idx, (k, t) in enumerate(zip(kk, times)):\n for text in cbc_bud:\n qin = 0.0\n qout = 0.0\n v = cobj.get_data(kstpkper=k, text=text)[0]\n if isinstance(v, np.recarray):\n vt = np.zeros(size3d, dtype=float)\n for jdx, node in enumerate(v[\"node\"]):\n vt[node - 1] += v[\"q\"][jdx]\n v = vt.reshape(shape3d)\n for kk in range(v.shape[0]):\n for ii in range(v.shape[1]):\n for jj in range(v.shape[2]):\n vv = v[kk, ii, jj]\n if vv < 0.0:\n qout -= vv\n else:\n qin += vv\n d[\"totim\"][idx] = t\n d[\"time_step\"][idx] = k[0]\n d[\"stress_period\"] = k[1]\n key = \"{}_IN\".format(text)\n d[key][idx] = qin\n key = \"{}_OUT\".format(text)\n d[key][idx] = qout\n\n diff = np.zeros((nbud, len(bud_lst)), dtype=float)\n for idx, key in enumerate(bud_lst):\n diff[:, idx] = d0[key] - d[key]\n diffmax = np.abs(diff).max()\n msg = \"maximum absolute total-budget difference ({}) \".format(diffmax)\n\n # write summary\n fpth = os.path.join(\n sim.simpath, \"{}.bud.cmp.out\".format(os.path.basename(sim.name))\n )\n f = open(fpth, \"w\")\n for i in range(diff.shape[0]):\n if i == 0:\n line = \"{:>10s}\".format(\"TIME\")\n for idx, key in enumerate(bud_lst):\n line += \"{:>25s}\".format(key + \"_LST\")\n line += \"{:>25s}\".format(key + \"_CBC\")\n line += \"{:>25s}\".format(key + \"_DIF\")\n f.write(line + \"\\n\")\n line = \"{:10g}\".format(d[\"totim\"][i])\n for idx, key in enumerate(bud_lst):\n line += \"{:25g}\".format(d0[key][i])\n line += \"{:25g}\".format(d[key][i])\n line += \"{:25g}\".format(diff[i, idx])\n f.write(line + \"\\n\")\n f.close()\n\n if diffmax > budtol:\n sim.success = False\n msg += \"diffmax {} exceeds tolerance {}\".format(diffmax, budtol)\n assert diffmax < budtol, msg\n else:\n sim.success = True\n print(\" \" + msg)\n\n return\n\n\n# - No need to change any code below\ndef build_models():\n for idx, dir in enumerate(exdirs):\n sim, mc = get_model(idx, dir)\n sim.write_simulation()\n if mc is not None:\n mc.write_simulation()\n return\n\n\ndef test_mf6model():\n # determine if running on Travis or GitHub actions\n is_CI = running_on_CI()\n r_exe = None\n if not is_CI:\n if replace_exe is not None:\n r_exe = replace_exe\n\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n build_models()\n\n # run the test models\n for idx, dir in enumerate(exdirs):\n if is_CI and not continuous_integration[idx]:\n continue\n yield test.run_mf6, Simulation(\n dir,\n exfunc=eval_sub,\n exe_dict=r_exe,\n idxsim=idx,\n mf6_regression=True,\n )\n\n return\n\n\ndef main():\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n build_models()\n\n # run the test models\n for idx, dir in enumerate(exdirs):\n sim = Simulation(\n dir,\n exfunc=eval_sub,\n exe_dict=replace_exe,\n idxsim=idx,\n mf6_regression=True,\n )\n 
test.run_mf6(sim)\n return\n\n\n# use python testmf6_csub_sub01.py --mf2005 mf2005devdbl\nif __name__ == \"__main__\":\n # print message\n print(\"standalone run of {}\".format(os.path.basename(__file__)))\n\n # run main routine\n main()\n"
] | [
[
"numpy.recarray",
"numpy.zeros",
"numpy.abs",
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
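`eval_sub` in the record above boils down to one pattern: read both observation CSVs with `np.genfromtxt(names=True)` and fail when the maximum absolute difference of the compared column exceeds a tolerance. A condensed sketch; the file names in the comment are illustrative:

```python
import numpy as np

def max_abs_diff(fpth_a, fpth_b, column="TCOMP"):
    """Load two observation CSVs with named headers and compare one column."""
    a = np.genfromtxt(fpth_a, names=True, delimiter=",")
    b = np.genfromtxt(fpth_b, names=True, delimiter=",")
    return np.abs(a[column] - b[column]).max()

# e.g. assert max_abs_diff("csub_obs.csv", "mf6-regression/csub_obs.csv") < 1e-6
```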
droseger/mmdetection | [
"355da53ea7c4b061c62c5a8430adce7641bc2894"
] | [
"mmdet/datasets/sidewalk.py"
] | [
"import numpy as np\nfrom pycocotools.coco import COCO\n\nfrom .custom import CustomDataset\n\n\nclass Sidewalk(CustomDataset):\n CLASSES = ('_background_',\n 'tarmac_cavity',\n 'tarmac_cavity_dirt',\n 'tarmac_crack_fine',\n 'tarmac_crack_medium',\n 'tarmac_crack_thick',\n 'tarmac_crocodile_light',\n 'tarmac_crocodile_severe',\n 'tarmac_grass',\n 'tarmac_patch',\n 'slabs_crack_light',\n 'slabs_crack_medium',\n 'slabs_crack_heavy')\n\n def load_annotations(self, ann_file):\n self.coco = COCO(ann_file)\n self.cat_ids = self.coco.getCatIds()\n self.cat2label = {\n cat_id: i + 1\n for i, cat_id in enumerate(self.cat_ids)\n }\n self.img_ids = self.coco.getImgIds()\n img_infos = []\n for i in self.img_ids:\n info = self.coco.loadImgs([i])[0]\n info['filename'] = info['file_name']\n img_infos.append(info)\n return img_infos\n\n def get_ann_info(self, idx):\n img_id = self.img_infos[idx]['id']\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n ann_info = self.coco.loadAnns(ann_ids)\n return self._parse_ann_info(ann_info, self.with_mask)\n\n def _filter_imgs(self, min_size=32):\n '''Filter images too small or without ground truths.'''\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds\n\n def _parse_ann_info(self, ann_info, with_mask=True):\n '''Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\n labels, masks, mask_polys, poly_lens.\n '''\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n # Two formats are provided.\n # 1. mask: a binary map of the same size of the image.\n # 2. polys: each mask consists of one or several polys, each poly is a\n # list of float.\n if with_mask:\n gt_masks = []\n gt_mask_polys = []\n gt_poly_lens = []\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if ann['iscrowd']:\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n if with_mask:\n gt_masks.append(self.coco.annToMask(ann))\n mask_polys = [\n p for p in ann['segmentation'] if len(p) >= 6\n ] # valid polygons have >= 3 points (6 coordinates)\n poly_lens = [len(p) for p in mask_polys]\n gt_mask_polys.append(mask_polys)\n gt_poly_lens.extend(poly_lens)\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)\n\n if with_mask:\n ann['masks'] = gt_masks\n # poly format is not used in the current implementation\n ann['mask_polys'] = gt_mask_polys\n ann['poly_lens'] = gt_poly_lens\n return ann\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
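`_parse_ann_info` in the record above converts COCO-style `[x, y, w, h]` boxes to corner form with an inclusive-pixel convention, hence the `- 1`. The conversion in isolation:

```python
def xywh_to_xyxy(bbox):
    """COCO [x, y, w, h] -> inclusive [x1, y1, x2, y2]."""
    x1, y1, w, h = bbox
    return [x1, y1, x1 + w - 1, y1 + h - 1]

print(xywh_to_xyxy([10, 20, 5, 8]))  # -> [10, 20, 14, 27]
```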
Hou-Yijie/NNPJ-Final | [
"b83e34ba6def1f65ad8b65d3c99bfe3f68cbd836"
] | [
"Mixup.py"
] | [
"#!/usr/bin/env python3 -u\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree.\nfrom __future__ import print_function\n\nimport argparse\nimport csv\nimport os\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom models import *\nfrom utils import progress_bar\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--batch-size', default=128, type=int, help='batch size')\nparser.add_argument('--epoch', default=200, type=int,\n help='total epochs to run')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--seed', default=520, type=int, help='random seed')\nparser.add_argument('--decay', default=1e-4, type=float, help='weight decay')\n\n\nparser.add_argument('--resume', '-r', action='store_true',\n help='resume from checkpoint')\nparser.add_argument('--alpha', default=1., type=float,\n help='mixup interpolation coefficient (default: 1)')\n\nargs = parser.parse_args()\n\nuse_cuda = torch.cuda.is_available()\ndevice = 'cuda' if use_cuda else 'cpu'\n\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\nif args.seed != 0:\n torch.manual_seed(args.seed)\n\n# Data\nprint('==> Preparing data..')\n\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\n\ntrainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,\n pin_memory=True, shuffle=True, num_workers=4)\n\ntestset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size,\n pin_memory=True, shuffle=False, num_workers=4)\n\n\n# Model\nif args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/mixup.pth.tar')\n net = checkpoint['net']\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch'] + 1\n rng_state = checkpoint['rng_state']\n torch.set_rng_state(rng_state)\nelse:\n print('==> Building model..')\n net = ResNet18()\n net = net.to(device)\n\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, nesterov=True,\n weight_decay=args.decay)\n\n\ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef mixup_criterion(criterion, pred, y_a, y_b, lam):\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, 
y_b)\n\n\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n\n inputs, targets_a, targets_b, lam = mixup_data(inputs, targets,args.alpha, use_cuda)\n inputs, targets_a, targets_b = map(Variable, (inputs,targets_a, targets_b))\n outputs = net(inputs)\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n return train_loss/(batch_idx+1)\n\n\ndef test(epoch):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n acc = 100.*correct/total\n return acc\n\n\ndef checkpoint(acc, epoch):\n # Save checkpoint.\n print('Saving..')\n state = {\n 'net': net,\n 'acc': acc,\n 'epoch': epoch,\n 'rng_state': torch.get_rng_state()\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/mixup.pth.tar')\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"decrease the learning rate at 100 and 150 epoch\"\"\"\n lr = args.lr\n if epoch >= 50:\n lr /= 10\n if epoch >= 100:\n lr /= 10\n if epoch >= 150:\n lr /= 10\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\nif not os.path.isdir('result'):\n os.mkdir('result')\nlogname = ('result/mixup' + '.csv')\n\nif not os.path.exists(logname):\n with open(logname, 'w') as logfile:\n logwriter = csv.writer(logfile, delimiter=',')\n logwriter.writerow(['epoch', 'train loss', 'test acc'])\n\nif __name__=='__main__':\n for epoch in range(start_epoch, args.epoch):\n adjust_learning_rate(optimizer, epoch)\n train_loss = train(epoch)\n test_acc = test(epoch)\n with open(logname, 'a') as logfile:\n logwriter = csv.writer(logfile, delimiter=',')\n logwriter.writerow([epoch, train_loss, test_acc])\n checkpoint(test_acc, epoch)\n"
] | [
[
"torch.set_rng_state",
"torch.nn.CrossEntropyLoss",
"numpy.random.beta",
"torch.max",
"torch.load",
"torch.randperm",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.get_rng_state",
"torch.cuda.is_available",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
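The entry above trains CIFAR-10 with mixup. A minimal standalone sketch of the interpolation it performs, mirroring its own `mixup_data`/`mixup_criterion` on CPU (batch shapes and alpha here are illustrative assumptions, not values taken from the entry):

import numpy as np
import torch

def mixup_data(x, y, alpha=1.0):
    # lam ~ Beta(alpha, alpha); alpha = 1.0 makes the draw Uniform(0, 1)
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0))          # random pairing within the batch
    mixed_x = lam * x + (1 - lam) * x[index]   # convex combination of inputs
    return mixed_x, y, y[index], lam

x = torch.randn(4, 3, 32, 32)                  # toy batch of 4 CIFAR-sized images
y = torch.randint(0, 10, (4,))
mixed_x, y_a, y_b, lam = mixup_data(x, y)
# the training loss applies the same interpolation to two cross-entropies:
# lam * ce(pred, y_a) + (1 - lam) * ce(pred, y_b)
print(mixed_x.shape, float(lam))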
jmetzz/ml-laboratory | [
"26b1e87bd0d80efa4f15280f7f32ad46d59efc1f"
] | [
"basic_ml/tests/regression/test_lasso.py"
] | [
"import math\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\n\nfrom regression.lasso import coordinate_descent_step\n\n\ndef test_lasso_coordinate_descent_step():\n expected_w = 0.425558846691\n actual_w = coordinate_descent_step(\n feature_matrix=np.array(\n [[3.0 / math.sqrt(13), 1.0 / math.sqrt(10)], [2.0 / math.sqrt(13), 3.0 / math.sqrt(10)]]\n ),\n feature_idx=1,\n weights=np.array([1.0, 4.0]),\n output=np.array([1.0, 1.0]),\n l1_penalty=0.1,\n )\n\n assert_almost_equal(expected_w, actual_w, decimal=12)\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
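The test above exercises `coordinate_descent_step`, whose implementation is not included in this row. A common soft-thresholding formulation for normalized features — an assumption about the repository's code, not its verified source — does reproduce the expected value:

import numpy as np

def coordinate_descent_step(feature_matrix, feature_idx, weights, output, l1_penalty):
    # partial residual: remove the full prediction, then add back this feature's contribution
    prediction = feature_matrix @ weights
    rho = feature_matrix[:, feature_idx] @ (
        output - prediction + weights[feature_idx] * feature_matrix[:, feature_idx]
    )
    # soft-threshold rho by l1_penalty / 2
    # (an unpenalized-intercept special case, common in this formulation, is omitted)
    if rho < -l1_penalty / 2:
        return rho + l1_penalty / 2
    if rho > l1_penalty / 2:
        return rho - l1_penalty / 2
    return 0.0

F = np.array([[3 / np.sqrt(13), 1 / np.sqrt(10)], [2 / np.sqrt(13), 3 / np.sqrt(10)]])
print(coordinate_descent_step(F, 1, np.array([1.0, 4.0]), np.array([1.0, 1.0]), 0.1))
# 0.425558846691..., matching the expected value asserted in the test above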
Shuep418Slw/OSlw_StyleTransfer | [
"e197cabc0b0f2fc16409bb2d00061761b866f776"
] | [
"Python_Code/img2txt.py"
] | [
"import numpy as np\nimport cv2\nfrom PIL import Image\n#img=cv2.imread('picture.jpg')\n\nimg=np.array(Image.open('picture.jpg'))\nprint(img.shape)\n\nmaxlen=max(img.shape)\n\nheight,width = img.shape[:2]\n\n#if maxlen<=512:\n# tout=img\n#else:\ndivlen = int(maxlen / 512)+1\nprint(divlen)\ntout=cv2.resize(img,(512,512),interpolation=cv2.INTER_LINEAR)\ntout=img\n\nfid=open(\"res.csv\",'w+')\n\nynew=tout.shape[0]\nxnew=tout.shape[1]\nfid.write(\"%d\\n\"%(ynew))\nfid.write(\"%d\\n\"%(xnew))\n\n\nprint(tout[0:10,0,0])\n\n\nfor n in range(0,3):\n temp= np.reshape(tout[:,:,n],ynew*xnew)\n print(temp.shape)\n for x in temp:\n fid.write(\"%u\\n\"%x)\n\n\nfid.close()\n\nprint(tout.shape)\n\n# while True:\n# cv2.imshow('t1',tout)\n# if cv2.waitKey(1) & 0XFF == 27:\n# break\n\n"
] | [
[
"numpy.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
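The entry above dumps image channels to a CSV-like file, and its one listed API is `np.reshape`. A small self-contained version of just the flattening step (the synthetic array stands in for the loaded `picture.jpg`):

import numpy as np

img = np.zeros((4, 4, 3), dtype=np.uint8)                # stand-in for the loaded photo
height, width = img.shape[:2]
for n in range(3):
    channel = np.reshape(img[:, :, n], height * width)   # flatten channel n to 1-D
    print(channel.shape)                                 # (16,)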
ZichengDuan/MVM3D | [
"b62c96de5894ae5fef615e2ee54fe975248a3df7"
] | [
"codes/evaluation/pyeval/calAOS.py"
] | [
"import numpy as np\nfrom scipy.optimize import linear_sum_assignment\nimport math\nimport shapely\nfrom shapely.geometry import Polygon, MultiPoint # 多边形计算的库\n\ndef wh2bottomleft(bbox):\n x, y, w, h = bbox\n xmin = x - w/2\n xmax = x + w/2\n ymin = y - h/2\n ymax = y + h/2\n return xmin, ymin, xmax, ymax\n\n\n# 求任意四边形iou\ndef compute_IOU(line1,line2):\n # 四边形四个点坐标的一维数组表示,[x,y,x,y....]\n # 如:line1 = [728, 252, 908, 215, 934, 312, 752, 355]\n # 返回iou的值,如 0.7\n line1_box = np.array(line1).reshape(4, 2) # 四边形二维坐标表示\n # 凸多边形与凹多边形\n poly1 = Polygon(line1_box)\n # .convex_hull # python四边形对象,会自动计算四个点,最后四个点顺序为:左上 左下 右下 右上 左上\n line2_box = np.array(line2).reshape(4, 2)\n # 凸多边形与凹多边形\n poly2 = Polygon(line2_box)\n\n union_poly = np.concatenate((line1_box, line2_box)) # 合并两个box坐标,变为8*2\n if not poly1.intersects(poly2): # 如果两四边形不相交\n iou = 0\n else:\n try:\n inter_area = poly1.intersection(poly2).area # 相交面积\n union_area = MultiPoint(union_poly).convex_hull.area\n if union_area == 0:\n iou = 0\n else:\n iou = float(inter_area) / union_area\n except shapely.geos.TopologicalError:\n print('shapely.geos.TopologicalError occured, iou set to 0')\n iou = 0\n return iou\n\ndef bbox_iou(box1, box2):\n '''\n 两个框(二维)的 iou 计算\n 注意:边框以左上为原点\n box:[x1,y2,x2,y2],依次为左上右下坐标\n '''\n h = max(0, min(box1[2], box2[2]) - max(box1[0], box2[0]))\n w = max(0, min(box1[3], box2[3]) - max(box1[1], box2[1]))\n area_box1 = ((box1[2] - box1[0]) * (box1[3] - box1[1]))\n area_box2 = ((box2[2] - box2[0]) * (box2[3] - box2[1]))\n inter = w * h\n union = area_box1 + area_box2 - inter\n iou = inter / union\n return iou\n\ndef getDistance(x1, y1, x2, y2):\n return math.sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2))\n\ndef CLEAR_MOD_HUN2(gt, det, thresh):\n F = int(max(gt[:, 0])) + 1\n # F = 1\n precs = 0\n aos = 0\n all_infolist = None\n for t in range(1, F + 1):\n gt_results = gt[gt[:, 0] == t - 1]\n det_results = det[det[:, 0] == t - 1]\n frame_infolist = cal_frame_TPFP_iou(thresh, gt_results, det_results)\n if all_infolist is None:\n all_infolist = frame_infolist\n else:\n all_infolist = np.concatenate((all_infolist, frame_infolist), axis=0)\n\n idx = np.argsort(all_infolist[:,0], axis=0)\n idx = idx[::-1]\n all_infolist = all_infolist[idx]\n TP = 0\n FP = 0\n all_P = gt.shape[0]\n for i, data in enumerate(all_infolist):\n flag = data[-4]\n if flag == 1:\n TP += 1\n else:\n FP += 1\n\n all_infolist[i, -3] = TP / (TP + FP)\n all_infolist[i, -2] = TP / all_P\n cur_aos = 0\n for m in range(i + 1):\n cur_aos += all_infolist[m, -4] * (1 + np.cos(np.deg2rad(all_infolist[m, 3]))) / 2\n cur_aos /= (i + 1)\n all_infolist[i, -1] = cur_aos\n recall_threshold = np.arange(0, 1.1, 0.1)\n accu_precisions = 0\n for thresh in recall_threshold:\n max_prec = 0\n for k in range(all_infolist.shape[0]):\n if all_infolist[k][-2] >= thresh:\n max_prec = max(all_infolist[k:,-3])\n break\n accu_precisions += max_prec\n\n\n final_11_precision = accu_precisions / 11\n\n # AOS\n accu_aos = 0\n for thresh in recall_threshold:\n max_aos = 0\n for k in range(all_infolist.shape[0]):\n if all_infolist[k][-2] >= thresh:\n max_aos = max(all_infolist[k:,-1])\n break\n accu_aos += max_aos\n final_11_aos = accu_aos / 11\n\n return final_11_precision, final_11_aos\n\ndef cal_frame_TPFP(dist_threshold, gt_res, pred_res):\n # prec = TP / (TP + FP)\n # recall = TP / label_P\n # 根据距离打标签,超出39厘米就是FP\n # 0 1 2 3 4 5 6 7\n # score, frame_idx, delta_dist, delta_ori, TP/FP?, prec, recall, aos\n frame_gt_det_match = np.zeros(shape=(pred_res.shape[0], 8)) - 1\n frame_gt_det_match[:, -4:] 
+= 1\n for i, pred in enumerate(pred_res):\n min_dist = -1\n min_idx = -1\n cur_gt_ori = -1\n _, _, x_pred, y_pred, w_pred, h_pred, score, ori_pred = pred\n for j, gt in enumerate(gt_res):\n _, _, x_gt, y_gt, w_pred, h_pred, ori_gt = gt\n dist = math.sqrt(pow(x_pred - x_gt, 2)+ pow(y_pred - y_gt, 2))\n if (dist < min_dist or min_dist == -1) and dist <= dist_threshold:\n # 找到距离最近的gt分配给那个pred,始终没分配到gt的pred认为是FN\n min_dist = dist\n min_idx = j\n cur_gt_ori = ori_gt\n\n # 将这个检测对应的gt信息存到数组里,如果没有gt(前两行都是-1),那就是FN\n frame_gt_det_match[i][0] = score\n frame_gt_det_match[i][1] = min_idx\n frame_gt_det_match[i][2] = min_dist\n frame_gt_det_match[i][3] = ori_pred - cur_gt_ori\n\n TP = 0\n FP = 0\n passed_index = []\n # 这一帧的TP,FP\n for k in range(pred_res.shape[0]):\n # 判断是TP还是FP\n if -1 not in frame_gt_det_match[k, :] and frame_gt_det_match[k, :][1] not in passed_index:\n TP += 1\n passed_index.append(frame_gt_det_match[k, :][1])\n frame_gt_det_match[k, 4] = 1\n elif -1 not in frame_gt_det_match[k, :] and frame_gt_det_match[k, :][1] in passed_index:\n FP += 1\n frame_gt_det_match[k, 4] = 0\n elif -1 in frame_gt_det_match[k, :]:\n FP += 1\n frame_gt_det_match[k, 4] = 0\n return frame_gt_det_match\n\ndef cal_frame_TPFP_iou(dist_threshold, gt_res, pred_res):\n # 0 1 2 3 4 5 6 7\n # score, frame_idx, iou, delta_ori, TP/FP?, prec, recall, aos\n frame_gt_det_match = np.zeros(shape=(pred_res.shape[0], 8)) - 1\n frame_gt_det_match[:, -4:] += 1\n for i, pred in enumerate(pred_res):\n max_iou = -1\n max_idx = -1\n cur_gt_ori = -1\n _, _, x1_rot, y1_rot, x2_rot, y2_rot, x3_rot, y3_rot, x4_rot, y4_rot, score, ori_pred = pred\n for j, gt in enumerate(gt_res):\n _, _, gt_x1_rot, gt_y1_rot, gt_x2_rot, gt_y2_rot, gt_x3_rot, gt_y3_rot, gt_x4_rot, gt_y4_rot, ori_gt = gt\n iou = compute_IOU([x1_rot, y1_rot, x2_rot, y2_rot, x3_rot, y3_rot, x4_rot, y4_rot],\\\n [gt_x1_rot, gt_y1_rot, gt_x2_rot, gt_y2_rot, gt_x3_rot, gt_y3_rot, gt_x4_rot, gt_y4_rot])\n if max_iou != 0 and iou >= dist_threshold and iou > max_iou:\n # 找到距离最近的gt分配给那个pred,始终没分配到gt的pred认为是FN\n max_iou = iou\n max_idx = j\n cur_gt_ori = ori_gt\n\n frame_gt_det_match[i][0] = score\n frame_gt_det_match[i][1] = max_idx\n frame_gt_det_match[i][2] = max_iou\n frame_gt_det_match[i][3] = ori_pred - cur_gt_ori\n\n\n TP = 0\n FP = 0\n passed_index = []\n for k in range(pred_res.shape[0]):\n if -1 not in frame_gt_det_match[k, :]:\n TP += 1\n passed_index.append(frame_gt_det_match[k, :][1])\n frame_gt_det_match[k, 4] = 1\n elif -1 in frame_gt_det_match[k, :]:\n FP += 1\n frame_gt_det_match[k, 4] = 0\n return frame_gt_det_match\n\ndef evaluateDetectionAPAOS(res_fpath, gt_fpath):\n gtRaw = np.loadtxt(gt_fpath)\n detRaw = np.loadtxt(res_fpath)\n\n frames = np.unique(detRaw[:, 0]) if detRaw.size else np.zeros(0)\n frame_ctr = 0\n gt_flag = True\n det_flag = True\n\n gtAllMatrix = 0\n detAllMatrix = 0\n if detRaw is None or detRaw.shape[0] == 0:\n MODP, MODA, recall, precision = 0, 0, 0, 0\n return MODP, MODA, recall, precision\n\n for t in frames:\n idxs = np.where(gtRaw[:, 0] == t)\n idx = idxs[0]\n idx_len = len(idx)\n tmp_arr = np.zeros(shape=(idx_len, 11))\n\n tmp_arr[:, 0] = np.array([frame_ctr for n in range(idx_len)])\n tmp_arr[:, 1] = np.array([i for i in range(idx_len)])\n tmp_arr[:, 2] = np.array([j for j in gtRaw[idx, -8]])\n tmp_arr[:, 3] = np.array([k for k in gtRaw[idx, -7]])\n tmp_arr[:, 4] = np.array([k for k in gtRaw[idx, -6]])\n tmp_arr[:, 5] = np.array([k for k in gtRaw[idx, -5]])\n tmp_arr[:, 6] = np.array([j for j in gtRaw[idx, -4]])\n tmp_arr[:, 7] = 
np.array([k for k in gtRaw[idx, -3]])\n tmp_arr[:, 8] = np.array([k for k in gtRaw[idx, -2]])\n tmp_arr[:, 9] = np.array([k for k in gtRaw[idx, -1]])\n tmp_arr[:, 10] = np.array([m for m in gtRaw[idx, -9]])\n if gt_flag:\n gtAllMatrix = tmp_arr\n gt_flag = False\n else:\n gtAllMatrix = np.concatenate((gtAllMatrix, tmp_arr), axis=0)\n idxs = np.where(detRaw[:, 0] == t)\n idx = idxs[0]\n idx_len = len(idx)\n tmp_arr = np.zeros(shape=(idx_len, 12))\n tmp_arr[:, 0] = np.array([frame_ctr for n in range(idx_len)])\n tmp_arr[:, 1] = np.array([i for i in range(idx_len)])\n tmp_arr[:, 2] = np.array([j for j in detRaw[idx, -8]])\n tmp_arr[:, 3] = np.array([k for k in detRaw[idx, -7]])\n tmp_arr[:, 4] = np.array([k for k in detRaw[idx, -6]])\n tmp_arr[:, 5] = np.array([k for k in detRaw[idx, -5]])\n tmp_arr[:, 6] = np.array([j for j in detRaw[idx, -4]])\n tmp_arr[:, 7] = np.array([k for k in detRaw[idx, -3]])\n tmp_arr[:, 8] = np.array([k for k in detRaw[idx, -2]])\n tmp_arr[:, 9] = np.array([k for k in detRaw[idx, -1]])\n tmp_arr[:, 10] = np.array([m for m in detRaw[idx, -9]])\n tmp_arr[:, 11] = np.array([p for p in detRaw[idx, -10]])\n\n if det_flag:\n detAllMatrix = tmp_arr\n det_flag = False\n else:\n detAllMatrix = np.concatenate((detAllMatrix, tmp_arr), axis=0)\n frame_ctr += 1\n\n AP_50, AOS_50 = CLEAR_MOD_HUN2(gtAllMatrix, detAllMatrix, 0.5)\n AP_25, AOS_25 = CLEAR_MOD_HUN2(gtAllMatrix, detAllMatrix, 0.25)\n return AP_50 * 100, AOS_50 * 100, AOS_50/AP_50, AP_25* 100, AOS_25* 100, AOS_25/AP_25\n\n"
] | [
[
"numpy.unique",
"numpy.arange",
"numpy.concatenate",
"numpy.deg2rad",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
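One detail of the quadrilateral IoU in the entry above: `compute_IOU` approximates the union area with the convex hull of all eight corners. An exact-union variant with shapely, for comparison (coordinates are toy values):

import numpy as np
from shapely.geometry import Polygon

def quad_iou(quad_a, quad_b):
    # quads are flat [x1, y1, ..., x4, y4] lists, as in compute_IOU above
    poly_a = Polygon(np.array(quad_a).reshape(4, 2))
    poly_b = Polygon(np.array(quad_b).reshape(4, 2))
    if not poly_a.intersects(poly_b):
        return 0.0
    inter = poly_a.intersection(poly_b).area
    union = poly_a.union(poly_b).area          # exact union, not a convex-hull bound
    return inter / union if union else 0.0

# two 2x2 squares offset by (1, 1): intersection 1, union 7, IoU = 1/7
print(quad_iou([0, 0, 2, 0, 2, 2, 0, 2], [1, 1, 3, 1, 3, 3, 1, 3]))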
yjhexy/tensorflow_learn | [
"2748f2e9d780a4915b56b6e6a7a4bfdfe3320488"
] | [
"tf_api/matrix/trace.py"
] | [
"# trace 练习测试 矩阵对角线只和\nimport tensorflow as tf\n\nwith tf.Session() as sess:\n a = tf.constant([[1, 5, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]])\n z = tf.trace(a)\n print(sess.run(z))\n"
] | [
[
"tensorflow.constant",
"tensorflow.Session",
"tensorflow.trace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
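`tf.trace` and `tf.Session` are TensorFlow 1.x APIs, consistent with the 0.12-1.12 version range listed above; in TensorFlow 2.x the op lives at `tf.linalg.trace`. The same reduction in plain NumPy:

import numpy as np

a = np.array([[1, 5, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]])
print(np.trace(a))   # sum of the diagonal: 1 + 2 + 3 + 4 = 10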
danipab12/Lab3 | [
"7b9f43fe169e4c8f745fa946dc0355c8ac739f16"
] | [
"scripts/plot_log_regression.py"
] | [
"import csv\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\n\nrcParams['figure.figsize'] = 10, 6\nrcParams['text.usetex']=True\n#rcParams['text.latex.unicode']=True\n\npenalties = {'l1': ([], []), 'l2': ([], [])}\nwith open('/Users/matze/Studium/Bachelorarbeit/Documents/thesis/data/decision-makers/log-regression.csv', 'rb') as f:\n reader = csv.reader(f, delimiter=';')\n for row_idx, row in enumerate(reader):\n if row_idx == 0:\n continue\n score = float(row[2])\n penalty = row[3]\n C = float(row[4])\n penalties[penalty][0].append(C)\n penalties[penalty][1].append(score)\n\nplt.plot(penalties['l1'][0], penalties['l1'][1])\nplt.plot(penalties['l2'][0], penalties['l2'][1])\nplt.legend(['L1 regularization', 'L2 regularization'], loc='lower right')\nplt.xticks(penalties['l1'][0])\nplt.xlabel('C')\nplt.ylabel('score')\nplt.xscale('log')\nplt.grid(True)\nplt.axis([penalties['l1'][0][0], penalties['l1'][0][-1], 0.6, 1])\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
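One caveat on the entry above: `open(..., 'rb')` with `csv.reader` is a Python 2 idiom; under Python 3, csv wants text mode with `newline=''`. A Python 3 sketch of the same parsing over inline sample data (the column layout is inferred from the script; the sample values are made up):

import csv
import io

sample = "idx;name;score;penalty;C\n0;run0;0.81;l1;0.01\n1;run1;0.85;l2;0.1\n"
reader = csv.reader(io.StringIO(sample), delimiter=';')
next(reader)                       # skip the header row, as the script does
for row in reader:
    score, penalty, C = float(row[2]), row[3], float(row[4])
    print(penalty, C, score)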
jwkanggist/tf-keras-stock-pred | [
"c10a7ea9934443511bcf4b16096c0f574c4f5b03"
] | [
"tfmodule/train_config.py"
] | [
"# -*- coding: utf-8 -*-\nimport tensorflow as tf\n\nclass TrainConfig(object):\n def __init__(self):\n\n # the number of step between evaluation\n self.train_input_size = 1\n self.train_data_size = 0.8\n self.test_data_size = 0.8\n\n self.training_epochs = 300\n\n self.optimizer = 'adam'\n self.loss_fn = 'mse'\n\n saved_model_folder_path = './saved_model'\n if not tf.gfile.Exists(saved_model_folder_path):\n tf.gfile.MakeDirs(saved_model_folder_path)\n self.save_weight_name= saved_model_folder_path + '/save_weight_1.h5'\n\n"
] | [
[
"tensorflow.gfile.Exists",
"tensorflow.gfile.MakeDirs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
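`tf.gfile` in the entry above is the TensorFlow 1.x name (the API moved to `tf.io.gfile` in 2.x). Since only a local directory is created here, the standard library does the same job without TensorFlow:

import os

saved_model_folder_path = './saved_model'
os.makedirs(saved_model_folder_path, exist_ok=True)   # no-op if it already exists
save_weight_name = os.path.join(saved_model_folder_path, 'save_weight_1.h5')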
zyh1999/pytorch-quantum | [
"c00bd564a99001fee2fd6b30e5e34562ab981e28"
] | [
"examples/core/models/q4digit_models.py"
] | [
"import torchquantum as tq\nimport torch.nn.functional as F\nfrom torchpack.utils.logging import logger\n\n\nclass Q4DigitFCModel0(tq.QuantumModule):\n \"\"\"rx ry rz crx cry crz layers\"\"\"\n class QLayer(tq.QuantumModule):\n def __init__(self, arch=None):\n super().__init__()\n self.arch = arch\n self.n_wires = arch['n_wires']\n self.rx_layers = tq.QuantumModuleList()\n self.ry_layers = tq.QuantumModuleList()\n self.rz_layers = tq.QuantumModuleList()\n self.crx_layers = tq.QuantumModuleList()\n self.cry_layers = tq.QuantumModuleList()\n self.crz_layers = tq.QuantumModuleList()\n\n for k in range(arch['n_blocks']):\n self.rx_layers.append(\n tq.Op1QAllLayer(op=tq.RX, n_wires=self.n_wires,\n has_params=True, trainable=True))\n self.ry_layers.append(\n tq.Op1QAllLayer(op=tq.RY, n_wires=self.n_wires,\n has_params=True, trainable=True))\n self.rz_layers.append(\n tq.Op1QAllLayer(op=tq.RZ, n_wires=self.n_wires,\n has_params=True, trainable=True))\n self.crx_layers.append(\n tq.Op2QAllLayer(op=tq.CRX, n_wires=self.n_wires,\n has_params=True, trainable=True,\n circular=True))\n self.cry_layers.append(\n tq.Op2QAllLayer(op=tq.CRY, n_wires=self.n_wires,\n has_params=True, trainable=True,\n circular=True))\n self.crz_layers.append(\n tq.Op2QAllLayer(op=tq.CRZ, n_wires=self.n_wires,\n has_params=True, trainable=True,\n circular=True))\n\n @tq.static_support\n def forward(self, q_device: tq.QuantumDevice):\n self.q_device = q_device\n\n for k in range(self.arch['n_blocks']):\n self.rx_layers[k](self.q_device)\n self.ry_layers[k](self.q_device)\n self.rz_layers[k](self.q_device)\n self.crx_layers[k](self.q_device)\n self.cry_layers[k](self.q_device)\n self.crz_layers[k](self.q_device)\n\n def __init__(self, arch=None):\n super().__init__()\n self.arch = arch\n self.n_wires = arch['n_wires']\n self.q_device = tq.QuantumDevice(n_wires=self.n_wires)\n self.encoder = tq.GeneralEncoder([\n {'input_idx': [0], 'func': 'ry', 'wires': [0]},\n {'input_idx': [1], 'func': 'ry', 'wires': [1]},\n {'input_idx': [2], 'func': 'ry', 'wires': [2]},\n {'input_idx': [3], 'func': 'ry', 'wires': [3]},\n {'input_idx': [4], 'func': 'rz', 'wires': [0]},\n {'input_idx': [5], 'func': 'rz', 'wires': [1]},\n {'input_idx': [6], 'func': 'rz', 'wires': [2]},\n {'input_idx': [7], 'func': 'rz', 'wires': [3]},\n {'input_idx': [8], 'func': 'rx', 'wires': [0]},\n {'input_idx': [9], 'func': 'rx', 'wires': [1]},\n {'input_idx': [10], 'func': 'rx', 'wires': [2]},\n {'input_idx': [11], 'func': 'rx', 'wires': [3]},\n {'input_idx': [12], 'func': 'ry', 'wires': [0]},\n {'input_idx': [13], 'func': 'ry', 'wires': [1]},\n {'input_idx': [14], 'func': 'ry', 'wires': [2]},\n {'input_idx': [15], 'func': 'ry', 'wires': [3]}\n ])\n self.q_layer = self.QLayer(arch=arch)\n self.measure = tq.MeasureAll(tq.PauliZ)\n\n def forward(self, x, verbose=False, use_qiskit=False):\n bsz = x.shape[0]\n x = F.avg_pool2d(x, 6).view(bsz, 16)\n\n if use_qiskit:\n x = self.qiskit_processor.process_parameterized(\n self.q_device, self.encoder, self.q_layer, self.measure, x)\n else:\n self.encoder(self.q_device, x)\n self.q_layer(self.q_device)\n x = self.measure(self.q_device)\n\n if verbose:\n logger.info(f\"[use_qiskit]={use_qiskit}, expectation:\\n {x.data}\")\n\n x = x.squeeze()\n x = F.log_softmax(x, dim=1)\n\n return x\n\n\nclass Q4DigitFCModel1(Q4DigitFCModel0):\n \"\"\"u3 and cu3 layers, one layer of u3 and one layer of cu3 in one block\"\"\"\n class QLayer(tq.QuantumModule):\n def __init__(self, arch=None):\n super().__init__()\n self.arch = arch\n self.n_wires = 
arch['n_wires']\n self.u3_layers = tq.QuantumModuleList()\n self.cu3_layers = tq.QuantumModuleList()\n\n for k in range(arch['n_blocks']):\n self.u3_layers.append(\n tq.Op1QAllLayer(op=tq.U3, n_wires=self.n_wires,\n has_params=True, trainable=True))\n self.cu3_layers.append(\n tq.Op2QAllLayer(op=tq.CU3, n_wires=self.n_wires,\n has_params=True, trainable=True,\n circular=True))\n\n @tq.static_support\n def forward(self, q_device: tq.QuantumDevice):\n self.q_device = q_device\n for k in range(self.arch['n_blocks']):\n self.u3_layers[k](self.q_device)\n self.cu3_layers[k](self.q_device)\n\n\nclass Q4DigitFCModel2(Q4DigitFCModel0):\n \"\"\"ry and cz layers, one layer of ry and one layer of cz in one block\"\"\"\n class QLayer(tq.QuantumModule):\n def __init__(self, arch=None):\n super().__init__()\n self.arch = arch\n self.n_wires = arch['n_wires']\n self.ry_layers = tq.QuantumModuleList()\n self.cz_layers = tq.QuantumModuleList()\n\n for k in range(arch['n_blocks']):\n self.ry_layers.append(\n tq.Op1QAllLayer(op=tq.RY, n_wires=self.n_wires,\n has_params=True, trainable=True))\n self.cz_layers.append(\n tq.Op2QAllLayer(op=tq.CZ, n_wires=self.n_wires,\n circular=True))\n\n @tq.static_support\n def forward(self, q_device: tq.QuantumDevice):\n self.q_device = q_device\n for k in range(self.arch['n_blocks']):\n self.ry_layers[k](self.q_device)\n self.cz_layers[k](self.q_device)\n\n\nclass Q4DigitFCRandomModel0(Q4DigitFCModel0):\n \"\"\"model with random gates\"\"\"\n class QLayer(tq.QuantumModule):\n def __init__(self, arch=None):\n super().__init__()\n self.arch = arch\n self.n_wires = arch['n_wires']\n op_type_name = arch['op_type_name']\n op_types = [tq.op_name_dict[name] for name in op_type_name]\n\n self.random_layer = tq.RandomLayer(\n n_ops=arch['n_random_ops'],\n n_params=arch['n_random_params'],\n wires=list(range(self.n_wires)),\n op_types=op_types)\n\n @tq.static_support\n def forward(self, q_device: tq.QuantumDevice):\n self.q_device = q_device\n self.random_layer(q_device)\n\n\nmodel_dict = {\n 'q4digit_fc0': Q4DigitFCModel0,\n 'q4digit_fc1': Q4DigitFCModel1,\n 'q4digit_fc2': Q4DigitFCModel2,\n 'q4digit_fc_rand0': Q4DigitFCRandomModel0,\n}\n"
] | [
[
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.log_softmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
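The only plain-torch calls in the entry above are the input downsampling and the output normalization inside `forward`. Stripped of the quantum layers, that pipeline looks like this (batch size and class count are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(8, 1, 28, 28)             # MNIST-like batch
x = F.avg_pool2d(x, 6).view(8, 16)        # 28x28 -> 4x4, flattened to 16 features
logits = torch.randn(8, 10)               # stand-in for the measured expectations
log_probs = F.log_softmax(logits, dim=1)  # exp of each row sums to 1
print(x.shape, log_probs.shape)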
DANISHFAYAZNAJAR/nalp | [
"8a7d8b7cb13dfc755a72d0770bf81ba9bc6ddb35"
] | [
"nalp/datasets/image.py"
] | [
"\"\"\"Imaging dataset class.\n\"\"\"\n\nfrom tensorflow import data\n\nimport nalp.utils.logging as l\nfrom nalp.core import Dataset\n\nlogger = l.get_logger(__name__)\n\n\nclass ImageDataset(Dataset):\n \"\"\"An ImageDataset class is responsible for creating a dataset that encodes images for\n adversarial generation.\n\n \"\"\"\n\n def __init__(self, images, batch_size=256, shape=None, normalize=True, shuffle=True):\n \"\"\"Initialization method.\n\n Args:\n images (np.array): An array of images.\n batch_size (int): Size of batches.\n shape (tuple): A tuple containing the shape if the array should be forced to reshape.\n normalize (bool): Whether images should be normalized between -1 and 1.\n shuffle (bool): Whether batches should be shuffled or not.\n\n \"\"\"\n\n logger.info('Overriding class: Dataset -> ImageDataset.')\n\n super(ImageDataset, self).__init__(shuffle)\n\n # Pre-process an array of images\n processed_images = self._preprocess(images, shape, normalize)\n\n # Building up the dataset class\n self._build(processed_images, batch_size)\n\n logger.debug('Size: %s | Batch size: %d | Normalization: %s | Shuffle: %s.',\n shape, batch_size, normalize, self.shuffle)\n logger.info('Class overrided.')\n\n def _preprocess(self, images, shape, normalize):\n \"\"\"Pre-process an array of images by reshaping and normalizing, if necessary.\n\n Args:\n images (np.array): An array of images.\n shape (tuple): A tuple containing the shape if the array should be forced to reshape.\n normalize (bool): Whether images should be normalized between -1 and 1.\n\n Returns:\n Slices of pre-processed tensor-based images.\n\n \"\"\"\n\n images = images.astype('float32')\n\n if shape:\n images = images.reshape(shape)\n\n if normalize:\n images = (images - 127.5) / 127.5\n\n # Slices the arrays into tensors\n images = data.Dataset.from_tensor_slices(images)\n\n return images\n"
] | [
[
"tensorflow.data.Dataset.from_tensor_slices"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
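The preprocessing in the entry above is just scale-then-slice. A standalone version of it (the array shape is a toy assumption):

import numpy as np
import tensorflow as tf

images = np.random.randint(0, 256, size=(6, 28, 28, 1)).astype('float32')
images = (images - 127.5) / 127.5                       # map [0, 255] -> [-1, 1]
dataset = tf.data.Dataset.from_tensor_slices(images).batch(2)
for batch in dataset:
    print(batch.shape)                                  # (2, 28, 28, 1), three times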
hardikk13/BSP-NET-pytorch | [
"128092d930389b56a33723c425f85e363b542b68"
] | [
"modelSVR.py"
] | [
"import os\nimport time\nimport math\nimport random\nimport numpy as np\nimport h5py\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.autograd import Variable\n\nimport mcubes\n# from bspt import digest_bsp, get_mesh, get_mesh_watertight\nfrom bspt_slow import digest_bsp, get_mesh, get_mesh_watertight\n\nfrom utils import *\n\n#pytorch 1.2.0 implementation\n\n\nclass generator(nn.Module):\n\tdef __init__(self, p_dim, c_dim):\n\t\tsuper(generator, self).__init__()\n\t\tself.p_dim = p_dim\n\t\tself.c_dim = c_dim\n\t\tconvex_layer_weights = torch.zeros((self.p_dim, self.c_dim))\n\t\tself.convex_layer_weights = nn.Parameter(convex_layer_weights)\n\t\tnn.init.normal_(self.convex_layer_weights, mean=0.0, std=0.02)\n\n\tdef forward(self, points, plane_m, is_training=False):\n\t\t#level 1\n\t\th1 = torch.matmul(points, plane_m)\n\t\th1 = torch.clamp(h1, min=0)\n\n\t\t#level 2\n\t\th2 = torch.matmul(h1, (self.convex_layer_weights>0.01).float())\n\n\t\t#level 3\n\t\th3 = torch.min(h2, dim=2, keepdim=True)[0]\n\n\t\treturn h2,h3\n\nclass resnet_block(nn.Module):\n\tdef __init__(self, dim_in, dim_out):\n\t\tsuper(resnet_block, self).__init__()\n\t\tself.dim_in = dim_in\n\t\tself.dim_out = dim_out\n\t\tif self.dim_in == self.dim_out:\n\t\t\tself.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False)\n\t\t\tself.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False)\n\t\t\tnn.init.xavier_uniform_(self.conv_1.weight)\n\t\t\tnn.init.xavier_uniform_(self.conv_2.weight)\n\t\telse:\n\t\t\tself.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False)\n\t\t\tself.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False)\n\t\t\tself.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False)\n\t\t\tnn.init.xavier_uniform_(self.conv_1.weight)\n\t\t\tnn.init.xavier_uniform_(self.conv_2.weight)\n\t\t\tnn.init.xavier_uniform_(self.conv_s.weight)\n\n\tdef forward(self, input, is_training=False):\n\t\tif self.dim_in == self.dim_out:\n\t\t\toutput = self.conv_1(input)\n\t\t\toutput = F.leaky_relu(output, negative_slope=0.01, inplace=True)\n\t\t\toutput = self.conv_2(output)\n\t\t\toutput = output+input\n\t\t\toutput = F.leaky_relu(output, negative_slope=0.01, inplace=True)\n\t\telse:\n\t\t\toutput = self.conv_1(input)\n\t\t\toutput = F.leaky_relu(output, negative_slope=0.01, inplace=True)\n\t\t\toutput = self.conv_2(output)\n\t\t\tinput_ = self.conv_s(input)\n\t\t\toutput = output+input_\n\t\t\toutput = F.leaky_relu(output, negative_slope=0.01, inplace=True)\n\t\treturn output\n\nclass img_encoder(nn.Module):\n\tdef __init__(self, img_ef_dim, z_dim):\n\t\tsuper(img_encoder, self).__init__()\n\t\tself.img_ef_dim = img_ef_dim\n\t\tself.z_dim = z_dim\n\t\tself.conv_0 = nn.Conv2d(1, self.img_ef_dim, 7, stride=2, padding=3, bias=False)\n\t\tself.res_1 = resnet_block(self.img_ef_dim, self.img_ef_dim)\n\t\tself.res_2 = resnet_block(self.img_ef_dim, self.img_ef_dim)\n\t\tself.res_3 = resnet_block(self.img_ef_dim, self.img_ef_dim*2)\n\t\tself.res_4 = resnet_block(self.img_ef_dim*2, self.img_ef_dim*2)\n\t\tself.res_5 = resnet_block(self.img_ef_dim*2, self.img_ef_dim*4)\n\t\tself.res_6 = resnet_block(self.img_ef_dim*4, self.img_ef_dim*4)\n\t\tself.res_7 = resnet_block(self.img_ef_dim*4, self.img_ef_dim*8)\n\t\tself.res_8 = resnet_block(self.img_ef_dim*8, self.img_ef_dim*8)\n\t\tself.conv_9 = 
nn.Conv2d(self.img_ef_dim*8, self.img_ef_dim*16, 4, stride=2, padding=1, bias=True)\n\t\tself.conv_10 = nn.Conv2d(self.img_ef_dim*16, self.img_ef_dim*16, 4, stride=1, padding=0, bias=True)\n\t\tself.linear_1 = nn.Linear(self.img_ef_dim*16, self.img_ef_dim*16, bias=True)\n\t\tself.linear_2 = nn.Linear(self.img_ef_dim*16, self.img_ef_dim*16, bias=True)\n\t\tself.linear_3 = nn.Linear(self.img_ef_dim*16, self.img_ef_dim*16, bias=True)\n\t\tself.linear_4 = nn.Linear(self.img_ef_dim*16, self.z_dim, bias=True)\n\t\tnn.init.xavier_uniform_(self.conv_0.weight)\n\t\tnn.init.xavier_uniform_(self.conv_9.weight)\n\t\tnn.init.constant_(self.conv_9.bias,0)\n\t\tnn.init.xavier_uniform_(self.conv_10.weight)\n\t\tnn.init.constant_(self.conv_10.bias,0)\n\t\tnn.init.xavier_uniform_(self.linear_1.weight)\n\t\tnn.init.constant_(self.linear_1.bias,0)\n\t\tnn.init.xavier_uniform_(self.linear_2.weight)\n\t\tnn.init.constant_(self.linear_2.bias,0)\n\t\tnn.init.xavier_uniform_(self.linear_3.weight)\n\t\tnn.init.constant_(self.linear_3.bias,0)\n\t\tnn.init.xavier_uniform_(self.linear_4.weight)\n\t\tnn.init.constant_(self.linear_4.bias,0)\n\n\tdef forward(self, view, is_training=False):\n\t\tlayer_0 = self.conv_0(1-view)\n\t\tlayer_0 = F.leaky_relu(layer_0, negative_slope=0.01, inplace=True)\n\n\t\tlayer_1 = self.res_1(layer_0, is_training=is_training)\n\t\tlayer_2 = self.res_2(layer_1, is_training=is_training)\n\t\t\n\t\tlayer_3 = self.res_3(layer_2, is_training=is_training)\n\t\tlayer_4 = self.res_4(layer_3, is_training=is_training)\n\t\t\n\t\tlayer_5 = self.res_5(layer_4, is_training=is_training)\n\t\tlayer_6 = self.res_6(layer_5, is_training=is_training)\n\t\t\n\t\tlayer_7 = self.res_7(layer_6, is_training=is_training)\n\t\tlayer_8 = self.res_8(layer_7, is_training=is_training)\n\t\t\n\t\tlayer_9 = self.conv_9(layer_8)\n\t\tlayer_9 = F.leaky_relu(layer_9, negative_slope=0.01, inplace=True)\n\t\t\n\t\tlayer_10 = self.conv_10(layer_9)\n\t\tlayer_10 = layer_10.view(-1,self.img_ef_dim*16)\n\t\tlayer_10 = F.leaky_relu(layer_10, negative_slope=0.01, inplace=True)\n\n\t\tl1 = self.linear_1(layer_10)\n\t\tl1 = F.leaky_relu(l1, negative_slope=0.01, inplace=True)\n\n\t\tl2 = self.linear_2(l1)\n\t\tl2 = F.leaky_relu(l2, negative_slope=0.01, inplace=True)\n\n\t\tl3 = self.linear_3(l2)\n\t\tl3 = F.leaky_relu(l3, negative_slope=0.01, inplace=True)\n\n\t\tl4 = self.linear_4(l3)\n\t\tl4 = torch.sigmoid(l4)\n\n\t\treturn l4\n\nclass decoder(nn.Module):\n\tdef __init__(self, ef_dim, p_dim):\n\t\tsuper(decoder, self).__init__()\n\t\tself.ef_dim = ef_dim\n\t\tself.p_dim = p_dim\n\t\tself.linear_1 = nn.Linear(self.ef_dim*8, self.ef_dim*16, bias=True)\n\t\tself.linear_2 = nn.Linear(self.ef_dim*16, self.ef_dim*32, bias=True)\n\t\tself.linear_3 = nn.Linear(self.ef_dim*32, self.ef_dim*64, bias=True)\n\t\tself.linear_4 = nn.Linear(self.ef_dim*64, self.p_dim*4, bias=True)\n\t\tnn.init.xavier_uniform_(self.linear_1.weight)\n\t\tnn.init.constant_(self.linear_1.bias,0)\n\t\tnn.init.xavier_uniform_(self.linear_2.weight)\n\t\tnn.init.constant_(self.linear_2.bias,0)\n\t\tnn.init.xavier_uniform_(self.linear_3.weight)\n\t\tnn.init.constant_(self.linear_3.bias,0)\n\t\tnn.init.xavier_uniform_(self.linear_4.weight)\n\t\tnn.init.constant_(self.linear_4.bias,0)\n\n\tdef forward(self, inputs, is_training=False):\n\t\tl1 = self.linear_1(inputs)\n\t\tl1 = F.leaky_relu(l1, negative_slope=0.01, inplace=True)\n\n\t\tl2 = self.linear_2(l1)\n\t\tl2 = F.leaky_relu(l2, negative_slope=0.01, inplace=True)\n\n\t\tl3 = self.linear_3(l2)\n\t\tl3 = F.leaky_relu(l3, 
negative_slope=0.01, inplace=True)\n\n\t\tl4 = self.linear_4(l3)\n\t\tl4 = l4.view(-1, 4, self.p_dim)\n\n\t\treturn l4\n\nclass bsp_network(nn.Module):\n\tdef __init__(self, ef_dim, p_dim, c_dim, img_ef_dim, z_dim):\n\t\tsuper(bsp_network, self).__init__()\n\t\tself.ef_dim = ef_dim\n\t\tself.p_dim = p_dim\n\t\tself.c_dim = c_dim\n\t\tself.img_ef_dim = img_ef_dim\n\t\tself.z_dim = z_dim\n\t\tself.img_encoder = img_encoder(self.img_ef_dim, self.z_dim)\n\t\tself.decoder = decoder(self.ef_dim, self.p_dim)\n\t\tself.generator = generator(self.p_dim, self.c_dim)\n\n\tdef forward(self, inputs, z_vector, plane_m, point_coord, is_training=False):\n\t\tif is_training:\n\t\t\tz_vector = self.img_encoder(inputs, is_training=is_training)\n\t\t\tplane_m = None\n\t\t\tnet_out_convexes = None\n\t\t\tnet_out = None\n\t\telse:\n\t\t\tif inputs is not None:\n\t\t\t\tz_vector = self.img_encoder(inputs, is_training=is_training)\n\t\t\tif z_vector is not None:\n\t\t\t\tplane_m = self.decoder(z_vector, is_training=is_training)\n\t\t\tif point_coord is not None:\n\t\t\t\tnet_out_convexes, net_out = self.generator(point_coord, plane_m, is_training=is_training)\n\t\t\telse:\n\t\t\t\tnet_out_convexes = None\n\t\t\t\tnet_out = None\n\n\t\treturn z_vector, plane_m, net_out_convexes, net_out\n\n\nclass BSP_SVR(object):\n\tdef __init__(self, config):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\ttoo lazy to explain\n\t\t\"\"\"\n\t\tself.input_size = 64 #input voxel grid size\n\n\t\tself.ef_dim = 32\n\t\tself.p_dim = 4096\n\t\tself.c_dim = 256\n\t\tself.img_ef_dim = 64\n\t\tself.z_dim = self.ef_dim*8\n\n\t\t#actual batch size\n\t\tself.shape_batch_size = 64\n\n\t\tself.view_size = 137\n\t\tself.crop_size = 128\n\t\tself.view_num = 24\n\t\tself.crop_edge = self.view_size-self.crop_size\n\t\tself.test_idx = 23\n\n\t\tself.dataset_name = config.dataset\n\t\tself.dataset_load = self.dataset_name + '_train'\n\t\tif not config.train:\n\t\t\tself.dataset_load = self.dataset_name + '_test'\n\t\tself.checkpoint_dir = config.checkpoint_dir\n\t\tself.data_dir = config.data_dir\n\t\t\n\t\tdata_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'\n\t\tif os.path.exists(data_hdf5_name):\n\t\t\tdata_dict = h5py.File(data_hdf5_name, 'r')\n\t\t\toffset_x = int(self.crop_edge/2)\n\t\t\toffset_y = int(self.crop_edge/2)\n\t\t\t#reshape to NCHW\n\t\t\tself.data_pixels = np.reshape(data_dict['pixels'][:,:,offset_y:offset_y+self.crop_size, offset_x:offset_x+self.crop_size], [-1,self.view_num,1,self.crop_size,self.crop_size])\n\t\telse:\n\t\t\tprint(\"error: cannot load \"+data_hdf5_name)\n\t\t\texit(0)\n\t\tif config.train:\n\t\t\tdataz_hdf5_name = self.checkpoint_dir+'/'+self.modelAE_dir+'/'+self.dataset_name+'_train_z.hdf5'\n\t\t\tif os.path.exists(dataz_hdf5_name):\n\t\t\t\tdataz_dict = h5py.File(dataz_hdf5_name, 'r')\n\t\t\t\tself.data_zs = dataz_dict['zs'][:]\n\t\t\telse:\n\t\t\t\tprint(\"error: cannot load \"+dataz_hdf5_name)\n\t\t\t\texit(0)\n\t\t\tif len(self.data_zs) != len(self.data_pixels):\n\t\t\t\tprint(\"error: len(self.data_zs) != len(self.data_pixels)\")\n\t\t\t\tprint(len(self.data_zs), len(self.data_pixels))\n\t\t\t\texit(0)\n\t\t\n\t\tself.real_size = 64 #output point-value voxel grid size in testing\n\t\tself.test_size = 32 #related to testing batch_size, adjust according to gpu memory size\n\t\ttest_point_batch_size = self.test_size*self.test_size*self.test_size #do not change\n\t\t\n\t\t#get coords\n\t\tdima = self.test_size\n\t\tdim = self.real_size\n\t\tself.aux_x = np.zeros([dima,dima,dima],np.uint8)\n\t\tself.aux_y = 
np.zeros([dima,dima,dima],np.uint8)\n\t\tself.aux_z = np.zeros([dima,dima,dima],np.uint8)\n\t\tmultiplier = int(dim/dima)\n\t\tmultiplier2 = multiplier*multiplier\n\t\tmultiplier3 = multiplier*multiplier*multiplier\n\t\tfor i in range(dima):\n\t\t\tfor j in range(dima):\n\t\t\t\tfor k in range(dima):\n\t\t\t\t\tself.aux_x[i,j,k] = i*multiplier\n\t\t\t\t\tself.aux_y[i,j,k] = j*multiplier\n\t\t\t\t\tself.aux_z[i,j,k] = k*multiplier\n\t\tself.coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)\n\t\tfor i in range(multiplier):\n\t\t\tfor j in range(multiplier):\n\t\t\t\tfor k in range(multiplier):\n\t\t\t\t\tself.coords[i*multiplier2+j*multiplier+k,:,:,:,0] = self.aux_x+i\n\t\t\t\t\tself.coords[i*multiplier2+j*multiplier+k,:,:,:,1] = self.aux_y+j\n\t\t\t\t\tself.coords[i*multiplier2+j*multiplier+k,:,:,:,2] = self.aux_z+k\n\t\tself.coords = (self.coords+0.5)/dim-0.5\n\t\tself.coords = np.reshape(self.coords,[multiplier3,test_point_batch_size,3])\n\t\tself.coords = np.concatenate([self.coords, np.ones([multiplier3,test_point_batch_size,1],np.float32) ],axis=2)\n\t\tself.coords = torch.from_numpy(self.coords)\n\n\t\tif torch.cuda.is_available():\n\t\t\tself.device = torch.device('cuda')\n\t\t\ttorch.backends.cudnn.benchmark = True\n\t\telse:\n\t\t\tself.device = torch.device('cpu')\n\t\tself.coords = self.coords.to(self.device)\n\n\t\t#build model\n\t\tself.bsp_network = bsp_network(self.ef_dim, self.p_dim, self.c_dim, self.img_ef_dim, self.z_dim)\n\t\tself.bsp_network.to(self.device)\n\t\t#print params\n\t\t#for param_tensor in self.bsp_network.state_dict():\n\t\t#\tprint(param_tensor, \"\\t\", self.bsp_network.state_dict()[param_tensor].size())\n\t\tself.optimizer = torch.optim.Adam(self.bsp_network.img_encoder.parameters(), lr=config.learning_rate, betas=(config.beta1, 0.999))\n\t\t#pytorch does not have a checkpoint manager\n\t\t#have to define it myself to manage max num of checkpoints to keep\n\t\tself.max_to_keep = 10\n\t\tself.checkpoint_path = os.path.join(self.checkpoint_dir, self.model_dir)\n\t\tself.checkpoint_name='BSP_SVR.model'\n\t\tself.checkpoint_manager_list = [None] * self.max_to_keep\n\t\tself.checkpoint_manager_pointer = 0\n\t\tself.checkpoint_AE_path = os.path.join(self.checkpoint_dir, self.modelAE_dir)\n\t\tself.checkpoint_AE_name='BSP_AE.model'\n\t\t#loss\n\t\tdef network_loss(pred_z, gt_z):\n\t\t\treturn torch.mean((pred_z - gt_z)**2)\n\t\tself.loss = network_loss\n\n\t@property\n\tdef model_dir(self):\n\t\treturn \"{}_svr_{}\".format(\n\t\t\t\tself.dataset_name, self.crop_size)\n\t@property\n\tdef modelAE_dir(self):\n\t\treturn \"{}_ae_{}\".format(\n\t\t\t\tself.dataset_name, self.input_size)\n\n\tdef train(self, config):\n\t\t#load AE weights\n\t\tcheckpoint_txt = os.path.join(self.checkpoint_AE_path, \"checkpoint\")\n\t\tif os.path.exists(checkpoint_txt):\n\t\t\tfin = open(checkpoint_txt)\n\t\t\tmodel_dir = fin.readline().strip()\n\t\t\tfin.close()\n\t\t\tself.bsp_network.load_state_dict(torch.load(model_dir), strict=False)\n\t\t\tprint(\" [*] Load SUCCESS\")\n\t\telse:\n\t\t\tprint(\" [!] 
Load failed...\")\n\t\t\texit(-1)\n\n\t\tshape_num = len(self.data_pixels)\n\t\tbatch_index_list = np.arange(shape_num)\n\t\t\n\t\tprint(\"\\n\\n----------net summary----------\")\n\t\tprint(\"training samples \", shape_num)\n\t\tprint(\"-------------------------------\\n\\n\")\n\t\t\n\t\tstart_time = time.time()\n\t\tassert config.epoch==0 or config.iteration==0\n\t\ttraining_epoch = config.epoch + int(config.iteration/shape_num)\n\t\tbatch_num = int(shape_num/self.shape_batch_size)\n\t\t#batch_view = np.zeros([self.shape_batch_size,self.crop_size,self.crop_size,1], np.float32)\n\n\t\tself.bsp_network.train()\n\t\tfor epoch in range(0, training_epoch):\n\t\t\tnp.random.shuffle(batch_index_list)\n\t\t\tavg_loss = 0\n\t\t\tavg_num = 0\n\t\t\tfor idx in range(batch_num):\n\t\t\t\tdxb = batch_index_list[idx*self.shape_batch_size:(idx+1)*self.shape_batch_size]\n\n\t\t\t\t'''\n\t\t\t\t#random flip - not used\n\t\t\t\tfor t in range(self.shape_batch_size):\n\t\t\t\t\twhich_view = np.random.randint(self.view_num)\n\t\t\t\t\tbatch_view_ = self.data_pixels[dxb[t],which_view].astype(np.float32)\n\t\t\t\t\tif np.random.randint(2)==0:\n\t\t\t\t\t\tbatch_view_ = np.flip(batch_view_, 1)\n\t\t\t\t\tbatch_view[t] = batch_view_/255.0\n\t\t\t\t'''\n\t\t\t\t\n\t\t\t\twhich_view = np.random.randint(self.view_num)\n\t\t\t\tbatch_view = self.data_pixels[dxb,which_view].astype(np.float32)/255.0\n\t\t\t\tbatch_zs = self.data_zs[dxb]\n\n\t\t\t\tbatch_view = torch.from_numpy(batch_view)\n\t\t\t\tbatch_zs = torch.from_numpy(batch_zs)\n\n\t\t\t\tbatch_view = batch_view.to(self.device)\n\t\t\t\tbatch_zs = batch_zs.to(self.device)\n\n\t\t\t\tself.bsp_network.zero_grad()\n\t\t\t\tz_vector, _,_,_ = self.bsp_network(batch_view, None, None, None, is_training=True)\n\t\t\t\terr = self.loss(z_vector, batch_zs)\n\n\t\t\t\terr.backward()\n\t\t\t\tself.optimizer.step()\n\n\t\t\t\tavg_loss += err\n\t\t\t\tavg_num += 1\n\t\t\tprint(\"Epoch: [%2d/%2d] time: %4.4f, loss: %.8f\" % (epoch, training_epoch, time.time() - start_time, avg_loss/avg_num))\n\t\t\tif epoch%10==9:\n\t\t\t\tself.test_1(config,\"train_\"+str(epoch))\n\t\t\tif epoch%100==99:\n\t\t\t\tif not os.path.exists(self.checkpoint_path):\n\t\t\t\t\tos.makedirs(self.checkpoint_path)\n\t\t\t\tsave_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+\"-\"+str(epoch)+\".pth\")\n\t\t\t\tself.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep\n\t\t\t\t#delete checkpoint\n\t\t\t\tif self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:\n\t\t\t\t\tif os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):\n\t\t\t\t\t\tos.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])\n\t\t\t\t#save checkpoint\n\t\t\t\ttorch.save(self.bsp_network.state_dict(), save_dir)\n\t\t\t\t#update checkpoint manager\n\t\t\t\tself.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir\n\t\t\t\t#write file\n\t\t\t\tcheckpoint_txt = os.path.join(self.checkpoint_path, \"checkpoint\")\n\t\t\t\tfout = open(checkpoint_txt, 'w')\n\t\t\t\tfor i in range(self.max_to_keep):\n\t\t\t\t\tpointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep\n\t\t\t\t\tif self.checkpoint_manager_list[pointer] is not None:\n\t\t\t\t\t\tfout.write(self.checkpoint_manager_list[pointer]+\"\\n\")\n\t\t\t\tfout.close()\n\n\t\tif not os.path.exists(self.checkpoint_path):\n\t\t\tos.makedirs(self.checkpoint_path)\n\t\tsave_dir = 
os.path.join(self.checkpoint_path,self.checkpoint_name+\"-\"+str(training_epoch)+\".pth\")\n\t\tself.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep\n\t\t#delete checkpoint\n\t\tif self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:\n\t\t\tif os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):\n\t\t\t\tos.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])\n\t\t#save checkpoint\n\t\ttorch.save(self.bsp_network.state_dict(), save_dir)\n\t\t#update checkpoint manager\n\t\tself.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir\n\t\t#write file\n\t\tcheckpoint_txt = os.path.join(self.checkpoint_path, \"checkpoint\")\n\t\tfout = open(checkpoint_txt, 'w')\n\t\tfor i in range(self.max_to_keep):\n\t\t\tpointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep\n\t\t\tif self.checkpoint_manager_list[pointer] is not None:\n\t\t\t\tfout.write(self.checkpoint_manager_list[pointer]+\"\\n\")\n\t\tfout.close()\n\n\tdef test_1(self, config, name):\n\t\tmultiplier = int(self.real_size/self.test_size)\n\t\tmultiplier2 = multiplier*multiplier\n\n\t\tthres = 0.99\n\t\n\t\tt = np.random.randint(len(self.data_pixels))\n\t\tmodel_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)\n\t\tbatch_view = self.data_pixels[t:t+1,self.test_idx].astype(np.float32)/255.0\n\t\tbatch_view = torch.from_numpy(batch_view)\n\t\tbatch_view = batch_view.to(self.device)\n\t\t_, out_m, _,_ = self.bsp_network(batch_view, None, None, None, is_training=False)\n\t\tfor i in range(multiplier):\n\t\t\tfor j in range(multiplier):\n\t\t\t\tfor k in range(multiplier):\n\t\t\t\t\tminib = i*multiplier2+j*multiplier+k\n\t\t\t\t\tpoint_coord = self.coords[minib:minib+1]\n\t\t\t\t\t_,_,_, net_out = self.bsp_network(None, None, out_m, point_coord, is_training=False)\n\t\t\t\t\tnet_out = torch.clamp(1-net_out, min=0, max=1)\n\t\t\t\t\tmodel_float[self.aux_x+i+1,self.aux_y+j+1,self.aux_z+k+1] = np.reshape(net_out.detach().cpu().numpy(), [self.test_size,self.test_size,self.test_size])\n\t\t\n\t\tvertices, triangles = mcubes.marching_cubes(model_float, thres)\n\t\tvertices = (vertices-0.5)/self.real_size-0.5\n\t\t#output ply sum\n\t\twrite_ply_triangle(config.sample_dir+\"/\"+name+\".ply\", vertices, triangles)\n\t\tprint(\"[sample]\")\n\n\n\t#output bsp shape as ply\n\tdef test_bsp(self, config):\n\t\t#load previous checkpoint\n\t\tcheckpoint_txt = os.path.join(self.checkpoint_path, \"checkpoint\")\n\t\tif os.path.exists(checkpoint_txt):\n\t\t\tfin = open(checkpoint_txt)\n\t\t\tmodel_dir = fin.readline().strip()\n\t\t\tfin.close()\n\t\t\tself.bsp_network.load_state_dict(torch.load(model_dir))\n\t\t\tprint(\" [*] Load SUCCESS\")\n\t\telse:\n\t\t\tprint(\" [!] 
Load failed...\")\n\t\t\treturn\n\t\t\n\t\tw2 = self.bsp_network.generator.convex_layer_weights.detach().cpu().numpy()\n\n\t\tdima = self.test_size\n\t\tdim = self.real_size\n\t\tmultiplier = int(dim/dima)\n\t\tmultiplier2 = multiplier*multiplier\n\n\t\tself.bsp_network.eval()\n\t\tfor t in range(config.start, min(len(self.data_pixels),config.end)):\n\t\t\tmodel_float = np.ones([self.real_size,self.real_size,self.real_size,self.c_dim],np.float32)\n\t\t\tbatch_view = self.data_pixels[t:t+1,self.test_idx].astype(np.float32)/255.0\n\t\t\tbatch_view = torch.from_numpy(batch_view)\n\t\t\tbatch_view = batch_view.to(self.device)\n\t\t\t_, out_m, _,_ = self.bsp_network(batch_view, None, None, None, is_training=False)\n\t\t\tfor i in range(multiplier):\n\t\t\t\tfor j in range(multiplier):\n\t\t\t\t\tfor k in range(multiplier):\n\t\t\t\t\t\tminib = i*multiplier2+j*multiplier+k\n\t\t\t\t\t\tpoint_coord = self.coords[minib:minib+1]\n\t\t\t\t\t\t_,_, model_out, _ = self.bsp_network(None, None, out_m, point_coord, is_training=False)\n\t\t\t\t\t\tmodel_float[self.aux_x+i,self.aux_y+j,self.aux_z+k,:] = np.reshape(model_out.detach().cpu().numpy(), [self.test_size,self.test_size,self.test_size,self.c_dim])\n\t\t\t\n\t\t\tout_m = out_m.detach().cpu().numpy()\n\t\t\t\n\t\t\tbsp_convex_list = []\n\t\t\tmodel_float = model_float<0.01\n\t\t\tmodel_float_sum = np.sum(model_float,axis=3)\n\t\t\tfor i in range(self.c_dim):\n\t\t\t\tslice_i = model_float[:,:,:,i]\n\t\t\t\tif np.max(slice_i)>0: #if one voxel is inside a convex\n\t\t\t\t\tif np.min(model_float_sum-slice_i*2)>=0: #if this convex is redundant, i.e. the convex is inside the shape\n\t\t\t\t\t\tmodel_float_sum = model_float_sum-slice_i\n\t\t\t\t\telse:\n\t\t\t\t\t\tbox = []\n\t\t\t\t\t\tfor j in range(self.p_dim):\n\t\t\t\t\t\t\tif w2[j,i]>0.01:\n\t\t\t\t\t\t\t\ta = -out_m[0,0,j]\n\t\t\t\t\t\t\t\tb = -out_m[0,1,j]\n\t\t\t\t\t\t\t\tc = -out_m[0,2,j]\n\t\t\t\t\t\t\t\td = -out_m[0,3,j]\n\t\t\t\t\t\t\t\tbox.append([a,b,c,d])\n\t\t\t\t\t\tif len(box)>0:\n\t\t\t\t\t\t\tbsp_convex_list.append(np.array(box,np.float32))\n\n\t\t\t#print(bsp_convex_list)\n\t\t\tprint(len(bsp_convex_list))\n\t\t\t\n\t\t\t#convert bspt to mesh\n\t\t\tvertices, polygons = get_mesh(bsp_convex_list)\n\t\t\t#use the following alternative to merge nearby vertices to get watertight meshes\n\t\t\t#vertices, polygons = get_mesh_watertight(bsp_convex_list)\n\n\t\t\t#output ply\n\t\t\twrite_ply_polygon(config.sample_dir+\"/\"+str(t)+\"_bsp.ply\", vertices, polygons)\n\t\n\t#output bsp shape as ply and point cloud as ply\n\tdef test_mesh_point(self, config):\n\t\t#load previous checkpoint\n\t\tcheckpoint_txt = os.path.join(self.checkpoint_path, \"checkpoint\")\n\t\tif os.path.exists(checkpoint_txt):\n\t\t\tfin = open(checkpoint_txt)\n\t\t\tmodel_dir = fin.readline().strip()\n\t\t\tfin.close()\n\t\t\tself.bsp_network.load_state_dict(torch.load(model_dir))\n\t\t\tprint(\" [*] Load SUCCESS\")\n\t\telse:\n\t\t\tprint(\" [!] 
Load failed...\")\n\t\t\treturn\n\n\t\tw2 = self.bsp_network.generator.convex_layer_weights.detach().cpu().numpy()\n\t\tdima = self.test_size\n\t\tdim = self.real_size\n\t\tmultiplier = int(dim/dima)\n\t\tmultiplier2 = multiplier*multiplier\n\n\t\tself.bsp_network.eval()\n\t\tfor t in range(config.start, min(len(self.data_pixels),config.end)):\n\t\t\tprint(t)\n\t\t\tmodel_float = np.ones([self.real_size,self.real_size,self.real_size,self.c_dim],np.float32)\n\t\t\tmodel_float_combined = np.ones([self.real_size,self.real_size,self.real_size],np.float32)\n\t\t\tbatch_view = self.data_pixels[t:t+1,self.test_idx].astype(np.float32)/255.0\n\t\t\tbatch_view = torch.from_numpy(batch_view)\n\t\t\tbatch_view = batch_view.to(self.device)\n\t\t\t_, out_m, _,_ = self.bsp_network(batch_view, None, None, None, is_training=False)\n\t\t\tfor i in range(multiplier):\n\t\t\t\tfor j in range(multiplier):\n\t\t\t\t\tfor k in range(multiplier):\n\t\t\t\t\t\tminib = i*multiplier2+j*multiplier+k\n\t\t\t\t\t\tpoint_coord = self.coords[minib:minib+1]\n\t\t\t\t\t\t_,_, model_out, model_out_combined = self.bsp_network(None, None, out_m, point_coord, is_training=False)\n\t\t\t\t\t\tmodel_float[self.aux_x+i,self.aux_y+j,self.aux_z+k,:] = np.reshape(model_out.detach().cpu().numpy(), [self.test_size,self.test_size,self.test_size,self.c_dim])\n\t\t\t\t\t\tmodel_float_combined[self.aux_x+i,self.aux_y+j,self.aux_z+k] = np.reshape(model_out_combined.detach().cpu().numpy(), [self.test_size,self.test_size,self.test_size])\n\t\t\t\n\t\t\tout_m_ = out_m.detach().cpu().numpy()\n\n\t\t\tbsp_convex_list = []\n\t\t\tmodel_float = model_float<0.01\n\t\t\tmodel_float_sum = np.sum(model_float,axis=3)\n\t\t\tfor i in range(self.c_dim):\n\t\t\t\tslice_i = model_float[:,:,:,i]\n\t\t\t\tif np.max(slice_i)>0: #if one voxel is inside a convex\n\t\t\t\t\t#if np.min(model_float_sum-slice_i*2)>=0: #if this convex is redundant, i.e. 
the convex is inside the shape\n\t\t\t\t\t#\tmodel_float_sum = model_float_sum-slice_i\n\t\t\t\t\t#else:\n\t\t\t\t\t\tbox = []\n\t\t\t\t\t\tfor j in range(self.p_dim):\n\t\t\t\t\t\t\tif w2[j,i]>0.01:\n\t\t\t\t\t\t\t\ta = -out_m_[0,0,j]\n\t\t\t\t\t\t\t\tb = -out_m_[0,1,j]\n\t\t\t\t\t\t\t\tc = -out_m_[0,2,j]\n\t\t\t\t\t\t\t\td = -out_m_[0,3,j]\n\t\t\t\t\t\t\t\tbox.append([a,b,c,d])\n\t\t\t\t\t\tif len(box)>0:\n\t\t\t\t\t\t\tbsp_convex_list.append(np.array(box,np.float32))\n\t\t\t\t\t\t\t\n\t\t\t#convert bspt to mesh\n\t\t\tvertices, polygons = get_mesh(bsp_convex_list)\n\t\t\t#use the following alternative to merge nearby vertices to get watertight meshes\n\t\t\t#vertices, polygons = get_mesh_watertight(bsp_convex_list)\n\n\t\t\t#output ply\n\t\t\twrite_ply_polygon(config.sample_dir+\"/\"+str(t)+\"_bsp.ply\", vertices, polygons)\n\t\t\t\n\t\t\t#sample surface points\n\t\t\tsampled_points_normals = sample_points_polygon_vox64(vertices, polygons, model_float_combined, 16000)\n\t\t\t#check point inside shape or not\n\t\t\tpoint_coord = np.reshape(sampled_points_normals[:,:3]+sampled_points_normals[:,3:]*1e-4, [1,-1,3])\n\t\t\tpoint_coord = np.concatenate([point_coord, np.ones([1,point_coord.shape[1],1],np.float32) ],axis=2)\n\t\t\tpoint_coord = torch.from_numpy(point_coord)\n\t\t\tpoint_coord = point_coord.to(self.device)\n\t\t\t_,_,_, sample_points_value = self.bsp_network(None, None, out_m, point_coord, is_training=False)\n\t\t\tsample_points_value = sample_points_value.detach().cpu().numpy()\n\t\t\tsampled_points_normals = sampled_points_normals[sample_points_value[0,:,0]>1e-4]\n\t\t\tprint(len(bsp_convex_list), len(sampled_points_normals))\n\t\t\tnp.random.shuffle(sampled_points_normals)\n\t\t\twrite_ply_point_normal(config.sample_dir+\"/\"+str(t)+\"_pc.ply\", sampled_points_normals[:4096])\n\n\n\t#output bsp shape as obj with color\n\tdef test_mesh_obj_material(self, config):\n\t\t#load previous checkpoint\n\t\tcheckpoint_txt = os.path.join(self.checkpoint_path, \"checkpoint\")\n\t\tif os.path.exists(checkpoint_txt):\n\t\t\tfin = open(checkpoint_txt)\n\t\t\tmodel_dir = fin.readline().strip()\n\t\t\tfin.close()\n\t\t\tself.bsp_network.load_state_dict(torch.load(model_dir))\n\t\t\tprint(\" [*] Load SUCCESS\")\n\t\telse:\n\t\t\tprint(\" [!] 
Load failed...\")\n\t\t\treturn\n\t\t\n\t\tw2 = self.bsp_network.generator.convex_layer_weights.detach().cpu().numpy()\n\n\t\tdima = self.test_size\n\t\tdim = self.real_size\n\t\tmultiplier = int(dim/dima)\n\t\tmultiplier2 = multiplier*multiplier\n\n\t\t#write material\n\t\t#all output shapes share the same material\n\t\t#which means the same convex always has the same color for different shapes\n\t\t#change the colors in default.mtl to visualize correspondences between shapes\n\t\tfout2 = open(config.sample_dir+\"/default.mtl\", 'w')\n\t\tfor i in range(self.c_dim):\n\t\t\tfout2.write(\"newmtl m\"+str(i+1)+\"\\n\") #material id\n\t\t\tfout2.write(\"Kd 0.80 0.80 0.80\\n\") #color (diffuse) RGB 0.00-1.00\n\t\t\tfout2.write(\"Ka 0 0 0\\n\") #color (ambient) leave 0s\n\t\tfout2.close()\n\n\t\tself.bsp_network.eval()\n\t\tfor t in range(config.start, min(len(self.data_pixels),config.end)):\n\t\t\tmodel_float = np.ones([self.real_size,self.real_size,self.real_size,self.c_dim],np.float32)\n\t\t\tbatch_view = self.data_pixels[t:t+1,self.test_idx].astype(np.float32)/255.0\n\t\t\tbatch_view = torch.from_numpy(batch_view)\n\t\t\tbatch_view = batch_view.to(self.device)\n\t\t\t_, out_m, _,_ = self.bsp_network(batch_view, None, None, None, is_training=False)\n\t\t\tfor i in range(multiplier):\n\t\t\t\tfor j in range(multiplier):\n\t\t\t\t\tfor k in range(multiplier):\n\t\t\t\t\t\tminib = i*multiplier2+j*multiplier+k\n\t\t\t\t\t\tpoint_coord = self.coords[minib:minib+1]\n\t\t\t\t\t\t_,_, model_out, _ = self.bsp_network(None, None, out_m, point_coord, is_training=False)\n\t\t\t\t\t\tmodel_float[self.aux_x+i,self.aux_y+j,self.aux_z+k,:] = np.reshape(model_out.detach().cpu().numpy(), [self.test_size,self.test_size,self.test_size,self.c_dim])\n\t\t\t\n\t\t\tout_m = out_m.detach().cpu().numpy()\n\t\t\t\n\t\t\tbsp_convex_list = []\n\t\t\tcolor_idx_list = []\n\t\t\tmodel_float = model_float<0.01\n\t\t\tmodel_float_sum = np.sum(model_float,axis=3)\n\t\t\tfor i in range(self.c_dim):\n\t\t\t\tslice_i = model_float[:,:,:,i]\n\t\t\t\tif np.max(slice_i)>0: #if one voxel is inside a convex\n\t\t\t\t\tif np.min(model_float_sum-slice_i*2)>=0: #if this convex is redundant, i.e. the convex is inside the shape\n\t\t\t\t\t\tmodel_float_sum = model_float_sum-slice_i\n\t\t\t\t\telse:\n\t\t\t\t\t\tbox = []\n\t\t\t\t\t\tfor j in range(self.p_dim):\n\t\t\t\t\t\t\tif w2[j,i]>0.01:\n\t\t\t\t\t\t\t\ta = -out_m[0,0,j]\n\t\t\t\t\t\t\t\tb = -out_m[0,1,j]\n\t\t\t\t\t\t\t\tc = -out_m[0,2,j]\n\t\t\t\t\t\t\t\td = -out_m[0,3,j]\n\t\t\t\t\t\t\t\tbox.append([a,b,c,d])\n\t\t\t\t\t\tif len(box)>0:\n\t\t\t\t\t\t\tbsp_convex_list.append(np.array(box,np.float32))\n\t\t\t\t\t\t\tcolor_idx_list.append(i)\n\n\t\t\t#print(bsp_convex_list)\n\t\t\tprint(len(bsp_convex_list))\n\t\t\t\n\t\t\t#convert bspt to mesh\n\t\t\tvertices = []\n\n\t\t\t#write obj\n\t\t\tfout2 = open(config.sample_dir+\"/\"+str(t)+\"_bsp.obj\", 'w')\n\t\t\tfout2.write(\"mtllib default.mtl\\n\")\n\n\t\t\tfor i in range(len(bsp_convex_list)):\n\t\t\t\tvg, tg = get_mesh([bsp_convex_list[i]])\n\t\t\t\tvbias=len(vertices)+1\n\t\t\t\tvertices = vertices+vg\n\n\t\t\t\tfout2.write(\"usemtl m\"+str(color_idx_list[i]+1)+\"\\n\")\n\t\t\t\tfor ii in range(len(vg)):\n\t\t\t\t\tfout2.write(\"v \"+str(vg[ii][0])+\" \"+str(vg[ii][1])+\" \"+str(vg[ii][2])+\"\\n\")\n\t\t\t\tfor ii in range(len(tg)):\n\t\t\t\t\tfout2.write(\"f\")\n\t\t\t\t\tfor jj in range(len(tg[ii])):\n\t\t\t\t\t\tfout2.write(\" \"+str(tg[ii][jj]+vbias))\n\t\t\t\t\tfout2.write(\"\\n\")\n\n\t\t\tfout2.close()\n"
] | [
[
"torch.mean",
"torch.zeros",
"torch.load",
"numpy.max",
"torch.cuda.is_available",
"torch.device",
"numpy.random.randint",
"numpy.reshape",
"numpy.arange",
"torch.from_numpy",
"numpy.zeros",
"torch.sigmoid",
"torch.nn.Parameter",
"numpy.min",
"torch.nn.init.constant_",
"torch.min",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.functional.leaky_relu",
"numpy.array",
"numpy.sum",
"numpy.random.shuffle",
"numpy.ones",
"torch.matmul",
"torch.nn.init.xavier_uniform_",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
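The `generator` in the entry above evaluates a binary space partition in three matmul levels. A toy numeric version with made-up dimensions:

import torch

points = torch.rand(1, 5, 4)       # 5 query points in homogeneous coords [x, y, z, 1]
plane_m = torch.randn(1, 4, 8)     # 8 plane equations for the shape
weights = torch.rand(8, 3)         # soft assignment of 8 planes to 3 convexes

h1 = torch.clamp(torch.matmul(points, plane_m), min=0)   # level 1: clamped plane distances
h2 = torch.matmul(h1, (weights > 0.01).float())          # level 2: summed violations per convex
h3 = torch.min(h2, dim=2, keepdim=True)[0]               # level 3: union of convexes via min
print(h1.shape, h2.shape, h3.shape)                      # (1,5,8) (1,5,3) (1,5,1)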
junjc9/PSG | [
"4149b846c1dc19fe67fd8ff3939738ef04a2524a"
] | [
"train.py"
] | [
"# camera-ready\n\nimport sys\n\nfrom datasets import DatasetTrain, DatasetVal # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)\n\nsys.path.append(\"./model\")\nfrom deeplabv3 import DeepLabV3\n\nsys.path.append(\"./utils\")\nfrom utils import add_weight_decay\n\nimport torch\n# import torch.utils.data\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport cv2\n\nimport time\n\n# NOTE! NOTE! change this to not overwrite all log data when you train the model:\nmodel_id = \"1\"\n\nnum_epochs = 100\nbatch_size = 8\nlearning_rate = 0.0001\n\nnetwork = DeepLabV3(model_id, project_dir=\".\").cuda()\n\ntrain_dataset = DatasetTrain(cityscapes_data_path=\"./data/cityscapes\",\n cityscapes_meta_path=\"./data/cityscapes/meta\")\nval_dataset = DatasetVal(cityscapes_data_path=\"./data/cityscapes\",\n cityscapes_meta_path=\"./data/cityscapes/meta\")\n\nnum_train_batches = int(len(train_dataset)/batch_size)\nnum_val_batches = int(len(val_dataset)/batch_size)\nprint (\"num_train_batches:\", num_train_batches)\nprint (\"num_val_batches:\", num_val_batches)\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size, shuffle=True,\n num_workers=4)\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=batch_size, shuffle=False,\n num_workers=4)\n\nparams = add_weight_decay(network, l2_value=0.0001)\noptimizer = torch.optim.Adam(params, lr=learning_rate)\n\nwith open(\"./data/cityscapes/meta/class_weights.pkl\", \"rb\") as file: # (needed for python3)\n class_weights = np.array(pickle.load(file))\nclass_weights = torch.from_numpy(class_weights)\nclass_weights = Variable(class_weights.type(torch.FloatTensor)).cuda()\n\n# loss function\nloss_fn = nn.CrossEntropyLoss(weight=class_weights)\n\nepoch_losses_train = []\nepoch_losses_val = []\nif __name__ == '__main__':\n\n for epoch in range(num_epochs):\n print (\"###########################\")\n print (\"######## NEW EPOCH ########\")\n print (\"###########################\")\n print (\"epoch: %d/%d\" % (epoch+1, num_epochs))\n\n ############################################################################\n # train:\n ############################################################################\n network.train() # (set in training mode, this affects BatchNorm and dropout)\n batch_losses = []\n for step, (imgs, label_imgs) in enumerate(train_loader):\n #current_time = time.time()\n\n imgs = Variable(imgs).cuda() # (shape: (batch_size, 3, img_h, img_w))\n label_imgs = Variable(label_imgs.type(torch.LongTensor)).cuda() # (shape: (batch_size, img_h, img_w))\n\n outputs = network(imgs) # (shape: (batch_size, num_classes, img_h, img_w))\n\n # compute the loss:\n loss = loss_fn(outputs, label_imgs)\n loss_value = loss.data.cpu().numpy()\n batch_losses.append(loss_value)\n\n # optimization step:\n optimizer.zero_grad() # (reset gradients)\n loss.backward() # (compute gradients)\n optimizer.step() # (perform optimization step)\n\n #print (time.time() - current_time)\n\n epoch_loss = np.mean(batch_losses)\n epoch_losses_train.append(epoch_loss)\n with open(\"%s/epoch_losses_train.pkl\" % network.model_dir, \"wb\") as file:\n pickle.dump(epoch_losses_train, file)\n print (\"train loss: %g\" % epoch_loss)\n plt.figure(1)\n 
plt.plot(epoch_losses_train, \"k^\")\n plt.plot(epoch_losses_train, \"k\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.title(\"train loss per epoch\")\n plt.savefig(\"%s/epoch_losses_train.png\" % network.model_dir)\n plt.close(1)\n\n print (\"####\")\n\n ############################################################################\n # val:\n ############################################################################\n network.eval() # (set in evaluation mode, this affects BatchNorm and dropout)\n batch_losses = []\n for step, (imgs, label_imgs, img_ids) in enumerate(val_loader):\n with torch.no_grad(): # (corresponds to setting volatile=True in all variables, this is done during inference to reduce memory consumption)\n imgs = Variable(imgs).cuda() # (shape: (batch_size, 3, img_h, img_w))\n label_imgs = Variable(label_imgs.type(torch.LongTensor)).cuda() # (shape: (batch_size, img_h, img_w))\n\n outputs = network(imgs) # (shape: (batch_size, num_classes, img_h, img_w))\n\n # compute the loss:\n loss = loss_fn(outputs, label_imgs)\n loss_value = loss.data.cpu().numpy()\n batch_losses.append(loss_value)\n\n epoch_loss = np.mean(batch_losses)\n epoch_losses_val.append(epoch_loss)\n with open(\"%s/epoch_losses_val.pkl\" % network.model_dir, \"wb\") as file:\n pickle.dump(epoch_losses_val, file)\n print (\"val loss: %g\" % epoch_loss)\n plt.figure(1)\n plt.plot(epoch_losses_val, \"k^\")\n plt.plot(epoch_losses_val, \"k\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.title(\"val loss per epoch\")\n plt.savefig(\"%s/epoch_losses_val.png\" % network.model_dir)\n plt.close(1)\n\n # save the model weights to disk:\n checkpoint_path = network.checkpoints_dir + \"/model_\" + model_id +\"_epoch_\" + str(epoch+1) + \".pth\"\n torch.save(network.state_dict(), checkpoint_path)\n"
] | [
[
"torch.optim.Adam",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.title",
"matplotlib.use",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"torch.autograd.Variable",
"matplotlib.pyplot.ylabel",
"numpy.mean",
"torch.no_grad",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
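For orientation alongside the training script above, a minimal sketch of reloading one of the per-epoch checkpoints it saves with torch.save; the checkpoint directory below is an assumption standing in for network.checkpoints_dir, and model_id "1" mirrors the script.

    import torch
    from deeplabv3 import DeepLabV3

    network = DeepLabV3("1", project_dir=".").cuda()
    # hypothetical path following the "model_<id>_epoch_<n>.pth" naming used above
    state = torch.load("checkpoints/model_1_epoch_100.pth")
    network.load_state_dict(state)
    network.eval()  # disable dropout and freeze BatchNorm statistics for inference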
AndresQuichimbo/landlab | [
"39fee962ec962a389ae4522a55a17f53a0d37a6e"
] | [
"landlab/components/overland_flow/generate_overland_flow_kinwave.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Landlab component for overland flow using the kinematic-wave approximation.\n\nCreated on Fri May 27 14:26:13 2016\n\n@author: gtucker\n\"\"\"\n\n\nimport numpy as np\n\nfrom landlab import Component\n\n\nclass KinwaveOverlandFlowModel(Component):\n \"\"\"Calculate water flow over topography.\n\n Landlab component that implements a two-dimensional\n kinematic wave model. This is an extremely simple, unsophisticated\n model, originally built simply to demonstrate the component creation\n process. Limitations to the present version include: infiltration is\n handled very crudely, the called is responsible for picking a stable\n time step size (no adaptive time stepping is used in the `run_one_step`\n method), precipitation rate is constant for a given duration (then zero),\n and all parameters are uniform in space. Also, the terrain is assumed\n to be stable over time. Caveat emptor!\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> rg = RasterModelGrid((4, 5), xy_spacing=10.0)\n >>> z = rg.add_zeros(\"topographic__elevation\", at=\"node\")\n >>> s = rg.add_zeros(\"topographic__gradient\", at=\"link\")\n >>> kw = KinwaveOverlandFlowModel(rg)\n >>> kw.vel_coef\n 100.0\n >>> rg.at_node['surface_water__depth']\n array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0.])\n\n References\n ----------\n **Required Software Citation(s) Specific to this Component**\n\n None Listed\n\n **Additional References**\n\n None Listed\n\n \"\"\"\n\n _name = \"KinwaveOverlandFlowModel\"\n\n _info = {\n \"surface_water__depth\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Depth of water on the surface\",\n },\n \"topographic__elevation\": {\n \"dtype\": float,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Land surface topographic elevation\",\n },\n \"topographic__gradient\": {\n \"dtype\": float,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"m/m\",\n \"mapping\": \"link\",\n \"doc\": \"Gradient of the ground surface\",\n },\n \"water__specific_discharge\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m2/s\",\n \"mapping\": \"link\",\n \"doc\": \"flow discharge component in the direction of the link\",\n },\n \"water__velocity\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m/s\",\n \"mapping\": \"link\",\n \"doc\": \"flow velocity component in the direction of the link\",\n },\n }\n\n def __init__(\n self,\n grid,\n precip_rate=1.0,\n precip_duration=1.0,\n infilt_rate=0.0,\n roughness=0.01,\n ):\n \"\"\"Initialize the KinwaveOverlandFlowModel.\n\n Parameters\n ----------\n grid : ModelGrid\n Landlab ModelGrid object\n precip_rate : float, optional (defaults to 1 mm/hr)\n Precipitation rate, mm/hr\n precip_duration : float, optional (defaults to 1 hour)\n Duration of precipitation, hours\n infilt_rate : float, optional (defaults to 0)\n Maximum rate of infiltration, mm/hr\n roughness : float, defaults to 0.01\n Manning roughness coefficient, s/m^1/3\n \"\"\"\n super(KinwaveOverlandFlowModel, self).__init__(grid)\n\n # Store parameters and do unit conversion\n self._current_time = 0\n\n self._precip = precip_rate / 3600000.0 # convert to m/s\n self._precip_duration = precip_duration * 3600.0 # h->s\n self._infilt = infilt_rate / 3600000.0 # convert to m/s\n self._vel_coef = 1.0 / roughness # do 
division now to save time\n\n # Create fields...\n # Elevation\n self._elev = grid.at_node[\"topographic__elevation\"]\n\n # Slope\n self._slope = grid.at_link[\"topographic__gradient\"]\n\n self.initialize_output_fields()\n self._depth = grid.at_node[\"surface_water__depth\"]\n self._vel = grid.at_link[\"water__velocity\"]\n self._disch = grid.at_link[\"water__specific_discharge\"]\n\n # Calculate the ground-surface slope (assume it won't change)\n self._slope[self._grid.active_links] = self._grid.calc_grad_at_link(self._elev)[\n self._grid.active_links\n ]\n self._sqrt_slope = np.sqrt(self._slope)\n self._sign_slope = np.sign(self._slope)\n\n @property\n def vel_coef(self):\n \"\"\"Velocity coefficient.\n\n (1/roughness)\n \"\"\"\n return self._vel_coef\n\n def run_one_step(self, dt):\n \"\"\"Calculate water flow for a time period `dt`.\n\n Default units for dt are *seconds*.\n \"\"\"\n # Calculate water depth at links. This implements an \"upwind\" scheme\n # in which water depth at the links is the depth at the higher of the\n # two nodes.\n H_link = self._grid.map_value_at_max_node_to_link(\n \"topographic__elevation\", \"surface_water__depth\"\n )\n\n # Calculate velocity using the Manning equation.\n self._vel = (\n -self._sign_slope * self._vel_coef * H_link ** 0.66667 * self._sqrt_slope\n )\n\n # Calculate discharge\n self._disch[:] = H_link * self._vel\n\n # Flux divergence\n dqda = self._grid.calc_flux_div_at_node(self._disch)\n\n # Rate of change of water depth\n if self._current_time < self._precip_duration:\n ppt = self._precip\n else:\n ppt = 0.0\n dHdt = ppt - self._infilt - dqda\n\n # Update water depth: simple forward Euler scheme\n self._depth[self._grid.core_nodes] += dHdt[self._grid.core_nodes] * dt\n\n # Very crude numerical hack: prevent negative water depth\n self._depth[np.where(self._depth < 0.0)[0]] = 0.0\n\n self._current_time += dt\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n"
] | [
[
"numpy.sign",
"numpy.where",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
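A short driving sketch for KinwaveOverlandFlowModel above, extending its own doctest setup; the planar slope, rainfall parameters, and dt are illustrative, and per the docstring the caller remains responsible for choosing a stable dt.

    from landlab import RasterModelGrid

    grid = RasterModelGrid((4, 5), xy_spacing=10.0)
    z = grid.add_zeros("topographic__elevation", at="node")
    z[:] = 0.01 * grid.x_of_node                   # gentle planar slope
    grid.add_zeros("topographic__gradient", at="link")
    kw = KinwaveOverlandFlowModel(grid, precip_rate=10.0, precip_duration=0.5)
    for _ in range(1000):                          # 1000 s of rain on the slope
        kw.run_one_step(dt=1.0)
    print(grid.at_node["surface_water__depth"][grid.core_nodes])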
EdmundLuan/CLF_CBF_NMPC_python | [
"9e89e585b67055f31e9324815a5267ed4c29894d"
] | [
"observer.py"
] | [
"import numpy as np\nimport math\n\nclass Observer:\n \"\"\"\n A simple observer that assumes targets move in straight lines at a constant speed.\n\n Take in full observation, return estimated full states and estimated velocities.\n\n Attributes: \n observation_history : emmmmm... \n vel : velocities\n window : history length, i.e. history for how long before now we would like to keep\n step : discretization time step\n r_roi : Radius of region of interest\n \"\"\"\n window = 5\n observation_history = [0]*window\n step = 0.1\n vel = 0\n states = 0\n r_roi = 1\n\n\n def __init__(self, x0, stp, windw, r=1):\n self.window = windw\n self.observation_history = [x0]*windw\n self.step = stp\n self.states = x0\n self.r_roi = r\n\n\n \n def feed(self, new_obsrv):\n # print('1',self.observation_history)\n obsrv = new_obsrv.copy()\n # print(obsrv)\n self.observation_history.pop(0) # Discard the oldest observation in the slot\n self.observation_history.append(obsrv)\n\n # Successive difference method calculating velocities\n num_grp = math.floor(self.window/2)\n sum = self.observation_history[self.window-1] - self.observation_history[self.window-1]\n for i in range(1, num_grp+1):\n sum = sum + self.observation_history[self.window-i] - self.observation_history[self.window-num_grp-i]\n self.states = new_obsrv\n self.vel = sum / num_grp / (self.step*num_grp)\n\n\n\n## Test\nif __name__ == '__main__':\n v = 1\n T = 0.01\n x = np.array([[0, 0], [1, 1], [2,2]])\n observer = Observer(x, T, 5)\n for k in range(1, 10):\n x = x + v*T\n observer.feed(x)\n print(k)\n print(observer.observation_history)\n print(observer.vel)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
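Worked check of the successive-difference estimate in Observer.feed() for the default window = 5: num_grp = 2, and the update averages the lagged pairs (x[4] - x[2]) and (x[3] - x[1]) from the history (oldest first), giving vel = ((x[4] - x[2]) + (x[3] - x[1])) / (2 * 2*step). For a target moving at constant speed v, each pair equals 2*v*step, so the estimate returns exactly v; this is why observer.vel settles at 1 in the __main__ test once the history window has filled.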
sirmammingtonham/ai_and_society_final_proj | [
"f6cb6fd1ce163292441745b99fcbee01ce1e8814"
] | [
"classification/detection.py"
] | [
"\"\"\"\nEvaluates a folder of video files or a single file with a xception binary\nclassification network.\n\nUsage:\npython detect_from_video.py\n -i <folder with video files or path to video file>\n -m <path to model file>\n -o <path to output folder, will write one or multiple output videos there>\n\nAuthor: Andreas Rössler\n\"\"\"\nimport os\nimport argparse\nfrom os.path import join\nimport cv2\nimport dlib\nimport torch\nimport torch.nn as nn\nfrom PIL import Image as pil_image\nimport urllib\nfrom tqdm import tqdm\n\nfrom classification.network.models import model_selection\nfrom classification.dataset.transform import xception_default_data_transforms\n\n# https://discuss.pytorch.org/t/problem-loading-model-trained-on-gpu/17745\n# Download this and change in models.py\n# https://data.lip6.fr/cadene/pretrainedmodels/\n\ncuda = True\n\ndef get_boundingbox(face, width, height, scale=1.3, minsize=None):\n \"\"\"\n Expects a dlib face to generate a quadratic bounding box.\n :param face: dlib face class\n :param width: frame width\n :param height: frame height\n :param scale: bounding box size multiplier to get a bigger face region\n :param minsize: set minimum bounding box size\n :return: x, y, bounding_box_size in opencv form\n \"\"\"\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb\n\n\ndef preprocess_image(image, cuda=cuda):\n \"\"\"\n Preprocesses the image such that it can be fed into our network.\n During this process we envoke PIL to cast it into a PIL image.\n\n :param image: numpy image in opencv form (i.e., BGR and of shape\n :return: pytorch tensor of shape [1, 3, image_size, image_size], not\n necessarily casted to cuda\n \"\"\"\n # Revert from BGR\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # Preprocess using the preprocessing function used during training and\n # casting it to PIL image\n preprocess = xception_default_data_transforms['test']\n preprocessed_image = preprocess(pil_image.fromarray(image))\n # Add first dimension as the network expects a batch\n preprocessed_image = preprocessed_image.unsqueeze(0)\n if cuda:\n preprocessed_image = preprocessed_image.cuda()\n return preprocessed_image\n\n\ndef predict_with_model(image, model, post_function=nn.Softmax(dim=1),\n cuda=cuda):\n \"\"\"\n Predicts the label of an input image. 
Preprocesses the input image and\n casts it to cuda if required\n\n :param image: numpy image\n :param model: torch model with linear layer at the end\n :param post_function: e.g., softmax\n :param cuda: enables cuda, must be the same parameter as the model\n :return: prediction (1 = fake, 0 = real)\n \"\"\"\n # Preprocess\n preprocessed_image = preprocess_image(image, cuda)\n\n # Model prediction\n output = model(preprocessed_image)\n output = post_function(output)\n\n # Cast to desired\n _, prediction = torch.max(output, 1) # argmax\n prediction = float(prediction.cpu().numpy())\n\n return int(prediction), output\n\n\ndef test_full_image_network(video_path, output_path, model=None, model_path=None,\n start_frame=0, end_frame=None, threshold=.1, cuda=cuda):\n \"\"\"\n Reads a video and evaluates a subset of frames with the a detection network\n that takes in a full frame. Outputs are only given if a face is present\n and the face is highlighted using dlib.\n :param video_path: path to video file\n :param model_path: path to model file (should expect the full sized image)\n :param output_path: path where the output video is stored\n :param start_frame: first frame to evaluate\n :param end_frame: last frame to evaluate\n :param cuda: enable cuda\n :return:\n \"\"\"\n\n print('Starting: {}'.format(video_path))\n\n # Read and write\n reader = cv2.VideoCapture(video_path)\n\n video_fn = video_path.split('/')[-1].split('.')[0] + '.avi'\n os.makedirs(output_path, exist_ok=True)\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n fps = reader.get(cv2.CAP_PROP_FPS)\n num_frames = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n writer = None\n\n # Face detector\n face_detector = dlib.get_frontal_face_detector()\n\n # Load model\n # model, *_ = model_selection(modelname='xception', num_out_classes=2)\n if model is None:\n if model_path is not None:\n model = torch.load(model_path, map_location=lambda storage, loc: storage)\n print('Model found in {}'.format(model_path))\n else:\n print('No model found, initializing random model.')\n\n if cuda:\n model = model.cuda()\n\n # Text variables\n font_face = cv2.FONT_HERSHEY_SIMPLEX\n thickness = 2\n font_scale = 1\n\n # Frame numbers and length of output video\n frame_num = 0\n predictions = []\n\n analyse_percentage = .10\n # total_steps = int(num_frames * analyse_percentage)\n total_steps = 10\n frame_step = num_frames // total_steps\n\n assert start_frame < num_frames - 1\n end_frame = end_frame if end_frame else num_frames\n pbar = tqdm(total=end_frame - start_frame)\n # print(total_steps, frame_step, end_frame)\n\n while reader.isOpened():\n # print(frame_num, total_steps, frame_num/num_frames)\n vid_location = frame_num/num_frames\n reader.set(1, vid_location)\n _, image = reader.read()\n if image is None:\n break\n frame_num += frame_step\n\n # if frame_num < start_frame:\n # continue\n pbar.update(frame_step)\n\n # Image size\n height, width = image.shape[:2]\n\n # Init output writer\n # if writer is None:\n # writer = cv2.VideoWriter(join(output_path, video_fn), fourcc, fps,\n # (height, width)[::-1])\n\n # 2. 
Detect with dlib\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = face_detector(gray, 1)\n if len(faces):\n # For now only take biggest face\n face = faces[0]\n\n # --- Prediction ---------------------------------------------------\n # Face crop with dlib and bounding box scale enlargement\n x, y, size = get_boundingbox(face, width, height)\n cropped_face = image[y:y + size, x:x + size]\n\n # Actual prediction using our model\n prediction, output = predict_with_model(cropped_face, model,\n cuda=cuda)\n predictions.append(prediction)\n # ------------------------------------------------------------------\n\n tqdm.write(f'prediction = {prediction}')\n\n # Text and bb\n x = face.left()\n y = face.top()\n w = face.right() - x\n h = face.bottom() - y\n label = 'fake' if prediction == 1 else 'real'\n color = (0, 255, 0) if prediction == 0 else (0, 0, 255)\n output_list = ['{0:.2f}'.format(float(x)) for x in\n output.detach().cpu().numpy()[0]]\n cv2.putText(image, str(output_list) + '=>' + label, (x, y + h + 30),\n font_face, font_scale,\n color, thickness, 2)\n # draw box over face\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)\n\n if frame_num >= end_frame:\n break\n\n # Show\n cv2.imshow('Frame', image)\n cv2.waitKey(33) # About 30 fps\n # writer.write(image)\n pbar.close()\n reader.release()\n cv2.destroyAllWindows()\n # if writer is not None:\n # writer.release()\n # print('Finished! Output saved under {}'.format(output_path))\n # else:\n # print('Input video file was empty')\n import numpy as np\n if np.mean(predictions) > threshold:\n return 1\n else:\n return 0\n\ndef detect_from_image(url, model, cuda=False):\n face_detector = dlib.get_frontal_face_detector()\n if cuda:\n model = model.cuda()\n\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, \"temp.jpg\")\n # Read the image\n image = cv2.imread(\"temp.jpg\")\n\n font_face = cv2.FONT_HERSHEY_SIMPLEX\n thickness = 2\n font_scale = 1\n # Image size\n height, width = image.shape[:2]\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = face_detector(gray, 1)\n\n if len(faces):\n # For now only take biggest face\n face = faces[0]\n\n # --- Prediction ---------------------------------------------------\n # Face crop with dlib and bounding box scale enlargement\n x, y, size = get_boundingbox(face, width, height)\n cropped_face = image[y:y+size, x:x+size]\n\n # Actual prediction using our model\n prediction, output = predict_with_model(cropped_face, model,\n cuda=cuda)\n \n return prediction\n\n\n\nif __name__ == '__main__':\n p = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('--video_path', '-i', type=str)\n p.add_argument('--model_path', '-m', type=str, default=None)\n p.add_argument('--output_path', '-o', type=str,\n default='.')\n p.add_argument('--start_frame', type=int, default=0)\n p.add_argument('--end_frame', type=int, default=None)\n p.add_argument('--cuda', action='store_true')\n args = p.parse_args()\n\n video_path = args.video_path\n if video_path.endswith('.mp4') or video_path.endswith('.avi'):\n test_full_image_network(**vars(args))\n else:\n videos = os.listdir(video_path)\n for video in videos:\n args.video_path = join(video_path, video)\n test_full_image_network(**vars(args))\n"
] | [
[
"torch.nn.Softmax",
"numpy.mean",
"torch.max",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
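A single-image sketch around predict_with_model above; the weights file and the pre-cropped BGR face image are placeholders, and cuda=False keeps everything on the CPU.

    import cv2
    import torch

    model = torch.load("xception_weights.pth", map_location="cpu")  # hypothetical weights file
    face_crop = cv2.imread("face_crop.jpg")  # pre-cropped face region, BGR
    prediction, output = predict_with_model(face_crop, model, cuda=False)
    print("fake" if prediction == 1 else "real", output.detach().numpy()[0])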
topikuu/scikit-fem | [
"3b244831001604bed87e93e88495fba1d950161d"
] | [
"skfem/io/meshio.py"
] | [
"\"\"\"Import any formats supported by meshio.\"\"\"\n\nimport warnings\n\nimport meshio\nimport numpy as np\n\nimport skfem\n\n\nMESH_TYPE_MAPPING = {\n 'tetra': skfem.MeshTet,\n 'hexahedron': skfem.MeshHex,\n 'triangle': skfem.MeshTri,\n 'quad': skfem.MeshQuad,\n 'line': skfem.MeshLine,\n 'tetra10': skfem.MeshTet2,\n 'triangle6': skfem.MeshTri2,\n 'quad9': skfem.MeshQuad2,\n}\n\nTYPE_MESH_MAPPING = {v: k for k, v in MESH_TYPE_MAPPING.items()}\n\n\ndef from_meshio(m, force_mesh_type=None):\n \"\"\"Convert meshio mesh into :class:`skfem.mesh.Mesh`.\n\n Parameters\n ----------\n m\n The mesh from meshio.\n force_mesh_type\n An optional string forcing the mesh type if automatic detection\n fails. See :data:`skfem.io.meshio.MESH_TYPE_MAPPING` for possible\n values.\n\n Returns\n -------\n A :class:`~skfem.mesh.Mesh` object.\n\n \"\"\"\n\n cells = m.cells_dict\n\n if force_mesh_type is None:\n meshio_type = None\n\n for k, v in MESH_TYPE_MAPPING.items():\n # find first if match\n if k in cells:\n meshio_type, mesh_type = k, v\n break\n\n if meshio_type is None:\n raise NotImplementedError(\"Mesh type(s) not supported \"\n \"in import: {}.\".format(cells.keys()))\n else:\n meshio_type, mesh_type = (force_mesh_type,\n MESH_TYPE_MAPPING[force_mesh_type])\n\n # create p and t\n p = np.ascontiguousarray(mesh_type.strip_extra_coordinates(m.points).T)\n t = np.ascontiguousarray(cells[meshio_type].T)\n\n # reorder t if needed\n if meshio_type == 'hexahedron':\n t = t[[0, 4, 3, 1, 7, 5, 2, 6]]\n\n mtmp = mesh_type(p, t)\n\n try:\n # element to boundary element type mapping\n bnd_type = {\n 'line': 'vertex',\n 'triangle': 'line',\n 'quad': 'line',\n 'tetra': 'triangle',\n 'hexahedron': 'quad',\n }[meshio_type]\n\n def find_tagname(tag):\n for key in m.field_data:\n if m.field_data[key][0] == tag:\n return key\n return None\n\n if m.cell_sets: # MSH 4.1\n subdomains = {k: v[meshio_type]\n for k, v in m.cell_sets_dict.items()\n if meshio_type in v}\n facets = {k: [tuple(f) for f in\n np.sort(m.cells_dict[bnd_type][v[bnd_type]])]\n for k, v in m.cell_sets_dict.items()\n if bnd_type in v}\n boundaries = {k: np.array([i for i, f in\n enumerate(map(tuple, mtmp.facets.T))\n if f in v])\n for k, v in facets.items()}\n else: # MSH 2.2?\n elements_tag = m.cell_data_dict['gmsh:physical'][meshio_type]\n subdomains = {}\n tags = np.unique(elements_tag)\n\n for tag in tags:\n t_set = np.nonzero(tag == elements_tag)[0]\n subdomains[find_tagname(tag)] = t_set\n\n # find tagged boundaries\n if bnd_type in m.cell_data_dict['gmsh:physical']:\n facets = m.cells_dict[bnd_type]\n facets_tag = m.cell_data_dict['gmsh:physical'][bnd_type]\n\n # put meshio facets to dict\n dic = {tuple(np.sort(facets[i])): facets_tag[i]\n for i in range(facets.shape[0])}\n\n # get index of corresponding Mesh.facets for each meshio\n # facet found in the dict\n index = np.array([[dic[tuple(np.sort(mtmp.facets[:, i]))], i]\n for i in mtmp.boundary_facets()\n if tuple(np.sort(mtmp.facets[:, i])) in dic])\n\n # read meshio tag numbers and names\n tags = index[:, 0]\n boundaries = {}\n for tag in np.unique(tags):\n tagindex = np.nonzero(tags == tag)[0]\n boundaries[find_tagname(tag)] = index[tagindex, 1]\n\n mtmp = mesh_type(p, t, boundaries, subdomains)\n\n except Exception as e:\n warnings.warn(\"Unable to load tagged boundaries/subdomains.\")\n print(e)\n\n return mtmp\n\n\ndef from_file(filename):\n return from_meshio(meshio.read(filename))\n\n\ndef to_meshio(mesh, point_data=None):\n\n t = mesh.t.copy()\n if isinstance(mesh, skfem.MeshHex):\n t 
= t[[0, 3, 6, 2, 1, 5, 7, 4]]\n\n cells = {TYPE_MESH_MAPPING[type(mesh)]: t.T}\n return meshio.Mesh(mesh.p.T, cells, point_data)\n\n\ndef to_file(mesh, filename, point_data=None, **kwargs):\n meshio.write(filename, to_meshio(mesh, point_data), **kwargs)\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.sort",
"numpy.nonzero",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
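A round-trip sketch through the converters above; mesh.msh and mesh.vtk are placeholder filenames, and the Gmsh file is assumed to carry physical tags so boundaries/subdomains survive the import.

    from skfem.io.meshio import from_file, to_file

    m = from_file("mesh.msh")      # meshio parses, from_meshio builds the skfem mesh
    print(type(m).__name__, m.p.shape, m.t.shape)  # points (dim, n), elements (nodes_per_el, n)
    to_file(m, "mesh.vtk")         # back out through to_meshio and meshio.write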
hxyue1/Nonlinear-Statistical-Coupling | [
"fe3076e68f72579e647ca6abe05542bf4b9fab46"
] | [
"nsc/math/entropy.py"
] | [
"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom .function import coupled_logarithm, coupled_exponential\n\n\ndef importance_sampling_integrator(function, pdf, sampler, n=10000, rounds=1, seed=1):\n \"\"\"\n \n\n Parameters\n ----------\n function : TYPE\n DESCRIPTION.\n pdf : TYPE\n DESCRIPTION.\n sampler : TYPE\n DESCRIPTION.\n n : TYPE, optional\n DESCRIPTION. The default is 10000.\n rounds : int\n DESCRIPTION. The default is 5.\n seed : TYPE, optional\n DESCRIPTION. The default is 1.\n\n Returns\n -------\n TYPE\n DESCRIPTION.\n\n \"\"\"\n # Set a random seed.\n np.random.seed(seed)\n \n # Create a list to hold the estimates for each round.\n estimates = []\n \n for i in range(rounds):\n # Generate n samples from the probability distribution.\n samples = sampler(n)\n # Evaluate the function at the samples and divide by the probability \n # density of the distribution at those samples.\n sampled_values = function(samples) / pdf(samples)\n # Add the estimate of the integral to the estimates list.\n estimates.append(np.mean(sampled_values))\n \n # Return the mean of the estimates as the estimate of the integral.\n return np.mean(estimates)\n\n\ndef coupled_probability(density_func,\n sampler,\n kappa = 0.0, \n alpha = 1.0, \n dim = 1,\n n = 10000,\n rounds=1,\n seed=1):\n \"\"\"\n \n\n Parameters\n ----------\n density_func : TYPE\n DESCRIPTION.\n sampler : TYPE\n DESCRIPTION.\n kappa : TYPE, optional\n DESCRIPTION. The default is 0.0.\n alpha : TYPE, optional\n DESCRIPTION. The default is 1.0.\n dim : TYPE, optional\n DESCRIPTION. The default is 1.\n n : TYPE, optional\n DESCRIPTION. The default is 10000.\n rounds : TYPE, optional\n DESCRIPTION. The default is 5.\n seed : TYPE, optional\n DESCRIPTION. The default is 1.\n\n Returns\n -------\n TYPE\n DESCRIPTION.\n\n \"\"\"\n\n \n # Calculate the risk-bias.\n kMult = (-alpha * kappa) / (1 + dim*kappa)\n \n def raised_density_func(x):\n return density_func(x) ** (1-kMult)\n \n\n def raised_density_func_integration(x):\n return density_func(x) ** (1-kMult)\n \n # Calculate the normalization factor to the coupled CDF equals 1.\n division_factor = importance_sampling_integrator(raised_density_func_integration, \n pdf=density_func,\n sampler=sampler, \n n=n,\n rounds=rounds,\n seed=seed)\n \n \n # Define a function to calculate coupled densities\n def coupled_prob(values):\n return raised_density_func(values) / division_factor\n \n # Return the new functions that calculates the coupled density of a value.\n return coupled_prob\n\n\ndef coupled_cross_entropy(density_func_p, \n density_func_q, \n sampler_p,\n kappa: float = 0.0, \n alpha: float = 1.0, \n dim: int = 1,\n root: bool = False,\n n=10000,\n rounds=1,\n seed=1) -> [float, np.ndarray]:\n \"\"\"\n \n\n Parameters\n ----------\n density_func_p : TYPE\n DESCRIPTION.\n density_func_q : TYPE\n DESCRIPTION.\n sampler_p : TYPE\n DESCRIPTION.\n kappa : float, optional\n DESCRIPTION. The default is 0.0.\n alpha : float, optional\n DESCRIPTION. The default is 1.0.\n dim : int, optional\n DESCRIPTION. The default is 1.\n root : bool, optional\n DESCRIPTION. The default is False.\n n : TYPE, optional\n DESCRIPTION. The default is 10000.\n rounds : TYPE, optional\n DESCRIPTION. The default is 5.\n seed : TYPE, optional\n DESCRIPTION. 
The default is 1.\n\n Returns\n -------\n [float, np.ndarray]\n DESCRIPTION.\n\n \"\"\"\n \n # Fit a coupled_probability function to density_func_p with the other\n # given parameters.\n my_coupled_probability = coupled_probability(density_func=density_func_p,\n sampler=sampler_p,\n kappa=kappa, \n alpha=alpha,\n dim=dim, \n n=n,\n rounds=rounds,\n seed=seed)\n \n def raised_density_func_q(x):\n return density_func_q(x)**(-alpha)\n \n if root == False:\n \n def no_root_coupled_cross_entropy(x):\n \n return (my_coupled_probability(x)\n *(1/-alpha)\n *coupled_logarithm(value=raised_density_func_q(x),\n kappa=kappa, \n dim=dim))\n \n # Integrate the function.\n final_integration = -importance_sampling_integrator(no_root_coupled_cross_entropy, \n pdf=density_func_p,\n sampler=sampler_p, \n n=n,\n rounds=rounds,\n seed=seed)\n \n else:\n def root_coupled_cross_entropy(x):\n\n return (my_coupled_probability(x)\n *coupled_logarithm(value=raised_density_func_q(x),\n kappa=kappa, \n dim=dim)**(1/alpha))\n \n # Integrate the function.\n final_integration = importance_sampling_integrator(root_coupled_cross_entropy, \n pdf=density_func_p,\n sampler=sampler_p, \n n=n,\n rounds=rounds,\n seed=seed)\n \n return final_integration\n\n\ndef coupled_entropy(density_func, \n sampler,\n kappa: float = 0.0, \n alpha: float = 1.0, \n dim: int = 1, \n root: bool = False,\n n=10000,\n rounds=1,\n seed=1) -> [float, np.ndarray]:\n \"\"\"\n \n\n Parameters\n ----------\n density_func : TYPE\n DESCRIPTION.\n sampler : TYPE\n DESCRIPTION.\n kappa : float, optional\n DESCRIPTION. The default is 0.0.\n alpha : float, optional\n DESCRIPTION. The default is 1.0.\n dim : int, optional\n DESCRIPTION. The default is 1.\n root : bool, optional\n DESCRIPTION. The default is False.\n n : TYPE, optional\n DESCRIPTION. The default is 10000.\n rounds : TYPE, optional\n DESCRIPTION. The default is 1.\n seed : TYPE, optional\n DESCRIPTION. The default is 1.\n\n Returns\n -------\n [float, np.ndarray]\n DESCRIPTION.\n\n \"\"\"\n\n \n return coupled_cross_entropy(density_func, \n density_func, \n sampler_p=sampler,\n kappa=kappa, \n alpha=alpha, \n dim=dim,\n root=root,\n n=n,\n rounds=rounds,\n seed=seed\n )\n\n\ndef coupled_kl_divergence(density_func_p, \n density_func_q, \n sampler_p,\n kappa: float = 0.0, \n alpha: float = 1.0, \n dim: int = 1, \n root: bool = False,\n n=10000,\n rounds=1,\n seed=1) -> [float, np.ndarray]:\n \"\"\"\n \n\n Parameters\n ----------\n density_func_p : TYPE\n DESCRIPTION.\n density_func_q : TYPE\n DESCRIPTION.\n sampler_p : TYPE\n DESCRIPTION.\n kappa : float, optional\n DESCRIPTION. The default is 0.0.\n alpha : float, optional\n DESCRIPTION. The default is 1.0.\n dim : int, optional\n DESCRIPTION. The default is 1.\n root : bool, optional\n DESCRIPTION. The default is False.\n n : TYPE, optional\n DESCRIPTION. The default is 10000.\n rounds : TYPE, optional\n DESCRIPTION. The default is 1.\n seed : TYPE, optional\n DESCRIPTION. 
The default is 1.\n\n Returns\n -------\n [float, np.ndarray]\n DESCRIPTION.\n\n \"\"\"\n \n # Calculate the coupled cross-entropy of the dist_p and dist_q.\n coupled_cross_entropy_of_dists = coupled_cross_entropy(density_func_p,\n density_func_q,\n sampler_p=sampler_p,\n kappa=kappa,\n alpha=alpha, \n dim=dim,\n root=root,\n n=n,\n rounds=rounds,\n seed=seed\n )\n # Calculate the coupled entropy of dist_p\n coupled_entropy_of_dist_p = coupled_entropy(density_func_p, \n sampler=sampler_p,\n kappa=kappa, \n alpha=alpha, \n dim=dim,\n root=root,\n n=n,\n rounds=rounds,\n seed=seed\n )\n \n return coupled_cross_entropy_of_dists - coupled_entropy_of_dist_p\n\n\ndef generalized_mean(values: np.ndarray, r: float = 1.0, weights: np.ndarray = None) -> float:\n \"\"\"\n This function calculates the generalized mean of a 1-D array of non- \n negative real numbers using the coupled logarithm and exponential functions.\n \n Parameters\n ----------\n values : np.ndarray\n DESCRIPTION : A 1-D numpy array (row vector) of non-negative numbers\n for which we are calculating the generalized mean.\n r : float, optional\n DESCRIPTION : The risk bias and the power of the generalized mean. \n The default is 1.0 (Arithmetric Mean).\n weights : np.ndarray, optional\n DESCRIPTION : A 1-D numpy array of the weights for each value. \n The default is None, which triggers a conditional to use equal weights.\n\n Returns gen_mean\n -------\n float\n DESCRIPTION : The coupled generalized mean.\n \"\"\"\n \n assert type(values) == np.ndarray, \"values must be a 1-D numpy ndarray.\"\n if len(values.shape) != 1:\n assert ((len(values.shape) == 2) \n & ((values.shape[0] == 1)\n | (values.shape[1] == 1))), \"values must be a 1-D numpy ndarray.\"\n assert (values <= 0).sum() == 0, \"all numbers in values must be greater than 0.\"\n assert ((type(r) == int) | (type(r) == float) | (type(r) == np.int32 ) \n | (type(r) == np.float32) | (type(r) == np.int64) \n | (type(r) == np.float64)), \"r must be a numeric data type, like a float or int.\"\n assert ((type(weights) == type(None))\n | (type(weights) == np.ndarray)), \"weights must either be None or 1-D numpy ndarray.\"\n \n # If weights equals None, equally weight all observations.\n if type(weights) == type(None):\n weights = weights or np.ones(len(values))\n \n # Calculate the log of the generalized mean by taking the dot product of the\n # weights vector and the vector of the coupled logarithm of the values and\n # divide the result by the sum of the the weights.\n log_gen_mean = np.dot(weights, coupled_logarithm(values, kappa=r, dim=0)) / np.sum(weights)\n \n # Calculate the generalized mean by exponentiating the log-generalized mean.\n gen_mean = coupled_exponential(log_gen_mean, kappa=r, dim=0)\n \n # Return the generalized mean.\n return gen_mean\n"
] | [
[
"numpy.mean",
"numpy.sum",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
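A quick numerical check of coupled_entropy above on a standard normal, with scipy.stats.norm supplying both the pdf and the sampler; for kappa = 0 and alpha = 1 the coupled entropy reduces to the Shannon entropy, 0.5*ln(2*pi*e) ≈ 1.419, up to Monte Carlo error.

    from scipy.stats import norm

    H = coupled_entropy(density_func=norm.pdf,
                        sampler=lambda n: norm.rvs(size=n),
                        kappa=0.0, alpha=1.0, dim=1,
                        n=100000, rounds=1, seed=1)
    print(H)  # importance-sampling estimate, close to 1.4189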
agartland/tcrdist2 | [
"77ab0036a3f8f3951093a3bb14741d961ae14eda"
] | [
"tcrdist/tests/test_mixcr.py"
] | [
"import pytest\nfrom tcrdist.repertoire import TCRrep\nfrom tcrdist import mixcr\nimport numpy as np\nimport pandas as pd\nimport os\n\n# INTEGRATION TESTS\ndef test_mixcr_to_tcrdist_on_clones():\n test_clones = os.path.join('tcrdist','test_files_compact','SRR5130260.1.test.fastq.output.clns.txt')\n df = mixcr.mixcr_to_tcrdist2(chain = \"delta\", organism = \"human\", clones_fn = test_clones)\n\n assert isinstance(df, pd.DataFrame)\n df1 = mixcr.remove_entries_with_invalid_vgene(df, chain = \"delta\", organism = \"human\")\n assert isinstance(df, pd.DataFrame)\n df1['subject'] = 'SRR5130260.1'\n \n tr = TCRrep(cell_df = df1, \n organism = \"human\", \n chains = ['delta'], \n db_file='gammadelta_db.tsv') \n print(tr.cell_df.shape[0])\n \n tr.infer_cdrs_from_v_gene(chain = 'delta', imgt_aligned=True)\n \n tr.index_cols = ['subject', \"v_d_gene\", 'd_d_gene', 'j_d_gene',\n 'cdr3_d_nucseq', 'cdr3_d_aa','cdr1_d_aa', \n 'cdr2_d_aa', 'pmhc_d_aa']\n \n tr.deduplicate()\n assert isinstance(tr.clone_df, pd.DataFrame)\n\ndef test_mixcr_to_tcrdist_on_seqs():\n test_seqs = os.path.join('tcrdist','test_files_compact','SRR5130260.1.test.fastq.result.txt')\n df = mixcr.mixcr_to_tcrdist2(chain = \"delta\", organism = \"human\", seqs_fn = test_seqs, clones_fn = None)\n assert isinstance(df, pd.DataFrame)\n df1 = mixcr.remove_entries_with_invalid_vgene(df, chain = \"delta\", organism = \"human\")\n assert isinstance(df, pd.DataFrame)\n df1['subject'] = 'SRR5130260.1'\n df1['count'] = 1\n assert df1.shape[0] == 249\n tr = TCRrep(cell_df = df1, \n organism = \"human\", \n chains = ['delta'], \n db_file='gammadelta_db.tsv') \n print(tr.cell_df.columns)\n assert tr.cell_df.shape[0] == 249\n assert tr.cell_df.shape[0] == 249\n assert tr.cell_df.shape[1] == 7\n # ['v_d_gene', 'd_d_gene', 'j_d_gene', 'cdr3_d_nucseq', 'cdr3_d_aa','subject', 'count']\n tr.infer_cdrs_from_v_gene(chain = 'delta', imgt_aligned=True)\n assert tr.cell_df.shape[0] == 249\n assert tr.cell_df.shape[1] == 10\n # ['v_d_gene', 'd_d_gene', 'j_d_gene', 'cdr3_d_nucseq', 'cdr3_d_aa','subject', cdr1_d_aa', 'cdr2_d_aa', 'pmhc_d_aa']\n tr.index_cols = ['subject', \"v_d_gene\", 'd_d_gene', 'j_d_gene',\n 'cdr3_d_nucseq', 'cdr3_d_aa','cdr1_d_aa', \n 'cdr2_d_aa', 'pmhc_d_aa']\n tr.deduplicate()\n\n #assert tr.clone_df.shape[1] == 10\n\n\n\ndef test_mixcr_integration_with_correct_chain():\n test_clones_fn = os.path.join('tcrdist','test_files_compact','SRR5130260.1.test.fastq.output.clns.txt')\n\n df = mixcr.mixcr_to_tcrdist2(chain = \"delta\", organism = \"human\", seqs_fn = None, clones_fn = test_clones_fn )\n assert isinstance(df, pd.DataFrame)\n df1 = mixcr.remove_entries_with_invalid_vgene(df, chain = \"delta\", organism = \"human\")\n assert isinstance(df, pd.DataFrame)\n assert df1.shape[0] == 89\n\ndef test_mixcr_integration_with_wrong_chain():\n test_clones_fn = os.path.join('tcrdist','test_files_compact','SRR5130260.1.test.fastq.output.clns.txt')\n df = mixcr.mixcr_to_tcrdist2(chain = \"gamma\", organism = \"human\", seqs_fn = None, clones_fn = test_clones_fn )\n df2 = mixcr.remove_entries_with_invalid_vgene(df, chain = \"gamma\", organism = \"human\")\n assert df2.shape[0] == 0\n\n\n\n# def test_mixcr_to_tcrdist_on_clones():\n# clones_seqs = os.path.join('tcrdist','test_files_compact','SRR5130260.1.test.fastq.result.txt')\n# df = mixcr.mixcr_to_tcrdist2(chain = \"delta\", organism = \"human\", seqs_fn = test_seqs, clones_fn = None)\n# assert isinstance(df, pd.DataFrame)\n# df1 = mixcr.remove_entries_with_invalid_vgene(df, chain = \"delta\", 
organism = \"human\")\n# assert isinstance(df, pd.DataFrame)\n# df1['subject'] = 'SRR5130260.1'\n# df1['count'] = 1\n# assert df1.shape[0] == 249\n# tr = TCRrep(cell_df = df1, \n# organism = \"human\", \n# chains = ['delta'], \n# db_file='gammadelta_db.tsv') \n# print(tr.cell_df.columns)\n# assert tr.cell_df.shape[0] == 249\n# assert tr.cell_df.shape[0] == 249\n# assert tr.cell_df.shape[1] == 7\n# # ['v_d_gene', 'd_d_gene', 'j_d_gene', 'cdr3_d_nucseq', 'cdr3_d_aa','subject', 'count']\n# tr.infer_cdrs_from_v_gene(chain = 'delta', imgt_aligned=True)\n# assert tr.cell_df.shape[0] == 249\n# assert tr.cell_df.shape[1] == 10\n# # ['v_d_gene', 'd_d_gene', 'j_d_gene', 'cdr3_d_nucseq', 'cdr3_d_aa','subject', cdr1_d_aa', 'cdr2_d_aa', 'pmhc_d_aa']\n# tr.index_cols = ['subject', \"v_d_gene\", 'd_d_gene', 'j_d_gene',\n# 'cdr3_d_nucseq', 'cdr3_d_aa','cdr1_d_aa', \n# 'cdr2_d_aa', 'pmhc_d_aa']\n# tr.deduplicate()\n# assert tr.clone_df.shape[1] == 10\n\n \n# UNIT TESTS\ndef test_change_TRAVDV_to_TRAVdashDV_1():\n assert mixcr._change_TRAVDV_to_TRAVdashDV(s = 'TRAV29DV5*01') == 'TRAV29/DV5*01'\n assert mixcr._change_TRAVDV_to_TRAVdashDV(s = 'TRAV36DV7*01') == 'TRAV36/DV7*01'\n assert mixcr._change_TRAVDV_to_TRAVdashDV(s = 'TRAV36DV7*02') == 'TRAV36/DV7*02'\n \ndef test_change_TRAVDV_to_TRAVdashDV_2():\n assert mixcr._change_TRAVDV_to_TRAVdashDV(s='TRAV38-2DV8*01') == 'TRAV38-2/DV8*01'\n\ndef test_change_TRAVDV_to_TRAVdashDV_3():\n assert mixcr._change_TRAVDV_to_TRAVdashDV(s='TRAV38-1*01') == 'TRAV38-1*01'\n\ndef test_change_TRAVDV_to_TRAVdashDV_4():\n \"NaN case\"\n assert mixcr._change_TRAVDV_to_TRAVdashDV(s=np.NaN) is np.NaN \n\ndef test_allele_00_to_01_1():\n assert mixcr._allele_00_to_01('TRDV3*00') == 'TRDV3*01'\n assert mixcr._allele_00_to_01('TRDV3*01') == 'TRDV3*01'\n assert mixcr._allele_00_to_01('TRDV3*02') == 'TRDV3*02'\n\ndef test_allele_00_to_01_2():\n assert mixcr._allele_00_to_01('TRDD3*00') == 'TRDD3*01'\n assert mixcr._allele_00_to_01('TRDD3*01') == 'TRDD3*01'\n assert mixcr._allele_00_to_01('TRDD3*02') == 'TRDD3*02'\n\ndef test_allele_00_to_01_3():\n assert mixcr._allele_00_to_01(np.NaN) is np.NaN\n\ndef test_take_top_mixcr_gene_hit():\n assert mixcr._take_top_mixcr_gene_hit('TRDD3*00(45),TRDD2*00(40)') == 'TRDD3*00'\n assert mixcr._take_top_mixcr_gene_hit('TRDD3*00(45)') == 'TRDD3*00'\n assert isinstance(mixcr._take_top_mixcr_gene_hit(np.NaN),float)\n assert mixcr._take_top_mixcr_gene_hit(np.NaN) is np.NaN\n\ndef test_validate_gene_names_1():\n df = pd.DataFrame({'v_d_gene':['TRDV3*01','TRDV1*01', 'TRAV29/DV5*01', \n 'TRAV38-2/DV8*01', \"TRBV1*01\"]})\n r = mixcr._validate_gene_names( series = df['v_d_gene'], \n chain = 'delta', \n organism = 'human')\n assert np.all(r == pd.Series([True,True,True,True,False])) \n\ndef test_validate_gene_names_2():\n df = pd.DataFrame({'v_d_gene':['TRDV3*01','TRDV1*01', 'TRAV29DV5*01', \n 'TRAV38-2DV8*01', \"TRBV1*01\"]})\n r = mixcr._validate_gene_names( series = df['v_d_gene'], \n chain = 'delta', \n organism = 'human')\n assert np.all(r == pd.Series([True,True,False,False,False])) "
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
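The functions above are plain pytest tests whose fixtures resolve from tcrdist/test_files_compact relative to the working directory, so, assuming that data ships with the checkout, a focused run from the repository root looks like:

    pytest tcrdist/tests/test_mixcr.py -k "allele_00_to_01" -v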
dvtxc/fibresem | [
"7c763bc7a153ad382a182cdb7b43614e11182f07"
] | [
"fibresem/matplotlib_scalebar/scalebar.py"
] | [
"\"\"\"\nArtist for matplotlib to display a scale / micron bar.\n\nExample::\n\n >>> fig = plt.figure()\n >>> ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])\n >>> ax.imshow(...)\n >>> scalebar = ScaleBar(0.2)\n >>> ax.add_artist(scalebar)\n >>> plt.show()\n\nThe following parameters are available for customization in the matplotlibrc:\n - scalebar.length_fraction\n - scalebar.height_fraction\n - scalebar.location\n - scalebar.pad\n - scalebar.border_pad\n - scalebar.sep\n - scalebar.frameon\n - scalebar.color\n - scalebar.box_color\n - scalebar.box_alpha\n - scalebar.scale_loc\n - scalebar.label_loc\n\nSee the class documentation (:class:`.Scalebar`) for a description of the\nparameters.\n\"\"\"\n\n__all__ = [\n \"ScaleBar\",\n \"SI_LENGTH\",\n \"SI_LENGTH_RECIPROCAL\",\n \"IMPERIAL_LENGTH\",\n \"PIXEL_LENGTH\",\n]\n\n# Standard library modules.\nimport bisect\nimport warnings\n\n# Third party modules.\nimport matplotlib\nfrom matplotlib.artist import Artist\nfrom matplotlib.font_manager import FontProperties\nfrom matplotlib.rcsetup import (\n defaultParams,\n validate_float,\n validate_bool,\n validate_color,\n ValidateInStrings,\n)\nfrom matplotlib.offsetbox import (\n AuxTransformBox,\n TextArea,\n VPacker,\n HPacker,\n AnchoredOffsetbox,\n)\nfrom matplotlib.patches import Rectangle\n\n# Local modules.\nfrom .dimension import (\n _Dimension,\n SILengthDimension,\n SILengthReciprocalDimension,\n ImperialLengthDimension,\n PixelLengthDimension,\n AngleDimension,\n)\n\n# Globals and constants variables.\n\n# Setup of extra parameters in the matplotlic rc\n_VALID_SCALE_LOCATIONS = [\"bottom\", \"top\", \"right\", \"left\"]\n_validate_scale_loc = ValidateInStrings(\n \"scale_loc\", _VALID_SCALE_LOCATIONS, ignorecase=True\n)\n\n_VALID_LABEL_LOCATIONS = [\"bottom\", \"top\", \"right\", \"left\"]\n_validate_label_loc = ValidateInStrings(\n \"label_loc\", _VALID_LABEL_LOCATIONS, ignorecase=True\n)\n\n_VALID_ROTATIONS = [\"horizontal\", \"vertical\"]\n_validate_rotation = ValidateInStrings(\"rotation\", _VALID_ROTATIONS, ignorecase=True)\n\n\ndef _validate_legend_loc(loc):\n rc = matplotlib.RcParams()\n rc[\"legend.loc\"] = loc\n return loc\n\n\ndefaultParams.update(\n {\n \"scalebar.length_fraction\": [0.2, validate_float],\n \"scalebar.width_fraction\": [0.01, validate_float],\n \"scalebar.location\": [\"upper right\", _validate_legend_loc],\n \"scalebar.pad\": [0.2, validate_float],\n \"scalebar.border_pad\": [0.1, validate_float],\n \"scalebar.sep\": [5, validate_float],\n \"scalebar.frameon\": [True, validate_bool],\n \"scalebar.color\": [\"k\", validate_color],\n \"scalebar.box_color\": [\"w\", validate_color],\n \"scalebar.box_alpha\": [1.0, validate_float],\n \"scalebar.scale_loc\": [\"bottom\", _validate_scale_loc],\n \"scalebar.label_loc\": [\"top\", _validate_label_loc],\n \"scalebar.rotation\": [\"horizontal\", _validate_rotation],\n }\n)\n\n# Recreate the validate function\nmatplotlib.rcParams.validate = dict(\n (key, converter)\n for key, (default, converter) in defaultParams.items()\n if key not in matplotlib._all_deprecated\n)\n\n# Dimension lookup\nSI_LENGTH = \"si-length\"\nSI_LENGTH_RECIPROCAL = \"si-length-reciprocal\"\nIMPERIAL_LENGTH = \"imperial-length\"\nPIXEL_LENGTH = \"pixel-length\"\nANGLE = \"angle\"\n\n_DIMENSION_LOOKUP = {\n SI_LENGTH: SILengthDimension,\n SI_LENGTH_RECIPROCAL: SILengthReciprocalDimension,\n IMPERIAL_LENGTH: ImperialLengthDimension,\n PIXEL_LENGTH: PixelLengthDimension,\n ANGLE: AngleDimension,\n}\n\n\nclass ScaleBar(Artist):\n\n zorder = 6\n\n 
_PREFERRED_VALUES = [1, 2, 5, 10, 20, 25, 50, 75, 100, 125, 150, 200, 500, 750]\n\n _LOCATIONS = {\n \"upper right\": 1,\n \"upper left\": 2,\n \"lower left\": 3,\n \"lower right\": 4,\n \"right\": 5,\n \"center left\": 6,\n \"center right\": 7,\n \"lower center\": 8,\n \"upper center\": 9,\n \"center\": 10,\n }\n\n def __init__(\n self,\n dx,\n units=\"m\",\n dimension=\"si-length\",\n label=None,\n length_fraction=None,\n height_fraction=None,\n width_fraction=None,\n location=None,\n loc=None,\n pad=None,\n border_pad=None,\n sep=None,\n frameon=None,\n color=None,\n box_color=None,\n box_alpha=None,\n scale_loc=None,\n label_loc=None,\n font_properties=None,\n label_formatter=None,\n scale_formatter=None,\n fixed_value=None,\n fixed_units=None,\n animated=False,\n rotation=None,\n ):\n \"\"\"\n Creates a new scale bar.\n\n There are two modes of operation:\n\n 1. Length, value and units of the scale bar are automatically\n determined based on the specified pixel size *dx* and\n *length_fraction*. The value will only take the following numbers:\n 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 125, 150, 200, 500 or 750.\n 2. The desired value and units are specified by the user\n (*fixed_value* and *fixed_units*) and the length is calculated\n based on the specified pixel size *dx*.\n\n :arg dx: size of one pixel in *units*\n Set ``dx`` to 1.0 if the axes image has already been calibrated by\n setting its ``extent``.\n :type dx: :class:`float`\n\n :arg units: units of *dx* (default: ``m``)\n :type units: :class:`str`\n\n :arg dimension: dimension of *dx* and *units*.\n It can either be equal\n * ``:const:`si-length```: scale bar showing km, m, cm, etc.\n * ``:const:`imperial-length```: scale bar showing in, ft, yd, mi, etc.\n * ``:const:`si-length-reciprocal```: scale bar showing 1/m, 1/cm, etc.\n * ``:const:`pixel-length```: scale bar showing px, kpx, Mpx, etc.\n * ``:const:`angle```: scale bar showing \\u00b0, \\u2032 or \\u2032\\u2032.\n * a :class:`matplotlib_scalebar.dimension._Dimension` object\n :type dimension: :class:`str` or\n :class:`matplotlib_scalebar.dimension._Dimension`\n\n :arg label: optional label associated with the scale bar\n (default: ``None``, no label is shown)\n :type label: :class:`str`\n\n :arg length_fraction: length of the scale bar as a fraction of the\n axes's width (default: rcParams['scalebar.lenght_fraction'] or ``0.2``).\n This argument is ignored if a *fixed_value* is specified.\n :type length_fraction: :class:`float`\n\n :arg width_fraction: width of the scale bar as a fraction of the\n axes's height (default: rcParams['scalebar.width_fraction'] or ``0.01``)\n :type width_fraction: :class:`float`\n\n :arg location: a location code (same as legend)\n (default: rcParams['scalebar.location'] or ``upper right``)\n :type location: :class:`str`\n\n :arg loc: alias for location\n :type loc: :class:`str`\n\n :arg pad: fraction of the font size\n (default: rcParams['scalebar.pad'] or ``0.2``)\n :type pad: :class:`float`\n\n :arg border_pad : fraction of the font size\n (default: rcParams['scalebar.border_pad'] or ``0.1``)\n :type border_pad: :class:`float`\n\n :arg sep : separation between scale bar and label in points\n (default: rcParams['scalebar.sep'] or ``5``)\n :type sep: :class:`float`\n\n :arg frameon : if True, will draw a box around the scale bar\n and label (default: rcParams['scalebar.frameon'] or ``True``)\n :type frameon: :class:`bool`\n\n :arg color : color for the scale bar and label\n (default: rcParams['scalebar.color'] or ``k``)\n :type color: 
:class:`str`\n\n :arg box_color: color of the box (if *frameon*)\n (default: rcParams['scalebar.box_color'] or ``w``)\n :type box_color: :class:`str`\n\n :arg box_alpha: transparency of box\n (default: rcParams['scalebar.box_alpha'] or ``1.0``)\n :type box_alpha: :class:`float`\n\n :arg scale_loc : either ``bottom``, ``top``, ``left``, ``right``\n (default: rcParams['scalebar.scale_loc'] or ``bottom``)\n :type scale_loc: :class:`str`\n\n :arg label_loc: either ``bottom``, ``top``, ``left``, ``right``\n (default: rcParams['scalebar.label_loc'] or ``top``)\n :type label_loc: :class:`str`\n\n :arg font_properties: font properties of the label text, specified\n either as dict or `fontconfig <http://www.fontconfig.org/>`_\n pattern (XML).\n :type font_properties: :class:`matplotlib.font_manager.FontProperties`,\n :class:`str` or :class:`dict`\n\n :arg scale_formatter: function used to format the label. Needs to take\n the value (float) and the unit (str) as input and return the label\n string.\n :type scale_formatter: :class:`func`\n\n :arg fixed_value: value for the scale bar. If ``None``, the value is\n automatically determined based on *length_fraction*.\n :type fixed_value: :class:`float`\n\n :arg fixed_units: units of the *fixed_value*. If ``None`` and\n *fixed_value* is not ``None``, the units of *dx* are used.\n :type fixed_units: :class:`str`\n\n :arg animated: animation state (default: ``False``)\n :type animated: :class`bool`\n\n :arg rotation: either ``horizontal`` or ``vertical``\n (default: rcParams['scalebar.rotation'] or ``horizontal``)\n :type rotation: :class:`str`\n \"\"\"\n Artist.__init__(self)\n\n # Deprecation\n if height_fraction is not None:\n warnings.warn(\n \"The height_fraction argument was deprecated. Use width_fraction instead.\",\n DeprecationWarning,\n )\n width_fraction = width_fraction or height_fraction\n\n if label_formatter is not None:\n warnings.warn(\n \"The label_formatter argument was deprecated. 
Use scale_formatter instead.\",\n DeprecationWarning,\n )\n scale_formatter = scale_formatter or label_formatter\n\n if loc is not None and self._convert_location(loc) != self._convert_location(\n location\n ):\n raise ValueError(\"loc and location are specified and not equal\")\n\n self.dx = dx\n self.dimension = dimension # Should be initialize before units\n self.units = units\n self.label = label\n self.length_fraction = length_fraction\n self.width_fraction = width_fraction\n self.location = location or loc\n self.pad = pad\n self.border_pad = border_pad\n self.sep = sep\n self.frameon = frameon\n self.color = color\n self.box_color = box_color\n self.box_alpha = box_alpha\n self.scale_loc = scale_loc\n self.label_loc = label_loc\n self.scale_formatter = scale_formatter\n self.font_properties = font_properties\n self.fixed_value = fixed_value\n self.fixed_units = fixed_units\n self.set_animated(animated)\n self.rotation = rotation\n\n def _calculate_best_length(self, length_px):\n dx = self.dx\n units = self.units\n value = length_px * dx\n\n newvalue, newunits = self.dimension.calculate_preferred(value, units)\n factor = value / newvalue\n\n index = bisect.bisect_left(self._PREFERRED_VALUES, newvalue)\n if index > 0:\n # When we get the lowest index of the list, removing -1 will\n # return the last index.\n index -= 1\n newvalue = self._PREFERRED_VALUES[index]\n\n length_px = newvalue * factor / dx\n\n return length_px, newvalue, newunits\n\n def _calculate_exact_length(self, value, units):\n newvalue = self.dimension.convert(value, units, self.units)\n return newvalue / self.dx\n\n def draw(self, renderer, *args, **kwargs):\n if not self.get_visible():\n return\n if self.dx == 0:\n return\n\n # Late import\n from matplotlib import rcParams\n\n # Deprecation\n if rcParams.get(\"scalebar.height_fraction\") is not None:\n warnings.warn(\n \"The scalebar.height_fraction parameter in matplotlibrc is deprecated. 
\"\n \"Use scalebar.width_fraction instead.\",\n DeprecationWarning,\n )\n rcParams.setdefault(\n \"scalebar.width_fraction\", rcParams[\"scalebar.height_fraction\"]\n )\n\n # Get parameters\n def _get_value(attr, default):\n value = getattr(self, attr)\n if value is None:\n value = rcParams.get(\"scalebar.\" + attr, default)\n return value\n\n length_fraction = _get_value(\"length_fraction\", 0.2)\n width_fraction = _get_value(\"width_fraction\", 0.01)\n location = _get_value(\"location\", \"upper right\")\n if isinstance(location, str):\n location = self._LOCATIONS[location.lower()]\n pad = _get_value(\"pad\", 0.2)\n border_pad = _get_value(\"border_pad\", 0.1)\n sep = _get_value(\"sep\", 5)\n frameon = _get_value(\"frameon\", True)\n color = _get_value(\"color\", \"k\")\n box_color = _get_value(\"box_color\", \"w\")\n box_alpha = _get_value(\"box_alpha\", 1.0)\n scale_loc = _get_value(\"scale_loc\", \"bottom\").lower()\n label_loc = _get_value(\"label_loc\", \"top\").lower()\n font_properties = self.font_properties\n fixed_value = self.fixed_value\n fixed_units = self.fixed_units or self.units\n rotation = _get_value(\"rotation\", \"horizontal\").lower()\n label = self.label\n\n # Create text properties\n textprops = {\"color\": color, \"rotation\": rotation}\n if font_properties is not None:\n textprops[\"fontproperties\"] = font_properties\n\n # Calculate value, units and length\n ax = self.axes\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n if rotation == \"vertical\":\n xlim, ylim = ylim, xlim\n\n # Mode 1: Auto\n if self.fixed_value is None:\n length_px = abs(xlim[1] - xlim[0]) * length_fraction\n length_px, value, units = self._calculate_best_length(length_px)\n\n # Mode 2: Fixed\n else:\n value = fixed_value\n units = fixed_units\n length_px = self._calculate_exact_length(value, units)\n\n scale_text = self.scale_formatter(value, self.dimension.to_latex(units))\n\n width_px = abs(ylim[1] - ylim[0]) * width_fraction\n\n # Create scale bar\n if rotation == \"horizontal\":\n scale_rect = Rectangle(\n (0, 0),\n length_px,\n width_px,\n fill=True,\n facecolor=color,\n edgecolor=\"none\",\n )\n else:\n scale_rect = Rectangle(\n (0, 0),\n width_px,\n length_px,\n fill=True,\n facecolor=color,\n edgecolor=\"none\",\n )\n\n scale_bar_box = AuxTransformBox(ax.transData)\n scale_bar_box.add_artist(scale_rect)\n\n scale_text_box = TextArea(scale_text, textprops=textprops)\n\n if scale_loc in [\"bottom\", \"right\"]:\n children = [scale_bar_box, scale_text_box]\n else:\n children = [scale_text_box, scale_bar_box]\n\n if scale_loc in [\"bottom\", \"top\"]:\n Packer = VPacker\n else:\n Packer = HPacker\n\n scale_box = Packer(children=children, align=\"center\", pad=0, sep=sep)\n\n # Create label\n if label:\n label_box = TextArea(label, textprops=textprops)\n else:\n label_box = None\n\n # Create final offset box\n if label_box:\n if label_loc in [\"bottom\", \"right\"]:\n children = [scale_box, label_box]\n else:\n children = [label_box, scale_box]\n\n if label_loc in [\"bottom\", \"top\"]:\n Packer = VPacker\n else:\n Packer = HPacker\n\n child = Packer(children=children, align=\"center\", pad=0, sep=sep)\n else:\n child = scale_box\n\n box = AnchoredOffsetbox(\n loc=location, pad=pad, borderpad=border_pad, child=child, frameon=frameon\n )\n\n box.axes = ax\n box.set_figure(self.get_figure())\n box.patch.set_color(box_color)\n box.patch.set_alpha(box_alpha)\n box.draw(renderer)\n\n def get_dx(self):\n return self._dx\n\n def set_dx(self, dx):\n self._dx = float(dx)\n\n dx = property(get_dx, 
set_dx)\n\n def get_dimension(self):\n return self._dimension\n\n def set_dimension(self, dimension):\n if dimension in _DIMENSION_LOOKUP:\n dimension = _DIMENSION_LOOKUP[dimension]()\n\n if not isinstance(dimension, _Dimension):\n raise ValueError(\n f\"Unknown dimension: {dimension}. \"\n f\"Known dimensions: {', '.join(_DIMENSION_LOOKUP)}\"\n )\n\n self._dimension = dimension\n\n dimension = property(get_dimension, set_dimension)\n\n def get_units(self):\n return self._units\n\n def set_units(self, units):\n if not self.dimension.is_valid_units(units):\n raise ValueError(f\"Invalid unit ({units}) with dimension\")\n self._units = units\n\n units = property(get_units, set_units)\n\n def get_label(self):\n return self._label\n\n def set_label(self, label):\n self._label = label\n\n label = property(get_label, set_label)\n\n def get_length_fraction(self):\n return self._length_fraction\n\n def set_length_fraction(self, fraction):\n if fraction is not None:\n fraction = float(fraction)\n if fraction <= 0.0 or fraction > 1.0:\n raise ValueError(\"Length fraction must be between [0.0, 1.0]\")\n self._length_fraction = fraction\n\n length_fraction = property(get_length_fraction, set_length_fraction)\n\n def get_width_fraction(self):\n return self._width_fraction\n\n def set_width_fraction(self, fraction):\n if fraction is not None:\n fraction = float(fraction)\n if fraction <= 0.0 or fraction > 1.0:\n raise ValueError(\"Width fraction must be between [0.0, 1.0]\")\n self._width_fraction = fraction\n\n width_fraction = property(get_width_fraction, set_width_fraction)\n\n def get_height_fraction(self):\n warnings.warn(\n \"The get_height_fraction method is deprecated. Use get_width_fraction instead.\",\n DeprecationWarning,\n )\n return self.width_fraction\n\n def set_height_fraction(self, fraction):\n warnings.warn(\n \"The set_height_fraction method is deprecated. Use set_width_fraction instead.\",\n DeprecationWarning,\n )\n self.width_fraction = fraction\n\n height_fraction = property(get_height_fraction, set_height_fraction)\n\n @classmethod\n def _convert_location(cls, loc):\n if isinstance(loc, str):\n if loc not in cls._LOCATIONS:\n raise ValueError(\n f\"Unknown location: {loc}. 
\"\n f\"Valid locations: {', '.join(cls._LOCATIONS)}\"\n )\n loc = cls._LOCATIONS[loc]\n return loc\n\n def get_location(self):\n return self._location\n\n def set_location(self, loc):\n self._location = self._convert_location(loc)\n\n location = property(get_location, set_location)\n\n get_loc = get_location\n set_loc = set_location\n loc = location\n\n def get_pad(self):\n return self._pad\n\n def set_pad(self, pad):\n self._pad = pad\n\n pad = property(get_pad, set_pad)\n\n def get_border_pad(self):\n return self._border_pad\n\n def set_border_pad(self, pad):\n self._border_pad = pad\n\n border_pad = property(get_border_pad, set_border_pad)\n\n def get_sep(self):\n return self._sep\n\n def set_sep(self, sep):\n self._sep = sep\n\n sep = property(get_sep, set_sep)\n\n def get_frameon(self):\n return self._frameon\n\n def set_frameon(self, on):\n self._frameon = on\n\n frameon = property(get_frameon, set_frameon)\n\n def get_color(self):\n return self._color\n\n def set_color(self, color):\n self._color = color\n\n color = property(get_color, set_color)\n\n def get_box_color(self):\n return self._box_color\n\n def set_box_color(self, color):\n self._box_color = color\n\n box_color = property(get_box_color, set_box_color)\n\n def get_box_alpha(self):\n return self._box_alpha\n\n def set_box_alpha(self, alpha):\n if alpha is not None:\n alpha = float(alpha)\n if alpha < 0.0 or alpha > 1.0:\n raise ValueError(\"Alpha must be between [0.0, 1.0]\")\n self._box_alpha = alpha\n\n box_alpha = property(get_box_alpha, set_box_alpha)\n\n def get_scale_loc(self):\n return self._scale_loc\n\n def set_scale_loc(self, loc):\n if loc is not None and loc not in _VALID_SCALE_LOCATIONS:\n raise ValueError(\n f\"Unknown location: {loc}. \"\n f\"Valid locations: {', '.join(_VALID_SCALE_LOCATIONS)}\"\n )\n self._scale_loc = loc\n\n scale_loc = property(get_scale_loc, set_scale_loc)\n\n def get_label_loc(self):\n return self._label_loc\n\n def set_label_loc(self, loc):\n if loc is not None and loc not in _VALID_LABEL_LOCATIONS:\n raise ValueError(\n f\"Unknown location: {loc}. \"\n f\"Valid locations: {', '.join(_VALID_LABEL_LOCATIONS)}\"\n )\n\n self._label_loc = loc\n\n label_loc = property(get_label_loc, set_label_loc)\n\n def get_font_properties(self):\n return self._font_properties\n\n def set_font_properties(self, props):\n if props is None:\n props = FontProperties()\n elif isinstance(props, dict):\n props = FontProperties(**props)\n elif isinstance(props, str):\n props = FontProperties(props)\n else:\n raise ValueError(\n \"Unsupported `font_properties`. \"\n \"Pass either a dict or a font config pattern as string.\"\n )\n self._font_properties = props\n\n font_properties = property(get_font_properties, set_font_properties)\n\n def get_scale_formatter(self):\n if self._scale_formatter is None:\n return self.dimension.create_label\n return self._scale_formatter\n\n def set_scale_formatter(self, scale_formatter):\n self._scale_formatter = scale_formatter\n\n scale_formatter = property(get_scale_formatter, set_scale_formatter)\n\n def get_label_formatter(self):\n warnings.warn(\n \"The get_label_formatter method is deprecated. Use get_scale_formatter instead.\",\n DeprecationWarning,\n )\n return self.scale_formatter\n\n def set_label_formatter(self, scale_formatter):\n warnings.warn(\n \"The set_label_formatter method is deprecated. 
Use set_scale_formatter instead.\",\n DeprecationWarning,\n )\n self.scale_formatter = scale_formatter\n\n label_formatter = property(get_label_formatter, set_label_formatter)\n\n def get_fixed_value(self):\n return self._fixed_value\n\n def set_fixed_value(self, value):\n self._fixed_value = value\n\n fixed_value = property(get_fixed_value, set_fixed_value)\n\n def get_fixed_units(self):\n return self._fixed_units\n\n def set_fixed_units(self, units):\n self._fixed_units = units\n\n fixed_units = property(get_fixed_units, set_fixed_units)\n\n def get_rotation(self):\n return self._rotation\n\n def set_rotation(self, rotation):\n if rotation is not None and rotation not in _VALID_ROTATIONS:\n raise ValueError(\n f\"Unknown rotation: {rotation}. \"\n f\"Valid locations: {', '.join(_VALID_ROTATIONS)}\"\n )\n self._rotation = rotation\n\n rotation = property(get_rotation, set_rotation)\n"
] | [
[
"matplotlib.offsetbox.AuxTransformBox",
"matplotlib.rcsetup.ValidateInStrings",
"matplotlib.rcParams.get",
"matplotlib.artist.Artist.__init__",
"matplotlib.rcsetup.defaultParams.update",
"matplotlib.RcParams",
"matplotlib.patches.Rectangle",
"matplotlib.font_manager.FontProperties",
"matplotlib.rcsetup.defaultParams.items",
"matplotlib.offsetbox.AnchoredOffsetbox",
"matplotlib.offsetbox.TextArea",
"matplotlib.rcParams.setdefault"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
toddkarin/vocmax | [
"00ba93acf67d03d6a3e8e5055a006accc5629159"
] | [
"vocmax/nsrdb.py"
] | [
"\nimport numpy as np\nimport pandas as pd\nimport glob\nimport os\nimport webbrowser\nimport time\n\n# import sys\n# import matplotlib\n# matplotlib.use('TkAgg')\n# import matplotlib.pyplot as plt\nimport pytz\n\ndef make_lat_long_grid(lat_lims=[-124,-66], lon_lims=[25, 47], lat_step=1, lon_step=1 ):\n \"\"\"\n Make a lat/long grid pairs for the coordinates specified. Note that the\n end limit point is typically not included in the resultant grid.\n\n Example\n Make a latitude longitude grid:\n\n > make_lat_long_grid(lat_lims=[-124,-66], lon_lims=[25, 47], lat_step=1, lon_step=1 )\n\n\n\n\n\n \"\"\"\n\n lat_flat = np.arange( np.min(lat_lims), np.max(lat_lims), lat_step)\n lon_flat = np.arange( np.min(lon_lims), np.max(lon_lims), lon_step)\n\n\n lat = np.zeros(len(lat_flat)*len(lon_flat))\n lon = np.zeros(len(lat_flat) * len(lon_flat))\n n=0\n for j in range(len(lat_flat)):\n for k in range(len(lon_flat)):\n lat[n], lon[n] = lat_flat[j], lon_flat[k]\n n=n+1\n\n\n\n return {'lat':lat, 'lon':lon, 'num':len(lat)}\n\n\n\n\ndef inspect_database(root_path):\n \"\"\"Build database for NSRDB files\n\n Build a lat/long and year list for NSRDB csv files in a data folder.\n Folders are searched recursively (folders within folders are okay). This\n is a fast way to inspect a set of data files and build a database of file\n path, latitude, longitude and year.\n\n File names must be of the form 'locationid_lat_lon_year.csv'. For\n example, '14189_18.81_-155.94_2000.csv'.\n\n Examples\n --------\n inspect_database('data_folder')\n\n\n Parameters\n ----------\n root_path\n\n Returns\n -------\n filedata\n pandas DataFrame containing information on files in the root_path..\n\n \"\"\"\n\n import fnmatch\n import os\n\n # root_path = 'around_fairfield'\n pattern = '*.csv'\n\n\n filedata = pd.DataFrame(columns=['lat','lon','year','filepath'])\n filename_list = []\n filename_fullpath = []\n location_id = []\n lat = []\n lon = []\n year = []\n\n # Cycle through files in directory, extract info from filename without opening file.\n # Note this would break if NREL changed their naming scheme.\n for root, dirs, files in os.walk(root_path):\n for filename in fnmatch.filter(files, pattern):\n\n temp = filename.split('_')\n\n filename_list.append(filename)\n filename_fullpath.append(os.path.join(root, filename))\n location_id.append(int(temp[0]))\n lat.append(float(temp[1]))\n lon.append(float(temp[2]))\n year.append(int(temp[3][0:-4]))\n\n # Create a DataFrame\n filedata = pd.DataFrame.from_dict({\n 'location_id': location_id,\n 'lat': lat,\n 'lon': lon,\n 'year': year,\n 'filename': filename_list,\n 'fullpath': filename_fullpath})\n\n\n filedata = filedata.sort_values(by='location_id')\n\n # Redefine the index.\n filedata.index = range(filedata.__len__())\n return filedata\n\n\n\n\ndef inspect_compressed_database(glob_str):\n \"\"\"\n Build filename list from directory.\n\n Examples\n\n glob_str = '/Users/toddkarin/Documents/NSRDB_compressed/*'\n filedata = nsrdbtools.inspect_compressed_database(glob_str)\n\n\n Returns\n -------\n\n \"\"\"\n\n\n location_id = []\n lat = []\n lon = []\n\n\n # filename = get_s3_files()\n # base_dir = '/Users/toddkarin/Documents/NSRDB_compressed/*'\n filename = glob.glob(glob_str)\n\n\n # Extract location id, lat and lon.\n for key in filename:\n if key.endswith('.npz'):\n\n path_parts = os.path.split(key)\n\n filename_parts = path_parts[-1].split('_')\n\n location_id.append(int(filename_parts[0]))\n lat.append(float(filename_parts[1]))\n 
lon.append(float(filename_parts[2][0:-4]))\n\n\n # Create a DataFrame\n filedata = pd.DataFrame.from_dict({\n 'location_id': location_id,\n 'lat': lat,\n 'lon': lon,\n 'filename': filename,\n })\n\n # Redefine the index.\n filedata.index = range(filedata.__len__())\n\n\n return filedata\n\n\n\n\n\ndef inspect_pickle_database(root_path):\n \"\"\"Build database for NSRDB files\n\n Build a lat/long and year list for NSRDB pickled data.\n\n Examples\n --------\n inspect_pickle_database('data_folder')\n\n\n Parameters\n ----------\n root_path\n\n Returns\n -------\n filedata\n pandas DataFrame containing information on files in the root_path..\n\n \"\"\"\n\n import fnmatch\n import os\n\n # root_path = 'around_fairfield'\n pattern = '*weather.pkl'\n\n\n # filedata = pd.DataFrame(columns=['lat','lon','type','filepath'])\n weather_filename = []\n weather_fullpath = []\n info_filename = []\n info_fullpath = []\n location_id = []\n lat = []\n lon = []\n type = []\n\n # Cycle through files in directory, extract info from filename without opening file.\n # Note this would break if NREL changed their naming scheme.\n for root, dirs, files in os.walk(root_path):\n for filename in fnmatch.filter(files, pattern):\n\n temp = filename.split('_')\n\n weather_filename.append(filename)\n weather_fullpath.append(os.path.join(root, filename))\n location_id.append(int(temp[0]))\n lat.append(float(temp[1]))\n lon.append(float(temp[2]))\n type.append(temp[3][0:-4])\n\n info_filename.append(filename[0:-11] + 'info.pkl')\n info_fullpath.append(os.path.join(root, filename)[0:-11] + 'info.pkl')\n\n # Create a DataFrame\n filedata = pd.DataFrame.from_dict({\n 'location_id': location_id,\n 'lat': lat,\n 'lon': lon,\n 'type': type,\n 'weather_filename': weather_filename,\n 'weather_fullpath': weather_fullpath,\n 'info_filename': info_filename,\n 'info_fullpath': info_fullpath,\n })\n\n\n filedata = filedata.sort_values(by='location_id')\n\n # Redefine the index.\n filedata.index = range(filedata.__len__())\n return filedata\n\n\n\ndef import_csv(filename):\n \"\"\"Import an NSRDB csv file.\n\n The function (df,info) = import_csv(filename) imports an NSRDB formatted\n csv file\n\n Parameters\n ----------\n filename\n\n Returns\n -------\n df\n pandas dataframe of data\n info\n pandas dataframe of header data.\n \"\"\"\n\n # filename = '1ad06643cad4eeb947f3de02e9a0d6d7/128364_38.29_-122.14_1998.csv'\n\n info_df = pd.read_csv(filename, nrows=1)\n info = {}\n for p in info_df:\n info[p] = info_df[p].iloc[0]\n\n # See metadata for specified properties, e.g., timezone and elevation\n # timezone, elevation = info['Local Time Zone'], info['Elevation']\n\n # Return all but first 2 lines of csv to get data:\n df = pd.read_csv(filename, skiprows=2)\n\n # Set the time index in the pandas dataframe:\n year=str(df['Year'][0])\n\n\n if np.diff(df[0:2].Minute) == 30:\n interval = '30'\n info['interval_in_hours']= 0.5\n df = df.set_index(\n pd.date_range('1/1/{yr}'.format(yr=year), freq=interval + 'Min',\n periods=60*24*365 / int(interval)))\n elif df['Minute'][1] - df['Minute'][0]==0:\n interval = '60'\n info['interval_in_hours'] = 1\n df = df.set_index(\n pd.date_range('1/1/{yr}'.format(yr=year), freq=interval + 'Min',\n periods=60*24*365 / int(interval)))\n else:\n print('Interval not understood!')\n\n df.index = df.index.tz_localize(\n pytz.FixedOffset(float(info['Time Zone'] * 60)))\n\n return (df, info)\n\n# df, info = import_csv('nsrdb_1degree_uv/104_30.97_-83.22_tmy.csv')\n\ndef import_sequence(folder):\n \"\"\"Import and append 
NSRDB files in a folder\n\n Import a sequence of NSRDB files, data is appended to a pandas dataframe.\n This is useful for importing all years of data from one folder.\n\n Parameters\n ----------\n folder\n directory containing files to import.\n\n Returns\n -------\n df\n pandas dataframe of data\n info\n pandas dataframe of header data for last file imported.\n \"\"\"\n\n # Get all files.\n files = glob.glob(os.path.join(folder, '*.csv'))\n\n if len(files)==0:\n raise ValueError('No input files found in directory')\n files.sort()\n df = pd.DataFrame()\n for f in files:\n print(f)\n (df_temp,info) = import_csv(f)\n\n df = df.append(df_temp)\n\n info['timedelta_in_years'] = (df.index[-1] - df.index[0]).days/365\n\n return (df,info)\n\ndef combine_csv(files):\n \"\"\"\n\n Combine multiple files into one dataframe. Note files must be in time\n sequential order.\n\n\n :param files:\n :return:\n \"\"\"\n\n df = pd.DataFrame()\n for f in files:\n df_temp, info = import_csv(f)\n\n df = df.append(df_temp)\n\n info['timedelta_in_years'] = (df.index[-1] - df.index[0]).days / 365\n\n return (df, info)\n\n\ndef find_all(a_str, sub):\n start = 0\n while True:\n start = a_str.find(sub, start)\n if start == -1: return\n yield start\n start += len(sub) # use start += 1 to find overlapping matches\n\n\ndef build_nsrdb_link_list(filename):\n \"\"\"\n\n Example\n url_list = build_nsrdb_link_list('link_list.txt')\n\n see also: download_nsrdb_link_list\n\n Parameters\n ----------\n filename\n text file containing file list to import. can be \"copy/pasted\" rough\n from gmail.\n\n Returns\n -------\n url_list\n List of url's to open\n\n\n \"\"\"\n\n # filename = 'link_list.txt'\n with open(filename, 'r') as content_file:\n content = content_file.read()\n\n content.replace('\\n','')\n url_start = list(find_all(content,'https://maps.nrel.gov/api/'))\n url_end = list(find_all(content,'.zip'))\n\n url_list = [None] * len(url_start)\n for j in range(len(url_list)):\n url_list[j] = content[url_start[j]:url_end[j]] + '.zip'\n\n\n\n\n\n return url_list\n\n\n\ndef download_nsrdb_link_list(url_list, sleep=0.2):\n \"\"\"\n This simple script opens a list of urls for downloading files.\n\n Example:\n downlaod_nsrdb_link_list(url_list)\n\n Parameters\n ----------\n url_list\n list of urls to open.\n sleep\n Wait time between opening each url\n \"\"\"\n for j in range(len(url_list)):\n webbrowser.open(url_list[j])\n time.sleep(sleep)\n\n\n\n\ndef load_npz(filename):\n \"\"\"\n Load npz file from a local file\n\n Parameters\n ----------\n filename\n\n Returns\n -------\n\n \"\"\"\n #\n data = {}\n with np.load(filename) as arr:\n for var in list(arr.keys()):\n data[var] = arr[var]\n return data\n\n\ndef get_local_weather_data(filename):\n \"\"\"\n\n Load a local compressed weather datafile.\n\n Parameters\n ----------\n filename\n\n Returns\n -------\n\n \"\"\"\n\n\n data = load_npz(filename)\n return build_weather_info(data)\n\n\n\ndef build_weather_info(info):\n \"\"\"\n\n Parameters\n ----------\n info\n\n Returns\n -------\n\n \"\"\"\n\n for f in info:\n try:\n if info[f].dtype == np.dtype('<U5'):\n info[f] = str(info[f])\n elif info[f].dtype == np.dtype('<U6'):\n info[f] = str(info[f])\n elif info[f].dtype == np.dtype('int64'):\n info[f] = int(info[f])\n elif info[f].dtype == np.dtype('float64'):\n info[f] = float(info[f])\n\n\n except:\n print(f)\n\n\n weather = pd.DataFrame.from_dict({\n 'year': info['year'],\n 'month': info['month'],\n 'day': info['day'],\n 'hour': info['hour'],\n 'minute': info['minute'],\n 'dni': 
info['dni'],\n 'ghi': info['ghi'],\n 'dhi': info['dhi'],\n 'temp_air': info['temp_air'],\n 'wind_speed': info['wind_speed'],\n }\n )\n\n weather.index = pd.to_datetime(\n pd.DataFrame.from_dict({\n 'year': info['year'],\n 'month': info['month'],\n 'day': info['day'],\n 'hour': info['hour'],\n 'minute': info['minute'],\n })\n )\n\n weather.index = weather.index.tz_localize(\n pytz.FixedOffset(float(info['local_time_zone'] * 60)))\n\n # Remove long vectors from info.\n for f in list(info.keys()):\n if type(info[f]) == type(np.array([0])):\n del info[f]\n\n\n return weather, info\n\n\n\ndef haversine_distance(lat1, lon1, lat2, lon2):\n \"\"\"\n Calculate Haversine distance in km between two locations.\n\n Parameters\n ----------\n lat1 : numeric\n latitude of first point, in degrees.\n lon1 : numeric\n longitude of first point, in degrees.\n lat2 : numeric\n latitude of second point, in degrees.\n lon2 : numeric\n longitude of second point, in degrees.\n\n Returns\n -------\n numeric: Haversine distance in km.\n\n \"\"\"\n p = 0.017453292519943295\n a = 0.5 - np.cos((lat2-lat1)*p)/2 + np.cos(lat1*p)*np.cos(lat2*p) * (1-np.cos((lon2-lon1)*p)) / 2\n return 12742 * np.arcsin(np.sqrt(a))\n\ndef arg_closest_point(lat_point, lon_point, lat_list, lon_list):\n \"\"\"\n Calculate the index of the closest point in the list of coordinates (\n lat_list, lon_list) to the point (lat_point, lon_point). Uses Haversine\n distance formula to calculate the distance.\n\n Parameters\n ----------\n lat_point : numeric\n latitude of point to search for, in degrees\n lon_point : numeric\n longitude of point to search for, in degrees.\n lat_list : array\n list of latitudes to search within, in degrees.\n lon_list : array\n list of longitudes to search within, in degrees. Must be the same size\n as lat_list\n\n Returns\n -------\n numeric : distance\n \"\"\"\n return np.argmin(\n haversine_distance(np.array(lat_list), np.array(lon_list),\n lat_point, lon_point))\n\n\n\n\n#\n#\n# def haversine_distance(lat1, lon1, lat2, lon2):\n# p = 0.017453292519943295\n# a = 0.5 - np.cos((lat2-lat1)*p)/2 + np.cos(lat1*p)*np.cos(lat2*p) * (1-np.cos((lon2-lon1)*p)) / 2\n# return 12742 * np.arcsin(np.sqrt(a))\n\ndef closest_degrees(lat_find, lon_find, lat_list, lon_list):\n\n distance = np.sqrt( (lat_find-lat_list)**2 + (lon_find-lon_list)**2 )\n closest_index = np.argmin(np.array(distance))\n distance_in_degrees = distance[closest_index]\n\n return (closest_index, distance_in_degrees)\n\n\n\ndef find_closest_datafiles(lat,lon,filedata):\n \"\"\"\n Finds the closest location to lat,lon in the filedata.\n\n :param lat:\n :param lon:\n :param filedata:\n :return:\n \"\"\"\n closest_index = arg_closest_point(lat, lon,filedata['lat'],filedata['lon'])\n\n closest_location_id = filedata['location_id'][closest_index]\n # closest_lat = filedata['lat'][closest_index]\n # closest_lon = filedata['lon'][closest_index]\n\n closest_filedata = filedata[filedata['location_id']==closest_location_id]\n\n return closest_filedata\n"
] | [
[
"pandas.read_csv",
"numpy.sqrt",
"numpy.min",
"numpy.cos",
"pandas.DataFrame",
"numpy.dtype",
"numpy.max",
"numpy.diff",
"pandas.DataFrame.from_dict",
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
CypHelp/TestNewWorldDemo | [
"ee6f73df05756f191c1c56250fa290461fdd1b9a"
] | [
"PythonBaseDemo/dataVisualizationDemo/19.5/plot_gdp_compare.py"
] | [
"# coding: utf-8\n#########################################################################\n# 网站: <a href=\"http://www.crazyit.org\">疯狂Java联盟</a> #\n# author yeeku.H.lee [email protected] #\n# #\n# version 1.0 #\n# #\n# Copyright (C), 2001-2018, yeeku.H.Lee #\n# #\n# This program is protected by copyright laws. #\n# #\n# Program Name: #\n# #\n# <br>Date: #\n#########################################################################\nimport json\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nfilename = 'gdp_json.json'\n# 读取JSON格式的GDP数据\nwith open(filename) as f:\n gpd_list = json.load(f)\n# 使用list列表依次保存中国、美国、日本、俄罗斯、加拿大的GDP值\ncountry_gdps = [{}, {}, {}, {}, {}]\ncountry_codes = ['CHN', 'USA', 'JPN', 'RUS', 'CAN']\n# 遍历列表的每个元素,每个元素是一个GDP数据项\nfor gpd_dict in gpd_list:\n for i, country_code in enumerate(country_codes):\n # 只读取指定国家的数据\n if gpd_dict['Country Code'] == country_code:\n year = gpd_dict['Year']\n # 只读取2001年到2016\n if 2017 > year > 2000:\n country_gdps[i][year] = gpd_dict['Value']\n# 使用list列表依次保存中国、美国、日本、俄罗斯、加拿大的GDP值\ncountry_gdp_list = [[], [], [], [], []]\n# 构建时间数据\nx_data = range(2001, 2017)\nfor i in range(len(country_gdp_list)):\n for year in x_data:\n # 除以1e8,让数值变成以亿为单位\n country_gdp_list[i].append(country_gdps[i][year] / 1e8)\nbar_width=0.15\nfig = plt.figure(dpi=128, figsize=(15, 8))\ncolors = ['indianred', 'steelblue', 'gold', 'lightpink', 'seagreen']\n# 定义国家名称列表\ncountries = ['中国', '美国', '日本', '俄罗斯', '加拿大']\n# 采用循环绘制5组柱状图\nfor i in range(len(colors)):\n # 使用自定义X坐标将数据分开\n plt.bar(x=np.arange(len(x_data))+bar_width*i, height=country_gdp_list[i],\n label=countries[i], color=colors[i], alpha=0.8, width=bar_width)\n # 仅为中国、美国的条柱上绘制GDP数值\n if i < 2:\n for x, y in enumerate(country_gdp_list[i]):\n plt.text(x, y + 100, '%.0f' % y, ha='center', va='bottom')\n# 为X轴设置刻度值\nplt.xticks(np.arange(len(x_data))+bar_width*2, x_data)\n# 设置标题\nplt.title(\"2001到2016年各国GDP对比\")\n# 为两条坐标轴设置名称\nplt.xlabel(\"年份\")\nplt.ylabel(\"GDP(亿美元)\")\n# 显示图例\nplt.legend()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NCTUyoung/Codes-for-Lane-Detection | [
"1f49c957accd2244f7dfe9dd2bf8c6e5a4d4da84"
] | [
"ERFNet-CULane-PyTorch/demo.py"
] | [
"import os\nfrom erf_settings import *\nimport numpy as np\nfrom tools import prob_to_lines as ptl\nimport cv2\nimport models\nimport torch\nimport torch.nn.functional as F\nfrom options.options import parser\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport numpy\n\ncap_name = './data/test.mp4'\nimage = './data/10.jpg'\n\ndef main():\n args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(gpu) for gpu in args.gpus)\n args.gpus = len(args.gpus)\n # model\n model = models.ERFNet(5)\n model = torch.nn.DataParallel(model, device_ids=range(args.gpus)).cuda()\n\n if args.resume:\n if os.path.isfile(args.resume):\n print((\"=> loading checkpoint '{}'\".format(args.resume)))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n torch.nn.Module.load_state_dict(model, checkpoint['state_dict'])\n print((\"=> loaded checkpoint '{}' (epoch {})\".format(args.evaluate, checkpoint['epoch'])))\n else:\n print((\"=> no checkpoint found at '{}'\".format(args.resume)))\n\n cudnn.benchmark = True\n cudnn.fastest = True\n\n if args.mode == 0: # mode 0 for video\n cap = cv2.VideoCapture(cap_name)\n while(True):\n check, in_frame_src = cap.read()\n if check:\n test(model, in_frame_src)\n else:\n print(\"Last frame\")\n break\n\n elif args.mode == 1: # mode 1 for test image\n image_src = cv2.imread(image)\n test(model, image_src)\n cv2.waitKey(0)\n\ndef test(model, image_src):\n\n in_frame_src = cv2.cvtColor(image_src, cv2.COLOR_BGR2RGB)\n\n # Input\n in_frame = cv2.resize(in_frame_src, (IN_IMAGE_W, IN_IMAGE_H), interpolation=cv2.INTER_LINEAR)\n croppedImage = in_frame[VERTICAL_CROP_SIZE:, :, :] # FIX IT\n croppedImageTrain = cv2.resize(croppedImage, (TRAIN_IMG_W, TRAIN_IMG_H), interpolation=cv2.INTER_LINEAR)\n\n input_transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Grayscale(num_output_channels=3),\n transforms.ToTensor(),\n ]\n )\n\n image = input_transform(croppedImageTrain)\n image = image.unsqueeze(0)\n input_var = torch.autograd.Variable(image)\n\n # Comput\n output, output_exist = model(input_var)\n output = F.softmax(output, dim=1)\n pred = output.data.cpu().numpy() # BxCxHxW\n pred_exist = output_exist.data.cpu().numpy()\n\n maps = []\n mapsResized = []\n exists = []\n img = Image.fromarray(cv2.cvtColor(croppedImageTrain, cv2.COLOR_BGR2RGB))\n\n for l in range(LANES_COUNT):\n prob_map = (pred[0][l + 1] * 255).astype(int)\n prob_map = cv2.blur(prob_map, (9, 9))\n prob_map = prob_map.astype(np.uint8)\n maps.append(prob_map)\n mapsResized.append(cv2.resize(prob_map, (IN_IMAGE_W, IN_IMAGE_H_AFTER_CROP), interpolation=cv2.INTER_LINEAR))\n img = ptl.AddMask(img, prob_map, COLORS[l],0.1) # Image with probability map\n\n exists.append(pred_exist[0][l] > 0.5)\n lines = ptl.GetLines(exists, maps)\n\n print(exists)\n res_img = cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)\n cv2.imshow(\"result_pb\", res_img)\n\n for l in range(LANES_COUNT):\n points = lines[l] # Points for the lane\n for point in points:\n cv2.circle(image_src, point, 5, POINT_COLORS[l], -1)\n\n cv2.imshow(\"result_points\", image_src)\n cv2.waitKey(100)\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.nn.functional.softmax",
"torch.load",
"numpy.asarray",
"torch.nn.Module.load_state_dict",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GaIbatorix/Quantum-SVM | [
"30e2d7378ac6e19a4ba062b92970a9e8033ad525"
] | [
"utils.py"
] | [
"# Created by Dennis Willsch ([email protected]) \n# Modified by Gabriele Cavallaro ([email protected]) \n\nimport sys\nimport re\nimport json\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_auc_score,average_precision_score,precision_recall_curve,roc_curve,accuracy_score,auc\n\nnp.set_printoptions(precision=4, suppress=True)\n\ndef kernel(xn, xm, gamma=-1): # here (xn.shape: NxD, xm.shape: ...xD) -> Nx...\n if gamma == -1:\n return xn @ xm.T\n xn = np.atleast_2d(xn)\n xm = np.atleast_2d(xm)\n return np.exp(-gamma * np.sum((xn[:,None] - xm[None,:])**2, axis=-1)) # (N,1,D) - (1,...,D) -> (N,...,D) -> (N,...); see Hsu guide.pdf for formula\n\n# B = base\n# K = number of qubits per alpha\n\n# decode binary -> alpha\ndef decode(binary, B=10, K=3):\n N = len(binary) // K\n Bvec = B ** np.arange(K)\n return np.fromiter(binary,float).reshape(N,K) @ Bvec\n\n# encode alpha -> binary with B and K (for each n, the binary coefficients an,k such that sum_k an,k B**k is closest to alphan)\ndef encode(alphas, B=10, K=3):\n N = len(alphas)\n Bvec = B ** np.arange(K) # 10^0 10^1 10^2 ...\n allvals = np.array(list(map(lambda n : np.fromiter(bin(n)[2:].zfill(K),float,K), range(2**K)))) @ Bvec # [[0,0,0],[0,0,1],...] @ [1, 10, 100]\n return ''.join(list(map(lambda n : bin(n)[2:].zfill(K),np.argmin(np.abs(allvals[:,None] - alphas), axis=0))))\n\ndef encode_as_vec(alphas, B=10, K=3):\n return np.fromiter(encode(alphas,B,K), float)\n\ndef seqs_to_onehots(seqs): # from ../utils.py\n return np.asarray([np.asarray([[1 if bp == letter else 0 for letter in 'ACGT'] for bp in seq]).flatten() for seq in seqs])\n\ndef loadraw(key='mad50'): # key = 'mad50', 'myc99', ... basically from do-svm.py\n data = np.genfromtxt(f'data/intensities-{key[:3]}filtered', dtype=None, names=True, encoding=None, usecols=(0,1))\n phis = seqs_to_onehots(data['sequence'])\n X = 2*phis - 1 \n ys = data['log_mean']\n\n percentile = float(key[3:])\n theta_percentile = int(len(data) * percentile / 100.)\n theta_idx = np.argpartition(ys, theta_percentile)[theta_percentile]\n theta = ys[theta_idx]\n labels = np.sign(ys - theta)\n labels[theta_idx] = 1 # corner case is counted as 1 (b/c we have >= theta in mlr)\n\n return X, labels\n\n#def loaddataset(datakey='mad50p2calibtrain0'):\n# dataset = np.loadtxt('data/datasets/'+datakey, dtype=float, skiprows=1)\n# return dataset[:,2:], dataset[:,1] # data, labels\n\ndef loaddataset(datakey):\n dataset = np.loadtxt(datakey, dtype=float, skiprows=1)\n return dataset[:,2:], dataset[:,1] # data, labels\n\ndef save_json(filename, var):\n with open(filename,'w') as f:\n f.write(str(json.dumps(var, indent=4, sort_keys=True, separators=(',', ': '), ensure_ascii=False)))\n\ndef eval_classifier(x, alphas, data, label, gamma, b=0): # evaluates the distance to the hyper plane according to 16.5.32 on p. 
891 (Numerical Recipes); sign is the assigned class; x.shape = ...xD\n return np.sum((alphas * label)[:,None] * kernel(data, x, gamma), axis=0) + b\n\ndef eval_offset_avg(alphas, data, label, gamma, C, useavgforb=True): # evaluates offset b according to 16.5.33\n cross = eval_classifier(data, alphas, data, label, gamma) # cross[i] = sum_j aj yj K(xj, xi) (error in Numerical Recipes)\n if useavgforb:\n return np.sum(alphas * (C-alphas) * (label - cross)) / np.sum(alphas * (C-alphas))\n else: # this is actually not used, but we did a similar-in-spirit implementation in eval_finaltraining_avgscore.py\n if np.isclose(np.sum(alphas * (C-alphas)),0):\n print('no support vectors found, discarding this classifer')\n return np.nan\n bcandidates = [np.sum(alphas * (C-alphas) * (label - cross)) / np.sum(alphas * (C-alphas))] # average according to NR should be the first candidate\n crosssorted = np.sort(cross)\n crosscandidates = -(crosssorted[1:] + crosssorted[:-1])/2 # each value between f(xi) and the next higher f(xj) is a candidate\n bcandidates += sorted(crosscandidates, key=lambda x:abs(x - bcandidates[0])) # try candidates closest to the average first\n bnumcorrect = [(label == np.sign(cross + b)).sum() for b in bcandidates]\n return bcandidates[np.argmax(bnumcorrect)]\n\ndef eval_acc_auroc_auprc(label, score): # score is the distance to the hyper plane (output from eval_classifier)\n precision,recall,_ = precision_recall_curve(label, score)\n return accuracy_score(label,np.sign(score)), roc_auc_score(label,score), auc(recall,precision)\n\n\n\n################ This I/O functions are provided by http://hyperlabelme.uv.es/index.html ################ \n\ndef dataread(filename):\n lasttag = 'description:'\n # Open file and locate lasttag\n f = open(filename, 'r')\n nl = 1\n for line in f:\n if line.startswith(lasttag): break\n nl += 1\n f.close()\n\n # Read data\n data = np.loadtxt(filename, delimiter=',', skiprows=nl)\n Y = data[:, 0]\n X = data[:, 1:]\n # Separate train/test\n Xtest = X[Y < 0, :]\n X = X[Y >= 0, :]\n Y = Y[Y >= 0, None]\n\n return X, Y, Xtest\n\n\ndef datawrite(path,method, dataset, Yp):\n filename = '{0}{1}_predictions.txt'.format(path, dataset)\n res = True\n try:\n with open(filename, mode='w') as f:\n f.write('{0} {1}'.format(method, dataset))\n for v in Yp:\n f.write(' {0}'.format(str(v)))\n f.write('\\n')\n except Exception as e:\n print('Error', e)\n res = False\n return res\n\n################ \n\n\ndef write_samples(X, Y,path): \n f = open(path,\"w\") \n f.write(\"id label data \\n\") \n for i in range(0,X.shape[0]):\n f.write(str(i)+\" \")\n if(Y[i]==1):\n f.write(\"-1 \")\n else:\n f.write(\"1 \")\n for j in range(0,X.shape[1]):\n f.write(str(X[i,j])+\" \")\n f.write(\"\\n\") \n f.close() "
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"numpy.set_printoptions",
"sklearn.metrics.precision_recall_curve",
"numpy.genfromtxt",
"numpy.sign",
"numpy.atleast_2d",
"numpy.sort",
"numpy.argmax",
"numpy.argpartition",
"sklearn.metrics.auc",
"numpy.fromiter",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DanielJHaar/pythonpracticejun2020 | [
"24e2501fab559841c976eca07bd1900b356c3336"
] | [
"NP_ZerosOnes.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 28 12:09:18 2020\r\n\r\n@author: danie\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\ndim = tuple(map(int,input().split()))\r\n\r\nprint(np.zeros(dim, dtype = np.int))\r\nprint(np.ones(dim, dtype = np.int))"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
intersun/CoDIR | [
"5b2abd49e92536d486324bd802b7ee6ff272e9b5"
] | [
"fairseq_cli/train.py"
] | [
"#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport logging\nimport math\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport torch\n\nfrom fairseq import (\n checkpoint_utils,\n distributed_utils,\n options,\n quantization_utils,\n tasks,\n utils,\n)\nfrom fairseq.data import iterators\nfrom fairseq.logging import meters, metrics, progress_bar\nfrom fairseq.trainer import Trainer\nfrom fairseq.model_parallel.megatron_trainer import MegatronTrainer\nfrom sklearn.metrics import matthews_corrcoef, f1_score\nfrom scipy.stats import pearsonr, spearmanr\n\n\nlogging.basicConfig(\n format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n stream=sys.stdout,\n)\nlogger = logging.getLogger('fairseq_cli.train')\n\n\ndef main(args, init_distributed=False):\n utils.import_user_module(args)\n\n assert args.max_tokens is not None or args.max_sentences is not None, \\\n 'Must specify batch size either with --max-tokens or --max-sentences'\n metrics.reset()\n\n # Initialize CUDA and distributed training\n if torch.cuda.is_available() and not args.cpu:\n torch.cuda.set_device(args.device_id)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if init_distributed:\n args.distributed_rank = distributed_utils.distributed_init(args)\n\n if distributed_utils.is_master(args):\n checkpoint_utils.verify_checkpoint_directory(args.save_dir)\n\n # Print args\n logger.info(args)\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for valid_sub_split in args.valid_subset.split(','):\n task.load_dataset(valid_sub_split, combine=False, epoch=1)\n\n if args.crd_weight > 0.0:\n # activate crd loss, need to read data before hand to get label of each data and number of samples\n for train_sub_split in args.train_subset.split(','):\n task.load_dataset(train_sub_split, combine=True, epoch=1)\n\n # Build model and criterion\n model = task.build_model(args)\n criterion = task.build_criterion(args)\n logger.info(model)\n logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))\n logger.info('num. model params: {} (num. trained: {})'.format(\n sum(p.numel() for p in model.parameters()),\n sum(p.numel() for p in model.parameters() if p.requires_grad),\n ))\n\n if 'distill' in args.criterion.lower() or 'crd' in args.criterion.lower():\n logger.info('In teacher model, num. model params: {} (num. 
trained: {})'.format(\n sum(p.numel() for p in criterion.teacher_model.parameters()),\n sum(p.numel() for p in criterion.teacher_model.parameters() if p.requires_grad),\n ))\n\n # (optionally) Configure quantization\n if args.quantization_config_path is not None:\n quantizer = quantization_utils.Quantizer(\n config_path=args.quantization_config_path,\n max_epoch=args.max_epoch,\n max_update=args.max_update,\n )\n else:\n quantizer = None\n\n # Build trainer\n if args.model_parallel_size == 1:\n trainer = Trainer(args, task, model, criterion, quantizer)\n else:\n trainer = MegatronTrainer(args, task, model, criterion)\n\n logger.info('training on {} GPUs'.format(args.distributed_world_size))\n logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(\n args.max_tokens,\n args.max_sentences,\n ))\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)\n\n # Train until the learning rate gets too small\n max_epoch = args.max_epoch or math.inf\n max_update = args.max_update or math.inf\n lr = trainer.get_lr()\n train_meter = meters.StopwatchMeter()\n train_meter.start()\n while (\n lr > args.min_lr\n and epoch_itr.next_epoch_idx <= max_epoch\n ):\n # train for one epoch\n valid_losses = train(args, trainer, task, epoch_itr, max_update)\n if should_stop_early(args, valid_losses[0]) or trainer.get_num_updates() >= max_update:\n break\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n epoch_itr = trainer.get_train_iterator(\n epoch_itr.next_epoch_idx,\n # sharded data: get train iterator for next epoch\n load_dataset=(os.pathsep in getattr(args, 'data', '')),\n )\n train_meter.stop()\n logger.info('done training in {:.1f} seconds'.format(train_meter.sum))\n\n\ndef should_stop_early(args, valid_loss):\n # skip check if no validation was done in the current epoch\n if valid_loss is None:\n return False\n if args.patience <= 0:\n return False\n\n def is_better(a, b):\n return a > b if args.maximize_best_checkpoint_metric else a < b\n\n prev_best = getattr(should_stop_early, 'best', None)\n if prev_best is None or is_better(valid_loss, prev_best):\n should_stop_early.best = valid_loss\n should_stop_early.num_runs = 0\n return False\n else:\n should_stop_early.num_runs += 1\n if should_stop_early.num_runs >= args.patience:\n logger.info('early stop since valid performance hasn\\'t improved for last {} runs'.format(args.patience))\n return True\n else:\n return False\n\n\[email protected]('train')\ndef train(args, trainer, task, epoch_itr, max_update=math.inf):\n \"\"\"Train the model for one epoch and return validation losses.\"\"\"\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=args.fix_batches_to_gpus,\n shuffle=(epoch_itr.next_epoch_idx > args.curriculum),\n )\n update_freq = (\n args.update_freq[epoch_itr.epoch - 1]\n if epoch_itr.epoch <= len(args.update_freq)\n else args.update_freq[-1]\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n progress = progress_bar.progress_bar(\n itr,\n log_format=args.log_format,\n log_interval=args.log_interval,\n epoch=epoch_itr.epoch,\n tensorboard_logdir=(\n args.tensorboard_logdir if distributed_utils.is_master(args) else None\n ),\n default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),\n )\n\n trainer.begin_epoch(epoch_itr.epoch)\n\n valid_subsets = args.valid_subset.split(',')\n for 
samples in progress:\n with metrics.aggregate('train_inner'):\n log_output = trainer.train_step(samples)\n if log_output is None: # OOM, overflow, ...\n continue\n\n # log mid-epoch stats\n num_updates = trainer.get_num_updates()\n if num_updates % args.log_interval == 0:\n stats = get_training_stats(metrics.get_smoothed_values('train_inner'))\n progress.log(stats, tag='train_inner', step=num_updates)\n\n # reset mid-epoch stats after each log interval\n # the end-of-epoch stats will still be preserved\n metrics.reset_meters('train_inner')\n\n valid_losses = validate_and_save(args, trainer, task, epoch_itr, valid_subsets)\n if should_stop_early(args, valid_losses[0]) or num_updates >= max_update:\n break\n\n # log end-of-epoch stats\n stats = get_training_stats(metrics.get_smoothed_values('train'))\n progress.print(stats, tag='train', step=num_updates)\n\n # reset epoch-level meters\n metrics.reset_meters('train')\n return valid_losses\n\n\ndef validate_and_save(args, trainer, task, epoch_itr, valid_subsets):\n num_updates = trainer.get_num_updates()\n do_save = (\n (\n args.save_interval_updates > 0\n and num_updates > 0\n and num_updates % args.save_interval_updates == 0\n )\n or (\n epoch_itr.end_of_epoch()\n and epoch_itr.epoch % args.save_interval == 0\n )\n )\n do_validate = (\n (\n do_save # saving requires validation\n or (\n epoch_itr.end_of_epoch()\n and epoch_itr.epoch % args.validate_interval == 0\n )\n )\n and not args.disable_validation\n )\n\n # Validate\n valid_losses = [None]\n if do_validate:\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n # Save\n if do_save:\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n return valid_losses\n\n\ndef get_training_stats(stats):\n if 'nll_loss' in stats and 'ppl' not in stats:\n stats['ppl'] = utils.get_perplexity(stats['nll_loss'])\n stats['wall'] = round(metrics.get_meter('default', 'wall').elapsed_time, 0)\n return stats\n\n\ndef validate(args, trainer, task, epoch_itr, subsets):\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n\n if args.fixed_validation_seed is not None:\n # set fixed seed for every validation\n utils.set_torch_seed(args.fixed_validation_seed)\n\n valid_losses = []\n for subset in subsets:\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args.max_tokens_valid,\n max_sentences=args.max_sentences_valid,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n trainer.get_model().max_positions(),\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.progress_bar(\n itr,\n log_format=args.log_format,\n log_interval=args.log_interval,\n epoch=epoch_itr.epoch,\n prefix=f\"valid on '{subset}' subset\",\n tensorboard_logdir=(\n args.tensorboard_logdir if distributed_utils.is_master(args) else None\n ),\n default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),\n )\n\n # create a new root metrics aggregator so validation metrics\n # don't pollute other aggregators (e.g., train meters)\n all_preds, all_labels = [], []\n with metrics.aggregate(new_root=True) as agg:\n for sample in progress:\n logging_outputs, preds, labels = trainer.valid_step(sample)\n if preds is not None:\n 
all_preds.extend(preds)\n all_labels.extend(labels)\n else:\n all_preds, all_labels = None, None\n\n if all_preds is not None:\n all_preds = torch.cat(all_preds).cpu().numpy()\n all_labels = torch.cat(all_labels).cpu().numpy()\n\n # log validation stats\n stats = get_valid_stats(args, trainer, agg.get_smoothed_values(), all_preds, all_labels)\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(stats[args.best_checkpoint_metric])\n return valid_losses\n\n\ndef get_valid_stats(args, trainer, stats, preds=None, labels=None):\n if 'nll_loss' in stats and 'ppl' not in stats:\n stats['ppl'] = utils.get_perplexity(stats['nll_loss'])\n\n if getattr(args, \"regression_target\", None) is not None and preds is not None:\n stats['pearson'] = pearsonr(preds, labels)[0]\n stats['spearman'] = spearmanr(preds, labels)[0]\n # if args.best_checkpoint_metric == 'accuracy' and args.num_classes==2:\n if getattr(args, \"num_classes\", None) == 2 and preds is not None:\n stats['mcc'] = matthews_corrcoef(labels, preds)\n stats['f1'] = f1_score(y_true=labels, y_pred=preds)\n\n stats['num_updates'] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, 'best'):\n key = 'best_{0}'.format(args.best_checkpoint_metric)\n best_function = max if args.maximize_best_checkpoint_metric else min\n stats[key] = best_function(\n checkpoint_utils.save_checkpoint.best,\n stats[args.best_checkpoint_metric],\n )\n return stats\n\n\ndef distributed_main(i, args, start_rank=0):\n args.device_id = i\n if args.distributed_rank is None: # torch.multiprocessing.spawn\n args.distributed_rank = start_rank + i\n main(args, init_distributed=True)\n\n\ndef cli_main(modify_parser=None):\n parser = options.get_training_parser()\n args = options.parse_args_and_arch(parser, modify_parser=modify_parser)\n\n if args.distributed_init_method is None:\n distributed_utils.infer_init_method(args)\n\n if args.distributed_init_method is not None:\n # distributed training\n if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:\n start_rank = args.distributed_rank\n args.distributed_rank = None # assign automatically\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, start_rank),\n nprocs=torch.cuda.device_count(),\n )\n else:\n distributed_main(args.device_id, args)\n elif args.distributed_world_size > 1:\n # fallback for single node with multiple GPUs\n assert args.distributed_world_size <= torch.cuda.device_count()\n port = random.randint(10000, 20000)\n args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)\n args.distributed_rank = None # set based on device id\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, ),\n nprocs=args.distributed_world_size,\n )\n else:\n # single GPU training\n main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n"
] | [
[
"torch.cuda.set_device",
"numpy.random.seed",
"torch.multiprocessing.spawn",
"torch.manual_seed",
"scipy.stats.pearsonr",
"sklearn.metrics.matthews_corrcoef",
"torch.cuda.device_count",
"torch.cat",
"torch.cuda.is_available",
"scipy.stats.spearmanr",
"sklearn.metrics.f1_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
cstorm125/thxxwiki | [
"394f8c0df773dd097fbd3fdd970954e99155e97e"
] | [
"align_sentences.py"
] | [
"import argparse\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom tqdm.auto import tqdm\nfrom preprocess import rm_useless_spaces\nimport tensorflow_hub as hub\nimport tensorflow_text\nimport tensorflow as tf # tensorflow 2.1.0\n\n# #debug\n# class A:\n# def __init__(self):\n# self.max_n=3\n# self.use_thres=0.7\n# self.max_size=max_size\n# self.en_dir = 'raw_data/economic_outlook/en_data2'\n# self.th_dir = 'raw_data/economic_outlook/th_data2'\n# self.output_path = 'cleaned_data/pdf_sentences.csv'\n# args = A()\n\n\ndef stitch_sentences(sent, max_n=3):\n res = []\n for n in range(max_n + 1):\n for i in range(len(sent) - n + 1):\n r = \" \".join(sent[i : (i + n)])\n r = rm_useless_spaces(r.replace(\"\\n\", \" \").strip())\n res.append((i, r))\n return res[(len(sent) + 1) :]\n\n\ndef match_sentences(lang1_sentences, lang2_sentences, model):\n embedding_1 = model(lang1_sentences)\n embedding_2 = model(lang2_sentences)\n distance_matrix_12 = tf.matmul(embedding_1, embedding_2, transpose_b=True)\n print(embedding_1.shape, embedding_2.shape, distance_matrix_12.shape)\n best_distances = tf.argmax(distance_matrix_12, axis=1).numpy()\n\n matched_sentences_lang2 = []\n scores = []\n for i, lang2_idx in enumerate(best_distances):\n score = distance_matrix_12[i][lang2_idx].numpy()\n scores.append(score)\n matched_sentences_lang2.append(lang2_sentences[lang2_idx])\n return matched_sentences_lang2, scores\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--en_dir\", type=str)\n parser.add_argument(\"--th_dir\", type=str)\n parser.add_argument(\"--output_path\", type=str)\n parser.add_argument(\"--max_n\", type=int, default=3)\n parser.add_argument(\"--bs\", type=int, default=3000)\n parser.add_argument(\"--use_thres\", type=int, default=0.7)\n args = parser.parse_args()\n\n print(\"loading model...\")\n # _model = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3\")\n _model = hub.load(\n \"https://tfhub.dev/google/universal-sentence-encoder-multilingual/3\"\n )\n print(\"model loaded\")\n\n en_paths = sorted(glob.glob(f\"{args.en_dir}/*.sent\"))\n th_paths = sorted(glob.glob(f\"{args.th_dir}/*.sent\"))\n if len(en_paths) != len(th_paths):\n raise ValueError(\"must have equal number of documents\")\n print(f\"there are {len(en_paths)} parallel docs\")\n\n res_en_ths = []\n for en_path, th_path in tqdm(zip(en_paths, th_paths)):\n print(en_path)\n print(th_path)\n with open(en_path, \"r\") as f:\n sent_en = f.readlines()\n tup_en = stitch_sentences(sent_en, args.max_n)\n sent_en2 = [i[1] for i in tup_en]\n id_en = [i[0] for i in tup_en]\n with open(th_path, \"r\") as f:\n sent_th = f.readlines()\n tup_th = stitch_sentences(sent_th, args.max_n)\n sent_th2 = [i[1] for i in tup_th]\n id_th = [i[0] for i in tup_th]\n\n print(\n f\"\"\"\n {en_path}\n en sentences: {len(sent_en)}\n th sentences: {len(sent_th)}\n stitched en sentences (max_n = {args.max_n}): {len(tup_en)}\n stiched th sentences (max_n = {args.max_n}): {len(tup_th)}\n \"\"\"\n )\n\n # skip if there's only title\n if (len(sent_en) == 1) | (len(sent_th) == 1):\n print(\"skipping...\")\n continue\n\n for i in tqdm(range(len(sent_en2) // args.bs + 1)):\n for j in tqdm(range(len(sent_th2) // args.bs + 1)):\n matched_sentences_th, scores = match_sentences(\n sent_en2[i * args.bs : (i + 1) * args.bs],\n sent_th2[j * args.bs : (j + 1) * args.bs],\n _model,\n )\n res_en_th = pd.DataFrame(\n {\n \"en_text\": sent_en2[i * args.bs : (i + 1) * args.bs],\n \"th_text\": 
matched_sentences_th,\n \"use_score\": scores,\n \"id_en\": id_en,\n }\n )\n res_en_th = res_en_th[(res_en_th.use_score > args.use_thres)]\n res_en_th[\"src\"] = en_path\n res_en_ths.append(res_en_th)\n print(\n f\"{res_en_th.shape[0]} sentences above {args.use_thres} threshold\"\n )\n\n df = (\n pd.concat(res_en_ths).dropna().drop_duplicates().reset_index(drop=True)\n )\n df[\"rnk\"] = (\n df.sort_values(\"use_score\", ascending=False)\n .groupby([\"src\",\"id_en\"])\n .cumcount()\n + 1\n )\n df = df[df.rnk == 1]\n print(f\"saving {df.shape} to {args.output_path}\")\n df.to_csv(args.output_path, index=False)\n"
] | [
[
"tensorflow.matmul",
"tensorflow.argmax",
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
HaMF/bbfmr | [
"90bea743ac549828495354091145c025e3da3ee7"
] | [
"complex_model.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 28 14:07:02 2016\n\n@author: hannes.maierflaig\n\"\"\"\n\nimport operator\nfrom lmfit.model import Model, Parameter, _align, _ensureMatplotlib, warnings\nfrom lmfit.model import ModelResult as ModelResultBase\nfrom lmfit.minimizer import Minimizer\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom copy import deepcopy\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\n\nclass ComplexModelResult(ModelResultBase):\n def __init__(self, model, params, data=None, weights=None,\n method='leastsq', fcn_args=None, fcn_kws=None,\n iter_cb=None, scale_covar=True, **fit_kws):\n self.complex_data = None\n self.model = model\n self.data = data\n self.weights = weights\n self.method = method\n self.ci_out = None\n self.init_params = deepcopy(params)\n\n # modify residual fcnt here in order to get an array of real floats\n def reim_residual(*a, **kws):\n res = model._residual(*a, **kws)\n return res.view(np.float)\n\n Minimizer.__init__(self, reim_residual, params, fcn_args=fcn_args,\n fcn_kws=fcn_kws, iter_cb=iter_cb,\n scale_covar=scale_covar, **fit_kws)\n\n @_ensureMatplotlib\n def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None,\n numpoints=None, data_kws=None, fit_kws=None, init_kws=None,\n ax_kws=None, norm=lambda x: x):\n \"\"\"Plot the fit results using matplotlib.\n\n The method will plot results of the fit using matplotlib, including:\n the data points, the initial fit curve and the fitted curve. If the fit\n model included weights, errorbars will also be plotted.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes, optional\n The axes to plot on. The default in None, which means use the\n current pyplot axis or create one if there is none.\n datafmt : string, optional\n matplotlib format string for data points\n fitfmt : string, optional\n matplotlib format string for fitted curve\n initfmt : string, optional\n matplotlib format string for initial conditions for the fit\n yerr : ndarray, optional\n array of uncertainties for data array\n numpoints : int, optional\n If provided, the final and initial fit curves are evaluated not\n only at data points, but refined to contain `numpoints` points in\n total.\n data_kws : dictionary, optional\n keyword arguments passed on to the plot function for data points\n fit_kws : dictionary, optional\n keyword arguments passed on to the plot function for fitted curve\n init_kws : dictionary, optional\n keyword arguments passed on to the plot function for the initial\n conditions of the fit\n ax_kws : dictionary, optional\n keyword arguments for a new axis, if there is one being created\n\n Returns\n -------\n matplotlib.axes.Axes\n\n Notes\n ----\n For details about plot format strings and keyword arguments see\n documentation of matplotlib.axes.Axes.plot.\n\n If yerr is specified or if the fit model included weights, then\n matplotlib.axes.Axes.errorbar is used to plot the data. 
If yerr is\n not specified and the fit includes weights, yerr set to 1/self.weights\n\n If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.\n\n See Also\n --------\n ModelResult.plot_residuals : Plot the fit residuals using matplotlib.\n ModelResult.plot : Plot the fit results and residuals using matplotlib.\n \"\"\"\n if data_kws is None:\n data_kws = {}\n if fit_kws is None:\n fit_kws = {}\n if init_kws is None:\n init_kws = {}\n if ax_kws is None:\n ax_kws = {}\n\n if len(self.model.independent_vars) == 1:\n independent_var = self.model.independent_vars[0]\n else:\n print('Fit can only be plotted if the model function has one '\n 'independent variable.')\n return False\n\n if not isinstance(ax, plt.Axes):\n ax = plt.gca(**ax_kws)\n\n x_array = self.userkws[independent_var]\n\n # make a dense array for x-axis if data is not dense\n if numpoints is not None and len(self.data) < numpoints:\n x_array_dense = np.linspace(min(x_array), max(x_array), numpoints)\n else:\n x_array_dense = x_array\n\n ax.plot(x_array_dense, norm(self.model.eval(self.init_params,\n **{independent_var: x_array_dense})), initfmt,\n label='init', **init_kws)\n ax.plot(x_array_dense, norm(self.model.eval(self.params,\n **{independent_var: x_array_dense})), fitfmt,\n label='best-fit', **fit_kws)\n\n if yerr is None and self.weights is not None:\n yerr = 1.0/self.weights\n if yerr is not None:\n ax.errorbar(x_array, norm(self.data), yerr=norm(yerr),\n fmt=datafmt, label='data', **data_kws)\n else:\n ax.plot(x_array, norm(self.data), datafmt, label='data',\n **data_kws)\n\n ax.set_title(self.model.name)\n ax.set_xlabel(independent_var)\n ax.set_ylabel('y')\n ax.legend()\n\n return ax\n\n @_ensureMatplotlib\n def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,\n fit_kws=None, ax_kws=None, norm=lambda x: x):\n \"\"\"Plot the fit residuals using matplotlib.\n\n The method will plot residuals of the fit using matplotlib, including:\n the data points and the fitted curve (as horizontal line). If the fit\n model included weights, errorbars will also be plotted.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes, optional\n The axes to plot on. The default in None, which means use the\n current pyplot axis or create one if there is none.\n datafmt : string, optional\n matplotlib format string for data points\n yerr : ndarray, optional\n array of uncertainties for data array\n data_kws : dictionary, optional\n keyword arguments passed on to the plot function for data points\n fit_kws : dictionary, optional\n keyword arguments passed on to the plot function for fitted curve\n ax_kws : dictionary, optional\n keyword arguments for a new axis, if there is one being created\n\n Returns\n -------\n matplotlib.axes.Axes\n\n Notes\n ----\n For details about plot format strings and keyword arguments see\n documentation of matplotlib.axes.Axes.plot.\n\n If yerr is specified or if the fit model included weights, then\n matplotlib.axes.Axes.errorbar is used to plot the data. 
If yerr is\n not specified and the fit includes weights, yerr set to 1/self.weights\n\n If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.\n\n See Also\n --------\n ModelResult.plot_fit : Plot the fit results using matplotlib.\n ModelResult.plot : Plot the fit results and residuals using matplotlib.\n \"\"\"\n if data_kws is None:\n data_kws = {}\n if fit_kws is None:\n fit_kws = {}\n if fit_kws is None:\n fit_kws = {}\n if ax_kws is None:\n ax_kws = {}\n\n if len(self.model.independent_vars) == 1:\n independent_var = self.model.independent_vars[0]\n else:\n print('Fit can only be plotted if the model function has one '\n 'independent variable.')\n return False\n\n if not isinstance(ax, plt.Axes):\n ax = plt.gca(**ax_kws)\n\n x_array = self.userkws[independent_var]\n\n ax.axhline(0, **fit_kws)\n\n if yerr is None and self.weights is not None:\n yerr = 1.0/self.weights\n if yerr is not None:\n ax.errorbar(x_array, norm(self.eval() - self.data),\n yerr=norm(yerr), fmt=datafmt, label='residuals',\n **data_kws)\n else:\n ax.plot(x_array, norm(self.eval() - self.data), datafmt,\n label='residuals', **data_kws)\n\n ax.set_title(self.model.name)\n ax.set_ylabel('residuals')\n ax.legend()\n\n return ax\n\n @_ensureMatplotlib\n def plot(self, datafmt='o', fitfmt='-', initfmt='--', yerr=None,\n numpoints=None, fig=None, data_kws=None, fit_kws=None,\n init_kws=None, ax_res_kws=None, ax_fit_kws=None, fig_kws=None):\n \"\"\"Plot the fit results and residuals using matplotlib.\n\n The method will produce a matplotlib figure with both results of the\n fit and the residuals plotted. If the fit model included weights,\n errorbars will also be plotted.\n\n Parameters\n ----------\n datafmt : string, optional\n matplotlib format string for data points\n fitfmt : string, optional\n matplotlib format string for fitted curve\n initfmt : string, optional\n matplotlib format string for initial conditions for the fit\n yerr : ndarray, optional\n array of uncertainties for data array\n numpoints : int, optional\n If provided, the final and initial fit curves are evaluated not\n only at data points, but refined to contain `numpoints` points in\n total.\n fig : matplotlib.figure.Figure, optional\n The figure to plot on. The default in None, which means use the\n current pyplot figure or create one if there is none.\n data_kws : dictionary, optional\n keyword arguments passed on to the plot function for data points\n fit_kws : dictionary, optional\n keyword arguments passed on to the plot function for fitted curve\n init_kws : dictionary, optional\n keyword arguments passed on to the plot function for the initial\n conditions of the fit\n ax_res_kws : dictionary, optional\n keyword arguments for the axes for the residuals plot\n ax_fit_kws : dictionary, optional\n keyword arguments for the axes for the fit plot\n fig_kws : dictionary, optional\n keyword arguments for a new figure, if there is one being created\n\n Returns\n -------\n matplotlib.figure.Figure\n\n Notes\n ----\n The method combines ModelResult.plot_fit and ModelResult.plot_residuals.\n\n If yerr is specified or if the fit model included weights, then\n matplotlib.axes.Axes.errorbar is used to plot the data. 
If yerr is\n not specified and the fit includes weights, yerr set to 1/self.weights\n\n If `fig` is None then matplotlib.pyplot.figure(**fig_kws) is called.\n\n See Also\n --------\n ModelResult.plot_fit : Plot the fit results using matplotlib.\n ModelResult.plot_residuals : Plot the fit residuals using matplotlib.\n \"\"\"\n if data_kws is None:\n data_kws = {}\n if fit_kws is None:\n fit_kws = {}\n if init_kws is None:\n init_kws = {}\n if ax_res_kws is None:\n ax_res_kws = {}\n if ax_fit_kws is None:\n ax_fit_kws = {}\n if fig_kws is None:\n fig_kws = {}\n\n if len(self.model.independent_vars) != 1:\n print('Fit can only be plotted if the model function has one '\n 'independent variable.')\n return False\n\n if not isinstance(fig, plt.Figure):\n fig = plt.figure(**fig_kws)\n\n if self.data.dtype == complex:\n ncols = 2\n gs = plt.GridSpec(nrows=2, ncols=ncols, height_ratios=[1, 4])\n for i, norm in enumerate((np.real, np.imag)):\n ax_res = fig.add_subplot(gs[0, i], **ax_res_kws)\n ax_fit = fig.add_subplot(gs[1, i], sharex=ax_res, **ax_fit_kws)\n self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,\n initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,\n fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws,\n norm=norm)\n self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,\n data_kws=data_kws, fit_kws=fit_kws,\n ax_kws=ax_res_kws, norm=norm)\n else:\n ncols = 1\n gs = plt.GridSpec(nrows=2, ncols=ncols, height_ratios=[1, 4])\n ax_res = fig.add_subplot(gs[0, 0], **ax_res_kws)\n ax_fit = fig.add_subplot(gs[1, 0], sharex=ax_res, **ax_fit_kws)\n\n self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,\n initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,\n fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)\n self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,\n data_kws=data_kws, fit_kws=fit_kws,\n ax_kws=ax_res_kws)\n\n return fig\n\n\nclass ComplexModel(Model):\n def __init__(self, func, **kwargs):\n # initialize model, find parameter names etc.\n super().__init__(func, **kwargs)\n\n def fit(self, data, params=None, weights=None, method='leastsq',\n iter_cb=None, scale_covar=True, verbose=True, fit_kws=None,\n **kwargs):\n \"\"\"Fit the model to the data.\n\n Parameters\n ----------\n data: array-like\n params: Parameters object\n weights: array-like of same size as data\n used for weighted fit\n method: fitting method to use (default = 'leastsq')\n iter_cb: None or callable callback function to call at each iteration.\n scale_covar: bool (default True) whether to auto-scale covariance matrix\n verbose: bool (default True) print a message when a new parameter is\n added because of a hint.\n fit_kws: dict\n default fitting options, such as xtol and maxfev, for scipy optimizer\n keyword arguments: optional, named like the arguments of the\n model function, will override params. 
See examples below.\n\n Returns\n -------\n lmfit.ModelResult\n\n Examples\n --------\n # Take t to be the independent variable and data to be the\n # curve we will fit.\n\n # Using keyword arguments to set initial guesses\n >>> result = my_model.fit(data, tau=5, N=3, t=t)\n\n # Or, for more control, pass a Parameters object.\n >>> result = my_model.fit(data, params, t=t)\n\n # Keyword arguments override Parameters.\n >>> result = my_model.fit(data, params, tau=5, t=t)\n\n Note\n ----\n All parameters, however passed, are copied on input, so the original\n Parameter objects are unchanged.\n\n \"\"\"\n if params is None:\n params = self.make_params(verbose=verbose)\n else:\n params = deepcopy(params)\n\n # If any kwargs match parameter names, override params.\n param_kwargs = set(kwargs.keys()) & set(self.param_names)\n for name in param_kwargs:\n p = kwargs[name]\n if isinstance(p, Parameter):\n p.name = name # allows N=Parameter(value=5) with implicit name\n params[name] = deepcopy(p)\n else:\n params[name].set(value=p)\n del kwargs[name]\n\n # All remaining kwargs should correspond to independent variables.\n for name in kwargs.keys():\n if name not in self.independent_vars:\n warnings.warn(\"The keyword argument %s does not \" % name +\n \"match any arguments of the model function. \" +\n \"It will be ignored.\", UserWarning)\n\n # If any parameter is not initialized raise a more helpful error.\n missing_param = any([p not in params.keys()\n for p in self.param_names])\n blank_param = any([(p.value is None and p.expr is None)\n for p in params.values()])\n if missing_param or blank_param:\n msg = ('Assign each parameter an initial value by passing '\n 'Parameters or keyword arguments to fit.\\n')\n missing = [p for p in self.param_names if p not in params.keys()]\n blank = [name for name, p in params.items()\n if (p.value is None and p.expr is None)]\n msg += 'Missing parameters: %s\\n' % str(missing)\n msg += 'Non-initialized parameters: %s' % str(blank)\n raise ValueError(msg)\n\n # Do not alter anything that implements the array interface (np.array, pd.Series)\n # but convert other iterables (e.g., Python lists) to numpy arrays.\n if not hasattr(data, '__array__'):\n data = np.asarray(data)\n for var in self.independent_vars:\n var_data = kwargs[var]\n if (not hasattr(var_data, '__array__')) and (not np.isscalar(var_data)):\n kwargs[var] = np.asfarray(var_data)\n\n # Handle null/missing values.\n mask = None\n if self.missing not in (None, 'none'):\n mask = self._handle_missing(data) # This can raise.\n if mask is not None:\n data = data[mask]\n if weights is not None:\n weights = _align(weights, mask, data)\n\n # If independent_vars and data are alignable (pandas), align them,\n # and apply the mask from above if there is one.\n for var in self.independent_vars:\n if not np.isscalar(kwargs[var]):\n kwargs[var] = _align(kwargs[var], mask, data)\n\n if fit_kws is None:\n fit_kws = {}\n\n output = ComplexModelResult(self, params, method=method,\n iter_cb=iter_cb, scale_covar=scale_covar,\n fcn_kws=kwargs, **fit_kws)\n output.fit(data=data, weights=weights)\n output.components = self.components\n return output\n\n def __add__(self, other):\n return ComplexCompositeModel(self, other, operator.add)\n\n def __sub__(self, other):\n return ComplexCompositeModel(self, other, operator.sub)\n\n def __mul__(self, other):\n return ComplexCompositeModel(self, other, operator.mul)\n\n def __div__(self, other):\n return ComplexCompositeModel(self, other, operator.truediv)\n\n def __truediv__(self, 
other):\n return ComplexCompositeModel(self, other, operator.truediv)\n\n\nclass ComplexCompositeModel(ComplexModel):\n _names_collide = (\"\\nTwo models have parameters named '{clash}'. \"\n \"Use distinct names.\")\n _bad_arg = \"CompositeModel: argument {arg} is not a Model\"\n _bad_op = \"CompositeModel: operator {op} is not callable\"\n _known_ops = {operator.add: '+', operator.sub: '-',\n operator.mul: '*', operator.truediv: '/'}\n\n def __init__(self, left, right, op, **kws):\n if not isinstance(left, Model):\n raise ValueError(self._bad_arg.format(arg=left))\n if not isinstance(right, Model):\n raise ValueError(self._bad_arg.format(arg=right))\n if not callable(op):\n raise ValueError(self._bad_op.format(op=op))\n\n self.left = left\n self.right = right\n self.op = op\n\n name_collisions = set(left.param_names) & set(right.param_names)\n if len(name_collisions) > 0:\n msg = ''\n for collision in name_collisions:\n msg += self._names_collide.format(clash=collision)\n raise NameError(msg)\n\n # we assume that all the sub-models have the same independent vars\n if 'independent_vars' not in kws:\n kws['independent_vars'] = self.left.independent_vars\n if 'missing' not in kws:\n kws['missing'] = self.left.missing\n\n def _tmp(*args, **kws): pass\n ComplexModel.__init__(self, _tmp, **kws)\n\n for side in (left, right):\n prefix = side.prefix\n for basename, hint in side.param_hints.items():\n self.param_hints[\"%s%s\" % (prefix, basename)] = hint\n\n def _parse_params(self):\n self._func_haskeywords = (self.left._func_haskeywords or\n self.right._func_haskeywords)\n self._func_allargs = (self.left._func_allargs +\n self.right._func_allargs)\n self.def_vals = deepcopy(self.right.def_vals)\n self.def_vals.update(self.left.def_vals)\n self.opts = deepcopy(self.right.opts)\n self.opts.update(self.left.opts)\n\n def _reprstring(self, long=False):\n return \"(%s %s %s)\" % (self.left._reprstring(long=long),\n self._known_ops.get(self.op, self.op),\n self.right._reprstring(long=long))\n\n def eval(self, params=None, **kwargs):\n return self.op(self.left.eval(params=params, **kwargs),\n self.right.eval(params=params, **kwargs))\n\n def eval_components(self, **kwargs):\n \"\"\"return ordered dict of name, results for each component\"\"\"\n out = OrderedDict(self.left.eval_components(**kwargs))\n out.update(self.right.eval_components(**kwargs))\n return out\n\n @property\n def param_names(self):\n return self.left.param_names + self.right.param_names\n\n @property\n def components(self):\n \"\"\"return components for composite model\"\"\"\n return self.left.components + self.right.components\n\n def _make_all_args(self, params=None, **kwargs):\n \"\"\"generate **all** function args for all functions\"\"\"\n out = self.right._make_all_args(params=params, **kwargs)\n out.update(self.left._make_all_args(params=params, **kwargs))\n return out\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.asarray",
"numpy.asfarray",
"numpy.isscalar",
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
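The `ComplexModel`/`ComplexCompositeModel` pair in the row above builds composite fit models by overloading Python's arithmetic operators, so that `model_a + model_b` returns a new model whose `eval` applies the operator to the two sub-results. A minimal, self-contained sketch of that pattern (the `Base` and `Composite` names here are illustrative, not lmfit's API):

    import operator

    class Base:
        def __init__(self, func):
            self.func = func
        def eval(self, **kwargs):
            return self.func(**kwargs)
        def __add__(self, other):
            return Composite(self, other, operator.add)
        def __mul__(self, other):
            return Composite(self, other, operator.mul)

    class Composite(Base):
        def __init__(self, left, right, op):
            self.left, self.right, self.op = left, right, op
        def eval(self, **kwargs):
            # evaluate both sub-models, then combine with the stored operator
            return self.op(self.left.eval(**kwargs), self.right.eval(**kwargs))

    line = Base(lambda x: 2.0 * x)
    offset = Base(lambda x: 1.0)
    model = line + offset        # Composite(line, offset, operator.add)
    print(model.eval(x=3.0))     # 7.0

Because `Composite` is itself a model, composition nests arbitrarily, which is exactly what lets the class above report `(left op right)` recursively in `_reprstring`.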
ctrasd/Panda-2020-gold-medal-solution | [
"cab252b149f05d29e7321911bec3d8f314bd16b5"
] | [
"train_efficient_reg.py"
] | [
"from __future__ import print_function, absolute_import\r\nimport os\r\nimport sys\r\nimport time\r\nimport datetime\r\nimport argparse\r\nimport os.path as osp\r\nimport numpy as np\r\nimport random\r\nfrom PIL import Image\r\nimport tqdm\r\nimport cv2\r\n\r\nimport torchvision as tv\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.backends.cudnn as cudnn\r\nfrom sklearn.metrics import f1_score\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.autograd import Variable\r\nfrom torch.optim import lr_scheduler\r\nfrom tqdm import tqdm\r\nfrom torch.utils.data import Dataset\r\nimport torchvision.transforms as transforms\r\nfrom tensorboardX import SummaryWriter\r\nfrom efficientnet_pytorch import EfficientNet\r\nfrom warmup_scheduler import GradualWarmupScheduler\r\nfrom evaluation import *\r\nfrom models.inception_v4 import *\r\n#from models.resnet import *\r\nfrom models.densenet import *\r\nparser = argparse.ArgumentParser(description='Don\\'t worry, be happy')\r\nparser.add_argument('--train-batch', default=32, type=int,\r\n help=\"train batch size\")\r\nparser.add_argument('--test-batch', default=1, type=int,\r\n help=\"test batch size\")\r\nparser.add_argument('--gpu-devices', default='0', type=str, help='gpu device ids for CUDA_VISIBLE_DEVICES')\r\nparser.add_argument('--fold', default='4', choices=['0','1','2','3','4'])\r\nparser.add_argument('--zoom', default='no', choices=['yes','no'])\r\nparser.add_argument('--max-epoch', default=50, type=int,\r\n help=\"maximum epochs to run\")\r\nparser.add_argument('--start-epoch', default=0, type=int,\r\n help=\"manual epoch number (useful on restarts)\")\r\nparser.add_argument('--save-dir', type=str, default='/data/ctr/tangwang_new/save_dir/')\r\nparser.add_argument('--model', default='dense',choices=['dense','inception_resnet','efficientnet-b0','efficientnet-b1','efficientnet-b2',\r\n 'efficientnet-b3','efficientnet-b4','efficientnet-b5','efficientnet-b6','efficientnet-b7','efficientnet-b8'])\r\n\r\nparser.add_argument('--seed', type=int, default=1, help=\"manual seed\")\r\nparser.add_argument('--lr', '--learning-rate', default=1e-5, type=float,\r\n help=\"initial learning rate, use 0.0001 for rnn, use 0.0003 for pooling and attention\")\r\nparser.add_argument('--stepsize', default=100, type=int,\r\n help=\"stepsize to decay learning rate (>0 means this is enabled)\")\r\nparser.add_argument('--gamma', default=0.3, type=float,\r\n help=\"learning rate decay\")\r\nparser.add_argument('--weight-decay', default=5e-04, type=float,\r\n help=\"weight decay (default: 5e-04)\")\r\nparser.add_argument('--margin', type=float, default=0.3, help=\"margin for triplet loss\")\r\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',help='SGD momentum (default: 0.9)')\r\nargs = parser.parse_args()\r\ndef cv_imread(file_path):\r\n cv_img=cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),-1)\r\n return cv_img \r\n\r\nfrom torch.nn.parameter import Parameter\r\n\r\ndef gem(x, p=3, eps=1e-6):\r\n return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(1./p)\r\nclass GeM(nn.Module):\r\n def __init__(self, p=3, eps=1e-6):\r\n super(GeM,self).__init__()\r\n self.p = Parameter(torch.ones(1)*p)\r\n self.eps = eps\r\n def forward(self, x):\r\n return gem(x, p=self.p, eps=self.eps) \r\n def __repr__(self):\r\n return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + 
')'\r\n\r\nimage_size=256\r\ncnt_t=0\r\n\r\ndef get_aug_img(this_img):\r\n se=random.random()\r\n if se<=0.5:\r\n this_img=cv2.flip(this_img,0)\r\n se2=random.random()\r\n if se2<=0.5:\r\n this_img=cv2.flip(this_img,1)\r\n se3=random.random()\r\n if se3<=0.5:\r\n this_img=np.transpose(this_img,(1,0,2))\r\n return this_img\r\n\r\nclass panda_dataset_random(Dataset):\r\n \"\"\"docstring for data\"\"\"\r\n def __init__(self, txt_path,transform=None):\r\n fh = open(txt_path, 'r')\r\n imgs = []\r\n for line in fh:\r\n line = line.rstrip()\r\n words = line.split()\r\n imgs.append((words[0], int(words[2])))\r\n self.imgs = imgs \r\n self.transform = transform\r\n def __getitem__(self, index):\r\n fn, label1 = self.imgs[index]\r\n label=torch.tensor([0]*6).float()\r\n label[0:label1+1]=1\r\n label=label*0.9+0.05\r\n label[0]=1\r\n se=random.random()\r\n if se<=0.5:\r\n img=cv2.imread('./train_images_png/'+fn+'.png')\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n #img = Image.open('./train_images_png/'+fn+'.png').convert('RGB') \r\n else:\r\n img=cv2.imread('./train_images_png/'+fn+'_aug.png')\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n #img = Image.open('./train_images_png/'+fn+'_aug.png').convert('RGB') \r\n # randomly flip/transpose 4 of the 36 tiles in place\r\n flip_idx=np.random.choice(list(range(36)), 4, replace=False)\r\n for id in flip_idx:\r\n x=id%6\r\n y=id//6\r\n h1 = x * image_size\r\n w1 = y * image_size\r\n flip_img=img[h1:h1+image_size, w1:w1+image_size]\r\n flip_img=get_aug_img(flip_img)\r\n img[h1:h1+image_size, w1:w1+image_size]=flip_img\r\n #img= Image.open(fn).convert('L')\r\n #img = Image.open(fn).convert('RGB')\r\n #img=cv2.imread(fn)\r\n #img=load_ben_color(img)\r\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n #img = cv2.resize(img, (512, 512))\r\n #img=load_ben_yuan(img)\r\n\r\n #cv2.imwrite('./save_test/test_'+str(index)+'.jpg',img)\r\n img = Image.fromarray(img)\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n #img = img.unsqueeze(0) \r\n \r\n return fn,img, label,label1\r\n def __len__(self):\r\n return len(self.imgs)\r\n\r\n\r\nclass panda_dataset(Dataset):\r\n \"\"\"docstring for data\"\"\"\r\n def __init__(self, txt_path,transform=None):\r\n fh = open(txt_path, 'r')\r\n imgs = []\r\n for line in fh:\r\n line = line.rstrip()\r\n words = line.split()\r\n imgs.append((words[0], int(words[2])))\r\n self.imgs = imgs \r\n self.transform = transform\r\n def __getitem__(self, index):\r\n fn, label1 = self.imgs[index]\r\n label=torch.tensor([0]*6).float()\r\n label[0:label1+1]=1\r\n label=label*0.9+0.05\r\n label[0]=1\r\n se=random.random()\r\n if se<=0.5:\r\n img = Image.open('./train_images_png/'+fn+'.png').convert('RGB') \r\n else:\r\n img = Image.open('./train_images_png/'+fn+'_aug.png').convert('RGB') \r\n if self.transform is not None:\r\n img = self.transform(img)\r\n #img = img.unsqueeze(0) \r\n \r\n return fn,img, label,label1\r\n def __len__(self):\r\n return len(self.imgs)\r\nclass panda_dataset_test(Dataset):\r\n \"\"\"docstring for data\"\"\"\r\n def __init__(self, txt_path,transform=None):\r\n fh = open(txt_path, 'r')\r\n imgs = []\r\n for line in fh:\r\n line = line.rstrip()\r\n words = line.split()\r\n imgs.append((words[0], int(words[2])))\r\n self.imgs = imgs \r\n self.transform = transform\r\n def __getitem__(self, index):\r\n fn, label1 = self.imgs[index]\r\n label=torch.tensor([0]*6).float()\r\n label[0:label1+1]=1\r\n label=label*0.9+0.05\r\n label[0]=1\r\n img = Image.open('./train_images_png/'+fn+'.png').convert('RGB') \r\n #img= Image.open(fn).convert('L')\r\n #img = 
Image.open(fn).convert('RGB')\r\n #img=cv2.imread(fn)\r\n #img=load_ben_color(img)\r\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n #img = cv2.resize(img, (512, 512))\r\n #img=load_ben_yuan(img)\r\n\r\n #cv2.imwrite('./save_test/test_'+str(index)+'.jpg',img)\r\n #img = Image.fromarray(img)\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n #img = img.unsqueeze(0) \r\n \r\n return fn,img, label,label1\r\n def __len__(self):\r\n return len(self.imgs)\r\n\r\ninit_coef = [0.5, 1.5, 2.5, 3.5, 4.5]\r\ndef get_preds(arr,coef=init_coef):\r\n X_p = np.copy(arr.detach().cpu())\r\n for i, pred in enumerate(X_p):\r\n if pred < coef[0]:\r\n X_p[i] = 0\r\n elif pred >= coef[0] and pred < coef[1]:\r\n X_p[i] = 1\r\n elif pred >= coef[1] and pred < coef[2]:\r\n X_p[i] = 2\r\n elif pred >= coef[2] and pred < coef[3]:\r\n X_p[i] = 3\r\n elif pred >= coef[3] and pred < coef[4]:\r\n X_p[i] = 4\r\n else:\r\n X_p[i] = 5\r\n return X_p\r\n\r\nwarmup_factor = 10\r\nwarmup_epo=1\r\nif __name__ == '__main__':\r\n max_loss=100000000\r\n torch.set_num_threads(5) \r\n runId = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S-')\r\n args.save_dir = os.path.join(args.save_dir, runId)\r\n writer=SummaryWriter(comment='_36_256_256_'+args.model+'_00001_adam_l1_fold_aug_reg_'+args.fold+'_')\r\n #if not os.path.exists(args.save_dir):\r\n #os.mkdir(args.save_dir)\r\n print(\"==========\\nArgs:{}\\n==========\".format(args))\r\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\r\n use_gpu = torch.cuda.is_available()\r\n if use_gpu:\r\n print(\"Currently using GPU {}\".format(args.gpu_devices))\r\n cudnn.benchmark = True\r\n #torch.cuda.manual_seed_all(args.seed)\r\n else:\r\n print(\"Currently using CPU (GPU is highly recommended)\")\r\n\r\n\r\n transform_dense = transforms.Compose([\r\n #transforms.CenterCrop(512)\r\n #transforms.RandomResizedCrop(512),\r\n transforms.RandomRotation(360),\r\n transforms.ColorJitter(brightness=0.1,contrast=0.2,saturation=0.1,hue=0.1),\r\n transforms.ToTensor(), # convert to Tensor\r\n #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\r\n #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # normalize\r\n ]) \r\n transform_eff = transforms.Compose([\r\n #transforms.CenterCrop(512)\r\n #transforms.RandomResizedCrop(456),\r\n transforms.RandomHorizontalFlip(p=0.5),\r\n transforms.RandomVerticalFlip(p=0.5),\r\n #transforms.RandomRotation(360),\r\n transforms.ColorJitter(brightness=0.1,contrast=0.05,saturation=0.05,hue=0.05),\r\n #transforms.Resize((528, 528)),\r\n transforms.ToTensor(), # convert to Tensor\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\r\n ])\r\n transform_test = transforms.Compose([\r\n #transforms.CenterCrop(512)\r\n #transforms.RandomResizedCrop(512),\r\n #transforms.ColorJitter(brightness=20,contrast=0.2,saturation=20,hue=0.1),\r\n #transforms.Resize((512, 512)),\r\n transforms.ToTensor(), # convert to Tensor\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\r\n ])\r\n if args.model=='dense':\r\n transform2=transform_dense\r\n else:\r\n transform2=transform_eff\r\n\r\n train_data=panda_dataset('./split/train_fold_'+args.fold+'.txt',transform2)\r\n test_data=panda_dataset_test('./split/fold_'+args.fold+'.txt',transform_test)\r\n if args.model=='dense':\r\n net=Baseline_single(num_classes=6)\r\n #net.load_state_dict(torch.load('./save_models/model_yuan456_'+args.model+'_00001_adam_combine_'+args.dataset+'_bce_maxest.pkl'))\r\n else:\r\n '''\r\n net = 
EfficientNet.from_pretrained(args.model)\r\n #net = EfficientNet.from_name('efficientnet-b0')\r\n feature = net._fc.in_features\r\n net._fc = nn.Linear(in_features=feature,out_features=1,bias=True)\r\n net._avg_pooling=GeM(p=2.5)\r\n '''\r\n net=regnet(num_classes=1,base_model=args.model)\r\n if use_gpu:\r\n net= nn.DataParallel(net).cuda()\r\n #net.load_state_dict(torch.load('./save_models/model_36_256_256_efficientnet-b0_0.0003_adam_3_l1_newest_aug_gem_reg.pkl'))\r\n #criterion = nn.CrossEntropyLoss()\r\n criterion = nn.SmoothL1Loss()\r\n #criterion=nn.BCEWithLogitsLoss()\r\n #criterion = nn.MSELoss()\r\n optimizer = optim.Adam(net.parameters(),lr=args.lr,betas=(0.9,0.999), eps=1e-08, weight_decay =0.0)\r\n #scheduler=torch.optim.lr_scheduler.MultiStepLR(optimizer,[10,25,35], gamma=0.3, last_epoch=-1)\r\n scheduler =torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.8, patience=5, verbose=True, eps=1e-6)\r\n #print(epoch, 'lr={:.6f}'.format(scheduler.get_lr()[0]))\r\n #scheduler_cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.max_epoch)\r\n #scheduler = GradualWarmupScheduler(optimizer, multiplier=warmup_factor, total_epoch=warmup_epo, after_scheduler=scheduler_cosine) \r\n start_epoch = args.start_epoch\r\n dataloader = DataLoader(\r\n train_data,batch_size= args.train_batch, shuffle = True, num_workers= 1)\r\n dataloader_test=DataLoader(\r\n test_data,batch_size= args.test_batch, shuffle = False, num_workers= 1)\r\n optimizer.zero_grad()\r\n idx=0\r\n max_correct=0\r\n for epoch in range(start_epoch,args.max_epoch):\r\n print('lr:',optimizer.state_dict()['param_groups'][0]['lr'])\r\n #print(epoch, 'lr={:.7f}'.format(scheduler.get_lr()[0]))\r\n #print(\"==> Epoch {}/{}\".format(epoch+1, args.max_epoch))\r\n #if epoch==1:\r\n # for param_group in optimizer.param_groups:\r\n # param_group['lr']=param_group['lr']*0.1\r\n # print('lr',param_group['lr'])\r\n total_loss=0\r\n total_correct=0\r\n total=0\r\n net.train()\r\n feature_show_flag=0\r\n for id,item in tqdm(enumerate(dataloader)):\r\n #print(id)\r\n idx=idx+1\r\n fn,data,label,label_num=item\r\n #print(fn,label_num)\r\n if use_gpu:\r\n data=data.cuda()\r\n label=label.float().cuda()\r\n label_num=label_num.long().cuda()\r\n label_num_float=label_num.float().cuda().reshape(label_num.shape[0],1)\r\n else:\r\n #label=label.float()\r\n label_num=label_num.long()\r\n print(data.size())\r\n optimizer.zero_grad()\r\n #out,aux=net(data)\r\n out=net(data)\r\n #out=out.reshape(out.shape[0])\r\n #print(out.shape)\r\n #print(feat_map[0,9,:,:].shape)\r\n #out=net(data)\r\n #print(out,type(out))\r\n #print(type(out))\r\n #print('')\r\n #print(out)\r\n #print(label_num_float)\r\n\r\n #_, predicted = torch.max(out, 1)\r\n predicted=get_preds(out).reshape(out.shape[0])\r\n total += len(label_num)\r\n #print('')\r\n #print(predicted)\r\n #print(label_num.cpu().numpy())\r\n correct = (predicted == label_num.cpu().numpy()).sum()\r\n total_correct=total_correct+correct\r\n loss=criterion(out,label_num_float)\r\n total_loss=total_loss+loss.item()\r\n #print(torch.sigmoid(out),'\\n',label)\r\n loss.backward()\r\n optimizer.step()\r\n print('id:%d loss:%f correct:%d'%(id,loss.item(),correct))\r\n writer.add_scalar('scalar/running_loss',loss.item(), idx)\r\n print(\"loss:%f\"%(total_loss))\r\n writer.add_scalar('scalar/running_correct',total_correct.astype('float32')/total, epoch)\r\n #f = open(args.save_dir+'/train_log.txt','a')\r\n #f.writelines([str(id),str(total_loss)])\r\n #f.close()\r\n feature_show_flag=0\r\n if 
epoch%1==0:\r\n ratera=np.array([])\r\n raterb=np.array([])\r\n with torch.no_grad():\r\n net.eval()\r\n total=0\r\n total_loss=0\r\n correct=0\r\n for id,item in tqdm(enumerate(dataloader_test)):\r\n fn,data,label,label_num=item\r\n if use_gpu:\r\n data=data.cuda()\r\n label=label.float().cuda()\r\n label_num=label_num.long().cuda()\r\n label_num_float=label_num.float().cuda().reshape(label_num.shape[0],1)\r\n else:\r\n label=label.float()\r\n label_num=label_num.long()\r\n #print(data.size())\r\n \r\n #out,aux=net(data)\r\n out=net(data)\r\n #out=out.reshape(out.shape[0])\r\n '''\r\n if feature_show_flag<=5:\r\n for i in range(3):\r\n writer.add_image(fn[0].split('/')[-1]+'_'+str(epoch)+'_'+str(label_num),feat_map[0,i,:,:].reshape((-1,feat_map[0,i,:,:].shape[0],feat_map[0,i,:,:].shape[1])))\r\n feature_show_flag+=1 \r\n '''\r\n #out=net(data)\r\n #_, predicted = torch.max(out, 1)\r\n predicted=get_preds(out).reshape(out.shape[0])\r\n total += label.size(0)\r\n correct += (predicted == label_num.cpu().numpy()).sum()\r\n loss1=criterion(out, label_num_float)\r\n total_loss+=loss1.item()\r\n #predicted=predicted.cpu().numpy()\r\n #label_num=predicted.cpu().numpy()\r\n ratera=np.hstack((ratera,predicted))\r\n raterb=np.hstack((raterb,label_num.cpu().numpy()))\r\n print(ratera,raterb)\r\n kappa=quadratic_weighted_kappa(ratera,raterb)\r\n #np.save('./save_predict/'+str(epoch)+'.npy',ratera)\r\n #np.save('./save_predict/gt.npy',raterb)\r\n print('Test accuracy: %.2f %% kappa: %.4f'%((100*correct.astype('float32')/total),kappa))\r\n writer.add_scalar('scalar/test_correct',correct.astype('float32')/total, epoch)\r\n writer.add_scalar('scalar/test_loss',total_loss/total, epoch)\r\n writer.add_scalar('scalar/test_kappa',kappa, epoch)\r\n #scheduler_cosine.step()\r\n scheduler.step(total_loss/total)\r\n state_dict = net.state_dict()\r\n torch.save(state_dict, './save_models/model_36_256_256'+'_'+args.model+'_'+str(args.lr)+'_adam_'+args.fold+'_l1_newest_aug_gem_reg_ji.pkl')\r\n if kappa>max_correct:\r\n max_correct=kappa\r\n print('saved kappa:',max_correct)\r\n state_dict = net.state_dict()\r\n torch.save(state_dict, './save_models/model_36_256_256'+'_'+args.model+'_'+str(args.lr)+'_adam_'+args.fold+'_l1_maxest_aug_gem_reg_ji.pkl')\r\n if total_loss/total<max_loss:\r\n max_loss=total_loss/total\r\n print('saved min_loss:',max_loss)\r\n state_dict = net.state_dict()\r\n torch.save(state_dict, './save_models/model_36_256_256'+'_'+args.model+'_'+str(args.lr)+'_adam_'+args.fold+'_l1_min_loss_aug_gem_reg_ji.pkl')\r\n"
] | [
[
"torch.nn.SmoothL1Loss",
"numpy.hstack",
"numpy.fromfile",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.set_num_threads",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.transpose",
"torch.nn.DataParallel",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
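`get_preds` in the row above converts the network's single regression output into an ISUP grade 0-5 by comparing it against the cut points in `init_coef`. The same binning can be written in one call with numpy's `digitize`; a sketch using the same thresholds on toy predictions:

    import numpy as np

    coef = [0.5, 1.5, 2.5, 3.5, 4.5]        # cut points, as in init_coef above
    preds = np.array([0.2, 1.7, 4.9, 3.1])  # toy regression outputs
    grades = np.digitize(preds, coef)       # grade = number of thresholds crossed
    print(grades)                           # [0 2 5 3]

Treating the grade as a regression target with threshold decoding is a common trick for ordinal labels evaluated with quadratic weighted kappa, since the loss then respects the ordering of the classes.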
rhayes777/PyAutoF | [
"87f56419348833b285b00da1a524e329588e0b01"
] | [
"test_autofit/graphical/gaussian/conftest.py"
] | [
"import numpy as np\nimport pytest\n\nimport autofit as af\nfrom test_autofit.graphical.gaussian.model import Gaussian, make_data\n\n\[email protected](\n name=\"x\"\n)\ndef make_x():\n return np.arange(100)\n\n\[email protected](\n name=\"y\"\n)\ndef make_y(x):\n return make_data(Gaussian(centre=50.0, intensity=25.0, sigma=10.0), x)\n\n\[email protected](\n name=\"prior_model\"\n)\ndef make_prior_model():\n return af.PriorModel(\n Gaussian,\n centre=af.GaussianPrior(mean=50, sigma=20),\n intensity=af.GaussianPrior(mean=25, sigma=10),\n sigma=af.GaussianPrior(mean=10, sigma=10),\n )\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HrBlack/NMT_Pytorch | [
"2652958fbd4cf382ae54f6ce57e9b8ebcd9ace92"
] | [
"translate.py"
] | [
"import os\nimport logging\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\nfrom torch.serialization import default_restore_location\n\nfrom seq2seq import models, utils\nfrom seq2seq.data.dictionary import Dictionary\nfrom seq2seq.data.dataset import Seq2SeqDataset, BatchSampler\n\n\ndef get_args():\n \"\"\" Defines generation-specific hyper-parameters. \"\"\"\n parser = argparse.ArgumentParser('Sequence to Sequence Model')\n parser.add_argument('--cuda', default=False, help='Use a GPU')\n parser.add_argument('--seed', default=42, type=int, help='pseudo random number generator seed')\n\n # Add data arguments\n parser.add_argument('--data', default='data-bin', help='path to data directory')\n parser.add_argument('--checkpoint-path', default='checkpoints/checkpoint_best.pt', help='path to the model file')\n parser.add_argument('--batch-size', default=None, type=int, help='maximum number of sentences in a batch')\n parser.add_argument('--output', default='model_translations.txt', type=str,\n help='path to the output file destination')\n parser.add_argument('--max-len', default=25, type=int, help='maximum length of generated sequence')\n\n return parser.parse_args()\n\n\ndef main(args):\n \"\"\" Main translation function' \"\"\"\n # Load arguments from checkpoint\n torch.manual_seed(args.seed)\n state_dict = torch.load(args.checkpoint_path, map_location=lambda s, l: default_restore_location(s, 'cpu'))\n args = argparse.Namespace(**{**vars(args), **vars(state_dict['args'])})\n utils.init_logging(args)\n\n # Load dictionaries\n src_dict = Dictionary.load(os.path.join(args.data, 'dict.{:s}'.format(args.source_lang)))\n logging.info('Loaded a source dictionary ({:s}) with {:d} words'.format(args.source_lang, len(src_dict)))\n tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{:s}'.format(args.target_lang)))\n logging.info('Loaded a target dictionary ({:s}) with {:d} words'.format(args.target_lang, len(tgt_dict)))\n\n # Load dataset\n test_dataset = Seq2SeqDataset(\n src_file=os.path.join(args.data, 'test.{:s}'.format(args.source_lang)),\n tgt_file=os.path.join(args.data, 'test.{:s}'.format(args.target_lang)),\n src_dict=src_dict, tgt_dict=tgt_dict)\n\n test_loader = torch.utils.data.DataLoader(test_dataset, num_workers=1, collate_fn=test_dataset.collater,\n batch_sampler=BatchSampler(test_dataset, 9999999,\n args.batch_size, 1, 0, shuffle=False,\n seed=args.seed))\n # Build model and criterion\n model = models.build_model(args, src_dict, tgt_dict)\n if args.cuda:\n model = model.cuda()\n model.eval()\n model.load_state_dict(state_dict['model'])\n logging.info('Loaded a model from checkpoint {:s}'.format(args.checkpoint_path))\n progress_bar = tqdm(test_loader, desc='| Generation', leave=False)\n\n # Iterate over the test set\n all_hyps = {}\n for i, sample in enumerate(progress_bar):\n with torch.no_grad():\n # Compute the encoder output\n encoder_out = model.encoder(sample['src_tokens'], sample['src_lengths'])\n go_slice = \\\n torch.ones(sample['src_tokens'].shape[0], 1).fill_(tgt_dict.eos_idx).type_as(sample['src_tokens'])\n prev_words = go_slice\n next_words = None\n\n for _ in range(args.max_len):\n with torch.no_grad():\n # Compute the decoder output by repeatedly feeding it the decoded sentence prefix\n decoder_out, _ = model.decoder(prev_words, encoder_out)\n # Suppress <UNK>s\n _, next_candidates = torch.topk(decoder_out, 2, dim=-1)\n best_candidates = next_candidates[:, :, 0]\n backoff_candidates = next_candidates[:, :, 1]\n next_words = 
torch.where(best_candidates == tgt_dict.unk_idx, backoff_candidates, best_candidates)\n prev_words = torch.cat([go_slice, next_words], dim=1)\n\n # Segment into sentences\n decoded_batch = next_words.numpy()\n output_sentences = [decoded_batch[row, :] for row in range(decoded_batch.shape[0])]\n assert(len(output_sentences) == len(sample['id'].data))\n\n # Remove padding\n temp = list()\n for sent in output_sentences:\n first_eos = np.where(sent == tgt_dict.eos_idx)[0]\n if len(first_eos) > 0:\n temp.append(sent[:first_eos[0]])\n else:\n temp.append([])\n output_sentences = temp\n\n # Convert arrays of indices into strings of words\n output_sentences = [tgt_dict.string(sent) for sent in output_sentences]\n\n # Save translations\n assert(len(output_sentences) == len(sample['id'].data))\n for ii, sent in enumerate(output_sentences):\n all_hyps[int(sample['id'].data[ii])] = sent\n\n # Write to file\n if args.output is not None:\n with open(args.output, 'w') as out_file:\n for sent_id in range(len(all_hyps.keys())):\n out_file.write(all_hyps[sent_id] + '\\n')\n\n\nif __name__ == '__main__':\n args = get_args()\n main(args)\n"
] | [
[
"torch.ones",
"torch.cat",
"torch.manual_seed",
"torch.serialization.default_restore_location",
"torch.no_grad",
"torch.where",
"torch.topk",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
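The decoding loop in `translate.py` above suppresses `<UNK>` outputs by taking the two best candidates per position with `torch.topk` and substituting the runner-up wherever the argmax equals the unknown index. The trick in isolation, on dummy logits (the `unk_idx` value here is made up):

    import torch

    unk_idx = 3                          # illustrative unknown-token index
    logits = torch.randn(2, 5, 10)       # (batch, time, vocab) dummy scores
    _, top2 = torch.topk(logits, 2, dim=-1)
    best, backoff = top2[..., 0], top2[..., 1]
    next_words = torch.where(best == unk_idx, backoff, best)
    print(next_words.shape)              # torch.Size([2, 5])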
kenchan0226/control-sum-cmdp | [
"5181e8e0c9bf6bef48f66457e06d3f398f4a428a"
] | [
"predict.py"
] | [
"import torch\nimport config\nimport argparse\nimport pickle as pkl\nfrom utils import io\nfrom utils.io import Many2ManyDatasetWithAttributes\nfrom torch.utils.data import DataLoader\nimport os\nfrom os.path import join\nfrom model.seq2seq import Seq2SeqModel\nfrom model.seq2seq_style_input import Seq2SeqModelStyleInput\nfrom model.seq2seq_exact_length_input import Seq2SeqModelExactLenInput\nfrom model.diversity_attn_seq2seq import Seq2SeqDiversityAttnModel\nfrom sequence_generator import SequenceGenerator\nfrom tqdm import tqdm\nimport json\nfrom utils.string_helper import prediction_to_sentence\nimport nltk\nimport rreplace\nfrom types import SimpleNamespace\n\n\ndef process_opt(opt):\n if opt.seed > 0:\n torch.manual_seed(opt.seed)\n\n # make directory\n if not os.path.exists(opt.pred_path):\n os.makedirs(opt.pred_path)\n os.makedirs(join(opt.pred_path, 'output'))\n else:\n print(\"Folder exists!\")\n raise ValueError\n\n # dump configuration\n torch.save(opt, open(join(opt.pred_path, 'decode.config'), 'wb'))\n json.dump(vars(opt), open(join(opt.pred_path, 'log.json'), 'w'))\n\n return opt\n\n\ndef init_pretrained_model(pretrained_model_path, opt):\n if opt.model_type == 'seq2seq':\n assert not opt.multi_style\n model = Seq2SeqModel(opt)\n elif opt.model_type == 'seq2seq_style_input':\n assert opt.multi_style\n model = Seq2SeqModelStyleInput(opt)\n elif opt.model_type == 'seq2seq_exact_length_input':\n model = Seq2SeqModelExactLenInput(opt)\n elif opt.model_type == 'seq2seq_diversity_attn':\n model = Seq2SeqDiversityAttnModel(opt)\n else:\n raise ValueError\n\n model.load_state_dict(torch.load(pretrained_model_path))\n model.to(opt.device)\n model.eval()\n return model\n\n\ndef preprocess_beam_search_result(beam_search_result, idx2word, vocab_size, oov_lists, eos_idx, unk_idx, replace_unk, src_str_list):\n batch_size = beam_search_result['batch_size']\n predictions = beam_search_result['predictions']\n scores = beam_search_result['scores']\n attention = beam_search_result['attention']\n assert len(predictions) == batch_size\n pred_list = [] # a list of dict, with len = batch_size\n for pred_n_best, score_n_best, attn_n_best, oov, src_word_list in zip(predictions, scores, attention, oov_lists, src_str_list):\n # attn_n_best: list of tensor with size [trg_len, src_len], len=n_best\n pred_dict = {}\n sentences_n_best = []\n for pred, attn in zip(pred_n_best, attn_n_best):\n sentence = prediction_to_sentence(pred, idx2word, vocab_size, oov, eos_idx, unk_idx, replace_unk, src_word_list, attn)\n #sentence = [idx2word[int(idx.item())] if int(idx.item()) < vocab_size else oov[int(idx.item())-vocab_size] for idx in pred[:-1]]\n sentences_n_best.append(sentence)\n pred_dict['sentences'] = sentences_n_best # a list of list of word, with len [n_best, out_seq_len], does not include tbe final <EOS>\n pred_dict['scores'] = score_n_best # a list of zero dim tensor, with len [n_best]\n pred_dict['attention'] = attn_n_best # a list of FloatTensor[output sequence length, src_len], with len = [n_best]\n pred_list.append(pred_dict)\n return pred_list\n\n\ndef predict(test_data_loader, model, opt):\n generator = SequenceGenerator(model,\n bos_idx=io.BOS,\n eos_idx=io.EOS,\n pad_idx=io.PAD,\n beam_size=opt.beam_size,\n max_sequence_length=opt.pred_max_len,\n include_attn_dist=opt.include_attn_dist,\n length_penalty_factor=opt.length_penalty_factor,\n coverage_penalty_factor=opt.coverage_penalty_factor,\n length_penalty=opt.length_penalty,\n coverage_penalty=opt.coverage_penalty,\n cuda=opt.gpuid > -1,\n 
n_best=opt.n_best,\n block_ngram_repeat=opt.block_ngram_repeat,\n ignore_when_blocking=opt.ignore_when_blocking,\n len_idx=opt.word2idx[io.EXACT_LEN_WORD] if 2 in opt.control_modes else -1\n )\n\n num_exported_samples = 0\n with torch.no_grad():\n for batch in tqdm(test_data_loader):\n #src, src_lens, src_mask, src_oov, oov_lists, src_str_list, original_idx_list = batch\n \"\"\"\n src: a LongTensor containing the word indices of source sentences, [batch, src_seq_len], with oov words replaced by unk idx\n src_lens: a list containing the length of src sequences for each batch, with len=batch\n src_mask: a FloatTensor, [batch, src_seq_len]\n src_oov: a LongTensor containing the word indices of source sentences, [batch, src_seq_len], contains the index of oov words (used by copy)\n oov_lists: a list of oov words for each src, 2dlist\n \"\"\"\n src = batch['src_tensor']\n src_lens = batch['src_lens']\n src_mask = batch['src_mask']\n src_oov = batch['src_oov_tensor']\n oov_lists = batch['oov_lists']\n src_str_list = batch['src_list_tokenized']\n #original_idx_list = batch['original_indices']\n\n src = src.to(opt.device)\n src_mask = src_mask.to(opt.device)\n src_oov = src_oov.to(opt.device)\n\n \"\"\"\n for src_str in src_str_list:\n print(src_str[:10])\n print(src.detach().cpu().numpy()[:, :10])\n print(batch['trg_lens'])\n print(batch['len_bins'])\n print(batch['exact_lens'])\n exit()\n \"\"\"\n\n if opt.multi_style:\n style_label = batch['style_tensor']\n style_label = style_label.to(opt.device)\n\n if isinstance(model, Seq2SeqModel):\n beam_search_result = generator.beam_search(src, src_lens, src_oov, src_mask, oov_lists, opt.word2idx)\n elif isinstance(model, Seq2SeqModelStyleInput):\n beam_search_result = generator.beam_search_with_style(src, src_lens, src_oov, src_mask, oov_lists,\n opt.word2idx, style_label)\n elif isinstance(model, Seq2SeqModelExactLenInput):\n beam_search_result = generator.beam_search_with_exact_len(src, src_lens, src_oov, src_mask, oov_lists,\n opt.word2idx, batch['exact_lens'])\n elif isinstance(model, Seq2SeqDiversityAttnModel):\n query_tensor = batch['query_tensor'].to(opt.device)\n query_mask = batch['query_mask'].to(opt.device)\n query_lens = batch['query_lens']\n beam_search_result = generator.beam_search_diversity_attn(src, src_lens, query_tensor, query_lens, src_oov, src_mask, query_mask, oov_lists, opt.word2idx)\n pred_list = preprocess_beam_search_result(beam_search_result, opt.idx2word, opt.vocab_size, oov_lists, io.EOS, io.UNK, opt.replace_unk, src_str_list)\n # list of {\"sentences\": [], \"scores\": [], \"attention\": []}\n\n # recover the original order in the dataset\n #seq_pairs = sorted(zip(original_idx_list, src_str_list, pred_list, oov_lists), key=lambda p: p[0])\n #original_idx_list, src_str_list, pred_list, oov_lists = zip(*seq_pairs)\n\n # Process every src in the batch\n\n for src_str, pred, oov in zip(src_str_list, pred_list, oov_lists):\n # src_str: a list of words; trg_str: a list of keyphrases, each keyphrase is a list of words\n # pred_seq_list: a list of sequence objects, sorted by scores\n # oov: a list of oov words\n pred_str_list = pred['sentences'] # predicted sentences from a single src, a list of list of word, with len=[n_best, out_seq_len], does not include the final <EOS>\n pred_score_list = pred['scores']\n pred_attn_list = pred['attention'] # a list of FloatTensor[output sequence length, src_len], with len = [n_best]\n\n decode_out_str = ' '.join(pred_str_list[0])\n decode_out_sent_list = 
nltk.tokenize.sent_tokenize(decode_out_str)\n\n # output the predicted sentences to a file\n with open(join(opt.pred_path, 'output/{}.dec'.format(num_exported_samples)), 'w') as f:\n f.write(io.make_html_safe('\\n'.join(decode_out_sent_list)))\n num_exported_samples += 1\n\n\ndef main(opt):\n # load word2idx and idx2word\n model_dir_path = os.path.dirname(opt.pretrained_model)\n model_dir_path = rreplace.rreplace(model_dir_path, 'ckpt', '', 1)\n with open(join(model_dir_path, 'vocab.pkl'), 'rb') as f:\n word2idx = pkl.load(f)\n idx2word = {i: w for w, i in word2idx.items()}\n opt.word2idx = word2idx\n opt.idx2word = idx2word\n opt.vocab_size = len(word2idx)\n\n # load style label map\n if opt.multi_style:\n with open(join(model_dir_path, 'style_label_map.pkl'), 'rb') as f:\n style_label_map = pkl.load(f)\n else:\n style_label_map = None\n\n if opt.target_style != \"\":\n target_style_idx = style_label_map[opt.target_style]\n else:\n target_style_idx = -1\n\n # init the pretrained model\n #old_opt = torch.load(join(model_dir_path, \"initial.config\"))\n old_opt_dict = json.load(open(join(model_dir_path, \"initial.json\")))\n old_opt = SimpleNamespace(**old_opt_dict)\n old_opt.word2idx = word2idx\n old_opt.idx2word = idx2word\n old_opt.device = opt.device\n opt.control_modes = old_opt.control_modes\n\n if len(opt.control_modes) > 0:\n assert opt.with_ground_truth_input or len(opt.desired_target_numbers) == len(opt.control_modes)\n assert opt.multi_style == old_opt.multi_style\n model = init_pretrained_model(opt.pretrained_model, old_opt)\n\n coll_fn_customized = io.coll_fn_with_attribute(word2idx=word2idx, style_label_map=style_label_map,\n with_style=opt.with_groundtruth_style,\n target_style_idx=target_style_idx, src_max_len=opt.src_max_len,\n trg_max_len=-1,\n control_modes=opt.control_modes, with_ground_truth=opt.with_ground_truth_input,\n desired_target_numbers=opt.desired_target_numbers,\n is_multiple_ref=opt.multiple_reference)\n\n test_loader = DataLoader(Many2ManyDatasetWithAttributes(opt.split, opt.data, opt.control_modes),\n collate_fn=coll_fn_customized,\n num_workers=opt.batch_workers,\n batch_size=opt.batch_size, pin_memory=True, shuffle=False)\n\n # Print out predict path\n print(\"Prediction path: %s\" % opt.pred_path)\n\n # output the summaries to opt.pred_path/output\n predict(test_loader, model, opt)\n\n\nif __name__ == '__main__':\n # load settings for training\n parser = argparse.ArgumentParser(\n description='predict.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n config.predict_opts(parser)\n opt = parser.parse_args()\n\n opt = process_opt(opt)\n\n if torch.cuda.is_available():\n if not opt.gpuid:\n opt.gpuid = 0\n opt.device = torch.device(\"cuda:%d\" % opt.gpuid)\n else:\n opt.device = torch.device(\"cpu\")\n opt.gpuid = -1\n print(\"CUDA is not available, fall back to CPU.\")\n\n assert not (opt.with_ground_truth_input and len(opt.desired_target_numbers) > 0)\n assert not (opt.with_groundtruth_style and opt.target_style != \"\")\n\n main(opt)\n\n"
] | [
[
"torch.load",
"torch.manual_seed",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
denricoNBHS/stem | [
"06a8e0cc2064a9ffa9d2b85c969e6b224f0e90d1"
] | [
"projects/ascii/to_ascii.py"
] | [
"from PIL import Image\nimport numpy as np\n\ndef to_ascii(infile, width=80, outfile=None, font_ratio=.43):\n \n # ASCII character gradient from light to dark\n gradient = \" .:-=+*#%@\"\n\n # open the image\n img = Image.open(infile)\n \n # determine aspect ratio of image\n aspect = img.width/img.height\n \n # resize image to given width at scale \n # (rounding height to integer number of pixels and correcting for font dimensions)\n # then convert to B&W and reduce color depth\n img = img.resize((width, int(font_ratio * width/aspect))).convert('L').quantize(colors=10)\n \n # convert to array\n img = np.array(img)\n \n # replace each pixel value with corresponding value from gradient\n # use .join() to store as string\n img_str = '\\n'.join([''.join([gradient[p] for p in row]) for row in img])\n \n # if a target file is specified, write the string to that file\n # otherwise return the string\n \n if outfile:\n with open(outfile, 'w') as f:\n f.write(img_str)\n else:\n return img_str"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
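The core of `to_ascii` above is the pixel-to-character lookup: after quantizing to 10 gray levels, each pixel value indexes directly into the 10-character gradient. The mapping on its own, with a hand-made pre-quantized "image" instead of file I/O:

    import numpy as np

    gradient = " .:-=+*#%@"               # light-to-dark ramp, 10 levels
    img = np.array([[0, 3, 6, 9],         # tiny 2x4 image, values already 0..9
                    [9, 6, 3, 0]])
    print('\n'.join(''.join(gradient[p] for p in row) for row in img))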
Microsoft/fairlearn | [
"594f42ba4e30c40ef1a61e739686c2d401a03bfa"
] | [
"test/unit/metrics/test_create_group_metric_set.py"
] | [
"# Copyright (c) Microsoft Corporation and Fairlearn contributors.\n# Licensed under the MIT License.\n\nimport json\nfrom test.unit.input_convertors import conversions_for_1d\n\nimport pytest\nimport sklearn.metrics as skm\n\nfrom fairlearn.metrics import MetricFrame\nfrom fairlearn.metrics._group_metric_set import (\n _create_group_metric_set,\n _process_predictions,\n _process_sensitive_features,\n)\n\nfrom .sample_loader import load_sample_dashboard\n\n_BC_1P_1F = \"bc-1p-1f.json\"\n_BC_2P_3F = \"bc-2p-3f.json\"\n\n\ndef validate_dashboard_dictionary(dashboard_dict):\n \"\"\"Ensure dictionary is a valid Dashboard.\"\"\"\n schema_type = dashboard_dict[\"schemaType\"]\n assert schema_type == \"dashboardDictionary\"\n schema_version = dashboard_dict[\"schemaVersion\"]\n # Will want to update the following prior to release\n assert schema_version == 0\n\n pred_type = dashboard_dict[\"predictionType\"]\n assert pred_type in {\"binaryClassification\", \"regression\", \"probability\"}\n len_y_true = len(dashboard_dict[\"trueY\"])\n num_y_pred = len(dashboard_dict[\"predictedY\"])\n for y_pred in dashboard_dict[\"predictedY\"]:\n assert len(y_pred) == len_y_true\n\n len_model_names = len(dashboard_dict[\"modelNames\"])\n assert len_model_names == num_y_pred\n\n num_sf = len(dashboard_dict[\"precomputedFeatureBins\"])\n for sf in dashboard_dict[\"precomputedFeatureBins\"]:\n sf_vector = sf[\"binVector\"]\n assert len(sf_vector) == len_y_true\n for val in sf_vector:\n assert isinstance(val, int)\n sf_classes = sf[\"binLabels\"]\n assert len(sf_classes) == 1 + max(sf_vector)\n\n expected_keys = sorted(list(dashboard_dict[\"precomputedMetrics\"][0][0].keys()))\n assert len(dashboard_dict[\"precomputedMetrics\"]) == num_sf\n for metrics_arr in dashboard_dict[\"precomputedMetrics\"]:\n assert len(metrics_arr) == num_y_pred\n for m in metrics_arr:\n keys = sorted(list(m.keys()))\n assert keys == expected_keys\n\n\nclass TestProcessSensitiveFeatures:\n @pytest.mark.parametrize(\"transform_feature\", conversions_for_1d)\n def test_smoke(self, transform_feature):\n sf_name = \"My SF\"\n sf_vals = transform_feature([1, 3, 3, 1])\n\n sf = {sf_name: sf_vals}\n result = _process_sensitive_features(sf)\n assert isinstance(result, list)\n assert len(result) == 1\n assert result[0][\"featureBinName\"] == sf_name\n assert result[0][\"binVector\"] == [0, 1, 1, 0]\n assert result[0][\"binLabels\"] == [\"1\", \"3\"]\n\n @pytest.mark.parametrize(\"transform_feature\", conversions_for_1d)\n def test_smoke_string_groups(self, transform_feature):\n sf_name = \"My SF\"\n sf_vals = transform_feature([\"b\", \"a\", \"c\", \"a\", \"b\"])\n\n sf = {sf_name: sf_vals}\n result = _process_sensitive_features(sf)\n assert isinstance(result, list)\n assert len(result) == 1\n assert result[0][\"featureBinName\"] == sf_name\n assert result[0][\"binVector\"] == [1, 0, 2, 0, 1]\n assert result[0][\"binLabels\"] == [\"a\", \"b\", \"c\"]\n\n def test_result_is_sorted(self):\n sf_vals = [1, 2, 3, 1]\n\n sf = {\"b\": sf_vals, \"a\": sf_vals, \"c\": sf_vals}\n result = _process_sensitive_features(sf)\n assert isinstance(result, list)\n assert len(result) == 3\n for r in result:\n assert r[\"binVector\"] == [0, 1, 2, 0]\n assert r[\"binLabels\"] == [\"1\", \"2\", \"3\"]\n result_names = [r[\"featureBinName\"] for r in result]\n assert result_names == [\"a\", \"b\", \"c\"]\n\n\nclass TestProcessPredictions:\n @pytest.mark.parametrize(\"transform_y_p\", conversions_for_1d)\n def test_smoke(self, transform_y_p):\n y_pred = transform_y_p([0, 
1, 1, 0])\n name = \"my model\"\n\n predictions = {name: y_pred}\n names, preds = _process_predictions(predictions)\n assert isinstance(names, list)\n assert isinstance(preds, list)\n assert len(names) == 1\n assert len(preds) == 1\n assert names[0] == name\n assert isinstance(preds[0], list)\n assert preds[0] == [0, 1, 1, 0]\n\n @pytest.mark.parametrize(\"transform_y_1\", conversions_for_1d)\n @pytest.mark.parametrize(\"transform_y_2\", conversions_for_1d)\n @pytest.mark.parametrize(\"transform_y_3\", conversions_for_1d)\n def test_results_are_sorted(self, transform_y_1, transform_y_2, transform_y_3):\n y_p1 = transform_y_1([0, 0, 1, 1])\n y_p2 = transform_y_2([0, 1, 0, 1])\n y_p3 = transform_y_3([1, 1, 0, 0])\n predictions = {\"b\": y_p1, \"a\": y_p2, \"c\": y_p3}\n\n names, preds = _process_predictions(predictions)\n assert names == [\"a\", \"b\", \"c\"]\n for i in range(3):\n assert isinstance(preds[i], list)\n assert preds[0] == [0, 1, 0, 1]\n assert preds[1] == [0, 0, 1, 1]\n assert preds[2] == [1, 1, 0, 0]\n\n\nclass TestCreateGroupMetricSet:\n @pytest.mark.parametrize(\"t_y_t\", conversions_for_1d)\n @pytest.mark.parametrize(\"t_y_p\", conversions_for_1d)\n @pytest.mark.parametrize(\"t_sf\", conversions_for_1d)\n def test_round_trip_1p_1f(self, t_y_t, t_y_p, t_sf):\n expected = load_sample_dashboard(_BC_1P_1F)\n\n y_true = t_y_t(expected[\"trueY\"])\n y_pred = {expected[\"modelNames\"][0]: t_y_p(expected[\"predictedY\"][0])}\n\n sf_file = expected[\"precomputedFeatureBins\"][0]\n sf = [sf_file[\"binLabels\"][x] for x in sf_file[\"binVector\"]]\n sensitive_feature = {sf_file[\"featureBinName\"]: t_sf(sf)}\n\n actual = _create_group_metric_set(\n y_true, y_pred, sensitive_feature, \"binary_classification\"\n )\n validate_dashboard_dictionary(actual)\n assert expected == actual\n\n @pytest.mark.parametrize(\"t_y_t\", conversions_for_1d)\n @pytest.mark.parametrize(\"t_y_p\", conversions_for_1d)\n @pytest.mark.parametrize(\"t_sf\", conversions_for_1d)\n def test_round_trip_2p_3f(self, t_y_t, t_y_p, t_sf):\n expected = load_sample_dashboard(_BC_2P_3F)\n\n y_true = t_y_t(expected[\"trueY\"])\n\n y_pred = {}\n y_p_ts = [t_y_p, lambda x: x] # Only transform one y_p\n for i, name in enumerate(expected[\"modelNames\"]):\n y_pred[name] = y_p_ts[i](expected[\"predictedY\"][i])\n\n sensitive_features = {}\n t_sfs = [lambda x: x, t_sf, lambda x: x] # Only transform one sf\n for i, sf_file in enumerate(expected[\"precomputedFeatureBins\"]):\n sf = [sf_file[\"binLabels\"][x] for x in sf_file[\"binVector\"]]\n sensitive_features[sf_file[\"featureBinName\"]] = t_sfs[i](sf)\n\n actual = _create_group_metric_set(\n y_true, y_pred, sensitive_features, \"binary_classification\"\n )\n validate_dashboard_dictionary(actual)\n assert expected == actual\n\n def test_specific_metrics(self):\n y_t = [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1]\n y_p = [1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0]\n s_f = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]\n\n expected = MetricFrame(\n metrics={\n \"accuracy_score\": skm.accuracy_score,\n \"roc_auc_score\": skm.roc_auc_score,\n },\n y_true=y_t,\n y_pred=y_p,\n sensitive_features=s_f,\n )\n\n predictions = {\"some model\": y_p}\n sensitive_feature = {\"my sf\": s_f}\n\n actual = _create_group_metric_set(\n y_t, predictions, sensitive_feature, \"binary_classification\"\n )\n\n # Do some sanity checks\n validate_dashboard_dictionary(actual)\n assert actual[\"trueY\"] == y_t\n assert actual[\"predictedY\"][0] == y_p\n 
assert actual[\"precomputedFeatureBins\"][0][\"binVector\"] == s_f\n assert len(actual[\"precomputedMetrics\"][0][0]) == 12\n\n # Cross check the two metrics we computed\n # Comparisons simplified because s_f was already {0,1}\n actual_acc = actual[\"precomputedMetrics\"][0][0][\"accuracy_score\"]\n assert actual_acc[\"global\"] == expected.overall[\"accuracy_score\"]\n assert actual_acc[\"bins\"] == list(expected.by_group[\"accuracy_score\"])\n\n actual_roc = actual[\"precomputedMetrics\"][0][0][\"balanced_accuracy_score\"]\n assert actual_roc[\"global\"] == expected.overall[\"roc_auc_score\"]\n assert actual_roc[\"bins\"] == list(expected.by_group[\"roc_auc_score\"])\n\n def test_roc_auc_single_class(self, recwarn):\n # Note that y_t and s_f are identical, so subgroup evaluation will fail for\n # roc_auc_score\n y_p = [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1]\n y_t = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]\n s_f = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]\n\n predictions = {\"some model\": y_p}\n sensitive_feature = {\"my sf\": s_f}\n\n actual = _create_group_metric_set(\n y_t, predictions, sensitive_feature, \"binary_classification\"\n )\n\n # Check that the error case was intercepted for roc_auc_score\n validate_dashboard_dictionary(actual)\n actual_roc = actual[\"precomputedMetrics\"][0][0][\"balanced_accuracy_score\"]\n expected_all_roc = skm.roc_auc_score(y_t, y_p)\n assert actual_roc[\"global\"] == expected_all_roc\n assert actual_roc[\"bins\"] == [0, 0] # We substituted zero\n # Check that the right warnings were issued\n assert len(recwarn) == 3\n msgs = sorted([str(x.message) for x in recwarn])\n # We get the message from roc_auc_score once for each subgroup\n assert msgs[0] == \"Evaluation of roc_auc_score failed. Substituting 0\"\n assert msgs[1] == \"Evaluation of roc_auc_score failed. 
Substituting 0\"\n assert msgs[2].startswith(\"Recall is ill-defined and being set to 0.0\")\n\n def test_json_serializable(self):\n y_t = [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1]\n y_p = [1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0]\n s_f = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]\n\n predictions = {\"some model\": y_p}\n sensitive_feature = {\"my sf\": s_f}\n\n actual = _create_group_metric_set(\n y_t, predictions, sensitive_feature, \"binary_classification\"\n )\n\n # Check that we can turn the dictionary into JSON\n # Sometimes, you need to listen carefully to the quack\n result = json.dumps(actual)\n assert isinstance(result, str)\n\n def test_regression_prediction_type(self):\n # For regression, both y_t and y_p can have floating point values\n y_t = [0, 1, 1, 0, 1, 1, 1.5, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1]\n y_p = [1, 1, 1, 0, 1, 1, 1.5, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0]\n s_f = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]\n\n predictions = {\"some model\": y_p}\n sensitive_feature = {\"my sf\": s_f}\n\n # Using the `regression` prediction type should not crash\n result = _create_group_metric_set(\n y_t, predictions, sensitive_feature, \"regression\"\n )\n assert result[\"predictionType\"] == \"regression\"\n assert len(result[\"precomputedMetrics\"][0][0]) == 6\n\n def test_probability_prediction_type(self):\n # For probability, y_p can have real values [0, 1]\n y_t = [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1]\n y_p = [\n 0.9,\n 1,\n 1,\n 0.1,\n 1,\n 1,\n 0.8,\n 0,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0,\n 1,\n 0,\n ]\n s_f = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]\n\n predictions = {\"some model\": y_p}\n sensitive_feature = {\"my sf\": s_f}\n\n # Using the `probability` prediction type should not crash\n result = _create_group_metric_set(\n y_t, predictions, sensitive_feature, \"probability\"\n )\n assert result[\"predictionType\"] == \"probability\"\n assert len(result[\"precomputedMetrics\"][0][0]) == 10\n"
] | [
[
"sklearn.metrics.roc_auc_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
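The tests above validate dashboards against fairlearn's `MetricFrame`, which computes each metric both overall and per sensitive-feature group. The pattern in isolation, with toy labels and groups:

    import sklearn.metrics as skm
    from fairlearn.metrics import MetricFrame

    mf = MetricFrame(
        metrics={"accuracy_score": skm.accuracy_score},
        y_true=[0, 1, 1, 0, 1, 0],
        y_pred=[0, 1, 0, 0, 1, 1],
        sensitive_features=[0, 0, 0, 1, 1, 1],
    )
    print(mf.overall)    # metric over the full dataset
    print(mf.by_group)   # one value per sensitive-feature group

This is exactly the `overall`/`by_group` pair the tests cross-check against the dashboard's "global" and "bins" entries.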
GimpelZhang/git_test | [
"78dddbdc71209c3cfba58d831cfde1588989f8ab"
] | [
"notebooks/SLAM/extended_kalman_filter.py"
] | [
"\"\"\"\n\nExtended kalman filter (EKF) localization sample\n\nauthor: Atsushi Sakai (@Atsushi_twi)\n\nhttps://github.com/AtsushiSakai/PythonRobotics/blob/master/Localization/extended_kalman_filter/\n\"\"\"\n\nimport math\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Covariance for EKF:\n# 运动模型协方差:\nQ = np.diag([\n 0.1, # variance of location on x-axis\n 0.1, # variance of location on y-axis\n np.deg2rad(10), # variance of yaw angle\n 10.0 # variance of velocity\n]) ** 2 # predict state covariance\n# 观测模型协方差:\nR = np.diag([1.0, 1.0]) ** 2 # Observation x,y position covariance\n\n# Simulation parameter\nINPUT_NOISE = np.diag([1.0, np.deg2rad(30.0)]) ** 2\nGPS_NOISE = np.diag([0.5, 0.5]) ** 2\n\nDT = 0.1 # time tick [s]\nSIM_TIME = 50.0 # simulation time [s]\n\nshow_animation = True\n\n\ndef calc_input():\n v = 1.0 # [m/s]\n yawrate = 0.1 # [rad/s]\n u = np.array([[v], [yawrate]])\n return u\n\n\ndef observation(xTrue, xd, u):\n \"\"\"\n 执行仿真过程,不是EKF的一部分\n \"\"\"\n # 轨迹真值\n xTrue = motion_model(xTrue, u)\n\n # add noise to gps x-y\n z = observation_model(xTrue) + GPS_NOISE @ np.random.randn(2, 1)\n\n # add noise to input\n ud = u + INPUT_NOISE @ np.random.randn(2, 1)\n\n # 航迹推测得出的轨迹:\n xd = motion_model(xd, ud)\n\n return xTrue, z, xd, ud\n\n\ndef motion_model(x, u):\n \"\"\"\n 运动模型\n \"\"\"\n F = np.array([[1.0, 0, 0, 0],\n [0, 1.0, 0, 0],\n [0, 0, 1.0, 0],\n [0, 0, 0, 0]])\n # 注意:在这里B矩阵中耦合了状态向量x,因此并不是简单的线性模型:\n B = np.array([[DT * math.cos(x[2, 0]), 0],\n [DT * math.sin(x[2, 0]), 0],\n [0.0, DT],\n [1.0, 0.0]])\n\n x = F @ x + B @ u\n\n return x\n\n\ndef observation_model(x):\n H = np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0]\n ])\n\n z = H @ x\n\n return z\n\n\ndef jacob_f(x, u):\n \"\"\"\n Jacobian of Motion Model\n\n motion model\n x_{t+1} = x_t+v*dt*cos(yaw)\n y_{t+1} = y_t+v*dt*sin(yaw)\n yaw_{t+1} = yaw_t+omega*dt\n v_{t+1} = v{t}\n so\n dx/dyaw = -v*dt*sin(yaw)\n dx/dv = dt*cos(yaw)\n dy/dyaw = v*dt*cos(yaw)\n dy/dv = dt*sin(yaw)\n \"\"\"\n yaw = x[2, 0]\n v = u[0, 0]\n jF = np.array([\n [1.0, 0.0, -DT * v * math.sin(yaw), DT * math.cos(yaw)],\n [0.0, 1.0, DT * v * math.cos(yaw), DT * math.sin(yaw)],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n\n return jF\n\n\ndef jacob_h():\n # Jacobian of Observation Model\n jH = np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0]\n ])\n\n return jH\n\n\ndef ekf_estimation(xEst, PEst, z, u):\n # Predict\n xPred = motion_model(xEst, u)\n jF = jacob_f(xEst, u)\n PPred = jF @ PEst @ jF.T + Q\n\n # Update\n jH = jacob_h()\n zPred = observation_model(xPred)\n y = z - zPred\n S = jH @ PPred @ jH.T + R\n K = PPred @ jH.T @ np.linalg.inv(S)\n xEst = xPred + K @ y\n PEst = (np.eye(len(xEst)) - K @ jH) @ PPred\n return xEst, PEst\n\n\ndef plot_covariance_ellipse(xEst, PEst): # pragma: no cover\n Pxy = PEst[0:2, 0:2]\n eigval, eigvec = np.linalg.eig(Pxy)\n\n if eigval[0] >= eigval[1]:\n bigind = 0\n smallind = 1\n else:\n bigind = 1\n smallind = 0\n\n t = np.arange(0, 2 * math.pi + 0.1, 0.1)\n a = math.sqrt(eigval[bigind])\n b = math.sqrt(eigval[smallind])\n x = [a * math.cos(it) for it in t]\n y = [b * math.sin(it) for it in t]\n angle = math.atan2(eigvec[bigind, 1], eigvec[bigind, 0])\n rot = np.array([[math.cos(angle), math.sin(angle)],\n [-math.sin(angle), math.cos(angle)]])\n fx = rot @ (np.array([x, y]))\n px = np.array(fx[0, :] + xEst[0, 0]).flatten()\n py = np.array(fx[1, :] + xEst[1, 0]).flatten()\n plt.plot(px, py, \"--r\")\n\n\ndef main():\n print(__file__ + \" start!!\")\n\n time = 0.0\n\n # State Vector [x y yaw v]'\n xEst = np.zeros((4, 
1)) # all initial values are zero\n xTrue = np.zeros((4, 1))\n PEst = np.eye(4) # use the identity matrix as the initial state covariance\n\n xDR = np.zeros((4, 1)) # Dead reckoning\n\n # history\n hxEst = xEst\n hxTrue = xTrue\n hxDR = xTrue\n hz = np.zeros((2, 1))\n\n while SIM_TIME >= time:\n time += DT\n u = calc_input()\n\n xTrue, z, xDR, ud = observation(xTrue, xDR, u)\n\n xEst, PEst = ekf_estimation(xEst, PEst, z, ud)\n\n # store data history\n hxEst = np.hstack((hxEst, xEst))\n hxDR = np.hstack((hxDR, xDR))\n hxTrue = np.hstack((hxTrue, xTrue))\n hz = np.hstack((hz, z))\n\n if show_animation:\n plt.cla()\n # for stopping simulation with the esc key.\n plt.gcf().canvas.mpl_connect('key_release_event',\n lambda event: [exit(0) if event.key == 'escape' else None])\n plt.plot(hz[0, :], hz[1, :], \".g\")\n plt.plot(hxTrue[0, :].flatten(),\n hxTrue[1, :].flatten(), \"-b\")\n plt.plot(hxDR[0, :].flatten(),\n hxDR[1, :].flatten(), \"-k\")\n plt.plot(hxEst[0, :].flatten(),\n hxEst[1, :].flatten(), \"-r\")\n plot_covariance_ellipse(xEst, PEst)\n plt.axis(\"equal\")\n plt.grid(True)\n plt.pause(0.001)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.diag",
"numpy.hstack",
"numpy.linalg.inv",
"numpy.linalg.eig",
"numpy.arange",
"numpy.eye",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"numpy.deg2rad",
"numpy.random.randn",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.pause"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
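A quick usage sketch for the EKF sample above: one simulated step followed by one predict/update cycle. The module name `ekf_sample` is hypothetical; the functions are the ones defined in the record.

import numpy as np
from ekf_sample import calc_input, observation, ekf_estimation  # hypothetical module name for the file above

xEst, PEst = np.zeros((4, 1)), np.eye(4)  # state [x, y, yaw, v] and its covariance
xTrue = np.zeros((4, 1))                  # simulated ground truth
xDR = np.zeros((4, 1))                    # dead-reckoning baseline

u = calc_input()                                # constant velocity / yaw-rate command
xTrue, z, xDR, ud = observation(xTrue, xDR, u)  # one noisy simulation step
xEst, PEst = ekf_estimation(xEst, PEst, z, ud)  # EKF predict + update
print(xEst.ravel(), np.diag(PEst))              # estimate and its variances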
idraper/Physics-Project | [
"6bd800813a060ada7026040e9d58150865a7327d"
] | [
"wave_math.py"
] | [
"import numpy as np\nimport sys, math\nsys.setrecursionlimit(3000)\n\nclass Math():\n\tdef __init__(self, amps, sfs):\n\t\tself.amplitudes = amps\n\t\tself.N = len(self.amplitudes)\n\t\tself.sample_frequency = sfs\n\t\tself.nyquist_limit = self.sample_frequency / 2\n\t\tself.calc_parts()\n\t\tself.calc_mag()\n\t\tself.calc_freq()\n\t\t\n\t\tself.real_part = []\n\t\tself.imag_part = []\n\t\tself.mags = []\n\t\tself.freqs = []\n\t\t\n\tdef calc_parts(self):\t\n\t\ttmp = 0.0\n\t\tfor k in range(self.N):\n\t\t\tfor n in range(self.N):\n\t\t\t\ttmp += self.amplitudes[n] * np.cos((2 * np.PI * k * n)/ self.N)\n\t\t\tself.real_part.push_back(tmp)\n\t\t\ttmp = 0.0\n\t\t\t\n\t\tfor k in range(self.N):\n\t\t\tfor n in range(self.N):\n\t\t\t\ttmp += self.amplitudes[n] * np.sin((2 * np.PI * k * n)/ self.N)\n\t\t\tself.imag_part.push_back(tmp)\n\t\t\ttmp = 0.0\n\t\t\t\n\t\tfor i in range(self.N):\n\t\t\tprint (\"Re[f(\", i, \")] : \", self.real_part[i], \"\\t\\tIm[f(\", i,\")] : \", self.imag_part[i])\n\t\t\t\n\t\treturn\n\t\t\n\tdef calc_mag(self):\n\t\ttmp = 0.0\n\t\tfor i in range(self.N):\n\t\t\ttmp = np.sqrt(np.pow(self.real_part[i], 2) + np.pow(self.imag_part[i], 2))\n\t\t\tself.mags.push_back(tmp)\n\t\t\t\n\t\tfor i in range(self.N):\n\t\t\tprint (\"Magnitudes: \", i, self.mags[i])\n\t\t\t\n\t\treturn\n\t\t\n\tdef calc_freq(self):\n\t\ttmp = 0.0\n\t\tfor i in range(self.N - self.nyquist_limit):\n\t\t\ttmp = (self.mags[i] * 2) / self.N\n\t\t\tself.freqs.push_back(tmp)\n\t\t\t\n\t\treturn\n\t\t\t\n\t\t\t\nclass FastFFT():\n\tdef __init__(self, a, sR=44100, useW=True):\n\t\tself.RATE = sR\n\t\tself.nyquist_limit = self.RATE / 2\n\t\tself.amps = a\n\t\tself.N = len(self.amps)\n\t\tself.c_amps_r = []\n\t\tself.c_amps_c = []\n\t\tself.freqs = []\n\t\t\n\t\tif (useW):\n\t\t\tprint (\"Applying Hann Window\")\n\t\t\tself.applyHanningWindow()\n\t\t\t\n\t\tif (not self.isPowOfTwo(self.N)):\n\t\t\tprint (\"Zero padding vector \", self.N)\n\t\t\tself.zeroPad()\n\t\t\tprint (\"...Done zero padding.\")\n\t\t\t\n\t\tself.frame_size = self.N / self.RATE\n\t\t\n\t\tprint (\"Starting bit reversal\")\n\t\tprint (self.N)\n\t\tself.bitReverseVector(self.amps, self.N)\n\t\tprint (\"... 
Bit reversal done.\")\n\t\t\n\t\tprint (\"Initializing complex vector...\")\n\t\tself.c_amps_r = self.amps\n\t\tfor i in range(len(self.c_amps_r)):\n\t\t\tself.c_amps_c.append(0)\n\t\tprint (\"Initialization done...\")\n\t\t\n\t\tprint (\"Calculating FFT...\")\n\t\tself.calcFFT()\n\t\tprint (\"...FFT analysis done.\")\n\t\tprint (\"Processing frequencies...\")\n\t\tself.calcFreqs();\n\t\tprint (\"...Done\")\n\t\n\tdef isPowOfTwo(self, val):\n\t\tcompare = 1\n\t\t\n\t\twhile (compare < val):\n\t\t\tcompare = int(compare) << 1\n\t\tif val == compare:\n\t\t\treturn True\n\t\treturn False\n\t\n\tdef makePowOfTwo(self, val):\n\t\tcompare = 1\n\t\tif val > compare:\n\t\t\twhile val > compare:\n\t\t\t\tcompare = int(compare) << 1\n\t\t\t\t#print (val, compare)\n\t\telif val < compare:\n\t\t\tcompare = 1\n\t\treturn compare\n\t\n\tdef zeroPad(self):\n\t\toldN = self.N\n\t\tself.N = self.makePowOfTwo(self.N)\n\t\tfor i in range(self.N - oldN):\n\t\t\tself.amps = np.append(self.amps, 0)\n\t\t\n\tdef applyHanningWindow(self):\n\t\tk = 2 * np.pi / (self.N - 1)\n\t\ttmp = np.arange(len(self.amps))\n\t\tself.amps = self.amps * (1.0/2.0 * (1.0 - np.cos(k * tmp)))\n\t\treturn\n\t\t\n\tdef bitReverseVector(self, vToReverse, size):\n\t\teven = []\n\t\todd = []\n\t\t\n\t\tif size == 2:\n\t\t\treturn\n\t\t\t\n\t\t\t\n\t\tfor i in range(0, len(vToReverse), 2):\n\t\t\teven.append(vToReverse[i])\n\t\tfor i in range(1, len(vToReverse), 2):\n\t\t\todd.append(vToReverse[i])\n\t\t\t\n\t\tself.bitReverseVector(even, len(even))\n\t\tself.bitReverseVector(odd, len(odd))\n\t\t\n\t\tfor i in range(int(size/2)):\n\t\t\tvToReverse[int(size/2) + i] = odd[i]\n\t\treturn\n\t\n\tdef calcFFT(self):\n\t\teven_c = 0.0\n\t\teven_r = 0.0\n\t\todd_c = 0.0\n\t\todd_r = 0.0\n\t\todd_x_t_c = 0.0\n\t\todd_x_t_r = 0.0\n\t\ttmp_c = 0.0\n\t\ttmp_r = 0.0\n\t\t\n\t\tWN = np.pi * 2 / self.N\n\t\tlog2N = np.log2(self.N)\n\t\t\n\t\tWnK_tbl_c = []\n\t\tWnK_tbl_r = []\n\t\tfor k in range(self.N + 1):\n\t\t\tWnK_tbl_c.append(tmp_c)\n\t\t\tWnK_tbl_r.append(tmp_r)\n\t\t\tWnK_tbl_r[k] = np.cos(WN * k)\n\t\t\tWnK_tbl_c[k] = np.sin(WN * k)\n\t\t\t\n\t\tstride = 1\n\t\twhile stride < self.N:\n\t\t\tstage = np.log2(self.N / stride)\n\t\t\t#print (\"Stage: \", stage)\n\t\t\tk = 0\n\t\t\twhile k < self.N:\n\t\t\t\tn = 0\n\t\t\t\tfor n in range(stride):\t\n\t\t\t\t\ti1 = k + n\n\t\t\t\t\ti2 = k + n + stride\n\t\t\t\t\tWnK_i = ((n * stride) % self.N)\n\t\t\t\t\t\n\t\t\t\t\teven_r = self.c_amps_r[i1]\n\t\t\t\t\teven_c = self.c_amps_c[i1]\n\t\t\t\t\todd_r = self.c_amps_r[i2]\n\t\t\t\t\todd_c = self.c_amps_c[i2]\n\t\t\t\t\todd_x_t_r = WnK_tbl_r[WnK_i] * odd_r + WnK_tbl_c[WnK_i] * odd_c\n\t\t\t\t\todd_x_t_c = WnK_tbl_c[WnK_i] * odd_r + WnK_tbl_r[WnK_i] * odd_c\n\t\t\t\t\t\n\t\t\t\t\tself.c_amps_r[i1] = even_r + odd_x_t_r\n\t\t\t\t\tself.c_amps_c[i1] = even_r + odd_x_t_c\n\t\t\t\t\tself.c_amps_r[i2] = even_r - odd_x_t_r\n\t\t\t\t\tself.c_amps_c[i2] = even_c + odd_x_t_c\n\t\t\t\tk += (stride << 1)\t\t\n\t\t\tstride = stride << 1\n\t\treturn\n\t\t\n\tdef calcFreqs(self):\n\t\ttmp_v = 0.0\n\t\ttmp_m = 0.0\n\t\tfor i in range(len(self.c_amps_c)):\n\t\t\ttmp_v = i / self.frame_size\n\t\t\ttmp_m = math.sqrt(self.c_amps_r[i]**2 + self.c_amps_c[i]**2)\n\t\t\ttmp_m = (tmp_m * 2) / self.N\n\t\t\tself.freqs.append(tmp_m)\n\t\treturn self.freqs\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n"
] | [
[
"numpy.log2",
"numpy.cos",
"numpy.sin",
"numpy.append",
"numpy.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
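The 2/N magnitude scaling used in calc_freq/calcFreqs above can be sanity-checked against numpy's built-in FFT. A minimal sketch; the 8-sample sine test signal is made up for illustration:

import numpy as np

t = np.arange(8) / 8.0
signal = np.sin(2 * np.pi * 2 * t)         # two full cycles across the window

spectrum = np.fft.rfft(signal)             # complex bins 0..N/2
mags = np.abs(spectrum) * 2 / len(signal)  # the same 2/N scaling as calc_freq
print(mags)                                # peak of ~1.0 at bin 2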
JinhuaSu/todomanager | [
"accacb9f2d2d09ec7595c869e822fb26ab0a187b"
] | [
"src/read_todo_excel.py"
] | [
"#%%\nimport pandas as pd\ndf = pd.read_excel(\"../data/tomatodo_history_765.xlsx\",)\ndf\n# %%\n\ncol_names = [\"专注时间\",\"待办名称\",\"专注时长(分钟)\",\"心得\",\"状态\",\"完成度\"]\ndf_new = df.iloc[6:,1:]\ndf_new.columns = col_names\ndf_new.index = range(len(df_new))\ndf_new\n\n# %%\n# 心得最好包含尽可能多的信息,心得可以是一个json格式,直接读\n# 类型等填表,或者在起名时就有一些自动化了\nscore_map = {\"整理\":1, \"矩阵模拟开发\":3, \"卫生\":1, \"课外阅读\":1, \"刷手机\": -2, \"锻炼\":1, \"开发\":2, \"调研\":1, \"沟通\":2}\n(df_new[\"专注时长(分钟)\"] / 60 * df_new[\"待办名称\"].apply(lambda x: score_map[x])).sum()\n# %%\n# 5月7日 12.416分\n# 5月8日 16.717分\n#%%\n# 番茄挑战\ndf_fq = pd.read_csv(\"../data/番茄挑战v1-报告.csv\")\ndf_fq\n# %%\ndf_fq[\"time\"] = pd.to_datetime(df_fq[\"添加时间\"])\n# %%\ndf_new\n# %%\ndf_new[\"start\"] = df_new[\"专注时间\"].apply(lambda x: pd.to_datetime(x.split(\"至\")[0]))\n# %%\ndf_new[\"end\"] = df_new[\"专注时间\"].apply(lambda x: pd.to_datetime(x.split(\"至\")[1]))\n\n# %%\nidx=0\ndef get_one_score(df_new, df_fq, idx):\n # print((df_new[\"start\"] - df_fq[\"time\"][idx]).apply(lambda x:x.days* 60 *60 *24+ x.seconds))\n df_select = df_new[(df_new[\"start\"] - df_fq[\"time\"][idx]).apply(lambda x: x.days* 60 *60 *24+ x.seconds < 240 and x.days* 60 *60 *24+ x.seconds>-120)]\n expect_time = df_fq.loc[idx,\"挑战时间(分钟)\"]\n\n cost_time = list(df_select[\"专注时长(分钟)\"])[0]\n type_score = score_map[list(df_select[\"待办名称\"])[0]]\n def score_multi_type(str_):\n base = 1\n if type(str_) != str:\n return base\n if \"ddl当日\" in str_:\n base *= 1.2\n if \"赚钱\" in str_:\n base *= 1.2\n if \"当日突发\" in str_:\n base *= 0.8\n if \"人情\" in str_:\n base *= 1.2\n if \"不紧急但重要\" in str_:\n base *= 1.4\n return base\n score_factor = score_multi_type(df_fq.loc[idx,\"挑战类别\"])\n score_factor\n def get_time_factor(expect, real):\n if expect >= real:\n return 1\n elif expect >= real * 1.2:\n return 0.5\n elif expect >= real * 1.5:\n return 0.2\n else:\n return -1\n time_factor = get_time_factor(expect_time,cost_time)\n return expect_time > cost_time, expect_time /60 * type_score* score_factor * time_factor\nget_one_score(df_new, df_fq, idx)\n# %%\nfq_score = 0\nfq_level = 0\nfor idx in df_fq.index:\n flag, score_tmp = get_one_score(df_new, df_fq, idx)\n fq_level += (flag - 0.5) * 2\n fq_score += score_tmp\nfq_score\n# %%\nfq_level\n# %%\nget_one_score(df_new, df_fq, idx)\n# %%\n# 5月8日 -3级 16.717-4.12=12.597"
] | [
[
"pandas.read_excel",
"pandas.to_datetime",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
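The hand-rolled time-window matching in get_one_score above pairs each challenge row with the focus session starting within (-120 s, +240 s) of it. A tolerance-based asof join is a tidier approximation; a rough sketch reusing the frames from the script (the symmetric 4-minute tolerance is an assumption and drops the asymmetric -120 s bound):

import pandas as pd

left = df_fq.sort_values("time")     # merge_asof requires sorted keys
right = df_new.sort_values("start")
paired = pd.merge_asof(left, right,
                       left_on="time", right_on="start",
                       direction="nearest",
                       tolerance=pd.Timedelta(minutes=4))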
Modelmat/frc-characterization | [
"76cd65b777eb2d3eb151d923dee7c297fb552514"
] | [
"frc_characterization/logger_analyzer/data_analyzer.py"
] | [
"# This GUI analyzes the data collected by the data logger. Support is\n# provided for both feedforward and feedback analysis, as well as diagnostic\n# plotting.\n\nimport copy\nimport json\nimport logging\nimport math\nimport os\nimport tkinter\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\nimport control as cnt\nimport frccontrol as frccnt\nimport matplotlib\nimport pint\n\n# This fixes a crash on macOS Mojave by using the TkAgg backend\n# https://stackoverflow.com/a/34109240\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\nfrom frc_characterization.newproject import Tests, Units\nfrom frc_characterization.utils import FloatEntry, IntEntry\nfrom mpl_toolkits.mplot3d import Axes3D\n\nlogger = logging.getLogger(\"logger\")\nlog_format = \"%(asctime)s:%(msecs)03d %(levelname)-8s: %(name)-20s: %(message)s\"\n\nlogging.basicConfig(level=logging.INFO, format=log_format)\n\n# These are the indices of data stored in the json file\nTIME_COL = 0\nBATTERY_COL = 1\nAUTOSPEED_COL = 2\nL_VOLTS_COL = 3\nR_VOLTS_COL = 4\nL_ENCODER_P_COL = 5\nR_ENCODER_P_COL = 6\nL_ENCODER_V_COL = 7\nR_ENCODER_V_COL = 8\nGYRO_ANGLE_COL = 9\n\n# The are the indices of data returned from prepare_data function\nPREPARED_TM_COL = 0\nPREPARED_V_COL = 1\nPREPARED_POS_COL = 2\nPREPARED_VEL_COL = 3\nPREPARED_ACC_COL = 4\nPREPARED_COS_COL = 5\n\nPREPARED_MAX_COL = PREPARED_ACC_COL\n\nJSON_DATA_KEYS = [\"slow-forward\", \"slow-backward\", \"fast-forward\", \"fast-backward\"]\n\n\nclass Analyzer:\n def __init__(self, dir):\n self.mainGUI = tkinter.Tk()\n\n self.project_path = StringVar(self.mainGUI)\n self.project_path.set(dir)\n\n self.window_size = IntVar(self.mainGUI)\n self.window_size.set(8)\n\n self.motion_threshold = DoubleVar(self.mainGUI)\n self.motion_threshold.set(0.2)\n\n self.subset = StringVar(self.mainGUI)\n\n self.units = StringVar(self.mainGUI)\n self.units.set(Units.FEET.value)\n\n self.track_width = DoubleVar(self.mainGUI)\n self.track_width.set(\"N/A\")\n\n self.stored_data = None\n\n self.prepared_data = None\n\n self.ks = DoubleVar(self.mainGUI)\n self.kv = DoubleVar(self.mainGUI)\n self.ka = DoubleVar(self.mainGUI)\n self.kcos = DoubleVar(self.mainGUI)\n self.r_square = DoubleVar(self.mainGUI)\n\n self.qp = DoubleVar(self.mainGUI)\n self.qp.set(1)\n\n self.qv = DoubleVar(self.mainGUI)\n self.qv.set(1.5)\n\n self.max_effort = DoubleVar(self.mainGUI)\n self.max_effort.set(7)\n\n self.period = DoubleVar(self.mainGUI)\n self.period.set(0.02)\n\n self.max_controller_output = DoubleVar(self.mainGUI)\n self.max_controller_output.set(12)\n\n self.controller_time_normalized = BooleanVar(self.mainGUI)\n self.controller_time_normalized.set(True)\n\n self.measurement_delay = DoubleVar(self.mainGUI)\n self.measurement_delay.set(0)\n\n self.gearing = DoubleVar(self.mainGUI)\n self.gearing.set(1)\n\n self.controller_type = StringVar(self.mainGUI)\n self.controller_type.set(\"Onboard\")\n\n self.encoder_epr = IntVar(self.mainGUI)\n self.encoder_epr.set(4096)\n\n self.has_follower = BooleanVar(self.mainGUI)\n self.has_follower.set(False)\n\n self.follower_period = DoubleVar(self.mainGUI)\n self.follower_period.set(0.01)\n\n self.gain_units_preset = StringVar(self.mainGUI)\n self.gain_units_preset.set(\"Default\")\n\n self.loop_type = StringVar(self.mainGUI)\n self.loop_type.set(\"Velocity\")\n\n self.kp = DoubleVar(self.mainGUI)\n self.kd = DoubleVar(self.mainGUI)\n\n self.test = StringVar(self.mainGUI)\n self.kg = 
DoubleVar(self.mainGUI)\n self.kcos = DoubleVar(self.mainGUI)\n\n self.units_per_rot = DoubleVar(self.mainGUI)\n\n self.convert_gains = BooleanVar(self.mainGUI)\n\n # Set up main window\n\n def configure_gui(self):\n def getFile():\n dataFile = tkinter.filedialog.askopenfile(\n parent=self.mainGUI,\n mode=\"rb\",\n title=\"Choose the data file (.JSON)\",\n initialdir=self.project_path.get(),\n )\n fileEntry.configure(state=\"normal\")\n fileEntry.delete(0, END)\n fileEntry.insert(0, dataFile.name)\n fileEntry.configure(state=\"readonly\")\n try:\n data = json.load(dataFile)\n\n try:\n # Transform the data into a numpy array to make it easier to use\n # -> transpose it so we can deal with it in columns\n for k in JSON_DATA_KEYS:\n data[k] = np.array(data[k]).transpose()\n\n self.stored_data = data\n logger.info(\"Received Data!\")\n\n analyzeButton.configure(state=\"normal\")\n self.units.set(data[\"units\"])\n self.test.set(data[\"test\"])\n self.units_per_rot.set(float(data[\"unitsPerRotation\"]))\n logger.info(\n \"Units: %s, Test: %s, Units per rotation: %.3f\",\n self.units.get(),\n self.test.get(),\n self.units_per_rot.get(),\n )\n initialUnitEnable()\n enableUnitsPerRot()\n except Exception as e:\n messagebox.showerror(\n \"Error!\",\n \"The structure of the data JSON was not recognized.\\n\"\n + \"Details\\n\"\n + repr(e),\n )\n return\n except Exception as e:\n messagebox.showerror(\n \"Error!\",\n \"The JSON file could not be loaded.\\n\" + \"Details:\\n\" + repr(e),\n parent=self.mainGUI,\n )\n return\n\n def runAnalysis():\n test_runners = {\n Tests.DRIVETRAIN: runAnalysisDrive,\n Tests.ELEVATOR: runAnalysisElevator,\n Tests.ARM: runAnalysisArm,\n Tests.SIMPLE_MOTOR: runAnalysisSimple,\n }\n\n self.prepared_data = self.prepare_data(\n self.stored_data, window=self.window_size.get()\n )\n\n if not self.prepared_data[\"Valid\"]:\n return\n\n test_runners[Tests(self.test.get())]()\n convertGains.configure(state=\"normal\")\n\n calcGains()\n\n timePlotsButton.configure(state=\"normal\")\n voltPlotsButton.configure(state=\"normal\")\n fancyPlotButton.configure(state=\"normal\")\n calcGainsButton.configure(state=\"normal\")\n\n def runAnalysisDrive():\n ks, kv, ka, rsquare = self.calcFit(\n *self.prepared_data[self.subset.get()], self.test.get()\n )\n\n self.ks.set(float(\"%.3g\" % ks))\n self.kv.set(float(\"%.3g\" % kv))\n self.ka.set(float(\"%.3g\" % ka))\n self.r_square.set(float(\"%.3g\" % rsquare))\n\n if \"track-width\" in self.stored_data:\n self.track_width.set(calcTrackWidth(self.stored_data[\"track-width\"]))\n else:\n self.track_width.set(\"N/A\")\n\n def runAnalysisElevator():\n kg, kfr, kv, ka, rsquare = self.calcFit(\n *self.prepared_data[self.subset.get()], self.test.get()\n )\n\n self.kg.set(float(\"%.3g\" % kg))\n self.ks.set(float(\"%.3g\" % kfr))\n self.kv.set(float(\"%.3g\" % kv))\n self.ka.set(float(\"%.3g\" % ka))\n self.r_square.set(float(\"%.3g\" % rsquare))\n\n def runAnalysisArm():\n ks, kv, ka, kcos, rsquare = self.calcFit(\n *self.prepared_data[self.subset.get()], self.test.get()\n )\n\n self.ks.set(float(\"%.3g\" % ks))\n self.kv.set(float(\"%.3g\" % kv))\n self.ka.set(float(\"%.3g\" % ka))\n self.kcos.set(float(\"%.3g\" % kcos))\n self.r_square.set(float(\"%.3g\" % rsquare))\n\n def runAnalysisSimple():\n ks, kv, ka, rsquare = self.calcFit(\n *self.prepared_data[self.subset.get()], self.test.get()\n )\n\n self.ks.set(float(\"%.3g\" % ks))\n self.kv.set(float(\"%.3g\" % kv))\n self.ka.set(float(\"%.3g\" % ka))\n self.r_square.set(float(\"%.3g\" % 
rsquare))\n\n def plotTimeDomain():\n subset = self.subset.get()\n self._plotTimeDomain(subset, *self.prepared_data[subset])\n\n def plotVoltageDomain():\n subset = self.subset.get()\n self._plotVoltageDomain(subset, *self.prepared_data[subset])\n\n def plot3D():\n subset = self.subset.get()\n self._plot3D(subset, *self.prepared_data[subset])\n\n def calcGains():\n\n period = (\n self.period.get()\n if not self.has_follower.get()\n else self.follower_period.get()\n )\n\n if self.loop_type.get() == \"Position\":\n kp, kd = self._calcGainsPos(\n self.kv.get(),\n self.ka.get(),\n self.qp.get(),\n self.qv.get(),\n self.max_effort.get(),\n period,\n self.measurement_delay.get(),\n )\n else:\n kp, kd = self._calcGainsVel(\n self.kv.get(),\n self.ka.get(),\n self.qv.get(),\n self.max_effort.get(),\n period,\n self.measurement_delay.get(),\n )\n\n # Scale gains to output\n kp = kp / 12 * self.max_controller_output.get()\n kd = kd / 12 * self.max_controller_output.get()\n\n # Rescale kD if not time-normalized\n if not self.controller_time_normalized.get():\n kd = kd / self.period.get()\n\n # Get correct conversion factor for rotations\n units = Units(self.units.get())\n if isRotation(units.value):\n rotation = (1 * units.ROTATIONS.unit).to(units.unit)\n else:\n rotation = self.units_per_rot.get()\n\n # Convert to controller-native units if desired\n if self.convert_gains.get():\n if self.controller_type.get() == \"Talon\":\n kp = kp * rotation / (self.encoder_epr.get() * self.gearing.get())\n kd = kd * rotation / (self.encoder_epr.get() * self.gearing.get())\n if self.loop_type.get() == \"Velocity\":\n kp = kp * 10\n if self.controller_type.get() == \"Spark\":\n kp = kp / (self.gearing.get())\n kd = kd / (self.gearing.get())\n if self.loop_type.get() == \"Velocity\":\n kp = kp / 60\n\n self.kp.set(float(\"%.3g\" % kp))\n self.kd.set(float(\"%.3g\" % kd))\n\n def calcTrackWidth(table):\n # Note that this assumes the gyro angle is not modded (i.e. on [0, +infinity)),\n # and that a positive angle travels in the counter-clockwise direction\n\n d_left = table[-1][R_ENCODER_P_COL] - table[0][R_ENCODER_P_COL]\n d_right = table[-1][L_ENCODER_P_COL] - table[0][L_ENCODER_P_COL]\n d_angle = table[-1][GYRO_ANGLE_COL] - table[0][GYRO_ANGLE_COL]\n\n if d_angle == 0:\n messagebox.showerror(\n \"Error!\",\n \"Change in gyro angle was 0... Is your gyro set up correctly?\",\n )\n return 0.0\n\n # The below comes from solving ω=(vr−vl)/2r for 2r\n # Absolute values used to ensure the calculated value is always positive\n # and to add robustness to sensor inversion\n diameter = (abs(d_left) + abs(d_right)) / abs(d_angle)\n\n return diameter\n\n def presetGains(*args):\n def setMeasurementDelay(delay):\n self.measurement_delay.set(\n 0 if self.loop_type.get() == \"Position\" else delay\n )\n\n # A number of motor controllers use moving average filters; these are types of FIR filters.\n # A moving average filter with a window size of N is a FIR filter with N taps.\n # The average delay (in taps) of an arbitrary FIR filter with N taps is (N-1)/2.\n # All of the delays below assume that 1 T takes 1 ms.\n #\n # Proof:\n # N taps with delays of 0 .. N - 1 T\n #\n # average delay = (sum 0 .. N - 1) / N T\n # = (sum 1 .. N - 1) / N T\n #\n # note: sum 1 .. 
n = n(n + 1) / 2\n #\n # = (N - 1)((N - 1) + 1) / (2N) T\n # = (N - 1)N / (2N) T\n # = (N - 1)/2 T\n\n presets = {\n \"Default\": lambda: (\n self.max_controller_output.set(12),\n self.period.set(0.02),\n self.controller_time_normalized.set(True),\n self.controller_type.set(\"Onboard\"),\n setMeasurementDelay(0),\n ),\n \"WPILib (2020-)\": lambda: (\n self.max_controller_output.set(12),\n self.period.set(0.02),\n self.controller_time_normalized.set(True),\n self.controller_type.set(\"Onboard\"),\n # Note that the user will need to remember to set this if the onboard controller is getting delayed measurements.\n setMeasurementDelay(0),\n ),\n \"WPILib (Pre-2020)\": lambda: (\n self.max_controller_output.set(1),\n self.period.set(0.05),\n self.controller_time_normalized.set(False),\n self.controller_type.set(\"Onboard\"),\n # Note that the user will need to remember to set this if the onboard controller is getting delayed measurements.\n setMeasurementDelay(0),\n ),\n \"Talon FX\": lambda: (\n self.max_controller_output.set(1),\n self.period.set(0.001),\n self.controller_time_normalized.set(True),\n self.controller_type.set(\"Talon\"),\n # https://phoenix-documentation.readthedocs.io/en/latest/ch14_MCSensor.html#changing-velocity-measurement-parameters\n # 100 ms sampling period + a moving average window size of 64 (i.e. a 64-tap FIR) = 100/2 ms + (64-1)/2 ms = 81.5 ms.\n # See above for more info on moving average delays.\n setMeasurementDelay(81.5),\n ),\n \"Talon SRX (2020-)\": lambda: (\n self.max_controller_output.set(1),\n self.period.set(0.001),\n self.controller_time_normalized.set(True),\n self.controller_type.set(\"Talon\"),\n # https://phoenix-documentation.readthedocs.io/en/latest/ch14_MCSensor.html#changing-velocity-measurement-parameters\n # 100 ms sampling period + a moving average window size of 64 (i.e. a 64-tap FIR) = 100/2 ms + (64-1)/2 ms = 81.5 ms.\n # See above for more info on moving average delays.\n setMeasurementDelay(81.5),\n ),\n \"Talon SRX (Pre-2020)\": lambda: (\n self.max_controller_output.set(1023),\n self.period.set(0.001),\n self.controller_time_normalized.set(False),\n self.controller_type.set(\"Talon\"),\n # https://phoenix-documentation.readthedocs.io/en/latest/ch14_MCSensor.html#changing-velocity-measurement-parameters\n # 100 ms sampling period + a moving average window size of 64 (i.e. 
a 64-tap FIR) = 100/2 ms + (64-1)/2 ms = 81.5 ms.\n # See above for more info on moving average delays.\n setMeasurementDelay(81.5),\n ),\n \"Spark MAX (brushless)\": lambda: (\n self.max_controller_output.set(1),\n self.period.set(0.001),\n self.controller_time_normalized.set(False),\n self.controller_type.set(\"Spark\"),\n # According to a Rev employee on the FRC Discord the window size is 40 so delay = (40-1)/2 ms = 19.5 ms.\n # See above for more info on moving average delays.\n setMeasurementDelay(19.5),\n ),\n \"Spark MAX (brushed)\": lambda: (\n self.max_controller_output.set(1),\n self.period.set(0.001),\n self.controller_time_normalized.set(False),\n self.controller_type.set(\"Spark\"),\n # https://www.revrobotics.com/content/sw/max/sw-docs/cpp/classrev_1_1_c_a_n_encoder.html#a7e6ce792bc0c0558fb944771df572e6a\n # 64-tap FIR = (64-1)/2 ms = 31.5 ms delay.\n # See above for more info on moving average delays.\n setMeasurementDelay(31.5),\n ),\n }\n\n presets.get(self.gain_units_preset.get(), \"Default\")()\n if (\n \"Talon\" in self.gain_units_preset.get()\n or \"Spark\" in self.gain_units_preset.get()\n ):\n self.convert_gains.set(True)\n else:\n self.convert_gains.set(False)\n\n def enableOffboard(*args):\n if self.controller_type.get() == \"Onboard\":\n gearingEntry.configure(state=\"disabled\")\n eprEntry.configure(state=\"disabled\")\n hasFollower.configure(state=\"disabled\")\n followerPeriodEntry.configure(state=\"disabled\")\n elif self.controller_type.get() == \"Talon\":\n gearingEntry.configure(state=\"normal\")\n eprEntry.configure(state=\"normal\")\n hasFollower.configure(state=\"normal\")\n if self.has_follower.get():\n followerPeriodEntry.configure(state=\"normal\")\n else:\n followerPeriodEntry.configure(state=\"disabled\")\n else:\n gearingEntry.configure(state=\"disabled\")\n eprEntry.configure(state=\"disabled\")\n hasFollower.configure(state=\"normal\")\n if self.has_follower.get():\n followerPeriodEntry.configure(state=\"normal\")\n else:\n followerPeriodEntry.configure(state=\"disabled\")\n\n def enableUnitsPerRot(*args):\n if not isRotation(self.units.get()) and isRotation(\n self.stored_data[\"units\"]\n ):\n logger.info(\"Allowing user to modify units per rot\")\n diamEntry.configure(state=\"normal\")\n self.units_per_rot.set(0) # reset the value\n else:\n self.units_per_rot.set(\n convertUnit(\n self.stored_data[\"units\"],\n self.units.get(),\n self.stored_data[\"unitsPerRotation\"],\n )\n )\n diamEntry.configure(state=\"readonly\")\n\n def initialUnitEnable(*args):\n diamEntry.configure(state=\"normal\")\n unitsMenu.configure(state=\"normal\")\n\n def convertUnit(initUnits, finalUnits, unitsPerRot):\n initUnits = Units(initUnits)\n finalUnits = Units(finalUnits)\n if isRotation(finalUnits):\n logger.info(\"Converting to rotational measure (fixed conversion)\")\n return round(\n (1 * Units.ROTATIONS.unit).to(finalUnits.unit).magnitude, 3\n )\n else:\n logger.info(\"Converting from %s to %s measure\", initUnits, finalUnits)\n dataUnitsPerRot = unitsPerRot * initUnits.unit\n return round(dataUnitsPerRot.to(finalUnits.unit).magnitude, 3)\n\n def enableErrorBounds(*args):\n if self.loop_type.get() == \"Position\":\n qPEntry.configure(state=\"normal\")\n else:\n qPEntry.configure(state=\"disabled\")\n\n def defineTestResults(*args):\n trackWidthEntry.configure(state=\"disabled\")\n kGEntry.configure(state=\"disabled\")\n kCosEntry.configure(state=\"disabled\")\n\n test = Tests(self.test.get())\n if test != Tests.DRIVETRAIN:\n dirMenu = OptionMenu(topFrame, 
self.subset, *sorted(directions))\n self.subset.set(\"Combined\")\n else:\n dirMenu = OptionMenu(topFrame, self.subset, *sorted(subsets))\n self.subset.set(\"All Combined\")\n dirMenu.configure(width=20, state=\"normal\")\n dirMenu.grid(row=0, column=7)\n if test == Tests.DRIVETRAIN or test == Tests.ELEVATOR:\n diamEntry.configure(state=\"normal\")\n if test == Tests.DRIVETRAIN:\n trackWidthEntry.configure(state=\"readonly\")\n else:\n kGEntry.configure(state=\"readonly\")\n else:\n diamEntry.configure(state=\"disabled\")\n if test == Tests.ARM:\n kCosEntry.configure(state=\"readonly\")\n\n def isRotation(units):\n return Units(units) in (Units.ROTATIONS, Units.RADIANS, Units.DEGREES)\n\n # TOP OF WINDOW (FILE SELECTION)\n\n topFrame = Frame(self.mainGUI)\n topFrame.grid(row=0, column=0, columnspan=4)\n\n Button(topFrame, text=\"Select Data File\", command=getFile).grid(\n row=0, column=0, padx=4\n )\n\n fileEntry = Entry(topFrame, width=80)\n fileEntry.grid(row=0, column=1, columnspan=3)\n fileEntry.configure(state=\"readonly\")\n\n Label(topFrame, text=\"Units per rotation:\", anchor=\"e\").grid(\n row=1, column=3, columnspan=2, sticky=\"ew\"\n )\n diamEntry = FloatEntry(topFrame, textvariable=self.units_per_rot)\n diamEntry.grid(row=1, column=5)\n diamEntry.configure(state=\"disabled\")\n\n Label(topFrame, text=\"Subset:\", width=15).grid(row=0, column=6)\n subsets = {\n \"All Combined\",\n \"Forward Left\",\n \"Forward Right\",\n \"Forward Combined\",\n \"Backward Left\",\n \"Backward Right\",\n \"Backward Combined\",\n }\n directions = {\"Combined\", \"Forward\", \"Backward\"}\n dirMenu = OptionMenu(topFrame, self.subset, *sorted(directions))\n dirMenu.configure(width=20, state=\"disabled\")\n dirMenu.grid(row=0, column=7)\n\n Label(topFrame, text=\"Test:\", width=15).grid(row=1, column=6)\n\n testMenu = FloatEntry(topFrame, textvariable=self.test, width=10)\n testMenu.configure(width=10, state=\"readonly\")\n testMenu.grid(row=1, column=7)\n self.test.trace_add(\"write\", defineTestResults)\n\n Label(topFrame, text=\"Units:\", width=10).grid(row=0, column=4)\n\n unitsMenu = OptionMenu(\n topFrame, self.units, *sorted(unit.value for unit in Units)\n )\n unitsMenu.configure(width=10)\n unitsMenu.grid(row=0, column=5, sticky=\"ew\")\n unitsMenu.configure(state=\"disabled\")\n self.units.trace_add(\"write\", enableUnitsPerRot)\n\n for child in topFrame.winfo_children():\n child.grid_configure(padx=1, pady=1)\n\n # FEEDFORWARD ANALYSIS FRAME\n\n ffFrame = Frame(self.mainGUI, bd=2, relief=\"groove\")\n ffFrame.grid(row=1, column=0, columnspan=3, sticky=\"ns\")\n\n Label(ffFrame, text=\"Feedforward Analysis\").grid(row=0, column=0, columnspan=5)\n\n analyzeButton = Button(\n ffFrame, text=\"Analyze Data\", command=runAnalysis, state=\"disabled\"\n )\n analyzeButton.grid(row=1, column=0, sticky=\"ew\")\n\n timePlotsButton = Button(\n ffFrame,\n text=\"Time-Domain Diagnostics\",\n command=plotTimeDomain,\n state=\"disabled\",\n )\n timePlotsButton.grid(row=2, column=0, sticky=\"ew\")\n\n voltPlotsButton = Button(\n ffFrame,\n text=\"Voltage-Domain Diagnostics\",\n command=plotVoltageDomain,\n state=\"disabled\",\n )\n voltPlotsButton.grid(row=3, column=0, sticky=\"ew\")\n\n fancyPlotButton = Button(\n ffFrame, text=\"3D Diagnostics\", command=plot3D, state=\"disabled\"\n )\n fancyPlotButton.grid(row=4, column=0, sticky=\"ew\")\n\n Label(ffFrame, text=\"Accel Window Size:\", anchor=\"e\").grid(\n row=1, column=1, sticky=\"ew\"\n )\n windowEntry = IntEntry(ffFrame, 
textvariable=self.window_size, width=5)\n windowEntry.grid(row=1, column=2)\n\n Label(ffFrame, text=\"Motion Threshold (units/s):\", anchor=\"e\").grid(\n row=2, column=1, sticky=\"ew\"\n )\n thresholdEntry = FloatEntry(\n ffFrame, textvariable=self.motion_threshold, width=5\n )\n thresholdEntry.grid(row=2, column=2)\n\n Label(ffFrame, text=\"kS:\", anchor=\"e\").grid(row=1, column=3, sticky=\"ew\")\n kSEntry = FloatEntry(ffFrame, textvariable=self.ks, width=10)\n kSEntry.grid(row=1, column=4)\n kSEntry.configure(state=\"readonly\")\n\n Label(ffFrame, text=\"kG:\", anchor=\"e\").grid(row=2, column=3, sticky=\"ew\")\n kGEntry = FloatEntry(ffFrame, textvariable=self.kg, width=10)\n kGEntry.grid(row=2, column=4)\n kGEntry.configure(state=\"disabled\")\n\n Label(ffFrame, text=\"kCos:\", anchor=\"e\").grid(row=3, column=3, sticky=\"ew\")\n kCosEntry = FloatEntry(ffFrame, textvariable=self.kcos, width=10)\n kCosEntry.grid(row=3, column=4)\n kCosEntry.configure(state=\"disabled\")\n\n Label(ffFrame, text=\"kV:\", anchor=\"e\").grid(row=4, column=3, sticky=\"ew\")\n kVEntry = FloatEntry(ffFrame, textvariable=self.kv, width=10)\n kVEntry.grid(row=4, column=4)\n kVEntry.configure(state=\"readonly\")\n\n Label(ffFrame, text=\"kA:\", anchor=\"e\").grid(row=5, column=3, sticky=\"ew\")\n kAEntry = FloatEntry(ffFrame, textvariable=self.ka, width=10)\n kAEntry.grid(row=5, column=4)\n kAEntry.configure(state=\"readonly\")\n\n Label(ffFrame, text=\"r-squared:\", anchor=\"e\").grid(row=6, column=3, sticky=\"ew\")\n rSquareEntry = FloatEntry(ffFrame, textvariable=self.r_square, width=10)\n rSquareEntry.grid(row=6, column=4)\n rSquareEntry.configure(state=\"readonly\")\n\n Label(ffFrame, text=\"Track Width:\", anchor=\"e\").grid(\n row=7, column=3, sticky=\"ew\"\n )\n trackWidthEntry = FloatEntry(ffFrame, textvariable=self.track_width, width=10)\n trackWidthEntry.grid(row=7, column=4)\n trackWidthEntry.configure(state=\"disabled\")\n\n for child in ffFrame.winfo_children():\n child.grid_configure(padx=1, pady=1)\n\n # FEEDBACK ANALYSIS FRAME\n\n fbFrame = Frame(self.mainGUI, bd=2, relief=\"groove\")\n fbFrame.grid(row=1, column=3, columnspan=5)\n\n Label(fbFrame, text=\"Feedback Analysis\").grid(row=0, column=0, columnspan=5)\n\n Label(fbFrame, text=\"Gain Settings Preset:\", anchor=\"e\").grid(\n row=1, column=0, sticky=\"ew\"\n )\n presetChoices = {\n \"Default\",\n \"WPILib (2020-)\",\n \"WPILib (Pre-2020)\",\n \"Talon FX\",\n \"Talon SRX (2020-)\",\n \"Talon SRX (Pre-2020)\",\n \"Spark MAX (brushless)\",\n \"Spark MAX (brushed)\",\n }\n presetMenu = OptionMenu(fbFrame, self.gain_units_preset, *sorted(presetChoices))\n presetMenu.grid(row=1, column=1)\n presetMenu.config(width=12)\n self.gain_units_preset.trace_add(\"write\", presetGains)\n\n Label(fbFrame, text=\"Controller Period (s):\", anchor=\"e\").grid(\n row=2, column=0, sticky=\"ew\"\n )\n periodEntry = FloatEntry(fbFrame, textvariable=self.period, width=10)\n periodEntry.grid(row=2, column=1)\n\n Label(fbFrame, text=\"Max Controller Output:\", anchor=\"e\").grid(\n row=3, column=0, sticky=\"ew\"\n )\n controllerMaxEntry = FloatEntry(\n fbFrame, textvariable=self.max_controller_output, width=10\n )\n controllerMaxEntry.grid(row=3, column=1)\n\n Label(fbFrame, text=\"Time-Normalized Controller:\", anchor=\"e\").grid(\n row=4, column=0, sticky=\"ew\"\n )\n normalizedButton = Checkbutton(\n fbFrame, variable=self.controller_time_normalized\n )\n normalizedButton.grid(row=4, column=1)\n\n Label(fbFrame, text=\"Controller Type:\", anchor=\"e\").grid(\n 
row=5, column=0, sticky=\"ew\"\n )\n controllerTypes = {\"Onboard\", \"Talon\", \"Spark\"}\n controllerTypeMenu = OptionMenu(\n fbFrame, self.controller_type, *sorted(controllerTypes)\n )\n controllerTypeMenu.grid(row=5, column=1)\n self.controller_type.trace_add(\"write\", enableOffboard)\n\n Label(fbFrame, text=\"Measurement delay (ms):\", anchor=\"e\").grid(\n row=6, column=0, sticky=\"ew\"\n )\n velocityDelay = FloatEntry(\n fbFrame, textvariable=self.measurement_delay, width=10\n )\n velocityDelay.grid(row=6, column=1)\n\n Label(fbFrame, text=\"Post-Encoder Gearing:\", anchor=\"e\").grid(\n row=7, column=0, sticky=\"ew\"\n )\n gearingEntry = FloatEntry(fbFrame, textvariable=self.gearing, width=10)\n gearingEntry.configure(state=\"disabled\")\n gearingEntry.grid(row=7, column=1)\n\n Label(fbFrame, text=\"Encoder EPR:\", anchor=\"e\").grid(\n row=8, column=0, sticky=\"ew\"\n )\n eprEntry = IntEntry(fbFrame, textvariable=self.encoder_epr, width=10)\n eprEntry.configure(state=\"disabled\")\n eprEntry.grid(row=8, column=1)\n\n Label(fbFrame, text=\"Has Follower:\", anchor=\"e\").grid(\n row=9, column=0, sticky=\"ew\"\n )\n hasFollower = Checkbutton(fbFrame, variable=self.has_follower)\n hasFollower.grid(row=9, column=1)\n hasFollower.configure(state=\"disabled\")\n self.has_follower.trace_add(\"write\", enableOffboard)\n\n Label(fbFrame, text=\"Follower Update Period (s):\", anchor=\"e\").grid(\n row=10, column=0, sticky=\"ew\"\n )\n followerPeriodEntry = FloatEntry(\n fbFrame, textvariable=self.follower_period, width=10\n )\n followerPeriodEntry.grid(row=10, column=1)\n followerPeriodEntry.configure(state=\"disabled\")\n\n Label(fbFrame, text=\"Max Acceptable Position Error (units):\", anchor=\"e\").grid(\n row=1, column=2, columnspan=2, sticky=\"ew\"\n )\n qPEntry = FloatEntry(fbFrame, textvariable=self.qp, width=10)\n qPEntry.grid(row=1, column=4)\n qPEntry.configure(state=\"disabled\")\n\n Label(\n fbFrame, text=\"Max Acceptable Velocity Error (units/s):\", anchor=\"e\"\n ).grid(row=2, column=2, columnspan=2, sticky=\"ew\")\n qVEntry = FloatEntry(fbFrame, textvariable=self.qv, width=10)\n qVEntry.grid(row=2, column=4)\n\n Label(fbFrame, text=\"Max Acceptable Control Effort (V):\", anchor=\"e\").grid(\n row=3, column=2, columnspan=2, sticky=\"ew\"\n )\n effortEntry = FloatEntry(fbFrame, textvariable=self.max_effort, width=10)\n effortEntry.grid(row=3, column=4)\n\n Label(fbFrame, text=\"Loop Type:\", anchor=\"e\").grid(\n row=4, column=2, columnspan=2, sticky=\"ew\"\n )\n loopTypes = {\"Position\", \"Velocity\"}\n loopTypeMenu = OptionMenu(fbFrame, self.loop_type, *sorted(loopTypes))\n loopTypeMenu.configure(width=8)\n loopTypeMenu.grid(row=4, column=4)\n self.loop_type.trace_add(\"write\", enableErrorBounds)\n # We reset everything to the selected preset when the user changes the loop type\n # This prevents people from forgetting to change measurement delays\n self.loop_type.trace_add(\"write\", presetGains)\n\n Label(fbFrame, text=\"kV:\", anchor=\"e\").grid(row=5, column=2, sticky=\"ew\")\n kVFBEntry = FloatEntry(fbFrame, textvariable=self.kv, width=10)\n kVFBEntry.grid(row=5, column=3)\n Label(fbFrame, text=\"kA:\", anchor=\"e\").grid(row=6, column=2, sticky=\"ew\")\n kAFBEntry = FloatEntry(fbFrame, textvariable=self.ka, width=10)\n kAFBEntry.grid(row=6, column=3)\n\n calcGainsButton = Button(\n fbFrame,\n text=\"Calculate Optimal Controller Gains\",\n command=calcGains,\n state=\"disabled\",\n )\n calcGainsButton.grid(row=7, column=2, columnspan=3)\n\n Label(fbFrame, text=\"kP:\", 
anchor=\"e\").grid(row=8, column=2, sticky=\"ew\")\n kPEntry = FloatEntry(\n fbFrame, textvariable=self.kp, width=10, state=\"readonly\"\n ).grid(row=8, column=3)\n\n Label(fbFrame, text=\"kD:\", anchor=\"e\").grid(row=9, column=2, sticky=\"ew\")\n kDEntry = FloatEntry(\n fbFrame, textvariable=self.kd, width=10, state=\"readonly\"\n ).grid(row=9, column=3)\n\n Label(fbFrame, text=\"Convert Gains:\", anchor=\"e\").grid(\n row=8, column=4, sticky=\"ew\"\n )\n convertGains = Checkbutton(fbFrame, variable=self.convert_gains)\n convertGains.grid(row=8, column=5)\n convertGains.configure(state=\"disabled\")\n\n for child in fbFrame.winfo_children():\n child.grid_configure(padx=1, pady=1)\n\n # From 449's R script (note: R is 1-indexed)\n\n def smoothDerivative(self, tm, value, n):\n \"\"\"\n :param tm: time column\n :param value: Value to take the derivative of\n :param n: smoothing parameter\n \"\"\"\n dlen = len(value)\n dt = tm[n:dlen] - tm[: (dlen - n)]\n x = (value[(n):dlen] - value[: (dlen - n)]) / dt\n\n # pad to original length by adding zeros on either side\n return np.pad(\n x, (int(np.ceil(n / 2.0)), int(np.floor(n / 2.0))), mode=\"constant\"\n )\n\n # Create one for one sided and one for 2 sided\n def trim_quasi_testdata(self, data):\n adata = np.abs(data)\n test = Tests(self.test.get())\n if test == Tests.DRIVETRAIN:\n truth = np.all(\n [\n adata[L_ENCODER_V_COL] > self.motion_threshold.get(),\n adata[L_VOLTS_COL] > 0,\n adata[R_ENCODER_V_COL] > self.motion_threshold.get(),\n adata[R_VOLTS_COL] > 0,\n ],\n axis=0,\n )\n else:\n truth = np.all(\n [\n adata[L_ENCODER_V_COL] > self.motion_threshold.get(),\n adata[L_VOLTS_COL] > 0,\n ],\n axis=0,\n )\n\n temp = data.transpose()[truth].transpose()\n\n if temp[TIME_COL].size == 0:\n messagebox.showinfo(\n \"Error!\",\n \"No data in quasistatic test is above motion threshold. \"\n + \"Try running with a smaller motion threshold (use --motion_threshold) \"\n + \"and make sure your encoder is reporting correctly!\",\n )\n return None\n else:\n return temp\n\n def trim_step_testdata(self, data):\n # removes anything before the max acceleration\n max_accel_idx = np.argmax(np.abs(data[PREPARED_ACC_COL]))\n return data[:, max_accel_idx + 1 :]\n\n def compute_accelDrive(self, data, window):\n \"\"\"\n Returned data columns correspond to PREPARED_*\n \"\"\"\n\n # deal with incomplete data\n if len(data[TIME_COL]) < window * 2:\n messagebox.showinfo(\n \"Error!\",\n \"Not enough data points to compute acceleration. \"\n + \"Try running with a smaller window setting or a smaller threshold.\",\n )\n return None\n\n # Compute left/right acceleration\n l_acc = self.smoothDerivative(data[TIME_COL], data[L_ENCODER_V_COL], window)\n r_acc = self.smoothDerivative(data[TIME_COL], data[R_ENCODER_V_COL], window)\n\n l = np.vstack(\n (\n data[TIME_COL],\n data[L_VOLTS_COL],\n data[L_ENCODER_P_COL],\n data[L_ENCODER_V_COL],\n l_acc,\n )\n )\n r = np.vstack(\n (\n data[TIME_COL],\n data[R_VOLTS_COL],\n data[R_ENCODER_P_COL],\n data[R_ENCODER_V_COL],\n r_acc,\n )\n )\n\n return l, r\n\n def compute_accel(self, data, window):\n \"\"\"\n Returned data columns correspond to PREPARED_*\n \"\"\"\n\n # deal with incomplete data\n if len(data[TIME_COL]) < window * 2:\n messagebox.showinfo(\n \"Error!\",\n \"Not enough data points to compute acceleration. 
\"\n + \"Try running with a smaller window setting or a smaller threshold.\",\n )\n return None\n\n # Compute left/right acceleration\n acc = self.smoothDerivative(data[TIME_COL], data[L_ENCODER_V_COL], window)\n\n dat = np.vstack(\n (\n data[TIME_COL],\n data[L_VOLTS_COL],\n data[L_ENCODER_P_COL],\n data[L_ENCODER_V_COL],\n acc,\n )\n )\n\n return dat\n\n def is_valid(self, *a_tuple):\n for a in a_tuple:\n if a is None:\n return False\n return True\n\n def prepare_data_drivetrain(self, data, window):\n \"\"\"\n Firstly, data should be 'trimmed' to exclude any data points at which the\n robot was not being commanded to do anything.\n\n Secondly, robot acceleration should be calculated from robot velocity and time.\n We have found it effective to do this by taking the slope of the secant line\n of velocity over a 60ms (3 standard loop iterations) window.\n\n Thirdly, data from the quasi-static test should be trimmed to exclude the\n initial period in which the robot is not moving due to static friction\n Fourthly, data from the step-voltage acceleration tests must be trimmed to\n remove the initial 'ramp-up' period that exists due to motor inductance; this\n can be done by simply removing all data points before maximum acceleration is\n reached.\n\n Finally, the data can be analyzed: pool your trimmed data into four data sets\n - one for each side of the robot (left or right) and each direction (forwards\n or backwards).\n\n For each set, run a linear regression of voltage seen at the motor\n (or battery voltage if you do not have Talon SRXs) versus velocity and\n acceleration.\n\n Voltage should be in units of volts, velocity in units of feet per second,\n and acceleration in units of feet per second squared.\n\n Each data pool will then yield three parameters -\n intercept, Kv (the regression coefficient of velocity), and Ka (the regression\n coefficient of acceleration).\n \"\"\"\n # ensure voltage sign matches velocity sign and converts rotation measurements into proper units\n for x in JSON_DATA_KEYS:\n data[x][L_VOLTS_COL] = np.copysign(\n data[x][L_VOLTS_COL], data[x][L_ENCODER_V_COL]\n )\n data[x][R_VOLTS_COL] = np.copysign(\n data[x][R_VOLTS_COL], data[x][R_ENCODER_V_COL]\n )\n data[x][R_ENCODER_V_COL] = (\n np.array(data[x][R_ENCODER_V_COL]) * self.units_per_rot.get()\n ).tolist()\n data[x][L_ENCODER_V_COL] = (\n np.array(data[x][L_ENCODER_V_COL]) * self.units_per_rot.get()\n ).tolist()\n data[x][R_ENCODER_P_COL] = (\n np.array(data[x][R_ENCODER_V_COL]) * self.units_per_rot.get()\n ).tolist()\n data[x][L_ENCODER_P_COL] = (\n np.array(data[x][L_ENCODER_V_COL]) * self.units_per_rot.get()\n ).tolist()\n\n # trim quasi data before computing acceleration\n sf_trim = self.trim_quasi_testdata(data[\"slow-forward\"])\n sb_trim = self.trim_quasi_testdata(data[\"slow-backward\"])\n\n if sf_trim is None or sb_trim is None:\n return [None] * 8\n\n sf_l, sf_r = self.compute_accelDrive(sf_trim, window)\n sb_l, sb_r = self.compute_accelDrive(sb_trim, window)\n\n if sf_l is None or sf_r is None or sb_l is None or sb_r is None:\n return [None] * 8\n\n # trim step data after computing acceleration\n ff_l, ff_r = self.compute_accelDrive(data[\"fast-forward\"], window)\n fb_l, fb_r = self.compute_accelDrive(data[\"fast-backward\"], window)\n\n if ff_l is None or ff_r is None or fb_l is None or fb_r is None:\n return [None] * 8\n\n ff_l = self.trim_step_testdata(ff_l)\n ff_r = self.trim_step_testdata(ff_r)\n fb_l = self.trim_step_testdata(fb_l)\n fb_r = self.trim_step_testdata(fb_r)\n\n dataset = {\n 
\"Forward Left\": [sf_l, ff_l],\n \"Forward Right\": [sf_r, ff_r],\n \"Backward Left\": [sb_l, fb_l],\n \"Backward Right\": [sb_r, fb_r],\n \"Forward Combined\": [\n np.concatenate((sf_l, sf_r), axis=1),\n np.concatenate((ff_l, ff_r), axis=1),\n ],\n \"Backward Combined\": [\n np.concatenate((sb_l, sb_r), axis=1),\n np.concatenate((fb_l, fb_r), axis=1),\n ],\n \"All Combined\": [\n np.concatenate((sf_l, sb_l, sf_r, sb_r), axis=1),\n np.concatenate((ff_l, fb_l, ff_r, ff_r), axis=1),\n ],\n \"Valid\": self.is_valid(sf_l, sb_l, ff_l, fb_l, sf_r, sb_r, ff_r, fb_r),\n }\n\n return dataset\n\n def prepare_data(self, ogData, window):\n \"\"\"\n Firstly, data should be 'trimmed' to exclude any data points at which the\n robot was not being commanded to do anything.\n\n Secondly, robot acceleration should be calculated from robot velocity and time.\n We have found it effective to do this by taking the slope of the secant line\n of velocity over a 60ms (3 standard loop iterations) window.\n\n Thirdly, data from the quasi-static test should be trimmed to exclude the\n initial period in which the robot is not moving due to static friction\n Fourthly, data from the step-voltage acceleration tests must be trimmed to\n remove the initial 'ramp-up' period that exists due to motor inductance; this\n can be done by simply removing all data points before maximum acceleration is\n reached.\n\n Finally, the data can be analyzed: pool your trimmed data into four data sets\n - one for each side of the robot (left or right) and each direction (forwards\n or backwards).\n\n For each set, run a linear regression of voltage seen at the motor\n (or battery voltage if you do not have Talon SRXs) versus velocity and\n acceleration.\n\n Voltage should be in units of volts, velocity in units of feet per second,\n and acceleration in units of feet per second squared.\n\n Each data pool will then yield three parameters -\n intercept, Kv (the regression coefficient of velocity), and Ka (the regression\n coefficient of acceleration).\n \"\"\"\n # create a copy so original data doesn't get changed\n data = copy.deepcopy(ogData)\n\n test = Tests(self.test.get())\n if test == Tests.DRIVETRAIN:\n return self.prepare_data_drivetrain(data, window)\n else:\n # Ensure voltage points in same direction as velocity\n for x in JSON_DATA_KEYS:\n data[x][L_VOLTS_COL] = np.copysign(\n data[x][L_VOLTS_COL], data[x][L_ENCODER_V_COL]\n )\n data[x][L_ENCODER_V_COL] = (\n np.array(data[x][L_ENCODER_V_COL]) * self.units_per_rot.get()\n ).tolist()\n data[x][L_ENCODER_P_COL] = (\n np.array(data[x][L_ENCODER_V_COL]) * self.units_per_rot.get()\n ).tolist()\n\n # trim quasi data before computing acceleration\n sf_trim = self.trim_quasi_testdata(data[\"slow-forward\"])\n sb_trim = self.trim_quasi_testdata(data[\"slow-backward\"])\n\n if sf_trim is None or sb_trim is None:\n return None, None, None, None\n\n sf = self.compute_accel(sf_trim, window)\n sb = self.compute_accel(sb_trim, window)\n\n if sf is None or sb is None:\n return None, None, None, None\n\n # trim step data after computing acceleration\n ff = self.compute_accel(data[\"fast-forward\"], window)\n fb = self.compute_accel(data[\"fast-backward\"], window)\n\n if ff is None or fb is None:\n return None, None, None, None\n\n ff = self.trim_step_testdata(ff)\n fb = self.trim_step_testdata(fb)\n\n dataset = {\n \"Forward\": [sf, ff],\n \"Backward\": [sb, fb],\n \"Combined\": [\n np.concatenate((sf, sb), axis=1),\n np.concatenate((ff, fb), axis=1),\n ],\n \"Valid\": self.is_valid(sf, sb, ff, 
fb),\n            }\n            return dataset\n\n    def ols(self, x1, x2, x3, y):\n        \"\"\"multivariate linear regression using ordinary least squares\"\"\"\n        if x3 is not None:\n            x = np.array((np.sign(x1), x1, x2, x3)).T\n        else:\n            x = np.array((np.sign(x1), x1, x2)).T\n        model = sm.OLS(y, x)\n        return model.fit()\n\n    def _plotTimeDomain(self, subset, qu, step):\n        vel = np.concatenate((qu[PREPARED_VEL_COL], step[PREPARED_VEL_COL]))\n        accel = np.concatenate((qu[PREPARED_ACC_COL], step[PREPARED_ACC_COL]))\n        volts = np.concatenate((qu[PREPARED_V_COL], step[PREPARED_V_COL]))\n        time = np.concatenate((qu[PREPARED_TM_COL], step[PREPARED_TM_COL]))\n\n        # Time-domain plots.\n        # These should show if anything went horribly wrong during the tests.\n        # Useful for diagnosing the data trim; quasistatic test should look purely linear with no leading 'tail'\n\n        plt.figure(subset + \" Time-Domain Plots\")\n\n        # quasistatic vel and accel vs time\n        ax1 = plt.subplot(221)\n        ax1.set_xlabel(\"Time\")\n        ax1.set_ylabel(\"Velocity\")\n        ax1.set_title(\"Quasistatic velocity vs time\")\n        plt.scatter(qu[PREPARED_TM_COL], qu[PREPARED_VEL_COL], marker=\".\", c=\"#000000\")\n\n        ax = plt.subplot(222, sharey=ax1)\n        ax.set_xlabel(\"Time\")\n        ax.set_ylabel(\"Velocity\")\n        ax.set_title(\"Dynamic velocity vs time\")\n        plt.scatter(\n            step[PREPARED_TM_COL], step[PREPARED_VEL_COL], marker=\".\", c=\"#000000\"\n        )\n\n        # dynamic vel and accel vs time\n        ax2 = plt.subplot(223)\n        ax2.set_xlabel(\"Time\")\n        ax2.set_ylabel(\"Acceleration\")\n        ax2.set_title(\"Quasistatic acceleration vs time\")\n        plt.scatter(qu[PREPARED_TM_COL], qu[PREPARED_ACC_COL], marker=\".\", c=\"#000000\")\n\n        ax = plt.subplot(224, sharey=ax2)\n        ax.set_xlabel(\"Time\")\n        ax.set_ylabel(\"Acceleration\")\n        ax.set_title(\"Dynamic acceleration vs time\")\n        plt.scatter(\n            step[PREPARED_TM_COL], step[PREPARED_ACC_COL], marker=\".\", c=\"#000000\"\n        )\n\n        # Fix overlapping axis labels\n        plt.tight_layout(pad=0.5)\n\n        plt.show()\n\n    def _plotVoltageDomain(self, subset, qu, step):\n\n        # Voltage-domain plots\n        # These should show linearity of velocity/acceleration data with voltage\n        # X-axis is not raw voltage, but rather 'portion of voltage corresponding to vel/acc'\n        # Both plots should be straight lines through the origin\n        # Fit lines will be straight lines through the origin by construction; data should match fit\n\n        vel = np.concatenate((qu[PREPARED_VEL_COL], step[PREPARED_VEL_COL]))\n        accel = np.concatenate((qu[PREPARED_ACC_COL], step[PREPARED_ACC_COL]))\n        volts = np.concatenate((qu[PREPARED_V_COL], step[PREPARED_V_COL]))\n        time = np.concatenate((qu[PREPARED_TM_COL], step[PREPARED_TM_COL]))\n\n        ks = self.ks.get()\n        kv = self.kv.get()\n        ka = self.ka.get()\n        r_square = self.r_square.get()\n\n        kcos = self.kcos.get()\n        kg = self.kg.get()\n\n        plt.figure(subset + \" Voltage-Domain Plots\")\n\n        # quasistatic vel vs. 
vel-causing voltage\n ax = plt.subplot(211)\n ax.set_xlabel(\"Velocity-Portion Voltage\")\n ax.set_ylabel(\"Velocity\")\n ax.set_title(\"Quasistatic velocity vs velocity-portion voltage\")\n\n test = Tests(self.test.get())\n if test == Tests.ELEVATOR:\n plt.scatter(\n qu[PREPARED_V_COL]\n - kg\n - ks * np.sign(qu[PREPARED_VEL_COL])\n - ka * qu[PREPARED_ACC_COL],\n qu[PREPARED_VEL_COL],\n marker=\".\",\n c=\"#000000\",\n )\n elif test == Tests.ARM:\n plt.scatter(\n qu[PREPARED_V_COL]\n - ks * np.sign(qu[PREPARED_VEL_COL])\n - ka * qu[PREPARED_ACC_COL]\n - kcos * qu[PREPARED_COS_COL],\n qu[PREPARED_VEL_COL],\n marker=\".\",\n c=\"#000000\",\n )\n else:\n plt.scatter(\n qu[PREPARED_V_COL]\n - ks * np.sign(qu[PREPARED_VEL_COL])\n - ka * qu[PREPARED_ACC_COL],\n qu[PREPARED_VEL_COL],\n marker=\".\",\n c=\"#000000\",\n )\n\n # show fit line from multiple regression\n y = np.linspace(np.min(qu[PREPARED_VEL_COL]), np.max(qu[PREPARED_VEL_COL]))\n plt.plot(kv * y, y)\n\n # dynamic accel vs. accel-causing voltage\n ax = plt.subplot(212)\n ax.set_xlabel(\"Acceleration-Portion Voltage\")\n ax.set_ylabel(\"Acceleration\")\n ax.set_title(\"Dynamic acceleration vs acceleration-portion voltage\")\n\n if test == Tests.ELEVATOR:\n plt.scatter(\n step[PREPARED_V_COL]\n - kg\n - ks * np.sign(step[PREPARED_VEL_COL])\n - kv * step[PREPARED_VEL_COL],\n step[PREPARED_ACC_COL],\n marker=\".\",\n c=\"#000000\",\n )\n elif test == Tests.ARM:\n plt.scatter(\n step[PREPARED_V_COL]\n - ks * np.sign(step[PREPARED_VEL_COL])\n - kv * step[PREPARED_VEL_COL]\n - kcos * step[PREPARED_COS_COL],\n step[PREPARED_ACC_COL],\n marker=\".\",\n c=\"#000000\",\n )\n else:\n plt.scatter(\n step[PREPARED_V_COL]\n - ks * np.sign(step[PREPARED_VEL_COL])\n - kv * step[PREPARED_VEL_COL],\n step[PREPARED_ACC_COL],\n marker=\".\",\n c=\"#000000\",\n )\n\n # show fit line from multiple regression\n y = np.linspace(np.min(step[PREPARED_ACC_COL]), np.max(step[PREPARED_ACC_COL]))\n plt.plot(ka * y, y)\n\n # Fix overlapping axis labels\n plt.tight_layout(pad=0.5)\n\n # Supplemental graphs (Elevator and Arm)\n if test == Tests.ELEVATOR or test == Tests.ARM:\n ax = plt.subplot(111)\n # show fit line from multiple regression\n y = np.linspace(np.min(qu[PREPARED_POS_COL]), np.max(qu[PREPARED_POS_COL]))\n\n if test == Tests.ELEVATOR:\n ax.set_xlabel(\"Friction-loss voltage\")\n ax.set_ylabel(\"Velocity\")\n ax.set_title(\"Quasistatic velocity vs friction-loss voltage\")\n plt.scatter(\n qu[PREPARED_V_COL]\n - kg\n - kv * qu[PREPARED_VEL_COL]\n - ka * qu[PREPARED_ACC_COL],\n qu[PREPARED_VEL_COL],\n marker=\".\",\n c=\"#000000\",\n )\n plt.plot(ks * np.sign(y), y)\n else:\n ax.set_xlabel(\"Gravity (cosine)-Portion Voltage\")\n ax.set_ylabel(\"Angle\")\n ax.set_title(\"Quasistatic angle vs gravity-portion voltage\")\n plt.scatter(\n qu[PREPARED_V_COL]\n - ks * np.sign(qu[PREPARED_VEL_COL])\n - kv * qu[PREPARED_VEL_COL]\n - ka * qu[PREPARED_ACC_COL],\n qu[PREPARED_POS_COL],\n marker=\".\",\n c=\"#000000\",\n )\n units = Units(self.units.get())\n if units == Units.DEGREES:\n plt.plot(kcos * np.cos(np.radians(y)), y)\n elif units == Units.RADIANS:\n plt.plot(kcos * np.cos(y), y)\n else:\n plt.plot(kcos * np.cos(math.pi * 2 * y), y)\n plt.tight_layout(pad=0.5)\n\n plt.show()\n\n def _plot3D(self, subset, qu, step):\n\n vel = np.concatenate((qu[PREPARED_VEL_COL], step[PREPARED_VEL_COL]))\n accel = np.concatenate((qu[PREPARED_ACC_COL], step[PREPARED_ACC_COL]))\n volts = np.concatenate((qu[PREPARED_V_COL], step[PREPARED_V_COL]))\n time = 
np.concatenate((qu[PREPARED_TM_COL], step[PREPARED_TM_COL]))\n\n ks = self.ks.get()\n kv = self.kv.get()\n ka = self.ka.get()\n kcos = self.kcos.get()\n kg = self.kg.get()\n r_square = self.r_square.get()\n\n # Interactive 3d plot of voltage over entire vel-accel plane\n # Really cool, not really any more diagnostically-useful than prior plots but worth seeing\n plt.figure(subset + \" 3D Vel-Accel Plane Plot\")\n\n ax = plt.subplot(111, projection=\"3d\")\n\n # 3D scatterplot\n ax.set_xlabel(\"Velocity\")\n ax.set_ylabel(\"Acceleration\")\n ax.set_zlabel(\"Voltage\")\n\n # Show best fit plane\n vv, aa = np.meshgrid(\n np.linspace(np.min(vel), np.max(vel)),\n np.linspace(np.min(accel), np.max(accel)),\n )\n\n test = Tests(self.test.get())\n if test == Tests.ELEVATOR:\n ax.set_title(\"Friction-adjusted Voltage vs velocity and acceleration\")\n ax.scatter(vel, accel, volts - ks * np.sign(vel))\n ax.plot_surface(vv, aa, kg + kv * vv + ka * aa, alpha=0.2, color=[0, 1, 1])\n elif test == Tests.ARM:\n cos = np.concatenate((qu[PREPARED_COS_COL], step[PREPARED_COS_COL]))\n ax.set_title(\"Cosine-adjusted Voltage vs velocity and acceleration\")\n ax.scatter(vel, accel, volts - kcos * cos)\n ax.plot_surface(\n vv, aa, ks * np.sign(vv) + kv * vv + ka * aa, alpha=0.2, color=[0, 1, 1]\n )\n else:\n ax.set_title(\"Voltage vs velocity and acceleration\")\n ax.scatter(vel, accel, volts)\n ax.plot_surface(\n vv, aa, ks * np.sign(vv) + kv * vv + ka * aa, alpha=0.2, color=[0, 1, 1]\n )\n\n plt.show()\n\n def calcFit(self, qu, step, test):\n vel = np.concatenate((qu[PREPARED_VEL_COL], step[PREPARED_VEL_COL]))\n accel = np.concatenate((qu[PREPARED_ACC_COL], step[PREPARED_ACC_COL]))\n volts = np.concatenate((qu[PREPARED_V_COL], step[PREPARED_V_COL]))\n time = np.concatenate((qu[PREPARED_TM_COL], step[PREPARED_TM_COL]))\n\n test = Tests(test)\n if test == Tests.ELEVATOR:\n fit = self.ols(vel, accel, np.ones(vel.size), volts)\n ks, kv, ka, kg = fit.params\n rsquare = fit.rsquared\n return kg, ks, kv, ka, rsquare\n elif test == Tests.ARM:\n cos = np.concatenate((qu[PREPARED_COS_COL], step[PREPARED_COS_COL]))\n fit = self.ols(vel, accel, cos, volts)\n ks, kv, ka, kcos = fit.params\n rsquare = fit.rsquared\n return ks, kv, ka, kcos, rsquare\n else:\n fit = self.ols(vel, accel, None, volts)\n ks, kv, ka = fit.params\n rsquare = fit.rsquared\n return ks, kv, ka, rsquare\n\n def _calcGainsPos(self, kv, ka, qp, qv, effort, period, position_delay):\n\n # If acceleration requires no effort, velocity becomes an input for position\n # control. We choose an appropriate model in this case to avoid numerical\n # instabilities in LQR.\n if ka > 1e-7:\n A = np.array([[0, 1], [0, -kv / ka]])\n B = np.array([[0], [1 / ka]])\n C = np.array([[1, 0]])\n D = np.array([[0]])\n\n q = [qp, qv] # units and units/s acceptable errors\n r = [effort] # V acceptable actuation effort\n else:\n A = np.array([[0]])\n B = np.array([[1]])\n C = np.array([[1]])\n D = np.array([[0]])\n\n q = [qp] # units acceptable error\n r = [qv] # units/s acceptable error\n sys = cnt.ss(A, B, C, D)\n dsys = sys.sample(period)\n\n # Assign Q and R matrices according to Bryson's rule [1]. 
The elements\n # of q and r are tunable by the user.\n #\n # [1] 'Bryson's rule' in\n # https://file.tavsys.net/control/state-space-guide.pdf\n Q = np.diag(1.0 / np.square(q))\n R = np.diag(1.0 / np.square(r))\n K = frccnt.lqr(dsys, Q, R)\n\n if position_delay > 0:\n # This corrects the gain to compensate for measurement delay, which\n # can be quite large as a result of filtering for some motor\n # controller and sensor combinations. Note that this will result in\n # an overly conservative (i.e. non-optimal) gain, because we need to\n # have a time-varying control gain to give the system an initial kick\n # in the right direction. The state will converge to zero and the\n # controller gain will converge to the steady-state one the tool outputs.\n #\n # See E.4.2 in\n # https://file.tavsys.net/control/controls-engineering-in-frc.pdf\n delay_in_seconds = position_delay / 1000 # ms -> s\n K = K @ np.linalg.matrix_power(\n dsys.A - dsys.B @ K, round(delay_in_seconds / period)\n )\n\n # With the alternate model, `kp = kv * K[0, 0]` is used because the gain\n # produced by LQR is for velocity. We can use the feedforward equation\n # `u = kv * v` to convert velocity to voltage. `kd = 0` because velocity\n # was an input; we don't need feedback control to command it.\n if ka > 1e-7:\n kp = K[0, 0]\n kd = K[0, 1]\n else:\n kp = kv * K[0, 0]\n kd = 0\n\n return kp, kd\n\n def _calcGainsVel(self, kv, ka, qv, effort, period, velocity_delay):\n\n # If acceleration for velocity control requires no effort, the feedback\n # control gains approach zero. We special-case it here because numerical\n # instabilities arise in LQR otherwise.\n if ka < 1e-7:\n return 0, 0\n\n A = np.array([[-kv / ka]])\n B = np.array([[1 / ka]])\n C = np.array([[1]])\n D = np.array([[0]])\n sys = cnt.ss(A, B, C, D)\n dsys = sys.sample(period)\n\n # Assign Q and R matrices according to Bryson's rule [1]. The elements\n # of q and r are tunable by the user.\n #\n # [1] 'Bryson's rule' in\n # https://file.tavsys.net/control/state-space-guide.pdf\n q = [qv] # units/s acceptable error\n r = [effort] # V acceptable actuation effort\n Q = np.diag(1.0 / np.square(q))\n R = np.diag(1.0 / np.square(r))\n K = frccnt.lqr(dsys, Q, R)\n\n if velocity_delay > 0:\n # This corrects the gain to compensate for measurement delay, which\n # can be quite large as a result of filtering for some motor\n # controller and sensor combinations. Note that this will result in\n # an overly conservative (i.e. non-optimal) gain, because we need to\n # have a time-varying control gain to give the system an initial kick\n # in the right direction. The state will converge to zero and the\n # controller gain will converge to the steady-state one the tool outputs.\n #\n # See E.4.2 in\n # https://file.tavsys.net/control/controls-engineering-in-frc.pdf\n delay_in_seconds = velocity_delay / 1000 # ms -> s\n K = K @ np.linalg.matrix_power(\n dsys.A - dsys.B @ K, round(delay_in_seconds / period)\n )\n\n kp = K[0, 0]\n kd = 0\n\n return kp, kd\n\n\ndef main(dir):\n\n analyzer = Analyzer(dir)\n\n analyzer.mainGUI.title(\"FRC Drive Characterization Tool\")\n\n analyzer.configure_gui()\n analyzer.mainGUI.mainloop()\n\n\nif __name__ == \"__main__\":\n main(os.getcwd())\n"
] | [
[
"numpy.radians",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.square",
"matplotlib.pyplot.tight_layout",
"numpy.ceil",
"matplotlib.pyplot.subplot",
"numpy.copysign",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.floor",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.scatter",
"matplotlib.use",
"numpy.cos",
"numpy.ones",
"numpy.sign",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
trallard/ChooseViz | [
"07a3a0b6318b9480720705a9d31d2d475fba3762"
] | [
"plots/boxplot-1d/py/plot.py"
] | [
"import matplotlib.pyplot as plt\nimport pandas as pd\n\ndata = pd.read_csv('../../../data/data.csv')\n\nfig, ax = plt.subplots(1)\nbp = ax.boxplot(data['Height'],\n\t whis=1.5,\n\t showmeans=True)\nfig.savefig('plot.png')"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
fastyangmh/SeriesBAGAN | [
"c8d4a72fdeabb42697b03b8012209314a1f3f66c"
] | [
"data_preparation.py"
] | [
"#import\nfrom argparse import Namespace\nfrom os.path import join\nfrom glob import glob\nimport pandas as pd\nfrom FlowCal.io import FCSData\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import Dataset, DataLoader\nfrom typing import TypeVar\nfrom pytorch_lightning import LightningDataModule\nfrom typing import Optional, Union, List, Dict\nimport torch\nimport random\n\nT_co = TypeVar('T_co', covariant=True)\n\n\n#def\ndef create_datamodule(project_parameters):\n return MyLightningDataModule(root=project_parameters.root,\n classes=project_parameters.classes,\n val_size=project_parameters.val_size,\n batch_size=project_parameters.batch_size,\n num_workers=project_parameters.num_workers,\n device=project_parameters.device,\n max_samples=project_parameters.max_samples)\n\n\n#class\nclass MyDataset(Dataset):\n def __init__(self, data, label) -> None:\n super().__init__()\n self.data = data\n self.label = label\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index) -> T_co:\n # the data type of sample is float32\n sample = self.data[index]\n #convert the value of sample to 0~1\n #NOTE: np.finfo(dtype=np.float32).max = 3.4028235e+38\n #sample = sample / np.finfo(dtype=np.float32).max\n sample = (sample - sample.min()) / (sample.max() - sample.min())\n target = self.label[index]\n #the shape of sample is (31,)\n #the shape of target is ()\n return sample, target\n\n\nclass MyLightningDataModule(LightningDataModule):\n def __init__(self, root, classes, val_size, batch_size, num_workers,\n device, max_samples):\n super().__init__()\n self.root = root\n self.classes = classes\n self.class_to_idx = {k: v for v, k in enumerate(self.classes)}\n self.val_size = val_size\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.pin_memory = device == 'cuda' and torch.cuda.is_available()\n self.max_samples = max_samples\n\n def prepare_data(self) -> None:\n data = []\n label = []\n files = sorted(glob(join(self.root, 'raw_fcs/*/*.fcs')))\n df = pd.read_excel(join(self.root, 'EU_label.xlsx'))\n use_indices = pd.read_excel(join(self.root,\n 'EU_marker_channel_mapping.xlsx'),\n usecols=['use'])\n use_indices = np.where(use_indices.values == 1)[0].tolist()\n for f in files:\n data.append(np.array(FCSData(infile=f)[:, use_indices]))\n l = df.loc[df.file_flow_id == f.split('/')[3], 'label'].item()\n l = np.zeros(shape=len(data[-1]),\n dtype=np.int16) + self.class_to_idx[l]\n label.append(l)\n data = np.concatenate(data)\n label = np.concatenate(label)\n if self.max_samples is not None:\n index = random.sample(population=range(len(data)),\n k=self.max_samples)\n data = data[index]\n label = label[index]\n self.data = data\n self.label = label\n\n def setup(self, stage: Optional[str] = None) -> None:\n x_train, x_val, y_train, y_val = train_test_split(\n self.data, self.label, test_size=self.val_size)\n self.train_dataset = MyDataset(data=x_train, label=y_train)\n self.val_dataset = MyDataset(data=x_val, label=y_val)\n\n def train_dataloader(\n self\n ) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:\n return DataLoader(dataset=self.train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory)\n\n def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n return DataLoader(dataset=self.val_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory)\n\n def test_dataloader(self) -> 
Union[DataLoader, List[DataLoader]]:\n return DataLoader(dataset=self.val_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory)\n\n\n#class\n\nif __name__ == '__main__':\n #project_parameters\n project_parameters = Namespace(\n **{\n 'root': 'data/FCS_data/',\n 'classes': ['Healthy', 'Sick'],\n 'batch_size': 32,\n 'val_size': 0.2,\n 'num_workers': 0,\n 'device': 'cpu',\n 'max_samples': None\n })\n\n # create datamodule\n datamodule = create_datamodule(project_parameters=project_parameters)\n\n # prepare data\n datamodule.prepare_data()\n\n # set up data\n datamodule.setup()\n\n # get train, validation, test dataset\n train_dataset = datamodule.train_dataset\n val_dataset = datamodule.val_dataset\n\n # get the first sample and target in the train dataset\n x, y = train_dataset[0]\n\n # display the dimension of sample and target\n print('the dimension of sample: {}'.format(x.shape))\n print('the dimension of target: {}'.format(y.shape))\n"
] | [
[
"torch.utils.data.DataLoader",
"sklearn.model_selection.train_test_split",
"numpy.concatenate",
"torch.cuda.is_available",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
j6mes/NeuralDB | [
"d912c4d3ccecb093dd8ad2ca9c4724d89dd89115"
] | [
"modelling/src/neuraldb/final_scoring.py"
] | [
"#\n# Copyright (c) 2021 Facebook, Inc. and its affiliates.\n#\n# This file is part of NeuralDB.\n# See https://github.com/facebookresearch/NeuralDB for further info.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport glob\nimport json\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport pandas as pd\n\nfrom neuraldb.evaluation.scoring_functions import f1\n\n\ndef load_experiment(path):\n\n running_score = defaultdict(lambda: defaultdict(int))\n running_count = defaultdict(lambda: defaultdict(int))\n\n print(path)\n with open(path) as f:\n for line in f:\n instance = json.loads(line)\n actual = instance[\"actual\"]\n prediction = instance[\"prediction\"]\n\n local_score = f1(set(actual), set(prediction))\n\n # relation = instance[\"metadata\"][\"relation\"]\n # running_score[\"relation\"][relation] += local_score\n # running_count[\"relation\"][relation] += 1\n\n qtype = instance[\"metadata\"][\"type\"]\n if qtype in {\"argmin\", \"argmax\", \"min\", \"max\"}:\n qtype = \"minmax\"\n running_score[\"type\"][qtype] += local_score\n running_count[\"type\"][qtype] += 1\n\n running_score[\"all\"][\"\"] += local_score\n running_count[\"all\"][\"\"] += 1\n\n scores = {}\n for k, v in running_score.items():\n for attr, val in v.items():\n score = (\n running_score[k][attr] / running_count[k][attr]\n if running_count[k][attr]\n else 0\n )\n print(f\"Running score: {k}\\t{attr}\\t\\t{score}\")\n scores[\"_\".join([k, attr])] = (\n running_score[k][attr] / running_count[k][attr]\n if running_count[k][attr]\n else 0\n )\n\n return scores\n\n\nif __name__ == \"__main__\":\n ndb_predictions = glob.glob(\n \"consolidated/work/v2.4_25/**/predictions.jsonl\", recursive=True\n )\n all_experiments = []\n for prediction in ndb_predictions:\n print(prediction)\n\n experiment = OrderedDict()\n\n for element in prediction.split(\"/\"):\n if \",\" in element:\n for kvp in element.split(\",\"):\n k, v = kvp.split(\"=\", maxsplit=1)\n experiment[k] = v\n elif \"-\" in element:\n for kvp in element.split(\",\"):\n k, v = kvp.split(\"-\", maxsplit=1)\n experiment[k] = v\n\n # experiment[\"ssg\"] = prediction.replace(\".jsonl\", \"\").rsplit(\"_\", maxsplit=1)[1]\n experiment[\"dataset\"] = prediction.split(\"/\")[2]\n if \"retriever\" not in experiment:\n experiment[\"retriever\"] = \"\"\n experiment[\"path\"] = prediction\n all_experiments.append(experiment)\n\n print(\"Reading by experiment: \\n\\n\\n\")\n for expt in all_experiments:\n expt.update(load_experiment(expt[\"path\"]))\n del expt[\"path\"]\n\n frame = pd.DataFrame(all_experiments)\n frame[frame.select_dtypes(include=[\"number\"]).columns] *= 100\n pd.set_option(\"display.width\", 1000)\n pd.set_option(\"display.max_columns\", None)\n\n aggr = {\"all_\": [np.mean, np.std]}\n aggr.update({k: [np.mean] for k in frame.columns if \"type\" in k})\n pt = pd.pivot_table(\n frame, index=[\"model\", \"generator\", \"retriever\", \"lr\", \"steps\"], aggfunc=aggr\n )\n print(pt)\n"
] | [
[
"pandas.set_option",
"pandas.DataFrame",
"pandas.pivot_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
jjmata/robosat | [
"6b38bcf5cbf13bf79c06624d30600df12cfdd486"
] | [
"robosat/datasets.py"
] | [
"\"\"\"PyTorch-compatible datasets.\n\nGuaranteed to implement `__len__`, and `__getitem__`.\n\nSee: http://pytorch.org/docs/0.3.1/data.html\n\"\"\"\n\nimport torch\nfrom PIL import Image\nimport torch.utils.data\n\nfrom robosat.tiles import tiles_from_slippy_map, buffer_tile_image\n\n\n# Single Slippy Map directory structure\nclass SlippyMapTiles(torch.utils.data.Dataset):\n \"\"\"Dataset for images stored in slippy map format.\n \"\"\"\n\n def __init__(self, root, transform=None):\n super().__init__()\n\n self.tiles = []\n self.transform = transform\n\n self.tiles = [(tile, path) for tile, path in tiles_from_slippy_map(root)]\n self.tiles.sort(key=lambda tile: tile[0])\n\n def __len__(self):\n return len(self.tiles)\n\n def __getitem__(self, i):\n tile, path = self.tiles[i]\n image = Image.open(path)\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image, tile\n\n\n# Multiple Slippy Map directories.\n# Think: one with images, one with masks, one with rasterized traces.\nclass SlippyMapTilesConcatenation(torch.utils.data.Dataset):\n \"\"\"Dataset to concate multiple input images stored in slippy map format.\n \"\"\"\n\n def __init__(self, inputs, target, joint_transform=None):\n super().__init__()\n\n # No transformations in the `SlippyMapTiles` instead joint transformations in getitem\n self.joint_transform = joint_transform\n\n self.inputs = [SlippyMapTiles(inp) for inp in inputs]\n self.target = SlippyMapTiles(target)\n\n assert len(set([len(dataset) for dataset in self.inputs])) == 1, \"same number of tiles in all inputs\"\n assert len(self.target) == len(self.inputs[0]), \"same number of tiles in inputs and target\"\n\n def __len__(self):\n return len(self.target)\n\n def __getitem__(self, i):\n # at this point all transformations are applied and we expect to work with raw tensors\n inputs = [dataset[i] for dataset in self.inputs]\n\n images = [image for image, _ in inputs]\n tiles = [tile for _, tile in inputs]\n\n mask, mask_tile = self.target[i]\n\n assert len(set(tiles)) == 1, \"all images are for the same tile\"\n assert tiles[0] == mask_tile, \"image tile is the same as mask tile\"\n\n if self.joint_transform is not None:\n images, mask = self.joint_transform(images, mask)\n\n return torch.cat(images, dim=0), mask, tiles\n\n\n# Todo: once we have the SlippyMapDataset this dataset should wrap\n# it adding buffer and unbuffer glue on top of the raw tile dataset.\nclass BufferedSlippyMapDirectory(torch.utils.data.Dataset):\n \"\"\"Dataset for buffered slippy map tiles with overlap.\n \"\"\"\n\n def __init__(self, root, transform=None, size=512, overlap=32):\n \"\"\"\n Args:\n root: the slippy map directory root with a `z/x/y.png` sub-structure.\n transform: the transformation to run on the buffered tile.\n size: the Slippy Map tile size in pixels\n overlap: the tile border to add on every side; in pixel.\n\n Note:\n The overlap must not span multiple tiles.\n\n Use `unbuffer` to get back the original tile.\n \"\"\"\n\n super().__init__()\n\n assert overlap >= 0\n assert size >= 256\n\n self.transform = transform\n self.size = size\n self.overlap = overlap\n self.tiles = list(tiles_from_slippy_map(root))\n\n def __len__(self):\n return len(self.tiles)\n\n def __getitem__(self, i):\n tile, path = self.tiles[i]\n image = buffer_tile_image(tile, self.tiles, overlap=self.overlap, tile_size=self.size)\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image, torch.IntTensor([tile.x, tile.y, tile.z])\n\n def unbuffer(self, probs):\n 
\"\"\"Removes borders from segmentation probabilities added to the original tile image.\n\n Args:\n probs: the segmentation probability mask to remove buffered borders.\n\n Returns:\n The probability mask with the original tile's dimensions without added overlap borders.\n \"\"\"\n\n o = self.overlap\n _, x, y = probs.shape\n\n return probs[:, o : x - o, o : y - o]\n"
] | [
[
"torch.IntTensor",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
asahtik/manus-core | [
"f62f53cb604cf71411bde40d2f717e7bdc8b2a27"
] | [
"python/manus/__init__.py"
] | [
"\nimport manus.messages as messages\n\nimport numpy as np\n\nNAME = 'Manus'\nVERSION = 'N/A'\n\ntry:\n with open('/usr/share/manus/version', 'r') as f:\n VERSION = f.read()\nexcept IOError:\n pass\n\nclass MoveTo(object):\n def __init__(self, location, rotation = None, grip=0, speed=1.0):\n self.location = location\n self.rotation = rotation\n self.grip = grip\n self.speed = speed\n\n def generate(self, manipulator):\n seg = messages.TrajectorySegment(gripper=self.grip, required=True, speed= self.speed)\n seg.frame.origin = messages.Point3D(self.location[0], self.location[1], self.location[2])\n if self.rotation is None:\n seg.rotation = False\n else:\n seg.frame.rotation = messages.Rotation3D(self.rotation[0], self.rotation[1], self.rotation[2])\n return seg\n\nclass MoveJoints(object):\n def __init__(self, goal, speed=1.0):\n self.goal = goal\n if not speed is list:\n self.speed = [speed] * len(goal)\n else:\n self.speed = speed\n\n def generate(self, manipulator):\n seg = messages.PlanSegment()\n seg.joints = [messages.JointCommand(j, s) for j, s in zip(self.goal, self.speed)]\n return seg\n\nclass Manipulator(object):\n\n def __init__(self, client, name):\n self.name = name\n self.state = None\n self._client = client\n self._listeners = []\n self._description = messages.ManipulatorDescriptionSubscriber(client, \"%s.description\" % name, self._description_callback)\n self._state = messages.ManipulatorStateSubscriber(client, \"%s.state\" % name, self._state_callback)\n self._move = messages.PlanPublisher(client, \"%s.plan\" % name)\n self._planner = messages.TrajectoryPublisher(client, \"%s.trajectory\" % name)\n self._planstate = messages.PlanStateSubscriber(client, \"%s.planstate\" % name, self._planstate_callback)\n\n def listen(self, listener):\n self._listeners.append(listener)\n\n def unlisten(self, listener):\n try:\n self._listeners.remove(listener)\n except ValueError:\n pass\n\n @property\n def description(self):\n return self._description_data\n\n def move_safe(self, identifier='safe'):\n plan = messages.Plan()\n plan.identifier = identifier\n segment = messages.PlanSegment()\n plan.segments.append(segment)\n self._move.send(plan)\n \n def move(self, identifier, states):\n plan = messages.Plan()\n plan.identifier = identifier\n plan.segments = [s.generate(self) for s in states]\n self._move.send(plan)\n\n def move_joint(self, joint, goal, speed = 1.0, identifier='move joint'):\n segment = self._state_to_segment()\n segment.joints[joint].speed = speed\n segment.joints[joint].goal = goal\n plan = messages.Plan(identifier = identifier)\n plan.segments.append(segment)\n self._move.send(plan)\n self.state.joints[joint].goal = goal\n\n def trajectory(self, identifier, goals):\n msg = messages.Trajectory()\n msg.identifier = identifier\n msg.segments = [s.generate(self) for s in goals]\n self._planner.send(msg)\n\n def _state_callback(self, state):\n self.state = state\n for s in self._listeners:\n s.on_manipulator_state(self, state)\n\n def _description_callback(self, description):\n self._description_data = description\n\n def _state_to_segment(self):\n segment = messages.PlanSegment()\n for j in self.state.joints:\n segment.joints.append(messages.JointCommand(j.goal, j.speed))\n return segment\n\n def _planstate_callback(self, state):\n for s in self._listeners:\n s.on_planner_state(self, state)\n\n def transform(self, joint):\n origin = np.identity(4)\n if self.description is None or joint < 0 or joint >= len(self.state.joints):\n return origin\n for j in xrange(0, joint+1):\n 
tx, ty, tz, rr, rp, ry = \\\n                self._description_data.joints[j].tx, \\\n                self._description_data.joints[j].ty, \\\n                self._description_data.joints[j].tz, \\\n                self._description_data.joints[j].rr, \\\n                self._description_data.joints[j].rp, \\\n                self._description_data.joints[j].ry\n\n            if self._description_data.joints[j].type == messages.JointType.ROTATION:\n                if self._description_data.joints[j].axis == messages.JointAxis.X:\n                    rr = self.state.joints[j].position\n                elif self._description_data.joints[j].axis == messages.JointAxis.Y:\n                    rp = self.state.joints[j].position\n                elif self._description_data.joints[j].axis == messages.JointAxis.Z:\n                    ry = self.state.joints[j].position\n            elif self._description_data.joints[j].type == messages.JointType.TRANSLATION:\n                if self._description_data.joints[j].axis == messages.JointAxis.X:\n                    tx = self.state.joints[j].position\n                elif self._description_data.joints[j].axis == messages.JointAxis.Y:\n                    ty = self.state.joints[j].position\n                elif self._description_data.joints[j].axis == messages.JointAxis.Z:\n                    tz = self.state.joints[j].position\n\n            # sine and cosine of the joint roll (rr), pitch (rp) and yaw (ry)\n            sg = np.sin(rr)\n            cg = np.cos(rr)\n            sb = np.sin(rp)\n            cb = np.cos(rp)\n            sa = np.sin(ry)\n            ca = np.cos(ry)\n\n            # homogeneous roll-pitch-yaw transform of this joint\n            transform = np.array(( \\\n                (ca * cb, ca * sb * sg - sa * cg, ca * sb * cg + sa * sg, tx), \\\n                (sa * cb, sa * sb * sg + ca * cg, sa * sb * cg - ca * sg, ty), \\\n                (-sb, cb * sg, cb * cg, tz), \\\n                (0, 0, 0, 1) \\\n            ))\n\n            origin = np.matmul(transform, origin)\n\n        return origin\n"
] | [
[
"numpy.identity",
"numpy.cos",
"numpy.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
philstenning/openCV-face | [
"c62dd5b1bdf430b8e4da7ccfc3b11083238ab4de"
] | [
"headless_road.py"
] | [
"import numpy as np\nimport cv2\nimport time\n# from simple_pid import PID\n\ncap = cv2.VideoCapture(0)\n# cap = cv2.VideoCapture('http://192.168.55.6:8080/video')\n\n# set res of camera\nsettings = {\n \"window_x\": 320,\n \"window_y\": 240,\n \"crop_window_height\": 80,\n \"contrast_high\": 255,\n \"contrast_low\": 160,\n \"contrast_auto\": True,\n \"debug_mode\": True,\n \"display_on_screen\": False,\n \"follow_nearest_to_center\": True\n\n}\n\ndata = np.zeros(4, dtype=int)\n\n# contrast_pid = PID(1, .1, .1, setpoint=1)\nprint(cv2.useOptimized())\n\n# do not remove used in the trackbar control.\n\n\ndef nothing(x):\n pass\n\n\ncap.set(3, settings['window_x'])\ncap.set(4, settings['window_y'])\ntime.sleep(2)\n\n# create variables from settings needed at runtime.\ncontrast_low = settings['contrast_low']\nbox_2_position = settings['window_y'] - 80\n\n\n# variables for the frame counter\nframe_counter: int = 0\nstart_time = time.time()\nfps: int = 0\n\n\ndef set_contrast_low(new_value):\n global contrast_low\n print('contrast low: {}'.format(contrast_low))\n contrast_low = contrast_low + int(new_value)\n # we have hit the bottom go back to top and come down again.\n if contrast_low <= 20:\n contrast_low = 255\n\n\ndef create_crop_box(position):\n b = position + 80\n c = 0\n d = 360\n center = 0\n contore_count = 0\n\n cropped_frame = frame[position:b, c:d]\n\n # add the filters\n gray = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n\n # convert to a binary filter\n ret, processed_cropped_image = cv2.threshold(\n blur, contrast_low, settings['contrast_high'], cv2.THRESH_BINARY)\n\n kernel = np.ones((5, 5), np.uint8)\n crop_color = cv2.morphologyEx(\n processed_cropped_image, cv2.MORPH_OPEN, kernel)\n\n # create box at top and bottom so we get a nice square to process against.\n cv2.rectangle(crop_color, (0, 0), (d, 10), (0, 0, 0), -1)\n cv2.rectangle(crop_color, (0, 70), (d, b), (0, 0, 0), -1)\n im2, contours, hierarchy = cv2.findContours(\n crop_color, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contore_count = len(contours)\n\n if 1 <= contore_count <= 2:\n\n ###################\n # TODO: find largest contore and follow it\n # TODO: T junction\n ###################\n\n ##################################################\n # find the contore nearest to center and return it\n ##################################################\n\n # only if there is more than two contours\n if contore_count >= 2:\n if settings['follow_nearest_to_center']:\n center_0 = find_center_of_contour(contours[0])\n center_1 = find_center_of_contour(contours[1])\n\n # remove negative numbers\n width = settings['window_x']/2\n c_0 = 0\n if center_0 < width:\n c_0 = width-center_0\n else:\n c_0 = center_0 - width\n\n # find the nearest to the center.\n c_1 = 0\n if center_1 < width:\n c_1 = width-center_1\n else:\n c_1 = center_1 - width\n\n # and draw the color rectangles around them.\n if c_0 <= c_1:\n center = draw_rectangles(\n contours[0], cropped_frame, center, 'green')\n draw_rectangles(\n contours[1], cropped_frame, center)\n else:\n\n draw_rectangles(\n contours[0], cropped_frame, center)\n center = draw_rectangles(\n contours[1], cropped_frame, center, 'green')\n\n # we only have one so it's green\n else:\n center = draw_rectangles(\n contours[0], cropped_frame, center, 'green')\n\n # area = cv2.contourArea(cnt)\n # print('\\n\\narea\\n')\n # print(area)\n ##################################\n\n # we have too many contours so adjust the contrast\n elif len(contours) >= 
3:\n set_contrast_low(5)\n\n else:\n # we have no contours pull it down a lot\n # then let it increese slowly backup\n set_contrast_low(-30)\n\n return crop_color, center, contore_count\n\n\ndef find_center_of_contour(contour):\n (x, y), radius = cv2.minEnclosingCircle(contour)\n img_center = 160\n center = str(-(img_center - int(x)))\n return int(x)\n\n\ndef draw_rectangles(cnt, cropped_frame, center, color='red'):\n '''\n draws the bounding box around the contore drawn on the frame\n and returns the center point\n '''\n r_x, r_y, r_w, r_h = cv2.boundingRect(cnt)\n if color == 'green':\n cv2.rectangle(cropped_frame, (r_x, r_y),\n (r_x+r_w, r_y+r_h), (0, 255, 0), 2)\n else:\n cv2.rectangle(cropped_frame, (r_x, r_y),\n (r_x+r_w, r_y+r_h), (0, 0, 255), 2)\n\n # add center point to image\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n center = (int(x), int(y))\n cv2.circle(cropped_frame, center, 1, (67, 95, 0), 2)\n\n # write center data to screen\n img_center = 160\n res = str(-(img_center - int(x)))\n center_x, center_y = center\n # cv2.putText(cropped_frame, res, (center_x-15, center_y+20), font,\n # 1, (255, 255, 255), 2, cv2.LINE_AA)\n return center\n\n\ndef print_fps(frame, fps):\n text = 'fps:{}'.format(fps)\n # cv2.putText(frame, text, (5, 15), font,\n # 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n\n# read 45 frames and throw them away\n# just lets the camera settle before we\n# start to do any work with it\nfor x in range(45):\n ret, frame = cap.read()\n\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n frame_counter = frame_counter + 1\n\n # create a crop boxes to work from\n crop_mask_1, center_1, contore_count = create_crop_box(\n 0)\n\n # get current positions of four trackbars\n # the settings may have been updated in the previous call.\n if 1 <= contore_count <= 2:\n\n crop_mask_2, center_2, contore_count_2 = create_crop_box(\n box_2_position)\n if settings['display_on_screen']:\n cv2.imshow('crop_mask_2', crop_mask_2)\n\n ######################\n try:\n x_1, y_1 = center_1\n x_2, y_2 = center_2\n # Draw a line between the two points.\n # cv2.line(frame, center_1, (x_2, y_2+box_2_position),\n # (0, 255, 255), 1)\n c_x = int(40 + (box_2_position/2))\n c_y = 0\n if x_1 >= x_2:\n c_y = x_1 - x_2\n c_y = int(x_2 + (c_y/2))\n else:\n c_y = x_2 - x_1\n c_y = int(x_1 + (c_y/2))\n\n cv2.circle(frame, (c_y, c_x), 10, (0, 255, 0), 2)\n data[0] = x_1\n data[1] = c_y\n data[2] = x_2\n print(data)\n except:\n print('someting bad happened')\n ##################################\n\n # drive the bot\n # TODO FROM HERE\n\n ##################################\n\n # frame counter\n if (time.time() - start_time) >= 1:\n fps = frame_counter\n start_time = time.time()\n data[3] = frame_counter\n frame_counter = 0\n print_fps(frame, fps)\n\n if settings['display_on_screen']:\n cv2.imshow('frame', frame)\n cv2.imshow('crop_mask_1', crop_mask_1)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\ncap.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iamatulsingh/QnA-web-api | [
"4db0388248ce5714eaa6bc518ff0b40bb5eb9cf8"
] | [
"demo/squad_seq2seq_glove_train.py"
] | [
"from keras_question_and_answering_system.library.seq2seq_glove import Seq2SeqGloveQA\nfrom keras_question_and_answering_system.library.utility.squad import SquADDataSet\nimport numpy as np\n\n\ndef main():\n random_state = 42\n output_dir_path = './models'\n\n np.random.seed(random_state)\n data_set = SquADDataSet(data_path='./data/SQuAD/train-v1.1.json')\n\n qa = Seq2SeqGloveQA()\n qa.load_glove_model('./very_large_data')\n batch_size = 64\n epochs = 200\n history = qa.fit(data_set, model_dir_path=output_dir_path,\n batch_size=batch_size, epochs=epochs,\n random_state=random_state)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bbw7561135/Plasma-Recipes | [
"b1cf938eea1c72a2d3a2e4c25aca0d35cb426401"
] | [
"TMz/TMz_cart.py"
] | [
"#\n# Created: 01.08.2020\n# Last modified: 06.08.2020\n# @author: Luca Pezzini\n# e-mail : [email protected]\n#\n\n#\n# 2D CARTESIAN TE_z MODE ON YEE MESH (FDTD)\n# Solve the Transverse Magnetic mode equation on the xy-plane (TM_z)\n# using the Finite Difference Time Domain metod (FDTD) on the Yee lattice\n# in simple cartesian geometry. \n# Yee Grid (Staggered grid)\n# E[n-1] H[n-1/2] E[n] H[n+1/2] E[n+1]\n# E[0 1 2 3]\n# B[ 0 1 2 ]\n# The E fields are defined at every unitary space indices (i, j); instead\n# B field is defined at fractional indices (i+0.5, j+0.5) as the Yee\n# It's used the convention to shift i,j+1/2->i,j+1 & n+1/2->n solving the \n# problem of representing fractional indices.\n# Physical parameter are converted in undimensional code units.\n# The time update is done using Leapfrog time-stepping. Here, E-fields\n# i.e Ex and Ey are updated every full time-step and Bz field is updated every \n# half time-step. This is shown by three alternating for-loop updates in groups \n# of two (for E) and one (for B) spanning entire spatial grid inside a main \n# for-loop for time update spanning the entire time-grid.\n#\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import pyplot, cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef plot2D(x, y, p):\n fig = pyplot.figure(figsize=(11, 7), dpi=100)\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(x, y, p[:], rstride=1, cstride=1, cmap=cm.viridis,\n linewidth=0, antialiased=False)\n ax.view_init(30, 225)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$')\n plt.show()\n\n#\n# STEP 1: SET PARAMETER & BC!\n#\n\n# Lattice param.\nmaxITER = 100\nx_min, x_max = 0, 1\ny_min, y_max = 0, 1\nnx, ny = 50, 50 # nodes\nLx = int(abs(x_max-x_min))\nLy = int(abs(y_max-y_min))\ndeltax = Lx/(nx-1) # faces are (nodes - 1)\ndeltay = Ly/(ny-1)\ndeltat = 1/(100*math.sqrt(2)) # CFL condition\n\nprint(\"LATTICE PARAMETERS!\")\nprint(\"Numbers of Iterations:\", maxITER)\nprint(\"Time Step:\", deltat)\nprint(\"Domain Length (x, y):\", Lx, Ly)\nprint(\"Number of Nodes (x, y):\", nx, ny)\nprint(\"Increments (x, y):\", deltax, deltay)\n\n#\n# STEP 2: DEF. 
MESH GRID & PHYSICAL PARAM.!\n# giving the string ‘ij’ returns a meshgrid with matrix\n# indexing, while ‘xy’ returns a meshgrid with Cartesian indexing\n#\n\nx = np.linspace(x_min, x_max, nx)\ny = np.linspace(y_min, y_max, ny)\nxv, yv = np.meshgrid(x, y, indexing='ij')\n\n# Initialization of field matrices\nEz = np.zeros([nx, ny])\nBx = np.zeros([nx, ny])\nBy = np.zeros([nx, ny])\nEz1 = np.zeros([nx, ny])\nBx1 = np.zeros([nx, ny])\nBy1 = np.zeros([nx, ny])\n\n# Initial conditions\nEz[int((nx-1)/2),int((ny-1)/2)] = 0.001\n\n#\n# STEP 3: TIME EVOLUTION OF THE FIELD ON THE GRID!\n#\n\n# Start & End\nxs = 1\nxe = nx-1\nys = 1\nye = ny-1\n\n# Flag\nflag_plt = False\nflag_start = False\n\nprint(\"START SYSTEM EVOLUTION!\")\nfor time in range(maxITER-1):\n\n    # BEGIN : spatial update loop for the Ez field\n    #for i in range(1, nx-1):\n        #for j in range(1, ny-1):\n            #Bz1[i+1, j+1] = Bz[i+1, j+1]+deltat*((1/deltay)*(Ex[i+1, j+1]-Ex[i+1, j])-(1/deltax)*(Ey[i+1, j+1]+Ey[i, j+1]))\n    # NOTE: Ez is advanced a full step using the B field of the previous half\n    # step, so B stays half a step ahead of E (leapfrog)\n    Ez1[xs+1:xe+1, ys+1:ye+1] = Ez[xs+1:xe+1, ys+1:ye+1]+deltat*((1/deltay)*\\\n    (Bx[xs+1:xe+1, ys+1:ye+1]-Bx[xs+1:xe+1, ys:ye])-(1/deltax)*\\\n    (By[xs+1:xe+1, ys+1:ye+1]-By[xs:xe, ys+1:ye+1]))\n    # END : spatial update loop for the Ez field\n\n    # Reflective BC for E field\n    Ez1[xs,ys:ye]=Ez1[xe,ys:ye] #left-right\n    Ez1[xs:xe,ys]=Ez1[xs:xe,ye] #top-bottom\n\n    # Update E field\n    Ez = Ez1\n\n    # BEGIN : spatial update loops for the Bx and By fields\n    #for i in range(1, nx-1):\n        #for j in range(1, ny-1):\n            #Ex1[i+1, j] = Ex[i+1, j]+(deltat/deltay)*(Bz[i+1, j+1]-Bz[i+1, j])\n            #Ey1[i, j+1] = Ey[i, j+1]-(deltat/deltax)*(Bz[i+1, j+1]-Bz[i, j+1])\n    Bx1[xs+1:xe+1, ys:ye] = Bx[xs+1:xe+1, ys:ye]+(deltat/deltay)*\\\n    (Ez[xs+1:xe+1, ys+1:ye+1]-Ez[xs+1:xe+1, ys:ye])\n    By1[xs:xe, ys+1:ye+1] = By[xs:xe, ys+1:ye+1]-(deltat/deltax)*\\\n    (Ez[xs+1:xe+1, ys+1:ye+1]-Ez[xs:xe, ys+1:ye+1])\n    # END : spatial update loops for the Bx and By fields\n\n    # Reflective BC for B field\n    Bx1[xs,ys:ye]=Bx1[xe,ys:ye] #left-right\n    Bx1[xs:xe,ys]=Bx1[xs:xe,ye] #top-bottom\n    By1[xs,ys:ye]=By1[xe,ys:ye] #left-right\n    By1[xs:xe,ys]=By1[xs:xe,ye] #top-bottom\n\n    # Update B field\n    Bx = Bx1\n    By = By1\n\n    if flag_plt == False:\n        plt.figure(figsize =(12, 10))\n\n    plt.pcolor(xv, yv, Ez1)\n    plt.title(\"Map of Ez\")\n    plt.xlabel(\"y\")\n    plt.ylabel(\"x\")\n    plt.colorbar()\n\n    if flag_start == False:\n        command = input(\"Press Enter, then start.\")\n        flag_start = True\n\n    plt.pause(0.001)\n    plt.clf()\n    flag_plt=True\n\nprint(\"DONE!\")\n\n#\n# STEP 4: VISUALIZATION!\n#\n\nplot2D(xv, yv, Ez1)\n\nplt.figure(3)\nplt.contourf(xv, yv, Ez1, cmap=plt.cm.jet)\nplt.title(\"Contour of Ez\")\nplt.xlabel(\"y\")\nplt.ylabel(\"x\")\nplt.colorbar()\n\n#plt.figure(2)\n#plt.pcolor(xv, yv, Ez1)\n#plt.title(\"Map of Ez\")\n#plt.xlabel(\"y\")\n#plt.ylabel(\"x\")\n#plt.colorbar()\n\nplt.show()\n\n"
] | [
[
"matplotlib.pyplot.contourf",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.pcolor",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alz/python-skyfield | [
"c240a22600d089bab53c3e7dcd4872fb8f6d3647",
"25d86f61d85406f1eb6cce3314bdb828e22de22c"
] | [
"skyfield/functions.py",
"skyfield/tests/test_against_novas.py"
] | [
"\"\"\"Basic operations that are needed repeatedly throughout Skyfield.\"\"\"\n\nfrom numpy import arcsin, arctan2, array, cos, load, sin, sqrt\nfrom pkgutil import get_data\nfrom skyfield.constants import tau\n\ndef dots(v, u):\n \"\"\"Given one or more vectors in `v` and `u`, return their dot products.\n\n This works whether `v` and `u` each have the shape ``(3,)``, or\n whether they are each whole arrays of corresponding x, y, and z\n coordinates and have shape ``(3, N)``.\n\n \"\"\"\n return (v * u).sum(axis=0)\n\ndef length_of(xyz):\n \"\"\"Given a 3-element array `[x y z]`, return its length.\n\n The three elements can be simple scalars, or the array can be two\n dimensions and offer three whole series of x, y, and z coordinates.\n\n \"\"\"\n return sqrt((xyz * xyz).sum(axis=0))\n\ndef to_polar(xyz):\n \"\"\"Convert ``[x y z]`` into spherical coordinates ``(r, theta, phi)``.\n\n ``r`` - vector length\n ``theta`` - angle above (+) or below (-) the xy-plane\n ``phi`` - angle around the z-axis\n\n The order of the three return values is intended to match ISO 31-11.\n\n \"\"\"\n r = length_of(xyz)\n x, y, z = xyz\n theta = arcsin(z / r)\n phi = arctan2(y, x) % tau\n return r, theta, phi\n\ndef from_polar(r, theta, phi):\n \"\"\"Convert ``(r, theta, phi)`` to Cartesian coordinates ``[x y z]``.\n\n ``r`` - vector length\n ``theta`` - angle above (+) or below (-) the xy-plane\n ``phi`` - angle around the z-axis\n\n The order of the three arguments is intended to match ISO 31-11.\n\n \"\"\"\n rxy = r * cos(theta)\n return array((rxy * cos(phi), rxy * sin(phi), r * sin(theta)))\n\ndef rot_x(theta):\n c = cos(theta)\n s = sin(theta)\n return array([(1.0, 0.0, 0.0), (0.0, c, -s), (0.0, s, c)])\n\ndef rot_y(theta):\n c = cos(theta)\n s = sin(theta)\n return array([(c, 0.0, s), (0.0, 1.0, 0.0), (-s, 0.0, c)])\n\ndef rot_z(theta):\n c = cos(theta)\n s = sin(theta)\n zero = theta * 0.0\n one = zero + 1.0\n return array(((c, -s, zero), (s, c, zero), (zero, zero, one)))\n\ntry:\n from io import BytesIO\nexcept:\n from StringIO import StringIO as BytesIO\n\ndef load_bundled_npy(filename):\n \"\"\"Load a binary NumPy array file that is bundled with Skyfield.\"\"\"\n data = get_data('skyfield', 'data/{0}.npy'.format(filename))\n return load(BytesIO(data))\n",
"'Auto-generated accuracy tests vs NOVAS (see build_novas_tests.py).'\n\nfrom numpy import abs, array, einsum, max\nfrom skyfield import (earthlib, framelib, nutationlib, positionlib,\n precessionlib, starlib, timelib)\nfrom skyfield.api import Topos, load\nfrom skyfield.constants import AU_KM, AU_M\nfrom skyfield.data import hipparcos\nfrom skyfield.functions import length_of\n\nOLD_AU_KM = 149597870.691 # TODO: load from de405\nOLD_AU = AU_KM / OLD_AU_KM\n\none_second = 1.0 / 24.0 / 60.0 / 60.0\narcsecond = 1.0 / 60.0 / 60.0\nra_arcsecond = 24.0 / 360.0 / 60.0 / 60.0\nmeter = 1.0 / AU_M\n\ndef ts():\n yield load.timescale()\n\ndef compare(value, expected_value, epsilon):\n if hasattr(value, 'shape') or hasattr(expected_value, 'shape'):\n assert max(abs(value - expected_value)) <= epsilon\n else:\n assert abs(value - expected_value) <= epsilon\n\ndef de405():\n yield load('de405.bsp')\n\ndef earth():\n eph = load('de405.bsp')\n yield eph[399]\n\ndef test_calendar_date_0():\n compare(timelib.calendar_date(2440423.345833333), array((1969, 7, 20.345833333209157)), 0.0)\n\ndef test_calendar_date_1():\n compare(timelib.calendar_date(2448031.5), array((1990, 5, 19.5)), 0.0)\n\ndef test_calendar_date_2():\n compare(timelib.calendar_date(2451545.0), array((2000, 1, 1.0)), 0.0)\n\ndef test_calendar_date_3():\n compare(timelib.calendar_date(2456164.5), array((2012, 8, 24.5)), 0.0)\n\ndef test_earth_rotation_angle_date0():\n compare(earthlib.earth_rotation_angle(2440423.345833333) * 360.0, 243.3216078027496,\n 0.000001 * arcsecond)\n\ndef test_earth_rotation_angle_date1():\n compare(earthlib.earth_rotation_angle(2448031.5) * 360.0, 237.5118441792128,\n 0.000001 * arcsecond)\n\ndef test_earth_rotation_angle_date2():\n compare(earthlib.earth_rotation_angle(2451545.0) * 360.0, 280.46061837504,\n 0.000001 * arcsecond)\n\ndef test_earth_rotation_angle_date3():\n compare(earthlib.earth_rotation_angle(2456164.5) * 360.0, 333.4965831957672,\n 0.000001 * arcsecond)\n\ndef test_earth_tilt_date0(ts):\n compare(nutationlib.earth_tilt(ts.tdb(jd=2440423.345833333)),\n array((23.443240959852666, 23.445702723464045, 0.15929455696954214, 2.604727521416375, 8.862349000962691)), 0.00001 * arcsecond)\n\ndef test_earth_tilt_date1(ts):\n compare(nutationlib.earth_tilt(ts.tdb(jd=2448031.5)),\n array((23.440530953006782, 23.442178709915066, 0.7110982205507752, 11.628148141964171, 5.931924869819427)), 0.00001 * arcsecond)\n\ndef test_earth_tilt_date2(ts):\n compare(nutationlib.earth_tilt(ts.tdb(jd=2451545.0)),\n array((23.439279444444445, 23.437676833867652, -0.852016747090803, -13.931996330960066, -5.769398076465291)), 0.00001 * arcsecond)\n\ndef test_earth_tilt_date3(ts):\n compare(nutationlib.earth_tilt(ts.tdb(jd=2456164.5)),\n array((23.43763397776759, 23.43645066577372, 0.977087608170215, 15.976729533480038, -4.259923177932873)), 0.00001 * arcsecond)\n\ndef test_equation_of_the_equinoxes_complimentary_terms_date0():\n compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2440423.345833333),\n array(-1.4592438843164885e-09), 0.0000000000000001 * arcsecond)\n\ndef test_equation_of_the_equinoxes_complimentary_terms_date1():\n compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2448031.5),\n array(-9.909270679336256e-09), 0.0000000000000001 * arcsecond)\n\ndef test_equation_of_the_equinoxes_complimentary_terms_date2():\n compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2451545.0),\n array(1.021330096302465e-08), 0.0000000000000001 * arcsecond)\n\ndef 
test_equation_of_the_equinoxes_complimentary_terms_date3():\n compare(nutationlib.equation_of_the_equinoxes_complimentary_terms(2456164.5),\n array(-1.082315527387237e-08), 0.0000000000000001 * arcsecond)\n\ndef test_forward_frame_tie():\n compare(framelib.ICRS_to_J2000.dot((1.1, 1.2, 1.3)), (1.100000019790573, 1.2000001208396125, 1.2999998717098593), 1e-15)\n\ndef test_reverse_frame_tie():\n compare(framelib.ICRS_to_J2000.T.dot((1.1, 1.2, 1.3)), (1.0999999802094143, 1.1999998791603803, 1.300000128290131), 1e-15)\n\ndef test_fundamental_arguments_date0():\n compare(nutationlib.fundamental_arguments(-0.3044942961441969),\n array((-1.559784616935014, -2.8619278194907483, -2.7748368269156427, -4.947060102171707, 6.178085194718492)), 0.000000002 * arcsecond)\n\ndef test_fundamental_arguments_date1():\n compare(nutationlib.fundamental_arguments(-0.09619438740588637),\n array((-0.8532784044768771, -3.933579124091533, -5.376486844354831, -0.9485312704748627, 5.429677887938805)), 0.000000002 * arcsecond)\n\ndef test_fundamental_arguments_date2():\n compare(nutationlib.fundamental_arguments(0.0),\n array((2.355555743493879, 6.24006012692298, 1.6279050815375191, 5.198466588650503, 2.182439196615671)), 0.000000002 * arcsecond)\n\ndef test_fundamental_arguments_date3():\n compare(nutationlib.fundamental_arguments(0.12647501711156742),\n array((0.15181719486225662, 4.023151622222436, 0.10917837795937814, 1.6234303368860354, -2.086983188457769)), 0.000000002 * arcsecond)\n\ndef test_iau2000a_date0():\n compare(nutationlib.iau2000a(2440423.345833333),\n array([26047275.214163747, 88623490.00962691]), 0.001)\n\ndef test_iau2000a_date1():\n compare(nutationlib.iau2000a(2448031.5),\n array([116281481.4196417, 59319248.69819427]), 0.001)\n\ndef test_iau2000a_date2():\n compare(nutationlib.iau2000a(2451545.0),\n array([-139319963.30960065, -57693980.764652915]), 0.001)\n\ndef test_iau2000a_date3():\n compare(nutationlib.iau2000a(2456164.5),\n array([159767295.3348004, -42599231.779328726]), 0.001)\n\ndef test_julian_date_function_date0():\n compare(timelib.julian_date(-4712, 1, 1, 0.0), 37.5, 0.0)\n\ndef test_julian_date_function_date1():\n compare(timelib.julian_date(-4712, 3, 1, 0.0), 97.5, 0.0)\n\ndef test_julian_date_function_date2():\n compare(timelib.julian_date(-4712, 12, 31, 0.5), 402.5208333333333, 0.0)\n\ndef test_julian_date_function_date3():\n compare(timelib.julian_date(-241, 3, 25, 19.0), 1633120.2916666667, 0.0)\n\ndef test_julian_date_function_date4():\n compare(timelib.julian_date(530, 9, 27, 23.5), 1914908.4791666667, 0.0)\n\ndef test_julian_date_function_date5():\n compare(timelib.julian_date(1976, 3, 7, 12.5), 2442845.0208333335, 0.0)\n\ndef test_julian_date_function_date6():\n compare(timelib.julian_date(2000, 1, 1, 0.0), 2451544.5, 0.0)\n\ndef test_mean_obliquity_date0():\n compare(nutationlib.mean_obliquity(2440423.345833333),\n 84395.6674554696, 0.0) # arcseconds\n\ndef test_mean_obliquity_date1():\n compare(nutationlib.mean_obliquity(2448031.5),\n 84385.91143082442, 0.0) # arcseconds\n\ndef test_mean_obliquity_date2():\n compare(nutationlib.mean_obliquity(2451545.0),\n 84381.406, 0.0) # arcseconds\n\ndef test_mean_obliquity_date3():\n compare(nutationlib.mean_obliquity(2456164.5),\n 84375.48231996332, 0.0) # arcseconds\n\ndef test_nutation_date0(ts):\n matrix = nutationlib.compute_nutation(ts.tdb(jd=2440423.345833333))\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.0999795659425045, 1.1999568871469584, 1.3000570847072532),\n result, 1e-14)\n\ndef 
test_nutation_date1(ts):\n matrix = nutationlib.compute_nutation(ts.tdb(jd=2448031.5))\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.0999087778623433, 1.2000195046911977, 1.300059178938428),\n result, 1e-14)\n\ndef test_nutation_date2(ts):\n matrix = nutationlib.compute_nutation(ts.tdb(jd=2451545.0))\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.1001092900321017, 1.1999681897164485, 1.2999368806421698),\n result, 1e-14)\n\ndef test_nutation_date3(ts):\n matrix = nutationlib.compute_nutation(ts.tdb(jd=2456164.5))\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.0998746654010052, 1.2001050177909849, 1.3000091025381042),\n result, 1e-14)\n\ndef test_precession_date0():\n matrix = precessionlib.compute_precession(2440423.345833333)\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.1119856573552391, 1.1924703352076302, 1.296727572578774),\n result, 1e-15)\n\ndef test_precession_date1():\n matrix = precessionlib.compute_precession(2448031.5)\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.1037931405410017, 1.1976299348492718, 1.2989700697273823),\n result, 1e-15)\n\ndef test_precession_date2():\n matrix = precessionlib.compute_precession(2451545.0)\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.1, 1.1999999999999997, 1.2999999999999998),\n result, 1e-15)\n\ndef test_precession_date3():\n matrix = precessionlib.compute_precession(2456164.5)\n result = einsum('ij...,j...->i...', matrix, [1.1, 1.2, 1.3])\n compare((1.0950034772583117, 1.203103909268923, 1.3013486728367767),\n result, 1e-15)\n\ndef test_sidereal_time_on_date0():\n jd = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n compare(earthlib.sidereal_time(jd), 16.195436227057314, 1e-13)\n\ndef test_sidereal_time_with_nonzero_delta_t_on_date0():\n jd = load.timescale(delta_t=99.9).tt(jd=2440423.345833333 + 99.9 * one_second)\n compare(earthlib.sidereal_time(jd), 16.195436229760602, 1e-13)\n\ndef test_sidereal_time_on_date1():\n jd = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n compare(earthlib.sidereal_time(jd), 15.825907460288224, 1e-13)\n\ndef test_sidereal_time_with_nonzero_delta_t_on_date1():\n jd = load.timescale(delta_t=99.9).tt(jd=2448031.5 + 99.9 * one_second)\n compare(earthlib.sidereal_time(jd), 15.825907462991848, 1e-13)\n\ndef test_sidereal_time_on_date2():\n jd = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n compare(earthlib.sidereal_time(jd), 18.69737482696563, 1e-13)\n\ndef test_sidereal_time_with_nonzero_delta_t_on_date2():\n jd = load.timescale(delta_t=99.9).tt(jd=2451545.0 + 99.9 * one_second)\n compare(earthlib.sidereal_time(jd), 18.69737482966941, 1e-13)\n\ndef test_sidereal_time_on_date3():\n jd = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n compare(earthlib.sidereal_time(jd), 22.243908497165812, 1e-13)\n\ndef test_sidereal_time_with_nonzero_delta_t_on_date3():\n jd = load.timescale(delta_t=99.9).tt(jd=2456164.5 + 99.9 * one_second)\n compare(earthlib.sidereal_time(jd), 22.2439084998698, 1e-13)\n\ndef test_star_vector():\n star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,\n ra_mas_per_year=44.22, dec_mas_per_year=-11.75,\n parallax_mas=7.56, radial_km_per_s=-17.4)\n star.au_km = OLD_AU_KM\n star._compute_vectors()\n compare(star._position_au,\n (276301.52367964364, 215517.39549460335, 27281454.18783122),\n 1e3 * meter)\n compare(star._velocity_au_per_d,\n (-0.006595734315371155, 0.015163885823867606, -0.010102577482634966),\n 
1e-3 * meter) # TODO: was 1e-6 before switch to modern au\n\ndef test_refraction0():\n r = earthlib.refraction(-5, 10, 1010)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refraction1():\n r = earthlib.refraction(-5, 10, 1013.25)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refraction2():\n r = earthlib.refraction(-5, 25, 1010)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refraction3():\n r = earthlib.refraction(-5, 25, 1013.25)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refraction4():\n r = earthlib.refraction(-1, 10, 1010)\n compare(r, 0.8296919418249878, 1e-9 * arcsecond)\n\ndef test_refraction5():\n r = earthlib.refraction(-1, 10, 1013.25)\n compare(r, 0.8323617426278902, 1e-9 * arcsecond)\n\ndef test_refraction6():\n r = earthlib.refraction(-1, 25, 1010)\n compare(r, 0.7879289246190321, 1e-9 * arcsecond)\n\ndef test_refraction7():\n r = earthlib.refraction(-1, 25, 1013.25)\n compare(r, 0.7904643394754796, 1e-9 * arcsecond)\n\ndef test_refraction8():\n r = earthlib.refraction(15, 10, 1010)\n compare(r, 0.06056215494995108, 1e-9 * arcsecond)\n\ndef test_refraction9():\n r = earthlib.refraction(15, 10, 1013.25)\n compare(r, 0.06075703317132469, 1e-9 * arcsecond)\n\ndef test_refraction10():\n r = earthlib.refraction(15, 25, 1010)\n compare(r, 0.057513724331664955, 1e-9 * arcsecond)\n\ndef test_refraction11():\n r = earthlib.refraction(15, 25, 1013.25)\n compare(r, 0.057698793246593584, 1e-9 * arcsecond)\n\ndef test_refraction12():\n r = earthlib.refraction(89.95, 10, 1010)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refraction13():\n r = earthlib.refraction(89.95, 10, 1013.25)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refraction14():\n r = earthlib.refraction(89.95, 25, 1010)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refraction15():\n r = earthlib.refraction(89.95, 25, 1013.25)\n compare(r, 0.0, 1e-9 * arcsecond)\n\ndef test_refract0():\n alt = earthlib.refract(-90, 10.0, 1010.0)\n compare(alt, -90.0, 1e-9 * arcsecond)\n\ndef test_refract1():\n alt = earthlib.refract(-2, 10.0, 1010.0)\n compare(alt, -2.0, 1e-9 * arcsecond)\n\ndef test_refract2():\n alt = earthlib.refract(-1, 10.0, 1010.0)\n compare(alt, -0.34540033564054795, 1e-9 * arcsecond)\n\ndef test_refract3():\n alt = earthlib.refract(0, 10.0, 1010.0)\n compare(alt, 0.4819388815393779, 1e-9 * arcsecond)\n\ndef test_refract4():\n alt = earthlib.refract(1, 10.0, 1010.0)\n compare(alt, 1.362447444478633, 1e-9 * arcsecond)\n\ndef test_refract5():\n alt = earthlib.refract(3, 10.0, 1010.0)\n compare(alt, 3.227564692764261, 1e-9 * arcsecond)\n\ndef test_refract6():\n alt = earthlib.refract(9, 10.0, 1010.0)\n compare(alt, 9.098059272393698, 1e-9 * arcsecond)\n\ndef test_refract7():\n alt = earthlib.refract(90, 10.0, 1010.0)\n compare(alt, 90.0, 1e-9 * arcsecond)\n\ndef test_from_altaz_0(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=68.12871390985244, az_degrees=28.979244220884173)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, 56.78, 1e-9 * arcsecond)\n\ndef test_from_altaz_1(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=-17.792497521318964, az_degrees=172.51742180816711)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, -67.89, 1e-9 * 
arcsecond)\n\ndef test_from_altaz_2(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=65.8650913573598, az_degrees=34.158756360615946)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, 56.78, 1e-9 * arcsecond)\n\ndef test_from_altaz_3(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=-18.43186389552551, az_degrees=170.42969631720953)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, -67.89, 1e-9 * arcsecond)\n\ndef test_from_altaz_4(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=68.47898348962792, az_degrees=332.05109419434154)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, 56.78, 1e-9 * arcsecond)\n\ndef test_from_altaz_5(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=-17.699091955922242, az_degrees=187.12243108963492)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, -67.89, 1e-9 * arcsecond)\n\ndef test_from_altaz_6(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=41.36529829114181, az_degrees=316.19259712235026)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, 56.78, 1e-9 * arcsecond)\n\ndef test_from_altaz_7(earth):\n jd = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n usno = earth + Topos(\n '38.9215 N', '77.0669 W', elevation_m=92.0)\n a = usno.at(jd).from_altaz(alt_degrees=-29.282626410822033, az_degrees=204.1557062303077)\n ra, dec, distance = a.radec(epoch=jd)\n compare(ra.hours, 12.34, 1e-9 * arcsecond)\n compare(dec.degrees, -67.89, 1e-9 * arcsecond)\n\ndef test_ITRF_to_GCRS_conversion_on_date0():\n jd = load.timescale(delta_t=39.707).tt(jd=2440423.345833333)\n position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])\n compare(position, (0.5701172053658128, -1.5232987806096392, 1.3017400651201707), 1e-13)\n\ndef test_ITRF_to_GCRS_conversion_on_date1():\n jd = load.timescale(delta_t=57.1136).tt(jd=2448031.5)\n position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])\n compare(position, (0.41362649279562963, -1.5741081933652488, 1.3004216700893525), 1e-13)\n\ndef test_ITRF_to_GCRS_conversion_on_date2():\n jd = load.timescale(delta_t=63.8285).tt(jd=2451545.0)\n position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])\n compare(position, (1.3757008573963405, -0.8702954291925735, 1.3000126987400913), 1e-13)\n\ndef test_ITRF_to_GCRS_conversion_on_date3():\n jd = load.timescale(delta_t=66.7846).tt(jd=2456164.5)\n position = positionlib.ITRF_to_GCRS(jd, [1.1, 1.2, 1.3])\n compare(position, (1.5243574049688486, 0.5755748855663746, 1.2980940077752074), 1e-13)\n\ndef test_tdb_minus_tt_on_date0():\n result = timelib.tdb_minus_tt(2440423.345833333)\n compare(result, -0.00046798717637519603, 1e-16)\n\ndef test_tdb_minus_tt_on_date1():\n result = timelib.tdb_minus_tt(2448031.5)\n compare(result, 
0.0011585185926349208, 1e-16)\n\ndef test_tdb_minus_tt_on_date2():\n result = timelib.tdb_minus_tt(2451545.0)\n compare(result, -9.575743486095212e-05, 1e-16)\n\ndef test_tdb_minus_tt_on_date3():\n result = timelib.tdb_minus_tt(2456164.5)\n compare(result, -0.001241030165936046, 1e-16)\n\ndef test_mercury_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['mercury']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.3278115470600746, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 7.905384000977572, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 22.332364359841474, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 7.904987228126012, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 22.333433087908823, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 7.874971625095716, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 22.415970392044656, 0.0001 * arcsecond)\n\ndef test_mercury_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['mercury']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.6507044512046538, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 2.4704717994133576, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 11.2501328449305, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.4701282535729665, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 11.248550502940756, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.4616767226464757, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 11.207785493244957, 0.0001 * arcsecond)\n\ndef test_mercury_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['mercury']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.4155249674526948, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 18.13892977357885, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -24.42032494108073, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.13851035907211, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -24.420393338459686, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 18.138225455402914, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -24.418845803732086, 0.0001 * arcsecond)\n\ndef test_mercury_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['mercury']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.1264323486728112, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 9.295934662566733, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 16.68579742896488, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 9.295575039086721, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 16.687409731964937, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 9.307566088097714, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 16.631743449679668, 
0.0001 * arcsecond)\n\ndef test_mercury_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['mercury']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (1.3278115470600746, 0.6507044512046538, 1.4155249674526948, 1.1264323486728112), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (7.905384000977572, 2.4704717994133576, 18.13892977357885, 9.295934662566733), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (22.332364359841474, 11.2501328449305, -24.42032494108073, 16.68579742896488), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (7.904987228126012, 2.4701282535729665, 18.13851035907211, 9.295575039086721), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (22.333433087908823, 11.248550502940756, -24.420393338459686, 16.687409731964937), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (7.874971625095716, 2.4616767226464757, 18.138225455402914, 9.307566088097714), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (22.415970392044656, 11.207785493244957, -24.418845803732086, 16.631743449679668), 0.0001 * arcsecond)\n\ndef test_venus_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['venus']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.9646045654448725, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 4.966946050917652, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 20.210417323471006, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 4.966656139420439, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 20.210145917097474, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 4.93668626355443, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 20.166644671858105, 0.0001 * arcsecond)\n\ndef test_venus_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['venus']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.0711674186789975, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 1.161811406279447, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 5.32829157368082, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 1.1615415906820667, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 5.326768071513868, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 1.1534174784892788, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 5.277365365528824, 0.0001 * arcsecond)\n\ndef test_venus_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['venus']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.1376890757925104, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 15.993350650200568, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -18.451653207795236, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 15.993038357924485, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 
-18.450881488018126, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.992790109710333, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -18.44871897642583, 0.0001 * arcsecond)\n\ndef test_venus_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['venus']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.7824924286112764, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 7.175585125577371, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 19.874130272238094, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 7.175312328808404, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 19.87477997549141, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 7.188033727750362, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 19.85167856390226, 0.0001 * arcsecond)\n\ndef test_venus_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['venus']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (0.9646045654448725, 1.0711674186789975, 1.1376890757925104, 0.7824924286112764), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (4.966946050917652, 1.161811406279447, 15.993350650200568, 7.175585125577371), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (20.210417323471006, 5.32829157368082, -18.451653207795236, 19.874130272238094), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (4.966656139420439, 1.1615415906820667, 15.993038357924485, 7.175312328808404), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (20.210145917097474, 5.326768071513868, -18.450881488018126, 19.87477997549141), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (4.93668626355443, 1.1534174784892788, 15.992790109710333, 7.188033727750362), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (20.166644671858105, 5.277365365528824, -18.44871897642583, 19.85167856390226), 0.0001 * arcsecond)\n\ndef test_mars_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['mars']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.5912188976380217, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 16.0296606272219, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -24.127310308581468, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 16.02988433983068, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -24.128202621801755, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.99950982315885, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -24.046277103674843, 0.0001 * arcsecond)\n\ndef test_mars_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['mars']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.430250679602913, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 23.545034875459514, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -4.8822490432210355, 
0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 23.544892038854186, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -4.88299363089811, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 23.536847630733252, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -4.935089760397492, 0.0001 * arcsecond)\n\ndef test_mars_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['mars']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.8496039270835372, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 22.034936616343344, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -13.18070741103498, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 22.03468760932384, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -13.182134899635477, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 22.034417492807563, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -13.182689288940116, 0.0001 * arcsecond)\n\ndef test_mars_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['mars']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.7665523168668773, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 13.894324196598355, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -12.122808318928707, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 13.894132382683363, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -12.121796956140246, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 13.9057161859901, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -12.184654273116957, 0.0001 * arcsecond)\n\ndef test_mars_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['mars']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (0.5912188976380217, 1.430250679602913, 1.8496039270835372, 1.7665523168668773), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (16.0296606272219, 23.545034875459514, 22.034936616343344, 13.894324196598355), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-24.127310308581468, -4.8822490432210355, -13.18070741103498, -12.122808318928707), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (16.02988433983068, 23.544892038854186, 22.03468760932384, 13.894132382683363), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-24.128202621801755, -4.88299363089811, -13.182134899635477, -12.121796956140246), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (15.99950982315885, 23.536847630733252, 22.034417492807563, 13.9057161859901), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-24.046277103674843, -4.935089760397492, -13.182689288940116, -12.184654273116957), 0.0001 * arcsecond)\n\ndef test_jupiter_barycenter_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['jupiter barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * 
OLD_AU, 5.8416003192317465, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 12.104091505864654, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 0.6513409058207986, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.103936313614676, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 0.6524656208782568, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 12.07798204538282, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 0.8216129394812305, 0.0001 * arcsecond)\n\ndef test_jupiter_barycenter_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['jupiter barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 5.913287883102948, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 6.765154678701348, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 23.170397700122013, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 6.764854244708427, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 23.170736332068763, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 6.755383083025232, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 23.182684693676578, 0.0001 * arcsecond)\n\ndef test_jupiter_barycenter_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['jupiter barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 4.621126565890217, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 1.5913207023268698, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 8.595887646396902, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 1.5914167941833441, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 8.59631203599914, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 1.5911888424331277, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 8.594250857972387, 0.0001 * arcsecond)\n\ndef test_jupiter_barycenter_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['jupiter barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 5.129958529243068, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 4.822841055032964, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 21.649994488649476, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 4.822764769132736, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 21.64994169521302, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 4.835670404865468, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 21.67058638943795, 0.0001 * arcsecond)\n\ndef test_jupiter_barycenter_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['jupiter barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (5.8416003192317465, 5.913287883102948, 4.621126565890217, 5.129958529243068), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = 
astrometric.radec()\n compare(ra.hours, (12.104091505864654, 6.765154678701348, 1.5913207023268698, 4.822841055032964), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (0.6513409058207986, 23.170397700122013, 8.595887646396902, 21.649994488649476), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.103936313614676, 6.764854244708427, 1.5914167941833441, 4.822764769132736), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (0.6524656208782568, 23.170736332068763, 8.59631203599914, 21.64994169521302), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (12.07798204538282, 6.755383083025232, 1.5911888424331277, 4.835670404865468), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (0.8216129394812305, 23.182684693676578, 8.594250857972387, 21.67058638943795), 0.0001 * arcsecond)\n\ndef test_saturn_barycenter_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['saturn barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 9.382032444401025, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 2.4627748852420206, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 12.045819985925936, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.462707593703528, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 12.045735497802628, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.4352879582290177, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 11.9115661075769, 0.0001 * arcsecond)\n\ndef test_saturn_barycenter_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['saturn barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 9.420484451056101, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 19.814248756112033, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -20.933390198050763, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 19.81446344451556, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -20.932846451357463, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 19.805277718955743, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -20.958164640919687, 0.0001 * arcsecond)\n\ndef test_saturn_barycenter_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['saturn barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 8.652750126001484, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 2.584400980536592, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 12.616288735770384, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.584593321351076, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 12.616983167644802, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.584361121508456, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 12.614774672730574, 0.0001 * arcsecond)\n\ndef test_saturn_barycenter_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['saturn 
barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 10.326368974662916, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 13.628484577191722, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -7.659435207931653, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 13.62827504244793, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -7.658028344724226, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 13.639628746850631, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -7.723201642102626, 0.0001 * arcsecond)\n\ndef test_saturn_barycenter_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['saturn barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (9.382032444401025, 9.420484451056101, 8.652750126001484, 10.326368974662916), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (2.4627748852420206, 19.814248756112033, 2.584400980536592, 13.628484577191722), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (12.045819985925936, -20.933390198050763, 12.616288735770384, -7.659435207931653), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (2.462707593703528, 19.81446344451556, 2.584593321351076, 13.62827504244793), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (12.045735497802628, -20.932846451357463, 12.616983167644802, -7.658028344724226), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (2.4352879582290177, 19.805277718955743, 2.584361121508456, 13.639628746850631), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (11.9115661075769, -20.958164640919687, 12.614774672730574, -7.723201642102626), 0.0001 * arcsecond)\n\ndef test_uranus_barycenter_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['uranus barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 18.75197906203834, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 12.087167068351334, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 0.20723926118363256, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.087010426255667, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 0.20832526777272883, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 12.061052547705433, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 0.37749969290358576, 0.0001 * arcsecond)\n\ndef test_uranus_barycenter_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['uranus barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 18.622417009295177, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 18.668551452013403, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -23.437331340689163, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.668859170516964, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -23.437016930580615, 0.0001 * arcsecond)\n\n ra, dec, distance 
= apparent.radec(epoch='date')\n compare(ra.hours, 18.65936113308538, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -23.447681812488984, 0.0001 * arcsecond)\n\ndef test_uranus_barycenter_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['uranus barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 20.727159134679393, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 21.165586867541418, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -17.018831731314233, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 21.165269485049027, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -17.020267168405784, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 21.164987614252272, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -17.020320613172004, 0.0001 * arcsecond)\n\ndef test_uranus_barycenter_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['uranus barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 19.234768680195387, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 0.4891643148564316, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 2.3565095329111823, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 0.4894463256538988, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 2.358369638516312, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 0.5005500654503398, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 2.429779341040803, 0.0001 * arcsecond)\n\ndef test_uranus_barycenter_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['uranus barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (18.75197906203834, 18.622417009295177, 20.727159134679393, 19.234768680195387), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (12.087167068351334, 18.668551452013403, 21.165586867541418, 0.4891643148564316), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (0.20723926118363256, -23.437331340689163, -17.018831731314233, 2.3565095329111823), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.087010426255667, 18.668859170516964, 21.165269485049027, 0.4894463256538988), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (0.20832526777272883, -23.437016930580615, -17.020267168405784, 2.358369638516312), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (12.061052547705433, 18.65936113308538, 21.164987614252272, 0.5005500654503398), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (0.37749969290358576, -23.447681812488984, -17.020320613172004, 2.429779341040803), 0.0001 * arcsecond)\n\ndef test_neptune_barycenter_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['neptune barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 29.83221264621946, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 15.637210587139663, 0.0002 * 
ra_arcsecond)\n compare(dec.degrees, -17.67999613660563, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 15.63739098768298, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -17.68045373026462, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.608486730597075, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -17.583793285519313, 0.0001 * arcsecond)\n\ndef test_neptune_barycenter_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['neptune barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 29.490001740438892, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 19.03623522579387, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -21.792864018500975, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 19.036513633320563, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -21.79251066237039, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 19.02716408230529, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -21.808047913986808, 0.0001 * arcsecond)\n\ndef test_neptune_barycenter_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['neptune barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 31.024491920354496, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 20.362841834121518, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -19.21242523937633, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 20.362475439010588, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -19.213645950878377, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 20.36218815756048, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -19.21323379889766, 0.0001 * arcsecond)\n\ndef test_neptune_barycenter_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['neptune barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 28.984118029716345, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 22.252468120719442, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -11.504657215501584, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 22.252825961036415, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -11.50264948264589, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 22.2643158309744, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -11.437330191299896, 0.0001 * arcsecond)\n\ndef test_neptune_barycenter_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['neptune barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (29.83221264621946, 29.490001740438892, 31.024491920354496, 28.984118029716345), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (15.637210587139663, 19.03623522579387, 20.362841834121518, 22.252468120719442), 0.0002 * ra_arcsecond)\n 
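# Editorial note: the *_date4 variants pass all four Julian dates as a list, so each compare() checks a vector of results against the same per-date fixtures and tolerances as the scalar tests above.\n    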
compare(dec.degrees, (-17.67999613660563, -21.792864018500975, -19.21242523937633, -11.504657215501584), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (15.63739098768298, 19.036513633320563, 20.362475439010588, 22.252825961036415), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-17.68045373026462, -21.79251066237039, -19.213645950878377, -11.50264948264589), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (15.608486730597075, 19.02716408230529, 20.36218815756048, 22.2643158309744), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-17.583793285519313, -21.808047913986808, -19.21323379889766, -11.437330191299896), 0.0001 * arcsecond)\n\ndef test_pluto_barycenter_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['pluto barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 32.312971776632494, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 12.015311208821212, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 16.620557180992588, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.01514128380381, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 16.622990160668607, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 11.989232654068259, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 16.792242650891875, 0.0001 * arcsecond)\n\ndef test_pluto_barycenter_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['pluto barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 28.707485955458118, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 15.216302246424346, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -1.3346560528819575, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 15.216661036271791, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -1.3358630622052712, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.208581663980876, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -1.3022394883151638, 0.0001 * arcsecond)\n\ndef test_pluto_barycenter_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['pluto barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 31.064412196006614, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 16.761873062250743, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -11.39643313463007, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 16.761526675406767, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -11.396301545071504, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 16.761277438459963, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -11.39428873441123, 0.0001 * arcsecond)\n\ndef test_pluto_barycenter_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['pluto barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 31.69909782133193, 0.01 * meter)\n\n astrometric 
= e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 18.488351288595236, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -19.55219099488885, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.488573622605898, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -19.551729414764313, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 18.501338273669152, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -19.541227909743732, 0.0001 * arcsecond)\n\ndef test_pluto_barycenter_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['pluto barycenter']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (32.312971776632494, 28.707485955458118, 31.064412196006614, 31.69909782133193), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (12.015311208821212, 15.216302246424346, 16.761873062250743, 18.488351288595236), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (16.620557180992588, -1.3346560528819575, -11.39643313463007, -19.55219099488885), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.01514128380381, 15.216661036271791, 16.761526675406767, 18.488573622605898), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (16.622990160668607, -1.3358630622052712, -11.396301545071504, -19.551729414764313), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (11.989232654068259, 15.208581663980876, 16.761277438459963, 18.501338273669152), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (16.792242650891875, -1.3022394883151638, -11.39428873441123, -19.541227909743732), 0.0001 * arcsecond)\n\ndef test_sun_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['sun']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.0160878650466754, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 8.03008088792976, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 20.496475643233936, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 8.02969030304998, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 20.497605463260726, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 8.000108116572395, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 20.58493093599605, 0.0001 * arcsecond)\n\ndef test_sun_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['sun']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.0118605934887042, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 3.776110727862678, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 19.907832379364574, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 3.775721385487214, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 19.906601181542, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 3.7666292045824337, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 19.879173772309745, 0.0001 * arcsecond)\n\ndef test_sun_geocentric_date2(de405, 
ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['sun']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.9833276788862821, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 18.752544254682526, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -23.033309607967187, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.752126228091367, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -23.03376015263556, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 18.75183797477899, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -23.032488638722818, 0.0001 * arcsecond)\n\ndef test_sun_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['sun']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 1.0107820040799866, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 10.268162490439073, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 10.751933902906119, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 10.267805651450434, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 10.753946960547603, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 10.279264504672039, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 10.688507865341325, 0.0001 * arcsecond)\n\ndef test_sun_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['sun']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (1.0160878650466754, 1.0118605934887042, 0.9833276788862821, 1.0107820040799866), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (8.03008088792976, 3.776110727862678, 18.752544254682526, 10.268162490439073), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (20.496475643233936, 19.907832379364574, -23.033309607967187, 10.751933902906119), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (8.02969030304998, 3.775721385487214, 18.752126228091367, 10.267805651450434), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (20.497605463260726, 19.906601181542, -23.03376015263556, 10.753946960547603), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (8.000108116572395, 3.7666292045824337, 18.75183797477899, 10.279264504672039), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (20.58493093599605, 19.879173772309745, -23.032488638722818, 10.688507865341325), 0.0001 * arcsecond)\n\ndef test_moon_geocentric_date0(de405, ts):\n t = ts.tt(jd=2440423.345833333)\n e = de405['earth'].at(t)\n p = de405['moon']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.0026034424248854585, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 12.472463241145173, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -4.546618838170065, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.472340287066462, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -4.545964408923231, 0.0001 * arcsecond)\n\n ra, dec, distance = 
apparent.radec(epoch='date')\n compare(ra.hours, 12.446262111681095, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -4.378227942512158, 0.0001 * arcsecond)\n\ndef test_moon_geocentric_date1(de405, ts):\n t = ts.tt(jd=2448031.5)\n e = de405['earth'].at(t)\n p = de405['moon']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.0024815092296598847, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 23.676443817409496, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 1.8587554901327035, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 23.676289920709518, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 1.857413875990142, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 23.66827809687387, 0.0002 * ra_arcsecond)\n compare(dec.degrees, 1.8051891857266409, 0.0001 * arcsecond)\n\ndef test_moon_geocentric_date2(de405, ts):\n t = ts.tt(jd=2451545.0)\n e = de405['earth'].at(t)\n p = de405['moon']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.002690202988513297, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 14.830020573942235, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -10.900635500943373, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 14.829807890359675, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -10.90012775884129, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 14.829573271760747, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -10.897905576904787, 0.0001 * arcsecond)\n\ndef test_moon_geocentric_date3(de405, ts):\n t = ts.tt(jd=2456164.5)\n e = de405['earth'].at(t)\n p = de405['moon']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, 0.0024739078649309238, 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 16.39102815233177, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -20.93676001523414, 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 16.39106196861365, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -20.936774891979848, 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 16.40383113143219, 0.0002 * ra_arcsecond)\n compare(dec.degrees, -20.96508913558473, 0.0001 * arcsecond)\n\ndef test_moon_geocentric_date4(de405, ts):\n t = ts.tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n e = de405['earth'].at(t)\n p = de405['moon']\n\n distance = length_of((e - p.at(t)).position.au)\n compare(distance * OLD_AU, (0.0026034424248854585, 0.0024815092296598847, 0.002690202988513297, 0.0024739078649309238), 0.01 * meter)\n\n astrometric = e.observe(p)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (12.472463241145173, 23.676443817409496, 14.830020573942235, 16.39102815233177), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-4.546618838170065, 1.8587554901327035, -10.900635500943373, -20.93676001523414), 0.0001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.472340287066462, 23.676289920709518, 14.829807890359675, 16.39106196861365), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-4.545964408923231, 1.857413875990142, 
-10.90012775884129, -20.936774891979848), 0.0001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (12.446262111681095, 23.66827809687387, 14.829573271760747, 16.40383113143219), 0.0002 * ra_arcsecond)\n compare(dec.degrees, (-4.378227942512158, 1.8051891857266409, -10.897905576904787, -20.96508913558473), 0.0001 * arcsecond)\n\ndef test_polaris_geocentric_date0(earth):\n e = earth.at(load.timescale().tt(jd=2440423.345833333))\n star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,\n ra_mas_per_year=44.22, dec_mas_per_year=-11.75,\n parallax_mas=7.56, radial_km_per_s=-17.4)\n\n astrometric = e.observe(star)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 2.5283697499529345, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.2642084845529, 0.00001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.52280149297809, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.25882879505869, 0.00001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.0385816433557173, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.11999387030946, 0.00001 * arcsecond)\n\ndef test_polaris_geocentric_date1(earth):\n e = earth.at(load.timescale().tt(jd=2448031.5))\n star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,\n ra_mas_per_year=44.22, dec_mas_per_year=-11.75,\n parallax_mas=7.56, radial_km_per_s=-17.4)\n\n astrometric = e.observe(star)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 2.5296910275944064, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26413894692217, 0.00001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.503356852811078, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26201007627152, 0.00001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.3329211805288432, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.22082922133737, 0.00001 * arcsecond)\n\ndef test_polaris_geocentric_date2(earth):\n e = earth.at(load.timescale().tt(jd=2451545.0))\n star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,\n ra_mas_per_year=44.22, dec_mas_per_year=-11.75,\n parallax_mas=7.56, radial_km_per_s=-17.4)\n\n astrometric = e.observe(star)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 2.5302921882000127, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26411027119273, 0.00001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.544633215462727, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26917874902797, 0.00001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.5459982729094564, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26697328449004, 0.00001 * arcsecond)\n\ndef test_polaris_geocentric_date3(earth):\n e = earth.at(load.timescale().tt(jd=2456164.5))\n star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,\n ra_mas_per_year=44.22, dec_mas_per_year=-11.75,\n parallax_mas=7.56, radial_km_per_s=-17.4)\n\n astrometric = e.observe(star)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, 2.531117065610149, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26406906493733, 0.00001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.541609533735535, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.25923373182651, 
0.00001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.8064741334456413, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.3136939266471, 0.00001 * arcsecond)\n\ndef test_polaris_geocentric_date4(earth):\n e = earth.at(load.timescale().tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5]))\n star = starlib.Star(ra_hours=2.530301028, dec_degrees=89.264109444,\n ra_mas_per_year=44.22, dec_mas_per_year=-11.75,\n parallax_mas=7.56, radial_km_per_s=-17.4)\n\n astrometric = e.observe(star)\n ra, dec, distance = astrometric.radec()\n compare(ra.hours, (2.5283697499529345, 2.5296910275944064, 2.5302921882000127, 2.531117065610149), 0.00001 * ra_arcsecond)\n compare(dec.degrees, (89.2642084845529, 89.26413894692217, 89.26411027119273, 89.26406906493733), 0.00001 * arcsecond)\n\n apparent = astrometric.apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (2.52280149297809, 2.503356852811078, 2.544633215462727, 2.541609533735535), 0.00001 * ra_arcsecond)\n compare(dec.degrees, (89.25882879505869, 89.26201007627152, 89.26917874902797, 89.25923373182651), 0.00001 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (2.0385816433557173, 2.3329211805288432, 2.5459982729094564, 2.8064741334456413), 0.00001 * ra_arcsecond)\n compare(dec.degrees, (89.11999387030946, 89.22082922133737, 89.26697328449004, 89.3136939266471), 0.00001 * arcsecond)\n\ndef test_mercury_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mercury']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 7.9049140222444105, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 22.33276016366845, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 7.874898511438327, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 22.415294637224765, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 46.3212267566032, 0.0005 * arcsecond)\n compare(az.degrees, 262.18590521567705, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 46.33688339908365, 0.0005 * arcsecond)\n compare(az.degrees, 262.18590521567705, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 46.33704240110901, 0.0005 * arcsecond)\n compare(az.degrees, 262.18590521567705, 0.0005 * arcsecond)\n\ndef test_mercury_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mercury']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.469959592064856, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 11.24594905426479, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.461508188066483, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 11.205182598299666, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -17.340667089884377, 0.0005 * arcsecond)\n compare(az.degrees, 300.9176579181716, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -17.340667089884377, 0.0005 * arcsecond)\n compare(az.degrees, 300.9176579181716, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n 
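# Editorial note: Mercury is well below the horizon here (negative altitude), so the refraction corrections in altaz('standard') and altaz(10.0, 1010.0) have no effect and all three variants agree.\n    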
compare(alt.degrees, -17.340667089884377, 0.0005 * arcsecond)\n compare(az.degrees, 300.9176579181716, 0.0005 * arcsecond)\n\ndef test_mercury_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mercury']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.138603904058247, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -24.421550562485436, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 18.138318996641566, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -24.420003066967503, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -0.12765060376706572, 0.0005 * arcsecond)\n compare(az.degrees, 121.97764361867154, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 0.36890915770104016, 0.0005 * arcsecond)\n compare(az.degrees, 121.97764361867154, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 0.3731892291678349, 0.0005 * arcsecond)\n compare(az.degrees, 121.97764361867154, 0.0005 * arcsecond)\n\ndef test_mercury_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mercury']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 9.29546814256182, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 16.68590812465023, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 9.307459135231527, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 16.630243128506475, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -9.116616855755964, 0.0005 * arcsecond)\n compare(az.degrees, 300.1420264373104, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -9.116616855755964, 0.0005 * arcsecond)\n compare(az.degrees, 300.1420264373104, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -9.116616855755964, 0.0005 * arcsecond)\n compare(az.degrees, 300.1420264373104, 0.0005 * arcsecond)\n\ndef test_mercury_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mercury']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (7.9049140222444105, 2.469959592064856, 18.138603904058247, 9.29546814256182), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (22.33276016366845, 11.24594905426479, -24.421550562485436, 16.68590812465023), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (7.874898511438327, 2.461508188066483, 18.138318996641566, 9.307459135231527), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (22.415294637224765, 11.205182598299666, -24.420003066967503, 16.630243128506475), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (46.3212267566032, -17.340667089884377, -0.12765060376706572, -9.116616855755964), 0.0005 * arcsecond)\n compare(az.degrees, (262.18590521567705, 300.9176579181716, 121.97764361867154, 300.1420264373104), 0.0005 * arcsecond)\n\n alt, az, distance = 
apparent.altaz('standard')\n compare(alt.degrees, (46.33688339908365, -17.340667089884377, 0.36890915770104016, -9.116616855755964), 0.0005 * arcsecond)\n compare(az.degrees, (262.18590521567705, 300.9176579181716, 121.97764361867154, 300.1420264373104), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (46.33704240110901, -17.340667089884377, 0.3731892291678349, -9.116616855755964), 0.0005 * arcsecond)\n compare(az.degrees, (262.18590521567705, 300.9176579181716, 121.97764361867154, 300.1420264373104), 0.0005 * arcsecond)\n\ndef test_venus_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['venus']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 4.9665155792599744, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 20.20866872703497, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 4.936546062416392, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 20.165161469755127, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 11.152374062990575, 0.0005 * arcsecond)\n compare(az.degrees, 287.0030740239532, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 11.23199275246975, 0.0005 * arcsecond)\n compare(az.degrees, 287.0030740239532, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 11.232796262162083, 0.0005 * arcsecond)\n compare(az.degrees, 287.0030740239532, 0.0005 * arcsecond)\n\ndef test_venus_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['venus']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 1.1614662937271143, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 5.325222585955545, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 1.1533422187037876, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 5.275819541572404, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -34.134914076462266, 0.0005 * arcsecond)\n compare(az.degrees, 313.64872862118426, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -34.134914076462266, 0.0005 * arcsecond)\n compare(az.degrees, 313.64872862118426, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -34.134914076462266, 0.0005 * arcsecond)\n compare(az.degrees, 313.64872862118426, 0.0005 * arcsecond)\n\ndef test_venus_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['venus']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 15.99311221167692, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -18.45256680288619, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.99286396137589, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -18.450404301558034, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 23.228910604670816, 0.0005 * arcsecond)\n compare(az.degrees, 142.1161398141626, 0.0005 * 
arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 23.266773672986005, 0.0005 * arcsecond)\n compare(az.degrees, 142.1161398141626, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 23.267157712313676, 0.0005 * arcsecond)\n compare(az.degrees, 142.1161398141626, 0.0005 * arcsecond)\n\ndef test_venus_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['venus']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 7.175218975921811, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 19.87224931182421, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 7.187940160922054, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 19.849149573371733, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -24.359995410915445, 0.0005 * arcsecond)\n compare(az.degrees, 327.640588969984, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -24.359995410915445, 0.0005 * arcsecond)\n compare(az.degrees, 327.640588969984, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -24.359995410915445, 0.0005 * arcsecond)\n compare(az.degrees, 327.640588969984, 0.0005 * arcsecond)\n\ndef test_venus_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['venus']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (4.9665155792599744, 1.1614662937271143, 15.99311221167692, 7.175218975921811), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (20.20866872703497, 5.325222585955545, -18.45256680288619, 19.87224931182421), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (4.936546062416392, 1.1533422187037876, 15.99286396137589, 7.187940160922054), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (20.165161469755127, 5.275819541572404, -18.450404301558034, 19.849149573371733), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (11.152374062990575, -34.134914076462266, 23.228910604670816, -24.359995410915445), 0.0005 * arcsecond)\n compare(az.degrees, (287.0030740239532, 313.64872862118426, 142.1161398141626, 327.640588969984), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (11.23199275246975, -34.134914076462266, 23.266773672986005, -24.359995410915445), 0.0005 * arcsecond)\n compare(az.degrees, (287.0030740239532, 313.64872862118426, 142.1161398141626, 327.640588969984), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (11.232796262162083, -34.134914076462266, 23.267157712313676, -24.359995410915445), 0.0005 * arcsecond)\n compare(az.degrees, (287.0030740239532, 313.64872862118426, 142.1161398141626, 327.640588969984), 0.0005 * arcsecond)\n\ndef test_mars_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mars']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 
16.030112454663165, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -24.130883187697044, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.999737237126766, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -24.048966502229923, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -3.540294697028628, 0.0005 * arcsecond)\n compare(az.degrees, 118.34877634707522, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -3.540294697028628, 0.0005 * arcsecond)\n compare(az.degrees, 118.34877634707522, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -3.540294697028628, 0.0005 * arcsecond)\n compare(az.degrees, 118.34877634707522, 0.0005 * arcsecond)\n\ndef test_mars_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mars']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 23.54486790147113, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -4.883946644223003, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 23.53682348628842, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -4.936042744435578, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -54.1089628741949, 0.0005 * arcsecond)\n compare(az.degrees, 338.0117138951488, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -54.1089628741949, 0.0005 * arcsecond)\n compare(az.degrees, 338.0117138951488, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -54.1089628741949, 0.0005 * arcsecond)\n compare(az.degrees, 338.0117138951488, 0.0005 * arcsecond)\n\ndef test_mars_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mars']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 22.034740913364253, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -13.182784253332377, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 22.03447079524992, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -13.183338672731741, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -36.90573266459917, 0.0005 * arcsecond)\n compare(az.degrees, 76.12368450672822, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -36.90573266459917, 0.0005 * arcsecond)\n compare(az.degrees, 76.12368450672822, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -36.90573266459917, 0.0005 * arcsecond)\n compare(az.degrees, 76.12368450672822, 0.0005 * arcsecond)\n\ndef test_mars_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mars']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 13.8940809044733, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -12.122804110106655, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 13.905664739133574, 0.0005 * ra_arcsecond)\n 
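# Editorial note: radec(epoch='date') expresses coordinates in the equinox of date rather than the default ICRS/J2000, hence the small offsets from the values above.\n    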
compare(dec.degrees, -12.185661905051244, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 22.094794272017666, 0.0005 * arcsecond)\n compare(az.degrees, 231.6381663847761, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 22.134776069489533, 0.0005 * arcsecond)\n compare(az.degrees, 231.6381663847761, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 22.135181528743814, 0.0005 * arcsecond)\n compare(az.degrees, 231.6381663847761, 0.0005 * arcsecond)\n\ndef test_mars_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['mars']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (16.030112454663165, 23.54486790147113, 22.034740913364253, 13.8940809044733), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (-24.130883187697044, -4.883946644223003, -13.182784253332377, -12.122804110106655), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (15.999737237126766, 23.53682348628842, 22.03447079524992, 13.905664739133574), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (-24.048966502229923, -4.936042744435578, -13.183338672731741, -12.185661905051244), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (-3.540294697028628, -54.1089628741949, -36.90573266459917, 22.094794272017666), 0.0005 * arcsecond)\n compare(az.degrees, (118.34877634707522, 338.0117138951488, 76.12368450672822, 231.6381663847761), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (-3.540294697028628, -54.1089628741949, -36.90573266459917, 22.134776069489533), 0.0005 * arcsecond)\n compare(az.degrees, (118.34877634707522, 338.0117138951488, 76.12368450672822, 231.6381663847761), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (-3.540294697028628, -54.1089628741949, -36.90573266459917, 22.135181528743814), 0.0005 * arcsecond)\n compare(az.degrees, (118.34877634707522, 338.0117138951488, 76.12368450672822, 231.6381663847761), 0.0005 * arcsecond)\n\ndef test_jupiter_barycenter_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['jupiter barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.103946503374884, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 0.6522085918269475, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 12.077992233588102, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 0.821355893113747, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 49.40651603144681, 0.0005 * arcsecond)\n compare(az.degrees, 156.07088561561997, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 49.42056980196601, 0.0005 * arcsecond)\n compare(az.degrees, 156.07088561561997, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 49.420712533159694, 0.0005 * arcsecond)\n compare(az.degrees, 156.07088561561997, 0.0005 * arcsecond)\n\ndef test_jupiter_barycenter_topocentric_date1(de405):\n t = 
load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['jupiter barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 6.764836821339949, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 23.17058790055951, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 6.755365668515656, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 23.18253602996423, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 38.00505126690997, 0.0005 * arcsecond)\n compare(az.degrees, 270.63795554820535, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 38.02600464378366, 0.0005 * arcsecond)\n compare(az.degrees, 270.63795554820535, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 38.02621739324931, 0.0005 * arcsecond)\n compare(az.degrees, 270.63795554820535, 0.0005 * arcsecond)\n\ndef test_jupiter_barycenter_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['jupiter barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 1.5914118935512866, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 8.595923929888196, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 1.5911839414385696, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 8.593862752942394, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -42.482560972481394, 0.0005 * arcsecond)\n compare(az.degrees, 359.3596746827537, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -42.482560972481394, 0.0005 * arcsecond)\n compare(az.degrees, 359.3596746827537, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -42.482560972481394, 0.0005 * arcsecond)\n compare(az.degrees, 359.3596746827537, 0.0005 * arcsecond)\n\ndef test_jupiter_barycenter_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['jupiter barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 4.82276173655752, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 21.649526689253502, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 4.835667333191383, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 21.670171438742255, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -29.289013841967986, 0.0005 * arcsecond)\n compare(az.degrees, 4.327425566855523, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -29.289013841967986, 0.0005 * arcsecond)\n compare(az.degrees, 4.327425566855523, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -29.289013841967986, 0.0005 * arcsecond)\n compare(az.degrees, 4.327425566855523, 0.0005 * arcsecond)\n\ndef test_jupiter_barycenter_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', 
'77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['jupiter barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.103946503374884, 6.764836821339949, 1.5914118935512866, 4.82276173655752), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (0.6522085918269475, 23.17058790055951, 8.595923929888196, 21.649526689253502), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (12.077992233588102, 6.755365668515656, 1.5911839414385696, 4.835667333191383), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (0.821355893113747, 23.18253602996423, 8.593862752942394, 21.670171438742255), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (49.40651603144681, 38.00505126690997, -42.482560972481394, -29.289013841967986), 0.0005 * arcsecond)\n compare(az.degrees, (156.07088561561997, 270.63795554820535, 359.3596746827537, 4.327425566855523), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (49.42056980196601, 38.02600464378366, -42.482560972481394, -29.289013841967986), 0.0005 * arcsecond)\n compare(az.degrees, (156.07088561561997, 270.63795554820535, 359.3596746827537, 4.327425566855523), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (49.420712533159694, 38.02621739324931, -42.482560972481394, -29.289013841967986), 0.0005 * arcsecond)\n compare(az.degrees, (156.07088561561997, 270.63795554820535, 359.3596746827537, 4.327425566855523), 0.0005 * arcsecond)\n\ndef test_saturn_barycenter_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['saturn barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.4626938858905594, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 12.045561201575383, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.4352742791152338, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 11.911391441362444, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -20.662686940324093, 0.0005 * arcsecond)\n compare(az.degrees, 306.01978569992787, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -20.662686940324093, 0.0005 * arcsecond)\n compare(az.degrees, 306.01978569992787, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -20.662686940324093, 0.0005 * arcsecond)\n compare(az.degrees, 306.01978569992787, 0.0005 * arcsecond)\n\ndef test_saturn_barycenter_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['saturn barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 19.814469727768646, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -20.932928080758664, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 19.805283998285297, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -20.958246345579155, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -48.93337647838982, 0.0005 * arcsecond)\n compare(az.degrees, 76.8837444919445, 0.0005 * arcsecond)\n\n alt, az, distance = 
apparent.altaz('standard')\n compare(alt.degrees, -48.93337647838982, 0.0005 * arcsecond)\n compare(az.degrees, 76.8837444919445, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -48.93337647838982, 0.0005 * arcsecond)\n compare(az.degrees, 76.8837444919445, 0.0005 * arcsecond)\n\ndef test_saturn_barycenter_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['saturn barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 2.5845847757319116, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 12.616768688416162, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 2.584352575888522, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 12.614560194137907, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -36.501918751911674, 0.0005 * arcsecond)\n compare(az.degrees, 341.22347230453323, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -36.501918751911674, 0.0005 * arcsecond)\n compare(az.degrees, 341.22347230453323, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -36.501918751911674, 0.0005 * arcsecond)\n compare(az.degrees, 341.22347230453323, 0.0005 * arcsecond)\n\ndef test_saturn_barycenter_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['saturn barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 13.628268137367913, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -7.658197329820583, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 13.639621846921335, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -7.723370683249701, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 22.96675851611188, 0.0005 * arcsecond)\n compare(az.degrees, 238.00627672875672, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 23.005094362956072, 0.0005 * arcsecond)\n compare(az.degrees, 238.00627672875672, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 23.005483182929098, 0.0005 * arcsecond)\n compare(az.degrees, 238.00627672875672, 0.0005 * arcsecond)\n\ndef test_saturn_barycenter_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['saturn barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (2.4626938858905594, 19.814469727768646, 2.5845847757319116, 13.628268137367913), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (12.045561201575383, -20.932928080758664, 12.616768688416162, -7.658197329820583), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (2.4352742791152338, 19.805283998285297, 2.584352575888522, 13.639621846921335), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (11.911391441362444, -20.958246345579155, 12.614560194137907, -7.723370683249701), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n 
compare(alt.degrees, (-20.662686940324093, -48.93337647838982, -36.501918751911674, 22.96675851611188), 0.0005 * arcsecond)\n compare(az.degrees, (306.01978569992787, 76.8837444919445, 341.22347230453323, 238.00627672875672), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (-20.662686940324093, -48.93337647838982, -36.501918751911674, 23.005094362956072), 0.0005 * arcsecond)\n compare(az.degrees, (306.01978569992787, 76.8837444919445, 341.22347230453323, 238.00627672875672), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (-20.662686940324093, -48.93337647838982, -36.501918751911674, 23.005483182929098), 0.0005 * arcsecond)\n compare(az.degrees, (306.01978569992787, 76.8837444919445, 341.22347230453323, 238.00627672875672), 0.0005 * arcsecond)\n\ndef test_uranus_barycenter_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['uranus barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.087016642067397, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 0.20824442104711183, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 12.061058763070791, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 0.37741883683460087, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 49.06396822144731, 0.0005 * arcsecond)\n compare(az.degrees, 156.65256040205296, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 49.078192535060566, 0.0005 * arcsecond)\n compare(az.degrees, 156.65256040205296, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 49.07833699756142, 0.0005 * arcsecond)\n compare(az.degrees, 156.65256040205296, 0.0005 * arcsecond)\n\ndef test_uranus_barycenter_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['uranus barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.668863148648313, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -23.43704804377175, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 18.65936510933368, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -23.447712978993913, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -37.0259637798912, 0.0005 * arcsecond)\n compare(az.degrees, 91.80748703145906, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -37.0259637798912, 0.0005 * arcsecond)\n compare(az.degrees, 91.80748703145906, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -37.0259637798912, 0.0005 * arcsecond)\n compare(az.degrees, 91.80748703145906, 0.0005 * arcsecond)\n\ndef test_uranus_barycenter_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['uranus barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 21.16527335872666, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -17.020308119118386, 0.0005 * 
arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 21.164991487815, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -17.020361566142082, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -29.175475562665554, 0.0005 * arcsecond)\n compare(az.degrees, 88.85671230431439, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -29.175475562665554, 0.0005 * arcsecond)\n compare(az.degrees, 88.85671230431439, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -29.175475562665554, 0.0005 * arcsecond)\n compare(az.degrees, 88.85671230431439, 0.0005 * arcsecond)\n\ndef test_uranus_barycenter_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['uranus barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 0.48945083888242796, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 2.358286196725548, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 0.5005545778924997, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 2.4296958868419787, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -14.5260443119261, 0.0005 * arcsecond)\n compare(az.degrees, 74.60219420538265, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -14.5260443119261, 0.0005 * arcsecond)\n compare(az.degrees, 74.60219420538265, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -14.5260443119261, 0.0005 * arcsecond)\n compare(az.degrees, 74.60219420538265, 0.0005 * arcsecond)\n\ndef test_uranus_barycenter_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['uranus barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.087016642067397, 18.668863148648313, 21.16527335872666, 0.48945083888242796), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (0.20824442104711183, -23.43704804377175, -17.020308119118386, 2.358286196725548), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (12.061058763070791, 18.65936510933368, 21.164991487815, 0.5005545778924997), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (0.37741883683460087, -23.447712978993913, -17.020361566142082, 2.4296958868419787), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (49.06396822144731, -37.0259637798912, -29.175475562665554, -14.5260443119261), 0.0005 * arcsecond)\n compare(az.degrees, (156.65256040205296, 91.80748703145906, 88.85671230431439, 74.60219420538265), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (49.078192535060566, -37.0259637798912, -29.175475562665554, -14.5260443119261), 0.0005 * arcsecond)\n compare(az.degrees, (156.65256040205296, 91.80748703145906, 88.85671230431439, 74.60219420538265), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (49.07833699756142, -37.0259637798912, -29.175475562665554, -14.5260443119261), 0.0005 * arcsecond)\n compare(az.degrees, (156.65256040205296, 
91.80748703145906, 88.85671230431439, 74.60219420538265), 0.0005 * arcsecond)\n\ndef test_neptune_barycenter_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['neptune barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 15.637396931781986, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -17.680489951171502, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.608492665044128, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -17.583829722494027, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 4.86937782636538, 0.0005 * arcsecond)\n compare(az.degrees, 117.29043762875409, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 5.031511017145419, 0.0005 * arcsecond)\n compare(az.degrees, 117.29043762875409, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 5.033116634143141, 0.0005 * arcsecond)\n compare(az.degrees, 117.29043762875409, 0.0005 * arcsecond)\n\ndef test_neptune_barycenter_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['neptune barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 19.036514568239326, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -21.792523874854822, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 19.027165016434417, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -21.808061138689617, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -40.43318694811052, 0.0005 * arcsecond)\n compare(az.degrees, 86.51833613444356, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -40.43318694811052, 0.0005 * arcsecond)\n compare(az.degrees, 86.51833613444356, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -40.43318694811052, 0.0005 * arcsecond)\n compare(az.degrees, 86.51833613444356, 0.0005 * arcsecond)\n\ndef test_neptune_barycenter_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['neptune barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 20.362478654099593, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -19.213665913911328, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 20.36219137258442, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -19.21325376377245, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -21.102154672787563, 0.0005 * arcsecond)\n compare(az.degrees, 98.14962081515444, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -21.102154672787563, 0.0005 * arcsecond)\n compare(az.degrees, 98.14962081515444, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -21.102154672787563, 0.0005 * arcsecond)\n compare(az.degrees, 98.14962081515444, 0.0005 * arcsecond)\n\ndef 
test_neptune_barycenter_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['neptune barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 22.252831344843074, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -11.502690543226894, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 22.26432121506238, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -11.437371208596403, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 2.41678290499992, 0.0005 * arcsecond)\n compare(az.degrees, 106.8092597257607, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 2.6713913487620147, 0.0005 * arcsecond)\n compare(az.degrees, 106.8092597257607, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 2.6738334093305696, 0.0005 * arcsecond)\n compare(az.degrees, 106.8092597257607, 0.0005 * arcsecond)\n\ndef test_neptune_barycenter_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['neptune barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (15.637396931781986, 19.036514568239326, 20.362478654099593, 22.252831344843074), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (-17.680489951171502, -21.792523874854822, -19.213665913911328, -11.502690543226894), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (15.608492665044128, 19.027165016434417, 20.36219137258442, 22.26432121506238), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (-17.583829722494027, -21.808061138689617, -19.21325376377245, -11.437371208596403), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (4.86937782636538, -40.43318694811052, -21.102154672787563, 2.41678290499992), 0.0005 * arcsecond)\n compare(az.degrees, (117.29043762875409, 86.51833613444356, 98.14962081515444, 106.8092597257607), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (5.031511017145419, -40.43318694811052, -21.102154672787563, 2.6713913487620147), 0.0005 * arcsecond)\n compare(az.degrees, (117.29043762875409, 86.51833613444356, 98.14962081515444, 106.8092597257607), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (5.033116634143141, -40.43318694811052, -21.102154672787563, 2.6738334093305696), 0.0005 * arcsecond)\n compare(az.degrees, (117.29043762875409, 86.51833613444356, 98.14962081515444, 106.8092597257607), 0.0005 * arcsecond)\n\ndef test_pluto_barycenter_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['pluto barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.015146948702718, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 16.622956629676764, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 11.989238323883423, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 16.792209116103148, 0.0005 * arcsecond)\n\n alt, 
az, distance = apparent.altaz()\n compare(alt.degrees, 64.72856074651983, 0.0005 * arcsecond)\n compare(az.degrees, 147.2138070056058, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 64.73630449169308, 0.0005 * arcsecond)\n compare(az.degrees, 147.2138070056058, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 64.73638314930092, 0.0005 * arcsecond)\n compare(az.degrees, 147.2138070056058, 0.0005 * arcsecond)\n\ndef test_pluto_barycenter_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['pluto barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 15.216666873470118, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -1.335915234746897, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 15.208587498665665, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -1.3022917220648205, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 16.233734452123414, 0.0005 * arcsecond)\n compare(az.degrees, 105.3994365631196, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 16.28889280191291, 0.0005 * arcsecond)\n compare(az.degrees, 105.3994365631196, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 16.289451329649054, 0.0005 * arcsecond)\n compare(az.degrees, 105.3994365631196, 0.0005 * arcsecond)\n\ndef test_pluto_barycenter_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['pluto barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 16.761532920101487, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -11.396347593297179, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 16.76128368305737, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -11.39433478419375, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 22.700996363632996, 0.0005 * arcsecond)\n compare(az.degrees, 127.81134408260581, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 22.739821647292274, 0.0005 * arcsecond)\n compare(az.degrees, 127.81134408260581, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 22.74021541578692, 0.0005 * arcsecond)\n compare(az.degrees, 127.81134408260581, 0.0005 * arcsecond)\n\ndef test_pluto_barycenter_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['pluto barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.488579709427018, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -19.551785355075808, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 18.501344365322606, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -19.541283736216652, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 28.33982206878914, 0.0005 * arcsecond)\n compare(az.degrees, 157.51785266272373, 0.0005 * arcsecond)\n\n 
alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 28.370071242061236, 0.0005 * arcsecond)\n compare(az.degrees, 157.51785266272373, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 28.370378222043662, 0.0005 * arcsecond)\n compare(az.degrees, 157.51785266272373, 0.0005 * arcsecond)\n\ndef test_pluto_barycenter_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['pluto barycenter']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.015146948702718, 15.216666873470118, 16.761532920101487, 18.488579709427018), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (16.622956629676764, -1.335915234746897, -11.396347593297179, -19.551785355075808), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (11.989238323883423, 15.208587498665665, 16.76128368305737, 18.501344365322606), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (16.792209116103148, -1.3022917220648205, -11.39433478419375, -19.541283736216652), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (64.72856074651983, 16.233734452123414, 22.700996363632996, 28.33982206878914), 0.0005 * arcsecond)\n compare(az.degrees, (147.2138070056058, 105.3994365631196, 127.81134408260581, 157.51785266272373), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (64.73630449169308, 16.28889280191291, 22.739821647292274, 28.370071242061236), 0.0005 * arcsecond)\n compare(az.degrees, (147.2138070056058, 105.3994365631196, 127.81134408260581, 157.51785266272373), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (64.73638314930092, 16.289451329649054, 22.74021541578692, 28.370378222043662), 0.0005 * arcsecond)\n compare(az.degrees, (147.2138070056058, 105.3994365631196, 127.81134408260581, 157.51785266272373), 0.0005 * arcsecond)\n\ndef test_sun_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['sun']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 8.02959789881544, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 20.496678572125123, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 8.000015838288707, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 20.584000539289498, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 46.72403357148823, 0.0005 * arcsecond)\n compare(az.degrees, 258.5550717845957, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 46.73947196634687, 0.0005 * arcsecond)\n compare(az.degrees, 258.5550717845957, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 46.73962875307724, 0.0005 * arcsecond)\n compare(az.degrees, 258.5550717845957, 0.0005 * arcsecond)\n\ndef test_sun_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['sun']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 
3.7755906381611175, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 19.90505409109931, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 3.7664985705990794, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 19.87762515818775, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 2.2209469369832533, 0.0005 * arcsecond)\n compare(az.degrees, 293.95636637272145, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 2.4868409787793837, 0.0005 * arcsecond)\n compare(az.degrees, 293.95636637272145, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 2.489379891081029, 0.0005 * arcsecond)\n compare(az.degrees, 293.95636637272145, 0.0005 * arcsecond)\n\ndef test_sun_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['sun']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 18.752264357691004, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -23.03532101826747, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 18.751976099155204, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -23.03404957045815, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -5.486505415022805, 0.0005 * arcsecond)\n compare(az.degrees, 115.32008451470392, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -5.486505415022805, 0.0005 * arcsecond)\n compare(az.degrees, 115.32008451470392, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -5.486505415022805, 0.0005 * arcsecond)\n compare(az.degrees, 115.32008451470392, 0.0005 * arcsecond)\n\ndef test_sun_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['sun']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 10.267679924967121, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 10.752399537108259, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 10.279138748598198, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 10.686961444410377, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -2.738407691502772, 0.0005 * arcsecond)\n compare(az.degrees, 286.09632001391725, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -2.738407691502772, 0.0005 * arcsecond)\n compare(az.degrees, 286.09632001391725, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -2.738407691502772, 0.0005 * arcsecond)\n compare(az.degrees, 286.09632001391725, 0.0005 * arcsecond)\n\ndef test_sun_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['sun']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (8.02959789881544, 3.7755906381611175, 18.752264357691004, 10.267679924967121), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (20.496678572125123, 19.90505409109931, -23.03532101826747, 
10.752399537108259), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (8.000015838288707, 3.7664985705990794, 18.751976099155204, 10.279138748598198), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (20.584000539289498, 19.87762515818775, -23.03404957045815, 10.686961444410377), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (46.72403357148823, 2.2209469369832533, -5.486505415022805, -2.738407691502772), 0.0005 * arcsecond)\n compare(az.degrees, (258.5550717845957, 293.95636637272145, 115.32008451470392, 286.09632001391725), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (46.73947196634687, 2.4868409787793837, -5.486505415022805, -2.738407691502772), 0.0005 * arcsecond)\n compare(az.degrees, (258.5550717845957, 293.95636637272145, 115.32008451470392, 286.09632001391725), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (46.73962875307724, 2.489379891081029, -5.486505415022805, -2.738407691502772), 0.0005 * arcsecond)\n compare(az.degrees, (258.5550717845957, 293.95636637272145, 115.32008451470392, 286.09632001391725), 0.0005 * arcsecond)\n\ndef test_moon_topocentric_date0(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2440423.345833333)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['moon']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 12.489955349304845, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -5.189705732227236, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 12.463855411284248, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -5.022075882872161, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 41.92040135025528, 0.0005 * arcsecond)\n compare(az.degrees, 151.19707488767745, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 41.938650930940234, 0.0005 * arcsecond)\n compare(az.degrees, 151.19707488767745, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 41.938836248377605, 0.0005 * arcsecond)\n compare(az.degrees, 151.19707488767745, 0.0005 * arcsecond)\n\ndef test_moon_topocentric_date1(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2448031.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['moon']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 23.663473338211578, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 1.227161288913488, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 23.655459675858083, 0.0005 * ra_arcsecond)\n compare(dec.degrees, 1.1749464194383863, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, -47.74510120858602, 0.0005 * arcsecond)\n compare(az.degrees, 338.13295291812307, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, -47.74510120858602, 0.0005 * arcsecond)\n compare(az.degrees, 338.13295291812307, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, -47.74510120858602, 0.0005 * arcsecond)\n compare(az.degrees, 338.13295291812307, 0.0005 * arcsecond)\n\ndef test_moon_topocentric_date2(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2451545.0)\n earth 
= de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['moon']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 14.845679251156893, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -11.590214641232205, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 14.845444624832663, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -11.58799188846256, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 36.381265580736255, 0.0005 * arcsecond)\n compare(az.degrees, 156.2971102404744, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 36.40348032108563, 0.0005 * arcsecond)\n compare(az.degrees, 156.2971102404744, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 36.403705864717445, 0.0005 * arcsecond)\n compare(az.degrees, 156.2971102404744, 0.0005 * arcsecond)\n\ndef test_moon_topocentric_date3(de405):\n t = load.timescale(delta_t=0.0).tt(jd=2456164.5)\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['moon']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, 16.380804513901573, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -21.79048462924397, 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, 16.393647715389825, 0.0005 * ra_arcsecond)\n compare(dec.degrees, -21.81897641768761, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, 28.439387966372543, 0.0005 * arcsecond)\n compare(az.degrees, 191.29497427201525, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, 28.46951344291743, 0.0005 * arcsecond)\n compare(az.degrees, 191.29497427201525, 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, 28.46981916998486, 0.0005 * arcsecond)\n compare(az.degrees, 191.29497427201525, 0.0005 * arcsecond)\n\ndef test_moon_topocentric_date4(de405):\n t = load.timescale(delta_t=0.0).tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])\n earth = de405['earth']\n usno = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)\n\n apparent = usno.at(t).observe(de405['moon']).apparent()\n ra, dec, distance = apparent.radec()\n compare(ra.hours, (12.489955349304845, 23.663473338211578, 14.845679251156893, 16.380804513901573), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (-5.189705732227236, 1.227161288913488, -11.590214641232205, -21.79048462924397), 0.0005 * arcsecond)\n\n ra, dec, distance = apparent.radec(epoch='date')\n compare(ra.hours, (12.463855411284248, 23.655459675858083, 14.845444624832663, 16.393647715389825), 0.0005 * ra_arcsecond)\n compare(dec.degrees, (-5.022075882872161, 1.1749464194383863, -11.58799188846256, -21.81897641768761), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz()\n compare(alt.degrees, (41.92040135025528, -47.74510120858602, 36.381265580736255, 28.439387966372543), 0.0005 * arcsecond)\n compare(az.degrees, (151.19707488767745, 338.13295291812307, 156.2971102404744, 191.29497427201525), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz('standard')\n compare(alt.degrees, (41.938650930940234, -47.74510120858602, 36.40348032108563, 28.46951344291743), 0.0005 * arcsecond)\n compare(az.degrees, (151.19707488767745, 338.13295291812307, 156.2971102404744, 
191.29497427201525), 0.0005 * arcsecond)\n\n alt, az, distance = apparent.altaz(10.0, 1010.0)\n compare(alt.degrees, (41.938836248377605, -47.74510120858602, 36.403705864717445, 28.46981916998486), 0.0005 * arcsecond)\n compare(az.degrees, (151.19707488767745, 338.13295291812307, 156.2971102404744, 191.29497427201525), 0.0005 * arcsecond)\n\ndef test_hipparcos_conversion0(earth):\n line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\\n'\n star = hipparcos.parse(line)\n compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)\n compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)\n ra, dec, distance = earth.at(load.timescale().tt(jd=2440423.345833333)).observe(star).radec()\n compare(ra.hours, 2.5283697000528966, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26420852419295, 0.00001 * arcsecond)\n\ndef test_hipparcos_conversion1(earth):\n line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\\n'\n star = hipparcos.parse(line)\n compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)\n compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)\n ra, dec, distance = earth.at(load.timescale().tt(jd=2448031.5)).observe(star).radec()\n compare(ra.hours, 2.529691010447949, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26413900274704, 0.00001 * arcsecond)\n\ndef test_hipparcos_conversion2(earth):\n line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\\n'\n star = hipparcos.parse(line)\n compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)\n compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)\n ra, dec, distance = earth.at(load.timescale().tt(jd=2451545.0)).observe(star).radec()\n compare(ra.hours, 2.5302921836971946, 0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26411033462212, 0.00001 * arcsecond)\n\ndef test_hipparcos_conversion3(earth):\n line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\\n'\n star = hipparcos.parse(line)\n compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)\n compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)\n ra, dec, distance = earth.at(load.timescale().tt(jd=2456164.5)).observe(star).radec()\n compare(ra.hours, 2.5311170753257395, 
0.00001 * ra_arcsecond)\n compare(dec.degrees, 89.26406913848278, 0.00001 * arcsecond)\n\ndef test_hipparcos_conversion4(earth):\n line = 'H| 11767| |02 31 47.08|+89 15 50.9| 1.97|1|H|037.94614689|+89.26413805| | 7.56| 44.22| -11.74| 0.39| 0.45| 0.48| 0.47| 0.55|-0.16| 0.05| 0.27|-0.01| 0.08| 0.05| 0.04|-0.12|-0.09|-0.36| 1| 1.22| 11767| 2.756|0.003| 2.067|0.003| | 0.636|0.003|T|0.70|0.00|L| | 2.1077|0.0021|0.014|102| | 2.09| 2.13| 3.97|P|1|A|02319+8915|I| 1| 1| | | | | | | | | |S| |P| 8890|B+88 8 | | |0.68|F7:Ib-IIv SB|G\\n'\n star = hipparcos.parse(line)\n compare(star.ra.hours, 2.530301023497941, 0.001 * ra_arcsecond)\n compare(star.dec.degrees, 89.26410950742938, 0.001 * arcsecond)\n ra, dec, distance = earth.at(load.timescale().tt(jd=[2440423.345833333, 2448031.5, 2451545.0, 2456164.5])).observe(star).radec()\n compare(ra.hours, (2.5283697000528966, 2.529691010447949, 2.5302921836971946, 2.5311170753257395), 0.00001 * ra_arcsecond)\n compare(dec.degrees, (89.26420852419295, 89.26413900274704, 89.26411033462212, 89.26406913848278), 0.00001 * arcsecond)\n\n"
] | [
[
"numpy.arcsin",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.array"
],
[
"numpy.array",
"numpy.abs",
"numpy.einsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
franzhaas/cmlib | [
"e313b1535d2823bc056d9b46973f6485ba781841"
] | [
"source_data/ScientificColourMaps4/nuuk/nuuk.py"
] | [
"# \n# nuuk\n# www.fabiocrameri.ch/visualisation\nfrom matplotlib.colors import LinearSegmentedColormap \n \ncm_data = [[0.013938, 0.35043, 0.55025], \n [0.020952, 0.35139, 0.54864], \n [0.027979, 0.35236, 0.54702], \n [0.035227, 0.35334, 0.54541], \n [0.042266, 0.35433, 0.54381], \n [0.048841, 0.35534, 0.54221], \n [0.054829, 0.35634, 0.54063], \n [0.060556, 0.35736, 0.53905], \n [0.06593, 0.35837, 0.53749], \n [0.071101, 0.35941, 0.53593], \n [0.075962, 0.36045, 0.53439], \n [0.080746, 0.36153, 0.53287], \n [0.08533, 0.36261, 0.53136], \n [0.089868, 0.36371, 0.52988], \n [0.094279, 0.36482, 0.5284], \n [0.098577, 0.36597, 0.52696], \n [0.10283, 0.36712, 0.52552], \n [0.10704, 0.36831, 0.52412], \n [0.11119, 0.36951, 0.52274], \n [0.11527, 0.37073, 0.52139], \n [0.11938, 0.37199, 0.52006], \n [0.12344, 0.37328, 0.51879], \n [0.12753, 0.3746, 0.51753], \n [0.13162, 0.37594, 0.5163], \n [0.13566, 0.37732, 0.51512], \n [0.13974, 0.37872, 0.51398], \n [0.14388, 0.38017, 0.51287], \n [0.14798, 0.38165, 0.51181], \n [0.15211, 0.38318, 0.51079], \n [0.15631, 0.38472, 0.50983], \n [0.16045, 0.38632, 0.50892], \n [0.16469, 0.38796, 0.50806], \n [0.16897, 0.38964, 0.50724], \n [0.17324, 0.39138, 0.50648], \n [0.17759, 0.39314, 0.50578], \n [0.18192, 0.39495, 0.50515], \n [0.18636, 0.39681, 0.50459], \n [0.1908, 0.39872, 0.50408], \n [0.1953, 0.40069, 0.50363], \n [0.19982, 0.40268, 0.50326], \n [0.20445, 0.40473, 0.50295], \n [0.20906, 0.40683, 0.50272], \n [0.21374, 0.40898, 0.50256], \n [0.21847, 0.41117, 0.50247], \n [0.22325, 0.41342, 0.50246], \n [0.22807, 0.41571, 0.50252], \n [0.2329, 0.41805, 0.50266], \n [0.23783, 0.42043, 0.50287], \n [0.24276, 0.42286, 0.50316], \n [0.24777, 0.42535, 0.50353], \n [0.2528, 0.42788, 0.50398], \n [0.25786, 0.43044, 0.5045], \n [0.26299, 0.43305, 0.50509], \n [0.26813, 0.4357, 0.50575], \n [0.27334, 0.43839, 0.5065], \n [0.27857, 0.44111, 0.50732], \n [0.28382, 0.44389, 0.50822], \n [0.28912, 0.44668, 0.50918], \n [0.29443, 0.44952, 0.51021], \n [0.2998, 0.45239, 0.5113], \n [0.30518, 0.45528, 0.51247], \n [0.3106, 0.45821, 0.5137], \n [0.31603, 0.46116, 0.51499], \n [0.32146, 0.46415, 0.51633], \n [0.32693, 0.46715, 0.51775], \n [0.33245, 0.47018, 0.51921], \n [0.33794, 0.47322, 0.52072], \n [0.34347, 0.4763, 0.52228], \n [0.34901, 0.47939, 0.5239], \n [0.35457, 0.48249, 0.52555], \n [0.36012, 0.4856, 0.52725], \n [0.3657, 0.48874, 0.52899], \n [0.37127, 0.49189, 0.53075], \n [0.37687, 0.49505, 0.53256], \n [0.38245, 0.49822, 0.53438], \n [0.38804, 0.50139, 0.53625], \n [0.39364, 0.50458, 0.53813], \n [0.39923, 0.50777, 0.54003], \n [0.40483, 0.51096, 0.54195], \n [0.41044, 0.51418, 0.54389], \n [0.41602, 0.51739, 0.54584], \n [0.42159, 0.52059, 0.54779], \n [0.42719, 0.52381, 0.54976], \n [0.43276, 0.52703, 0.55172], \n [0.43832, 0.53024, 0.55368], \n [0.44387, 0.53345, 0.55564], \n [0.44941, 0.53667, 0.55761], \n [0.45494, 0.53988, 0.55955], \n [0.46044, 0.5431, 0.56148], \n [0.46594, 0.54631, 0.56341], \n [0.47143, 0.54951, 0.56529], \n [0.47688, 0.5527, 0.56718], \n [0.48232, 0.55589, 0.56903], \n [0.48773, 0.55908, 0.57086], \n [0.49311, 0.56227, 0.57266], \n [0.49848, 0.56543, 0.57442], \n [0.5038, 0.56859, 0.57616], \n [0.5091, 0.57175, 0.57784], \n [0.51437, 0.57489, 0.5795], \n [0.51959, 0.57801, 0.58109], \n [0.52478, 0.58113, 0.58265], \n [0.52993, 0.58423, 0.58416], \n [0.53504, 0.58732, 0.58563], \n [0.54009, 0.59038, 0.58702], \n [0.54511, 0.59344, 0.58837], \n [0.55007, 0.59647, 0.58966], \n [0.55498, 0.59949, 0.59089], \n [0.55983, 0.60247, 
0.59206], \n [0.56462, 0.60544, 0.59316], \n [0.56935, 0.60839, 0.59419], \n [0.57403, 0.61131, 0.59515], \n [0.57863, 0.61419, 0.59605], \n [0.58317, 0.61706, 0.59688], \n [0.58764, 0.61991, 0.59763], \n [0.59205, 0.62271, 0.59831], \n [0.59637, 0.62548, 0.59892], \n [0.60062, 0.62824, 0.59946], \n [0.6048, 0.63095, 0.59992], \n [0.6089, 0.63363, 0.6003], \n [0.61291, 0.63628, 0.6006], \n [0.61686, 0.63888, 0.60084], \n [0.62072, 0.64146, 0.601], \n [0.62449, 0.644, 0.60108], \n [0.62819, 0.64651, 0.60109], \n [0.6318, 0.64898, 0.60103], \n [0.63533, 0.65141, 0.6009], \n [0.63879, 0.65381, 0.6007], \n [0.64216, 0.65617, 0.60042], \n [0.64545, 0.65849, 0.60008], \n [0.64866, 0.66077, 0.59967], \n [0.6518, 0.66303, 0.5992], \n [0.65484, 0.66524, 0.59865], \n [0.65783, 0.66742, 0.59805], \n [0.66073, 0.66957, 0.59739], \n [0.66357, 0.67168, 0.59667], \n [0.66632, 0.67376, 0.59589], \n [0.66902, 0.6758, 0.59506], \n [0.67164, 0.67782, 0.59417], \n [0.67419, 0.67979, 0.59324], \n [0.67669, 0.68174, 0.59226], \n [0.67912, 0.68367, 0.59122], \n [0.68149, 0.68557, 0.59013], \n [0.68381, 0.68743, 0.58901], \n [0.68607, 0.68928, 0.58785], \n [0.68829, 0.69109, 0.58665], \n [0.69044, 0.69289, 0.58541], \n [0.69256, 0.69465, 0.58413], \n [0.69462, 0.6964, 0.58282], \n [0.69665, 0.69813, 0.58148], \n [0.69863, 0.69984, 0.58011], \n [0.70058, 0.70152, 0.5787], \n [0.70248, 0.7032, 0.57727], \n [0.70436, 0.70486, 0.57582], \n [0.7062, 0.7065, 0.57433], \n [0.70801, 0.70812, 0.57284], \n [0.7098, 0.70974, 0.57131], \n [0.71155, 0.71134, 0.56976], \n [0.71328, 0.71293, 0.5682], \n [0.71499, 0.71451, 0.56663], \n [0.71669, 0.71608, 0.56502], \n [0.71835, 0.71765, 0.56343], \n [0.72001, 0.7192, 0.5618], \n [0.72164, 0.72074, 0.56016], \n [0.72326, 0.72228, 0.5585], \n [0.72488, 0.72382, 0.55686], \n [0.72647, 0.72536, 0.55518], \n [0.72806, 0.72689, 0.5535], \n [0.72964, 0.72842, 0.55183], \n [0.73121, 0.72995, 0.55013], \n [0.73278, 0.73147, 0.54844], \n [0.73434, 0.733, 0.54674], \n [0.7359, 0.73453, 0.54504], \n [0.73746, 0.73606, 0.54333], \n [0.73902, 0.73759, 0.54162], \n [0.74057, 0.73914, 0.53992], \n [0.74213, 0.74068, 0.53823], \n [0.7437, 0.74223, 0.53652], \n [0.74526, 0.74379, 0.53482], \n [0.74684, 0.74536, 0.53313], \n [0.74843, 0.74694, 0.53145], \n [0.75002, 0.74853, 0.52978], \n [0.75163, 0.75013, 0.52812], \n [0.75324, 0.75176, 0.52648], \n [0.75488, 0.75339, 0.52484], \n [0.75653, 0.75505, 0.52323], \n [0.7582, 0.75672, 0.52162], \n [0.75989, 0.75843, 0.52005], \n [0.76161, 0.76015, 0.51852], \n [0.76335, 0.7619, 0.517], \n [0.76513, 0.76369, 0.51551], \n [0.76693, 0.76552, 0.51407], \n [0.76877, 0.76737, 0.51266], \n [0.77066, 0.76926, 0.5113], \n [0.77258, 0.77121, 0.50999], \n [0.77455, 0.77319, 0.50874], \n [0.77658, 0.77523, 0.50754], \n [0.77865, 0.77734, 0.50641], \n [0.78079, 0.77949, 0.50535], \n [0.78298, 0.78171, 0.50439], \n [0.78525, 0.784, 0.50349], \n [0.78759, 0.78636, 0.50269], \n [0.79, 0.78879, 0.50201], \n [0.7925, 0.7913, 0.50143], \n [0.79508, 0.79391, 0.50096], \n [0.79775, 0.7966, 0.50063], \n [0.80052, 0.79939, 0.50043], \n [0.80338, 0.80227, 0.50037], \n [0.80635, 0.80526, 0.50048], \n [0.80942, 0.80836, 0.50075], \n [0.81259, 0.81156, 0.5012], \n [0.81589, 0.81486, 0.50183], \n [0.81928, 0.81828, 0.50264], \n [0.8228, 0.82182, 0.50366], \n [0.82643, 0.82546, 0.5049], \n [0.83017, 0.82921, 0.50633], \n [0.83401, 0.83307, 0.50801], \n [0.83796, 0.83703, 0.5099], \n [0.84202, 0.84109, 0.51201], \n [0.84616, 0.84525, 0.51438], \n [0.8504, 0.84949, 0.51696], \n 
[0.85473, 0.85381, 0.51977], \n [0.85912, 0.85821, 0.52282], \n [0.86359, 0.86268, 0.5261], \n [0.86811, 0.86719, 0.52959], \n [0.87267, 0.87175, 0.53329], \n [0.87726, 0.87634, 0.53722], \n [0.88189, 0.88096, 0.54131], \n [0.88653, 0.8856, 0.54562], \n [0.89117, 0.89023, 0.55009], \n [0.89581, 0.89486, 0.55473], \n [0.90042, 0.89947, 0.55952], \n [0.90502, 0.90405, 0.56444], \n [0.90957, 0.90861, 0.56949], \n [0.91409, 0.91312, 0.57466], \n [0.91855, 0.91759, 0.57994], \n [0.92297, 0.922, 0.5853], \n [0.92731, 0.92635, 0.59072], \n [0.9316, 0.93065, 0.59623], \n [0.93582, 0.93487, 0.60179], \n [0.93997, 0.93904, 0.60741], \n [0.94405, 0.94314, 0.61304], \n [0.94806, 0.94717, 0.61873], \n [0.952, 0.95114, 0.62444], \n [0.95587, 0.95504, 0.63016], \n [0.95968, 0.95888, 0.6359], \n [0.96341, 0.96265, 0.64165], \n [0.96709, 0.96637, 0.64741], \n [0.97071, 0.97004, 0.65316], \n [0.97427, 0.97366, 0.65892], \n [0.97778, 0.97723, 0.66469], \n [0.98124, 0.98075, 0.67046], \n [0.98466, 0.98424, 0.67621], \n [0.98803, 0.98769, 0.68196], \n [0.99138, 0.99111, 0.68772], \n [0.99468, 0.9945, 0.69347], \n [0.99797, 0.99787, 0.69922]] \n \nnuuk_map = LinearSegmentedColormap.from_list('nuuk', cm_data) \n# For use of \"viscm view\" \ntest_cm = nuuk_map \n \nif __name__ == \"__main__\": \n import matplotlib.pyplot as plt \n import numpy as np \n \n try: \n from viscm import viscm \n viscm(nuuk_map) \n except ImportError: \n print(\"viscm not found, falling back on simple display\") \n plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', \n cmap=nuuk_map) \n plt.show() \n"
] | [
[
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
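The nuuk module above is essentially pure data: a 256-entry RGB table handed to LinearSegmentedColormap.from_list, which interpolates linearly between entries. A minimal sketch of the same pattern, using only three anchor colors copied from the table (first, middle-ish, last) instead of the full list:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

# Three anchors taken from the nuuk table; from_list interpolates the rest.
cm_data = [[0.013938, 0.35043, 0.55025],   # dark blue (first entry)
           [0.72164, 0.72074, 0.56016],    # olive midpoint
           [0.99797, 0.99787, 0.69922]]    # pale yellow (last entry)
demo_map = LinearSegmentedColormap.from_list('nuuk_demo', cm_data)

# Same fallback display as the module's __main__ block: a 256-step gradient.
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=demo_map)
plt.show()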
thhapke/ddathlete_di_operators | [
"7fb88db7c883e6bcdfe9d0ac1582798c7f3bd204"
] | [
"TrainingSummary/trainingsummary.py"
] | [
"from datetime import datetime\nimport math\nimport sys\n\nimport pytz\n\nimport pandas as pd\n\n\ndef on_input(msg):\n \n att = dict(msg.attributes)\n \n header = [c[\"name\"] for c in msg.attributes['table']['columns']]\n df = pd.DataFrame(msg.body, columns=header)\n df['TIMESTAMP'] = pd.to_datetime(df.TIMESTAMP)\n \n df['INDEX'] = df['TIMESTAMP']\n df = df.set_index('INDEX')\n #df.tz_localize('utc')\n df = df.tz_convert(None)\n \n log('Process {} of {} - records:{}'.format(att['table_name'],att['year'],df.shape[0] ))\n \n if att['table_name'] in ['SWIMMING_POOL','SWIMMING_OPEN_WATER'] :\n df['TEMPERATURE'] = -273\n if att['table_name'] == \"CYCLING_INDOOR\" :\n df['DISTANCE'] = 0\n\n aggreg = {'DATE':'first','TIMESTAMP':['min','max'],\\\n 'DISTANCE':['min','max'],\\\n 'POWER':['min','max','mean'],'HEAR_TRATE':['min','max','mean'],\\\n 'CADENCE':['min','max','mean'],'TEMPERATURE':'max'}\n tdf = df.groupby('TRAINING_ID').agg(aggreg).reset_index()\n tdf.columns = ['_'.join(col).upper() for col in tdf.columns]\n \n \n tdf['SPORT_TYPE'] = att['table_name']\n tdf['TRAINING_TYPE'] = 'Unknown'\n tdf['DISTANCE'] = tdf['DISTANCE_MAX'] - tdf['DISTANCE_MIN']\n tdf.rename(columns={\"TRAINING_ID_\": \"TRAINING_ID\", \"DATE_FIRST\":\"DATE\", \n \"TEMPERATURE_MAX\":\"TEMPERATURE\"},inplace = True)\n \n tdf['TRAINING_NO'] = 1\n tdf['TRAINING_NO'] = tdf.groupby('DATE')['TRAINING_NO'].cumsum()\n \n \n tdf['DURATION'] = (tdf['TIMESTAMP_MAX'] - tdf['TIMESTAMP_MIN']).dt.total_seconds()\n tdf['TIMESTAMP_START'] = tdf['TIMESTAMP_MIN'].dt.strftime('%Y-%m-%d %H:%M:%S')\n tdf['TIMESTAMP_END'] = tdf['TIMESTAMP_MAX'].dt.strftime('%Y-%m-%d %H:%M:%S')\n \n # cast \n tdf['DISTANCE'] = tdf['DISTANCE'].astype('float')\n tdf['DURATION'] = tdf['DURATION'].astype('int')\n tdf['HEART_RATE_MIN'] = tdf['HEART_RATE_MIN'].astype('int')\n tdf['HEART_RATE_MAX'] = tdf['HEART_RATE_MAX'].astype('int')\n tdf['HEART_RATE_MEAN'] = tdf['HEART_RATE_MEAN'].astype('int')\n tdf['CADENCE_MIN'] = tdf['CADENCE_MIN'].astype('float')\n tdf['CADENCE_MAX'] = tdf['CADENCE_MAX'].astype('float')\n tdf['CADENCE_MEAN'] = tdf['CADENCE_MEAN'].astype('float')\n tdf['POWER_MIN'] = tdf['POWER_MIN'].astype('float')\n tdf['POWER_MAX'] = tdf['POWER_MAX'].astype('float')\n tdf['POWER_MEAN'] = tdf['POWER_MEAN'].astype('float')\n\n # sort dataframe according to target table\n tdf = tdf[['TRAINING_ID','DATE','SPORT_TYPE','TRAINING_NO','TRAINING_TYPE','DISTANCE','DURATION',\\\n 'TIMESTAMP_START','TIMESTAMP_END','POWER_MIN','POWER_MAX','POWER_MEAN','HEART_RATE_MIN','HEART_RATE_MAX', \\\n 'HEART_RATE_MEAN','CADENCE_MIN','CADENCE_MAX','CADENCE_MEAN','TEMPERATURE']]\n \n log('Length of df: {}'.format(len(tdf)))\n \n att = msg.attributes\n att[\"table\"] = {\"columns\":[\n {\"class\":str(tdf[tdf.columns[0]].dtype),\"tdf_name\":tdf.columns[0],\"name\":\"TRAINING_ID\",\"nullable\":False,\"type\":{\"hana\":\"BIGINT\"}},\n {\"class\":str(tdf[tdf.columns[1]].dtype),\"tdf_name\":tdf.columns[1],\"name\":\"DATE\",\"nullable\":True,\"type\":{\"hana\":\"DAYDATE\"}},\n {\"class\":str(tdf[tdf.columns[2]].dtype),\"tdf_name\":tdf.columns[2],\"name\":\"SPORT_TYPE\",\"nullable\":False,\"size\":25,\"type\":{\"hana\":\"NVARCHAR\"}},\n {\"class\":str(tdf[tdf.columns[3]].dtype),\"tdf_name\":tdf.columns[3],\"name\":\"TRAINING_NO\",\"nullable\":True,\"type\":{\"hana\":\"INTEGER\"}},\n {\"class\":str(tdf[tdf.columns[4]].dtype),\"tdf_name\":tdf.columns[4],\"name\":\"TRAINING_TYPE\",\"nullable\":True,\"size\":25,\"type\":{\"hana\":\"NVARCHAR\"}},\n 
{\"class\":str(tdf[tdf.columns[5]].dtype),\"tdf_name\":tdf.columns[5],\"name\":\"DISTANCE\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}},\n {\"class\":str(tdf[tdf.columns[6]].dtype),\"tdf_name\":tdf.columns[6],\"name\":\"DURATION\",\"nullable\":True,\"type\":{\"hana\":\"INTEGER\"}},\n {\"class\":str(tdf[tdf.columns[7]].dtype),\"tdf_name\":tdf.columns[7],\"name\":\"TIMESTAMP_START\",\"nullable\":True,\"type\":{\"hana\":\"LONGDATE\"}},\n {\"class\":str(tdf[tdf.columns[8]].dtype),\"tdf_name\":tdf.columns[8],\"name\":\"TIMESTAMP_END\",\"nullable\":True,\"type\":{\"hana\":\"LONGDATE\"}},\n {\"class\":str(tdf[tdf.columns[9]].dtype),\"tdf_name\":tdf.columns[9],\"name\":\"POWER_MIN\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}},\n {\"class\":str(tdf[tdf.columns[10]].dtype),\"tdf_name\":tdf.columns[10],\"name\":\"POWER_MAX\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}},\n {\"class\":str(tdf[tdf.columns[11]].dtype),\"tdf_name\":tdf.columns[11],\"name\":\"POWER_MEAN\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}},\n {\"class\":str(tdf[tdf.columns[12]].dtype),\"tdf_name\":tdf.columns[12],\"name\":\"HEARTRATE_MIN\",\"nullable\":True,\"type\":{\"hana\":\"INTEGER\"}},\n {\"class\":str(tdf[tdf.columns[13]].dtype),\"tdf_name\":tdf.columns[13],\"name\":\"HEARTRATE_MAX\",\"nullable\":True,\"type\":{\"hana\":\"INTEGER\"}},\n {\"class\":str(tdf[tdf.columns[14]].dtype),\"tdf_name\":tdf.columns[14],\"name\":\"HEARTRATE_MEAN\",\"nullable\":True,\"type\":{\"hana\":\"INTEGER\"}},\n {\"class\":str(tdf[tdf.columns[15]].dtype),\"tdf_name\":tdf.columns[15],\"name\":\"CADENCE_MIN\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}},\n {\"class\":str(tdf[tdf.columns[16]].dtype),\"tdf_name\":tdf.columns[16],\"name\":\"CADENCE_MAX\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}},\n {\"class\":str(tdf[tdf.columns[17]].dtype),\"tdf_name\":tdf.columns[17],\"name\":\"CADENCE_MEAN\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}},\n {\"class\":str(tdf[tdf.columns[18]].dtype),\"tdf_name\":tdf.columns[18],\"name\":\"TEMPERATURE\",\"nullable\":True,\"type\":{\"hana\":\"DOUBLE\"}}],\"name\":\"TRAINING_SUMMARY\",\"version\":1}\n\n\n data = tdf.values.tolist()\n api.send(\"output\", api.Message(attributes = att,body = data))\n \n\n\napi.set_port_callback(\"input\", on_input)\n\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
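The operator above leans on one pandas idiom worth isolating: a dict-driven groupby().agg() that yields a two-level column index, flattened with '_'.join(...). A self-contained sketch with an invented frame (column names follow the operator; values are illustrative):

import pandas as pd

df = pd.DataFrame({
    'TRAINING_ID': [1, 1, 2],
    'HEART_RATE':  [120, 150, 130],
    'POWER':       [180.0, 220.0, 200.0],
})

aggreg = {'POWER': ['min', 'max', 'mean'], 'HEART_RATE': ['min', 'max', 'mean']}
tdf = df.groupby('TRAINING_ID').agg(aggreg).reset_index()

# agg() returns column tuples such as ('POWER', 'min'); joining the levels
# gives flat names like POWER_MIN, and the group key becomes 'TRAINING_ID_'
# (hence the rename step in the operator).
tdf.columns = ['_'.join(col).upper() for col in tdf.columns]
print(tdf.columns.tolist())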
janlight/gsoc-wav2vec2 | [
"4d241553137ba0c3ac5acb4670c5653512b17854"
] | [
"tests/test_wav2vec2.py"
] | [
"import unittest\nfrom functools import partial\n\nimport tensorflow as tf\n\nimport numpy as np\nimport tensorflow_hub as hub\nfrom convert_torch_to_tf import get_tf_pretrained_model\nfrom utils import is_torch_available, is_transformers_available, requires_lib\nfrom wav2vec2 import CTCLoss, Wav2Vec2Config, Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2Processor\nfrom wav2vec2.tensorflow_addons import Conv1DWithWeightNorm\n\n\nif is_torch_available():\n import torch\n import torch.nn as nn\n\nif is_transformers_available():\n from transformers import (\n Wav2Vec2CTCTokenizer as HFWav2Vec2CTCTokenizer,\n Wav2Vec2FeatureExtractor as HFWav2Vec2FeatureExtractor,\n Wav2Vec2ForCTC as HFWav2Vec2ForCTC,\n Wav2Vec2Model as HFWav2Vec2Model\n )\n\nMODEL_ID = \"vasudevgupta/gsoc-wav2vec2-960h\"\nHF_MODEL_ID = \"facebook/wav2vec2-base-960h\"\nHF_MODEL_IDS = [\"facebook/wav2vec2-base-960h\", \"facebook/wav2vec2-base\"]\nSEED = 0\n\n\nclass Wav2Vec2Tester(unittest.TestCase):\n def _get_batches(self):\n batch, _ = tf.audio.decode_wav(tf.io.read_file(\"data/sample.wav\"))\n batch = tf.transpose(batch, perm=(1, 0))\n\n tf.random.set_seed(SEED)\n batch = tf.concat([batch, tf.random.normal(batch.shape)], axis=0)\n hf_batch = torch.from_numpy(batch.numpy()).float()\n\n np.random.seed(SEED)\n labels = np.random.randint(1, 30, size=(2, 24))\n tf_labels = tf.convert_to_tensor(labels, dtype=tf.int32)\n hf_labels = torch.from_numpy(labels).long()\n return batch, hf_batch, tf_labels, hf_labels\n\n @partial(requires_lib, lib=[\"torch\", \"transformers\"])\n def _test_inference(self, model_id, hf_model_id, test_graph_mode=False):\n @tf.function(autograph=True, jit_compile=True)\n def tf_forward(*args, **kwargs):\n return tf_model(*args, **kwargs)\n\n batch, hf_batch, _, _ = self._get_batches()\n\n tf_model = Wav2Vec2Model.from_pretrained(model_id, input_shape=batch.shape)\n hf_model = HFWav2Vec2Model.from_pretrained(hf_model_id)\n\n if tf_model.config.is_robust:\n attention_mask = np.ones(batch.shape, dtype=np.int32)\n attention_mask[0, -1000:] = attention_mask[1, -132:] = 0\n hf_attention_mask = torch.from_numpy(attention_mask)\n attention_mask = tf.convert_to_tensor(attention_mask)\n else:\n attention_mask = hf_attention_mask = None\n\n if test_graph_mode:\n tf_out = tf_forward(batch, attention_mask=attention_mask, training=False)\n else:\n tf_out = tf_model(batch, attention_mask=attention_mask, training=False)\n with torch.no_grad():\n hf_out = hf_model(hf_batch, attention_mask=hf_attention_mask)\n\n tf_logits = tf_out.numpy()\n hf_logits = hf_out[\"last_hidden_state\"].numpy()\n\n assert tf_logits.shape == hf_logits.shape, \"Oops, logits shape is not matching\"\n assert np.allclose(\n hf_logits, tf_logits, atol=1e-3\n ), f\"difference: {np.max(hf_logits - tf_logits)}\"\n\n def test_inference(self):\n model_id, hf_model_id = \"vasudevgupta/gsoc-wav2vec2\", \"facebook/wav2vec2-base\"\n self._test_inference(model_id, hf_model_id, test_graph_mode=False)\n\n def test_wav2vec2_robust(self):\n model_id, hf_model_id = \"vasudevgupta/gsoc-wav2vec2-robust\", \"facebook/wav2vec2-large-robust\"\n self._test_inference(model_id, hf_model_id, test_graph_mode=False)\n\n def test_wav2vec2_xlsr(self):\n model_id, hf_model_id = \"vasudevgupta/gsoc-wav2vec2-xlsr-53\", \"facebook/wav2vec2-large-xlsr-53\"\n self._test_inference(model_id, hf_model_id, test_graph_mode=False)\n\n def test_jit_and_graph_mode(self):\n model_id, hf_model_id = \"vasudevgupta/gsoc-wav2vec2\", \"facebook/wav2vec2-base\"\n self._test_inference(model_id, hf_model_id, 
test_graph_mode=True)\n\n @partial(requires_lib, lib=[\"transformers\"])\n def test_feature_extractor(self):\n batch, hf_batch, _, _ = self._get_batches()\n tf_processor = Wav2Vec2Processor(is_tokenizer=False)\n hf_processor = HFWav2Vec2FeatureExtractor.from_pretrained(HF_MODEL_ID)\n\n tf_out = tf_processor(batch)\n hf_out = hf_processor(hf_batch.numpy().tolist())[\"input_values\"]\n assert np.allclose(\n tf_out, hf_out, atol=0.01\n ), f\"difference:, {np.max(hf_out - tf_out)}\"\n\n def test_end2end(self):\n model_id = \"vasudevgupta/gsoc-wav2vec2-960h\"\n hf_model_id = \"facebook/wav2vec2-base-960h\"\n self._test_end2end(model_id, hf_model_id)\n\n @partial(requires_lib, lib=[\"torch\", \"transformers\"])\n def _test_end2end(self, model_id, hf_model_id):\n # data loading\n b1 = tf.transpose(\n tf.audio.decode_wav(tf.io.read_file(\"data/sample.wav\"))[0], perm=(1, 0)\n )\n b2 = tf.transpose(\n tf.audio.decode_wav(tf.io.read_file(\"data/SA2.wav\"))[0], perm=(1, 0)\n )\n batch = tf.concat([b1[:, :40000], b2[:, :40000]], axis=0)\n\n # data processing\n tf_processor = Wav2Vec2Processor(is_tokenizer=False)\n hf_processor = HFWav2Vec2FeatureExtractor.from_pretrained(hf_model_id)\n\n hf_batch = hf_processor(batch.numpy().tolist())[\"input_values\"]\n hf_batch = torch.tensor(hf_batch, dtype=torch.float)\n batch = tf_processor(batch)\n\n assert batch.shape == hf_batch.shape\n assert np.allclose(\n batch, hf_batch, atol=1e-5\n ), f\"difference:, {np.max(batch - hf_batch)}\"\n\n # model inference\n tf_model = Wav2Vec2ForCTC.from_pretrained(model_id, input_shape=batch.shape)\n hf_model = HFWav2Vec2ForCTC.from_pretrained(hf_model_id)\n\n if tf_model.config.is_robust:\n attention_mask = tf.ones(batch.shape)\n hf_attention_mask = torch.tensor(attention_mask.numpy())\n else:\n attention_mask = hf_attention_mask = None\n\n tf_out = tf_model(batch, attention_mask=attention_mask, training=False)\n with torch.no_grad():\n hf_out = hf_model(hf_batch, attention_mask=hf_attention_mask)[\"logits\"]\n tf_out = tf_out.numpy()\n hf_out = hf_out.numpy()\n\n assert tf_out.shape == hf_out.shape\n assert np.allclose(\n hf_out, tf_out, atol=0.004\n ), f\"difference:, {np.max(hf_out - tf_out)}\"\n\n # decoding\n tf_tokenizer = Wav2Vec2Processor(\n is_tokenizer=True, vocab_path=\"data/vocab.json\"\n )\n hf_tokenizer = HFWav2Vec2CTCTokenizer.from_pretrained(hf_model_id)\n\n tf_out = np.argmax(tf_out, axis=-1).squeeze()\n hf_out = np.argmax(hf_out, axis=-1).squeeze()\n\n tf_pred = [tf_tokenizer.decode(output) for output in tf_out.tolist()]\n hf_pred = hf_tokenizer.batch_decode(hf_out)\n assert tf_pred == hf_pred, f\"{tf_pred} VS {hf_pred}\"\n\n @partial(requires_lib, lib=[\"transformers\", \"torch\"])\n def test_conversion_script(self):\n for hf_model_id in HF_MODEL_IDS:\n config = Wav2Vec2Config()\n tf_model, hf_model = get_tf_pretrained_model(\n config,\n hf_model_id,\n verbose=False,\n with_lm_head=True,\n )\n batch, hf_batch, _, _ = self._get_batches()\n tf_logits = tf_model(batch).numpy()\n with torch.no_grad():\n hf_logits = hf_model(hf_batch, return_dict=False)\n hf_logits = hf_logits[0].numpy()\n assert np.allclose(\n hf_logits, tf_logits, atol=0.004\n ), f\"difference: {np.max(hf_logits - tf_logits)}\"\n\n @partial(requires_lib, lib=[\"torch\", \"transformers\"])\n def test_loss_autograph(self):\n \"\"\"\n This is very important test and shows how model forward pass should be written.\n\n Note:\n 1. `Wav2Vec2ForCTC.call()` & `CTCLoss.__call__` both works in eager mode.\n 2. 
In graph mode, `Wav2Vec2ForCTC.call()` doesn't work with `jit_compile=False` while it works when `jit_compile=True`.\n 3. In graph mode, `CTCLoss.__call__` doesn't work with `jit_compile=True` while it works when `jit_compile=False`.\n \"\"\"\n\n @tf.function(jit_compile=True)\n def tf_forward(batch):\n return tf_model(batch, training=False)\n\n @tf.function\n def compute_loss(batch, labels):\n batch = tf_forward(batch)\n loss = loss_fn(labels, batch)\n return loss, batch\n\n batch, hf_batch, tf_labels, hf_labels = self._get_batches()\n\n tf_model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID, input_shape=batch.shape)\n loss_fn = CTCLoss(tf_model.config, batch.shape)\n\n hf_model = HFWav2Vec2ForCTC.from_pretrained(HF_MODEL_ID)\n\n tf_loss, tf_logits = compute_loss(batch, labels=tf_labels)\n with torch.no_grad():\n hf_out = hf_model(hf_batch, labels=hf_labels)\n\n hf_loss = hf_out[\"loss\"].numpy()\n tf_loss = tf_loss.numpy()\n\n assert (\n tf_logits.shape == hf_out[\"logits\"].shape\n ), \"Oops, logits shape is not matching\"\n\n logits_difference = np.max(hf_out[\"logits\"].numpy() - tf_logits.numpy())\n assert np.allclose(\n hf_out[\"logits\"].numpy(), tf_logits.numpy(), atol=0.004\n ), f\"difference: {logits_difference}\"\n\n assert np.allclose(\n tf_loss, hf_loss, atol=1e-3\n ), f\"difference: {np.max(tf_loss - hf_loss)}\"\n\n @partial(requires_lib, lib=[\"torch\"])\n def test_conv_weight_norm(self):\n bsz = 2\n seqlen = 128\n c_in = 32\n filters = 16\n kernal_size = 3\n padding = 1\n num_groups = 2\n\n np.random.seed(SEED)\n array = np.random.uniform(size=(bsz, seqlen, c_in))\n tf_tensor = tf.convert_to_tensor(array, dtype=tf.float32)\n\n # `nn.Conv1d` accepts (batch_size, channels, seqlen)\n torch_tensor = torch.tensor(array, dtype=torch.float32).transpose(2, 1)\n\n tf_layer = Conv1DWithWeightNorm(\n filters, kernal_size, padding=padding, groups=num_groups\n )\n tf_layer(tf_tensor) # build tensorflow weights\n\n torch_layer = nn.Conv1d(\n c_in, filters, kernal_size, padding=padding, groups=num_groups\n )\n torch_layer = nn.utils.weight_norm(torch_layer, dim=2)\n\n # torch & tensorflow weights should be equal\n torch_layer.weight_v.data = torch.tensor(\n np.transpose(tf_layer.variables[1].numpy(), axes=(2, 1, 0))\n )\n torch_layer.bias.data = torch.tensor(tf_layer.variables[0].numpy())\n torch_layer.weight_g.data = torch.tensor(\n np.transpose(tf_layer.variables[2].numpy(), axes=(2, 1, 0))\n )\n\n # forward pass\n with torch.no_grad():\n torch_out = torch_layer(torch_tensor).transpose(2, 1).numpy()\n tf_out = tf_layer(tf_tensor).numpy()\n\n assert np.allclose(\n torch_out, tf_out, atol=1e-4\n ), f\"Difference: {torch_out} vs {tf_out}\"\n\n\nclass TFhubTester(unittest.TestCase):\n def _get_batch(self):\n tf.random.set_seed(SEED)\n batch = tf.random.normal((2, 246000))\n attention_mask = np.ones(batch.shape, dtype=np.float32)\n attention_mask[0, -1000:] = attention_mask[1, -132:] = 0.0\n attention_mask = tf.constant(attention_mask, dtype=tf.float32)\n return batch, attention_mask\n\n def _test_hub_model(self, hub_id, tf_model):\n batch, _ = self._get_batch()\n tfhub_model = hub.KerasLayer(hub_id, trainable=False)\n tfhub_out = tf.function(tfhub_model, jit_compile=True)(batch).numpy()\n out = tf_model(batch).numpy()\n assert np.allclose(tfhub_out, out, atol=1e-3), f\"Difference: {tfhub_out} vs {out}\"\n\n def _test_hub_robust_model(self, hub_id, tf_model):\n batch, attention_mask = self._get_batch()\n tfhub_model = hub.KerasLayer(hub_id, trainable=False)\n tfhub_out = 
tf.function(tfhub_model, jit_compile=True)((batch, attention_mask)).numpy()\n out = tf_model(batch, attention_mask=attention_mask).numpy()\n assert np.allclose(tfhub_out, out, atol=1e-2), f\"Difference: {tfhub_out} vs {out}\"\n\n def test_wav2vec2_base(self):\n hub_id = \"https://tfhub.dev/vasudevgupta7/wav2vec2/1\"\n tf_model = Wav2Vec2Model.from_pretrained(\"vasudevgupta/gsoc-wav2vec2\")\n self._test_hub_model(hub_id, tf_model)\n\n def test_wav2vec2_base_960h(self):\n hub_id = \"https://tfhub.dev/vasudevgupta7/wav2vec2-960h/1\"\n tf_model = Wav2Vec2ForCTC.from_pretrained(\"vasudevgupta/gsoc-wav2vec2-960h\")\n self._test_hub_model(hub_id, tf_model)\n\n def test_wav2vec2_xlsr_53(self):\n hub_id = \"src/wav2vec2_xlsr_53\" # \"https://tfhub.dev/vasudevgupta7/wav2vec2-xlsr-53/1\"\n tf_model = Wav2Vec2Model.from_pretrained(\"vasudevgupta/gsoc-wav2vec2-xlsr-53\")\n self._test_hub_robust_model(hub_id, tf_model)\n\n def test_wav2vec2_robust(self):\n hub_id = \"src/wav2vec2_robust\" # \"https://tfhub.dev/vasudevgupta7/wav2vec2-robust/1\"\n tf_model = Wav2Vec2Model.from_pretrained(\"vasudevgupta/gsoc-wav2vec2-robust\")\n self._test_hub_robust_model(hub_id, tf_model)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"numpy.max",
"torch.no_grad",
"tensorflow.random.set_seed",
"numpy.random.randint",
"numpy.allclose",
"torch.from_numpy",
"torch.tensor",
"numpy.argmax",
"tensorflow.function",
"torch.nn.Conv1d",
"tensorflow.transpose",
"tensorflow.constant",
"numpy.random.seed",
"torch.nn.utils.weight_norm",
"tensorflow.ones",
"numpy.ones",
"tensorflow.io.read_file",
"numpy.random.uniform",
"tensorflow.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
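Nearly every test above follows a single parity pattern: feed identical inputs to a TensorFlow and a PyTorch implementation, then assert shape equality and np.allclose under an explicit atol. A minimal sketch of that pattern, with a trivial op standing in for the wav2vec2 models:

import numpy as np
import tensorflow as tf
import torch

np.random.seed(0)
x = np.random.uniform(size=(2, 8)).astype(np.float32)

# Same computation on both sides; softmax is just a placeholder op here.
tf_out = tf.nn.softmax(tf.convert_to_tensor(x), axis=-1).numpy()
with torch.no_grad():
    pt_out = torch.softmax(torch.from_numpy(x), dim=-1).numpy()

assert tf_out.shape == pt_out.shape, "Oops, shapes are not matching"
assert np.allclose(tf_out, pt_out, atol=1e-6), f"difference: {np.max(tf_out - pt_out)}"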
SivaK18/MachineLearning | [
"299b5ee1f57969885f60f8d1c6461a3e97c8873b"
] | [
"logic.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 30 09:19:01 2019\r\n\r\n@author: sivak\r\n\"\"\"\r\n\r\nimport numpy as np\r\ninputs = np.array([[0,0],[0,1],[1,0],[1,1]])\r\n# AND data\r\nANDtargets = np.array([[0],[0],[0],[1]])\r\n# OR data\r\nORtargets = np.array([[0],[1],[1],[1]])\r\n# XOR data\r\nXORtargets = np.array([[0],[1],[1],[0]])\r\nimport pcn as pcn_logic_eg\r\n\r\nprint (\"AND logic function\")\r\npAND = pcn_logic_eg.pcn(inputs,ANDtargets)\r\npAND.pcntrain(inputs,ANDtargets,0.25,6)\r\n\r\nprint (\"OR logic function\")\r\npOR = pcn_logic_eg.pcn(inputs,ORtargets)\r\npOR.pcntrain(inputs,ORtargets,0.25,6)\r\n\r\nprint (\"XOR logic function\")\r\npXOR = pcn_logic_eg.pcn(inputs,XORtargets)\r\npXOR.pcntrain(inputs,XORtargets,0.25,6)"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
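The pcn module imported above is not included in this row, so its training interface is assumed. A minimal stand-in perceptron (bias handled as an extra -1 input, threshold activation, the usual delta-rule update) reproduces the point of the script: AND and OR are linearly separable, XOR is not:

import numpy as np

def pcntrain(inputs, targets, eta, n_iterations):
    # Augment with a bias input fixed at -1, as in standard perceptron code.
    inputs = np.concatenate((inputs, -np.ones((inputs.shape[0], 1))), axis=1)
    weights = np.random.rand(inputs.shape[1], targets.shape[1]) * 0.1 - 0.05
    for _ in range(n_iterations):
        activations = np.where(inputs @ weights > 0, 1, 0)
        weights -= eta * inputs.T @ (activations - targets)
    return weights

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
ANDtargets = np.array([[0], [0], [0], [1]])
w = pcntrain(inputs, ANDtargets, 0.25, 6)
aug = np.concatenate((inputs, -np.ones((4, 1))), axis=1)
print(np.where(aug @ w > 0, 1, 0).ravel())  # typically [0 0 0 1]; XOR never converges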
deargen/DearCascadedWx | [
"86d4ec252b788af67ca84c665d293f426b64f826"
] | [
"src/models/ffnnrf.py"
] | [
"from models.neuralnet import SurvivalNeuralNet\n# from models.feedforwardnet import SurvivalFeedForwardNet\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, Dropout\nfrom keras.regularizers import L1L2\nimport numpy as np\nimport pandas as pd\nimport _pickle as cPickle\nfrom keras.utils import to_categorical\nfrom wx_hyperparam import WxHyperParameter\nfrom wx_core import DoFeatureSelectionWX\nfrom sklearn.utils import shuffle\n#from lifelines import CoxPHFitter, KaplanMeierFitter\nfrom sklearn.metrics import roc_auc_score\n# from sklearn.feature_selection import VarianceThreshold\n#from sklearn import svm\n#import xgboost as xgb\nfrom sklearn.ensemble import RandomForestClassifier\nimport os\nimport models.utils as helper\n\nclass SurvivalFFNNRF(SurvivalNeuralNet):\n def __init__(self, model_name, cancer, omics_type, out_folder, epochs=1000, vecdim=10):\n super(SurvivalFFNNRF, self).__init__(model_name, cancer, omics_type, out_folder, epochs)\n self.vecdim = vecdim\n self.selected_idx = None\n self.random_seed = 1\n self.cancer_type = cancer\n self.omics_type = omics_type\n self.out_folder = out_folder\n\n def feature_selection(self, x, c, s, xnames, fold, sel_f_num, dev_index): \n def get_sel_idx(high_th_year, low_th_year, feature_list, sel_feature_num):\n high_risk_th = high_th_year*365\n low_risk_th = low_th_year*365\n high_risk_group, low_risk_group = helper.get_risk_group(x,c,s,high_risk_th,low_risk_th)\n trn_x, trn_y = helper.get_train(high_risk_group, low_risk_group, is_categori_y=False, seed=self.random_seed)#without validation set\n\n clf = RandomForestClassifier()\n clf.fit(trn_x,trn_y)\n f_scores = clf.feature_importances_\n coef_idx_sort = np.argsort(f_scores)[::-1]\n\n return coef_idx_sort[:sel_feature_num]\n\n def get_cascaded_sel_idx(high_th_year, low_th_year, feature_list, set_feature, sel_feature_num, div_ratio = 4):\n high_risk_th = high_th_year*365\n low_risk_th = low_th_year*365\n high_risk_group, low_risk_group = helper.get_risk_group(x,c,s,high_risk_th,low_risk_th)\n #trn_x, trn_y, val_x, val_y = get_train_val(high_risk_group, low_risk_group)\n trn_x, trn_y = helper.get_train(high_risk_group, low_risk_group, is_categori_y=False, seed=self.random_seed)#without validation set\n if len(set_feature):\n trn_x = trn_x[:,set_feature]\n #val_x = val_x[:,set_feature]\n feature_num = trn_x.shape[1]\n\n if sel_feature_num == 0:\n sel_gene_num = int(max(sel_feature_num, feature_num/div_ratio))\n else:\n sel_gene_num = sel_feature_num\n\n clf = RandomForestClassifier()\n clf.fit(trn_x,trn_y)\n f_scores = clf.feature_importances_\n coef_idx_sort = np.argsort(f_scores)[::-1] \n sel_idx = coef_idx_sort[:sel_gene_num]\n\n return sel_idx \n\n save_feature_file = self.out_folder+'/FFNNRF/selected_features_'+self.cancer_type+'_'+self.omics_type+'_'+str(fold)+'.csv'\n\n if os.path.isfile(save_feature_file):\n df = pd.read_csv(save_feature_file)\n sort_index = df['index'].values\n final_sel_idx = sort_index[:sel_f_num]\n else:\n \n if self.out_folder.split('_')[1] == 'cas':\n if self.omics_type == 'mrna':\n if self.cancer_type == 'BRCA':\n div_ratio = 2\n else:\n div_ratio = 4\n\n step1_sel_idx = get_cascaded_sel_idx(3,3,xnames,[], 0, div_ratio = div_ratio)\n step2_sel_idx = get_cascaded_sel_idx(2,4,step1_sel_idx,step1_sel_idx, 0, div_ratio = div_ratio)\n sel_f_num_write = len(step2_sel_idx)\n step3_sel_idx = get_cascaded_sel_idx(1,5,step2_sel_idx,step1_sel_idx[step2_sel_idx], sel_f_num_write, div_ratio = div_ratio)\n final_sel_idx = 
step1_sel_idx[step2_sel_idx[step3_sel_idx]] \n else:\n sel_f_num_write = len(xnames)\n final_sel_idx = get_sel_idx(3,3,xnames,sel_f_num_write)\n\n with open(save_feature_file,'wt') as wFile:\n wFile.writelines(\"gene,coef,index\\n\")\n for n,name in enumerate(xnames[final_sel_idx]):\n wFile.writelines(str(name.split('|')[0])+','+str(sel_f_num_write - n)+','+str(final_sel_idx[n])+'\\n')\n \n final_sel_idx = final_sel_idx[:sel_f_num]\n\n return final_sel_idx \n\n def get_model(self, input_size, dropout):\n input_dim = input_size\n # reg = L1L2(l1=1.0, l2=0.5)\n reg = None\n inputs = Input((input_dim,))\n if dropout == 0.0:\n z = inputs#without dropout\n else:\n z = Dropout(dropout)(inputs)\n outputs = Dense(1, kernel_initializer='zeros', bias_initializer='zeros',\n kernel_regularizer=reg,\n activity_regularizer=reg,\n bias_regularizer=reg)(z)\n model = Model(inputs=inputs, outputs=outputs)\n # model.summary()\n return model\n\n def preprocess_eval(self, x):\n x_new = x[:,self.sel_idx]\n return x_new\n\n def preprocess(self, x, c, s, xnames, fold, n_sel, dev_index):\n sel_idx = self.feature_selection(x, c, s, xnames, fold, n_sel, dev_index)\n self.sel_idx = sel_idx\n x_new = x[:,sel_idx]\n return x_new"
] | [
[
"numpy.argsort",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
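Stripped of the cascade bookkeeping, the selection step above is: fit a RandomForestClassifier, rank features by feature_importances_ in descending order, keep the top k. A self-contained sketch on synthetic data (shapes and names are illustrative):

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(1)
X = rng.normal(size=(200, 50))
y = (X[:, 3] + X[:, 17] > 0).astype(int)   # only features 3 and 17 carry signal

clf = RandomForestClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)

# Descending importance ranking, as in get_sel_idx above.
coef_idx_sort = np.argsort(clf.feature_importances_)[::-1]
print(coef_idx_sort[:5])  # features 3 and 17 should appear near the front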
OmranKaddah/Disentangled-Representation-Learning | [
"cd6a2a8bf643532df6e134bafd893476d50bcc78"
] | [
"source_code/lib/dist.py"
] | [
"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom lib.functions import STHeaviside\n\neps = 1e-8\n\n\nclass Normal(nn.Module):\n \"\"\"Samples from a Normal distribution using the reparameterization trick.\n \"\"\"\n\n def __init__(self, mu=0, sigma=1):\n super(Normal, self).__init__()\n self.normalization = torch.Tensor([np.log(2 * np.pi)]).requires_grad_()\n\n self.mu = torch.Tensor([mu]).requires_grad_()\n self.logsigma = torch.Tensor([math.log(sigma)]).requires_grad_()\n\n def _check_inputs(self, size, mu_logsigma):\n if size is None and mu_logsigma is None:\n raise ValueError(\n 'Either one of size or params should be provided.')\n elif size is not None and mu_logsigma is not None:\n mu = mu_logsigma.select(-1, 0).expand(size)\n logsigma = mu_logsigma.select(-1, 1).expand(size)\n return mu, logsigma\n elif size is not None:\n mu = self.mu.expand(size)\n logsigma = self.logsigma.expand(size)\n return mu, logsigma\n elif mu_logsigma is not None:\n mu = mu_logsigma.select(-1, 0)\n logsigma = mu_logsigma.select(-1, 1)\n return mu, logsigma\n else:\n raise ValueError(\n 'Given invalid inputs: size={}, mu_logsigma={})'.format(\n size, mu_logsigma))\n\n def sample(self, size=None, params=None):\n mu, logsigma = self._check_inputs(size, params)\n std_z = torch.randn_like(mu)\n sample = std_z * torch.exp(logsigma) + mu\n return sample\n\n def log_density(self, sample, params=None):\n if params is not None:\n mu, logsigma = self._check_inputs(None, params)\n else:\n mu, logsigma = self._check_inputs(sample.size(), None)\n mu = mu.type_as(sample)\n logsigma = logsigma.type_as(sample)\n\n c = self.normalization.type_as(sample.data)\n inv_sigma = torch.exp(-logsigma)\n tmp = (sample - mu) * inv_sigma\n return -0.5 * (tmp * tmp + 2 * logsigma + c)\n\n def NLL(self, params, sample_params=None):\n \"\"\"Analytically computes\n E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]\n If mu_2, and sigma_2^2 are not provided, defaults to entropy.\n \"\"\"\n mu, logsigma = self._check_inputs(None, params)\n if sample_params is not None:\n sample_mu, sample_logsigma = self._check_inputs(None, sample_params)\n else:\n sample_mu, sample_logsigma = mu, logsigma\n\n c = self.normalization.type_as(sample_mu.data)\n nll = logsigma.mul(-2).exp() * (sample_mu - mu).pow(2) \\\n + torch.exp(sample_logsigma.mul(2) - logsigma.mul(2)) + 2 * logsigma + c\n return nll.mul(0.5)\n\n def kld(self, params):\n \"\"\"Computes KL(q||p) where q is the given distribution and p\n is the standard Normal distribution.\n \"\"\"\n mu, logsigma = self._check_inputs(None, params)\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mean^2 - sigma^2)\n kld = logsigma.mul(2).add(1) - mu.pow(2) - logsigma.exp().pow(2)\n kld.mul_(-0.5)\n return kld\n\n def get_params(self):\n return torch.cat([self.mu, self.logsigma])\n\n @property\n def nparams(self):\n return 2\n\n @property\n def ndim(self):\n return 1\n\n @property\n def is_reparameterizable(self):\n return True\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(\n self.mu.data[0], self.logsigma.exp().data[0])\n return tmpstr\n\n\nclass Laplace(nn.Module):\n \"\"\"Samples from a Laplace distribution using the reparameterization trick.\n \"\"\"\n\n def __init__(self, mu=0, scale=1):\n super(Laplace, self).__init__()\n self.normalization = torch.Tensor([-math.log(2)]).requires_grad_()\n\n self.mu = torch.Tensor([mu]).requires_grad_()\n self.logscale = torch.Tensor([math.log(scale)]).requires_grad_()\n\n def _check_inputs(self, size, mu_logscale):\n if size is None and mu_logscale is None:\n raise ValueError(\n 'Either one of size or params should be provided.')\n elif size is not None and mu_logscale is not None:\n mu = mu_logscale.select(-1, 0).expand(size)\n logscale = mu_logscale.select(-1, 1).expand(size)\n return mu, logscale\n elif size is not None:\n mu = self.mu.expand(size)\n logscale = self.logscale.expand(size)\n return mu, logscale\n elif mu_logscale is not None:\n mu = mu_logscale.select(-1, 0)\n logscale = mu_logscale.select(-1, 1)\n return mu, logscale\n else:\n raise ValueError(\n 'Given invalid inputs: size={}, mu_logscale={})'.format(\n size, mu_logscale))\n\n def sample(self, size=None, params=None):\n mu, logscale = self._check_inputs(size, params)\n scale = torch.exp(logscale)\n # Unif(-0.5, 0.5)\n u = torch.rand_like(mu) - 0.5\n sample = mu - scale * torch.sign(u) * torch.log(1 - 2 * torch.abs(u) + eps)\n return sample\n\n def log_density(self, sample, params=None):\n if params is not None:\n mu, logscale = self._check_inputs(None, params)\n else:\n mu, logscale = self._check_inputs(sample.size(), None)\n mu = mu.type_as(sample)\n logscale = logscale.type_as(sample)\n\n c = self.normalization.type_as(sample.data)\n inv_scale = torch.exp(-logscale)\n ins_exp = - torch.abs(sample - mu) * inv_scale\n return ins_exp + c - logscale\n\n def get_params(self):\n return torch.cat([self.mu, self.logscale])\n\n @property\n def nparams(self):\n return 2\n\n @property\n def ndim(self):\n return 1\n\n @property\n def is_reparameterizable(self):\n return True\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(\n self.mu.data[0], self.logscale.exp().data[0])\n return tmpstr\n\nclass Gumbel_Softmax(nn.Module):\n \"\"\"\n CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX\n Jang et al, 2017\n \"\"\"\n @staticmethod\n def sample( alpha, temperature, training =True):\n \"\"\"\n Samples from a gumbel-softmax distribution using the reparameterization\n trick.\n Parameters\n ----------\n alpha : torch.Tensor\n Parameters of the gumbel-softmax distribution. 
Shape (N, D)\n \"\"\"\n EPS = 1e-12\n if training:\n # Sample from gumbel distribution\n unif = torch.rand(alpha.size()).cuda()\n \n\n gumbel = -torch.log(-torch.log(unif + EPS) + EPS)\n # Reparameterize to create gumbel softmax sample\n log_alpha = torch.log(alpha + EPS)\n logit = (log_alpha + gumbel) / temperature\n return F.softmax(logit, dim=1)\n else:\n # In reconstruction mode, pick most likely sample\n _, max_alpha = torch.max(alpha, dim=1)\n one_hot_samples = torch.zeros(alpha.size())\n # On axis 1 of one_hot_samples, scatter the value 1 at indices\n # max_alpha. Note the view is because scatter_ only accepts 2D\n # tensors.\n one_hot_samples.scatter_(1, max_alpha.view(-1, 1).data.cpu(), 1)\n \n one_hot_samples = one_hot_samples.cuda()\n return one_hot_samples\n @staticmethod\n def log_density(pis, samples, temperature):\n \"\"\"\n Parameters:\n alphas(pis): list of scalars of type float torch.tensors\n the list should sum up to one. In the original paper (Jang et al, 2017)\n they are refered to as pi.\n \"\"\"\n k = pis.shape[-1]\n consts = math.log(math.gamma(k)* temperature**(k-1))\n logsum_probs_sampels = -k * torch.log(pis.div(samples.pow(temperature)).sum(-1))\n logmulti_probs_sampels = pis.div(samples.pow(temperature + 1)).log().sum(-1)\n return consts + logsum_probs_sampels + logmulti_probs_sampels\n \nclass Bernoulli(nn.Module):\n \"\"\"Samples from a Bernoulli distribution where the probability is given\n by the sigmoid of the given parameter.\n \"\"\"\n\n def __init__(self, p=0.5, stgradient=False):\n super(Bernoulli, self).__init__()\n p = torch.Tensor([p])\n self.p = torch.log(p / (1 - p) + eps).requires_grad_()\n self.stgradient = stgradient\n\n def _check_inputs(self, size, ps):\n if size is None and ps is None:\n raise ValueError(\n 'Either one of size or params should be provided.')\n elif size is not None and ps is not None:\n if ps.ndimension() > len(size):\n return ps.squeeze(-1).expand(size)\n else:\n return ps.expand(size)\n elif size is not None:\n return self.p.expand(size)\n elif ps is not None:\n return ps\n else:\n raise ValueError(\n 'Given invalid inputs: size={}, ps={})'.format(size, ps))\n\n def _sample_logistic(self, size):\n u = torch.rand(size).requires_grad_()\n l = torch.log(u + eps) - torch.log(1 - u + eps)\n return l\n\n def sample(self, size=None, params=None):\n presigm_ps = self._check_inputs(size, params)\n logp = F.logsigmoid(presigm_ps)\n logq = F.logsigmoid(-presigm_ps)\n l = self._sample_logistic(logp.size()).type_as(presigm_ps)\n z = logp - logq + l\n b = STHeaviside.apply(z)\n return b if self.stgradient else b.detach()\n\n def log_density(self, sample, params=None):\n presigm_ps = self._check_inputs(sample.size(), params).type_as(sample)\n p = (F.sigmoid(presigm_ps) + eps) * (1 - 2 * eps)\n logp = sample * torch.log(p + eps) + (1 - sample) * torch.log(1 - p + eps)\n return logp\n\n def get_params(self):\n return self.p\n\n @property\n def nparams(self):\n return 1\n\n @property\n def ndim(self):\n return 1\n\n @property\n def is_reparameterizable(self):\n return self.stgradient\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + ' ({:.3f})'.format(\n torch.sigmoid(self.p.data)[0])\n return tmpstr"
] | [
[
"torch.randn_like",
"torch.abs",
"torch.nn.functional.softmax",
"torch.sigmoid",
"numpy.log",
"torch.max",
"torch.Tensor",
"torch.cat",
"torch.rand_like",
"torch.sign",
"torch.nn.functional.logsigmoid",
"torch.exp",
"torch.nn.functional.sigmoid",
"torch.log",
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
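The Normal class above hand-rolls the reparameterization trick (sample = mu + sigma * eps) and its log density. A short check of that log-density formula against torch.distributions, which should agree up to float precision:

import math
import torch

mu, logsigma = torch.tensor(0.5), torch.tensor(-0.3)
eps = torch.randn(4)
sample = eps * torch.exp(logsigma) + mu   # reparameterized draw, as in sample()

# -0.5 * (((x - mu)/sigma)^2 + 2*logsigma + log(2*pi)), mirroring log_density()
c = math.log(2 * math.pi)
tmp = (sample - mu) * torch.exp(-logsigma)
manual = -0.5 * (tmp * tmp + 2 * logsigma + c)

reference = torch.distributions.Normal(mu, torch.exp(logsigma)).log_prob(sample)
assert torch.allclose(manual, reference, atol=1e-5)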
p328188467/edenas | [
"82fc62528cb25a228d011f2e30f984969d012882"
] | [
"src/fashion_minst/micro_controller.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom src.controller import Controller\nfrom src.utils import get_train_ops\nfrom src.common_ops import stack_lstm\n\nfrom tensorflow.python.training import moving_averages\n\nclass MicroController(Controller):\n def __init__(self,\n search_for=\"both\",\n search_whole_channels=False,\n num_branches=6,\n num_cells=6,\n lstm_size=28,\n lstm_num_layers=2,\n lstm_keep_prob=1.0,\n tanh_constant=None,\n op_tanh_reduce=1.0,\n temperature=None,\n lr_init=1e-3,\n lr_dec_start=0,\n lr_dec_every=100,\n lr_dec_rate=0.9,\n l2_reg=0,\n entropy_weight=None,\n clip_mode=None,\n grad_bound=None,\n use_critic=False,\n bl_dec=0.999,\n optim_algo=\"adam\",\n sync_replicas=False,\n num_aggregate=None,\n num_replicas=None,\n name=\"controller\",\n **kwargs):\n\n print(\"-\" * 80)\n print(\"Building ConvController\")\n\n self.search_for = search_for\n self.search_whole_channels = search_whole_channels\n self.num_cells = num_cells\n self.num_branches = num_branches\n\n self.lstm_size = lstm_size\n self.lstm_num_layers = lstm_num_layers \n self.lstm_keep_prob = lstm_keep_prob\n self.tanh_constant = tanh_constant\n self.op_tanh_reduce = op_tanh_reduce\n self.temperature = temperature\n self.lr_init = lr_init\n self.lr_dec_start = lr_dec_start\n self.lr_dec_every = lr_dec_every\n self.lr_dec_rate = lr_dec_rate\n self.l2_reg = l2_reg\n self.entropy_weight = entropy_weight\n self.clip_mode = clip_mode\n self.grad_bound = grad_bound\n self.use_critic = use_critic\n self.bl_dec = bl_dec\n\n self.optim_algo = optim_algo\n self.sync_replicas = sync_replicas\n self.num_aggregate = num_aggregate\n self.num_replicas = num_replicas\n self.name = name\n\n self._create_params()\n arc_seq_1, entropy_1, log_prob_1, c, h = self._build_sampler(use_bias=True)\n arc_seq_2, entropy_2, log_prob_2, _, _ = self._build_sampler(prev_c=c, prev_h=h)\n self.sample_arc = (arc_seq_1, arc_seq_2)\n self.sample_entropy = entropy_1 + entropy_2\n self.sample_log_prob = log_prob_1 + log_prob_2\n\n def _create_params(self):\n initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)\n with tf.variable_scope(self.name, initializer=initializer):\n with tf.variable_scope(\"lstm\"):\n self.w_lstm = []\n for layer_id in range(self.lstm_num_layers):\n with tf.variable_scope(\"layer_{}\".format(layer_id)):\n w = tf.get_variable(\"w\", [2 * self.lstm_size, 4 * self.lstm_size])\n self.w_lstm.append(w)\n\n self.g_emb = tf.get_variable(\"g_emb\", [1, self.lstm_size])\n with tf.variable_scope(\"emb\"):\n self.w_emb = tf.get_variable(\"w\", [self.num_branches, self.lstm_size])\n with tf.variable_scope(\"softmax\"):\n self.w_soft = tf.get_variable(\"w\", [self.lstm_size, self.num_branches])\n b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2),\n dtype=np.float32)\n self.b_soft = tf.get_variable(\n \"b\", [1, self.num_branches],\n initializer=tf.constant_initializer(b_init))\n\n b_soft_no_learn = np.array(\n [0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32)\n b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches])\n self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32)\n\n with tf.variable_scope(\"attention\"):\n self.w_attn_1 = tf.get_variable(\"w_1\", [self.lstm_size, self.lstm_size])\n self.w_attn_2 = tf.get_variable(\"w_2\", [self.lstm_size, self.lstm_size])\n self.v_attn = tf.get_variable(\"v\", 
[self.lstm_size, 1])\n\n def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False):\n \"\"\"Build the sampler ops and the log_prob ops.\"\"\"\n\n print (\"-\" * 80)\n print (\"Build controller sampler\")\n\n anchors = tf.TensorArray(\n tf.float32, size=self.num_cells + 2, clear_after_read=False)\n anchors_w_1 = tf.TensorArray(\n tf.float32, size=self.num_cells + 2, clear_after_read=False)\n arc_seq = tf.TensorArray(tf.int32, size=self.num_cells * 4)\n if prev_c is None:\n assert prev_h is None, \"prev_c and prev_h must both be None\"\n prev_c = [tf.zeros([1, self.lstm_size], tf.float32)\n for _ in range(self.lstm_num_layers)]\n prev_h = [tf.zeros([1, self.lstm_size], tf.float32)\n for _ in range(self.lstm_num_layers)]\n inputs = self.g_emb\n\n for layer_id in range(2):\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n prev_c, prev_h = next_c, next_h\n anchors = anchors.write(layer_id, tf.zeros_like(next_h[-1]))\n anchors_w_1 = anchors_w_1.write(\n layer_id, tf.matmul(next_h[-1], self.w_attn_1))\n\n def _condition(layer_id, *args):\n return tf.less(layer_id, self.num_cells + 2)\n\n def _body(layer_id, inputs, prev_c, prev_h, anchors, anchors_w_1, arc_seq,\n entropy, log_prob):\n indices = tf.range(0, layer_id, dtype=tf.int32)\n start_id = 4 * (layer_id - 2)\n prev_layers = []\n for i in range(2): # index_1, index_2\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n prev_c, prev_h = next_c, next_h\n query = anchors_w_1.gather(indices)\n query = tf.reshape(query, [layer_id, self.lstm_size])\n query = tf.tanh(query + tf.matmul(next_h[-1], self.w_attn_2))\n query = tf.matmul(query, self.v_attn)\n logits = tf.reshape(query, [1, layer_id])\n if self.temperature is not None:\n logits /= self.temperature\n if self.tanh_constant is not None:\n logits = self.tanh_constant * tf.tanh(logits)\n index = tf.multinomial(logits, 1)\n index = tf.to_int32(index)\n index = tf.reshape(index, [1])\n arc_seq = arc_seq.write(start_id + 2 * i, index)\n curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=index)\n log_prob += curr_log_prob\n curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=tf.nn.softmax(logits)))\n entropy += curr_ent\n prev_layers.append(anchors.read(tf.reduce_sum(index)))\n inputs = prev_layers[-1]\n\n for i in range(2): # op_1, op_2\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n prev_c, prev_h = next_c, next_h\n logits = tf.matmul(next_h[-1], self.w_soft) + self.b_soft\n if self.temperature is not None:\n logits /= self.temperature\n if self.tanh_constant is not None:\n op_tanh = self.tanh_constant / self.op_tanh_reduce\n logits = op_tanh * tf.tanh(logits)\n if use_bias:\n logits += self.b_soft_no_learn\n op_id = tf.multinomial(logits, 1)\n op_id = tf.to_int32(op_id)\n op_id = tf.reshape(op_id, [1])\n arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id)\n curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=op_id)\n log_prob += curr_log_prob\n curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=tf.nn.softmax(logits)))\n entropy += curr_ent\n inputs = tf.nn.embedding_lookup(self.w_emb, op_id)\n\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n anchors = anchors.write(layer_id, next_h[-1])\n anchors_w_1 = anchors_w_1.write(layer_id, tf.matmul(next_h[-1], self.w_attn_1))\n inputs = self.g_emb\n\n return (layer_id + 1, inputs, next_c, next_h, anchors, 
anchors_w_1,\n arc_seq, entropy, log_prob)\n\n loop_vars = [\n tf.constant(2, dtype=tf.int32, name=\"layer_id\"),\n inputs,\n prev_c,\n prev_h,\n anchors,\n anchors_w_1,\n arc_seq,\n tf.constant([0.0], dtype=tf.float32, name=\"entropy\"),\n tf.constant([0.0], dtype=tf.float32, name=\"log_prob\"),\n ]\n \n loop_outputs = tf.while_loop(_condition, _body, loop_vars,\n parallel_iterations=1)\n\n arc_seq = loop_outputs[-3].stack()\n arc_seq = tf.reshape(arc_seq, [-1])\n entropy = tf.reduce_sum(loop_outputs[-2])\n log_prob = tf.reduce_sum(loop_outputs[-1])\n\n last_c = loop_outputs[-7]\n last_h = loop_outputs[-6]\n\n return arc_seq, entropy, log_prob, last_c, last_h\n\n def build_trainer(self, child_model):\n child_model.build_valid_rl()\n self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) /\n tf.to_float(child_model.batch_size))\n self.reward = self.valid_acc\n\n if self.entropy_weight is not None:\n self.reward += self.entropy_weight * self.sample_entropy\n\n self.sample_log_prob = tf.reduce_sum(self.sample_log_prob)\n self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)\n baseline_update = tf.assign_sub(\n self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))\n\n with tf.control_dependencies([baseline_update]):\n self.reward = tf.identity(self.reward)\n\n self.loss = self.sample_log_prob * (self.reward - self.baseline)\n self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name=\"train_step\")\n\n tf_variables = [var for var in tf.trainable_variables() if var.name.startswith(self.name)]\n print(\"-\" * 80)\n for var in tf_variables:\n print(var)\n\n self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(\n self.loss,\n tf_variables,\n self.train_step,\n clip_mode=self.clip_mode,\n grad_bound=self.grad_bound,\n l2_reg=self.l2_reg,\n lr_init=self.lr_init,\n lr_dec_start=self.lr_dec_start,\n lr_dec_every=self.lr_dec_every,\n lr_dec_rate=self.lr_dec_rate,\n optim_algo=self.optim_algo,\n sync_replicas=self.sync_replicas,\n num_aggregate=self.num_aggregate,\n num_replicas=self.num_replicas)\n\n self.skip_rate = tf.constant(0.0, dtype=tf.float32)\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.tanh",
"tensorflow.to_int32",
"tensorflow.while_loop",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"numpy.reshape",
"tensorflow.to_float",
"tensorflow.trainable_variables",
"tensorflow.matmul",
"tensorflow.less",
"tensorflow.TensorArray",
"tensorflow.identity",
"tensorflow.zeros_like",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.constant_initializer",
"tensorflow.assign_sub",
"tensorflow.variable_scope",
"tensorflow.multinomial"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
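The heart of the controller above is one repeated step: sample a discrete decision with tf.multinomial and score it with sparse softmax cross-entropy (the negative log-probability that REINFORCE needs). A compact TF1-style sketch of that step, assumed runnable under tf.compat.v1:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

logits = tf.constant([[1.0, 2.0, 0.5, 0.1]])    # scores for 4 candidate branches
op_id = tf.to_int32(tf.multinomial(logits, 1))  # sampled branch id
op_id = tf.reshape(op_id, [1])

# Cross-entropy against the sampled label is -log pi(op_id | logits).
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=op_id)

with tf.Session() as sess:
    sampled, neg_log_prob = sess.run([op_id, nll])
    print(sampled, neg_log_prob)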
algorithmiaio/langpacks | [
"0e54c2680d8882053ed84e11952c0c1c3866048c"
] | [
"templates/pytorch-1.6.x-python38/src/__ALGO__.py"
] | [
"import Algorithmia\nimport torch as th\n\n\"\"\"\nExample Input:\n{\n \"matrix_a\": [[0, 1], [1, 0]],\n \"matrix_b\": [[25, 25], [11, 11]]\n}\n\nExpected Output:\n{\n \"product\": [[11, 11], [25, 25]]\n}\n\"\"\"\n\nclass InputObject:\n def __init__(self, input_dict):\n \"\"\"\n Creates an instance of the InputObject, which checks the format of data and throws exceptions if anything is\n missing.\n \"matrix_a\" and \"matrix_b\" must be the same shape.\n :param A - Matrix A, converted from a json list into a torch cuda Tensor.\n :param B - Matrix B, converted from a json list into a torch cuda Tensor.\n \"\"\"\n if isinstance(input_dict, dict):\n if {'matrix_a', 'matrix_b'} <= input_dict.keys():\n self.A = convert(input_dict['matrix_a'])\n self.B = convert(input_dict['matrix_b'])\n else:\n raise Exception(\"'matrix_a' and 'matrix_b' must be defined.\")\n else:\n raise Exception('input must be a json object.')\n if self.A.shape[-1] != self.B.shape[0]:\n raise Exception('inner dimensions between A and B must be the same.\\n A: {} B: {}'\n .format(self.A.shape[-1], self.B.shape[0]))\n\n\ndef convert(list_array):\n \"\"\"\n Converts a json list into a torch Tensor object.\n \"\"\"\n th_tensor = th.tensor(list_array).float()\n gpu_tensor = th_tensor.cuda()\n return gpu_tensor\n\ndef apply(input):\n \"\"\"\n Calculates the dot product of two matricies using pytorch, with a cudnn backend.\n Returns the product as the output.\n \"\"\"\n input = InputObject(input)\n C = th.mm(input.A, input.B)\n z = C.cpu().numpy().tolist()\n output = {'product': z}\n return output\n"
] | [
[
"torch.mm",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
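The algorithm above converts JSON lists to CUDA tensors before torch.mm; the same flow works on CPU for environments without a GPU. A minimal sketch (the function name is illustrative), checked against the docstring's example input:

import torch as th

def matmul_lists(a, b):
    # Convert lists to float tensors, validate inner dimensions, multiply.
    A, B = th.tensor(a).float(), th.tensor(b).float()
    if A.shape[-1] != B.shape[0]:
        raise ValueError('inner dimensions must match: {} vs {}'.format(A.shape[-1], B.shape[0]))
    return th.mm(A, B).numpy().tolist()

print(matmul_lists([[0, 1], [1, 0]], [[25, 25], [11, 11]]))  # [[11.0, 11.0], [25.0, 25.0]]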
canary-for-cognition/multimodal-dl-framework | [
"7733376b05840e2b3dead438dd3981db9694b6ae"
] | [
"dataset/alzheimer/scripts/utils/rename_tobii_heatmaps.py"
] | [
"import os\nimport shutil\n\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef get_preprocessed_file_name(filenames_to_pid_map: Dict, task: str, raw_filename: str) -> str:\n \"\"\"\n Creates and returns the PID-based file name for the preprocessed data item\n :param filenames_to_pid_map: a map from raw file names to PIDs\n :param task: the name of the current task\n :param raw_filename: the raw file name of the currently processed item\n :return: the PID-based file name for the preprocessed data item\n \"\"\"\n pid = filenames_to_pid_map[filenames_to_pid_map[task] == raw_filename[:-4]][\"pid\"].values[0]\n return pid + \".png\"\n\n\ndef get_paths_to_preprocessed(path_to_task: str, labels_map: Dict) -> Dict:\n \"\"\"\n Creates and returns the paths to the preprocessed data items\n :param path_to_task: the path to the data related to the currently processed task\n :param labels_map: a map with the name of positive and negative labels\n :return:\n \"\"\"\n base_path_to_preprocessed = os.path.join(path_to_task, \"preprocessed\")\n path_to_preprocessed_pos = os.path.join(base_path_to_preprocessed, labels_map[\"pos\"])\n path_to_preprocessed_neg = os.path.join(base_path_to_preprocessed, labels_map[\"neg\"])\n os.makedirs(base_path_to_preprocessed, exist_ok=True)\n os.makedirs(path_to_preprocessed_pos, exist_ok=True)\n os.makedirs(path_to_preprocessed_neg, exist_ok=True)\n return {\n \"neg\": path_to_preprocessed_neg,\n \"pos\": path_to_preprocessed_pos\n }\n\n\ndef main():\n # Path to the file containing the CSV mapping filenames to PIDs for each task\n path_to_filenames_to_pid_map = os.path.join(\"metadata\", \"heatmaps_name_to_pid.csv\")\n\n # Labels for the classification problem\n negative_label = \"0_healthy\"\n positive_label = \"1_alzheimer\"\n\n # List of tasks matching the names of the subdirectories in \"./tasks\"\n tasks = [\"cookie_theft\", \"memory\", \"reading\"]\n\n filenames_to_pid_map = pd.read_csv(path_to_filenames_to_pid_map)\n\n labels_map = {\n \"neg\": negative_label,\n \"pos\": positive_label\n }\n\n # Iterate over tasks\n for task in tasks:\n path_to_task = os.path.join(\"tasks\", task)\n path_to_raw = os.path.join(path_to_task, \"raw\")\n paths_to_preprocessed = get_paths_to_preprocessed(path_to_task, labels_map)\n\n # Iterate over files for each task\n for raw_filename in tqdm(os.listdir(path_to_raw), desc=\"\\n Processing files for task: {} \\n\".format(task)):\n preprocessed_filename = get_preprocessed_file_name(filenames_to_pid_map, task, raw_filename)\n label_id = preprocessed_filename[0]\n path_to_preprocessed_file = os.path.join(paths_to_preprocessed[label_id], preprocessed_filename)\n path_to_raw_file = os.path.join(path_to_raw, raw_filename)\n shutil.copy(path_to_raw_file, path_to_preprocessed_file)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
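The renaming above hinges on a single pandas lookup: find the row whose task column equals the raw file stem, and read its pid. A sketch with an inline frame (column names follow the script; the rows are invented):

import pandas as pd

filenames_to_pid_map = pd.DataFrame({
    'pid':          ['1A01', '0H07'],
    'cookie_theft': ['rec_0042', 'rec_0043'],
})

task, raw_filename = 'cookie_theft', 'rec_0043.png'
# Strip the extension, match against the task column, take the mapped pid.
row = filenames_to_pid_map[filenames_to_pid_map[task] == raw_filename[:-4]]
preprocessed_filename = row['pid'].values[0] + '.png'
print(preprocessed_filename)  # 0H07.png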
yummydeli/machine_learning | [
"54471182ac21ef0eee26557a7bd6f3a3dc3a09bd"
] | [
"poi_mining/biz/LSA/split_by_cid.py"
] | [
"#!/usr/bin/env python\n# encoding:utf-8\n\n# ##############################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) [2015] [baidu.com]\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ##############################################################################\n\n\"\"\"\ndocstring for module\n\"\"\"\n\n\nimport pandas\n\nframe = pandas.read_table('cid2pidnames.gbk',\n sep='\\x01',\n encoding='GBK',\n names=[u'name', u'cid2'])\nkeys = frame.cid2.unique()\n\nfor k in keys:\n frame.loc[frame.cid2 == k, [u'name']]\\\n .set_index('name')\\\n .to_csv('data/names.c%s' % k, sep='\\t', encoding='GBK', header=False)\n"
] | [
[
"pandas.read_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
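The split above avoids groupby entirely: read the table once, then write one file per unique cid2 value via boolean masking. The same pattern with an in-memory frame standing in for the GBK input (rows are invented):

import pandas as pd

frame = pd.DataFrame({'name': ['coffee shop', 'bakery', 'teahouse'],
                      'cid2': [1, 2, 1]})

for k in frame.cid2.unique():
    # header=False with 'name' as the index writes just the names, one per line.
    frame.loc[frame.cid2 == k, ['name']]\
         .set_index('name')\
         .to_csv('names.c%s' % k, sep='\t', header=False)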
ivan-alles/spleeter | [
"2e8fbb3105eeede6a9978bfb8c8cf651d23dec20"
] | [
"simplespleeter.py"
] | [
"\"\"\"\n Simplified version without training and architectural sugar.\n\"\"\"\n\nimport os\nimport json\n\nimport numpy as np\nimport librosa\nimport soundfile\nfrom librosa.core import istft, stft\nfrom scipy.signal.windows import hann\n\nfrom functools import partial\nfrom typing import Any, Dict, Iterable, Optional\n\n# pyright: reportMissingImports=false\n# pylint: disable=import-error\nimport tensorflow as tf\nfrom tensorflow.compat.v1 import logging\nfrom tensorflow.compat.v1.keras.initializers import he_uniform\nfrom tensorflow.keras.layers import (\n ELU,\n BatchNormalization,\n Concatenate,\n Conv2D,\n Conv2DTranspose,\n Dropout,\n LeakyReLU,\n Multiply,\n ReLU,\n Softmax,\n)\n\nplaceholder = tf.compat.v1.placeholder\n\nfrom spleeter.utils.tensor import pad_and_partition, pad_and_reshape\n\n\n\nCONFIG_DIR = os.path.join(os.path.dirname(__file__), 'configs')\nMODEL_DIR = os.path.join(os.path.dirname(__file__), 'pretrained_models')\nOUTPUT_DIR = os.path.join(os.path.dirname(__file__), 'output')\n\ndef _get_conv_activation_layer(params: Dict) -> Any:\n \"\"\"\n > To be documented.\n\n Parameters:\n params (Dict):\n\n Returns:\n Any:\n Required Activation function.\n \"\"\"\n conv_activation: str = params.get(\"conv_activation\")\n if conv_activation == \"ReLU\":\n return ReLU()\n elif conv_activation == \"ELU\":\n return ELU()\n return LeakyReLU(0.2)\n\ndef _get_deconv_activation_layer(params: Dict) -> Any:\n \"\"\"\n > To be documented.\n\n Parameters:\n params (Dict):\n\n Returns:\n Any:\n Required Activation function.\n \"\"\"\n deconv_activation: str = params.get(\"deconv_activation\")\n if deconv_activation == \"LeakyReLU\":\n return LeakyReLU(0.2)\n elif deconv_activation == \"ELU\":\n return ELU()\n return ReLU()\n\n\ndef apply_unet(\n input_tensor: tf.Tensor,\n output_name: str = \"output\",\n params: Optional[Dict] = None,\n output_mask_logit: bool = False,\n) -> Any:\n \"\"\"\n Apply a convolutionnal U-net to model a single instrument (one U-net\n is used for each instrument).\n\n Parameters:\n input_tensor (tensorflow.Tensor):\n output_name (str):\n params (Optional[Dict]):\n output_mask_logit (bool):\n \"\"\"\n logging.info(f\"Apply unet for {output_name}\")\n conv_n_filters = params.get(\"conv_n_filters\", [16, 32, 64, 128, 256, 512])\n conv_activation_layer = _get_conv_activation_layer(params)\n deconv_activation_layer = _get_deconv_activation_layer(params)\n kernel_initializer = he_uniform(seed=50)\n conv2d_factory = partial(\n Conv2D, strides=(2, 2), padding=\"same\", kernel_initializer=kernel_initializer\n )\n # First layer.\n conv1 = conv2d_factory(conv_n_filters[0], (5, 5))(input_tensor)\n batch1 = BatchNormalization(axis=-1)(conv1)\n rel1 = conv_activation_layer(batch1)\n # Second layer.\n conv2 = conv2d_factory(conv_n_filters[1], (5, 5))(rel1)\n batch2 = BatchNormalization(axis=-1)(conv2)\n rel2 = conv_activation_layer(batch2)\n # Third layer.\n conv3 = conv2d_factory(conv_n_filters[2], (5, 5))(rel2)\n batch3 = BatchNormalization(axis=-1)(conv3)\n rel3 = conv_activation_layer(batch3)\n # Fourth layer.\n conv4 = conv2d_factory(conv_n_filters[3], (5, 5))(rel3)\n batch4 = BatchNormalization(axis=-1)(conv4)\n rel4 = conv_activation_layer(batch4)\n # Fifth layer.\n conv5 = conv2d_factory(conv_n_filters[4], (5, 5))(rel4)\n batch5 = BatchNormalization(axis=-1)(conv5)\n rel5 = conv_activation_layer(batch5)\n # Sixth layer\n conv6 = conv2d_factory(conv_n_filters[5], (5, 5))(rel5)\n batch6 = BatchNormalization(axis=-1)(conv6)\n _ = conv_activation_layer(batch6)\n #\n #\n 
conv2d_transpose_factory = partial(\n Conv2DTranspose,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=kernel_initializer,\n )\n #\n up1 = conv2d_transpose_factory(conv_n_filters[4], (5, 5))((conv6))\n up1 = deconv_activation_layer(up1)\n batch7 = BatchNormalization(axis=-1)(up1)\n drop1 = Dropout(0.5)(batch7)\n merge1 = Concatenate(axis=-1)([conv5, drop1])\n #\n up2 = conv2d_transpose_factory(conv_n_filters[3], (5, 5))((merge1))\n up2 = deconv_activation_layer(up2)\n batch8 = BatchNormalization(axis=-1)(up2)\n drop2 = Dropout(0.5)(batch8)\n merge2 = Concatenate(axis=-1)([conv4, drop2])\n #\n up3 = conv2d_transpose_factory(conv_n_filters[2], (5, 5))((merge2))\n up3 = deconv_activation_layer(up3)\n batch9 = BatchNormalization(axis=-1)(up3)\n drop3 = Dropout(0.5)(batch9)\n merge3 = Concatenate(axis=-1)([conv3, drop3])\n #\n up4 = conv2d_transpose_factory(conv_n_filters[1], (5, 5))((merge3))\n up4 = deconv_activation_layer(up4)\n batch10 = BatchNormalization(axis=-1)(up4)\n merge4 = Concatenate(axis=-1)([conv2, batch10])\n #\n up5 = conv2d_transpose_factory(conv_n_filters[0], (5, 5))((merge4))\n up5 = deconv_activation_layer(up5)\n batch11 = BatchNormalization(axis=-1)(up5)\n merge5 = Concatenate(axis=-1)([conv1, batch11])\n #\n up6 = conv2d_transpose_factory(1, (5, 5), strides=(2, 2))((merge5))\n up6 = deconv_activation_layer(up6)\n batch12 = BatchNormalization(axis=-1)(up6)\n # Last layer to ensure initial shape reconstruction.\n if not output_mask_logit:\n up7 = Conv2D(\n 2,\n (4, 4),\n dilation_rate=(2, 2),\n activation=\"sigmoid\",\n padding=\"same\",\n kernel_initializer=kernel_initializer,\n )((batch12))\n output = Multiply(name=output_name)([up7, input_tensor])\n return output\n return Conv2D(\n 2,\n (4, 4),\n dilation_rate=(2, 2),\n padding=\"same\",\n kernel_initializer=kernel_initializer,\n )((batch12))\n\n\nclass Separator:\n EPSILON = 1e-10\n\n def __init__(self, model_name):\n with open(os.path.join(CONFIG_DIR, model_name, 'base_config.json')) as f:\n self._params = json.load(f)\n\n self._sample_rate = self._params[\"sample_rate\"]\n self._tf_graph = tf.Graph()\n self._prediction_generator = None\n self._features = None\n self._session = None\n\n self.stft_input_name = f\"{self._params['mix_name']}_stft\"\n\n def __del__(self) -> None:\n if self._session:\n self._session.close()\n\n @property\n def input_names(self):\n return [\"audio_id\", self.stft_input_name]\n\n def get_input_dict_placeholders(self):\n features = {\n self.stft_input_name: placeholder(\n tf.complex64,\n shape=(\n None,\n self._params[\"frame_length\"] // 2 + 1,\n self._params[\"n_channels\"],\n ),\n name=self.stft_input_name,\n ),\n \"audio_id\": placeholder(tf.string, name=\"audio_id\"),\n }\n return features\n\n def get_feed_dict(self, features, stft, audio_id):\n return\n\n\n def _stft(\n self, data: np.ndarray, inverse: bool = False, length: Optional[int] = None\n ) -> np.ndarray:\n \"\"\"\n Single entrypoint for both stft and istft. This computes stft and\n istft with librosa on stereo data. The two channels are processed\n separately and are concatenated together in the result. The\n expected input formats are: (n_samples, 2) for stft and (T, F, 2)\n for istft.\n\n Parameters:\n data (numpy.array):\n Array with either the waveform or the complex spectrogram\n depending on the parameter inverse\n inverse (bool):\n (Optional) Should a stft or an istft be computed.\n length (Optional[int]):\n\n Returns:\n numpy.ndarray:\n Stereo data as numpy array for the transform. 
The channels\n are stored in the last dimension.\n \"\"\"\n assert not (inverse and length is None)\n data = np.asfortranarray(data)\n N = self._params[\"frame_length\"]\n H = self._params[\"frame_step\"]\n win = hann(N, sym=False)\n fstft = istft if inverse else stft\n win_len_arg = {\"win_length\": None, \"length\": None} if inverse else {\"n_fft\": N}\n n_channels = data.shape[-1]\n out = []\n for c in range(n_channels):\n d = (\n np.concatenate((np.zeros((N,)), data[:, c], np.zeros((N,))))\n if not inverse\n else data[:, :, c].T\n )\n s = fstft(d, hop_length=H, window=win, center=False, **win_len_arg)\n if inverse:\n s = s[N : N + length]\n s = np.expand_dims(s.T, 2 - inverse)\n out.append(s)\n if len(out) == 1:\n return out[0]\n return np.concatenate(out, axis=2 - inverse)\n\n def _build_stft_feature(self):\n \"\"\"Compute STFT of waveform and slice the STFT in segment\n with the right length to feed the network.\n \"\"\"\n\n stft_name = 'mix_stft'\n spec_name = 'mix_spectrogram'\n\n if stft_name not in self._features:\n raise NotImplementedError('This code was never used.')\n\n if spec_name not in self._features:\n self._features[spec_name] = tf.abs(\n pad_and_partition(self._features[stft_name], self._params['T'])\n )[:, :, : self._params['F'], :]\n\n def _extend_mask(self, mask):\n \"\"\"Extend mask, from reduced number of frequency bin to the number of\n frequency bin in the STFT.\n\n :param mask: restricted mask\n :returns: extended mask\n :raise ValueError: If invalid mask_extension parameter is set.\n \"\"\"\n extension = self._params[\"mask_extension\"]\n # Extend with average\n # (dispatch according to energy in the processed band)\n if extension == \"average\":\n extension_row = tf.reduce_mean(mask, axis=2, keepdims=True)\n # Extend with 0\n # (avoid extension artifacts but not conservative separation)\n elif extension == \"zeros\":\n mask_shape = tf.shape(mask)\n extension_row = tf.zeros((mask_shape[0], mask_shape[1], 1, mask_shape[-1]))\n else:\n raise ValueError(f\"Invalid mask_extension parameter {extension}\")\n n_extra_row = self._params['frame_length'] // 2 + 1 - self._params['F']\n extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])\n return tf.concat([mask, extension], axis=2)\n\n def _build_masks(self):\n \"\"\"\n Compute masks from the output spectrograms of the model.\n :return:\n \"\"\"\n input_tensor = self._features['mix_spectrogram']\n\n output_dict = {}\n for instrument in self._params['instrument_list']:\n out_name = f\"{instrument}_spectrogram\"\n # outputs[out_name] = function(\n # input_tensor, output_name=out_name, params=params or {}\n # )\n output_dict[out_name] = apply_unet(\n input_tensor, instrument, self._params[\"model\"][\"params\"])\n\n separation_exponent = self._params[\"separation_exponent\"]\n output_sum = (\n tf.reduce_sum(\n [e ** separation_exponent for e in output_dict.values()], axis=0\n )\n + self.EPSILON\n )\n out = {}\n\n for instrument in self._params['instrument_list']:\n output = output_dict[f\"{instrument}_spectrogram\"]\n # Compute mask with the model.\n instrument_mask = (\n output ** separation_exponent + (self.EPSILON / len(output_dict))\n ) / output_sum\n # Extend mask;\n instrument_mask = self._extend_mask(instrument_mask)\n # Stack back mask.\n old_shape = tf.shape(instrument_mask)\n new_shape = tf.concat(\n [[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0\n )\n instrument_mask = tf.reshape(instrument_mask, new_shape)\n # Remove padded part (for mask having the same size as STFT);\n\n stft_feature = 
self._features['mix_stft']\n            instrument_mask = instrument_mask[: tf.shape(stft_feature)[0], ...]\n            out[instrument] = instrument_mask\n        self._masks = out\n\n    def _get_session(self):\n        if self._session is None:\n            saver = tf.compat.v1.train.Saver()\n            model_directory = os.path.join(MODEL_DIR, self._params[\"model_dir\"])\n            latest_checkpoint = tf.train.latest_checkpoint(model_directory)\n            self._session = tf.compat.v1.Session()\n            saver.restore(self._session, latest_checkpoint)\n        return self._session\n\n    def separate_waveform(self, waveform: np.ndarray):\n        \"\"\"\n        Performs separation with librosa backend for STFT.\n\n        Parameters:\n            waveform (numpy.ndarray):\n                Waveform to be separated (as a numpy array)\n        \"\"\"\n        with self._tf_graph.as_default():\n            self._features = self.get_input_dict_placeholders()\n\n            self._build_stft_feature()\n\n            self._build_masks()\n\n            stft = self._stft(waveform)\n            if stft.shape[-1] == 1:\n                stft = np.concatenate([stft, stft], axis=-1)\n            elif stft.shape[-1] > 2:\n                # channels live on the last axis of the (T, F, C) array\n                stft = stft[:, :, :2]\n            sess = self._get_session()\n            feed_dict = {\n                self._features[\"audio_id\"]: 'my-audio',\n                self._features[self.stft_input_name]: stft\n            }\n            outputs = sess.run(\n                self._masks,\n                feed_dict=feed_dict\n            )\n\n            converted = {}\n            for inst in self._params['instrument_list']:\n                converted[inst] = self._stft(\n                    outputs[inst] * stft, inverse=True, length=waveform.shape[0]\n                )\n            return converted\n\n    def separate_file(self, audio_file):\n        waveform, sr = librosa.load(audio_file, sr=self._sample_rate)\n        if waveform.ndim == 1:\n            waveform = np.stack([waveform, waveform], 1)\n        result = self.separate_waveform(waveform)\n        fn, ext = os.path.splitext(os.path.basename(audio_file))\n        out_dir = os.path.join(OUTPUT_DIR, fn)\n        os.makedirs(out_dir, exist_ok=True)\n        for instrument, output in result.items():\n            soundfile.write(os.path.join(out_dir, instrument + '.wav'), output, self._sample_rate)\n\n\n    def gradient_opt(self, audio_file):\n        waveform, sr = librosa.load(audio_file, sr=self._sample_rate)\n        if waveform.ndim == 1:\n            waveform = np.stack([waveform, waveform], 1)\n\n        with self._tf_graph.as_default():\n            self._features = self.get_input_dict_placeholders()\n\n            self._build_stft_feature()\n\n            self._build_masks()\n\n            stft = self._stft(waveform)\n            if stft.shape[-1] == 1:\n                stft = np.concatenate([stft, stft], axis=-1)\n            elif stft.shape[-1] > 2:\n                # channels live on the last axis of the (T, F, C) array\n                stft = stft[:, :, :2]\n            sess = self._get_session()\n\n            loss = -tf.reduce_mean(self._masks['vocals']) + tf.reduce_mean(self._masks['accompaniment'])\n            gradient = tf.gradients(loss, self._features[self.stft_input_name])[0]\n\n            for i in range(100):\n                feed_dict = {\n                    self._features[\"audio_id\"]: 'my-audio',\n                    self._features[self.stft_input_name]: stft\n                }\n                g = sess.run(\n                    gradient,\n                    feed_dict=feed_dict\n                )\n\n                lr = 10000\n                # stft += lr * g / (np.abs(g).mean() + 1e-7)\n                stft += lr * g\n\n                audio = self._stft(stft, inverse=True, length=waveform.shape[0])\n                soundfile.write(os.path.join(OUTPUT_DIR, f'opt-{i:03d}.wav'), audio, self._sample_rate)\n\n"
] | [
[
"numpy.expand_dims",
"tensorflow.concat",
"tensorflow.keras.layers.ELU",
"tensorflow.zeros",
"tensorflow.compat.v1.keras.initializers.he_uniform",
"numpy.concatenate",
"tensorflow.compat.v1.train.Saver",
"tensorflow.keras.layers.Concatenate",
"tensorflow.Graph",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.Conv2D",
"tensorflow.gradients",
"numpy.stack",
"tensorflow.keras.layers.Multiply",
"numpy.zeros",
"tensorflow.tile",
"tensorflow.keras.layers.ReLU",
"scipy.signal.windows.hann",
"tensorflow.shape",
"numpy.asfortranarray",
"tensorflow.train.latest_checkpoint",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.compat.v1.Session",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.compat.v1.logging.info",
"tensorflow.keras.layers.Dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
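Note on the record above: its `_stft` helper runs librosa's transform once per channel and stacks the results on the last axis, so the `mix_stft` placeholder receives a complex array of shape (frames, frequencies, channels). The following is a minimal sketch of that packing convention only, not part of the dataset record; the frame parameters `N` and `H` and the random stereo signal are illustrative assumptions.

import numpy as np
import librosa
from scipy.signal.windows import hann

N, H = 4096, 1024                                      # assumed frame_length / frame_step
audio = np.random.randn(3 * N, 2).astype(np.float32)   # synthetic stereo signal

win = hann(N, sym=False)
channels = []
for c in range(audio.shape[1]):
    # zero-pad like the record does, then transform one channel at a time
    padded = np.concatenate((np.zeros(N), audio[:, c], np.zeros(N)))
    s = librosa.stft(padded, n_fft=N, hop_length=H, window=win, center=False)
    channels.append(np.expand_dims(s.T, 2))            # (frames, freq) -> (frames, freq, 1)
stft = np.concatenate(channels, axis=2)                # (frames, freq, 2)
print(stft.shape)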
CrazySherman/models | [
"0bcc77dcdf504c0e7a4cbc5e29798d33ed77f693"
] | [
"research/deeplab/common.py"
] | [
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Provides flags that are common to scripts.\n\nCommon flags from train/eval/vis/export_model.py are collected in this script.\n\"\"\"\nimport collections\nimport copy\nimport json\n\nimport tensorflow as tf\n\nflags = tf.app.flags\n\n# Flags for input preprocessing.\n\nflags.DEFINE_integer('min_resize_value', None,\n 'Desired size of the smaller image side.')\n\nflags.DEFINE_integer('max_resize_value', None,\n 'Maximum allowed size of the larger image side.')\n\nflags.DEFINE_integer('resize_factor', None,\n 'Resized dimensions are multiple of factor plus one.')\n\n# Model dependent flags.\n\nflags.DEFINE_integer('logits_kernel_size', 1,\n 'The kernel size for the convolutional kernel that '\n 'generates logits.')\n\n# When using 'mobilent_v2', we set atrous_rates = decoder_output_stride = None.\n# When using 'xception_65' or 'resnet_v1' model variants, we set\n# atrous_rates = [6, 12, 18] (output stride 16) and decoder_output_stride = 4.\n# See core/feature_extractor.py for supported model variants.\nflags.DEFINE_string('model_variant', 'mobilenet_v2', 'DeepLab model variant.')\n\nflags.DEFINE_multi_float('image_pyramid', None,\n 'Input scales for multi-scale feature extraction.')\n\nflags.DEFINE_boolean('add_image_level_feature', True,\n 'Add image level feature.')\n\nflags.DEFINE_multi_integer(\n 'image_pooling_crop_size', None,\n 'Image pooling crop size [height, width] used in the ASPP module. When '\n 'value is None, the model performs image pooling with \"crop_size\". This'\n 'flag is useful when one likes to use different image pooling sizes.')\n\nflags.DEFINE_boolean('aspp_with_batch_norm', True,\n 'Use batch norm parameters for ASPP or not.')\n\nflags.DEFINE_boolean('aspp_with_separable_conv', True,\n 'Use separable convolution for ASPP or not.')\n\n# Defaults to None. Set multi_grid = [1, 2, 4] when using provided\n# 'resnet_v1_{50,101}_beta' checkpoints.\nflags.DEFINE_multi_integer('multi_grid', None,\n 'Employ a hierarchy of atrous rates for ResNet.')\n\nflags.DEFINE_float('depth_multiplier', 1.0,\n 'Multiplier for the depth (number of channels) for all '\n 'convolution ops used in MobileNet.')\n\n# For `xception_65`, use decoder_output_stride = 4. 
For `mobilenet_v2`, use\n# decoder_output_stride = None.\nflags.DEFINE_integer('decoder_output_stride', None,\n 'The ratio of input to output spatial resolution when '\n 'employing decoder to refine segmentation results.')\n\nflags.DEFINE_boolean('decoder_use_separable_conv', True,\n 'Employ separable convolution for decoder or not.')\n\nflags.DEFINE_enum('merge_method', 'max', ['max', 'avg'],\n 'Scheme to merge multi scale features.')\n\nflags.DEFINE_string(\n 'dense_prediction_cell_json',\n '',\n 'A JSON file that specifies the dense prediction cell.')\n\nFLAGS = flags.FLAGS\n\n# Constants\n\n# Perform semantic segmentation predictions.\nOUTPUT_TYPE = 'semantic'\nOUTPUT_TYPE_BASE = 'classification'\n\n# Semantic segmentation item names.\nLABELS_CLASS = 'labels_class'\nIMAGE = 'image'\nHEIGHT = 'height'\nWIDTH = 'width'\nIMAGE_NAME = 'image_name'\nLABEL = 'label'\nORIGINAL_IMAGE = 'original_image'\n\n# Test set name.\nTEST_SET = 'test'\n\n\nclass ModelOptions(\n collections.namedtuple('ModelOptions', [\n 'outputs_to_num_classes',\n 'crop_size',\n 'atrous_rates',\n 'output_stride',\n 'merge_method',\n 'add_image_level_feature',\n 'image_pooling_crop_size',\n 'aspp_with_batch_norm',\n 'aspp_with_separable_conv',\n 'multi_grid',\n 'decoder_output_stride',\n 'decoder_use_separable_conv',\n 'logits_kernel_size',\n 'model_variant',\n 'depth_multiplier',\n 'dense_prediction_cell_config',\n ])):\n \"\"\"Immutable class to hold model options.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n outputs_to_num_classes,\n crop_size=None,\n atrous_rates=None,\n output_stride=8):\n \"\"\"Constructor to set default values.\n\n Args:\n outputs_to_num_classes: A dictionary from output type to the number of\n classes. For example, for the task of semantic segmentation with 21\n semantic classes, we would have outputs_to_num_classes['semantic'] = 21.\n crop_size: A tuple [crop_height, crop_width].\n atrous_rates: A list of atrous convolution rates for ASPP.\n output_stride: The ratio of input to output spatial resolution.\n\n Returns:\n A new ModelOptions instance.\n \"\"\"\n dense_prediction_cell_config = None\n if FLAGS.dense_prediction_cell_json:\n with tf.gfile.Open(FLAGS.dense_prediction_cell_json, 'r') as f:\n dense_prediction_cell_config = json.load(f)\n\n return super(ModelOptions, cls).__new__(\n cls, outputs_to_num_classes, crop_size, atrous_rates, output_stride,\n FLAGS.merge_method, FLAGS.add_image_level_feature,\n FLAGS.image_pooling_crop_size, FLAGS.aspp_with_batch_norm,\n FLAGS.aspp_with_separable_conv, FLAGS.multi_grid,\n FLAGS.decoder_output_stride, FLAGS.decoder_use_separable_conv,\n FLAGS.logits_kernel_size, FLAGS.model_variant, FLAGS.depth_multiplier,\n dense_prediction_cell_config)\n\n def __deepcopy__(self, memo):\n return ModelOptions(copy.deepcopy(self.outputs_to_num_classes),\n self.crop_size,\n self.atrous_rates,\n self.output_stride)\n"
] | [
[
"tensorflow.gfile.Open"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
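The `ModelOptions` class in the record above uses a common idiom from TensorFlow research code: subclass `collections.namedtuple`, set `__slots__ = ()`, and override `__new__` to inject defaults, which yields an immutable options object. A standalone sketch of the pattern follows; the `Options` class and its fields are invented for illustration and are not part of DeepLab.

import collections

class Options(collections.namedtuple('Options', ['num_classes', 'crop_size', 'output_stride'])):
    """Immutable option holder; __new__ supplies the defaults."""
    __slots__ = ()

    def __new__(cls, num_classes, crop_size=None, output_stride=8):
        return super(Options, cls).__new__(cls, num_classes, crop_size, output_stride)

opts = Options(num_classes=21)
print(opts.output_stride)  # 8
# opts.output_stride = 16 would raise AttributeError: fields are read-only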
sxyd/Traffic_Control_Benchmark | [
"9f539c0101b198f2789859966a1dbdc3c6b160a2"
] | [
"utils/plot_net_trafficLights.py"
] | [
"#!/usr/bin/env python\n# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo\n# Copyright (C) 2008-2020 German Aerospace Center (DLR) and others.\n# This program and the accompanying materials are made available under the\n# terms of the Eclipse Public License 2.0 which is available at\n# https://www.eclipse.org/legal/epl-2.0/\n# This Source Code may also be made available under the following Secondary\n# Licenses when the conditions for such availability set forth in the Eclipse\n# Public License 2.0 are satisfied: GNU General Public License, version 2\n# or later which is available at\n# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html\n# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later\n\n# @file plot_net_trafficLights.py\n# @author Daniel Krajzewicz\n# @author Michael Behrisch\n# @date 2013-10-28\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif 'SUMO_HOME' in os.environ:\n tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\n sys.path.append(tools)\nelse:\n sys.exit(\"Please declare the environment variable 'SUMO_HOME'\")\n\nimport sumolib # noqa\nfrom sumolib.visualization import helpers # noqa\nimport matplotlib.pyplot as plt # noqa\n\n\ndef main(args=None):\n \"\"\"The main function; parses options and plots\"\"\"\n # ---------- build and read options ----------\n from optparse import OptionParser\n optParser = OptionParser()\n optParser.add_option(\"-n\", \"--net\", dest=\"net\", metavar=\"FILE\",\n help=\"Defines the network to read\")\n optParser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\",\n default=False, help=\"If set, the script says what it's doing\")\n optParser.add_option(\"-w\", \"--width\", dest=\"width\",\n type=\"float\", default=20, help=\"Defines the width of the dots\")\n optParser.add_option(\"-c\", \"--color\", dest=\"color\",\n default='r', help=\"Defines the dot color\")\n optParser.add_option(\"--edge-width\", dest=\"defaultWidth\",\n type=\"float\", default=1, help=\"Defines the edge width\")\n optParser.add_option(\"--edge-color\", dest=\"defaultColor\",\n default='k', help=\"Defines the edge color\")\n\n # standard plot options\n helpers.addInteractionOptions(optParser)\n helpers.addPlotOptions(optParser)\n # parse\n options, remaining_args = optParser.parse_args(args=args)\n\n if options.net is None:\n print(\"Error: a network to load must be given.\")\n return 1\n if options.verbose:\n print(\"Reading network from '%s'\" % options.net)\n net = sumolib.net.readNet(options.net)\n\n tlsn = {}\n for tid in net._id2tls:\n t = net._id2tls[tid]\n tlsn[tid] = set()\n for c in t._connections:\n n = c[0].getEdge().getToNode()\n tlsn[tid].add(n)\n\n tlspX = []\n tlspY = []\n for tid in tlsn:\n x = 0\n y = 0\n n = 0\n for node in tlsn[tid]:\n x += node._coord[0]\n y += node._coord[1]\n n = n + 1\n x = x / n\n y = y / n\n tlspX.append(x)\n tlspY.append(y)\n\n fig, ax = helpers.openFigure(options)\n ax.set_aspect(\"equal\", None, 'C')\n helpers.plotNet(net, {}, {}, options)\n plt.plot(tlspX, tlspY, options.color, linestyle='None',\n marker='o', markersize=options.width, label='Traffic light')\n options.nolegend = False\n helpers.closeFigure(fig, ax, options)\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n"
] | [
[
"matplotlib.pyplot.plot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
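The plotting script above places one marker per traffic light at the centroid of the junction nodes it controls. A minimal sketch of that averaging step, with `tls_nodes` as a hypothetical stand-in for the `tlsn` mapping the script builds from sumolib:

tls_nodes = {'tls0': [(0.0, 0.0), (10.0, 0.0), (10.0, 10.0)]}

tlspX, tlspY = [], []
for tid, coords in tls_nodes.items():
    # average the coordinates of all nodes behind this traffic light
    tlspX.append(sum(c[0] for c in coords) / len(coords))
    tlspY.append(sum(c[1] for c in coords) / len(coords))
print(tlspX, tlspY)  # [6.666...] [3.333...]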
piotr-karon/realworld-starter-kit | [
"6285e4b5913fe5e99d72e9178eb4b1db246d02c9"
] | [
"experiments/render-tests-avg.py"
] | [
"#!/usr/bin/env python3\n\nimport json\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nfrom natsort import natsorted\n\ntry:\n from docopt import docopt\n from marko.ext.gfm import gfm\n import pygal\n from pygal.style import Style, DefaultStyle\nexcept ImportError as e:\n raise Exception('Some external dependencies not found, install them using: pip install -r requirements.txt') from e\n\n\ndef render():\n suffix = '.avg.checks.bench.json'\n suites = {}\n for filepath in Path('').glob(f'*{suffix}'):\n name = filepath.name[:-len(suffix)]\n print(f'Loading {filepath} as {name}.')\n with open(filepath) as fp:\n suites[name] = json.load(fp)\n\n names = natsorted(suites.keys())\n\n figure_filenames = render_figures(names, suites)\n\n out_filename = Path('bench-results.md')\n with open(out_filename, 'w') as out:\n cwd = os.getcwd().split(os.sep)[-2:]\n print(f'# Benchmark of {\", \".join(names)} in {cwd}', file=out)\n\n notes_file = Path('notes.md')\n if notes_file.exists():\n print(f'Including {notes_file} in resulting Markdown.')\n with notes_file.open() as fp:\n out.write(fp.read())\n else:\n print(f'File {notes_file} does not exist, create it to include it in resulting Markdown.')\n\n # print('## General Info & Checks', file=out)\n # render_checks(names, suites, out)\n\n print('## Graphs', file=out)\n print('*The graphs are interactive, view the rendered HTML locally to enjoy it.*\\n', file=out)\n for filename in figure_filenames:\n # Use HTML instead of Markdown image to specify the width\n print(f'<img type=\"image/svg+xml\" src=\"{filename}\" alt=\"{filename}\" width=\"49%\"/>', file=out)\n\n print(f'Markdown output written to {out_filename}.')\n\n render_html(out_filename, Path('bench-results.html'))\n\n\ndef render_checks(names, suites, out):\n print(f'|Check|{\"|\".join(names)}|', file=out)\n print(f'|{\"|\".join([\"---\"] * (len(names) + 1))}|', file=out)\n\n per_impl_checks = {name: suite['checks'] for name, suite in suites.items()}\n check_names = sorted(set().union(*(checks.keys() for checks in per_impl_checks.values())))\n\n def sanitize(value):\n if type(value) is float:\n value = float(f'{value:.3g}') # round to 3 significant figures\n return str(int(value) if value >= 100 else value)\n return str(value)\n\n for check_name in check_names:\n values = [sanitize(per_impl_checks[name].get(check_name)) for name in names]\n if len(values) > 1 and len(set(values)) > 1:\n values = [f'**{value}**' for value in values]\n print(f'|{check_name}|{\"|\".join(values)}|', file=out)\n\n\nFIGURE_FUNCS = []\n\n\ndef figure(func):\n \"\"\"Simple decorator to mark a function as a figure generator.\"\"\"\n FIGURE_FUNCS.append(func)\n return func\n\n\ndef render_figures(names, suites):\n filenames = []\n\n config = pygal.Config(legend_at_bottom=True, style=DefaultStyle)\n\n for figure_func in FIGURE_FUNCS:\n chart = figure_func(names, suites, config.copy())\n filename = f'bench-results.{figure_func.__name__}.svg'\n chart.render_to_file(filename)\n filenames.append(filename)\n\n return filenames\n\n\n@figure\ndef startup_time_figure(names, suites, config):\n all_vals = [suites[name]['startup_max'] for name in names]\n mx = np.max(all_vals)\n\n config.range = (0, mx + 0.1)\n chart = pygal.Bar(config, value_formatter=lambda x: \"{:0.2f}s\".format(x))\n chart.title = 'Czas uruchomienia (s)'\n\n for name in names:\n vals = [{'value': suites[name]['startup_avg'],\n 'ci': {'low': suites[name]['startup_min'], 'high': suites[name]['startup_max']}}]\n # print(vals)\n chart.add(name, vals)\n return 
chart\n\n\n@figure\ndef errors_vs_connections_figure(names, suites, config):\n all_vals = [suites[name]['stats'] for name in names]\n flat = [item for sublist in all_vals for item in sublist]\n print(flat)\n all_rates = [\n div_or_none(s['request_errors_new_avg'], s['request_errors_new_avg'] + s['requests_new_avg'], scale=100) for s\n in flat]\n mx = np.max(all_rates)\n\n config.range = (0, mx + mx * 0.1)\n chart = pygal.Line(config, value_formatter=lambda x: \"{:0.2f}%\".format(x))\n chart.title = 'Współczynnik liczby błędów względem liczby połączeń (%)'\n connections_x_labels(chart, suites, skip=0)\n for name in names:\n chart.add(name, [\n div_or_none(s['request_errors_new_avg'], s['request_errors_new_avg'] + s['requests_new_avg'], scale=100)\n for s in suites[name]['stats'][0:]])\n return chart\n\n\n@figure\ndef requests_vs_connections_figure(names, suites, config):\n vals = [[x['requests_per_s_avg'] for x in suites[name]['stats']] for name in names]\n print(vals)\n mx = np.max(vals)\n\n config.range = (0, mx + mx * 0.1)\n config.min_scale = 6\n chart = pygal.Line(config, value_formatter=lambda x: \"{:0.0f}\".format(x))\n chart.title = 'Liczba sukcesów na sekundę względem liczby połączeń (Zapytań/s)'\n connections_x_labels(chart, suites, skip=0)\n for name in names:\n # print(suites[name]['stats'])\n # vals = [{'value': x['requests_per_s_avg'], 'ci': {'low': x['requests_per_s_min'], 'high': x['requests_per_s_max']}} for x in suites[name]['stats']]\n vals = [{'value': x['requests_per_s_avg']} for x in suites[name]['stats']]\n chart.add(name, vals)\n return chart\n\n\n@figure\ndef latency_vs_connections_50_figure(names, suites, config):\n return latency_vs_connections_figure(50, names, suites, config)\n\n\n@figure\ndef latency_vs_connections_90_figure(names, suites, config):\n return latency_vs_connections_figure(90, names, suites, config)\n\n\n@figure\ndef latency_vs_connections_99_figure(names, suites, config):\n return latency_vs_connections_figure(99, names, suites, config)\n\n\ndef latency_vs_connections_figure(percentile, names, suites, config):\n all_vals = [[s[f'latency_{percentile}p_ms_avg'] for s in suites[name]['stats'][0:]] for name in names]\n mx = np.max(all_vals)\n mn = np.min(all_vals)\n\n config.range = (mn - mn * .5, mx + mx * .5)\n chart = pygal.Line(config, logarithmic=True, value_formatter=lambda x: \"{:0.0f}\".format(x))\n chart.title = f'{percentile}. 
centyl czasu odpowiedzi względem liczby połączeń (ms)'\n connections_x_labels(chart, suites, skip=0)\n for name in names:\n chart.add(name, [s[f'latency_{percentile}p_ms_avg']\n for s in suites[name]['stats'][0:]])\n return chart\n\n\n@figure\ndef max_mem_usage_figure(names, suites, config):\n all_vals = [[s['mem_usage_mb_avg'] for s in suites[name]['stats']] for name in names]\n mx = np.max(all_vals)\n\n config.range = (0, mx + .1 * mx)\n chart = pygal.Line(config, value_formatter=lambda x: \"{:0.0f}\".format(x))\n chart.title = 'Maksymalne zużycie pamięci względem liczby połączeń (MiB)'\n connections_x_labels(chart, suites)\n for name in names:\n chart.add(name, [s['mem_usage_mb_avg'] for s in suites[name]['stats']])\n return chart\n\n\n@figure\ndef max_mem_usage_per_requests_figure(names, suites, config):\n all_vals = [[div_or_none(s['mem_usage_mb_avg'], s['requests_per_s_avg']) for s in suites[name]['stats'][0:]] for name in names]\n mx = np.max(all_vals)\n\n config.range = (0, mx + .1 * mx)\n config.min_scale = 6\n chart = pygal.Line(config, value_formatter=lambda x: \"{:0.3f}\".format(x))\n chart.title = 'Maksymalne zużycie pamięci per liczba sukcesów na sekundę (MiB-sekunda/Zapytanie)'\n connections_x_labels(chart, suites, skip=0)\n for name in names:\n chart.add(name,\n [div_or_none(s['mem_usage_mb_avg'], s['requests_per_s_avg']) for s in suites[name]['stats'][0:]])\n return chart\n\n\n@figure\ndef cpu_figure(names, suites, config):\n\n mx = np.max([[s['cpu_new_s_avg'] for s in suites[name]['stats'][0:]] for name in names])\n config.range = (0, mx + mx * 0.1)\n chart = pygal.Line(config, value_formatter=lambda x: \"{:0.3f}\".format(x))\n chart.title = 'Wykorzystanie czasu procesora w czasie rundy testów (sekundy CPU)'\n connections_x_labels(chart, suites, skip=0)\n for name in names:\n chart.add(name, [s['cpu_new_s_avg'] for s in suites[name]['stats'][0:]])\n return chart\n\n\n@figure\ndef cpu_per_request_figure(names, suites, config):\n mx = np.max([[div_or_none(s['cpu_new_s_avg'], s['requests_new_avg'], scale=1000) for s in\n suites[name]['stats'][0:]] for name in names])\n config.range = (0, mx + mx * 0.1)\n\n chart = pygal.Line(config, value_formatter=lambda x: \"{:0.3f}\".format(x))\n chart.title = 'Wykorzystanie czasu procesora per poprawna odpowiedź (milisekundy CPU/Req)'\n connections_x_labels(chart, suites, skip=0)\n for name in names:\n chart.add(name, [div_or_none(s['cpu_new_s_avg'], s['requests_new_avg'], scale=1000) for s in\n suites[name]['stats'][0:]])\n return chart\n\n\n@figure\ndef cpu_vs_requests_figure(names, suites, config):\n all_vls = [[s['requests_total_avg'] for s in suites[name]['stats']] for name in names]\n mx = np.max(all_vls)\n config.range = (0, mx + mx * 0.1)\n config.min_scale = 6\n\n chart = pygal.XY(config, value_formatter=lambda x: \"{:0.0f}\".format(x), series_formatter=lambda x: \"{:0.2f}\".format(x))\n chart.title = 'Skumulowana liczba poprawnych odpowiedzi względem skumulowanego czasu CPU'\n chart.x_title = 'sekundy CPU'\n chart.y_title = 'skumulowana liczba poprawnych odpowiedzi'\n for name in names:\n chart.add(name, [\n {'value': (s['cpu_total_s_avg'], s['requests_total_avg']),\n 'label': f'After {s[\"connections\"]} connections round.'}\n for s in suites[name]['stats']\n ])\n return chart\n\n\ndef connections_x_labels(chart, suites, skip=0):\n chart.x_labels = [f\"{s['connections']} conn's\" if s['connections'] else s['message']\n for s in next(iter(suites.values()))['stats']][skip:]\n chart.x_label_rotation = -30\n\n\ndef 
div_or_none(numerator, denominator, scale=1):\n if not denominator:\n return None\n return scale * numerator / denominator\n\n\nHTML_PREFIX = '''<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <title>Benchmark Report</title>\n </head>\n <body>\n'''\nHTML_SUFFIX = ''' </body>\n</html>\n'''\n\n\ndef render_html(md_file, html_file):\n with open(md_file) as in_fp, open(html_file, 'w') as out_fp:\n rs = in_fp.read()\n html = gfm(rs)\n # Replace <img> by <embed> for pygal interactivity, http://www.pygal.org/en/latest/documentation/web.html\n html = html.replace('<img', '<embed')\n # Replace link to md with link to .html for better browsability at HTML level.\n html = html.replace('/README.md\">full benchmark', '/README.html\">full benchmark')\n out_fp.write(HTML_PREFIX)\n out_fp.write(html)\n out_fp.write(HTML_SUFFIX)\n print(f'HTML output written to {html_file.resolve().as_uri()}.')\n\n\nif __name__ == '__main__':\n # args = docopt(__doc__)\n render()\n"
] | [
[
"numpy.max",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
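`div_or_none` in the record above is the small guard that keeps the pygal charts robust: a zero (or missing) denominator yields None, which pygal renders as a gap instead of raising ZeroDivisionError. A self-contained sketch with made-up sample counts:

def div_or_none(numerator, denominator, scale=1):
    # mirrors the helper in the record: None marks an undefined data point
    if not denominator:
        return None
    return scale * numerator / denominator

errors, successes = 3, 997
print(div_or_none(errors, errors + successes, scale=100))  # 0.3 (percent)
print(div_or_none(5, 0))                                   # None -> gap in the line chart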
threefoldtech/threebot_prebuilt | [
"1f0e1c65c14cef079cd80f73927d7c8318755c48"
] | [
"sandbox/lib/jumpscale/JumpscaleLibs/data/numtools/NumTools.py"
] | [
"from Jumpscale import j\nimport numpy\nimport struct\nimport math\n\nJSBASE = j.baseclasses.object\n\n\nclass NumTools(j.baseclasses.object):\n\n __jslocation__ = \"j.tools.numtools\"\n\n def __init__(self):\n JSBASE.__init__(self)\n self.__imports__ = \"numpy\"\n self._currencies = {}\n\n @property\n def currencies(self):\n currency = j.clients.currencylayer.get(\"numtools\")\n return currency.cur2usd\n\n def _getYearFromMonthId(self, monthid, startyear=0):\n \"\"\"\n @param monthid is an int representing a month over\n a period of time e.g. month 24, is the 24th month\n \"\"\"\n year = numpy.floor(float(monthid) / 12) + startyear\n return int(round(year))\n\n def getMonthsArrayForXYear(self, X, initvalue=None):\n \"\"\"\n return array which represents all months of X year, each value = None\n \"\"\"\n # create array for 36 months\n months = []\n for i in range(X * 12 + 1):\n months.append(initvalue)\n return months\n\n def getYearAndMonthFromMonthId(self, monthid, startyear=0):\n \"\"\"\n @param monthid is an int representing a month over\n a period of time e.g. month 24, is the 24th moth\n @return returns year e.g. 1999 and month in the year\n \"\"\"\n monthid = monthid - 1\n year = self._getYearFromMonthId(monthid)\n month = monthid - year * 12\n year += startyear\n return [year, month + 1]\n\n def roundDown(self, value, floatnr=0):\n value = value * (10 ** floatnr)\n return round(numpy.floor(value) / (10 ** floatnr), floatnr)\n\n def roundUp(self, value, floatnr=0):\n value = value * (10 ** floatnr)\n return round(numpy.ceil(value) / (10 ** floatnr), floatnr)\n\n def interpolateList(self, tointerpolate, left=0, right=None, floatnr=None):\n \"\"\"\n interpolates a list (array)\n if will fill in the missing information of an array\n each None value in array will be interpolated\n \"\"\"\n xp = []\n fp = []\n x = []\n isint = True\n allNone = True\n\n for x2 in tointerpolate:\n if x2 is not None:\n allNone = False\n if allNone:\n tointerpolate = [0.0 for item in tointerpolate]\n\n for xpos in range(len(tointerpolate)):\n if not tointerpolate[xpos] is None and not j.data.types.int.check(tointerpolate[xpos]):\n isint = False\n if tointerpolate[xpos] is None:\n x.append(xpos)\n if tointerpolate[xpos] is not None:\n xp.append(xpos)\n fp.append(tointerpolate[xpos])\n if len(x) > 0 and len(xp) > 0:\n result = numpy.interp(x, xp, fp, left, right)\n\n result2 = {}\n for t in range(len(result)):\n result2[x[t]] = result[t]\n result3 = []\n for xpos in range(len(tointerpolate)):\n if xpos in result2:\n result3.append(result2[xpos])\n else:\n result3.append(tointerpolate[xpos])\n if isint:\n result3 = [int(round(item, 0)) for item in result3]\n else:\n if floatnr is not None:\n result3 = [round(float(item), floatnr) for item in result3]\n else:\n result3 = [float(item) for item in result3]\n else:\n result3 = tointerpolate\n\n return result3\n\n def collapseDictOfArraysOfFloats(self, dictOfArrays):\n \"\"\"\n format input {key:[,,,]}\n \"\"\"\n result = []\n for x in range(len(dictOfArrays[keys()[0]])):\n result[x] = 0.0\n for key in list(dictOfArrays.keys()):\n result[x] += dictOfArrays[key][x]\n return result\n\n def collapseDictOfDictOfArraysOfFloats(self, data):\n \"\"\"\n format input {key:{key:[,,,]},key:{key:[,,,]},...}\n \"\"\"\n result = {}\n key1 = list(data.keys())[0] # first element key\n key2 = list(data[key1].keys())[0]\n nrX = len(data[key1][key2])\n\n for x in range(nrX):\n for key in list(data.keys()): # the keys we want to ignore (collapse)\n datasub = data[key]\n for keysub in 
list(datasub.keys()):\n if keysub not in result:\n result[keysub] = []\n for y in range(0, nrX):\n result[keysub].append(0.0)\n result[keysub][x] += datasub[keysub][x]\n return result\n\n def setMinValueInArray(self, array, minval):\n result = []\n for item in array:\n if item < minval:\n result.append(minval)\n else:\n result.append(item)\n return result\n\n def text2val(self, value, curcode=\"usd\"):\n \"\"\"\n value can be 10%,0.1,100,1m,1k m=million\n USD/EUR/CH/EGP/GBP are also understood\n all gets translated to usd\n e.g.: 10%\n e.g.: 10EUR or 10 EUR (spaces are stripped)\n e.g.: 0.1mEUR or 0.1m EUR or 100k EUR or 100000 EUR\n\n\n j.tools.numtools.text2val(\"0.1mEUR\")\n\n \"\"\"\n d = j.data.types.numeric.str2bytes(value)\n return j.data.types.numeric.bytes2cur(d, curcode=curcode)\n\n def int_to_bitstring(self, val):\n \"\"\"\n bitstring is like '10101011'\n \"\"\"\n if j.data.types.int.check(val):\n bits = \"{0:b}\".format(val)\n else:\n raise j.exceptions.Base(\"bits need to be an integer\")\n\n while (len(bits)) < 8:\n bits = \"0%s\" % bits\n\n return bits\n\n def bitstring8_to_int(self, val):\n if not j.data.types.string.check(val):\n raise j.exceptions.Base(\"bits need to be string\")\n if len(val) != 8:\n raise j.exceptions.Base(\"bitstring needs to be 8 char\")\n return int(val, 2)\n\n def bitstring_set_bit(self, bits, pos=7):\n \"\"\"\n bitstring is like '10101011'\n\n give bits as string of 8 chars or as int\n\n set a bit in the byte\n\n pos 7 means most left bit, 0 is most right bit (least value)\n\n \"\"\"\n\n bitsnew = self.int_to_bitstring(int(math.pow(2, pos)))\n\n if not j.data.types.string.check(bits):\n bits = self.int_to_bitstring(bits)\n\n bits = int(bits, 2) | int(bitsnew, 2)\n\n return bits\n\n def bitstring_get_bit(self, bits, pos=7):\n \"\"\"\n bitstring is like '10101011'\n\n give bits as string of 8 chars or as int\n\n get a bit in the byte\n\n pos 7 means most left bit, 0 is most right bit (least value)\n\n \"\"\"\n\n if not j.data.types.string.check(bits):\n bits = self.int_to_bitstring(bits)\n\n bitsnew = self.int_to_bitstring(int(math.pow(2, pos)))\n\n res = int(bits, 2) & int(bitsnew, 2)\n\n return res > 0\n\n def listint_to_bin(self, llist, meta=\"00000000\"):\n \"\"\"\n convert list of integers to binary\n\n @PARM meta are the bits of a byte, the first one is reserved\n for short or long format, 1 if long format\n\n \"\"\"\n shortFormat = True\n for item in llist:\n if item > 65535:\n shortFormat = False\n meta = self.bitstring_set_bit(meta, 7)\n break\n if shortFormat:\n meta = self.bitstring8_to_int(meta)\n\n bindata = b\"\"\n if shortFormat:\n sformat = \"<H\"\n else:\n sformat = \"<I\"\n for item in llist:\n bindata += struct.pack(sformat, item)\n return struct.pack(\"<B\", meta) + bindata\n\n def bin_to_listint(self, bindata):\n \"\"\"\n for each 4 bytes convert to int\n \"\"\"\n res = []\n if self.bitstring_get_bit(self.int_to_bitstring(bindata[0]), 7):\n # longformat\n sformat = \"<I\"\n else:\n sformat = \"<H\"\n\n bindata = bindata[1:]\n\n for item in struct.iter_unpack(sformat, bindata):\n res.append(item[0])\n return res\n\n def test(self):\n \"\"\"\n kosmos 'j.tools.numtools.test()'\n \"\"\"\n assert self.text2val(\"10k\") == 10000.0\n\n assert (1 / self.currencies[\"egp\"]) * 10000000 == self.text2val(\"10 m egp\")\n assert (1 / self.currencies[\"egp\"]) * 10000000 == self.text2val(\"10m egp\")\n assert (1 / self.currencies[\"egp\"]) * 10000000 == self.text2val(\"10mEGP\")\n\n assert self.int_to_bitstring(10) == \"00001010\"\n assert 
self.bitstring8_to_int(\"00001010\") == 10\n\n assert self.bitstring_set_bit(\"00000000\", 7) == 128\n assert self.bitstring_set_bit(\"00000000\", 0) == 1\n\n assert self.bitstring_get_bit(\"00000000\", 0) == False\n assert self.bitstring_get_bit(128, 7)\n assert self.bitstring_get_bit(\"00000001\", 0)\n assert self.bitstring_get_bit(\"00000011\", 1)\n assert self.bitstring_get_bit(\"00000011\", 2) == False\n assert self.bitstring_get_bit(\"10000011\", 7)\n assert self.bitstring_get_bit(\"00000011\", 7) == False\n\n llist0 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n bbin = self.listint_to_bin(llist0)\n llist = self.bin_to_listint(bbin)\n assert llist == llist0\n assert len(bbin) == 21\n\n # now the binary struct will be double as big because there is 1 long\n # int in (above 65000)\n llist2 = [1, 2, 3, 400000, 5, 6, 7, 8, 9, 10]\n bbin2 = self.listint_to_bin(llist2)\n assert len(bbin2) == 41\n llist3 = self.bin_to_listint(bbin2)\n assert llist3 == llist2\n\n # max value in short format\n llist2 = [1, 2, 3, 65535, 5, 6, 7, 8, 9, 10]\n bbin2 = self.listint_to_bin(llist2)\n assert len(bbin2) == 21\n llist3 = self.bin_to_listint(bbin2)\n assert llist3 == llist2\n"
] | [
[
"numpy.ceil",
"numpy.interp",
"numpy.floor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
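`interpolateList` in the record above fills the None gaps of a list by routing the known positions to numpy.interp. A minimal sketch of that gap-filling idea, stripped of the record's int/float bookkeeping:

import numpy as np

values = [0.0, None, None, 3.0, None, 5.0]
xp = [i for i, v in enumerate(values) if v is not None]  # known positions
fp = [values[i] for i in xp]                             # known values
x = [i for i, v in enumerate(values) if v is None]       # positions to fill
filled = list(values)
for pos, val in zip(x, np.interp(x, xp, fp)):
    filled[pos] = float(val)
print(filled)  # [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]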
blakezim/CAMP | [
"a42a407dc62151ab8a7eb4be3aee1318b984502c"
] | [
"camp/StructuredGridOperators/UnaryOperators/AffineTransformFilter.py"
] | [
"import torch\n\nfrom ...Core.StructuredGridClass import StructuredGrid\nfrom .ApplyGridFilter import ApplyGrid\nfrom ._UnaryFilter import Filter\n\n#TODO Check this filter to make sure the affine and translation are correct\n\n\nclass AffineTransform(Filter):\n def __init__(self, target_landmarks=None, source_landmarks=None, affine=None, rigid=False,\n interp_mode='bilinear', device='cpu', dtype=torch.float32):\n super(AffineTransform, self).__init__()\n\n self.device = device\n self.dtype = dtype\n self.rigid = rigid\n self.interp_mode = interp_mode\n\n if target_landmarks is not None and source_landmarks is not None:\n if target_landmarks.shape != source_landmarks.shape:\n raise RuntimeError(\n 'Shape of target and source landmarks do not match: '\n f' Target Shape: {target_landmarks.shape}, Source Shape: {source_landmarks.shape}'\n )\n self.source_landmarks = source_landmarks\n self.target_landmarks = target_landmarks\n self.dim = len(self.source_landmarks[0])\n\n else:\n self.dim = len(affine) - 1\n self.affine = affine\n\n @staticmethod\n def Create(target_landmarks=None, source_landmarks=None, affine=None, rigid=False,\n interp_mode='bilinear', device='cpu', dtype=torch.float32):\n \"\"\"\n Returns an Affine Transform Filter that can be applied to type :class:`~Core.StructuredGrid`. This can be\n initiated using a pair of landmarks (target and source) or with a pre-defined affine transformation (affine).\n Either both target and source landmarks must be provided OR a pre-defined affine.\n\n :param target_landmarks: Target or unmoving landmarks selected in the target space. This tensor should be of\n size Nxdim where N is the number of landmarks and dim is the dimensionality of the\n :class:`~Core.StructuredGrid` the affine will be applied to.\n :type target_landmarks: tensor, optional\n :param source_landmarks: Source or moving landmarks selected in the source space. This tensor should be of\n size Nxdim where N is the number of landmarks and dim is the dimensionality of the\n :class:`~Core.StructuredGrid` the affine will be applied to.\n :type source_landmarks: tensor, optional\n :param affine: Pre-defined affine. This should be of shape (dim + 1)x(dim + 1) where the added dimension\n stores the translation.\n :type affine: tensor, optional\n :param rigid: If the affine should be reduced to rigid transform only. Default is False.\n :type rigid: bool\n :param interp_mode: Resampling interpolation mode to be used when applying the defromation - one of 'bilinear'\n or 'nearest'. Default: 'bilinear'\n :type interp_mode: str\n :param device: Memory location for the created filter - one of 'cpu', 'cuda', or 'cuda:X' where X\n specifies the device identifier. Default: 'cpu'\n :type device: str\n :param dtype: Data type for the filter attributes. Specified from torch memory types. Default:\n 'torch.float32'\n :type dtype: str\n\n .. note:: When mode='bilinear' and the input is 5-D, the interpolation mode used internally will actually be\n trilinear. 
However, when the input is 4-D, the interpolation mode will legitimately be bilinear.\n\n        :return: Affine transform filter object with the specified parameters.\n        \"\"\"\n        aff = AffineTransform(target_landmarks, source_landmarks, affine, rigid, interp_mode, device, dtype)\n        aff = aff.to(device)\n        aff = aff.type(dtype)\n\n        if affine is not None:\n            aff.affine = affine\n        else:\n            aff._solve_affine()\n\n        # Can't add StructuredGrid to the register buffer, so we need to make sure they are on the right device\n        for attr, val in aff.__dict__.items():\n            if type(val).__name__ == 'StructuredGrid':\n                val.to_(device)\n                val.to_type_(dtype)\n            else:\n                pass\n\n        return aff\n\n    def _solve_affine(self):\n\n        source_landmarks_centered = self.source_landmarks - self.source_landmarks.mean(0)\n        target_landmarks_centered = self.target_landmarks - self.target_landmarks.mean(0)\n\n        # Solve for the transform between the points\n        self.affine = torch.matmul(\n            torch.matmul(\n                target_landmarks_centered.t(), source_landmarks_centered\n            ),\n            torch.matmul(\n                source_landmarks_centered.t(), source_landmarks_centered\n            ).inverse()\n        )\n\n        if self.rigid:\n            # torch.svd returns U, S, V (V, not V^T); the closest rotation is U V^T\n            u, _, v = torch.svd(self.affine)\n            self.affine = torch.matmul(u, v.t())\n\n        # Solve for the translation\n        self.translation = self.target_landmarks.mean(0) - torch.matmul(self.affine,\n                                                                        self.source_landmarks.mean(0).t()).t()\n\n    def forward(self, x, out_grid=None, xyz_affine=False):\n        \"\"\"\n        Resamples the :class:`Core.StructuredGrid` through the affine attribute onto the same grid or the out_grid if\n        out_grid is provided.\n\n        :param x: :class:`StructuredGrid` to be transformed by the affine attribute.\n        :type x: :class:`Core.StructuredGrid`\n        :param out_grid: An optional additional grid that specifies the output grid. If not specified, the output grid\n            will be the same as the input grid (x).\n        :type out_grid: :class:`Core.StructuredGrid`, optional\n        :param xyz_affine: Is affine xyz ordered instead of zyx?\n        :type xyz_affine: bool, optional\n        :return: Affine transformed :class:`StructuredGrid`\n        \"\"\"\n\n        # Create the grid\n        if out_grid is not None:\n            aff_grid = StructuredGrid.FromGrid(out_grid, channels=self.dim)\n        else:\n            aff_grid = StructuredGrid.FromGrid(x, channels=self.dim)\n        aff_grid.set_to_identity_lut_()\n\n        # Want to bring the grid the other direction\n        affine = torch.eye(self.dim + 1, device=self.device, dtype=self.dtype)\n\n        if 'target_landmarks' in self.__dict__:\n            affine[0:self.dim, 0:self.dim] = self.affine\n            affine[0:self.dim, self.dim] = self.translation\n        else:\n            affine = self.affine.clone()\n\n        affine = affine.inverse()\n        a = affine[0:self.dim, 0:self.dim]\n        t = affine[0:self.dim, self.dim]\n\n        if xyz_affine:\n            aff_grid.data = aff_grid.data.flip(0)\n\n        aff_grid.data = torch.matmul(a, aff_grid.data.permute(list(range(1, self.dim + 1)) + [0]).unsqueeze(-1))\n        aff_grid.data = (aff_grid.data.squeeze() + t).permute([self.dim] + list(range(0, self.dim)))\n\n        if xyz_affine:\n            aff_grid.data = aff_grid.data.flip(0)\n\n        x_tf = ApplyGrid.Create(aff_grid, device=aff_grid.device,\n                                dtype=aff_grid.dtype, interp_mode=self.interp_mode)(x, out_grid=out_grid)\n        return x_tf\n"
] | [
[
"torch.svd",
"torch.eye"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
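`_solve_affine` in the record above estimates the affine from landmark pairs as A = (Yc^T Xc)(Xc^T Xc)^{-1} on centered coordinates, and for rigid=True keeps only the rotation via an SVD (a Procrustes step). Below is a numpy sketch of the same math on synthetic 2-D landmarks; note that numpy's svd returns V^T directly, whereas torch.svd returns V.

import numpy as np

src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
rot = np.array([[0.0, -1.0], [1.0, 0.0]])            # known 90-degree rotation
dst = src @ rot.T + np.array([2.0, 3.0])             # plus a known shift

Xc = src - src.mean(0)
Yc = dst - dst.mean(0)
A = (Yc.T @ Xc) @ np.linalg.inv(Xc.T @ Xc)           # affine estimate

U, _, Vt = np.linalg.svd(A)                          # rigid projection: R = U V^T
R = U @ Vt
t = dst.mean(0) - R @ src.mean(0)
print(np.round(R, 6), np.round(t, 6))                # recovers rot and (2, 3)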
drscotthawley/room-shape | [
"d2b27786bfe7639d4aff59712500b678472cf102"
] | [
"modes_from_dims.py"
] | [
"#! /usr/bin/env python\n# Author: Scott Hawley\n\n# Can the system learn the Rayleigh equation?\n# Given the dimensions of the room, can the network learn to generate a (sorted) list of resonant frequencies?\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.callbacks import ProgbarLogger, ModelCheckpoint, ReduceLROnPlateau, Callback, EarlyStopping\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import ELU, PReLU\nfrom os.path import isfile\nimport random\n\n\nmydtype = np.float32\n\n\ndef gen_data(N,subset_size=20, invert_dims=False):\n # Inputs:\n # N = number of \"rooms\" to generate\n # subset_size = how many frequencies for each room to use\n max_nx = 5 # highest harmonic number to use\n vs = 1130 # speed of sound in ft/s\n minsize, maxsize = 5, 50 # size range of rooms to generate\n subset_size = (max_nx)**3 # grab this many modes, currently we take all of them\n X = np.zeros([N,subset_size],dtype=mydtype) # array to hold mode freqs as inputs to NN\n Y = np.zeros([N,3],dtype=mydtype) # target room dimensions\n\n indexes = np.array(list(range( (max_nx+1)**3)), dtype=mydtype)\n nx = np.floor( indexes / ((max_nx+1)**2)) # [0,0,0,0,1,1,1,1]\n ny = np.floor(indexes / (max_nx+1)) % (max_nx+1) # [0,0,1,1,0,0,1,1]\n nz = indexes % (max_nx+1) # [0,1,0,1,0,1,0,1]\n\n for i in range(N): # create (partial) list of mode frequencies for N rooms\n dims = np.random.uniform(low=minsize,high=maxsize,size=3)\n dims.sort() # just to try to avoid redunancy/confusion in training\n \n ''' The following is more legible, but slower:\n #freqs = []\n #for nx in range(max_nx+1):\n # for ny in range(max_nx+1):\n # for nz in range(max_nx+1): \n # if (nx+ny+nz > 0): # zero frequency is too easy ;-)\n # f = vs/2*np.sqrt( (nx/dims[0])**2 + (ny/dims[1])**2 + (nz/dims[2])**2)\n # freqs.append(f)\n '''\n # The following is faster that the above:\n freqs = vs/2*np.sqrt( (nx/dims[0])**2 + (ny/dims[1])**2 + (nz/dims[2])**2)\n\n\n rand_sample = [ freqs[i] for i in sorted(random.sample(range(len(freqs)), subset_size)) ]\n #rand_sample.sort() # doesn't matter / doesn't really help # maybe this is cheating, but let's help the network\n rand_sample = np.array(rand_sample,dtype=mydtype)\n #rand_sample = vs/(rand_sample+1e-6)\n\n X[i,:] = rand_sample\n\n #X[i,-1] = dims[0]*dims[1]*dims[2] # Make it easier: give it the room volume too\n\n if (invert_dims):\n Y[i,:] = 1/np.array(dims,dtype=mydtype) # give it 1/lengths\n else:\n Y[i,:] = np.array(dims,dtype=mydtype) \n\n return X, Y\n \n\ndef make_model(X, Y, n_hidden, weights_file, n_layers=7, dropout_fac=0.2):\n\n if ( isfile(weights_file) ):\n print ('Weights file detected. 
Loading from ',weights_file)\n model = load_model(weights_file)\n else:\n print('No weights file detected, so starting from scratch.')\n\n # create and fit Multilayer Perceptron model\n model = Sequential()\n model.add(Dense(n_hidden, input_shape=(X.shape[1],), kernel_initializer=\"he_uniform\"))\n model.add(BatchNormalization(axis=1))\n model.add(ELU(alpha=1.0)) \n model.add(Dropout(dropout_fac))\n\n for l in range(n_layers-1):\n model.add(Dense( int(n_hidden/(2**(l))) ))\n model.add(BatchNormalization(axis=1))\n model.add(ELU(alpha=1.0)) \n model.add(Dropout(dropout_fac))\n\n model.add(Dense(Y.shape[1]))\n model.compile(loss='mse', optimizer='nadam') #, metrics=['accuracy'])\n model.summary()\n return model\n\ndef calc_mse(Y_test,Y_pred):\n return ((Y_test - Y_pred)**2).mean()\n\n\ndef test_predict(model,X_test,Y_test,invert_dims=False):\n GREEN = \"\\033[0;32m\"\n RESET = \"\\033[0;0m\"\n\n print(\"\\n Predicting.... Sample results: (invert_dims =\",invert_dims,\")\")\n\n Y_pred = model.predict(X_test)\n\n if (invert_dims): # invert back\n test_data = 1/Y_test\n pred_data = 1/Y_pred\n else:\n test_data = Y_test\n pred_data = Y_pred\n\n \n for i in range(5):# Y_pred.shape[0]):\n print(\" test_data[\",i,\"] = \",test_data[i],\", pred_data[\",i,\"] = \",pred_data[i],sep=\"\")\n #print(GREEN,\" 1/test_data[\",i,\"] = \",1/test_data[i],\", 1/pred_data[\",i,\"] = \",\n # 1/pred_data[i],RESET,sep=\"\")\n #score = model.evaluate(X_test, Y_test, verbose=False) \n score = calc_mse(test_data,pred_data)\n\n print('Overall test score: mse loss: ',score) #Loss on test\n #print('Test accuracy: ', score[1])\n print(\"\")\n return\n\n\n\ndef main():\n np.random.seed(2)\n\n # parameters for 'size' of run\n n_hidden = 200\n n_layers=7\n dropout_fac = 0\n batch_size = 100\n n_train = 300000 \n n_val = 20000\n n_test =10000\n grab_modes = 100 # take a subsample of this many modes from list of frequencies\n invert_dims = False # 22.1958 with MAPE and True, 13.1486 False\n\n print(\"Generating Data...\")\n print(\" ...Testing\")\n X_test, Y_test = gen_data(n_test,subset_size=grab_modes, invert_dims=invert_dims)\n print(\" ...Validation\")\n X_val, Y_val = gen_data(n_val,subset_size=grab_modes, invert_dims=invert_dims)\n print(\" ...Training\")\n X_train, Y_train = gen_data(n_train,subset_size=grab_modes, invert_dims=invert_dims)\n\n weights_file = \"weights.hdf5\"\n model = make_model(X_train, Y_train, n_hidden, weights_file, n_layers=n_layers, dropout_fac=dropout_fac)\n\n # callbacks\n checkpoint = ModelCheckpoint(filepath=weights_file, verbose=1, save_best_only=True)\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', verbose=1, factor=0.2, patience=4, min_lr=0.0001)\n earlystop = EarlyStopping(patience=6)\n class testing_callback(Callback): \n def on_epoch_end(self, epoch, logs={}):\n test_predict(model,X_test,Y_test,invert_dims=invert_dims)\n return\n testing_cb = testing_callback()\n\n # Training Loop\n n_epochs= 1000\n model.fit(X_train, Y_train, epochs=n_epochs, batch_size=batch_size, verbose=1, validation_data=(X_val, Y_val), \n callbacks =[checkpoint, earlystop, testing_cb, reduce_lr, ProgbarLogger()])\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.sqrt",
"numpy.random.seed",
"numpy.floor",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
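`gen_data` in the record above vectorizes the Rayleigh room-mode formula f = (vs/2) * sqrt((nx/Lx)^2 + (ny/Ly)^2 + (nz/Lz)^2) over all harmonic triples at once. A small standalone sketch with hypothetical room dimensions in feet:

import numpy as np

vs = 1130.0                            # speed of sound in ft/s, as in the record
Lx, Ly, Lz = 12.0, 18.0, 9.0           # assumed room dimensions
n = np.arange(6)                       # harmonic numbers 0..5
nx, ny, nz = np.meshgrid(n, n, n, indexing='ij')
freqs = vs / 2 * np.sqrt((nx / Lx) ** 2 + (ny / Ly) ** 2 + (nz / Lz) ** 2)
freqs = np.sort(freqs.ravel())[1:]     # drop the trivial f = 0 mode
print(freqs[:5])                       # lowest resonant modes in Hz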
takanori-fujiwara/multidr | [
"7df887ede573af4aaad24fea19dca456d7a52ff1"
] | [
"multidr/cl.py"
] | [
"import numpy as np\nfrom scipy.stats import pearsonr\n\nfrom ccpca import CCPCA\n\n\nclass CL():\n \"\"\"TDR: Two-step dimensionality reduction (DR) to project a third-order\n tensor onto a lower-dimensional space\n\n Parameters\n ----------\n learner: Class Object for DR, optional, (default=None)\n Contrastive representation learning class object. Any class\n object that (1) has fit as a class method, (2) can take two matrices as\n the first parameters of fit, and (3) has get_feat_contribs as a class\n method (e.g., ccPCA, https://github.com/takanori-fujiwara/ccpca).\n If None, ccPCA is set as a learner.\n Attributes\n ----------\n learner: the same with the parameter above.\n fcs: array-like, shape(n_features, 1)\n Feature contributions.\n ----------\n Examples\n --------\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.decomposition import PCA\n >>> from sklearn.cluster import SpectralClustering\n >>> from umap import UMAP\n\n >>> from multidr.tdr import TDR\n >>> from multidr.cl import CL\n\n >>> X = np.load('./data/air_quality/tensor.npy')\n >>> tdr = TDR(first_learner=PCA(n_components=1),\n ... second_learner=UMAP(n_components=2,\n ... n_neighbors=7,\n ... min_dist=0.15))\n\n >>> results = tdr.fit_transform(X,\n ... first_scaling=True,\n ... second_scaling=False,\n ... verbose=True)\n\n >>> clustering = SpectralClustering(n_clusters=3,\n ... assign_labels=\"discretize\",\n ... random_state=0).fit(results['Z_n_dt'])\n\n >>> plt.figure(figsize=(6, 6))\n >>> plt.scatter(results['Z_n_dt'][:, 0],\n ... results['Z_n_dt'][:, 1],\n ... s=5,\n ... c=clustering.labels_)\n >>> plt.title('Z_n_dt with spectral clustering')\n >>> plt.show()\n\n >>> Y_nt = tdr.Y_tn.transpose()\n\n >>> cl = CL()\n\n >>> plt.figure(figsize=(8, 4))\n\n >>> for cluster_id in np.unique(clustering.labels_):\n ... cluster = Y_nt[clustering.labels_ == cluster_id, :]\n ... others = Y_nt[clustering.labels_ != cluster_id, :]\n ... cl.fit(cluster, others, var_thres_ratio=0.5, max_log_alpha=2)\n ... plt.plot(cl.fcs, c=plt.get_cmap('Accent')(cluster_id))\n\n >>> plt.xlabel('time')\n >>> plt.ylabel('Feature contribution (without scaling)')\n >>> plt.title('Feature cotributions')\n >>> plt.show()\n \"\"\"\n def __init__(self, learner=None):\n self.learner = None\n self.fcs = None\n self.set_learner(learner)\n\n def fit(self, K, R, **contrast_learner_kwargs):\n \"\"\"If using auto alpha selection, find the best contrast parameter alpha\n first. Otherwise, input alpha value is used for fit. Then, fit using\n cPCA with the (best) alpha. 
For cPCA, a matrix E concatenating K and R\n        will be used as a foreground dataset and R will be used as a background\n        dataset.\n\n        Parameters\n        ----------\n        K: array-like, shape(n_samples1, n_components)\n            A target cluster.\n        R: array of array-like, shape(n_samples2, n_components)\n            Background datasets.\n        contrast_learner_kwargs: additional keywords for input parameters.\n            e.g., for ccPCA, var_thres_ratio=0.5, max_log_alpha=2\n        Returns\n        -------\n        self.\n        \"\"\"\n        self.learner.fit(K, R, **contrast_learner_kwargs)\n        self.fcs = self.learner.get_feat_contribs()\n\n        X = np.vstack((K, R))\n        selected = np.array([1] * K.shape[0] + [0] * R.shape[0])\n\n        ## adjust fcs direction\n\n        # check pearson corr between \"selected or not\" and \"feature values\"\n        # if selected rows tend to have higher values corr becomes positive\n        corr_selected_fval = np.array(\n            [pearsonr(selected, X[:, col])[0] for col in range(X.shape[1])])\n\n        # compute score of agreement of correlation and fcs directions\n        # (more correlated or higher absolute value of fcs will have heavier weights)\n        agreement_score = np.sum(corr_selected_fval * self.fcs)\n\n        if agreement_score < 0:\n            self.fcs = -self.fcs\n\n        return self\n\n    def set_learner(self, learner):\n        \"\"\"Set a contrastive representation learning method.\n\n        Parameters\n        ----------\n        learner: Class object for contrastive learning.\n            Contrastive representation learning class object. Any class\n            object that (1) has fit as a class method, (2) can take two matrices as\n            the first parameters of fit, and (3) has get_feat_contribs as a class\n            method (e.g., ccPCA, https://github.com/takanori-fujiwara/ccpca).\n            If None, ccPCA is set as a learner.\n        Returns\n        -------\n        self.\n        \"\"\"\n        if learner is None:\n            self.learner = CCPCA()\n        else:\n            self.learner = learner\n"
] | [
[
"scipy.stats.pearsonr",
"numpy.array",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
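`CL.fit` in the record above orients the learner's feature contributions by checking whether they agree with the Pearson correlation between cluster membership and feature values, and flips the sign when the agreement score is negative. A standalone sketch of that alignment step on synthetic data; the `fcs` vector stands in for the learner output.

import numpy as np
from scipy.stats import pearsonr

K = np.random.rand(20, 3) + 1.0        # hypothetical target cluster (larger values)
R = np.random.rand(30, 3)              # hypothetical background
fcs = np.array([-0.5, -0.2, -0.8])     # pretend the learner returned flipped signs

X = np.vstack((K, R))
selected = np.array([1] * K.shape[0] + [0] * R.shape[0])
corr = np.array([pearsonr(selected, X[:, c])[0] for c in range(X.shape[1])])
if np.sum(corr * fcs) < 0:             # contributions disagree with the correlations
    fcs = -fcs
print(fcs)                             # signs now match the selected-vs-rest direction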
PhilippBongartz/ChessTransformer | [
"41b819cd97fb78205cf6fb09f005e816d2a251f7"
] | [
"chess_transformer_utils.py"
] | [
"#!/usr/bin/python\n# -*- coding: latin-1 -*-\n\nimport chess\n\nimport chess.pgn\n\nimport numpy as np\n\nimport random\n\n\n\n\n\n# all possible pairs of starting square and target square\n\ncolumn_numbers = {\n 'a':1,\n 'b':2,\n 'c':3,\n 'd':4,\n 'e':5,\n 'f':6,\n 'g':7,\n 'h':8\n}\n\ncolumns = 'abcdefgh'\nrows = '12345678'\nsquares = []\nfor c in columns:\n for r in rows:\n squares.append(''.join([c,r]))\n \nsquare_pairs = []\nfor sq1 in squares:\n for sq2 in squares:\n square_pairs.append(''.join([sq1,sq2]))\n\nindex2moves = [] # ['0-0','0-0-0'] Rochade ist e1g1 etc.\nfor pair in square_pairs:\n if pair[0:2]!=pair[2:]: \n if pair[0] == pair[2]: # gerade hoch\n index2moves.append(pair)\n if pair[1] == pair[3]: # gerade seitwärts\n index2moves.append(pair)\n if abs(int(pair[1])-int(pair[3])) == abs(column_numbers[pair[0]]-column_numbers[pair[2]]): #diagonal\n index2moves.append(pair)\n if abs(int(pair[1])-int(pair[3])) == 1 and abs(column_numbers[pair[0]]-column_numbers[pair[2]])==2: # Springer\n index2moves.append(pair)\n if abs(int(pair[1])-int(pair[3])) == 2 and abs(column_numbers[pair[0]]-column_numbers[pair[2]])==1: # Springer\n index2moves.append(pair)\n \n if (pair[3]=='8' and pair[1]=='7') or (pair[3]=='1' and pair[1]=='2'): # Umwandlungen\n if abs(column_numbers[pair[0]]-column_numbers[pair[2]])<=1:\n index2moves.append(pair+'q')\n index2moves.append(pair+'n')\n index2moves.append(pair+'b')\n index2moves.append(pair+'r')\n\nindex2moves = list(set(index2moves))\nindex2moves = sorted(index2moves)\n\nmove2label = {}\nfor i,move in enumerate(index2moves + ['0000']): # + Nullmove\n move2label[move] = i\n\n# Hier die pure-transformer 64*64 output version: In der Reihenfolge der Token, d.h. auch der FEN.\n# output[startsquareindex][zielsquareindex] \npure_square = {}\nfor row in range(8,0,-1):\n for column in 'abcdefgh':\n pure_square[column+str(row)] = len(pure_square)\n\n\n# Tokenization of a chess position\n\ntoken2piece = '0KkQqBbNnRrPp'\n\nfen2token_dict = {\n 'K':np.array([1]),\n 'k':np.array([2]),\n 'Q':np.array([3]),\n 'q':np.array([4]),\n 'B':np.array([5]),\n 'b':np.array([6]),\n 'N':np.array([7]),\n 'n':np.array([8]),\n 'R':np.array([9]),\n 'r':np.array([10]),\n 'P':np.array([11]),\n 'p':np.array([12]),\n '1':np.array([0]),\n '2':np.array([0,0]),\n '3':np.array([0,0,0]),\n '4':np.array([0,0,0,0]),\n '5':np.array([0,0,0,0,0]),\n '6':np.array([0,0,0,0,0,0]),\n '7':np.array([0,0,0,0,0,0,0]),\n '8':np.array([0,0,0,0,0,0,0,0]),\n '/':np.array([]),\n}\n\ncastling2token = {\n 'KQkq':np.array([13,14,15,16]),\n 'Qkq':np.array([0,14,15,16]),\n 'Kkq':np.array([13,0,15,16]),\n 'KQq':np.array([13,14,0,16]),\n 'KQk':np.array([13,14,15,0]),\n 'KQ':np.array([13,14,0,0]),\n 'Kk':np.array([14,0,15,0]),\n 'Kq':np.array([13,0,0,16]),\n 'Qk':np.array([0,14,15,0]),\n 'Qq':np.array([0,14,0,16]),\n 'kq':np.array([0,0,15,16]),\n 'K':np.array([13,0,0,0]),\n 'Q':np.array([0,14,0,0]),\n 'k':np.array([0,0,15,0]),\n 'q':np.array([0,0,0,16]),\n '-':np.array([0,0,0,0]),\n}\n\ncolumn2token = {\n 'a':np.array([17]),\n 'b':np.array([18]),\n 'c':np.array([19]),\n 'd':np.array([20]),\n 'e':np.array([21]),\n 'f':np.array([22]),\n 'g':np.array([23]),\n 'h':np.array([24]),\n '-':np.array([0])\n}\n\ncolor2token = {\n 'w':np.array([25]),\n 'b':np.array([26])\n}\n\ndef elo2token(elo):\n if elo == -1: # no elo\n token = 27\n elif elo < 1500:\n token = 28\n elif elo>=2700:\n token = 39\n else:\n token = 28 + (elo-1500)//100\n return np.array([token])\n\n\n \ndef fen2token(fen, white_elo = -1, black_elo = -1, move_list 
= [], elo = True):\n token_listen = []\n liste1 = fen.split()\n\n for l in liste1[0]:\n #print(l,fen2token_dict[l])\n token_listen.append(fen2token_dict[l])\n \n token_listen.append(color2token[liste1[1]]) # Wer am Zug ist\n \n token_listen.append(castling2token[liste1[2]]) # Rochaderechte\n \n token_listen.append(column2token[liste1[3][0]]) # en passant column\n\n if elo:\n token_listen.append(elo2token(white_elo))\n token_listen.append(elo2token(black_elo))\n \n if move_list:\n for move in move_list:\n token_listen.append(np.array([move2label[move]+40]))\n\n # Außerdem gibt es noch Halbzüge seit Bauernzug/Schlagzug und Zug der Partie. Die lassen wir weg.\n \n tokens = np.concatenate(token_listen)\n tokens = tokens.reshape((1,-1))\n return tokens\n\nfen2vector_dict = {\n 'K':np.array([1,0,0,0,0,0,0,0,0,0,0,0]),\n 'k':np.array([0,1,0,0,0,0,0,0,0,0,0,0]),\n 'Q':np.array([0,0,1,0,0,0,0,0,0,0,0,0]),\n 'q':np.array([0,0,0,1,0,0,0,0,0,0,0,0]),\n 'B':np.array([0,0,0,0,1,0,0,0,0,0,0,0]),\n 'b':np.array([0,0,0,0,0,1,0,0,0,0,0,0]),\n 'N':np.array([0,0,0,0,0,0,1,0,0,0,0,0]),\n 'n':np.array([0,0,0,0,0,0,0,1,0,0,0,0]),\n 'R':np.array([0,0,0,0,0,0,0,0,1,0,0,0]),\n 'r':np.array([0,0,0,0,0,0,0,0,0,1,0,0]),\n 'P':np.array([0,0,0,0,0,0,0,0,0,0,1,0]),\n 'p':np.array([0,0,0,0,0,0,0,0,0,0,0,1]),\n '1':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*1),\n '2':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*2),\n '3':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*3),\n '4':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*4),\n '5':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*5),\n '6':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*6),\n '7':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*7),\n '8':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*8),\n '/':np.array([]),\n}\n\ncastling2vector = {\n 'KQkq':np.array([1,1,1,1]),\n 'Qkq':np.array([0,1,1,1]),\n 'Kkq':np.array([1,0,1,1]),\n 'KQq':np.array([1,1,0,1]),\n 'KQk':np.array([1,1,1,0]),\n 'KQ':np.array([1,1,0,0]),\n 'Kk':np.array([1,0,1,0]),\n 'Kq':np.array([1,0,0,1]),\n 'Qk':np.array([0,1,1,0]),\n 'Qq':np.array([0,1,0,1]),\n 'kq':np.array([0,0,1,1]),\n 'K':np.array([1,0,0,0]),\n 'Q':np.array([0,1,0,0]),\n 'k':np.array([0,0,1,0]),\n 'q':np.array([0,0,0,1]),\n '-':np.array([0,0,0,0]),\n}\n\ncolumn2vector = {\n 'a':np.array([1,0,0,0,0,0,0,0]),\n 'b':np.array([0,1,0,0,0,0,0,0]),\n 'c':np.array([0,0,1,0,0,0,0,0]),\n 'd':np.array([0,0,0,1,0,0,0,0]),\n 'e':np.array([0,0,0,0,1,0,0,0]),\n 'f':np.array([0,0,0,0,0,1,0,0]),\n 'g':np.array([0,0,0,0,0,0,1,0]),\n 'h':np.array([0,0,0,0,0,0,0,1]),\n '-':np.array([0,0,0,0,0,0,0,0])\n}\n\ncolor2vector = {\n 'w':np.array([1,0]),\n 'b':np.array([0,1])\n}\n\ndef fen2vector(fen):\n vector_listen = []\n liste1 = fen.split()\n\n for l in liste1[0]:\n #print(l,fen2token_dict[l])\n vector_listen.append(fen2vector_dict[l])\n \n vector_listen.append(color2vector[liste1[1]]) # Wer am Zug ist\n \n vector_listen.append(castling2vector[liste1[2]]) # Rochaderechte\n \n vector_listen.append(column2vector[liste1[3][0]]) # en passant column\n \n # Außerdem gibt es noch Halbzüge seit Bauernzug/Schlagzug und Zug der Partie. Die lassen wir weg.\n \n vector = np.concatenate(vector_listen)\n vector = vector.reshape((1,-1)) # batch of one\n return vector\n\n\n\n\n\n\n# OLD VERSION:\n# validation_steps wird als Parameter an fit übergeben == total_validation_samples / batchsize \ndef validationdata(path, batchsize, bis_game_number, stepnumber=60000, target = 'legalmove', aws = False):\n \"\"\"\n targets können 'legalmove' sein, d.h. startfeld-zielfeld paare die tatsächlich auftreten können.\n 'outcome', d.h. 
sieg, niederlage, remis\n 'squarepairs', d.h. alle combinationen von start und zielfeld TODO\n 'startingsquare', d.h. nur das startfeld TODO\n \"\"\" \n #if aws: \n # from smart_open import open\n\n outcome_dict = {\n '1-0':np.array(0),\n '0-1':np.array(1),\n '1/2-1/2':np.array(2),\n }\n\n with open(path,encoding='latin-1') as database:\n print(chess.__version__,(chess.__version__ == '0.23.11'))\n current_game = ''\n batch = []\n labels = []\n count = 0\n step_count = 0\n while current_game != None:\n if count >= bis_game_number: # eternal loop\n database.seek(0)\n count = 0\n step_count = 0\n \n if step_count >= stepnumber: # eternal loop\n database.seek(0)\n count=0\n step_count = 0\n\n current_game = chess.pgn.read_game(database)\n board = current_game.board()\n count+=1\n\n if chess.__version__ == '0.23.11':\n current_game_moves = [move for move in current_game.main_line()]\n else:\n current_game_moves = [move for move in current_game.mainline_moves()]\n\n use_game = True\n if target == 'outcome':\n headers = current_game.headers\n if 'Result' in headers:\n if headers['Result'] in ['1/2-1/2','0-1','1-0']:\n outcome = outcome_dict[headers['Result']]\n else:\n use_game = False\n else:\n use_game = False\n\n if current_game_moves and '0000' not in current_game_moves and use_game:\n for move in current_game_moves:\n #print(move)\n fen = board.fen()\n tokens = fen2token(fen)\n batch.append(tokens)\n if target == 'legalmove':\n labels.append(move2label[move.uci()])\n elif target == 'outcome':\n labels.append(outcome)\n elif target == 'squarepairs':\n movestring = move.uci()\n startfeld_index = pure_square[movestring[:2]]\n zielfeld_index = pure_square[movestring[2:4]]\n label = np.zeros(64*64)\n label = label.reshape((64,64))\n label[startfeld_index,zielfeld_index] = 1.0\n label = label.reshape((64*64)) \n labels.append(label)\n\n board.push(move)\n step_count += 1\n \n if len(batch)==batchsize:\n batch_tensor = np.concatenate(batch)\n yield batch_tensor, np.array(labels)\n batch = []\n labels = []\n\n\n\n# OLD VERSION:\ndef trainingsdata(path, fraction, batchsize, from_game_number = 0, bis_game_number = 100000000, target = 'legalmove', aws = False):\n \"\"\"\n targets können 'legalmove' sein, d.h. startfeld-zielfeld paare die tatsächlich auftreten können.\n 'outcome', d.h. sieg, niederlage, remis\n 'squarepairs', d.h. alle combinationen von start und zielfeld TODO\n 'startingsquare', d.h. 
nur das startfeld TODO\n \"\"\"\n\n outcome_dict = {\n '1-0':np.array(0),\n '0-1':np.array(1),\n '1/2-1/2':np.array(2),\n }\n\n #if aws: \n # from smart_open import open\n\n with open(path,encoding='latin-1') as database:\n current_game = ''\n batch = []\n labels = []\n count = 0\n while True: \n # erstmal das erste game suchen, ohne parsen:\n #header = chess.pgn.read_headers(database)\n\n ## Skimming geht nicht auf der Colab python chess-version\n #while count < from_game_number:\n # count+=1\n # header = chess.pgn.read_headers(database)\n \n while current_game != None:\n #if count%1000 == 0:\n # print(count)\n \n current_game = chess.pgn.read_game(database)\n board = current_game.board()\n count+=1\n \n if from_game_number < count < bis_game_number:\n if chess.__version__ == '0.23.11':\n current_game_moves = [move for move in current_game.main_line()]\n else:\n current_game_moves = [move for move in current_game.mainline_moves()]\n current_game_movestrings = [move.uci() for move in current_game_moves]\n\n use_game = True\n if target == 'outcome':\n headers = current_game.headers\n if 'Result' in headers:\n if headers['Result'] in ['1/2-1/2','0-1','1-0']:\n outcome = outcome_dict[headers['Result']]\n else:\n use_game = False\n else:\n use_game = False\n\n\n if current_game_moves and '0000' not in current_game_movestrings and use_game:\n for move in current_game_moves:\n #print(move)\n rant = random.random()\n if rant < fraction:\n #print(\"Game no:\",count,rant)\n #print(board.fen())\n fen = board.fen()\n tokens = fen2token(fen)\n batch.append(tokens)\n if target == 'legalmove':\n labels.append(move2label[move.uci()])\n elif target == 'outcome':\n labels.append(outcome)\n elif target == 'squarepairs':\n movestring = move.uci()\n startfeld_index = pure_square[movestring[:2]]\n zielfeld_index = pure_square[movestring[2:4]]\n label = np.zeros(64*64)\n label = label.reshape((64,64))\n label[startfeld_index,zielfeld_index] = 1.0\n label = label.reshape((64*64)) \n labels.append(label)\n\n if len(batch)==batchsize:\n batch_tensor = np.concatenate(batch)\n yield batch_tensor, np.array(labels)\n batch = []\n labels = []\n board.push(move)\n \n #header = chess.pgn.read_headers(database)\n \n if count >= bis_game_number:\n current_game = None\n \n database.seek(0) # wieder von vorne\n count = 0\n\n\n\n\n\n\n\n\n\n\n\noutcome_dict = {\n '1-0':np.array(0),\n '0-1':np.array(1),\n '1/2-1/2':np.array(2),\n}\n\n# Encapsulation für bessere Lesbarkeit\ndef game_can_be_used(target,headers):\n if target == 'outcome':\n if 'Result' in headers:\n if headers['Result'] in ['1/2-1/2','0-1','1-0']:\n return True\n else:\n return False\n else:\n return False\n return True\n \n\ndef move_strings(current_game):\n if chess.__version__ == '0.23.11':\n current_game_moves = [move for move in current_game.main_line()]\n else:\n current_game_moves = [move for move in current_game.mainline_moves()]\n current_game_movestrings = [move.uci() for move in current_game_moves]\n return current_game_movestrings,current_game_moves\n\n\ndef get_elos(headers):\n white_elo = -1\n black_elo = -1\n \n if 'BlackElo' in headers:\n try:\n black_elo = int(headers['BlackElo'])\n except:\n pass\n\n if 'WhiteElo' in headers:\n try:\n white_elo = int(headers['WhiteElo'])\n except:\n pass\n \n return white_elo, black_elo\n\n\n# Current data generator\ndef data_generator(path, fraction, batchsize, pool_size = 1, from_game_number = 0, bis_game_number = 100000000, target = 'legalmove', elo = False, move_tokens = 0, validation = False):\n \"\"\"\n 
targets können 'legalmove' sein, d.h. startfeld-zielfeld paare die tatsächlich auftreten können.\n 'outcome', d.h. sieg, niederlage, remis\n 'squarepairs', d.h. alle combinationen von start und zielfeld\n 'startingsquare', d.h. nur das startfeld TODO\n \"\"\"\n #if aws: \n # from smart_open import open\n \n sample_label_pool = []\n\n with open(path,encoding='latin-1') as database:\n current_game = ''\n count = 0\n while True: \n while current_game != None:\n current_game = chess.pgn.read_game(database)\n if current_game == None:\n break\n\n board = current_game.board()\n count+=1\n\n use_game = game_can_be_used(target,current_game.headers)\n if target == 'outcome' and use_game:\n outcome = outcome_dict[current_game.headers['Result']]\n \n if not (from_game_number < count < bis_game_number):\n use_game = False\n \n current_game_moves = []\n current_game_movestrings = []\n if use_game:\n current_game_movestrings,current_game_moves = move_strings(current_game)\n \n if not (current_game_moves and '0000' not in current_game_movestrings and use_game):\n use_game = False\n \n if use_game:\n move_list = []\n for t in range(move_tokens):\n move_list.append('0000')\n \n for move in current_game_moves:\n rant = random.random()\n move_list.append(move.uci())\n if rant < fraction:\n\n white_elo, black_elo = -1,-1\n if elo:\n white_elo, black_elo = get_elos(current_game.headers)\n \n fen = board.fen()\n tokens = fen2token(fen, white_elo=white_elo, black_elo=black_elo, move_list=move_list[-1*(move_tokens+1):-1], elo=elo )\n \n \n if target == 'legalmove':\n label = move2label[move.uci()]\n elif target == 'outcome':\n label = outcome\n elif target == 'squarepairs':\n movestring = move.uci()\n startfeld_index = pure_square[movestring[:2]]\n zielfeld_index = pure_square[movestring[2:4]]\n label = np.zeros(64*64)\n label = label.reshape((64,64))\n label[startfeld_index,zielfeld_index] = 1.0\n label = label.reshape((64*64)) \n \n sample_label_pool.append((tokens,label))\n \n if len(sample_label_pool)==batchsize * pool_size:\n random.shuffle(sample_label_pool)\n batch = [t for (t,l) in sample_label_pool[:batchsize]]\n labels = [l for (t,l) in sample_label_pool[:batchsize]]\n batch_tensor = np.concatenate(batch)\n yield batch_tensor, np.array(labels)\n sample_label_pool = sample_label_pool[batchsize:] # besser in place\n \n board.push(move)\n\n if count >= bis_game_number:\n current_game = None\n if validation:\n return\n \n database.seek(0) # wieder von vorne\n current_game = ''\n count = 0 \n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
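The row above bundles a FEN tokenizer and a PGN streaming generator. A minimal usage sketch follows; the module name `chess_data.py` and the database `games.pgn` are hypothetical stand-ins, not part of the original repo:

```python
# Hypothetical usage of the tokenizer/generator above; chess_data.py and
# games.pgn are stand-in names.
from chess_data import data_generator, fen2token

# Tokenize the starting position. With the default elo=True this yields
# 64 board tokens + side to move + 4 castling rights + en-passant column
# + 2 Elo tokens = 72 tokens in total.
tokens = fen2token("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
print(tokens.shape)  # (1, 72)

# Stream (position tokens, move label) batches from a PGN database.
# data_generator defaults to elo=False, so positions here have 70 tokens.
gen = data_generator("games.pgn", fraction=0.1, batchsize=32, target="legalmove")
batch, labels = next(gen)
print(batch.shape, labels.shape)  # (32, 70) and (32,)
```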
BorgwardtLab/graphkernels-review | [
"3dfc2fad64d4159722f06db11b555fc568997fcf"
] | [
"src/convert_to_text.py"
] | [
"#!/usr/bin/env python3\n#\n# convert_to_text.py: Converts a set of a graphs to a textual\n# representation in terms of their corresponding adjacencies.\n# This format is used by the MLG kernel.\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport sys\nimport traceback\n\nimport igraph as ig\nimport numpy as np\n\nfrom tqdm import tqdm\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('DIRECTORY', type=str, help='Input directory')\n parser.add_argument(\n '-f', '--force', action='store_true',\n default=False,\n help='If specified, overwrites data'\n )\n parser.add_argument(\n '-o', '--output',\n required=True,\n type=str,\n help='Output directory'\n )\n\n args = parser.parse_args()\n\n logging.basicConfig(\n level=logging.INFO,\n format=None\n )\n\n logging.info('Loading graphs...')\n\n # Get all filenames; this ensures that the shell does *not* complain\n # about the length of the argument list.\n filenames = sorted(\n glob.glob(os.path.join(args.DIRECTORY, '*.pickle'))\n )\n\n os.makedirs(args.output, exist_ok=True)\n\n name = os.path.basename(args.output)\n adjacency_name = os.path.join(args.output, name + '_A.txt')\n node_labels_name = os.path.join(args.output, name + '_N.txt')\n graph_labels_name = os.path.join(args.output, name + '_L.txt')\n\n if os.path.exists(adjacency_name) or \\\n os.path.exists(nodel_labels_name) or \\\n os.path.exists(graph_labels_name):\n if not args.force:\n logging.info('Output path already exists. Exiting.')\n sys.exit(0)\n\n # Finally, load the graphs (notice that there is no need to load\n # them if we would overwrite data anyway, so this ordering makes\n # more sense).\n graphs = [\n ig.read(filename, format='picklez') for filename in\n tqdm(filenames, desc='File')\n ]\n\n # Store node labels\n y = np.array([g['label'] for g in graphs])\n np.savetxt(graph_labels_name, y, delimiter=' ', fmt='%d')\n\n with open(adjacency_name, 'w') as f, open(node_labels_name, 'w') as g:\n\n # Write header: number of graphs in total in the file\n print(len(graphs), file=f)\n print(len(graphs), file=g)\n\n for graph in graphs:\n A = graph.get_adjacency(attribute=None)\n A = np.array(A.data)\n\n # Make sure that this matrix is really symmetric\n assert A.shape[0] == A.shape[1]\n\n # Print adjacency matrix size, followed by the matrix\n # itself, and store it.\n print(A.shape[0], file=f)\n np.savetxt(f, A, delimiter=' ', fmt='%d')\n\n # Print number of node labels, followed by the label vector\n # it self, and store it. First, we have to check whether an\n # internal label exists. If not, we also use the degree.\n\n if 'label' in graph.vs.attributes():\n labels = np.array(graph.vs['label'])\n else:\n labels = np.array(graph.degree())\n\n print(len(labels), file=g)\n np.savetxt(g, labels, delimiter=' ', fmt='%d')\n"
] | [
[
"numpy.savetxt",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
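A small smoke test for the converter above may help; the directory names below are made up, and `igraph` is assumed to be installed:

```python
# Write two labelled toy graphs in the compressed-pickle format the script
# globs for, then invoke it from the shell. Directory names are hypothetical.
import os
import igraph as ig

os.makedirs("toy_graphs", exist_ok=True)
for i in range(2):
    g = ig.Graph.Ring(4)
    g["label"] = i            # graph-level class label, written to *_L.txt
    g.vs["label"] = [1] * 4   # node labels; degrees are used when absent
    g.write(f"toy_graphs/{i}.pickle", format="picklez")

# Then: python convert_to_text.py toy_graphs -o toy_output
```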
GuilleGorines/nfcore-pikavirus-legacy | [
"a588a58fa082cd1802db5271b366e7d74d19e22f"
] | [
"bin/graphs_coverage.py"
] | [
"#!/usr/bin/env python\n\n# USAGE:\n#\n# graphs_coverage.py Samplename coveragefiles\n# \n# Calculates basic coverage statistics for coverage files provided. Samplename needed for file naming.\n#\n# This script has been developed exclusively for nf-core/pikavirus, and we cannot\n# assure its functioning in any other context. However, feel free to use any part\n# of it if desired.\n\n# Imports\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Needed functions\ndef weighted_avg_and_std(df,values, weights):\n average = np.average(df[values], weights=df[weights])\n variance = np.average((df[values]-average)**2, weights=df[weights])\n \n return (average, variance**0.5)\n\ndef calculate_weighted_median(df, values, weights):\n cumsum = df[weights].cumsum()\n cutoff = df[weights].sum() / 2.\n \n return df[cumsum >= cutoff][values].iloc[0]\n\n\n# args managent\noutfile_name=sys.argv[1]\nspecies_data=sys.argv[2]\n\ncoverage_files=sys.argv[3:]\n\nwith open(species_data) as species_data:\n species_data = species_data.readlines()\n\nspecies_data = [line.split(\"\\t\") for line in species_data]\nspecies_data = [[line[3], line[4], line[6].strip(\"\\n\")] for line in species_data]\n\n\n# Remove the extension of the file (so it matches the filename)\nextensions = [\".fna.gz\",\".fna\"]\n\nfor item in species_data:\n for extension in extensions:\n if item[2].endswith(extension):\n item[2]=item[2].replace(extension,\"\")\n\nfor item in coverage_files:\n\n match_name_coverage = os.path.basename(item).split(\"_vs_\")[0]\n\n for name in species_data:\n if name[2] == match_name_coverage:\n species = name[0]\n subspecies = name[1]\n\n with open(item,\"r\") as infile:\n infiledata = [line.strip(\"\\n\") for line in infile.readlines()]\n infiledata = [line.split(\"\\t\") for line in infiledata]\n \n for line in infiledata:\n if line[0] == \"genome\":\n line[0] = f\"{species}_{subspecies}_genome\"\n\n with open(item,\"w\") as outfile:\n for line in infiledata:\n filedata =\"\\t\".join(line)\n outfile.write(f\"{filedata}\\t{species}\\t{subspecies}\\n\")\n\ndataframe_list = []\n\nfor filename in coverage_files:\n tmp_dataframe = pd.read_csv(filename,sep=\"\\t\",header=None)\n dataframe_list.append(tmp_dataframe)\n\ndf = pd.concat(dataframe_list)\n\ndf.columns=[\"gnm\",\"covThreshold\",\"fractionAtThisCoverage\",\"genomeLength\",\"diffFracBelowThreshold\",\"Species\",\"Subspecies\"]\n\ndf[\"diffFracBelowThreshold_cumsum\"] = df.groupby('gnm')['diffFracBelowThreshold'].transform(pd.Series.cumsum)\ndf[\"diffFracAboveThreshold\"] = 1 - df[\"diffFracBelowThreshold_cumsum\"]\ndf[\"diffFracAboveThreshold_percentage\"] = df[\"diffFracAboveThreshold\"]*100\n\ndata = {\"gnm\":[],\"species\":[],\"subspecies\":[],\"covMean\":[],\"covMin\":[],\"covMax\":[],\"covSD\":[],\"covMedian\":[],\n \"x1-x4\":[],\"x5-x10\":[],\"x10-x19\":[],\">x20\":[],\"total\":[]}\n\nfor name, df_grouped in df.groupby(\"gnm\"):\n\n mean, covsd = weighted_avg_and_std(df_grouped,\"covThreshold\",\"diffFracBelowThreshold\")\n \n if mean == 0:\n continue\n \n minimum = min(df_grouped[\"covThreshold\"])\n maximum = max(df_grouped[\"covThreshold\"])\n median = calculate_weighted_median(df_grouped,\"covThreshold\",\"diffFracBelowThreshold\")\n \n data[\"gnm\"].append(name)\n data[\"species\"].append(\"\".join(set(df_grouped[\"Species\"])))\n data[\"subspecies\"].append(\"\".join(set(df_grouped[\"Subspecies\"])))\n data[\"covMean\"].append(mean)\n data[\"covMin\"].append(minimum)\n 
data[\"covMax\"].append(maximum)\n data[\"covSD\"].append(covsd)\n data[\"covMedian\"].append(median)\n \n y0=df_grouped.diffFracBelowThreshold[(df_grouped[\"covThreshold\"] >= 1) & (df_grouped[\"covThreshold\"] < 5)].sum()\n y1=df_grouped.diffFracBelowThreshold[(df_grouped[\"covThreshold\"] >= 5) & (df_grouped[\"covThreshold\"] < 10)].sum()\n y2=df_grouped.diffFracBelowThreshold[(df_grouped[\"covThreshold\"] >= 10) & (df_grouped[\"covThreshold\"] < 20)].sum()\n y3=df_grouped.diffFracBelowThreshold[(df_grouped[\"covThreshold\"] >= 20)].sum()\n y4=y0+y1+y2+y3\n \n data[\"x1-x4\"].append(y0)\n data[\"x5-x10\"].append(y1)\n data[\"x10-x19\"].append(y2)\n data[\">x20\"].append(y3)\n data[\"total\"].append(y4)\n \n\n plt.figure() \n df_grouped.plot.line(x=\"covThreshold\",\n y=\"diffFracAboveThreshold_percentage\",\n legend=None)\n plt.title(name)\n plt.xlabel(\"Coverage Threshold\")\n plt.ylabel(\"% of reads above threshold\")\n\n plt.savefig(f\"{name}.pdf\")\n plt.close()\n\nnewcov = pd.DataFrame.from_dict(data)\nnewcov.to_csv(f\"{outfile_name}_table.csv\")\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.xlabel",
"numpy.average",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
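The weighted helpers in the script above drive all of its summary statistics; a toy check of the same computation on three coverage thresholds:

```python
# Mirrors weighted_avg_and_std from the script above on a tiny frame:
# coverage thresholds weighted by the genome fraction at each threshold.
import numpy as np
import pandas as pd

df = pd.DataFrame({"covThreshold": [0, 1, 2],
                   "diffFracBelowThreshold": [0.5, 0.25, 0.25]})
mean = np.average(df["covThreshold"], weights=df["diffFracBelowThreshold"])
variance = np.average((df["covThreshold"] - mean) ** 2,
                      weights=df["diffFracBelowThreshold"])
print(mean, variance ** 0.5)  # 0.75 and ~0.829 (weighted mean and std)
```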
shrenik-jain/ComVEX | [
"93622de3a4771cda13b14f8bba52990eb47c2409",
"93622de3a4771cda13b14f8bba52990eb47c2409"
] | [
"examples/EfficientDet/demo.py",
"tests/test_ViT.py"
] | [
"import os\nimport sys\n\nsys.path.insert(0, os.getcwd())\n\nimport torch \n\nfrom comvex.efficientdet import EfficientDetObjectDetectionConfig, EfficientDetObjectDetection\n\n\nif __name__ == \"__main__\":\n\n efficientdet_config = EfficientDetObjectDetectionConfig.D0(10, 20)\n efficientdet = EfficientDetObjectDetection(efficientdet_config)\n\n x = torch.randn(1, 3, 512, 512)\n pred_class, pred_box = efficientdet(x)\n\n print(\"Input Shape:\\n\", x.shape)\n for idx, out in enumerate(pred_class):\n print(f\"Class Output Shape ({idx}):\\n\", out.shape)\n\n for idx, out in enumerate(pred_box):\n print(f\"Box Output Shape ({idx}):\\n\", out.shape)",
"import gc\nimport torch\nfrom .utils import *\n\n# === Import model-related objects ===\nfrom comvex.vit import ViTConfig, ViTWithLinearClassifier\n\n# === Instantiate your Model ===\n# - For specializations\nspecializations = [attr for attr in dir(ViTConfig) if attr.startswith(\"ViT\")]\n\n# === Settings ===\n# - Required:\ninput_shape = (1, 3, 224, 224)\nexpected_shape = (1, 10)\n# - Optional:\nkwargs = {}\nkwargs['num_classes'] = 10\n\n# === Test Cases ===\n# Default test for specializations\ndef test_forward():\n for spec in specializations:\n print(spec)\n config = getattr(ViTConfig, spec)(**kwargs)\n model = ViTWithLinearClassifier(config)\n model.eval()\n\n x = torch.randn(input_shape)\n out = model(x)\n\n assert_output_shape_wrong(out, expected_shape)\n assert_output_has_nan(out)\n \n del model\n gc.collect()"
] | [
[
"torch.randn"
],
[
"torch.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
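The ViT test above discovers config specializations by reflection (`dir` plus `getattr`). A self-contained illustration of that pattern follows; this `Config` class is a stand-in, not the real `comvex.vit.ViTConfig`:

```python
# Stand-in Config with two "specialization" classmethods, discovered the
# same way test_forward discovers ViTConfig specializations.
class Config:
    def __init__(self, depth, num_classes):
        self.depth = depth
        self.num_classes = num_classes

    @classmethod
    def ViT_B(cls, **kwargs):
        return cls(depth=12, **kwargs)

    @classmethod
    def ViT_L(cls, **kwargs):
        return cls(depth=24, **kwargs)

for spec in [attr for attr in dir(Config) if attr.startswith("ViT")]:
    config = getattr(Config, spec)(num_classes=10)
    print(spec, config.depth, config.num_classes)  # ViT_B 12 10, ViT_L 24 10
```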
6un9-h0-Dan/pytext | [
"3b5102819bcf043dc4799ede1a4ae0b558aacb04"
] | [
"pytext/loss/loss.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nfrom enum import Enum\n\nimport torch\nimport torch.nn.functional as F\nfrom pytext.config import ConfigBase\nfrom pytext.config.component import Component, ComponentType\nfrom pytext.utils import loss as loss_utils, precision\nfrom pytext.utils.cuda import FloatTensor\nfrom torch import nn\n\n\nclass SourceType(Enum):\n LOG_PROBS = \"log_probs\"\n LOGITS = \"logits\"\n PROBS = \"probs\"\n\n\nclass Loss(Component):\n \"\"\"Base class for loss functions\"\"\"\n\n __COMPONENT_TYPE__ = ComponentType.LOSS\n\n def __init__(self, config=None, *args, **kwargs):\n super().__init__(config)\n\n def __call__(self, logit, targets, reduce=True):\n raise NotImplementedError\n\n\nclass CrossEntropyLoss(Loss):\n class Config(ConfigBase):\n pass\n\n def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):\n self.ignore_index = ignore_index\n self.weight = weight\n\n def __call__(self, logits, targets, reduce=True):\n # Don't change to F.cross_entropy() because @barlaso suggested not doing so.\n # There's some wisdom from fairseq folks that it's the preferred way.\n # Needs more testing before we can change to using F.cross_entropy().\n return F.nll_loss(\n F.log_softmax(logits, 1, dtype=torch.float32),\n targets,\n weight=self.weight,\n ignore_index=self.ignore_index,\n reduction=\"mean\" if reduce else \"none\",\n )\n\n\nclass NLLLoss(Loss):\n def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):\n self.ignore_index = ignore_index\n self.weight = weight\n\n def __call__(self, log_probs, targets, reduce=True):\n return F.nll_loss(\n log_probs,\n targets,\n ignore_index=self.ignore_index,\n reduction=\"mean\" if reduce else \"none\",\n weight=self.weight,\n )\n\n\nclass BinaryCrossEntropyLoss(Loss):\n class Config(ConfigBase):\n reweight_negative: bool = True\n reduce: bool = True\n\n def __call__(self, logits, targets, reduce=True):\n \"\"\"\n Computes 1-vs-all binary cross entropy loss for multiclass\n classification.\n \"\"\"\n # Converts targets to one-hot representation. Dim: [batch, n_classes]\n targets = (\n (\n FloatTensor(targets.size(0), logits.size(1))\n .zero_()\n .scatter_(1, targets.unsqueeze(1).data, 1)\n )\n if len(logits.size()) > 1 # If multi-class classification.\n else targets.float()\n )\n\n \"\"\"\n `F.binary_cross_entropy` or `torch.nn.BCELoss.` requires the\n output of the previous function be already a FloatTensor.\n \"\"\"\n # This weighting applies uniform class weights.\n # examples_per_class = one_hot_target.sum(0).clamp(min=1)\n # total_positive = examples_per_class.sum()\n # weights = total_positive.unsqueeze(0) / examples_per_class\n\n loss = F.binary_cross_entropy_with_logits(\n precision.maybe_float(logits), targets, reduction=\"none\"\n )\n\n if self.config.reweight_negative:\n # This makes sure we have same weights for all negative classes and\n # single positive class. Weight is 1 for the correct class and\n # 1 / (n - 1) for other ones.\n weights = targets + (1.0 - targets) / max(1, targets.size(1) - 1.0)\n loss = loss * weights\n\n return loss.sum(-1).mean() if reduce else loss.sum(-1)\n\n\nclass CosineEmbeddingLoss(Loss):\n class Config(ConfigBase):\n margin: float = 0.0\n\n def __init__(self, config, *args, **kwargs):\n self.margin = config.margin\n\n def __call__(self, embeddings, targets, reduce=True):\n if len(embeddings) != 2:\n raise ValueError(\n f\"Number of embeddings must be 2. 
Found {len(embeddings)} embeddings.\"\n )\n return F.cosine_embedding_loss(\n embeddings[0],\n embeddings[1],\n targets,\n margin=self.margin,\n reduction=\"mean\" if reduce else \"none\",\n )\n\n\nclass MultiLabelSoftMarginLoss(Loss):\n class Config(ConfigBase):\n pass\n\n def __call__(self, m_out, targets, reduce=True):\n \"\"\"\n Computes multi-label classification loss\n see details in torch.nn.MultiLabelSoftMarginLoss\n \"\"\"\n\n num_classes = m_out.size()[1]\n target_labels = targets[0]\n\n # each label list is padded by -1 to make every\n # observation example has the same length of list of labels\n # since -1 is out of the index range\n # add 1 to target_labels temporarily\n tmp_target_labels = target_labels + 1\n\n # the idea is similar to one_hot_targets\n # the following encoding supports multi-label task\n # need to delete the first-column endoing since\n # it's for the padded label -1\n n_hot_targets = (\n FloatTensor(target_labels.size(0), num_classes + 1)\n .zero_()\n .scatter_(1, tmp_target_labels, 1)\n )[:, 1:]\n\n \"\"\"\n `F.multilabel_soft_margin_loss` or `torch.nn.MultiLabelSoftMarginLoss.`\n requires the\n output of the previous function be already a FloatTensor.\n \"\"\"\n\n # default: equal weight for each class\n # the losses are averaged over observations for each mini-batch\n\n loss = F.multilabel_soft_margin_loss(\n precision.maybe_float(m_out), n_hot_targets, reduction=\"mean\"\n )\n\n return loss\n\n\nclass AUCPRHingeLoss(nn.Module, Loss):\n \"\"\"area under the precision-recall curve loss,\n Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 \\\n TensorFlow Implementation: \\\n https://github.com/tensorflow/models/tree/master/research/global_objectives\\\n \"\"\"\n\n class Config(ConfigBase):\n \"\"\"\n Attributes:\n precision_range_lower (float): the lower range of precision values over\n which to compute AUC. Must be nonnegative, `\\leq precision_range_upper`,\n and `leq 1.0`.\n precision_range_upper (float): the upper range of precision values over\n which to compute AUC. 
Must be nonnegative, `\\geq precision_range_lower`,\n and `leq 1.0`.\n num_classes (int): number of classes(aka labels)\n num_anchors (int): The number of grid points used to approximate the\n Riemann sum.\n \"\"\"\n\n precision_range_lower: float = 0.0\n precision_range_upper: float = 1.0\n num_classes: int = 1\n num_anchors: int = 20\n\n def __init__(self, config, weights=None, *args, **kwargs):\n \"\"\"Args:\n config: Config containing `precision_range_lower`, `precision_range_upper`,\n `num_classes`, `num_anchors`\n \"\"\"\n nn.Module.__init__(self)\n Loss.__init__(self, config)\n\n self.num_classes = self.config.num_classes\n self.num_anchors = self.config.num_anchors\n self.precision_range = (\n self.config.precision_range_lower,\n self.config.precision_range_upper,\n )\n\n # Create precision anchor values and distance between anchors.\n # coresponding to [alpha_t] and [delta_t] in the paper.\n # precision_values: 1D `Tensor` of shape [K], where `K = num_anchors`\n # delta: Scalar (since we use equal distance between anchors)\n self.precision_values, self.delta = loss_utils.range_to_anchors_and_delta(\n self.precision_range, self.num_anchors\n )\n\n # notation is [b_k] in paper, Parameter of shape [C, K]\n # where `C = number of classes` `K = num_anchors`\n self.biases = nn.Parameter(\n FloatTensor(self.config.num_classes, self.config.num_anchors).zero_()\n )\n self.lambdas = nn.Parameter(\n FloatTensor(self.config.num_classes, self.config.num_anchors).data.fill_(\n 1.0\n )\n )\n\n def forward(self, logits, targets, reduce=True, size_average=True, weights=None):\n \"\"\"\n Args:\n logits: Variable :math:`(N, C)` where `C = number of classes`\n targets: Variable :math:`(N)` where each value is\n `0 <= targets[i] <= C-1`\n weights: Coefficients for the loss. Must be a `Tensor` of shape\n [N] or [N, C], where `N = batch_size`, `C = number of classes`.\n size_average (bool, optional): By default, the losses are averaged\n over observations for each minibatch. However, if the field\n sizeAverage is set to False, the losses are instead summed\n for each minibatch. Default: ``True``\n reduce (bool, optional): By default, the losses are averaged or summed over\n observations for each minibatch depending on size_average. When reduce\n is False, returns a loss per input/target element instead and ignores\n size_average. 
Default: True\n \"\"\"\n C = 1 if logits.dim() == 1 else logits.size(1)\n\n if self.num_classes != C:\n raise ValueError(\n \"num classes is %d while logits width is %d\" % (self.num_classes, C)\n )\n\n labels, weights = AUCPRHingeLoss._prepare_labels_weights(\n logits, targets, weights=weights\n )\n\n # Lagrange multipliers\n # Lagrange multipliers are required to be nonnegative.\n # Their gradient is reversed so that they are maximized\n # (rather than minimized) by the optimizer.\n # 1D `Tensor` of shape [K], where `K = num_anchors`\n lambdas = loss_utils.lagrange_multiplier(self.lambdas)\n # print(\"lambdas: {}\".format(lambdas))\n\n # A `Tensor` of Shape [N, C, K]\n hinge_loss = loss_utils.weighted_hinge_loss(\n labels.unsqueeze(-1),\n logits.unsqueeze(-1) - self.biases,\n positive_weights=1.0 + lambdas * (1.0 - self.precision_values),\n negative_weights=lambdas * self.precision_values,\n )\n\n # 1D tensor of shape [C]\n class_priors = loss_utils.build_class_priors(labels, weights=weights)\n\n # lambda_term: Tensor[C, K]\n # according to paper, lambda_term = lambda * (1 - precision) * |Y^+|\n # where |Y^+| is number of postive examples = N * class_priors\n lambda_term = class_priors.unsqueeze(-1) * (\n lambdas * (1.0 - self.precision_values)\n )\n\n per_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term\n\n # Riemann sum over anchors, and normalized by precision range\n # loss: Tensor[N, C]\n loss = per_anchor_loss.sum(2) * self.delta\n loss /= self.precision_range[1] - self.precision_range[0]\n\n if not reduce:\n return loss\n elif size_average:\n return loss.mean()\n else:\n return loss.sum()\n\n @staticmethod\n def _prepare_labels_weights(logits, targets, weights=None):\n \"\"\"\n Args:\n logits: Variable :math:`(N, C)` where `C = number of classes`\n targets: Variable :math:`(N)` where each value is\n `0 <= targets[i] <= C-1`\n weights: Coefficients for the loss. Must be a `Tensor` of shape\n [N] or [N, C], where `N = batch_size`, `C = number of classes`.\n Returns:\n labels: Tensor of shape [N, C], one-hot representation\n weights: Tensor of shape broadcastable to labels\n \"\"\"\n N, C = logits.size()\n # Converts targets to one-hot representation. 
Dim: [N, C]\n labels = FloatTensor(N, C).zero_().scatter(1, targets.unsqueeze(1).data, 1)\n\n if weights is None:\n weights = FloatTensor(N).data.fill_(1.0)\n\n if weights.dim() == 1:\n weights.unsqueeze_(-1)\n\n return labels, weights\n\n\nclass KLDivergenceBCELoss(Loss):\n class Config(ConfigBase):\n temperature: float = 1.0\n hard_weight: float = 0.0\n\n def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):\n assert 0.0 <= config.hard_weight < 1.0\n\n self.ignore_index = ignore_index\n self.weight = weight\n self.t = config.temperature\n self.hard_weight = config.hard_weight\n\n def __call__(self, logits, targets, reduce=True):\n \"\"\"\n Computes Kullback-Leibler divergence loss for multiclass classification\n probability distribution computed by BinaryCrossEntropyLoss loss\n \"\"\"\n hard_targets, _, soft_targets_logits = targets\n # we clamp the probability between (1e-20, 1 - 1e-20) to avoid log(0) problem\n # in the calculation of KLDivergence\n soft_targets = F.sigmoid(FloatTensor(soft_targets_logits) / self.t).clamp(\n 1e-20, 1 - 1e-20\n )\n probs = F.sigmoid(logits / self.t).clamp(1e-20, 1 - 1e-20)\n probs_neg = probs.neg().add(1).clamp(1e-20, 1 - 1e-20)\n soft_targets_neg = soft_targets.neg().add(1).clamp(1e-20, 1 - 1e-20)\n if self.weight is not None:\n soft_loss = (\n F.kl_div(probs.log(), soft_targets, reduction=\"none\") * self.weight\n + F.kl_div(probs_neg.log(), soft_targets_neg, reduction=\"none\")\n * self.weight\n )\n if reduce:\n soft_loss = soft_loss.mean()\n else:\n soft_loss = F.kl_div(\n probs.log(), soft_targets, reduction=\"mean\" if reduce else \"none\"\n ) + F.kl_div(\n probs_neg.log(),\n soft_targets_neg,\n reduction=\"mean\" if reduce else \"none\",\n )\n soft_loss *= self.t ** 2 # see https://arxiv.org/pdf/1503.02531.pdf\n\n hard_loss = 0.0\n if self.hard_weight > 0.0:\n one_hot_targets = (\n FloatTensor(hard_targets.size(0), logits.size(1))\n .zero_()\n .scatter_(1, hard_targets.unsqueeze(1).data, 1)\n )\n hard_loss = F.binary_cross_entropy_with_logits(\n logits,\n one_hot_targets,\n reduction=\"mean\" if reduce else \"none\",\n weight=self.weight,\n )\n\n return (1.0 - self.hard_weight) * soft_loss + self.hard_weight * hard_loss\n\n\nclass KLDivergenceCELoss(Loss):\n class Config(ConfigBase):\n temperature: float = 1.0\n hard_weight: float = 0.0\n\n def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):\n # ignore_index not easily added to kl_div loss, don't support this until needed\n assert ignore_index < 0\n assert 0.0 <= config.hard_weight < 1.0\n\n self.weight = weight\n self.t = config.temperature\n self.hard_weight = config.hard_weight\n\n def __call__(self, logits, targets, reduce=True, combine_loss=True):\n \"\"\"\n Computes Kullback-Leibler divergence loss for multiclass classification\n probability distribution computed by CrossEntropyLoss loss.\n For, KL-divergence, batchmean is the right way to reduce, not just mean.\n \"\"\"\n hard_targets, _, soft_targets_logits = targets\n soft_targets = F.softmax(soft_targets_logits.float() / self.t, dim=1)\n soft_targets = soft_targets.clamp(1e-10, 1 - 1e-10)\n log_probs = F.log_softmax(logits / self.t, 1)\n\n if self.weight is not None:\n soft_loss = (\n F.kl_div(log_probs, soft_targets, reduction=\"none\") * self.weight\n )\n # soft_loss dim is batch_size * num_labels, while hard_loss is just\n # batch size, we have to still reduce soft_loss by the labels\n # dimension in order to be able to add the two losses.\n soft_loss = (\n torch.sum(soft_loss, 
dim=1).mean()\n if reduce\n else torch.sum(soft_loss, dim=1)\n )\n else:\n soft_loss = F.kl_div(\n log_probs, soft_targets, reduction=\"batchmean\" if reduce else \"none\"\n )\n\n soft_loss *= self.t ** 2 # See https://arxiv.org/pdf/1503.02531.pdf\n hard_loss = 0.0\n if self.hard_weight > 0.0:\n hard_loss = F.cross_entropy(\n logits,\n hard_targets,\n reduction=\"mean\" if reduce else \"none\",\n weight=self.weight,\n )\n\n return (\n (1.0 - self.hard_weight) * soft_loss + self.hard_weight * hard_loss\n if combine_loss\n else (soft_loss, hard_loss)\n )\n\n\nclass PairwiseRankingLoss(Loss):\n \"\"\"\n Given embeddings for a query, positive response and negative response\n computes pairwise ranking hinge loss\n \"\"\"\n\n class Config(ConfigBase):\n margin: float = 1.0\n\n @staticmethod\n def get_similarities(embeddings):\n pos_embed, neg_embed, query_embed = embeddings\n pos_similarity = F.cosine_similarity(query_embed, pos_embed)\n neg_similarity = F.cosine_similarity(query_embed, neg_embed)\n return pos_similarity, neg_similarity, query_embed.size(0)\n\n def __call__(self, logits, targets, reduce=True):\n pos_similarity, neg_similarity, batch_size = self.get_similarities(logits)\n targets_local = FloatTensor(batch_size)\n targets_local.fill_(1) # 1: pos_similarity should be higher than neg_similarity\n return F.margin_ranking_loss(\n pos_similarity, neg_similarity, targets_local, self.config.margin\n )\n\n\nclass MAELoss(Loss):\n \"\"\"\n Mean absolute error or L1 loss, for regression tasks.\n \"\"\"\n\n class Config(ConfigBase):\n pass\n\n def __call__(self, predictions, targets, reduce=True):\n return F.l1_loss(predictions, targets, reduction=\"mean\" if reduce else \"none\")\n\n\nclass MSELoss(Loss):\n \"\"\"\n Mean squared error or L2 loss, for regression tasks.\n \"\"\"\n\n class Config(ConfigBase):\n pass\n\n def __call__(self, predictions, targets, reduce=True):\n return F.mse_loss(predictions, targets, reduction=\"mean\" if reduce else \"none\")\n\n\nclass LabelSmoothedCrossEntropyLoss(Loss):\n class Config(ConfigBase):\n beta: float = 0.1\n source: SourceType = SourceType.LOGITS\n use_entropy: bool = False\n\n def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):\n # weight values other than 1.0 gives inconsistent behavior\n # Refer: https://github.com/pytorch/pytorch/issues/17577\n if weight is not None:\n assert torch.sum(torch.abs(weight - 1.0)) < 1e-7\n\n self.ignore_index = ignore_index\n self.weight = weight\n self.beta = config.beta\n self.source = config.source\n self.use_entropy = config.use_entropy\n\n def __call__(self, logits, targets, reduce=True):\n \"\"\"\n If use_entropy is False, returns the cross-entropy loss alongwith the KL divergence of the\n discrete uniform distribution with the logits. Refer to section 3.2\n If use_entopy is True, uses the entropy of the output distribution as\n the smoothing loss (i.e., higher entropy, better). 
Refer to section 3\n https://arxiv.org/pdf/1701.06548.pdf\n \"\"\"\n\n if self.use_entropy:\n # loss is negative of entropy\n probs = F.softmax(logits, dim=1)\n log_probs = torch.log(probs)\n label_smoothing_loss = torch.sum(log_probs * probs, dim=1)\n else:\n # negative KL-div has an additional log(num_classes) term but ignored\n # here because it doesn't contribute to optimization\n if self.source == SourceType.LOGITS:\n log_probs = F.log_softmax(logits, dim=1)\n elif self.source == SourceType.PROBS:\n log_probs = logits.log()\n else:\n log_probs = logits\n label_smoothing_loss = -1 * log_probs.mean(dim=1)\n\n if reduce:\n non_ignored = targets != self.ignore_index\n if non_ignored.any():\n label_smoothing_loss = torch.mean(label_smoothing_loss[non_ignored])\n else:\n label_smoothing_loss = torch.tensor(0.0, device=logits.device)\n\n cross_entropy_loss = F.nll_loss(\n log_probs,\n targets,\n ignore_index=self.ignore_index,\n reduction=\"mean\" if reduce else \"none\",\n weight=self.weight,\n )\n\n return (1.0 - self.beta) * cross_entropy_loss + self.beta * label_smoothing_loss\n\n\nclass LabelSmoothedCrossEntropyLengthLoss(Loss):\n class Config(LabelSmoothedCrossEntropyLoss.Config):\n lengths_weight: float = 0.25\n beta_2: float = 0.25\n assert_valid_targets: bool = True\n\n def __init__(self, config, weight=None, ignore_index=-100):\n # weight values other than 1.0 gives inconsistent behavior\n # Refer: https://github.com/pytorch/pytorch/issues/17577\n if weight is not None:\n assert torch.sum(torch.abs(weight - 1.0)) < 1e-7\n\n self.lengths_weight = config.lengths_weight\n self.assert_valid_targets = config.assert_valid_targets\n self.label_smoothing_loss = LabelSmoothedCrossEntropyLoss(\n config, ignore_index=ignore_index, weight=weight\n )\n\n self.length_loss = LabelSmoothedCrossEntropyLoss(\n config=LabelSmoothedCrossEntropyLoss.Config(\n beta=config.beta_2,\n use_entropy=config.use_entropy,\n source=SourceType.LOG_PROBS,\n )\n )\n\n def __call__(self, logits, targets, length_log_probs, length_targets, reduce=True):\n label_loss = self.label_smoothing_loss(logits, targets, reduce=reduce)\n\n max_supported_dim = length_log_probs.size(1)\n length_targets = length_targets.unsqueeze(-1)\n\n if self.assert_valid_targets:\n assert not torch.any(\n length_targets >= max_supported_dim\n ), f\"max_supported_dim: {max_supported_dim}, Total Violations : {str(length_targets[length_targets >= max_supported_dim].flatten().tolist())}\"\n else:\n length_targets[length_targets >= max_supported_dim] = max_supported_dim - 1\n\n length_loss = self.length_loss(\n logits=length_log_probs, targets=length_targets.view(-1), reduce=reduce\n )\n\n total_loss = label_loss + self.lengths_weight * length_loss\n\n return total_loss, {\"label_loss\": label_loss, \"length_loss\": length_loss}\n"
] | [
[
"torch.nn.functional.kl_div",
"torch.mean",
"torch.nn.functional.softmax",
"torch.abs",
"torch.nn.functional.nll_loss",
"torch.nn.functional.margin_ranking_loss",
"torch.nn.functional.l1_loss",
"torch.sum",
"torch.tensor",
"torch.nn.functional.sigmoid",
"torch.nn.functional.cosine_embedding_loss",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.mse_loss",
"torch.nn.functional.cosine_similarity",
"torch.log",
"torch.nn.Module.__init__",
"torch.nn.functional.log_softmax",
"torch.nn.functional.cross_entropy",
"torch.any"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
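Several of the losses above rely on the same `scatter_` idiom to one-hot encode integer targets; here it is in isolation:

```python
# One-hot encoding with scatter_, as used by BinaryCrossEntropyLoss and
# KLDivergenceBCELoss above.
import torch

targets = torch.tensor([2, 0, 1])
one_hot = torch.zeros(3, 4).scatter_(1, targets.unsqueeze(1), 1.0)
print(one_hot)
# tensor([[0., 0., 1., 0.],
#         [1., 0., 0., 0.],
#         [0., 1., 0., 0.]])
```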
QNLSydney/qcodes-measurements | [
"09dd8dfeaa1b413484ce058df08f99df2640271e"
] | [
"qcodes_measurements/tools/time.py"
] | [
"import time\nimport numpy as np\n\nfrom qcodes.dataset.measurements import Measurement\n\nfrom .measure import Setpoint, _flush_buffers, _run_functions, _plot_sweep\nfrom ..logging import get_logger\nlogger = get_logger(\"tools.time\")\n\ndef _interruptible_sleep(sleep_time):\n while sleep_time > 1:\n time.sleep(1)\n sleep_time -= 1\n time.sleep(sleep_time)\n return\n\n@_plot_sweep\ndef sweep_time(*param_meas, delay=10, until=None,\n win=None, append=False, plot_params=None, annotation=None,\n atstart=(), ateach=(), atend=()):\n \"\"\"\n Run a time sweep, with a delay between each point. This sweep will run for `until` seconds,\n or indefinitely if until is None\n\n Args:\n *param_meas (Iterable[Parameter]): A list of the parameters to be measured at each of the\n set points. For now, these MUST be simple parameters. Arrays cannot be measured.\n\n win (Optional[PlotWindow]): The plot window to add plots to. If this value is None, the sweep\n will not be live plotted.\n\n append (bool): If this parameter is true, the trace will be appended to an existing window.\n\n plot_params (Optional[Iterable[Parameter]]): A list of parameters to plot. If not passed or None,\n all measured parameters will be automatically plotted.\n\n atstart (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions\n to be run before the measurement is started. The functions will be run BEFORE the parameters\n are inserted into the measurement, hence if some parameters require setup before they are run,\n they can be inserted here.\n\n ateach (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions\n to be run after each time the sweep parameter is set. These functions will be run AFTER\n the delay, and so is suitable if an instrument requires a call to capture a trace before\n the parameter can be read.\n\n atend (Optional[Union[Callable,Iterable[Callable]]]): A function or list of functions\n to be run at the end of a trace. 
This is run AFTER the data is saved into the database,\n and after parameters are set back to their starting points (if setback is True), and\n can therefore be used to read the data that was taken and potentially do some post analysis.\n\n Returns:\n (iw, win): ID is the trace id of the saved wave, win is a handle to the plot window that was created\n for the purposes of liveplotting.\n \"\"\"\n _flush_buffers(*param_meas)\n\n # Register setpoints\n m = Measurement()\n m.register_custom_parameter(\"time\", label=\"Time\", unit=\"s\")\n\n _run_functions(atstart)\n\n # Keep track of data and plots\n plt_data = {}\n time_data = np.full((1,), np.nan)\n array_size = 1\n curr_point = 0\n\n # If plot_params is not given, plot all measured parameters\n if plot_params is None:\n plot_params = param_meas\n\n # Set up parameters\n for param in param_meas:\n m.register_parameter(param, setpoints=(\"time\", ))\n\n # Create plot window\n if win is not None and param in plot_params:\n plot = win.addPlot(name=param.full_name,\n title=f\"{param.full_name} ({param.label})\")\n plot.left_axis.label = param.label\n plot.left_axis.unit = param.unit\n plot.bot_axis.label = \"Time\"\n plot.bot_axis.unit = \"s\"\n plotdata = plot.plot(setpoint_x=time_data, name=param.name, pen=(255,0,0))\n plt_data[param] = (plot, plotdata, np.full((1,), np.nan))\n\n if win is not None and annotation is not None:\n win.items[0].textbox(annotation)\n\n try:\n with m.run() as datasaver:\n start_time = time.monotonic()\n win.win_title += f\"{datasaver.run_id}\"\n for pd in plt_data.values():\n pd[0].plot_title += f\" (id: {datasaver.run_id})\"\n while True:\n # Update each parameter\n data = [(\"time\", time.monotonic()-start_time)]\n time_data[curr_point] = data[-1][1]\n\n _run_functions(ateach, param_vals=(Setpoint(\"time\", curr_point, data[-1][1])))\n\n if until is not None and time_data[curr_point] > until:\n break\n\n for param in param_meas:\n val = param()\n if val is None:\n val = np.nan\n data.append((param, val))\n if param in plot_params:\n plt_data[param][2][curr_point] = data[-1][1]\n plt_data[param][1].xData = time_data\n plt_data[param][1].update(plt_data[param][2])\n\n curr_point += 1\n\n # Resize plot arrays if necessary\n if array_size == curr_point:\n array_size *= 2\n logger.debug(\"New plot array size: %d\", array_size)\n time_data.resize(array_size)\n time_data[array_size//2:] = np.nan\n for pld in plt_data.values():\n pld[2].resize(array_size)\n pld[2][array_size//2:] = np.nan\n\n datasaver.add_result(*data)\n\n # Wait until the next point time. Try to keep track of how long it\n # took for equipment to respond\n next_time = start_time + delay*curr_point\n while time.monotonic() < next_time:\n sleep_time = max(0, min(0.01, time.monotonic() - next_time))\n _interruptible_sleep(sleep_time)\n except KeyboardInterrupt:\n print(f\"Trace cancelled with Ctrl-C\")\n print(f\"Ending plot at time {time.monotonic() - start_time}.\")\n finally:\n _run_functions(atend)\n\n return datasaver.run_id"
] | [
[
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
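`sweep_time` above grows its plot buffers geometrically rather than resizing on every point; the same idiom in isolation (run as a plain script, since `ndarray.resize` refuses to act in-place when extra references exist, e.g. in a REPL):

```python
# Geometric buffer growth as used for time_data above: double the array
# when full and NaN-fill the new tail so unplotted points stay blank.
import numpy as np

data = np.full((1,), np.nan)
size = 1
for n, value in enumerate([1.0, 2.0, 3.0]):
    data[n] = value
    if n + 1 == size:        # buffer is full: double it
        size *= 2
        data.resize(size)    # in-place; the new tail is zero-filled
        data[size // 2:] = np.nan
print(data)                  # [ 1.  2.  3. nan]
```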
matan-arnon/howdy | [
"bea79c00a79b286d632875338f89c1b3ba3ac277"
] | [
"howdy/src/cli/add.py"
] | [
"# Save the face of the user in encoded form\n\n# Import required modules\nimport time\nimport os\nimport sys\nimport json\nimport configparser\nimport builtins\nimport numpy as np\n\nfrom recorders.video_capture import VideoCapture\nfrom i18n import _\n\n# Try to import dlib and give a nice error if we can't\n# Add should be the first point where import issues show up\ntry:\n\timport dlib\nexcept ImportError as err:\n\tprint(err)\n\n\tprint(_(\"\\nCan't import the dlib module, check the output of\"))\n\tprint(\"pip3 show dlib\")\n\tsys.exit(1)\n\n# OpenCV needs to be imported after dlib\nimport cv2\n\n# Get the absolute path to the current directory\npath = os.path.abspath(__file__ + \"/..\")\n\n# Test if at lest 1 of the data files is there and abort if it's not\nif not os.path.isfile(path + \"/../dlib-data/shape_predictor_5_face_landmarks.dat\"):\n\tprint(_(\"Data files have not been downloaded, please run the following commands:\"))\n\tprint(\"\\n\\tcd \" + os.path.realpath(path + \"/../dlib-data\"))\n\tprint(\"\\tsudo ./install.sh\\n\")\n\tsys.exit(1)\n\n# Read config from disk\nconfig = configparser.ConfigParser()\nconfig.read(path + \"/../config.ini\")\n\nuse_cnn = config.getboolean(\"core\", \"use_cnn\", fallback=False)\nif use_cnn:\n\tface_detector = dlib.cnn_face_detection_model_v1(path + \"/../dlib-data/mmod_human_face_detector.dat\")\nelse:\n\tface_detector = dlib.get_frontal_face_detector()\n\npose_predictor = dlib.shape_predictor(path + \"/../dlib-data/shape_predictor_5_face_landmarks.dat\")\nface_encoder = dlib.face_recognition_model_v1(path + \"/../dlib-data/dlib_face_recognition_resnet_model_v1.dat\")\n\nuser = builtins.howdy_user\n# The permanent file to store the encoded model in\nenc_file = path + \"/../models/\" + user + \".dat\"\n# Known encodings\nencodings = []\n\n# Make the ./models folder if it doesn't already exist\nif not os.path.exists(path + \"/../models\"):\n\tprint(_(\"No face model folder found, creating one\"))\n\tos.makedirs(path + \"/../models\")\n\n# To try read a premade encodings file if it exists\ntry:\n\tencodings = json.load(open(enc_file))\nexcept FileNotFoundError:\n\tencodings = []\n\n# Print a warning if too many encodings are being added\nif len(encodings) > 3:\n\tprint(_(\"NOTICE: Each additional model slows down the face recognition engine slightly\"))\n\tprint(_(\"Press Ctrl+C to cancel\\n\"))\n\n# Make clear what we are doing if not human\nif not builtins.howdy_args.plain:\n\tprint(_(\"Adding face model for the user \") + user)\n\n# Set the default label\nlabel = \"Initial model\"\n\n# Get the label from the cli arguments if provided\nif builtins.howdy_args.arguments:\n\tlabel = builtins.howdy_args.arguments[0]\n\n# If models already exist, set that default label\nelif encodings:\n\tlabel = _(\"Model #\") + str(len(encodings) + 1)\n\n# Keep de default name if we can't ask questions\nif builtins.howdy_args.y:\n\tprint(_('Using default label \"%s\" because of -y flag') % (label, ))\nelse:\n\t# Ask the user for a custom label\n\tlabel_in = input(_(\"Enter a label for this new model [{}]: \").format(label))\n\n\t# Set the custom label (if any) and limit it to 24 characters\n\tif label_in != \"\":\n\t\tlabel = label_in[:24]\n\n# Remove illegal characters\nif \",\" in label:\n\tprint(_(\"NOTICE: Removing illegal character \\\",\\\" from model name\"))\n\tlabel = label.replace(\",\", \"\")\n\n# Prepare the metadata for insertion\ninsert_model = {\n\t\"time\": int(time.time()),\n\t\"label\": label,\n\t\"id\": len(encodings),\n\t\"data\": 
[]\n}\n\n# Set up video_capture\nvideo_capture = VideoCapture(config)\n\nprint(_(\"\\nPlease look straight into the camera\"))\n\n# Give the user time to read\ntime.sleep(2)\n\n# Will contain found face encodings\nenc = []\n# Count the number of read frames\nframes = 0\n# Count the number of illuminated read frames\nvalid_frames = 0\n# Count the number of illuminated frames that\n# were rejected for being too dark\ndark_tries = 0\n# Track the running darkness total\ndark_running_total = 0\nface_locations = None\n\ndark_threshold = config.getfloat(\"video\", \"dark_threshold\")\n\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n\n# Loop through frames till we hit a timeout\nwhile frames < 60:\n\tframes += 1\n\t# Grab a single frame of video\n\tframe, gsframe = video_capture.read_frame()\n\tgsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tgsframe = clahe.apply(gsframe)\n\n\t# Create a histogram of the image with 8 values\n\thist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])\n\t# All values combined for percentage calculation\n\thist_total = np.sum(hist)\n\n\t# Calculate frame darkness\n\tdarkness = (hist[0] / hist_total * 100)\n\n\t# If the image is fully black due to a bad camera read,\n\t# skip to the next frame\n\tif (hist_total == 0) or (darkness == 100):\n\t\tcontinue\n\n\t# Include this frame in calculating our average session brightness\n\tdark_running_total += darkness\n\tvalid_frames += 1\n\n\t# If the image exceeds darkness threshold due to subject distance,\n\t# skip to the next frame\n\tif (darkness > dark_threshold):\n\t\tdark_tries += 1\n\t\tcontinue\n\n\t# Get all faces from that frame as encodings\n\tface_locations = face_detector(gsframe, 1)\n\n\t# If we've found at least one, we can continue\n\tif face_locations:\n\t\tbreak\n\nvideo_capture.release()\n\n# If we've found no faces, try to determine why\nif face_locations is None or not face_locations:\n\tif valid_frames == 0:\n\t\tprint(_(\"Camera saw only black frames - is IR emitter working?\"))\n\telif valid_frames == dark_tries:\n\t\tprint(_(\"All frames were too dark, please check dark_threshold in config\"))\n\t\tprint(_(\"Average darkness: {avg}, Threshold: {threshold}\").format(avg=str(dark_running_total / valid_frames), threshold=str(dark_threshold)))\n\telse:\n\t\tprint(_(\"No face detected, aborting\"))\n\tsys.exit(1)\n\n# If more than 1 faces are detected we can't know wich one belongs to the user\nelif len(face_locations) > 1:\n\tprint(_(\"Multiple faces detected, aborting\"))\n\tsys.exit(1)\n\nface_location = face_locations[0]\nif use_cnn:\n\tface_location = face_location.rect\n\n# Get the encodings in the frame\nface_landmark = pose_predictor(frame, face_location)\nface_encoding = np.array(face_encoder.compute_face_descriptor(frame, face_landmark, 1))\n\ninsert_model[\"data\"].append(face_encoding.tolist())\n\n# Insert full object into the list\nencodings.append(insert_model)\n\n# Save the new encodings to disk\nwith open(enc_file, \"w\") as datafile:\n\tjson.dump(encodings, datafile)\n\n# Give let the user know how it went\nprint(_(\"\"\"\\nScan complete\nAdded a new model to \"\"\") + user)\n"
] | [
[
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
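The darkness check above reduces a frame to an 8-bin intensity histogram and takes the lowest bin's share; a standalone sketch of the same computation on a synthetic frame:

```python
# Share of pixels in the darkest of 8 intensity bins, as in the add
# routine above, computed here on random grayscale data.
import cv2
import numpy as np

gsframe = np.random.randint(0, 256, (120, 160), dtype=np.uint8)
hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
darkness = 100.0 * hist[0].item() / hist.sum()
print(f"{darkness:.1f}% of pixels fall in the darkest bin")
```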
Flsahkong/seeDiff | [
"730eaca8528d22ed3aa6b4dbc1965828a697cf9a"
] | [
"lib/model/faster_rcnn/faster_rcnn_global_local.py"
] | [
"import random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport numpy as np\nfrom model.utils.config import cfg\nfrom model.rpn.rpn import _RPN\nfrom model.roi_pooling.modules.roi_pool import _RoIPooling\nfrom model.roi_crop.modules.roi_crop import _RoICrop\nfrom model.roi_align.modules.roi_align import RoIAlignAvg\nfrom model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer\nimport time\nimport pdb\nfrom model.utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta,grad_reverse\n\nclass _fasterRCNN(nn.Module):\n \"\"\" faster RCNN \"\"\"\n def __init__(self, classes, class_agnostic,lc,gc):\n super(_fasterRCNN, self).__init__()\n self.classes = classes\n self.n_classes = len(classes)\n self.class_agnostic = class_agnostic\n # loss\n self.RCNN_loss_cls = 0\n self.RCNN_loss_bbox = 0\n self.lc = lc\n self.gc = gc\n # define rpn\n self.RCNN_rpn = _RPN(self.dout_base_model)\n self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)\n self.RCNN_roi_pool = _RoIPooling(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\n self.RCNN_roi_align = RoIAlignAvg(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\n\n self.grid_size = cfg.POOLING_SIZE * 2 if cfg.CROP_RESIZE_WITH_MAX_POOL else cfg.POOLING_SIZE\n self.RCNN_roi_crop = _RoICrop()\n\n def forward(self, im_data, im_info, gt_boxes, num_boxes,target=False,eta=1.0):\n batch_size = im_data.size(0)\n\n im_info = im_info.data\n gt_boxes = gt_boxes.data\n num_boxes = num_boxes.data\n\n # feed image data to base model to obtain base feature map\n base_feat1 = self.RCNN_base1(im_data)\n if self.lc:\n d_pixel, _ = self.netD_pixel(grad_reverse(base_feat1, lambd=eta))\n #print(d_pixel)\n if not target:\n _, feat_pixel = self.netD_pixel(base_feat1.detach())\n else:\n d_pixel = self.netD_pixel(grad_reverse(base_feat1, lambd=eta))\n base_feat = self.RCNN_base2(base_feat1)\n if self.gc:\n domain_p, _ = self.netD(grad_reverse(base_feat, lambd=eta))\n if target:\n return d_pixel,domain_p#, diff\n _,feat = self.netD(base_feat.detach())\n else:\n domain_p = self.netD(grad_reverse(base_feat, lambd=eta))\n if target:\n return d_pixel,domain_p#,diff\n # feed base feature map tp RPN to obtain rois\n rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)\n\n # if it is training phrase, then use ground trubut bboxes for refining\n if self.training:\n roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)\n rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data\n\n rois_label = Variable(rois_label.view(-1).long())\n rois_target = Variable(rois_target.view(-1, rois_target.size(2)))\n rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))\n rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))\n else:\n rois_label = None\n rois_target = None\n rois_inside_ws = None\n rois_outside_ws = None\n rpn_loss_cls = 0\n rpn_loss_bbox = 0\n\n rois = Variable(rois)\n # do roi pooling based on predicted rois\n\n if cfg.POOLING_MODE == 'crop':\n # pdb.set_trace()\n # pooled_feat_anchor = _crop_pool_layer(base_feat, rois.view(-1, 5))\n grid_xy = _affine_grid_gen(rois.view(-1, 5), base_feat.size()[2:], self.grid_size)\n grid_yx = torch.stack([grid_xy.data[:,:,:,1], grid_xy.data[:,:,:,0]], 3).contiguous()\n pooled_feat = self.RCNN_roi_crop(base_feat, Variable(grid_yx).detach())\n if 
cfg.CROP_RESIZE_WITH_MAX_POOL:\n pooled_feat = F.max_pool2d(pooled_feat, 2, 2)\n elif cfg.POOLING_MODE == 'align':\n pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))\n elif cfg.POOLING_MODE == 'pool':\n pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))\n\n # feed pooled features to top model\n pooled_feat = self._head_to_tail(pooled_feat)\n #feat_pixel = torch.zeros(feat_pixel.size()).cuda()\n if self.lc:\n feat_pixel = feat_pixel.view(1, -1).repeat(pooled_feat.size(0), 1)\n pooled_feat = torch.cat((feat_pixel, pooled_feat), 1)\n if self.gc:\n feat = feat.view(1, -1).repeat(pooled_feat.size(0), 1)\n pooled_feat = torch.cat((feat, pooled_feat), 1)\n # compute bbox offset\n\n # compute bbox offset\n bbox_pred = self.RCNN_bbox_pred(pooled_feat)\n if self.training and not self.class_agnostic:\n bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)\n bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))\n bbox_pred = bbox_pred_select.squeeze(1)\n\n # compute object classification probability\n cls_score = self.RCNN_cls_score(pooled_feat)\n cls_prob = F.softmax(cls_score, 1)\n\n RCNN_loss_cls = 0\n RCNN_loss_bbox = 0\n\n if self.training:\n # classification loss\n RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)\n\n # bounding box regression L1 loss\n RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)\n\n\n cls_prob = cls_prob.view(batch_size, rois.size(1), -1)\n bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)\n\n return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label,d_pixel, domain_p#,diff\n\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n\n normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n def create_architecture(self):\n self._init_modules()\n self._init_weights()\n"
] | [
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.functional.cross_entropy",
"torch.stack",
"torch.nn.functional.max_pool2d",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xingchenwan/nasbowl | [
"0abaa91b6ce436655a7488f75ed5aeca8df71246"
] | [
"kernels/multiscale_laplacian.py"
] | [
"from .weisfilerlehman import GraphKernels\nfrom grakel.utils import graph_from_networkx\nfrom grakel_replace.multiscale_laplacian import MultiscaleLaplacianFast as MLF, MultiscaleLaplacian as ML\nimport torch\nfrom .utils import transform_to_undirected\n\n\nclass MultiscaleLaplacian(GraphKernels):\n def __init__(self,\n n: int = 1,\n n_jobs: int = 1,\n random_state=None,\n gamma: float = 0.01,\n heta: float = 0.01,\n max_n_eigs: int = 3,\n n_vertex_samples: int = 5,\n fast: bool = True,\n node_label: str = 'op_name',\n edge_label: tuple = None,\n return_tensor: bool = True,\n reindex_node_label: bool = True,\n **kwargs\n ):\n super(MultiscaleLaplacian, self).__init__(**kwargs)\n self.n = n\n self.random_state = random_state\n self.gamma = gamma\n self.heta = heta\n self.max_n_eigs = max_n_eigs\n self.n_vertex_samples = n_vertex_samples\n if fast:\n self.kern = MLF(n_jobs, normalize=True, random_state=random_state,\n L=n, P=max_n_eigs, gamma=gamma, heta=heta,\n n_samples=n_vertex_samples)\n else:\n self.kern = ML(n_jobs, True, False, L=n,\n gamma=gamma, heta=heta)\n self.node_label = node_label\n self.edge_label = edge_label\n self.return_tensor = return_tensor\n self._gram = None\n self._train = None\n self.reindex_node_label = reindex_node_label\n self.check_dict = {}\n self.__name__ = 'MultiscaleLaplacian'\n\n def _reindex_node_label(self, gr: list):\n \"\"\"It seems that MLK needs numeric node features. Reindex the feature\"\"\"\n gr_copy = gr[:]\n idx = 0\n for i, g in enumerate(gr_copy):\n for node, attr in g.nodes(data=True):\n if attr[self.node_label] not in self.check_dict.keys():\n self.check_dict.update({attr[self.node_label]: idx})\n # Assign the index\n gr_copy[i].nodes[node][self.node_label] = idx\n idx += 1\n else:\n gr_copy[i].nodes[node][self.node_label] = self.check_dict[attr[self.node_label]]\n return gr_copy\n\n def fit_transform(self, gr: list, rebuild_model=False, save_gram_matrix=False, **kwargs):\n if rebuild_model is False and self._gram is not None:\n return self._gram\n gr = transform_to_undirected(gr)\n if self.reindex_node_label:\n gr = self._reindex_node_label(gr)\n gr_ = graph_from_networkx(gr, self.node_label, self.edge_label)\n K = self.kern.fit_transform(gr_)\n if self.return_tensor:\n K = torch.tensor(K)\n if save_gram_matrix:\n self._gram = K.clone()\n self._train = gr[:]\n return K\n\n def transform(self, gr: list, ):\n gr = transform_to_undirected(gr)\n if self.reindex_node_label:\n gr = self._reindex_node_label(gr)\n gr_ = graph_from_networkx(gr, self.node_label, self.edge_label)\n K = self.kern.transform(gr_)\n if self.return_tensor:\n K = torch.tensor(K)\n return K\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tiredamage42/WikiCrawler | [
"264d3511dec20bcf9c633315d3d6c93aa64cbe5c"
] | [
"dense_layer.py"
] | [
"import tensorflow.compat.v1 as tf\nimport graph_utils\ntf.disable_v2_behavior()\n\n'''\nfully connected dense layer\n'''\nclass DenseLayer():\n def __init__(self, layer_name, features):\n self.is_built = False\n self.layer_name = layer_name\n self.features = features\n\n def __call__(self, in_tensor):\n with tf.variable_scope(self.layer_name) as scope:\n in_shape_g = tf.shape(in_tensor)\n in_shape_l = in_tensor.get_shape().as_list()\n\n if not self.is_built:\n self.w = graph_utils.get_variable(\n 'weights', [in_shape_l[-1], self.features], dtype=tf.float32, \n initializer=None, trainable=True, save_var=True\n )\n self.b = graph_utils.get_variable(\n 'biases', [self.features], dtype=tf.float32, \n initializer=None, trainable=True, save_var=True\n )\n \n self.is_built = True\n\n # if we get a tensor of shape [ batch, sequence, features ]\n # reshape so it's shape [ batch * sequence, features ]\n needs_reshape = len(in_shape_l) != 2\n if needs_reshape:\n in_tensor = tf.reshape(in_tensor, [ -1, in_shape_l[-1] ], name=\"flatten\")\n\n # matrix multiplication\n out_t = tf.matmul(in_tensor, self.w, name=\"mx\")\n\n # add bias\n out_t = tf.add(out_t, self.b, name=\"add_bias\")\n \n # reshape back to [ batch, sequence, self.features ]\n # after our matrix multiplication\n if needs_reshape:\n out_t = tf.reshape(out_t, tf.concat([in_shape_g[:-1], tf.constant([self.features])], -1))\n \n return out_t\n\n "
] | [
[
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.add",
"tensorflow.compat.v1.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |